Find a face using Haar and copy it somewhere else

I want to detect a face using the Haar algorithm in OpenCV and draw it in another area.

I’m able to draw a white box around faces in the correct place on the original image.

However, when I copy the region of the pixels detected by Haar into an ofTexture and draw it somewhere else on the screen, something strange happens.

The image that gets drawn is the same size as the face, but the pixels are not in the correct place.
If I put my face somewhere near the center, it lines up correctly, but if I move away from the center the image pans; it's almost as if my face were a cursor choosing which region of the x-y plane to show in the copied image.

I’m using some of the ofxCvHaarFinderExample addon code and injecting some of nick_k’s code:
http://forum.openframeworks.cc/t/face-tracking/167/0
download/file.php?id=7

Here is my code:

  
  
// testApp.h  
#pragma once  
  
#include "ofMain.h"  
#include "ofxCvHaarFinder.h"  
#include "ofxOpenCv.h"  
  
class testApp : public ofBaseApp{  
	public:  
		void setup();  
		void update();  
		void draw();  
		  
		void keyPressed(int key);  
		void keyReleased(int key);  
		void mouseMoved(int x, int y );  
		void mouseDragged(int x, int y, int button);  
		void mousePressed(int x, int y, int button);  
		void mouseReleased(int x, int y, int button);  
		void windowResized(int w, int h);  
		void dragEvent(ofDragInfo dragInfo);  
		void gotMessage(ofMessage msg);		  
  
		ofImage img;  
		ofxCvHaarFinder finder;  
	  
		ofVideoGrabber 		vidGrabber;  
		ofxCvColorImage			colorImg;  
	  
		ofxCvGrayscaleImage 	grayImage;  
		ofxCvGrayscaleImage 	grayBg;  
		ofxCvGrayscaleImage 	grayDiff;  
		  
		ofxCvContourFinder 	contourFinder;  
		  
		int 				threshold;  
		bool				bLearnBakground;	  
	  
		ofTexture cropTexture;  
	  
		unsigned char * pixels;  
};  
  
// testApp.cpp  
#include "testApp.h"  
  
//--------------------------------------------------------------  
void testApp::setup(){  
	vidGrabber.setVerbose(true);  
	vidGrabber.initGrabber(320,240);  
	  
    colorImg.allocate(320,240);  
	grayImage.allocate(320,240);  
	grayBg.allocate(320,240);  
	grayDiff.allocate(320,240);  
	  
	bLearnBakground = true;  
	threshold = 80;  
  
	  
	//img.loadImage("test.jpg");  
	finder.setup("haarcascade_frontalface_default.xml");  
	//finder.findHaarObjects(img);  
}  
  
//--------------------------------------------------------------  
void testApp::update(){  
	ofBackground(100,100,100);  
	  
    bool bNewFrame = false;  
	  
	vidGrabber.grabFrame();  
	bNewFrame = vidGrabber.isFrameNew();  
	  
	if (bNewFrame){  
		  
		pixels = vidGrabber.getPixels();  
		colorImg.setFromPixels(pixels, 320,240);  
		  
        grayImage = colorImg;  
		if (bLearnBakground == true){  
			grayBg = grayImage;		// the = sign copies the pixels from grayImage into grayBg (operator overloading)  
			bLearnBakground = false;  
		}  
		  
		// take the abs value of the difference between background and incoming and then threshold:  
		grayDiff.absDiff(grayBg, grayImage);  
		grayDiff.threshold(threshold);  
		  
		// find contours which are between the size of 20 pixels and 1/3 the w*h pixels.  
		// also, find holes is set to true so we will get interior contours as well....  
		contourFinder.findContours(grayDiff, 20, (320*240)/3, 10, true);	// find holes  
		finder.findHaarObjects(grayDiff);  
	}  
}  
  
//--------------------------------------------------------------  
void testApp::draw(){  
	colorImg.draw(0,0);  
  
	glPushMatrix();  
	if (finder.blobs.size() > 0) {  
		ofRectangle cur = finder.blobs[0].boundingRect;  
  
		float x,y,w,h;	  
		  
		int cropWidth;  
		int cropHeight;  
		  
		int haarfinderx;  
		int haarfindery;  
		int mainPixelPos;  
		int subPixlPos;		  
  
		int camWidth = 320;  
		  
        x = finder.blobs[0].boundingRect.x;  
		y = finder.blobs[0].boundingRect.y;  
		w = finder.blobs[0].boundingRect.width;  
		h = finder.blobs[0].boundingRect.height;  
  
		cropWidth = (int) w;  
		cropHeight = (int) h;  
		haarfinderx = (int) x;  
		haarfindery = (int) y;  
		  
        cropTexture.allocate(cropWidth, cropHeight, GL_RGB);  
		  
        unsigned char subRegion[ cropWidth * cropHeight * 3  ];  // R G B  
		  
        for (int i = 0; i < cropHeight; i++) {  
			  
            for (int j = 0; j < cropWidth; j++) {  
				  
                mainPixelPos = ((j+haarfinderx) * camWidth + (i+haarfindery)) * 3;  
                subPixlPos = (j * cropWidth + i) * 3;  
                subRegion[subPixlPos] = pixels[mainPixelPos];          // R  
                subRegion[subPixlPos + 1] = pixels[mainPixelPos + 1];  // G  
                subRegion[subPixlPos + 2] = pixels[mainPixelPos + 2];  // B  
				  
            }  
        }  
		  
		  
        cropTexture.loadData(subRegion, cropWidth, cropHeight, GL_RGB);  
		  
        glPushMatrix();  
		cropTexture.draw(500, 500);  
        glPopMatrix();  
  
	}  
	  
	glPushMatrix();  
	  
	  
	ofNoFill();  
	for(int i = 0; i < finder.blobs.size(); i++) {  
		ofRectangle cur = finder.blobs[i].boundingRect;  
		ofRect(cur.x, cur.y, cur.width, cur.height);  
	}  
}  
  
//--------------------------------------------------------------  
void testApp::keyPressed(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::keyReleased(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseMoved(int x, int y ){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseDragged(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mousePressed(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseReleased(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::windowResized(int w, int h){  
  
}  
  
//--------------------------------------------------------------  
void testApp::gotMessage(ofMessage msg){  
  
}  
  
//--------------------------------------------------------------  
void testApp::dragEvent(ofDragInfo dragInfo){   
  
}  

Hiya - I was just sorting this out. I cobbled together the function below from various forum posts. Seems to work ok. This is based on a user selection but it should work with any rect data. Full test.cpp at bottom.
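
Incidentally, the panning you're describing most likely comes from the copy loop in your draw(): (j + haarfinderx) gets multiplied by camWidth as if it were a row, and (i + haarfindery) is used as the column, so the x and y offsets end up swapped. A rough sketch of that inner loop with rows and columns put back the right way round (same variable names as your code, so it should drop straight in):

        for (int i = 0; i < cropHeight; i++) {          // i = row offset within the crop
            for (int j = 0; j < cropWidth; j++) {       // j = column offset within the crop
                mainPixelPos = ((i + haarfindery) * camWidth + (j + haarfinderx)) * 3;  // row * stride + column
                subPixlPos = (i * cropWidth + j) * 3;
                subRegion[subPixlPos] = pixels[mainPixelPos];          // R
                subRegion[subPixlPos + 1] = pixels[mainPixelPos + 1];  // G
                subRegion[subPixlPos + 2] = pixels[mainPixelPos + 2];  // B
            }
        }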

Oh - it wasn’t clear to me whether any memory cleanup is needed in the crop routine. I'm pretty new at C++.

  
ofImage testApp::crop(ofImage* sImg, int x, int y, int w, int h){  
  
	int sW = sImg->getWidth();  
	int sH = sImg->getHeight();  
  
	ofImage tmpImg;  
 	tmpImg.allocate(w, h, OF_IMAGE_COLOR);  
	   
	unsigned char subRegion[ w * h * 3  ];   
	unsigned char * srcPixels = sImg->getPixels();  
		  
	for (int i = 0; i < w; i++){                // i = column offset within the crop  
		for (int j = 0; j < h; j++){            // j = row offset within the crop  
			int mainPixelPos = ((j + y) * sW + (i + x)) * 3;  // row-major index into the source image  
			int subPixlPos = (j * w + i) * 3;                 // row-major index into the crop  
			  
			subRegion[subPixlPos] = srcPixels[mainPixelPos];   // R  
			subRegion[subPixlPos + 1] = srcPixels[mainPixelPos + 1];  // G  
			subRegion[subPixlPos + 2] = srcPixels[mainPixelPos + 2];  // B  
		}  
	}  
	tmpImg.setFromPixels(subRegion, w, h,  OF_IMAGE_COLOR);  
	return tmpImg;  
}  
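
On the memory question: subRegion is a stack array, so there is nothing to delete by hand, but a variable-length array like that is a compiler extension rather than standard C++. A rough sketch of the same function with a std::vector buffer instead, which lives on the heap and is freed automatically when it goes out of scope (add #include <vector> if it isn't already pulled in):

ofImage testApp::crop(ofImage* sImg, int x, int y, int w, int h){

	int sW = sImg->getWidth();

	ofImage tmpImg;
	tmpImg.allocate(w, h, OF_IMAGE_COLOR);

	std::vector<unsigned char> subRegion(w * h * 3);   // heap buffer, cleaned up automatically
	unsigned char * srcPixels = sImg->getPixels();

	for (int i = 0; i < w; i++){
		for (int j = 0; j < h; j++){
			int mainPixelPos = ((j + y) * sW + (i + x)) * 3;
			int subPixlPos = (j * w + i) * 3;

			subRegion[subPixlPos] = srcPixels[mainPixelPos];          // R
			subRegion[subPixlPos + 1] = srcPixels[mainPixelPos + 1];  // G
			subRegion[subPixlPos + 2] = srcPixels[mainPixelPos + 2];  // B
		}
	}
	tmpImg.setFromPixels(&subRegion[0], w, h, OF_IMAGE_COLOR);
	return tmpImg;
}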

…test.cpp

  
#include "testApp.h"  
  
//--------------------------------------------------------------  
void testApp::setup(){  
	selecting   = false;  
	selection.x = 0;  
	selection.y = 0;  
	selection.width = 1;  
	selection.height = 1;  
	im.loadImage("img3.jpg");  
   
	cropped = crop(&im, 100, 100, 100, 100);  
}  
  
//--------------------------------------------------------------  
void testApp::update(){  
  
}  
  
//--------------------------------------------------------------  
void testApp::draw(){  
	im.draw(0, 0);  
   
	if(selecting){  
		ofNoFill();  
		ofRect(selection.x, selection.y, selection.width, selection.height);	  
	}  
	cropped.draw(600,300);  
}  
  
ofImage testApp::crop(ofImage* sImg, int x, int y, int w, int h){  
  
	int sW = sImg->getWidth();  
	int sH = sImg->getHeight();  
  
	ofImage tmpImg;  
 	tmpImg.allocate(w, h, OF_IMAGE_COLOR);  
	   
	unsigned char subRegion[ w * h * 3  ];   
	unsigned char * srcPixels = sImg->getPixels();  
		  
	for (int i = 0; i < w; i++){  
		for (int j = 0; j < h; j++){  
			int mainPixelPos = ((j + y) * sW + (i + x)) * 3;  
			int subPixlPos = (j * w + i) * 3;  
			  
			subRegion[subPixlPos] = srcPixels[mainPixelPos];   // R  
			subRegion[subPixlPos + 1] = srcPixels[mainPixelPos + 1];  // G  
			subRegion[subPixlPos + 2] = srcPixels[mainPixelPos + 2];  // B  
		}  
	}  
	tmpImg.setFromPixels(subRegion, w, h,  OF_IMAGE_COLOR);  
	return tmpImg;  
}  
  
//--------------------------------------------------------------  
void testApp::keyPressed(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::keyReleased(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseDragged(int x, int y, int button){  
	selection.x = MIN(x,origin.x);  
	selection.y = MIN(y,origin.y);  
	selection.width = selection.x + CV_IABS(x - origin.x);  
	selection.height = selection.y + CV_IABS(y - origin.y);  
	  
	selection.x = MAX( selection.x, 0 );  
	selection.y = MAX( selection.y, 0 );  
	selection.width = MIN( selection.width, ofGetWidth() );  
	selection.height = MIN( selection.height, ofGetHeight() );  
	selection.width -= selection.x;  
	selection.height -= selection.y;  
}  
  
//--------------------------------------------------------------  
void testApp::mousePressed(int x, int y, int button){  
	origin = cvPoint(x,y);  
	selection = cvRect(x,y,0,0);  
	selecting = true;  
}  
  
//--------------------------------------------------------------  
void testApp::mouseReleased(int x, int y, int button){  
	if( selection.width > 0 && selection.height > 0 ){  
		cropped.clear();  
		cropped = crop(&im, selection.x, selection.y, selection.width, selection.height);  
	}  
	selecting = false;  
}  
//--------------------------------------------------------------  
void testApp::mouseMoved(int x, int y ){  
  
}  
  
   
  
//--------------------------------------------------------------  
void testApp::windowResized(int w, int h){  
  
}  
  
  

…test.h

  
#ifndef _TEST_APP  
#define _TEST_APP  
  
  
#include "ofMain.h"  
#include "ofxOpenCv.h"  
  
class testApp : public ofBaseApp{  
  
	public:  
		void setup();  
		void update();  
		void draw();  
  
		void keyPressed  (int key);  
		void keyReleased(int key);  
		void mouseMoved(int x, int y );  
		void mouseDragged(int x, int y, int button);  
		void mousePressed(int x, int y, int button);  
		void mouseReleased(int x, int y, int button);  
		void windowResized(int w, int h);  
		  
		ofImage crop(ofImage* sImg, int x1, int y1, int w, int h);  
   
		ofTexture*  invert_color_image(ofImage* im);  
		ofTexture*   invert_color_image_selection(ofImage* im, int w, int h);  
		ofImage im;  
		ofImage cropped;  
		ofTexture* inverted;  
		   
		bool				selecting;  
		CvPoint				origin;  
		CvRect				selection;  
};  
  
#endif  

Awesome! Here’s the updated code:

  
// testApp.h  
#pragma once  
  
#include "ofMain.h"  
#include "ofxCvHaarFinder.h"  
#include "ofxOpenCv.h"  
  
class testApp : public ofBaseApp{  
	public:  
		void setup();  
		void update();  
		void draw();  
		  
		void keyPressed(int key);  
		void keyReleased(int key);  
		void mouseMoved(int x, int y );  
		void mouseDragged(int x, int y, int button);  
		void mousePressed(int x, int y, int button);  
		void mouseReleased(int x, int y, int button);  
		void windowResized(int w, int h);  
		void dragEvent(ofDragInfo dragInfo);  
		void gotMessage(ofMessage msg);  
		ofImage crop(ofImage* sImg, int x1, int y1, int w, int h);  
  
		ofImage img;  
		ofxCvHaarFinder finder;  
	  
		ofVideoGrabber 		vidGrabber;  
		ofxCvColorImage			colorImg;  
	  
		ofxCvGrayscaleImage 	grayImage;  
		ofxCvGrayscaleImage 	grayBg;  
		ofxCvGrayscaleImage 	grayDiff;  
		  
		ofxCvContourFinder 	contourFinder;  
		  
		int 				threshold;  
		bool				bLearnBakground;	  
	  
		ofTexture cropTexture;  
	  
		unsigned char * pixels;  
	  
		ofImage im;  
		ofImage cropped;  
};  
  
// testApp.cpp  
#include "testApp.h"  
  
//--------------------------------------------------------------  
void testApp::setup(){  
	vidGrabber.setVerbose(true);  
	vidGrabber.initGrabber(320,240);  
	  
    colorImg.allocate(320,240);  
	grayImage.allocate(320,240);  
	grayBg.allocate(320,240);  
	grayDiff.allocate(320,240);  
	  
	im.allocate(320, 240, OF_IMAGE_COLOR);  
	  
	bLearnBakground = true;  
	threshold = 80;  
  
	  
	//img.loadImage("test.jpg");  
	finder.setup("haarcascade_frontalface_default.xml");  
	//finder.findHaarObjects(img);  
}  
  
//--------------------------------------------------------------  
void testApp::update(){  
	ofBackground(100,100,100);  
	  
    bool bNewFrame = false;  
	  
	vidGrabber.grabFrame();  
	bNewFrame = vidGrabber.isFrameNew();  
	  
	if (bNewFrame){  
		  
		pixels = vidGrabber.getPixels();  
		colorImg.setFromPixels(pixels, 320,240);  
		im.setFromPixels(pixels, 320, 240, OF_IMAGE_COLOR);  
		  
        grayImage = colorImg;  
		if (bLearnBakground == true){  
			grayBg = grayImage;		// the = sign copies the pixels from grayImage into grayBg (operator overloading)  
			bLearnBakground = false;  
		}  
		  
		// take the abs value of the difference between background and incoming and then threshold:  
		grayDiff.absDiff(grayBg, grayImage);  
		grayDiff.threshold(threshold);  
		  
		// find contours which are between the size of 20 pixels and 1/3 the w*h pixels.  
		// also, find holes is set to true so we will get interior contours as well....  
		contourFinder.findContours(grayDiff, 20, (320*240)/3, 10, true);	// find holes  
		finder.findHaarObjects(grayDiff);  
	}  
}  
  
//--------------------------------------------------------------  
void testApp::draw(){  
	colorImg.draw(0,0);  
	if (finder.blobs.size() > 0) {  
		ofRectangle cur = finder.blobs[0].boundingRect;  
		cropped.allocate(cur.width, cur.height, OF_IMAGE_COLOR);  
		cropped = crop(&im, cur.x, cur.y, cur.width, cur.height);  
	}  
	  
	cropped.draw(400, 400);  
	  
	ofNoFill();  
	for(int i = 0; i < finder.blobs.size(); i++) {  
		ofRectangle cur = finder.blobs[i].boundingRect;  
		ofRect(cur.x, cur.y, cur.width, cur.height);  
	}  
}  
  
ofImage testApp::crop(ofImage* sImg, int x, int y, int w, int h){  
	  
	int sW = sImg->getWidth();  
	int sH = sImg->getHeight();  
	  
	ofImage tmpImg;  
	tmpImg.allocate(w, h, OF_IMAGE_COLOR);  
	  
	unsigned char subRegion[ w * h * 3  ];  
	unsigned char * srcPixels = sImg->getPixels();  
	  
	for (int i = 0; i < w; i++){  
		for (int j = 0; j < h; j++){  
			int mainPixelPos = ((j + y) * sW + (i + x)) * 3;  
			int subPixlPos = (j * w + i) * 3;  
			  
			subRegion[subPixlPos] = srcPixels[mainPixelPos];   // R  
			subRegion[subPixlPos + 1] = srcPixels[mainPixelPos + 1];  // G  
			subRegion[subPixlPos + 2] = srcPixels[mainPixelPos + 2];  // B  
		}  
	}  
	tmpImg.setFromPixels(subRegion, w, h,  OF_IMAGE_COLOR);  
   return tmpImg;  
}  
  
//--------------------------------------------------------------  
void testApp::keyPressed(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::keyReleased(int key){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseMoved(int x, int y ){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseDragged(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mousePressed(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::mouseReleased(int x, int y, int button){  
  
}  
  
//--------------------------------------------------------------  
void testApp::windowResized(int w, int h){  
  
}  
  
//--------------------------------------------------------------  
void testApp::gotMessage(ofMessage msg){  
  
}  
  
//--------------------------------------------------------------  
void testApp::dragEvent(ofDragInfo dragInfo){   
  
}  
  

Glad it worked.

I noticed you are from Boston - me too. Anyone else? Maybe we can get a meetup group together.

I would like to get this working in Xcode, but I am struggling to do so. Does anyone have an Xcode project example of this, or something similar that does basic face detection? Many thanks.

I recommend using the openCVexample project: if it runs fine, copy it and overwrite the files with the code here. If something fails, report back with the errors.

Thank you. I have done this and I am getting 8 errors. I have attached a screenshot of these errors. Would you be able to help? Cheers.

![](http://forum.openframeworks.cc/uploads/default/1605/Screen shot 2011-05-12 at 13.21.53.png)

OK, please ignore my previous post. I am now getting a new error. The application launches successfully; however, in doing so I get “GDB: Program received signal: ‘EXC_BAD_ACCESS’”. It then points to this line of code:

subRegion[subPixlPos] = srcPixels[mainPixelPos]; // R

Any ideas?
Thanks.

I have fixed this. It was because there was a file missing in my data folder.
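
For anyone else hitting that EXC_BAD_ACCESS: the copy loop reads straight out of the source pixel buffer, so it can also crash if the image never loaded (empty pixels) or if the detected rect runs past the edge of the frame. A rough guard you could call before crop() — the helper name here is just made up for the sketch:

// sketch: check the source image and clamp the rect to its bounds before cropping
bool safeToCrop(ofImage & src, int & x, int & y, int & w, int & h){
	int sW = src.getWidth();
	int sH = src.getHeight();
	if (sW == 0 || sH == 0) return false;       // image never loaded / allocated

	if (x < 0){ w += x; x = 0; }                // pull the rect back inside the image
	if (y < 0){ h += y; y = 0; }
	if (x + w > sW) w = sW - x;
	if (y + h > sH) h = sH - y;

	return (w > 0 && h > 0);
}

// usage, e.g. in draw():
//   int cx = cur.x, cy = cur.y, cw = cur.width, ch = cur.height;
//   if (safeToCrop(im, cx, cy, cw, ch)) cropped = crop(&im, cx, cy, cw, ch);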

Has anyone been successful at making a similar effect happen, but with a gradient ellipse around the face instead of a solid rectangle? I am trying to work from this example, but not getting very far. Does it involve a complex looping of the pixels to create a gradient around a certain area? Any help would be very very…helpful!
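
To give a better idea of what I mean by looping over the pixels: roughly something like copying the cropped face into an RGBA image and fading the alpha out towards an elliptical edge, along these lines (just a sketch of the idea, reusing the names from the code above):

// sketch: fade the cropped face towards an elliptical edge instead of a hard rectangle
ofImage masked;
masked.allocate(cropped.getWidth(), cropped.getHeight(), OF_IMAGE_COLOR_ALPHA);

unsigned char * src = cropped.getPixels();
unsigned char * dst = masked.getPixels();
int w = cropped.getWidth();
int h = cropped.getHeight();

for (int yy = 0; yy < h; yy++){
	for (int xx = 0; xx < w; xx++){
		// normalised elliptical distance from the centre: 0 in the middle, 1 at the edge
		float dx = (xx - w / 2.0f) / (w / 2.0f);
		float dy = (yy - h / 2.0f) / (h / 2.0f);
		float fade = ofClamp(1.0f - sqrt(dx * dx + dy * dy), 0.0f, 1.0f);

		int s = (yy * w + xx) * 3;   // RGB source
		int t = (yy * w + xx) * 4;   // RGBA destination
		dst[t] = src[s];
		dst[t + 1] = src[s + 1];
		dst[t + 2] = src[s + 2];
		dst[t + 3] = (unsigned char)(fade * 255);   // gradient alpha
	}
}
masked.update();   // push the edited pixels back to the texture

// then draw with blending on:
//   ofEnableAlphaBlending();
//   masked.draw(400, 400);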

Thanks.