Face tracking code only tracks faces; it does not do any interchange. So, basically this is
what I have been trying to do lately. I have looked at the code and also at the cropping-part-of-an-image example and I have come up with the following (I am also including the
source files).
During this phase of implementation I want to grab the tracked face, crop it and
display it somewhere else on the screen. Well it works in a way, but it displays the
following problem: when the tracked face is at the left side of the display window (move
your face to the left of the rect), the displayed cropped image appears fine. But when
you move your face to the right, the cropped face gets distorted. Thanks in advance for
any help.
When all this is done (implement face interchange), I plan to upload all the source.
By the way, thanks openframeworks!
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
	// Capture resolution requested from the camera / movie source.
	camWidth  = 320;
	camHeight = 240;
	// camWidth  = 800;  // alternative, higher-resolution grab size
	// camHeight = 600;

#ifdef _USE_LIVE_VIDEO
	vidGrabber.setVerbose(true);
	vidGrabber.initGrabber(camWidth, camHeight);
	// Remember the size the grabber actually delivered (may differ from the request).
	cWidth  = vidGrabber.width;
	cHeight = vidGrabber.height;
#else
	vidPlayer.loadMovie("fingers.mp4");
	vidPlayer.play();
#endif

	// Allocate every ofxOpenCv working buffer at the capture size.
	colorImg.allocate(camWidth, camHeight);
	grayImage.allocate(camWidth, camHeight);
	grayBg.allocate(camWidth, camHeight);
	grayDiff.allocate(camWidth, camHeight);

	// Snapshot the background on the first processed frame.
	bLearnBakground = true;
	threshold = 80;

	// Load the frontal-face Haar cascade used by the tracker.
	haarFinder.setup("haarXML/haarcascade_frontalface_default.xml");
}
//--------------------------------------------------------------
void testApp::update(){
	ofBackground(100, 100, 100);

	// Advance whichever video source is compiled in and see if it
	// produced a fresh frame.
	bool gotFrame = false;
#ifdef _USE_LIVE_VIDEO
	vidGrabber.grabFrame();
	gotFrame = vidGrabber.isFrameNew();
#else
	vidPlayer.idleMovie();
	gotFrame = vidPlayer.isFrameNew();
#endif
	if (!gotFrame) return;  // nothing new to process this tick

#ifdef _USE_LIVE_VIDEO
	colorImg.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight);
#else
	colorImg.setFromPixels(vidPlayer.getPixels(), camWidth, camHeight);
#endif
	// operator= copies the colour frame into the grayscale buffer
	// (ofxOpenCv handles the conversion).
	grayImage = colorImg;

	// Capture the background reference once, on the first frame after a reset.
	if (bLearnBakground){
		grayBg = grayImage;
		bLearnBakground = false;
	}

	// Run the Haar face detector on the grayscale frame.
	haarFinder.findHaarObjects(grayImage, 10, 99999999, 10);

	// Threshold the absolute difference between background and the
	// incoming frame ...
	grayDiff.absDiff(grayBg, grayImage);
	grayDiff.threshold(threshold);

	// ... then find contours between 20 px and 1/3 of the frame area,
	// with interior holes included.
	contourFinder.findContours(grayDiff, 20, (camWidth*camHeight)/3, 10, true);
}
//--------------------------------------------------------------
void testApp::draw(){
// draw the incoming, the grayscale, the bg and the thresholded difference
ofSetColor(0xffffff);
colorImg.draw(20,20);
// grayImage.draw(360,20);
// grayBg.draw(20,280);
// grayDiff.draw(360,280);
haarFinder.draw(20, 20);
int numFace = haarFinder.blobs.size();
pixels = vidGrabber.getPixels();
glPushMatrix();
glTranslatef(20, 20, 0);
for(int i = 0; i < numFace; i++) {
float x = haarFinder.blobs[i].boundingRect.x;
float y = haarFinder.blobs[i].boundingRect.y;
float w = haarFinder.blobs[i].boundingRect.width;
float h = haarFinder.blobs[i].boundingRect.height;
cropWidth = (int) w;
cropHeight = (int) h;
haarfinderx = (int) x;
haarfindery = (int) y;
float cx = haarFinder.blobs[i].centroid.x;
float cy = haarFinder.blobs[i].centroid.y;
cropTexture.allocate(cropWidth, cropHeight, GL_RGB);
ofSetColor(0xFF0000);
// ofRect(x, y, w, h);
ofSetColor(0xFFFFFF);
// ofDrawBitmapString("face "+ofToString(i), cx, cy);
//copy a subpart of the current frame
//[http://forum.openframeworks.cc/t/subpicture/38/0](http://forum.openframeworks.cc/t/subpicture/38/0)
facetrack_and_crop_source.tar