Masking Image Fade In/Fade out with Face Detection

Hello!

My goal is to have a video image playing. Then, read a live webcam feed, ready to recognize a face. If one or more faces are present, fade in and composite a cropped/feathered/masked video of the face over the pre-playing video image. If no faces are present, fade out/remove the live face overlay video.

Thanks to Paul Houx’s code here: http://forum.openframeworks.cc/t/using-grayscale-video-as-a-mask…/346/0, I have been able to mask a live webcam image and place it over my video.

Now, using my still-developing cobbling skills, I am trying to get the live cam to fade in and out when a face is detected. At this point, I am working on a simple swap and then going for the fade.
What is it about this OpenGL mask code that doesn’t recognize my “if (numMatches >= 1)”?

I appreciate any help on my project and am glad to offer more information!

Here is my testApp.cpp:

  
#include "testApp.h"  
  
// Sample code for masking an image using another image's alpha channel  
// Author: Paul Houx (droozle), Amsterdam, The Netherlands  
//--------------------------------------------------------------  
void testApp::setup(){  
	int width = 640;  // must match the 640x480 quad coordinates used in draw()  
	int height = 480;  
    // turn off vertical sync so we can evaluate performance  
    ofSetVerticalSync(false);  
	  
    // the bind() function might complain about texture not  
    // being allocated, yet it seems to work fine. Uncomment  
    // the following line to stop the warning from displaying.  
    ofSetLogLevel(OF_LOG_ERROR);  
	  
    // load our images  
    background.loadImage("background.png");  
    mask.loadImage("mask3.png");  
	  
    // initialize webcam  
    webcam.initGrabber(width, height);  
	  
	boarMovie.loadMovie("video/boarShort.mp4");  
	boarMovie.play();  
	  
	//FACE FINDER BELOW  
	webcam.setVerbose(true);  
	//vidGrabber.initGrabber(320,240);  
  
	videoTexture.allocate(width,height, GL_RGB);  
	  
    colorImg.allocate(width,height);  
	grayImage.allocate(width,height);  
	//grayBg.allocate(320,240);    
    //grayDiff.allocate(320,240); 	  
			  
	finder.setup("haarXML/haarcascade_frontalface_default.xml");  
}  
//--------------------------------------------------------------  
void testApp::update(){  
	int width = 640;  
	int height = 480;  
	  
	// grab a new frame from the webcam; grabFrame() does the update,  
	// so a separate update() call is not needed  
	webcam.grabFrame();  
	  
	bool bNewFrame = webcam.isFrameNew();  
	  
	if (bNewFrame){    
        colorImg.setFromPixels(webcam.getPixels(), width, height);    
        grayImage = colorImg;    
        finder.findHaarObjects(grayImage);    
    }    
}  
//--------------------------------------------------------------  
void testApp::draw(){  
    // draw the background  
	int offsetX = 500;  
	int offsetY = 100;  
	//colorImg.draw(20,20);	  
	grayImage.draw(360,20);  
	  
	background.draw(0, 0);  
	//boarMovie.draw(0,0);  
	  
	// how many matches did you find?  
	int numMatches = finder.blobs.size();  
	  
	// drawing the matches  
	  
	glPushMatrix();  
	glTranslatef(20, 20, 0);  
	for(int i = 0; i < numMatches; i++){  
		float x = finder.blobs[i].boundingRect.x;  
		float y = finder.blobs[i].boundingRect.y;  
		float w = finder.blobs[i].boundingRect.width;  
		float h = finder.blobs[i].boundingRect.height;  
		  
		float cx = finder.blobs[i].centroid.x;  
		float cy = finder.blobs[i].centroid.y;  
		  
		ofSetColor(0x00FF00);  
		ofSetLineWidth(8);  
		ofNoFill();  
		ofEllipse(x+w/2, y+h/2, w, 4*h/3);  
		  
	}  
	glPopMatrix();  
	  
	// draw the report once, not once per match  
	ofSetColor(0xffffff);  
	string reportStr = "matches found: " + ofToString(numMatches, 0) + ", framerate: " + ofToString(ofGetFrameRate(), 0);  
	ofDrawBitmapString(reportStr, 20, 10);  
  
if (numMatches >= 1){  
    // make sure alpha blending is enabled  
	ofEnableAlphaBlending();  
    // set up multi-texturing  
	glActiveTexture(GL_TEXTURE0);  
	webcam.getTextureReference().bind(); // may generate an allocation warning  
  
    glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);  
    glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_REPLACE);  
	  
	glActiveTexture(GL_TEXTURE1);  
	mask.getTextureReference().bind(); // may generate an allocation warning  
    glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);  
    glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_BLEND);  
	  
	// render masked and mask images as one multi-textured quad  
	glBegin(GL_QUADS);  
	glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 480.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 480.0f);  
	glVertex3f(0.0f, 480.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 0.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 0.0f);  
	glVertex3f(0.0f, 0.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 640.0f, 0.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 640.0f, 0.0f);  
	glVertex3f(640.0f, 0.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 640.0f, 480.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 640.0f, 480.0f);  
	glVertex3f(640.0f, 480.0f, 0);  
	glEnd();  
	  
    // properly unbind the textures  
    mask.getTextureReference().unbind();  
	glActiveTexture(GL_TEXTURE0);  
	webcam.getTextureReference().unbind(); // webcam, not masked, is bound to GL_TEXTURE0  
	  
	// disable alpha blending again  
	ofDisableAlphaBlending();  
	}  
  
	else {  
		boarMovie.draw(0,0);  
	}  
		  
    // draw frames per second  
	ofSetColor(0xFFFFFF);  
	//ofDrawBitmapString( ofToString( ofGetFrameRate(), 0 ) + " FPS", 10, 15);	  
}  
  

and my testApp.h:

  
#ifndef _TEST_APP  
#define _TEST_APP  
  
#pragma once    
  
#include "ofMain.h"  
#include "ofxCvHaarFinder.h"    
#include "ofxOpenCv.h"    
  
class testApp : public ofBaseApp{  
	  
public:  
	  
	void setup();  
	void update();  
	void draw();  
	  
	void keyPressed(int key);  
	void keyReleased(int key);  
	void mouseMoved(int x, int y );  
	void mouseDragged(int x, int y, int button);  
	void mousePressed(int x, int y, int button);  
	void mouseReleased(int x, int y, int button);  
	void resized(int w, int h);  
	  
	ofImage background;  
	ofImage mask;  
	ofImage masked;  
	  
	ofVideoGrabber webcam;  
	ofVideoPlayer  boarMovie;	  
	  
	ofxCvHaarFinder finder;  
	ofxCvColorImage colorImg;  
	  
	ofTexture			videoTexture;  
        
	ofxCvGrayscaleImage     grayImage;    
	ofxCvGrayscaleImage     grayBg;    
	ofxCvGrayscaleImage     grayDiff;    
            
	ofxCvContourFinder  contourFinder;    
            
	int                 threshold;    
	bool                bLearnBakground;   
};  
  
#endif  
  
//CODE FROM: http://forum.openframeworks.cc/t/using-grayscale-video-as-a-mask.../346/0  

Ok… getting much closer. I’ve tried several combinations, and now I have a masked, ellipse-gradient face image overlaying a live video image! I am now trying to figure out the coordinate system so the face is placed in the right spot. I’d like to use the dims of the detected face area, but that is being obstructed by the nature of this OpenGL masking code (which I don’t want to mess with too much!), so until I find a better solution I will just set the mask and the face at a general size that should work for the installation context; a rough sketch of what I have in mind follows below.
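In case it helps anyone following along, here is a rough, untested sketch of what I mean by using the face dims: it wraps the quad drawing in a push/pop and squeezes the 320x240 quad into the first blob’s bounding rect (the rect comes straight from ofxCvHaarFinder; the quad-drawing step is the same multi-textured quad as in the code below):

if (haarFinder.blobs.size() > 0) {  
	// bounding rect of the first detected face, in webcam (320x240) coords  
	ofRectangle face = haarFinder.blobs[0].boundingRect;  
	  
	glPushMatrix();  
	glTranslatef(face.x, face.y, 0);                            // move to the face corner  
	glScalef(face.width / 320.0f, face.height / 240.0f, 1.0f);  // fit the quad to the rect  
	// ... render the multi-textured masked quad here, exactly as in draw() below ...  
	glPopMatrix();  
}  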

A 2-3 second fade in/out when (numMatches >= 1) would be nice to figure out next. I understand how to set the alpha of the texture, but a timed fade-in versus an on/off effect still eludes me; a sketch of the idea follows.
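For reference, here is a minimal sketch of the timed fade I am after, assuming two hypothetical members added to testApp.h (float fadeAlpha; float lastTime;), with fadeAlpha = 0 and lastTime = ofGetElapsedTimef() set in setup(). Each frame it ramps the alpha toward 255 or 0 at a fixed rate:

// in update(), after findHaarObjects():  
float now = ofGetElapsedTimef();  
float dt  = now - lastTime;         // seconds since the last frame  
lastTime  = now;  
  
float fadeSpeed = 255.0f / 2.5f;    // full fade over ~2.5 seconds  
if (haarFinder.blobs.size() >= 1) {  
	fadeAlpha = MIN(fadeAlpha + fadeSpeed * dt, 255.0f);  // fade in while a face is present  
} else {  
	fadeAlpha = MAX(fadeAlpha - fadeSpeed * dt, 0.0f);    // fade out when no faces  
}  
  
// in draw(), just before rendering the masked quad:  
ofEnableAlphaBlending();  
ofSetColor(255, 255, 255, (int)fadeAlpha);  // modulate the quad's alpha  

I am not certain the vertex color’s alpha survives the GL_COMBINE texture-environment setup unchanged, so the multitexture state may still need a tweak for this to actually modulate the quad.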

Here is the updated code that allows the image to turn on/off if it finds a face…

  
#include "testApp.h"  
  
  
//--------------------------------------------------------------  
void testApp::setup(){	   
	// turn off vertical sync so we can evaluate performance  
    ofSetVerticalSync(false);  
	  
    // the bind() function might complain about texture not  
    // being allocated, yet it seems to work fine. Uncomment  
    // the following line to stop the warning from displaying.  
    ofSetLogLevel(OF_LOG_ERROR);  
	  
    // load our images  
    mask.loadImage("mask5.png"); //must be the same size as the open GL dims for this to work  
	  
    // initialize webcam  
   // webcam.initGrabber(640, 480);  
	  
	boarMovie.loadMovie("video/boarShort.mp4");  
	boarMovie.play();  
//FACES  
	webcam.setVerbose(true);  
	webcam.initGrabber(320,240);  
		  
	colorImg.allocate(320,240);  
	grayImage.allocate(320,240);  
	  
	// load the correct xml file from the bin/data/haarXML/ folder  
	  
	haarFinder.setup("haarXML/haarcascade_frontalface_default.xml");  
}  
  
//--------------------------------------------------------------  
void testApp::update(){  
	  
	ofBackground(0,0,0);  
      
	bool bNewFrame = false;  
	webcam.grabFrame();  
	bNewFrame = webcam.isFrameNew();  
      
	  
	if (bNewFrame){  
		  
		colorImg.setFromPixels(webcam.getPixels(), 320,240);  
		  
        grayImage = colorImg;  
		  
		haarFinder.findHaarObjects(grayImage);  
		  
	}  
}  
  
//--------------------------------------------------------------  
void testApp::draw(){  
	  
	// how many matches did you find?  
	int numMatches = haarFinder.blobs.size();  
	  
	/*  
	// drawing the matches  
	glPushMatrix();  
	glTranslatef(20, 20, 0);  
	for(int i = 0; i < numMatches; i++){  
		float x = haarFinder.blobs[i].boundingRect.x;  
		float y = haarFinder.blobs[i].boundingRect.y;  
		float w = haarFinder.blobs[i].boundingRect.width;  
		float h = haarFinder.blobs[i].boundingRect.height;  
		  
		float cx = haarFinder.blobs[i].centroid.x;  
		float cy = haarFinder.blobs[i].centroid.y;  
		  
		ofSetColor(0x00FF00);  
		ofSetLineWidth(4);  
		ofNoFill();  
		ofEllipse(x+w/2, y+h/2, w, 4*h/3);  
		  
		ofSetColor(0xFFFFFF);  
		ofDrawBitmapString(ofToString(i), cx, cy);  
		  
	}  
	glPopMatrix();*/  
	  
if (numMatches >= 1) {  
	boarMovie.draw(0,0);  
	// make sure alpha blending is enabled  
	ofEnableAlphaBlending();  
	  
    // set up multi-texturing  
	glActiveTexture(GL_TEXTURE0); //this sets the live image as "GL_TEXTURE0"***  
	webcam.getTextureReference().bind(); // may generate an allocation warning  
    glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);  
    glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_REPLACE);  
	  
	glActiveTexture(GL_TEXTURE1); //this sets the mask image as "GL_TEXTURE1"***  
	mask.getTextureReference().bind(); // may generate an allocation warning  
    glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);  
    glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_BLEND);  
	  
	// render masked and mask images as one multi-textured quad  
	glBegin(GL_QUADS);  
	glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 240.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 240.0f);  
	glVertex3f(0.0f, 240.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 0.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 0.0f);  
	glVertex3f(0.0f, 0.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 320.0f, 0.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 320.0f, 0.0f);  
	glVertex3f(320.0f, 0.0f, 0);  
	glMultiTexCoord2f(GL_TEXTURE0, 320.0f, 240.0f);  
	glMultiTexCoord2f(GL_TEXTURE1, 320.0f, 240.0f);  
	glVertex3f(320.0f, 240.0f, 0);  
	glEnd();  
	  
    // properly unbind the textures  
    mask.getTextureReference().unbind();  
	glActiveTexture(GL_TEXTURE0);  
	webcam.getTextureReference().unbind(); // webcam, not masked, is bound to GL_TEXTURE0  
	  
	  
	// disable alpha blending again  
	ofDisableAlphaBlending();  
  
	}  
else {  
	boarMovie.draw(0,0);  
}  
	// drawing some info  
	ofSetColor(0xffffff);  
	string reportStr= "matches found: "+ ofToString(numMatches, 0)+", framerate: "+ofToString(ofGetFrameRate(),0);  
	ofDrawBitmapString(reportStr, 20, 10);  
	  
}  
  

Used glPushMatrix(), glPopMatrix(), and glTranslatef() to get everything lined up in place. I am very pleased with this first oF project (translated from a Jitter project completed two years ago; I actually find oF more intuitive). Thanks, oF community and creators, for all. Next project coming right up…