Hello!
My goal is to have a video image playing. Then, read a life webcam ready to recognize a face. If 1 or more faces is present, fade in and composite a cropped/feathered/masked video of the face over the pre-playing video image. If 0 faces are present, fade out/remove live face overlay video.
Thanks to Paul Houx’s code here: http://forum.openframeworks.cc/t/using-grayscale-video-as-a-mask…/346/0 I have been able to mask a live video cam image and place it over my video.
Now, using my still developing cobbling skills, I am trying to get the live cam to fade in and out when a face is detected. At this point, I am working on a swap and then going for the fade.
What is it about this opengl mask code that doesn’t recognize my “if (numMatches >=1)”?
I appreciate any help on my project and am glad to offer more information!
Here is my testApp.cpp:
#include "testApp.h"
// Sample code for masking an image using another image's alpha channel
// Author: Paul Houx (droozle), Amsterdam, The Netherlands
//--------------------------------------------------------------
void testApp::setup(){
int width = 620;
int height = 480;
// turn off vertical sync so we can evaluate performance
ofSetVerticalSync(false);
// the bind() function might complain about texture not
// being allocated, yet it seems to work fine. Uncomment
// the following line to stop the warning from displaying.
ofSetLogLevel(OF_LOG_ERROR);
// load our images
background.loadImage("background.png");
mask.loadImage("mask3.png");
// initialize webcam
webcam.initGrabber(width, height);
boarMovie.loadMovie("video/boarShort.mp4");
boarMovie.play();
//FACE FINDER BELOW
webcam.setVerbose(true);
//vidGrabber.initGrabber(320,240);
videoTexture.allocate(width,height, GL_RGB);
colorImg.allocate(width,height);
grayImage.allocate(width,height);
//grayBg.allocate(320,240);
//grayDiff.allocate(320,240);
finder.setup("haarXML/haarcascade_frontalface_default.xml");
}
//--------------------------------------------------------------
void testApp::update(){
int width = 620;
int height = 480;
// update the webcam image
webcam.update();
bool bNewFrame = false;
webcam.grabFrame();
bNewFrame = webcam.isFrameNew();
if (bNewFrame){
colorImg.setFromPixels(webcam.getPixels(), width, height);
grayImage = colorImg;
finder.findHaarObjects(grayImage);
}
}
//--------------------------------------------------------------
void testApp::draw(){
// draw the background
int offsetX = 500;
int offsetY = 100;
//colorImg.draw(20,20);
grayImage.draw(360,20);
background.draw(0, 0);
//boarMovie.draw(0,0);
// how many matches did you find?
int numMatches = finder.blobs.size();
// drawing the matches
glTranslatef(20, 20, 0);
for(int i = 0; i < numMatches && numMatches != 0; i++){
float x = finder.blobs[i].boundingRect.x;
float y = finder.blobs[i].boundingRect.y;
float w = finder.blobs[i].boundingRect.width;
float h = finder.blobs[i].boundingRect.height;
float cx = finder.blobs[i].centroid.x;
float cy = finder.blobs[i].centroid.y;
ofSetColor(0x00FF00);
ofSetLineWidth(8);
ofNoFill();
ofEllipse(x+w/2, y+h/2, w, 4*h/3);
ofSetColor(0xffffff);
string reportStr= "matches found: "+ ofToString(numMatches, 0)+", framerate: "+ofToString(ofGetFrameRate(),0);
ofDrawBitmapString(reportStr, 20, 10);
}
if (numMatches >= 1){
// make sure alpha blending is enabled
ofEnableAlphaBlending();
// set up multi-texturing
glActiveTexture(GL_TEXTURE0);
webcam.getTextureReference().bind(); // may generate an allocation warning
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);
glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_REPLACE);
glActiveTexture(GL_TEXTURE1);
mask.getTextureReference().bind(); // may generate an allocation warning
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);
glTexEnvf (GL_TEXTURE_ENV, GL_COMBINE_RGB_EXT, GL_BLEND);
// render masked and mask images as one multi-textured quad
glBegin(GL_QUADS);
glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 480.0f);
glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 480.0f);
glVertex3f(0.0f, 480.0f, 0);
glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 0.0f);
glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 0.0f);
glVertex3f(0.0f, 0.0f, 0);
glMultiTexCoord2f(GL_TEXTURE0, 640.0f, 0.0f);
glMultiTexCoord2f(GL_TEXTURE1, 640.0f, 0.0f);
glVertex3f(640.0f, 0.0f, 0);
glMultiTexCoord2f(GL_TEXTURE0, 640.0f, 480.0f);
glMultiTexCoord2f(GL_TEXTURE1, 640.0f, 480.0f);
glVertex3f(640.0f, 480.0f, 0);
glEnd();
// properly unbind the textures
mask.getTextureReference().unbind();
glActiveTexture(GL_TEXTURE0);
masked.getTextureReference().unbind();
// disable alpha blending again
ofDisableAlphaBlending();
}
else {
boarMovie.draw(0,0);
}
// draw frames per second
ofSetColor(0xFFFFFF);
//ofDrawBitmapString( ofToString( ofGetFrameRate(), 0 ) + " FPS", 10, 15);
}
and my testApp.h:
#ifndef _TEST_APP
#define _TEST_APP
#pragma once
#include "ofMain.h"
#include "ofxCvHaarFinder.h"
#include "ofxOpenCv.h"
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void resized(int w, int h);
ofImage background;
ofImage mask;
ofImage masked;
ofVideoGrabber webcam;
ofVideoPlayer boarMovie;
ofxCvHaarFinder finder;
ofxCvColorImage colorImg;
ofTexture videoTexture;
ofxCvGrayscaleImage grayImage;
ofxCvGrayscaleImage grayBg;
ofxCvGrayscaleImage grayDiff;
ofxCvContourFinder contourFinder;
int threshold;
bool bLearnBakground;
};
#endif
//CODE FROM: [http://forum.openframeworks.cc/t/using-grayscale-video-as-a-mask.../346/0](http://forum.openframeworks.cc/t/using-grayscale-video-as-a-mask.../346/0)