Hello
I’m so happy with ofx! I’m really enjoying using it.
I am, however, still new to ofx and am having a small problem with live video and the haarFinder:
I want to mirror (flip horizontally) the video feed before I send it to the haarFinder (btw, thanks so much for all the info and code on this) but am not sure where or how I should be doing this.
Just drawing the video back mirrored does not help, because the haarFinder is still detecting faces at the original (unmirrored) location, which means all the graphics and text that I position where a face is detected end up in the wrong place.
So my questions are:
- Can I (should I) flip the capture in setup() and update(), at "Img.allocate" and "Img.setFromPixels(vidGrabber.getPixels(), videoWidth, videoHeight);" respectively? And if so, how would I do this? (I've put a rough sketch of what I mean right after these questions.)
- Or should I just flip the x that the haarFinder finds before sending it to the graphics and text? And what would be the best way to do this? (I've marked where I think this would go at the end of the draw() code below.)
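For the first option, here is a rough sketch of what I had in mind for update(). This is just a guess on my part: the ofxOpenCv ofxCvImage class seems to have a mirror(bool flipVertically, bool flipHorizontally) method, so maybe I could flip the frame right after grabbing it, before the detection runs (please correct me if that is the wrong call):

    colorImg.setFromPixels(vidGrabber.getPixels(), videoWidth, videoHeight);
    colorImg.mirror(false, true); // my guess: flip horizontally so detection runs on the mirrored frame
    grayImage = colorImg;
    haarFinder.findHaarObjects(grayImage, 10, 10000, 5);

That way the blob coordinates would already be in mirrored space, so the graphics and text should line up without any extra math.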
Sorry for asking two questions, but I really want to know which approach will be best.
Thanks so much.
I’ve added the relevant bits of code.
//--------------------------------------------------------------
void testApp::setup(){
    ofBackground(0, 0, 0);

    // font
    testFont.loadFont("AlteHaasGroteskBold.ttf", 14, true, true, true);

    // video
    videoWidth = 320;
    videoHeight = 240;
    scrnRatio = 3.2;
    sizeRatio = 1.28;
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(videoWidth, videoHeight);
    colorImg.allocate(videoWidth, videoHeight);
    grayImage.allocate(videoWidth, videoHeight);

    cross.loadImage("images/cross.png");
    eye.loadImage("images/eye.png");

    // load the correct xml file from the bin/data/haarXML/ folder
    haarFinder.setup("haarXML/haarcascade_frontalface_default.xml");
    // haarFinder.setup("haarXML/haarcascade_lefteye_2splits.xml");
    // haarFinder.setup("haarXML/haarcascade_eye.xml");
    // haarFinder.setup("haarXML/haarcascade_profileface.xml");

    // sound setup and start
    ofSoundStreamSetup(0, 2, this, 44100, 256, 4);
    left = new float[256];
    right = new float[256];

    Change = 1;
    timeCount = 0;

    // particle
    particle_count = 0;
    for(int ii = 0; ii < MAX_PARTICLE_COUNT; ii++){
        particles[ii] = new Particle(0, 0, 1, 255, 255, 255);
        particles[ii]->setm_Active(false);
    }
}
//--------------------------------------------------------------
void testApp::update(){
    // video
    bool bNewFrame = false;
    vidGrabber.grabFrame();
    bNewFrame = vidGrabber.isFrameNew();
    if (bNewFrame){
        colorImg.setFromPixels(vidGrabber.getPixels(), videoWidth, videoHeight);
        grayImage = colorImg;
        haarFinder.findHaarObjects(grayImage, 10, 10000, 5);
    }

    // particles: activate one per frame, wrapping around
    particles[particle_count]->setm_Active(true);
    particle_count++;
    if (particle_count >= MAX_PARTICLE_COUNT) {
        particle_count = 0; // start over
    }

    // timer set and check
    if (!myTimer.isGolden()) {
        ofDrawBitmapString("timer going", 100, 200);
        Change = 2;
        timeCount++;
        if (timeCount > 300){
            Change = 3;
        }
        if (timeCount > 450){
            Change = 4;
        }
        if (timeCount > 600){
            Change = 5;
        }
        if (timeCount > 800){
            Change = 6;
        }
        if (timeCount > 1000){
            Change = 7;
        }
        //string Str = ofToString(timeCount, 0);
        //ofDrawBitmapString(Str, 300, 200);
    } else if (myTimer.isGolden()){
        Change = 1;
        timeCount = 0;
    }
}
//--------------------------------------------------------------
void testApp::draw(){
    // draw the incoming & the grayscale
    ofSetColor(0xffffff);
    //colorImg.draw(20, 20, 1024, 768);
    grayImage.draw(20, 20, 1024, 768);

    // how many matches did you find?
    int numMatches = haarFinder.blobs.size();

    // use numMatches to indicate when to process audio or not
    if (numMatches > 0) {
        for (int ii = 0; ii < 256; ii++) {
            if (left[ii] > 0.1){
                testFont.drawString("i hear you", 100, 100);
                myTimer.start();
            }
        }
    }

    // drawing the matches
    glPushMatrix();
    glTranslatef(20, 20, 0);
    for(int i = 0; i < numMatches; i++){
        float x  = haarFinder.blobs[i].boundingRect.x;
        float y  = haarFinder.blobs[i].boundingRect.y;
        float w  = haarFinder.blobs[i].boundingRect.width;
        float h  = haarFinder.blobs[i].boundingRect.height;
        float cx = haarFinder.blobs[i].centroid.x;
        float cy = haarFinder.blobs[i].centroid.y;
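        // (option 2?) If I only mirror the drawn image, I am guessing this is
        // where I would flip the detected x so the overlays line up, e.g.:
        //     float mirroredX  = videoWidth - (x + w);
        //     float mirroredCx = videoWidth - cx;
        // ...and then use mirroredX / mirroredCx for the graphics and text.
        // Is that the right idea?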