I've been working from the addon examples/documentation to create a very basic face detection example with a video stream. I'm not sure what I'm doing incorrectly, but no bounding rectangle ever appears around the detected face in the webcam feed.
My testApp.cpp is below:
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
    camWidth = 320;    // try to grab at this size.
    camHeight = 240;
    red.set(255, 0, 0);

    // we can now get back a list of devices.
    vector<ofVideoDevice> devices = vidGrabber.listDevices();
    for(int i = 0; i < devices.size(); i++){
        cout << devices[i].id << ": " << devices[i].deviceName;
        if(devices[i].bAvailable){
            cout << endl;
        }else{
            cout << " - unavailable" << endl;
        }
    }

    vidGrabber.setDeviceID(0);
    vidGrabber.setDesiredFrameRate(60);
    vidGrabber.initGrabber(camWidth, camHeight);

    videoInverted = new unsigned char[camWidth * camHeight * 3];
    videoTexture.allocate(camWidth, camHeight, GL_RGB);
    ofSetVerticalSync(true);

    finder.setup("haarcascade_frontalface_default.xml");
    finder.findHaarObjects(grayImage);
}
//--------------------------------------------------------------
void testApp::update(){
    ofBackground(100, 100, 100);

    vidGrabber.update();
    bool bNewFrame = vidGrabber.isFrameNew();

    if(bNewFrame){
        colorImg.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight);
        grayImage = colorImg; // convert our color image to a grayscale image
    }
}
//--------------------------------------------------------------
void testApp::draw(){
    ofSetHexColor(0xffffff);
    grayImage.draw(360, 20);

    // draw the face bounding boxes (using the red colour set in setup)
    ofNoFill();
    ofSetColor(red);
    ofPushMatrix();
    ofTranslate(360, 20); // blob coordinates are relative to the image, so offset to where it is drawn
    for(unsigned int i = 0; i < finder.blobs.size(); i++){
        ofRectangle cur = finder.blobs[i].boundingRect;
        ofRect(cur.x, cur.y, cur.width, cur.height);
    }
    ofPopMatrix();
}
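
For reference, my testApp.h declares roughly these members (paraphrased, so the exact types/order may differ slightly from what I have on disk):

#pragma once

#include "ofMain.h"
#include "ofxOpenCv.h"

class testApp : public ofBaseApp {
public:
    void setup();
    void update();
    void draw();

    int camWidth;
    int camHeight;
    ofColor red;

    ofVideoGrabber vidGrabber;
    unsigned char * videoInverted;
    ofTexture videoTexture;

    ofxCvColorImage colorImg;
    ofxCvGrayscaleImage grayImage;
    ofxCvHaarFinder finder;
};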