ofxLibdc-FireflyMV and ofxFaceTracker

Hello,

I’m trying to combine the ofxLibdc-FireflyMV and ofxFaceTracker “extraction” examples, but I’m getting this error when running the application:

  
  
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cvtColor, file /Users/theo/Downloads/OpenCV-2.3.1/modules/imgproc/src/color.cpp, line 2834  
libc++abi.dylib: terminate called throwing an exception  
  

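For reference, that assertion means cvtColor was handed an image whose source channel count (scn) is not 3 or 4, i.e. a single-channel grayscale frame where an RGB/RGBA one was expected. A minimal standalone reproduction in plain OpenCV 2.x C++ (the image size is arbitrary):

  
  
#include <opencv2/opencv.hpp>  
  
int main() {  
    cv::Mat gray(480, 640, CV_8UC1);    // single-channel image, like the FireflyMV output  
    cv::Mat out;  
    // CV_RGB2GRAY expects a 3- or 4-channel source, so this throws  
    // "Assertion failed (scn == 3 || scn == 4) in cvtColor":  
    cv::cvtColor(gray, out, CV_RGB2GRAY);  
    return 0;  
}  
  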
I’m using ofxLibdc because we need a Point Grey FireflyMV USB camera, which captures very good images in low-light conditions (NIR frequencies). I’m running Xcode 4.4, OS X 10.8.5, and oF v0.8.0. Below is the code I have so far.

testApp.h:

  
  
  
#pragma once  
  
#include "ofMain.h"  
#include "ofxCv.h"  
#include "ofxFaceTracker.h"  
#include "ofxLibdc.h"  
#include "ofxOpenCv.h"  
  
class testApp : public ofBaseApp {  
public:  
    void setup();  
    void update();  
    void draw();  
  
    void keyPressed(int key);  
  
    // uncomment to use with a regular camera:  
    //ofVideoGrabber cam;  
    ofxFaceTracker tracker;  
  
    ofxLibdc::Camera camera;  
  
    ofImage curFrame;  
  
    ofxLibdc::Grabber grabber;  
    ofxLibdc::PointGrey pointGrey;  
  
    ofxCvGrayscaleImage rawImage;  
    float brightness;  
};  
  
  

testApp.cpp:

  
  
  
#include "testApp.h"  
  
using namespace ofxCv;  
  
void testApp::setup() {  
      
	ofSetVerticalSync(true);  
    ofSetLogLevel(OF_LOG_VERBOSE);   
      
    camera.setup();  
	  
    ofSetWindowShape(camera.getWidth(), camera.getHeight());  
      
    rawImage.allocate(camera.getWidth(), camera.getHeight());  
      
    tracker.setup();  
    ofBackground(0, 0, 0);  
}  
  
void testApp::update() {  
      
	if(camera.grabVideo(curFrame)) {  
		curFrame.update();  
        tracker.update(toCv(curFrame));  
	}  
}  
  
void testApp::draw() {  
	  
    ofPolyline facePoints = tracker.getImageFeature(ofxFaceTracker::FACE_OUTLINE);       
    ofRectangle faceBox = facePoints.getBoundingBox();  
    ofPoint faceCenter = faceBox.getCenter();  
      
    ofPixels pixels;  
    curFrame.getTextureReference().readToPixels(pixels);  
    pixels.crop(faceBox.x,faceBox.y,faceBox.width,faceBox.height);  
      
    int totalPixels = pixels.getWidth()*pixels.getHeight();  
    for (int x = 0; x < pixels.getWidth(); x++){  
        for (int y = 0; y < pixels.getHeight(); y++){  
            ofPoint checkpoint = ofPoint(x+faceBox.x,y+faceBox.y);  
              
            if(facePoints.inside(checkpoint)){   
            } else {  
                ofColor b = ofColor(0);  
                pixels.setColor(x,y,b);   
            }  
        }  
     }  
    ofImage face;  
    face.setFromPixels(pixels);  
    face.draw(280,50,500,700);  
      
    //camera image test:  
      
//    if(camera.isReady()) {  
//		// Camera doesn't draw itself, curFrame does.  
//		//curFrame.draw(0, 0, ofGetWidth()/2, ofGetHeight());  
//        curFrame.draw(0, 0, ofGetWidth(), ofGetHeight());  
//	}  
}  
  
void testApp::keyPressed(int key){  
      
	switch (key){  
        case 'r':  
            tracker.reset();  
            break;  
    }  
}  
  
  

Any suggestions?

Thank you!

Replying to my own question, in case it comes in handy to anyone out there:

It turns out the ofxFaceTracker library was the problem: it required a 3- or 4-channel image to analyse through OpenCV. I wrote to Kyle McDonald, and he changed the library, so it now accepts grayscale images as well.
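If you’re stuck on a pre-fix copy of the library, a minimal workaround sketch is to expand the single-channel frame to three channels yourself before calling the tracker (assuming the OpenCV 2.x constant names that ship with oF v0.8.0):

  
  
cv::Mat gray = ofxCv::toCv(rawImage);   // wraps the grayscale pixels, no copy  
cv::Mat rgb;  
cv::cvtColor(gray, rgb, CV_GRAY2RGB);   // replicate the channel so scn == 3  
tracker.update(rgb);  
  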

So now, in order to use ofxFaceTracker with ofxLibdc (for Point Grey cameras), I’m using:

  
  
void testApp::setup(){  
    tracker.setup();  
  
    ofBackground(0, 0, 0);  
    ofSetVerticalSync(true);  
    ofSetLogLevel(OF_LOG_VERBOSE);  
  
    camera.setup();  
  
    cout << "camera: " << camera.getLibdcCamera() << endl;  
    cout << "camera: " << camera.getImageType() << endl;  
  
    allocate();  
}  
  
void testApp::allocate() {  
    rawImage.allocate(camera.getWidth(), camera.getHeight());  
}  
  
//--------------------------------------------------------------  
void testApp::update() {  
    if(camera.grabVideo(curFrame, true)) {  
        curFrame.update();  
        rawImage.setFromPixels(curFrame.getPixels(), camera.getWidth(), camera.getHeight());  
    }  
  
    // the updated tracker accepts the single-channel image directly:  
    tracker.update(ofxCv::toCv(rawImage));  
  
    if(tracker.getFound()) {  
        ofPolyline facePoints = tracker.getImageFeature(ofxFaceTracker::FACE_OUTLINE);  
        ofRectangle faceBox = facePoints.getBoundingBox();  
        ofPoint faceCenter = faceBox.getCenter();  
  
        ofPixels pixels;  
        // copy the camera image to ofPixels:  
        rawImage.getTextureReference().readToPixels(pixels);  
  
        // crop them to the bounding box of the recognized face:  
        pixels.crop(faceBox.x, faceBox.y, faceBox.width, faceBox.height);  
  
        for (int x = 0; x < pixels.getWidth(); x++){  
            for (int y = 0; y < pixels.getHeight(); y++){  
                // make a point to check whether it is inside the face,  
                // offset by the x and y of the bounding box:  
                ofPoint checkpoint = ofPoint(x + faceBox.x, y + faceBox.y);  
  
                if(facePoints.inside(checkpoint)){  
                    // if inside, keep the pixel  
                } else {  
                    // otherwise make the current pixel black:  
                    pixels.setColor(x, y, ofColor(0));  
                }  
            }  
        }  
        // store the cropped, masked result in a "face" ofImage member added to testApp:  
        face.setFromPixels(pixels);  
    }  
}  
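  

The snippet ends with update(); for completeness, a draw() in the spirit of the first post might look like the sketch below (it assumes face is declared as an ofImage member in testApp.h, which the header posted above does not yet include):

  
  
void testApp::draw() {  
    // draw the raw camera image:  
    rawImage.draw(0, 0);  
  
    // draw the masked, cropped face if one was found:  
    if(tracker.getFound()) {  
        face.draw(280, 50, 500, 700);  
    }  
}  
  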