Hey, all, I’ve gotten Lucas-Kanade optical flow working but want to try a simple block method. I found Takashi and Joel’s method on another thread, and it all compiles fine, but I get the following error:
“OpenCV Error: Sizes of input arguments do not match () in unknown function, file
src\optflowbm.cpp, line 88”
Which I cannot track down anywhere.
//
// ofxCvOpticalFlowBM.h - a OpenCV cvCalcOpticalFlowBM warpper for openFrameworks
//
// Copyright (C) 2008 Takashi Maekawa <takachin@generative.info>
// Copyright (C) 2008 Satoru Higa
// All rights reserved.
// This is free software with ABSOLUTELY NO WARRANTY.
//
// You can redistribute it and/or modify it under the terms of
// the GNU Lesser General Public License.
//
#pragma once
#include <cv.h>
#include "ofMain.h"
#include "ofxCvConstants.h"
#include "ofxCvGrayscaleImage.h"
class ofxCvOpticalFlowBM
{
public:
ofxCvOpticalFlowBM(void);
~ofxCvOpticalFlowBM(void);
void allocate(int _w, int _h);
void calc( ofxCvGrayscaleImage & pastImage,
ofxCvGrayscaleImage & currentImage,
int size
);
void setCalcStep(int _cols, int _rows);
void reset();
void draw();
//private:
public:
static const int DEFAULT_CAPTURE_WIDTH = 320;
static const int DEFAULT_CAPTURE_HEIGHT = 240;
int captureWidth;
int captureHeight;
static const int DEFAULT_CAPTURE_COLS_STEP = 4;
static const int DEFAULT_CAPTURE_ROWS_STEP = 4;
int captureColsStep;
int captureRowsStep;
int block_size;
int shift_size;
int rows,cols;
//int cw, ch;
CvSize block;
CvSize shift;
CvSize max_range;
CvMat *vel_x, *vel_y;
};
and
//
// ofxCvOpticalFlowBM.c - a OpenCV cvCalcOpticalFlowBM warpper for openFrameworks
//
// Copyright (C) 2008 Takashi Maekawa <takachin@generative.info>
// Copyright (C) 2008 Satoru Higa
// All rights reserved.
// This is free software with ABSOLUTELY NO WARRANTY.
//
// You can redistribute it and/or modify it under the terms of
// the GNU Lesser General Public License.
//
#include <stdio.h>
#include "ofxCvOpticalFlowBM.h"
ofxCvOpticalFlowBM::ofxCvOpticalFlowBM(void)
{
captureWidth = DEFAULT_CAPTURE_WIDTH;
captureHeight = DEFAULT_CAPTURE_HEIGHT;
}
ofxCvOpticalFlowBM::~ofxCvOpticalFlowBM(void)
{
// TODO : release cv matrices
//cvReleaseImage(&vel_x);
//cvReleaseImage(&vel_y);
}
void ofxCvOpticalFlowBM::allocate(int _w, int _h){
captureWidth = _w;
captureHeight = _h;
//cw = 320; ch = 240;
block_size = 10;
shift_size = 1;
rows = int(ceil(double(captureHeight) / block_size));
cols = int(ceil(double(captureWidth) / block_size));
vel_x = cvCreateMat (rows, cols, CV_32FC1);
vel_y = cvCreateMat (rows, cols, CV_32FC1);
cvSetZero(vel_x);
cvSetZero(vel_y);
block = cvSize (block_size, block_size);
shift = cvSize (shift_size, shift_size);
max_range = cvSize (10, 10);
}
void ofxCvOpticalFlowBM::setCalcStep(int _cols, int _rows){
captureColsStep = _cols;
captureRowsStep = _rows;
}
void ofxCvOpticalFlowBM::calc( ofxCvGrayscaleImage & pastImage,
ofxCvGrayscaleImage & currentImage,
int size
)
{
cvCalcOpticalFlowBM(pastImage.getCvImage(), currentImage.getCvImage(),
block, shift, max_range, 0, vel_x, vel_y);
}
void ofxCvOpticalFlowBM::draw(void){
ofEnableAlphaBlending();
ofSetHexColor(0xffffff);
ofNoFill();
// draw for debug display
int x, y, dx, dy;
for(int y = 0; y < rows; y++){
for(int x = 0; x < cols; x++){
int dx = (int) cvGetReal2D (vel_x, y, x);
int dy = (int) cvGetReal2D (vel_y, y, x);
int xx = x * block_size;
int yy = y * block_size;
ofLine(xx, yy, xx + dx, yy + dy);
}
}
ofDisableAlphaBlending();
}
#pragma once
#include "ofMain.h"
#include "ofxOpenCv.h"
//#define _USE_LIVE_VIDEO // uncomment this to use a live camera
// otherwise, we'll use a movie file
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
#ifdef _USE_LIVE_VIDEO
ofVideoGrabber vidGrabber;
#else
ofVideoPlayer vidPlayer;
#endif
ofxCvColorImage colorImg;
ofxCvGrayscaleImage grayImage;
ofxCvGrayscaleImage grayBg;
ofxCvGrayscaleImage grayPast;
int rows, cols;
int block_size;
int shift_size;
CvMat *velx, *vely;
CvSize block;
CvSize shift;
CvSize max_range;
//ofxCvContourFinder contourFinder;
//ofxCvOpticalFlowLK flow;
ofxCvOpticalFlowBM flow;
int threshold;
bool bLearnBakground;
int w,h;
};
and the .cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
#ifdef _USE_LIVE_VIDEO
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(320,240);
#else
vidPlayer.loadMovie("fingers.mov");
vidPlayer.play();
#endif
w = vidPlayer.width;
h = vidPlayer.height;
colorImg.allocate(w,h);
grayImage.allocate(w,h);
grayBg.allocate(w,h);
grayPast.allocate(w,h);
bLearnBakground = true;
threshold = 75;
flow.allocate(w,h);
}
//--------------------------------------------------------------
void testApp::update(){
ofBackground(100,100,100);
bool bNewFrame = false;
#ifdef _USE_LIVE_VIDEO
vidGrabber.grabFrame();
bNewFrame = vidGrabber.isFrameNew();
#else
vidPlayer.idleMovie();
bNewFrame = vidPlayer.isFrameNew();
#endif
if (bNewFrame){
#ifdef _USE_LIVE_VIDEO
colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
#else
colorImg.setFromPixels(vidPlayer.getPixels(), w,h);
#endif
grayImage = colorImg;
if (bLearnBakground == true){
grayBg = grayImage; // the = sign copys the pixels from grayImage into grayBg (operator overloading)
bLearnBakground = false;
}
// take the abs value of the difference between background and incoming and then threshold:
//grayDiff.absDiff(grayBg, grayImage);
grayImage.blurGaussian(6);
grayImage.blurMedian(25);
grayImage.threshold(threshold);
grayImage.blurGaussian(1);
//printf("%p %p\n", grayPast.getCvImage(), grayImage.getCvImage());
flow.calc(grayPast, grayImage,10);
grayPast = grayImage;
}
}
//--------------------------------------------------------------
void testApp::draw(){
// draw the incoming, the grayscale, the bg and the thresholded difference
ofSetHexColor(0xFFFFFF);
colorImg.draw(360,20);
grayImage.draw(360,h+40);
//grayBg.draw(20,280);
//grayDiff.draw(360,20);
//ofFill();
//ofSetHexColor(0x333333);
//ofRect(360,20,320,240);
ofSetHexColor(0xffffff);
//glPushMatrix();
//glTranslatef(360,20,0);
flow.draw();
//glPopMatrix();
//float horizontalVelocityAtXY = cvGetReal2D( flow.vel_x, y, x );
//float verticalVelocityAtXY = cvGetReal2D( flow.vel_y, y, x );
/*//get the average vector back and scale
//we scale by a lot because the number of changed pixels
//is quite small in comparison to the total number
//if you wanted to get motion but didn't care about magnitude
//you could normalize this result.
ofVec2f overall = vision.getOverallMotionFromField();
overall *= 30000.0;
//our center point - just for drawing a line from the center of the screen
ofPoint center(ofGetWidth()/2, ofGetHeight()/2);
//draw a line representing the average vector
ofSetHexColor(0xFF00FF);
ofLine(center.x, center.y, center.x + overall.x, center.y + overall.y);
//draw just the horizontal component
ofSetHexColor(0x0000FF);
ofLine(center.x, center.y, center.x + overall.x, center.y);*/
//ofSetHexColor(0xffffff);
// then draw the contours:
/*ofFill();
ofSetHexColor(0x333333);
ofRect(360,540,320,240);
ofSetHexColor(0xffffff);
// we could draw the whole contour finder
//contourFinder.draw(360,540);
// or, instead we can draw each blob individually,
// this is how to get access to them:
for (int i = 0; i < contourFinder.nBlobs; i++){
contourFinder.blobs[i].draw(360,540);
}*/
// finally, a report:
//ofSetHexColor(0xffffff);
//char reportStr[1024];
//sprintf(reportStr, "bg subtraction and blob detection\npress ' ' to capture bg\nthreshold %i (press: +/-)\nnum blobs found %i, fps: %f", threshold, contourFinder.nBlobs, ofGetFrameRate());
//ofDrawBitmapString(reportStr, 20, 600);
}
//--------------------------------------------------------------
void testApp::keyPressed(int key){
switch (key){
case ' ':
bLearnBakground = true;
break;
case '+':
threshold ++;
if (threshold > 255) threshold = 255;
break;
case '-':
threshold --;
if (threshold < 0) threshold = 0;
break;
}
}
//--------------------------------------------------------------
void testApp::keyReleased(int key){
}
//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
}
//--------------------------------------------------------------
void testApp::gotMessage(ofMessage msg){
}
//--------------------------------------------------------------
void testApp::dragEvent(ofDragInfo dragInfo){
}
The Lucas-Kanade doesn’t complain about the sizes and all works well. I can post those . and .cpp if needed.
Any idea why it thinks my 2 inputs are different sizes? Once I transfer the colorImg to grayImg it should be single channel, right?