Hi Ben,
See here: http://www.hangaar.net/#past-forward-technical-info
I’m planning to clean up the code, add color frames (JPEG compression) & release it on GitHub — both the sender in C# and the receiver for OF.
I’ve found that CPU usage dropped and system stability went up (even with the crappy code below :D) on the OF side by not using the Kinect locally. And a Windows PC running an almost-default example was very stable (and you have features like face tracking, seated mode, etc. available).
If you’re interested, this is the code used in the project described (OF receiver):
// Receiver thread: connects to the Kinect server over TCP, pulls raw
// 320x240 depth+player frames (1 byte per pixel), decodes them into separate
// depth and label images, scales/warps them to the calibrated resolution and
// publishes the result through a lock-protected double buffer.
//
// Wire format per byte: bits 0-5 = depth/4 (0..63), bits 6-7 = player flag
// (00 = none, 01 = player 1, 10 = player 2, 11 = player 3+).
void Input::threadedFunction() {
    networkStatus = "Connecting to " + ofToString(kinectServerIP) + " at port " + ofToString(kinectServerPort);
    cout << "Connecting to " << kinectServerIP << " @ " << kinectServerPort << endl;
    while (isThreadRunning()) {
        // (Re)connect to the sender; retry with a back-off on failure.
        tcpClientConnected = tcpClient.setup(kinectServerIP, kinectServerPort, true);
        if (!tcpClientConnected) {
            networkStatus = "Connection failed to " + ofToString(kinectServerIP) + " at port " + ofToString(kinectServerPort) + ". Retrying... ";
            ofSleepMillis(5000);
            continue;
        }
        int framecntr = 0; // frames received during the current wall-clock second
        int seconds = 0;   // last second for which we reported the frame rate
        while (tcpClient.isConnected()) {
            const int size = 320 * 240;  // one full frame, 1 byte per pixel
            const int chunkSize = 1280;  // matches the sender's per-write chunk
            char* receivePos = pixelsRcv;
            int totalReceivedBytes = 0;
            int receivedBytes = 0;
            int time1 = ofGetElapsedTimeMillis(); // kept for the debug line below
            int time2 = 0;
            // Accumulate exactly one frame. receiveRawBytes() may return fewer
            // bytes than requested, 0, or -1 on error/disconnect.
            while (totalReceivedBytes < size && tcpClient.isConnected() && receivedBytes != -1) {
                // BUGFIX: never request past the end of the frame buffer — a
                // short read earlier in the loop would otherwise let a fixed
                // 1280-byte request overrun pixelsRcv on the last chunk.
                int want = size - totalReceivedBytes;
                if (want > chunkSize) want = chunkSize;
                receivedBytes = tcpClient.receiveRawBytes(receivePos, want);
                if (time2 == 0) time2 = ofGetElapsedTimeMillis();
                // BUGFIX: only advance on an actual read; the original added a
                // -1 error return to the counters before the loop exited.
                if (receivedBytes > 0) {
                    totalReceivedBytes += receivedBytes;
                    receivePos += receivedBytes;
                }
            }
            // cout << "Wait: " << time2 - time1 << ", rcv: " << ofGetElapsedTimeMillis() - time2 << endl;
            if (!tcpClient.isConnected() || receivedBytes == -1) {
                cout << "Connection lost!" << endl;
                networkStatus = "Connection lost";
                tcpClientConnected = false;
                // BUGFIX: release the socket before retrying setup() so the
                // reconnect starts from a clean state.
                tcpClient.close();
                break; // fall back to the outer (re)connect loop
            }
            framecntr++;
            // Once per second, report the number of frames received.
            int time = (int)(ofGetElapsedTimeMillis() / 1000);
            if (seconds != time) {
                seconds = time;
                networkStatus = "Connected to " + ofToString(kinectServerIP) + " at port " + ofToString(kinectServerPort) + ". Receiving frames at " + ofToString(framecntr);
                framecntr = 0;
            }
            // Decode each byte: low 6 bits carry depth/4, high 2 bits mark a player.
            for (int j = 0; j < size; j++) {
                // char may be signed on this platform; reinterpret as 0..255.
                unsigned char valx = (unsigned char)pixelsRcv[j];
                if (valx >= 128 + 64) {
                    // player 3+ (the sender folds players 3..7 together)
                    tcpClientLabelPixelsReceived[j] = 255;
                    tcpClientDepthPixelsReceived[j] = (valx - 128 - 64) * 4; // restore depth scale
                } else if (valx >= 128) {
                    // player 2
                    tcpClientLabelPixelsReceived[j] = 255;
                    tcpClientDepthPixelsReceived[j] = (valx - 128) * 4;
                } else if (valx >= 64) {
                    // player 1
                    tcpClientLabelPixelsReceived[j] = 255;
                    tcpClientDepthPixelsReceived[j] = (valx - 64) * 4;
                } else {
                    // no player on this pixel
                    tcpClientLabelPixelsReceived[j] = 0;
                    tcpClientDepthPixelsReceived[j] = valx * 4;
                }
            }
            depthLowRes.setFromPixels(tcpClientDepthPixelsReceived, 320, 240);
            labelLowRes.setFromPixels(tcpClientLabelPixelsReceived, 320, 240);
            // Scale & warp into the calibrated high-res images.
            depthHiResCalib.warpIntoMe(depthLowRes, srcCalibLoRes, dstCalib);
            labelHiResCalib.warpIntoMe(labelLowRes, srcCalibLoRes, dstCalib);
            // Publish via the double buffer; only the copies run under the lock
            // so the consumer thread is blocked as briefly as possible.
            lock();
            labelHiResCalibDoubleBuffer = labelHiResCalib;
            depthHiResCalibDoubleBuffer = depthHiResCalib;
            unlock();
            threadRcvAndReseizeTime = ofGetElapsedTimeMillis() - time2;
            newFrame = true;
        }
    }
}
And this is the C# code on Windows for sending the frames. It’s an adaptation of the SkeletonBasics-WPF example.
using System.Threading;
using System.Net;
...
TcpClient client;
...
// Runs once when the WPF window has loaded. After the Kinect sensor has been
// located and started (elided here), opens the TCP connection to the OF
// receiver running on this machine.
private void WindowLoaded(object sender, RoutedEventArgs e)
{
....
if (null != this.sensor)
{
//open tcp connection to the openFrameworks receiver
// NOTE(review): endpoint is hard-coded to loopback:1001 — presumably the OF
// app runs on the same box; consider making this configurable. Connect()
// throws a SocketException if the receiver is not listening yet.
client = new TcpClient();
client.Connect(IPAddress.Loopback, 1001);
.......
}
}
/// <summary>
/// Kinect depth-frame callback: downsamples the 640x480 depth image to
/// 320x240, packs each pixel into one byte (bits 0-5 = depth intensity / 4,
/// bits 6-7 = player flag) and streams the 76,800-byte frame to the OF
/// receiver over the already-open TCP connection.
/// </summary>
private void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame != null)
        {
            // Copy the pixel data from the frame into the reusable buffer.
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

            // One byte per output pixel. NOTE(review): allocated on every
            // frame (~30x/s); could be hoisted to a reusable field.
            byte[] outputPlayerAndDepth = new byte[320 * 240];

            // Min/max reliable depth for the current frame.
            int minDepth = MinMinDepth;
            int maxDepth = MaxMaxDepth;
            int outIndex = 0;

            // Downsample 640x480 -> 320x240 by stepping 2 in each direction
            // (the original iterated every pixel and filtered with %2,
            // doing 4x the loop work for the same result).
            for (int y = 0; y < 480; y += 2)
            {
                for (int x = 0; x < 640; x += 2)
                {
                    int i = y * 640 + x;

                    // Clamp out-of-range readings.
                    short d = depthPixels[i].Depth;
                    if (d >= maxDepth - 1) d = (short)(maxDepth - 2);
                    if (d <= minDepth) d = 0;

                    // 8-bit intensity -> 6 bits (0..63).
                    byte value = (byte)(intensityTable[d] / 4);
                    if (value >= 64) value = 63;

                    // Encode the player index in the top two bits:
                    // 01 = player 1, 10 = player 2, 11 = player 3+.
                    int player = depthPixels[i].PlayerIndex;
                    if (player == 1) value += 64;
                    else if (player == 2) value += 128;
                    else if (player >= 3) value += 64 + 128;

                    outputPlayerAndDepth[outIndex++] = value;
                }
            }

            // Send the whole frame in a single call. TCP is a byte stream, so
            // the receiver's fixed-size reads are unaffected; the original
            // wrote 60 chunks of 1280 bytes and called GetStream() once per
            // chunk. NOTE(review): Write() throws if the receiver disconnects
            // — consider catching IOException/SocketException and reconnecting
            // instead of letting the event handler crash the app.
            client.GetStream().Write(outputPlayerAndDepth, 0, outputPlayerAndDepth.Length);
        }
    }
}