Faster way to map ofShortPixels to ofPixels? (gpu?)

I’m working with a raw depth image that comes in unsigned short (16bit) values, representing depth in mm. Now I want to map a range of these raw depth values to 8bit ofPixels for drawing.

(side note: this is for a new addon for the Intel RealSense SR300 camera I’m writing)

I’ve tried a lookup table (based on this code) but it’s really slow. The depth image I’m working with is registered to the dimensions of the camera’s HD color stream, so it’s 1920x1080. I get 5 fps with this CPU-based LUT.

Is it possible/practical to load the ofShortPixels into an ofTexture and process the LUT on the GPU? Perhaps using something like this code?

The issue I’m afraid of is how to deal with a uint16 texture, and uint16 values in GLSL…

Is there any good way to convert/normalize the uint16 texture to a float texture (i.e. 0 - 65,535 -> 0 - 1), perhaps as I’m loading the ofShortPixels data to the ofTexture?

This stack overflow post is what has me worried about dealing with unsigned ints in the shader, since I’m a bit pressed for time on this project, so don’t want to get in too over my head right now.

Using a shader sounds like the best way to do this, and by default in the shader you’ll see the texture values as floats — but with 16-bit precision — if you use a typical sampler2D or samplerRect.


Thanks Arturo!
Yeah, after some more research, I found out about OpenGL normalized integers and how they correspond to color formats, so now it all makes sense.
e.g. the ofTexture loaded with the raw depth pixels reports a GL_LUMINANCE16 glInternalFormat, so it uses normalized integers as there’s no “UI” suffix after “16”

Gonna test out the gpu LUT and will post the relevant code if (when…) I get it working for future reference.

1 Like

I got it working using a 1D texture for the lookup table.

Here’s the app:


#pragma once

#include "ofMain.h"
#include "ofxRealSense.h"

// App that maps a raw 16-bit depth image (mm values) to an 8-bit grayscale
// image on the GPU, using a 1D lookup-table texture sampled in a shader.
class ofApp : public ofBaseApp{

	public: // NOTE(review): access specifier was lost in the paste; ofBaseApp overrides are conventionally public

		void setup();
		void update();
		void draw();
		void exit();

		// Builds the 1D LUT texture: depths at farMM map to black, nearMM to white.
		void generateGpuLUT(unsigned short nearMM, unsigned short farMM);
		void applyLUT(const ofTexture& tex); // uses shader, draws into mapFbo

		ofxRealSense realsense;

		ofTexture depthTex; // raw 16-bit depth, uploaded as a normalized-integer texture
		ofFbo depthFbo;     // 8-bit mapped output rendered by applyLUT()

		// Shader
		ofShader lutFilter;
		ofPlanePrimitive plane;

		// LUT
		GLuint texture1D = 0; // raw GL handle for the 1D lookup texture (0 = not yet created)
		int LUTsize = 4096;
		// One LUT entry; grayscale, so all three channels get the same value.
		struct Rgb { 
			float r, g, b; 
			Rgb(float v) : r(v), g(v), b(v) {}
		}; // NOTE(review): closing braces restored — the original paste was truncated here
};


#include "ofApp.h"

void ofApp::setup() {

	ofDisableArbTex(); // normalize tex coords

	generateGpuLUT(150, 1000); // 15 cm - 1 meter range, 1D lookup texture

	// Load the shader

	realsense.setup(true, true, true, false, false);;


void ofApp::update() {


	auto& depthPix = realsense.getDepthRawPixelsInColorFrame();

	if (depthPix.isAllocated()) {


// Runs the GPU LUT pass and displays the result plus the frame rate.
void ofApp::draw() {

	if (depthTex.isAllocated()) {
		applyLUT(depthTex); // draws into depthFbo
		depthFbo.draw(0, 0, 1280, 720); // display 1920x1080 result scaled to 720p
	} // NOTE(review): closing brace restored — lost in the paste

	// fps
	ofDrawBitmapStringHighlight("fps: " + ofToString(ofGetFrameRate()), 10, 20);
}

void ofApp::exit() {

// Renders depthTex through the LUT shader into depthFbo.
void ofApp::applyLUT(const ofTexture& tex) {

	if (!depthFbo.isAllocated()) { // allocate fbo on first call

		float w = depthTex.getWidth();
		float h = depthTex.getHeight();
		depthFbo.allocate(w, h, GL_RGB); // 8-bit RGB output

		// position plane for drawing
		plane.set(w, h, 2, 2); // screen size
		plane.setPosition(w * 0.5, h * 0.5, 0); // center on screen
	}

	// NOTE(review): the fbo/shader begin-end and plane.draw() were lost in the
	// paste; restored here as the standard openFrameworks render-to-fbo pattern.
	depthFbo.begin();
	lutFilter.begin();

	// NOTE(review): binds the depthTex member, not the tex parameter — likely intended tex; confirm
	lutFilter.setUniformTexture("tex", depthTex, 0);
	// "lutTexure" spelling must stay in sync with the uniform name in the fragment shader
	lutFilter.setUniformTexture("lutTexure", GL_TEXTURE_1D, texture1D, 1);
	lutFilter.setUniform1f("lutSize", LUTsize);

	plane.draw();

	lutFilter.end();
	depthFbo.end();
}

void ofApp::generateGpuLUT(unsigned short nearMM, unsigned short farMM) { // far -> black, near -> white

	// safety range check
	if (farMM < 255) farMM = 255;
	if (nearMM > farMM - 255) nearMM = farMM - 255;

	// LUT and 1D texture code based on 3D LUT here:

	// create LUT
	vector<Rgb> lut;
	float maxDepth = 65535.f; // max unsigned short

	lut.push_back(Rgb(0.)); // 0 mm -> black
	for (int i = 1; i < LUTsize; i++) {
		float inputVal = ofMap(i, 0, LUTsize - 1, 0, maxDepth); // the depth in mm for this pixel index in the table
		float outputVal = ofMap(inputVal, nearMM, farMM, 1.f, 0.f, true); // the luminance that depth should produce (0-1, clamped)

	// create 1D texture with LUT data


	glGenTextures(1, &texture1D);
	glBindTexture(GL_TEXTURE_1D, texture1D);



	glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB, LUTsize, 0, GL_RGB, GL_FLOAT, &lut[0]);
	glBindTexture(GL_TEXTURE_1D, 0);


and the shader files:


#version 120

// Pass-through vertex shader: forwards the texture coordinate so the
// fragment shader can sample the depth texture.
varying vec2 texCoordVarying;

void main() {
    texCoordVarying = gl_MultiTexCoord0.xy;
    gl_Position = ftransform();
} // NOTE(review): closing brace restored — lost in the paste

#version 120

uniform sampler2D tex;       // raw 16-bit depth; normalized-integer texture, reads as 0-1 float
uniform sampler1D lutTexure; // 1D LUT; spelling must match the C++ setUniformTexture call

varying vec2 texCoordVarying;

uniform float lutSize;

void main() {
    // Based on "GPU Gems 2 — Chapter 24. Using Lookup Tables to Accelerate Color Transformations"
    // More info and credits @ (link lost in the paste)

    float rawLum = texture2D(tex, texCoordVarying).r;

    // Compute the 1D LUT lookup scale/offset factor so the 0-1 input hits
    // texel centers rather than texel edges (avoids a half-texel offset error)
    float scale = (lutSize - 1.0) / lutSize;
    float offset = 1.0 / (2.0 * lutSize);

    // ****** Apply 1D LUT color transform! **************
    // This is our dependent texture read; The 1D texture's
    // lookup coordinate is dependent on the
    // previous texture read's result

    vec3 color = texture1D(lutTexure, rawLum * scale + offset).rgb;

    //color = mix(rawColor, color, vec3(mouse.x));
    gl_FragColor = vec4(color, 1.0);
} // NOTE(review): closing brace restored — lost in the paste
1 Like