That's exactly what I changed:
In library_html5audio.js I changed html5audio_stream_create to:
html5audio_stream_create: function(context_id, bufferSize, inputChannels, outputChannels, inbuffer, outbuffer, callback, userData, pthreadPtr) {
  out("Buffer size: " + bufferSize);
  // Initializes the audio context and the pthread in its AudioWorkletGlobalScope
  // Create the context
  Module.audioCtx = new AudioContext({sampleRate: 88200});
  // Initialize the pthread shared by all AudioWorkletNodes in this context
  PThread.initAudioWorkletPThread(Module.audioCtx, pthreadPtr).then(function() {
    out("Audio worklet PThread context initialized!");
  }, function(err) {
    out("Audio worklet PThread context initialization failed: " + [err, err.stack]);
  });
  // Creates an AudioWorkletNode and connects it to the output once it's created
  PThread.createAudioWorkletNode(
    Module.audioCtx,
    'native-passthrough-processor',
    {
      numberOfInputs: 0,
      numberOfOutputs: 1,
      outputChannelCount: [2],
      processorOptions: {
        inputChannels: inputChannels,
        outputChannels: outputChannels,
        inbuffer: inbuffer,
        outbuffer: outbuffer,
        bufferSize: bufferSize,
        callback: callback,
        userData: userData
      }
    }
  ).then(function(workletNode) {
    // Connect the worklet to the audio context output
    out("Audio worklet node created! Tap/click on the window if you don't hear audio!");
    workletNode.connect(Module.audioCtx.destination);
  }, function(err) {
    console.log("Audio worklet node creation failed: " + [err, err.stack]);
  });
  // To make this example usable we set up a resume on user interaction, as browsers
  // all require the user to interact with the page before letting audio play
  if (window && window.addEventListener) {
    var opts = { capture: true, passive: true };
    window.addEventListener("touchstart", function() { Module.audioCtx.resume() }, opts);
    window.addEventListener("mousedown", function() { Module.audioCtx.resume() }, opts);
    window.addEventListener("keydown", function() { Module.audioCtx.resume() }, opts);
  }
},
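Since html5audio_stream_create gained the pthreadPtr argument on the JS side, the extern declaration on the C++ side has to grow the same trailing parameter. This is a rough sketch of that prototype, with parameter and pointer types assumed rather than copied from html5audio.h:
    // Sketch only: the new trailing pthread_t parameter is the point here,
    // the exact types/names in html5audio.h may differ.
    extern "C" int html5audio_stream_create(int context_id, int bufferSize,
        int inputChannels, int outputChannels,
        float * inbuffer, float * outbuffer,
        void (*callback)(int, int, int, void *),
        void * userData, pthread_t pthreadPtr);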
I made this change in ofxEmscriptenSoundStream.cpp:
bool ofxEmscriptenSoundStream::setup(const ofSoundStreamSettings & settings) {
    inbuffer.allocate(settings.bufferSize, settings.numInputChannels);
    outbuffer.allocate(settings.bufferSize, settings.numOutputChannels);
    static pthread_t pthreadPtr = 0;
    this->settings = settings;
    stream = html5audio_stream_create(context, settings.bufferSize, settings.numInputChannels, settings.numOutputChannels, inbuffer.getBuffer().data(), outbuffer.getBuffer().data(), &audio_cb, this, pthreadPtr);
    return true;
}
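The &audio_cb passed here has to match the 'viiii' signature the worklet later invokes through dynCall: void return, three ints, plus the user-data pointer. A sketch of that shape (the body is my assumption of what the existing callback dispatches to, not a copy of it):
    // The shape implied by dynCall('viiii', ...): void(int, int, int, void *).
    static void audio_cb(int bufferSize, int inputChannels, int outputChannels, void * userData){
        // route back into the instance that registered itself as userData;
        // audioCB is a stand-in name for whatever the real callback calls
        ((ofxEmscriptenSoundStream *)userData)->audioCB(bufferSize, inputChannels, outputChannels);
    }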
And in ofxEmscriptenSoundStream.h:
#include <pthread.h>
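The include is needed because pthread_t now appears in the stream setup. For context, a minimal sketch of the members setup() relies on, reconstructed from the code above rather than copied from the header:
    #include <pthread.h>

    class ofxEmscriptenSoundStream {
        // base class and other members omitted
        int context;
        int stream;
        ofSoundStreamSettings settings;
        ofSoundBuffer inbuffer, outbuffer; // the buffers handed to the worklet
    };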
And this is the AudioWorklet:
/**
 * This is the JS side of the AudioWorklet processing that creates our
 * AudioWorkletProcessor that fetches the audio data from native code and
 * copies it into the output buffers.
 *
 * This is intentionally not made part of Emscripten AudioWorklet integration
 * because apps will usually want a lot of control here (formats, channels,
 * additional processors etc.)
 */
// Register our audio processors if the code loads in an AudioWorkletGlobalScope
if (typeof AudioWorkletGlobalScope === "function") {
  // This processor node is a simple proxy to the audio generator in native code.
  // It calls the native function then copies the samples into the output buffer
  var counter = 0;
  var inputChannels = 0;
  var outputChannels = 0;
  var inbuffer = 0;
  var outbuffer = 0;
  var bufferSize = 0;
  var callback = 0;
  var userData = 0;

  class NativePassthroughProcessor extends AudioWorkletProcessor {
    constructor(options) {
      super();
      inputChannels = options.processorOptions.inputChannels;
      outputChannels = options.processorOptions.outputChannels;
      inbuffer = options.processorOptions.inbuffer;
      outbuffer = options.processorOptions.outbuffer;
      bufferSize = options.processorOptions.bufferSize;
      callback = options.processorOptions.callback;
      userData = options.processorOptions.userData;
    }
    process(inputs, outputs, parameters) {
      // currentFrame advances by 128 per render quantum, so currentFrame / 128
      // is the quantum index; with the 2 output channels hardcoded above,
      // bufferSize / 64 is the number of 128-float slices in the native buffer
      // (bufferSize frames * 2 channels / 128 floats per slice)
      counter = currentFrame / 128 % (bufferSize / 64);
      // Ask native code to refill the buffer once per full cycle through it
      if (counter == 0) {
        dynCall('viiii', callback, [bufferSize, inputChannels, outputChannels, userData]);
      }
      const output = outputs[0];
      for (let channel = 0; channel < 2; ++channel) {
        const outputChannel = output[channel];
        // outbuffer is a byte address into the wasm heap; >> 2 converts it to a
        // HEAPF32 index. Copy the counter-th 128-float slice into this channel.
        outputChannel.set(Module.HEAPF32.subarray(outbuffer >> 2, (outbuffer >> 2) + bufferSize * outputChannels).slice(counter * 128, counter * 128 + 128));
      }
      return true;
    }
  }
  // Register the processor as per the audio worklet spec
  registerProcessor('native-passthrough-processor', NativePassthroughProcessor);
}
And I use this Emscripten branch: