I'm working on a web-based audio editor in JavaScript. It's not a live audio player (like a virtual DJ console) where filters and effects are applied in real time; it's a web app that lets the user cut, copy, and paste track segments. The track is loaded from a file. I'm using the Web Audio API and React with the Wavesurfer.js library (link) and its plugins. Thanks to the Regions plugin (link) and some guides (link), I can already let the user cut, copy, paste, and change the volume of track segments.
So far I have manipulated the raw sample data (Float32Array) and the AudioBuffer object of the track directly.
For example, it's easy to adjust the volume of a track segment/region:
function adjustGainToRegion(region, gain) {
    const start = region.start;
    const end = region.end;
    const originalAudioBuffer = waveSurfer.getDecodedData();
    const newAudioBuffer = audioContext.createBuffer(
        originalAudioBuffer.numberOfChannels,
        originalAudioBuffer.length,
        originalAudioBuffer.sampleRate
    );
    for (let channel = 0; channel < originalAudioBuffer.numberOfChannels; channel++) {
        const originalChanData = originalAudioBuffer.getChannelData(channel);
        const newChanData = newAudioBuffer.getChannelData(channel);
        // View over the samples of the selected region.
        const midData = originalChanData.subarray(
            start * originalAudioBuffer.sampleRate,
            end * originalAudioBuffer.sampleRate
        );
        // Copy the whole track, then scale one region's worth of samples.
        // Note: this scales from index 0, not from the region start (see EDIT below).
        newChanData.set(originalChanData);
        for (let i = 0; i < midData.length; i++) {
            newChanData[i] = newChanData[i] * gain;
        }
    }
    return newAudioBuffer;
}
But how can I manipulate the AudioBuffer object (or its underlying sample data) to apply filters (highpass, lowpass, bandpass, notch) and effects (reverb, delay, echo, etc.)? Is it possible, or is a workaround needed?
EDIT
The above function isn't correct, because it always applies the gain from the track start (0s) instead of from the region start. Here is a corrected version:
function adjustGainToRegion(region, gain) {
    const originalAudioBuffer = waveSurfer.getDecodedData();
    const sampleRate = originalAudioBuffer.sampleRate;
    // Convert the region boundaries from seconds to integer sample indices.
    const startIndex = Math.round(region.start * sampleRate);
    const endIndex = Math.round(region.end * sampleRate);
    const newAudioBuffer = audioContext.createBuffer(
        originalAudioBuffer.numberOfChannels,
        originalAudioBuffer.length,
        sampleRate
    );
    for (let channel = 0; channel < originalAudioBuffer.numberOfChannels; channel++) {
        const originalChanData = originalAudioBuffer.getChannelData(channel);
        const newChanData = newAudioBuffer.getChannelData(channel);
        // Copy the whole track first. Scaling a subarray() view instead would
        // mutate the original buffer, since subarray() doesn't copy data.
        newChanData.set(originalChanData);
        // Scale only the samples inside the region.
        for (let i = startIndex; i < endIndex; i++) {
            newChanData[i] = newChanData[i] * gain;
        }
    }
    return newAudioBuffer;
}
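For instance, a minimal usage sketch of the corrected function (the region boundaries and gain factor here are illustrative assumptions; a real region object comes from the Regions plugin, with start and end in seconds):

// Hypothetical call: halve the volume between 2.0s and 5.5s.
const quieterBuffer = adjustGainToRegion({ start: 2.0, end: 5.5 }, 0.5);
// quieterBuffer is a new AudioBuffer; how you feed it back into the
// waveform display is app-specific.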
This can be done with an OfflineAudioContext. You could, for example, rewrite your code to use an AudioBufferSourceNode to slice the original audio data and a GainNode to manipulate the gain.
function adjustGainToRegion(region, gain) {
    const originalAudioBuffer = waveSurfer.getDecodedData();
    // The offline context is only as long as the region, so rendering
    // produces just the gain-adjusted slice of the track.
    const offlineAudioContext = new OfflineAudioContext({
        length: Math.round((region.end - region.start) * originalAudioBuffer.sampleRate),
        numberOfChannels: originalAudioBuffer.numberOfChannels,
        sampleRate: originalAudioBuffer.sampleRate
    });
    const audioBufferSourceNode = new AudioBufferSourceNode(
        offlineAudioContext,
        { buffer: originalAudioBuffer }
    );
    const gainNode = new GainNode(offlineAudioContext, { gain });
    audioBufferSourceNode
        .connect(gainNode)
        .connect(offlineAudioContext.destination);
    // Start playback at the region offset; the context length clips it at the region end.
    audioBufferSourceNode.start(0, region.start);
    // Returns a Promise that resolves with the rendered AudioBuffer.
    return offlineAudioContext.startRendering();
}
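Note that, unlike the synchronous versions above, this function returns a Promise that resolves with the rendered AudioBuffer containing only the region slice. A minimal usage sketch, assuming the splice-back step is handled elsewhere in your app:

adjustGainToRegion(region, 0.5).then((renderedBuffer) => {
    // renderedBuffer holds only the gain-adjusted region slice;
    // splice it back into the full track buffer here (app-specific).
    console.log(renderedBuffer.duration, 'seconds rendered');
});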
You can, for example, replace or combine the GainNode with a BiquadFilterNode or any other node provided by the Web Audio API.
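As a sketch of that idea, here is the same offline pattern with a BiquadFilterNode applied to the region; the function name and the filter parameters are illustrative assumptions, not part of any library API:

function applyFilterToRegion(region, type, frequency) {
    const originalAudioBuffer = waveSurfer.getDecodedData();
    const offlineAudioContext = new OfflineAudioContext({
        length: Math.round((region.end - region.start) * originalAudioBuffer.sampleRate),
        numberOfChannels: originalAudioBuffer.numberOfChannels,
        sampleRate: originalAudioBuffer.sampleRate
    });
    const audioBufferSourceNode = new AudioBufferSourceNode(
        offlineAudioContext,
        { buffer: originalAudioBuffer }
    );
    // type can be 'highpass', 'lowpass', 'bandpass', 'notch', ...
    const filterNode = new BiquadFilterNode(offlineAudioContext, { type, frequency });
    audioBufferSourceNode
        .connect(filterNode)
        .connect(offlineAudioContext.destination);
    audioBufferSourceNode.start(0, region.start);
    return offlineAudioContext.startRendering();
}

// e.g. applyFilterToRegion(region, 'highpass', 1000)

Effects work the same way: a DelayNode with a feedback GainNode gives a delay/echo, and a ConvolverNode loaded with an impulse response buffer gives a reverb. One caveat: effects with a tail (echo, reverb) keep producing sound after the region ends, so you may want to extend the OfflineAudioContext length to capture it. A sketch of a feedback echo, with the tail length, delay time, and feedback amount as illustrative assumptions:

function applyEchoToRegion(region, delayTime, feedback) {
    const originalAudioBuffer = waveSurfer.getDecodedData();
    const sampleRate = originalAudioBuffer.sampleRate;
    const tailSeconds = 2; // assumed extra time for the echo tail to ring out
    const offlineAudioContext = new OfflineAudioContext({
        length: Math.round((region.end - region.start + tailSeconds) * sampleRate),
        numberOfChannels: originalAudioBuffer.numberOfChannels,
        sampleRate
    });
    const source = new AudioBufferSourceNode(offlineAudioContext, { buffer: originalAudioBuffer });
    const delayNode = new DelayNode(offlineAudioContext, {
        delayTime,
        maxDelayTime: Math.max(1, delayTime)
    });
    const feedbackNode = new GainNode(offlineAudioContext, { gain: feedback });
    // Dry path plus a feedback loop: delay -> feedback gain -> back into delay.
    source.connect(offlineAudioContext.destination);
    source.connect(delayNode);
    delayNode.connect(feedbackNode);
    feedbackNode.connect(delayNode);
    delayNode.connect(offlineAudioContext.destination);
    // Stop the source at the region end so only the region feeds the echo.
    source.start(0, region.start, region.end - region.start);
    return offlineAudioContext.startRendering();
}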