问题
I have an audio buffer rendered using webkitOfflineAudioContext
. Now, I wish to export it into a WAV file. How do I do it? I tried using recorder.js but couldn't figure out how to use it. Here's my code: http://jsfiddle.net/GBQV8/.
回答1:
Here's a gist that should help: https://gist.github.com/kevincennis/9754325.
I haven't actually tested this, so there might be a stupid typo or something, but the basic approach will work (I've done it before).
Essentially, you're going to use the web worker from Recorder.js directly so that you can process one big AudioBuffer all in one shot, rather than recording it incrementally in real-time.
I'll paste the code here too, just in case something happens to the gist...
// Assumes an AudioBuffer instance named `buffer` already exists in scope.
//
// Recorder.js proper only captures audio incrementally in real time, so we
// drive its web worker directly and feed it the whole AudioBuffer at once.
var wavWorker = new Worker('recorderWorker.js');

// Receives the result of the `exportWAV` command issued below.
wavWorker.onmessage = function( e ) {
  var blob = e.data; // the finished WAV blob
};

// Tell the worker the sample rate of the PCM data it is about to receive.
wavWorker.postMessage({
  command: 'init',
  config: {sampleRate: 44100}
});

// Hand both channels of the buffer to the worker in a single "record" call.
wavWorker.postMessage({
  command: 'record',
  buffer: [
    buffer.getChannelData(0),
    buffer.getChannelData(1)
  ]
});

// Finally, ask the worker to encode everything it has as a WAV file.
wavWorker.postMessage({
  command: 'exportWAV',
  type: 'audio/wav'
});
回答2:
I figured I'd share a working solution that I managed to put together from Kevin's answer.
Here's the waveWorker.js
script:
// Worker entry point: build a WavePCM encoder from the posted config,
// feed it the per-channel PCM arrays, then emit the finished WAV bytes.
self.onmessage = function( e ){
  var message = e.data;
  var encoder = new WavePCM( message.config );
  encoder.record( message.pcmArrays );
  encoder.requestData();
};
/**
 * Encodes raw Float32 PCM channel data ([-1, 1]) into a complete WAV file
 * (44-byte RIFF header followed by interleaved little-endian PCM samples).
 *
 * @param {Object} config
 * @param {number} [config.sampleRate=48000] - sample rate written to the header
 * @param {number} [config.bitDepth=16] - output bits per sample (8, 16, 24 or 32)
 */
var WavePCM = function( config ){
  this.sampleRate = config['sampleRate'] || 48000;
  this.bitDepth = config['bitDepth'] || 16;
  this.recordedBuffers = [];
  this.bytesPerSample = this.bitDepth / 8;
};

/**
 * Quantizes and interleaves one batch of channel data.
 * @param {Float32Array[]} buffers - one Float32Array per channel; all channels
 *   must be the same length (and the same length across repeated calls, since
 *   requestData() assumes uniform batch sizes).
 */
WavePCM.prototype.record = function( buffers ){
  // Channel count is fixed by the first batch.
  this.numberOfChannels = this.numberOfChannels || buffers.length;
  var bufferLength = buffers[0].length;
  var reducedData = new Uint8Array( bufferLength * this.numberOfChannels * this.bytesPerSample );

  // Interleave channels and quantize each float to little-endian PCM.
  for ( var i = 0; i < bufferLength; i++ ) {
    for ( var channel = 0; channel < this.numberOfChannels; channel++ ) {
      var outputIndex = ( i * this.numberOfChannels + channel ) * this.bytesPerSample;

      // Clamp to [-1, 1] to protect against clipping in the source data.
      var sample = buffers[ channel ][ i ];
      if ( sample > 1 ) {
        sample = 1;
      }
      else if ( sample < -1 ) {
        sample = -1;
      }

      // Scale asymmetrically: negative samples by 2^(n-1), positive samples by
      // 2^(n-1) - 1. The original code scaled both sides by 2^(n-1), so a
      // full-scale +1.0 sample overflowed the signed range and wrapped to the
      // most NEGATIVE value (e.g. 1 * 32768 -> -32768 as int16).
      switch ( this.bytesPerSample ) {
        case 4: // 32-bit
          sample = sample < 0 ? sample * 2147483648 : sample * 2147483647;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          reducedData[ outputIndex + 2 ] = sample >> 16;
          reducedData[ outputIndex + 3 ] = sample >> 24;
          break;
        case 3: // 24-bit
          sample = sample < 0 ? sample * 8388608 : sample * 8388607;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          reducedData[ outputIndex + 2 ] = sample >> 16;
          break;
        case 2: // 16-bit
          sample = sample < 0 ? sample * 32768 : sample * 32767;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          break;
        case 1: // 8-bit is unsigned, biased around 128. Scaling by 127.5
          // keeps +1.0 at 255; the original (sample + 1) * 128 wrapped
          // +1.0 to 256 -> 0.
          reducedData[ outputIndex ] = ( sample + 1 ) * 127.5;
          break;
        default:
          throw "Only 8, 16, 24 and 32 bits per sample are supported";
      }
    }
  }

  this.recordedBuffers.push( reducedData );
};

/**
 * Assembles the RIFF/WAVE header plus all recorded batches into a single
 * Uint8Array, posts it to the main thread (transferring the underlying
 * ArrayBuffer), then shuts the worker down. Call record() at least once first.
 */
WavePCM.prototype.requestData = function(){
  var bufferLength = this.recordedBuffers[0].length;
  var dataLength = this.recordedBuffers.length * bufferLength;
  var headerLength = 44;
  var wav = new Uint8Array( headerLength + dataLength );
  var view = new DataView( wav.buffer );

  view.setUint32( 0, 1380533830, false ); // RIFF identifier 'RIFF'
  view.setUint32( 4, 36 + dataLength, true ); // file length minus RIFF identifier length and file description length
  view.setUint32( 8, 1463899717, false ); // RIFF type 'WAVE'
  view.setUint32( 12, 1718449184, false ); // format chunk identifier 'fmt '
  view.setUint32( 16, 16, true ); // format chunk length
  view.setUint16( 20, 1, true ); // sample format (raw PCM)
  view.setUint16( 22, this.numberOfChannels, true ); // channel count
  view.setUint32( 24, this.sampleRate, true ); // sample rate
  view.setUint32( 28, this.sampleRate * this.bytesPerSample * this.numberOfChannels, true ); // byte rate (sample rate * block align)
  view.setUint16( 32, this.bytesPerSample * this.numberOfChannels, true ); // block align (channel count * bytes per sample)
  view.setUint16( 34, this.bitDepth, true ); // bits per sample
  view.setUint32( 36, 1684108385, false); // data chunk identifier 'data'
  view.setUint32( 40, dataLength, true ); // data chunk length

  for (var i = 0; i < this.recordedBuffers.length; i++ ) {
    wav.set( this.recordedBuffers[i], i * bufferLength + headerLength );
  }

  self.postMessage( wav, [wav.buffer] );
  self.close();
};
And here's how you can use it:
/**
 * Encodes an AudioBuffer into a WAV Blob by delegating PCM encoding to
 * waveWorker.js (which posts back a Uint8Array and then closes itself).
 *
 * @param {AudioBuffer} audioBuffer - decoded or offline-rendered audio
 * @returns {Promise<Blob>} resolves with an `audio/wav` Blob; rejects if the
 *   worker fails (e.g. the script cannot be loaded)
 */
function audioBufferToWaveBlob(audioBuffer) {
  // Note: dropped the needless `async` keyword — the function explicitly
  // returns a Promise, and `async` + `new Promise` is the explicit-construction
  // anti-pattern. Callers that `await` it are unaffected.
  return new Promise(function(resolve, reject) {
    var worker = new Worker('./waveWorker.js');

    worker.onmessage = function( e ) {
      // e.data is the worker's Uint8Array; wrap its backing ArrayBuffer.
      resolve(new Blob([e.data.buffer], {type: "audio/wav"}));
    };

    // Fix: the original declared `reject` but never wired it up, so any
    // worker failure left the promise pending forever.
    worker.onerror = function( err ) {
      reject(err);
    };

    let pcmArrays = [];
    for (let i = 0; i < audioBuffer.numberOfChannels; i++) {
      pcmArrays.push(audioBuffer.getChannelData(i));
    }

    worker.postMessage({
      pcmArrays,
      config: {sampleRate: audioBuffer.sampleRate}
    });
  });
}
It's pretty quickly hacked together so feel free (of course) to fix it up and post a link to a better version in the comments :)
回答3:
When using recorder.js, make sure you start with recording a piece of audio, and then stop it. After you stopped the recorder you can call the .exportWAV
function. The callback contains a blob in wav format. Instead of recording the buffer yourself, you'd better use recorder.js's buffer creation, because if you call exportWAV, it will export the buffer it previously saved. It created the buffer from the source object you entered when creating a new recorder.
// Record from the source node, stop, then ask Recorder.js for a WAV blob.
var rec = new Recorder(yourSourceObject);
rec.record();
//let it record
rec.stop();
rec.exportWAV(function(blob){
  //the generated blob contains the wav file
}); // fixed: the original snippet ended with `}])`, a syntax error
You can also check out the source code of recorderWorker.js and find out how to convert a buffer to a wav file yourself.
回答4:
For no real time processing, take a look at OfflineAudioContext.
That might be useful to process audio data as if it were a regular AudioContext, but not in real time. If your data does not come from the microphone, you probably want to process it as fast as possible. Then, you'll need OfflineAudioContext to create a buffer before encoding it to wav.
来源:https://stackoverflow.com/questions/22560413/html5-web-audio-convert-audio-buffer-into-wav-file