resample audio buffer from 44100 to 16000

Submitted by 末鹿安然 on 2019-12-20 10:56:24

Question


I have audio data as a data URI, which I have converted into a buffer. Now I need that buffer at a new sample rate: the audio is currently at 44.1 kHz and I need it at 16 kHz. I record the audio with the RecordRTC API, and if I record directly at a low sample rate the voice comes out distorted, so I don't know how to resample my audio buffer.

If any of you any idea regarding this then please help me out.

Thanks in advance :)


Answer 1:


You can use an OfflineAudioContext to do the resampling, but you need to convert your data URI to an ArrayBuffer first. This solution works in the browser, not on the server: it is better to send lower-quality audio (a lower sample rate) over the network than to send a lot of data and resample on the server.
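Since the question starts from a data URI, here is a minimal sketch of that first step (the names dataUri and audioCtx are assumed, and the data URI is assumed to be base64-encoded audio that decodeAudioData can handle, e.g. WAV):

// Hypothetical helper: data URI -> ArrayBuffer, then decode it to an AudioBuffer.
function dataUriToArrayBuffer(dataUri) {
    var base64 = dataUri.split(',')[1];    // strip the "data:audio/...;base64," prefix
    var binary = atob(base64);             // base64 -> binary string
    var bytes = new Uint8Array(binary.length);
    for (var i = 0; i < binary.length; i++) {
        bytes[i] = binary.charCodeAt(i);
    }
    return bytes.buffer;
}

var audioCtx = new AudioContext();
audioCtx.decodeAudioData(dataUriToArrayBuffer(dataUri), function(decoded) {
    // `decoded` is the AudioBuffer at the original sample rate; it plays the
    // role of `source` in the resampling code below.
});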

// `source` is an AudioBuffer instance of the source audio
// at the original sample rate.

var TARGET_SAMPLE_RATE = 16000;

var offlineCtx = new OfflineAudioContext(source.numberOfChannels, source.duration * TARGET_SAMPLE_RATE, TARGET_SAMPLE_RATE);

var buffer = offlineCtx.createBuffer(source.numberOfChannels, source.length, source.sampleRate);

// Copy the source data into the offline AudioBuffer.
for (var channel = 0; channel < source.numberOfChannels; channel++) {
    buffer.copyToChannel(source.getChannelData(channel), channel);
}

// Play it from the beginning.
var bufferSource = offlineCtx.createBufferSource();
bufferSource.buffer = buffer;
bufferSource.connect(offlineCtx.destination);
bufferSource.start(0);
offlineCtx.oncomplete = function(e) {
    // `e.renderedBuffer` contains an AudioBuffer resampled at 16000Hz.
    // Use e.renderedBuffer.getChannelData(x) to get a Float32Array for channel x.
    var resampled = e.renderedBuffer;
}

offlineCtx.startRendering();



Answer 2:


None of the other answers worked for me as posted. Here is code that does:

// `sourceAudioBuffer` is an AudioBuffer instance of the source audio
// at the original sample rate.
const DESIRED_SAMPLE_RATE = 16000;
const offlineCtx = new OfflineAudioContext(sourceAudioBuffer.numberOfChannels, sourceAudioBuffer.duration * DESIRED_SAMPLE_RATE, DESIRED_SAMPLE_RATE);
const cloneBuffer = offlineCtx.createBuffer(sourceAudioBuffer.numberOfChannels, sourceAudioBuffer.length, sourceAudioBuffer.sampleRate);
// Copy the source data into the offline AudioBuffer
for (let channel = 0; channel < sourceAudioBuffer.numberOfChannels; channel++) {
    cloneBuffer.copyToChannel(sourceAudioBuffer.getChannelData(channel), channel);
}
// Play it from the beginning.
const source = offlineCtx.createBufferSource();
source.buffer = cloneBuffer;
source.connect(offlineCtx.destination);
source.start(0);
offlineCtx.oncomplete = function(e) {
  // `e.renderedBuffer` contains an AudioBuffer resampled at 16000Hz.
  // Use resampledAudioBuffer.getChannelData(x) to get a Float32Array for channel x.
  const resampledAudioBuffer = e.renderedBuffer;
}
offlineCtx.startRendering();
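Note that in current browsers startRendering() also returns a promise that resolves with the rendered AudioBuffer, so the oncomplete handler can be replaced with a then() callback, for example:

offlineCtx.startRendering().then(function(renderedBuffer) {
  // `renderedBuffer` is the AudioBuffer resampled to 16000 Hz.
  const samples = renderedBuffer.getChannelData(0); // Float32Array for channel 0
});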



Answer 3:


This is just a copy of the answer from padenot, which I updated to avoid confusion for anyone who finds this post and runs into missing variable definitions or wonders how to get the final resampled Float32Array. It works for me in Firefox Quantum 64.0:

  var sourceAudioBuffer = e.inputBuffer;  // directly received by the audioprocess event from the microphone in the browser

  var TARGET_SAMPLE_RATE = 8000;
  var offlineCtx = new OfflineAudioContext(sourceAudioBuffer.numberOfChannels, sourceAudioBuffer.duration * TARGET_SAMPLE_RATE, TARGET_SAMPLE_RATE);
  var buffer = offlineCtx.createBuffer(sourceAudioBuffer.numberOfChannels, sourceAudioBuffer.length, sourceAudioBuffer.sampleRate);
  // Copy the source data into the offline AudioBuffer
  for (var channel = 0; channel < sourceAudioBuffer.numberOfChannels; channel++) {
      buffer.copyToChannel(sourceAudioBuffer.getChannelData(channel), channel);
  }
  // Play it from the beginning.
  var source = offlineCtx.createBufferSource();
  source.buffer = buffer;
  source.connect(offlineCtx.destination);
  source.start(0);
  offlineCtx.oncomplete = function(e) {
    // `e.renderedBuffer` contains an AudioBuffer resampled at TARGET_SAMPLE_RATE (8000 Hz here).
    // Use resampled.getChannelData(x) to get a Float32Array for channel x.
    var resampled = e.renderedBuffer;
    var leftFloat32Array = resampled.getChannelData(0);
    // use this float32array to send the samples to the server or whatever
  }
  offlineCtx.startRendering();

In my case, the raw resampled 8000 Hz PCM data is piped into ffmpeg via UDP broadcasts like this:

ffmpeg -fflags nobuffer -analyzeduration 1M -f f32le -ar 8000 -ac 1 -i udp://127.0.0.1:12000 -ar 44100 -ac 2 -f alsa hw:0

A WebSocket server just receives the base64-encoded PCM data, decodes the base64 string, and broadcasts it via UDP; ffmpeg then plays the result back on the speaker.
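For reference, a minimal sketch of the browser side of that pipeline, assuming a WebSocket connection to a hypothetical server address; it base64-encodes the raw little-endian float32 samples so the server only has to decode and forward them over UDP:

var ws = new WebSocket('ws://localhost:12345'); // assumed server address

function sendSamples(float32Samples) { // e.g. the leftFloat32Array from above
    var bytes = new Uint8Array(float32Samples.buffer, float32Samples.byteOffset, float32Samples.byteLength);
    var binary = '';
    for (var i = 0; i < bytes.length; i++) {
        binary += String.fromCharCode(bytes[i]);
    }
    ws.send(btoa(binary)); // base64-encoded f32le PCM, matching ffmpeg's -f f32le input
}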




Answer 4:


An easier way is to have a standalone resample call that simply takes an input audio buffer, an input sample rate, and an output sample rate, and returns the output buffer. I found this link that does exactly that: audio resampling

It works quite well (little noise is introduced in the audible frequency range). Thanks to the author.
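The linked code is not reproduced here, but for illustration, a standalone function of that shape can be as simple as a linear-interpolation resampler over one channel's Float32Array (a rough sketch only; a proper resampler should also low-pass filter before downsampling to avoid aliasing):

function resample(inputSamples, inputRate, outputRate) {
    var ratio = inputRate / outputRate;
    var outputLength = Math.round(inputSamples.length / ratio);
    var output = new Float32Array(outputLength);
    for (var i = 0; i < outputLength; i++) {
        var pos = i * ratio;                               // fractional position in the input
        var i0 = Math.floor(pos);
        var i1 = Math.min(i0 + 1, inputSamples.length - 1);
        var t = pos - i0;
        output[i] = (1 - t) * inputSamples[i0] + t * inputSamples[i1];
    }
    return output;
}

// e.g. resample(sourceAudioBuffer.getChannelData(0), 44100, 16000)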



Source: https://stackoverflow.com/questions/27598270/resample-audio-buffer-from-44100-to-16000
