html5 voice recording with visualization [closed]

Submitted by 徘徊边缘 on 2019-12-02 14:30:42

Question


I'm building an HTML5 application that records a voice; while that voice is playing, a visualizer should be in action.
Here is my code:

// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia) navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({
        audio: true
    }, success, function (e) {
        alert('Error capturing audio.');
    });
} else alert('getUserMedia not supported in this browser.');

// when pressing record

function getVal(value) {

    // if R is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";

        // if S is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {

        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";

        // we flat the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);


        // 44-byte WAV header + 2 bytes per 16-bit sample
        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);

        // RIFF chunk descriptor
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 44 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true);              // fmt chunk size: 16 for PCM
        view.setUint16(20, 1, true);               // audio format: 1 = linear PCM
        // stereo (2 channels)
        view.setUint16(22, 2, true);
        view.setUint32(24, sampleRate, true);      // sample rate
        view.setUint32(28, sampleRate * 4, true);  // byte rate: sampleRate * channels * 2 bytes
        view.setUint16(32, 4, true);               // block align: channels * 2 bytes
        view.setUint16(34, 16, true);              // bits per sample
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);


        // write the PCM samples, scaling each float in [-1, 1] to a 16-bit signed int
        var lng = interleaved.length;
        var index = 44;
        var volume = 1;
        for (var i = 0; i < lng; i++) {
            view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
            index += 2;
        }


        var blob = new Blob([view], {
            type: 'audio/wav'
        });

        // let's save it locally

        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);

        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');

        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        recordingList.appendChild(li);

    }
}


function success(e) {

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();


    volume = context.createGain();

    // creates an audio node from the microphone incoming stream(source)
    source = context.createMediaStreamSource(e);

    // connect the stream(source) to the gain node
    source.connect(volume);


    var bufferSize = 2048;

    recorder = context.createScriptProcessor(bufferSize, 2, 2);
    //node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;

    analyser2 = context.createAnalyser();
    analyser2.smoothingTimeConstant = 0.0;
    analyser2.fftSize = 1024;

    splitter = context.createChannelSplitter();
    //when recording happens
    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // get the average of the first channel, bincount is fftsize / 2
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var average = getAverageVolume(array);

        // get the average for the second channel
        var array2 = new Uint8Array(analyser2.frequencyBinCount);
        analyser2.getByteFrequencyData(array2);
        var average2 = getAverageVolume(array2);
        // clear the current state
        ctx.clearRect(0, 0, 60, 130);

        // set the fill style
        ctx.fillStyle = gradient;

        // create the meters
        ctx.fillRect(0, 130 - average, 25, 130);
        ctx.fillRect(30, 130 - average2, 25, 130);
    }

    function getAverageVolume(array) {
        var values = 0;
        var average;

        var length = array.length;

        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }

        average = values / length;
        return average;
    }

    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;


}

// we connect the recorder node to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);

function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);

    var inputIndex = 0;

    for (var index = 0; index < length;) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}


function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

My problem is that when I run it, I get an error:

cannot read property 'connect' of null in this statement: volume.connect(splitter);

What is going wrong?


Answer 1:


The volume gain node is only created after getUserMedia succeeds, inside the success function.

The top-level statements run synchronously at page load, but success fires later, once the user grants microphone access; so by the time the code reaches volume.connect(splitter), volume is still null.

You have to 'chain' all of your node connections starting from success.

Quick fix: just move these lines:

// we connect the recorder node to the destination (speakers)
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
splitter.connect(analyser2, 1, 0);
analyser.connect(recorder);
recorder.connect(context.destination);

to the end of the success function.
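
For reference, here is a minimal sketch of how the reordered success function could look (it reuses the variable names from the question's code; the onaudioprocess handler body and the analyser configuration are elided for brevity):

function success(e) {
    var AudioCtx = window.AudioContext || window.webkitAudioContext;
    context = new AudioCtx();

    // create every node first
    volume = context.createGain();
    var source = context.createMediaStreamSource(e);
    source.connect(volume);

    recorder = context.createScriptProcessor(2048, 2, 2);
    analyser = context.createAnalyser();   // fftSize/smoothing setup omitted here
    analyser2 = context.createAnalyser();
    splitter = context.createChannelSplitter();

    recorder.onaudioprocess = function (e) {
        // ... same recording/visualizing handler as before ...
    };

    // moved here: volume (and the other nodes) are guaranteed to exist now
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    splitter.connect(analyser2, 1, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
}

Any wiring that depends on nodes created inside success has to live inside it (or in a function it calls), because success only runs after the user grants microphone access.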



Source: https://stackoverflow.com/questions/24606998/htl5-voice-recording-with-isualization
