Create a waveform of the full track with Web Audio API

前端 未结 3 1057
我寻月下人不归
我寻月下人不归 2020-12-07 23:04

Realtime moving Waveform

I'm currently playing with the Web Audio API and made a spectrum using canvas.



        
相关标签:
3条回答
  • 2020-12-07 23:44

    Ok, so what i would do is to load the sound with an XMLHttpRequest, then decode it using webaudio, then display it 'carefully' to have the colors you are searching for.

    I just made a quick version, copy-pasting from various of my projects, it is quite working, as you might see with this picture :

    enter image description here

    The issue is that it is slow as hell. To have (more) decent speed, you'll have to do some computation to reduce the number of lines to draw on the canvas, because at 44100 Hz, you very quickly get too many lines to draw.

    // AUDIO CONTEXT — fall back to the webkit-prefixed constructor if needed.
    window.AudioContext = window.AudioContext || window.webkitAudioContext;

    if (!AudioContext) {
        alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
    }

    var audioContext = new AudioContext();
    var currentBuffer = null;       // last successfully decoded AudioBuffer

    // CANVAS — created immediately, attached to the DOM once the page loads.
    var canvasWidth = 512;
    var canvasHeight = 120;
    var newCanvas = createCanvas(canvasWidth, canvasHeight);
    var context = null;             // 2d context, assigned in appendCanvas()

    window.onload = appendCanvas;
    function appendCanvas() {
        document.body.appendChild(newCanvas);
        context = newCanvas.getContext('2d');
    }
    
    // MUSIC LOADER + DECODE
    // Downloads `url` as an ArrayBuffer, decodes it with the Web Audio API,
    // caches the resulting AudioBuffer and renders it.
    function loadMusic(url) {
        var request = new XMLHttpRequest();
        request.open("GET", url, true);
        request.responseType = "arraybuffer";
        request.onreadystatechange = function () {
            if (request.readyState != 4) return;   // not finished yet
            if (request.status != 200) {
                alert('error during the load.Wrong url or cross origin issue');
                return;
            }
            audioContext.decodeAudioData(request.response, function (buffer) {
                currentBuffer = buffer;
                displayBuffer(buffer);
            }, onDecodeError);
        };
        request.send();
    }
    
    // Invoked when decodeAudioData cannot parse the downloaded bytes.
    function onDecodeError() {
        alert('error while decoding your file.');
    }
    
    // MUSIC DISPLAY
    // Draws the full waveform: one translucent vertical stroke per sample,
    // blended with 'lighter' composition so dense regions add up brighter.
    // NOTE: this issues one canvas stroke per sample, so it is very slow on
    // full-length tracks (see the discussion below about bucketed resampling).
    function displayBuffer(buff /* is an AudioBuffer */) {
       var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
       context.save();
       context.fillStyle = '#222' ;
       context.fillRect(0,0,canvasWidth,canvasHeight );
       context.strokeStyle = '#121';
       context.globalCompositeOperation = 'lighter';
       // put y = 0 on the vertical middle of the canvas
       context.translate(0,canvasHeight / 2);
       context.globalAlpha = 0.06 ; // low alpha: overlapping strokes accumulate
       for (var i=0; i<  leftChannel.length; i++) {
           // horizontal pixel column this sample falls on
           var x = Math.floor ( canvasWidth * i / leftChannel.length ) ;
           var y = leftChannel[i] * canvasHeight / 2 ;
           context.beginPath();
           context.moveTo( x  , 0 );
           context.lineTo( x+1, y );
           context.stroke();
       }
       context.restore();
       console.log('done');
    }
    
    // Build (but do not attach) a <canvas> element of the requested size.
    function createCanvas(w, h) {
        var el = document.createElement('canvas');
        el.width = w;
        el.height = h;
        return el;
    }
    
    
    // Kick off download + decode + render of the demo track.
    loadMusic('could_be_better.mp3');
    

    Edit : The issue here is that we have too much data to draw. Take a 3-minute mp3: you'll have 3*60*44100 = about 8,000,000 lines to draw. On a display that has, say, 1024 px resolution, that makes about 8,000 lines per pixel...
    In the code above, the canvas is doing the 'resampling', by drawing lines with low opacity and in 'lighter' composition mode (e.g. each pixel's r,g,b values will add up).
    To speed-up things, you have to re-sample by yourself, but to get some colors, it's not just a down-sampling, you'll have to handle a set (within a performance array most probably) of 'buckets', one for each horizontal pixel (so, say 1024), and in every bucket you compute the cumulated sound pressure, the variance, min, max and then, at display time, you decide how you will render that with colors.
    For instance :
    values between 0 and positiveMin are very clear. (any sample is below that point).
    values between positiveMin and positiveAverage - variance are darker,
    values between positiveAverage - variance and positiveAverage + variance are darker,
    and values between positiveAverage+variance and positiveMax lighter .
    (same for negative values) That makes 5 colors for each bucket, and it's still quite some work, for you to code and for the browser to compute.
    I don't know if the performance could get decent with this, but i fear the statistics accuracy and the color coding of the software you mention can't be reached on a browser (obviously not in real-time), and that you'll have to make some compromises.

    Edit 2 :
    I tried to get some colors out of stats but it quite failed. My guess, now, is that the guys at tracktor also change color depending on frequency.... quite some work here....

    Anyway, just for the record, the code for an average / mean variation follows.
    (variance was too low, i had to use mean variation).

    enter image description here

    // MUSIC DISPLAY (bucketed version)
    // Resamples the left channel into one statistics "bucket" per horizontal
    // pixel, then draws three vertical bands per pixel from those stats.
    // Despite the 'Variance' naming, offsets 2 and 5 hold the MEAN ABSOLUTE
    // DEVIATION (see Math.abs below) — plain variance was too small to see.
    function displayBuffer2(buff /* is an AudioBuffer */) {
       var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
       // we 'resample' with cumul, count, mean deviation
       // Offset 0 : PositiveCumul  1: PositiveCount  2: PositiveVariance
       //        3 : NegativeCumul  4: NegativeCount  5: NegativeVariance
       // that makes 6 data per bucket
       var resampled = new Float64Array(canvasWidth * 6 );
       var i=0, j=0, buckIndex = 0;
       // NOTE(review): min/max are tracked below but never read afterwards,
       // and `res` is never used — leftover debugging state.
       var min=1e3, max=-1e3;
       var thisValue=0, res=0;
       var sampleCount = leftChannel.length;
       // first pass: accumulate sums and counts so we can take per-bucket means
       for (i=0; i<sampleCount; i++) {
            // which bucket does sample i fall into? (0 | x == fast floor)
            buckIndex = 0 | ( canvasWidth * i / sampleCount );
            buckIndex *= 6;
            // positive and negative samples are accumulated separately
            thisValue = leftChannel[i];
            if (thisValue>0) {
                resampled[buckIndex    ] += thisValue;
                resampled[buckIndex + 1] +=1;
            } else if (thisValue<0) {
                resampled[buckIndex + 3] += thisValue;
                resampled[buckIndex + 4] +=1;
            }
            if (thisValue<min) min=thisValue;
            if (thisValue>max) max = thisValue;
       }
       // turn the cumulated sums (offsets 0 and 3) into means, guarding
       // against empty buckets (count == 0)
       for (i=0, j=0; i<canvasWidth; i++, j+=6) {
           if (resampled[j+1] != 0) {
                 resampled[j] /= resampled[j+1]; ;
           }
           if (resampled[j+4]!= 0) {
                 resampled[j+3] /= resampled[j+4];
           }
       }
       // second pass: accumulate |mean - sample| per bucket (mean absolute
       // deviation — plain variance turned out too low to be visible)
       for (i=0; i<leftChannel.length; i++) {
            // same bucket mapping as the first pass
            buckIndex = 0 | (canvasWidth * i / leftChannel.length );
            buckIndex *= 6;
            // positive or negative ?
            thisValue = leftChannel[i];
            if (thisValue>0) {
                resampled[buckIndex + 2] += Math.abs( resampled[buckIndex] - thisValue );
            } else  if (thisValue<0) {
                resampled[buckIndex + 5] += Math.abs( resampled[buckIndex + 3] - thisValue );
            }
       }
       // divide the accumulated deviations by the counts to get the means
       for (i=0, j=0; i<canvasWidth; i++, j+=6) {
            if (resampled[j+1]) resampled[j+2] /= resampled[j+1];
            if (resampled[j+4]) resampled[j+5] /= resampled[j+4];
       }
       context.save();
       context.fillStyle = '#000' ;
       context.fillRect(0,0,canvasWidth,canvasHeight );
       // centre vertically; the 0.5 x-offset keeps 1px strokes crisp
       context.translate(0.5,canvasHeight / 2);
      context.scale(1, 200);

       for (var i=0; i< canvasWidth; i++) {
            j=i*6;
           // red: inner band, from (posMean - posDev) to (negMean + negDev)
           context.strokeStyle = '#F00';
           context.beginPath();
           context.moveTo( i  , (resampled[j] - resampled[j+2] ));
           context.lineTo( i  , (resampled[j +3] + resampled[j+5] ) );
           context.stroke();
           // white: positive band, (posMean - posDev) to (posMean + posDev)
           context.strokeStyle = '#FFF';
           context.beginPath();
           context.moveTo( i  , (resampled[j] - resampled[j+2] ));
           context.lineTo( i  , (resampled[j] + resampled[j+2] ) );
           context.stroke();
           // white (style carried over): negative band,
           // (negMean + negDev) to (negMean - negDev)
           context.beginPath();
           context.moveTo( i  , (resampled[j+3] + resampled[j+5] ));
           context.lineTo( i  , (resampled[j+3] - resampled[j+5] ) );
           context.stroke();
       }
       context.restore();
       console.log('done 231 iyi');
    }
    
    0 讨论(0)
  • 2020-12-07 23:49

    // AUDIO CONTEXT — pick whichever vendor-prefixed constructor exists.
    window.AudioContext = (window.AudioContext ||
                           window.webkitAudioContext ||
                           window.mozAudioContext ||
                           window.oAudioContext ||
                           window.msAudioContext);

    if (!AudioContext) {
        alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
    }

    var audioContext = new AudioContext();
    var currentBuffer = null;       // last successfully decoded AudioBuffer

    // CANVAS — full window width, attached to the DOM once the page loads.
    var canvasWidth = window.innerWidth;
    var canvasHeight = 120;
    var newCanvas = createCanvas(canvasWidth, canvasHeight);
    var context = null;             // 2d context, assigned in appendCanvas()

    window.onload = appendCanvas;
    function appendCanvas() {
        document.body.appendChild(newCanvas);
        context = newCanvas.getContext('2d');
    }
    
    // MUSIC LOADER + DECODE
    // Downloads `url` as an ArrayBuffer, decodes it with the Web Audio API,
    // caches the resulting AudioBuffer and renders it.
    function loadMusic(url) {
        var request = new XMLHttpRequest();
        request.open("GET", url, true);
        request.responseType = "arraybuffer";
        request.onreadystatechange = function () {
            if (request.readyState != 4) return;   // not finished yet
            if (request.status != 200) {
                alert('error during the load.Wrong url or cross origin issue');
                return;
            }
            audioContext.decodeAudioData(request.response, function (buffer) {
                currentBuffer = buffer;
                displayBuffer(buffer);
            }, onDecodeError);
        };
        request.send();
    }
    
    // Invoked when decodeAudioData cannot parse the downloaded bytes.
    function onDecodeError() {
        alert('error while decoding your file.');
    }
    
    // MUSIC DISPLAY
    // Down-samples the track to `drawLines` vertical lines batched into a
    // single path/stroke, so rendering stays fast regardless of track length.
    // Each line spans from +sample to -sample around the vertical centre.
    function displayBuffer(buff /* is an AudioBuffer */) {
       var drawLines = 500;
       var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
       context.save();
       context.fillStyle = '#080808' ;
       context.fillRect(0,0,canvasWidth,canvasHeight );
       context.strokeStyle = '#46a0ba';
       context.globalCompositeOperation = 'lighter';
       context.translate(0,canvasHeight / 2);
       context.lineWidth=1;
       var totallength = leftChannel.length;
       var eachBlock = Math.floor(totallength / drawLines);
       var lineGap = (canvasWidth/drawLines);

       context.beginPath();
       for(var i=0;i<=drawLines;i++){
           // clamp so the final iteration (i == drawLines) cannot index past
           // the end of the buffer (which yielded undefined -> NaN coords)
           var audioBuffKey = Math.min(Math.floor(eachBlock * i), totallength - 1);
           var x = i*lineGap;
           var y = leftChannel[audioBuffKey] * canvasHeight / 2;
           context.moveTo( x, y );
           context.lineTo( x, (y*-1) );
       }
       context.stroke();
       context.restore();
    }
    
    // Build (but do not attach) a <canvas> element of the requested size.
    function createCanvas(w, h) {
        var el = document.createElement('canvas');
        el.width = w;
        el.height = h;
        return el;
    }
    
    
    // Kick off download + decode + render of a cross-origin-enabled demo file.
    loadMusic('https://raw.githubusercontent.com/katspaugh/wavesurfer.js/master/example/media/demo.wav');

    0 讨论(0)
  • 2020-12-08 00:03

    Hi, I was also facing the loading-time issue. I got it under control by reducing the number of lines to draw and by moving the canvas function calls around a little. See the following code for reference.

    // AUDIO CONTEXT — pick whichever vendor-prefixed constructor exists.
    window.AudioContext = (window.AudioContext ||
                           window.webkitAudioContext ||
                           window.mozAudioContext ||
                           window.oAudioContext ||
                           window.msAudioContext);

    if (!AudioContext) {
        alert('This site cannot be run in your Browser. Try a recent Chrome or Firefox. ');
    }

    var audioContext = new AudioContext();
    var currentBuffer = null;       // last successfully decoded AudioBuffer

    // CANVAS — full window width, attached to the DOM once the page loads.
    var canvasWidth = window.innerWidth;
    var canvasHeight = 120;
    var newCanvas = createCanvas(canvasWidth, canvasHeight);
    var context = null;             // 2d context, assigned in appendCanvas()

    window.onload = appendCanvas;
    function appendCanvas() {
        document.body.appendChild(newCanvas);
        context = newCanvas.getContext('2d');
    }
    
    // MUSIC LOADER + DECODE
    // Downloads `url` as an ArrayBuffer, decodes it with the Web Audio API,
    // caches the resulting AudioBuffer and renders it.
    function loadMusic(url) {
        var request = new XMLHttpRequest();
        request.open("GET", url, true);
        request.responseType = "arraybuffer";
        request.onreadystatechange = function () {
            if (request.readyState != 4) return;   // not finished yet
            if (request.status != 200) {
                alert('error during the load.Wrong url or cross origin issue');
                return;
            }
            audioContext.decodeAudioData(request.response, function (buffer) {
                currentBuffer = buffer;
                displayBuffer(buffer);
            }, onDecodeError);
        };
        request.send();
    }
    
    // Invoked when decodeAudioData cannot parse the downloaded bytes.
    function onDecodeError() {
        alert('error while decoding your file.');
    }
    
    // MUSIC DISPLAY
    // Down-samples the track to `drawLines` vertical lines batched into a
    // single path/stroke, so rendering stays fast regardless of track length.
    // Each line spans from +sample to -sample around the vertical centre.
    function displayBuffer(buff /* is an AudioBuffer */) {
       var drawLines = 500;
       var leftChannel = buff.getChannelData(0); // Float32Array describing left channel
       context.save();
       context.fillStyle = '#080808' ;
       context.fillRect(0,0,canvasWidth,canvasHeight );
       context.strokeStyle = '#46a0ba';
       context.globalCompositeOperation = 'lighter';
       context.translate(0,canvasHeight / 2);
       context.lineWidth=1;
       var totallength = leftChannel.length;
       var eachBlock = Math.floor(totallength / drawLines);
       var lineGap = (canvasWidth/drawLines);

       context.beginPath();
       for(var i=0;i<=drawLines;i++){
           // clamp so the final iteration (i == drawLines) cannot index past
           // the end of the buffer (which yielded undefined -> NaN coords)
           var audioBuffKey = Math.min(Math.floor(eachBlock * i), totallength - 1);
           var x = i*lineGap;
           var y = leftChannel[audioBuffKey] * canvasHeight / 2;
           context.moveTo( x, y );
           context.lineTo( x, (y*-1) );
       }
       context.stroke();
       context.restore();
    }
    
    // Build (but do not attach) a <canvas> element of the requested size.
    function createCanvas(w, h) {
        var el = document.createElement('canvas');
        el.width = w;
        el.height = h;
        return el;
    }
    
    
    // Kick off download + decode + render of the demo track.
    loadMusic('could_be_better.mp3');

    0 讨论(0)
提交回复
热议问题