How to read video frames directly into memory with Node.js?

Submitted by 假装没事ソ on 2020-06-17 06:46:22

Question


What I am trying to do is take a video, divide it into frames, and pass those frames to a model that detects objects in each frame. The problem is that the extraction process takes a lot of time, and I don't need the frames on my disk.


Answer 1:


ffmpeg-stream offers streaming capabilities, so there is no need to write to a file.
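A minimal sketch of that route, assuming the Converter API (createInputFromFile, createOutputStream, run) described in the ffmpeg-stream README; the method names and options here should be double-checked against the version you install, and the output chunks still have to be split into individual frames, e.g. with the ExtractFrames transform shown further below:

const { Converter } = require("ffmpeg-stream"); // stream-based wrapper around ffmpeg

const converter = new Converter();
converter.createInputFromFile("videofile.mp4");
// ask ffmpeg for an MJPEG byte stream instead of files on disk
const frameStream = converter.createOutputStream({ f: "image2pipe", vcodec: "mjpeg" });

frameStream.on("data", (chunk) => {
    // chunks are raw MJPEG bytes; they still need to be split into
    // individual JPEG frames before decoding
});

converter.run();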

It is also possible to use ffmpeg directly and spawn a new child process. Its .stdout property is a readable stream; on each 'data' event, a chunk can be read.

const fs = require("fs");
const tf = require("@tensorflow/tfjs-node")

const logStream = fs.createWriteStream('./logFile.log');


const spawnProcess = require('child_process').spawn,
    ffmpeg = spawnProcess('ffmpeg', [
        '-i', 'videofile.mp4',
        '-vcodec', 'png',
        '-f', 'rawvideo',
        '-s', 'h*w', // size of one frame, e.g. '1280x720' (width x height)
        'pipe:1'
    ]);

ffmpeg.stderr.pipe(logStream); // for debugging

let i = 0

ffmpeg.stdout.on('data', (data) => {
    try {
        console.log(tf.node.decodeImage(data).shape)
        console.log(`${++i} frames read`)
        // dispose all tensors
    } catch(e) {
        console.log(e)
    } 
})


ffmpeg.on('close', function (code) {
    console.log('child process exited with code ' + code);
});

Decoding the image is wrapped in a try/catch block to prevent errors raised when a chunk does not correspond to a complete frame.

A more robust version that avoids decoding chunks that do not correspond to complete images is the following:

const { Transform } = require("stream")

class ExtractFrames extends Transform {
    constructor(delimiter) {
        super({ readableObjectMode: true })
        this.delimiter = Buffer.from(delimiter, "hex")
        this.buffer = Buffer.alloc(0)
    }

    _transform(data, enc, cb) {
        // Add new data to buffer
        this.buffer = Buffer.concat([this.buffer, data])
        const start = this.buffer.indexOf(this.delimiter)
        if (start < 0) return cb() // no frame start marker yet; wait for more data
        const end = this.buffer.indexOf(
            this.delimiter,
            start + this.delimiter.length,
        )
        if (end < 0) return cb() // we haven't got the whole frame yet
        this.push(this.buffer.slice(start, end)) // emit a frame
        this.buffer = this.buffer.slice(end) // remove frame data from buffer

        if (start > 0) console.error(`Discarded ${start} bytes of invalid data`)
        cb()
    }
    _flush(callback) {
        // push remaining buffer to readable stream
        callback(null, this.buffer);
    }
}

const fs = require("fs");
const tf = require("@tensorflow/tfjs-node")

const logStream = fs.createWriteStream('./logFile.log');


const spawnProcess = require('child_process').spawn,
    ffmpeg = spawnProcess('ffmpeg', [
        '-i', 'generique.mp4',
        '-vcodec', 'mjpeg', // each frame is emitted as a standalone JPEG image
        '-f', 'rawvideo',
        '-s', '420x360', // size of one frame
        'pipe:1'
    ]);

ffmpeg.stderr.pipe(logStream); // for debugging

let i = 0

ffmpeg.stdout
.pipe(new ExtractFrames("FFD8FF")).on('data', (data) => { // FFD8FF marks the start of a JPEG image
    try {
        console.log(tf.node.decodeImage(data).shape)
        console.log(`${++i} frames read`)
        // dispose all tensors
    } catch(e) {
        console.log(e)
    } 
})


ffmpeg.on('close', function (code) {
    console.log('child process exited with code ' + code);
});

Though the above code works, it can still fill up memory quickly. Separating the frame extraction from the data processing itself will help.

async function* frames() {
    let resolve;
    let promise = new Promise(r => resolve = r);
    let running = true;

    ffmpeg.stdout.pipe(new ExtractFrames("FFD8FF")).on('data', data => {
        resolve(data);
        // prepare a new promise for the next frame
        promise = new Promise(r => resolve = r);
    });

    ffmpeg.on('close', function (code) {
        running = false;
        console.log('child process exited with code ' + code);
    });

    while (running) {
        const data = await promise;
        yield data;
    }
}

(async () => {
    // data processing
    // possibly create a tf.data.Dataset for training
    for await (const data of frames()) {
        console.log(tf.node.decodeImage(data).shape)
        console.log(data);
    }
})()
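To act on the "dispose all tensors" reminder in the snippets above, a minimal sketch is to keep a reference to each decoded tensor and call its dispose() method (standard tfjs API) once the frame has been processed, so memory is released before the next frame arrives:

(async () => {
    for await (const data of frames()) {
        const frame = tf.node.decodeImage(data)
        console.log(frame.shape)
        // ... run the object-detection model on `frame` here ...
        frame.dispose() // release the tensor's memory before the next frame
    }
})()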


Source: https://stackoverflow.com/questions/62050534/how-to-read-video-frames-directly-into-memory-with-nodejs
