Sending live video frame over network in python opencv

后端 未结 7 1625
南旧
南旧 2020-11-28 06:39

I'm trying to send live video frames that I capture with my camera to a server and process them. I'm using OpenCV for image processing and Python as the language. Here is my

相关标签:
7条回答
  • 2020-11-28 06:43

    I have made it to work on my MacOS.

    I used the code from @mguijarr and changed the struct.pack from "H" to "L".

    Server.py:
    ==========
    import socket
    import sys
    import cv2
    import pickle
    import numpy as np
    import struct ## new
    
    HOST=''
    PORT=8089
    
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    print 'Socket created'
    
    s.bind((HOST,PORT))
    print 'Socket bind complete'
    s.listen(10)
    print 'Socket now listening'
    
    conn,addr=s.accept()
    
    ### new
    data = ""
    payload_size = struct.calcsize("L") 
    while True:
        while len(data) < payload_size:
            data += conn.recv(4096)
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_msg_size)[0]
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        ###
    
        frame=pickle.loads(frame_data)
        print frame
        cv2.imshow('frame',frame)
    
        key = cv2.waitKey(10)
        if (key == 27) or (key == 113):
            break
    
    cv2.destroyAllWindows()
    
    
    
    Client.py:
    ==========
    import cv2
    import numpy as np
    import socket
    import sys
    import pickle
    import struct ### new code
    
    cap=cv2.VideoCapture(0)
    clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    clientsocket.connect(('localhost',8089))
    try:
        while True:
            ret,frame=cap.read()
            if not ret:
                # Camera read failed (no device / end of stream): stop
                # instead of pickling and sending a None frame.
                break
            data = pickle.dumps(frame) ### new code
            # Length-prefixed message; native-size "L" must match the
            # server's struct.calcsize("L") (same platform on both ends).
            clientsocket.sendall(struct.pack("L", len(data))+data) ### new code
    finally:
        # Release the camera and close the socket on any exit path.
        cap.release()
        clientsocket.close()
    
    0 讨论(0)
  • 2020-11-28 06:54

    Few things:

    • use sendall instead of send since you're not guaranteed everything will be sent in one go
    • pickle is ok for data serialization but you have to make a protocol of your own for the messages you exchange between the client and the server; this way you can know in advance the amount of data to read for unpickling (see below)
    • for recv you will get better performance if you receive big chunks, so replace 80 by 4096 or even more
    • beware of sys.getsizeof: it returns the size of the object in memory, which is not the same as the size (length) of the bytes to send over the network ; for a Python string the two values are not the same at all
    • be mindful of the size of the frame you are sending. Code below supports a frame up to 65535. Change "H" to "L" if you have a larger frame.

    A protocol example:

    client_cv.py

    import cv2
    import numpy as np
    import socket
    import sys
    import pickle
    import struct ### new code
    
    cap=cv2.VideoCapture(0)
    clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    clientsocket.connect(('localhost',8089))
    try:
        while True:
            ret,frame=cap.read()
            if not ret:
                # Camera read failed; don't pickle/send a None frame.
                break
            data = pickle.dumps(frame) ### new code
            # "H" caps a message at 65535 bytes; a pickled raw camera frame
            # is normally far larger and struct.pack raises struct.error.
            # Switch BOTH client and server to "L" for real frames.
            clientsocket.sendall(struct.pack("H", len(data))+data) ### new code
    finally:
        cap.release()
        clientsocket.close()
    

    server_cv.py

    import socket
    import sys
    import cv2
    import pickle
    import numpy as np
    import struct ## new
    
    HOST=''
    PORT=8089
    
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    print 'Socket created'
    
    s.bind((HOST,PORT))
    print 'Socket bind complete'
    s.listen(10)
    print 'Socket now listening'
    
    conn,addr=s.accept()
    
    ### new
    data = ""
    payload_size = struct.calcsize("H") 
    while True:
        while len(data) < payload_size:
            data += conn.recv(4096)
        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("H", packed_msg_size)[0]
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]
        ###
    
        frame=pickle.loads(frame_data)
        print frame
        cv2.imshow('frame',frame)
    

    You can probably optimize all this a lot (less copying, using the buffer interface, etc) but at least you can get the idea.

    0 讨论(0)
  • 2020-11-28 06:59

    After months of searching the internet, this is what I came up with. I have neatly packaged it into classes, with unit tests and documentation, as SmoothStream — check it out; it was the only simple and working version of streaming I could find anywhere.

    I used this code and wrapped mine around it.

    Viewer.py

    import cv2
    import zmq
    import base64
    import numpy as np
    
    context = zmq.Context()
    footage_socket = context.socket(zmq.SUB)
    footage_socket.bind('tcp://*:5555')
    # Subscribe to all messages.  A plain '' replaces np.unicode(''),
    # which was deprecated and removed in NumPy 1.24; setsockopt_string
    # only needs a str.
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
    
    while True:
        try:
            frame = footage_socket.recv_string()
            img = base64.b64decode(frame)
            # np.frombuffer replaces the deprecated np.fromstring and is
            # zero-copy over the decoded JPEG bytes.
            npimg = np.frombuffer(img, dtype=np.uint8)
            source = cv2.imdecode(npimg, 1)
            cv2.imshow("Stream", source)
            cv2.waitKey(1)
    
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            break
    

    Streamer.py

    import base64
    import cv2
    import zmq
    
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.connect('tcp://localhost:5555')
    
    camera = cv2.VideoCapture(0)  # init the camera
    
    while True:
        try:
            grabbed, frame = camera.read()  # grab the current frame
            if not grabbed:
                # A failed read yields frame=None and cv2.resize(None, ...)
                # raises an uncaught cv2.error; shut down cleanly instead.
                break
            frame = cv2.resize(frame, (640, 480))  # resize the frame
            encoded, buffer = cv2.imencode('.jpg', frame)
            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)
    
        except KeyboardInterrupt:
            break
    
    # Release resources on any exit path (Ctrl-C or camera failure).
    camera.release()
    cv2.destroyAllWindows()
    
    0 讨论(0)
  • 2020-11-28 07:00

    I changed the code from @mguijarr to work with Python 3. Changes made to the code:

    • data is now a byte literal instead of a string literal
    • Changed "H" to "L" to send larger frame sizes. Based on the documentation, we can now send frames of size 2^32 instead of just 2^16.

    Server.py

    import pickle
    import socket
    import struct
    
    import cv2
    
    HOST = ''
    PORT = 8089
    
    
    def recv_exactly(sock, buf, size):
        """Grow ``buf`` until it holds at least ``size`` bytes.
    
        ``recv`` returning ``b''`` means the peer closed the connection;
        without this check the original code busy-looped forever after a
        client disconnect.
        """
        while len(buf) < size:
            chunk = sock.recv(4096)
            if not chunk:
                raise EOFError('client disconnected')
            buf += chunk
        return buf
    
    
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print('Socket created')
    
    s.bind((HOST, PORT))
    print('Socket bind complete')
    s.listen(10)
    print('Socket now listening')
    
    conn, addr = s.accept()
    
    data = b'' ### CHANGED
    # NOTE: native "L" is 4 bytes on Windows/32-bit but 8 bytes on 64-bit
    # Unix; client and server must match (use "!L" on both for a portable,
    # fixed-size 4-byte prefix).
    payload_size = struct.calcsize("L") ### CHANGED
    
    try:
        while True:
    
            # Retrieve message size
            data = recv_exactly(conn, data, payload_size)
            packed_msg_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_msg_size)[0] ### CHANGED
    
            # Retrieve all data based on message size
            data = recv_exactly(conn, data, msg_size)
            frame_data = data[:msg_size]
            data = data[msg_size:]
    
            # Extract frame -- WARNING: pickle is unsafe with untrusted peers.
            frame = pickle.loads(frame_data)
    
            # Display
            cv2.imshow('frame', frame)
            cv2.waitKey(1)
    except EOFError:
        pass
    finally:
        # Close sockets and windows on any exit path.
        conn.close()
        s.close()
        cv2.destroyAllWindows()
    

    Client.py

    import cv2
    import numpy as np
    import socket
    import sys
    import pickle
    import struct
    
    cap=cv2.VideoCapture(0)
    clientsocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    clientsocket.connect(('localhost',8089))
    
    try:
        while True:
            ret,frame=cap.read()
            if not ret:
                # Camera read failed; don't pickle and send a None frame.
                break
    
            # Serialize frame
            data = pickle.dumps(frame)
    
            # Send message length first (native "L" -- its size must match
            # the server's platform; "!L" on both sides is portable)
            message_size = struct.pack("L", len(data)) ### CHANGED
    
            # Then data
            clientsocket.sendall(message_size + data)
    finally:
        # Release the camera and close the socket on any exit path.
        cap.release()
        clientsocket.close()
    
    0 讨论(0)
  • 2020-11-28 07:00

    Recently I published the imagiz package for fast and non-blocking live video streaming over the network with OpenCV and ZMQ.

    https://pypi.org/project/imagiz/

    Client :

    import imagiz
    import cv2
    
    
    # Stream JPEG-encoded webcam frames to an imagiz server on localhost.
    client=imagiz.Client("cc1",server_ip="localhost")
    vid=cv2.VideoCapture(0)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    
    while True:
        # Guard clause: stop streaming as soon as the camera stops
        # delivering frames.
        ok, frame = vid.read()
        if not ok:
            break
        ok, image = cv2.imencode('.jpg', frame, encode_param)
        client.send(image)
    

    Server :

    import imagiz
    import cv2
    
    # Receive JPEG frames from imagiz clients and display them as they
    # arrive.
    server=imagiz.Server()
    while True:
        msg = server.recive()  # (sic: imagiz spells the method "recive")
        decoded = cv2.imdecode(msg.image, 1)
        cv2.imshow("", decoded)
        cv2.waitKey(1)
    
    0 讨论(0)
  • 2020-11-28 07:01

    As @Rohan Sawant said, I used the zmq library, but without base64 encoding. Here is the new code.

    Streamer.py

    import base64
    import cv2
    import zmq
    import numpy as np
    import time
    
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.connect('tcp://192.168.1.3:5555')
    
    camera = cv2.VideoCapture(0)  # init the camera
    
    while True:
        try:
            grabbed, frame = camera.read()  # grab the current frame
            if not grabbed:
                # A failed read yields frame=None; cv2.resize(None, ...)
                # raises an uncaught cv2.error, so exit cleanly instead.
                break
            frame = cv2.resize(frame, (640, 480))  # resize the frame
            # Send the raw JPEG buffer directly -- no base64 round-trip.
            encoded, buffer = cv2.imencode('.jpg', frame)
            footage_socket.send(buffer)
    
        except KeyboardInterrupt:
            break
    
    # Release resources on any exit path (Ctrl-C or camera failure).
    camera.release()
    cv2.destroyAllWindows()
    

    Viewer.py

    import cv2
    import zmq
    import base64
    import numpy as np
    
    context = zmq.Context()
    footage_socket = context.socket(zmq.SUB)
    footage_socket.bind('tcp://*:5555')
    # Subscribe to all messages.  A plain '' replaces np.unicode(''),
    # which was removed in NumPy 1.24; setsockopt_string only needs a str.
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
    
    while True:
        try:
            frame = footage_socket.recv()
            # Zero-copy view over the received JPEG bytes.
            npimg = np.frombuffer(frame, dtype=np.uint8)
            source = cv2.imdecode(npimg, 1)
            cv2.imshow("Stream", source)
            cv2.waitKey(1)
    
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            break
    
    0 讨论(0)
提交回复
热议问题