iOS screen sharing (using ReplayKit) with WebRTC in Swift


Question


I have successfully implemented ReplayKit.

SampleHandler.swift

class SampleHandler: RPBroadcastSampleHandler {

   override func broadcastStarted(withSetupInfo setupInfo: [String : NSObject]?) {

   }

  override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType:  RPSampleBufferType) {
        switch sampleBufferType {

        case RPSampleBufferType.video:
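            // Video frames of the shared screen arrive here; see the answer
            // below for converting and forwarding them to WebRTC.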
            break

        case RPSampleBufferType.audioApp:
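            // Audio produced by the app whose screen is being shared.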
            break

        case RPSampleBufferType.audioMic:
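            // Microphone audio, if the user enabled the mic for the broadcast.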
            break
        @unknown default:
            return
        }
    }
}

Questions:

  1. How can I display the screen-share video to the receiver using WebRTC?
  2. If I want to save the screen-share video to my document directory or to the photo gallery, what do I need to do?

I am using the WebRTC SDK for mobile in Swift. My WebRTCClient file:

    final class WebRTCClient: NSObject {
    
    // The `RTCPeerConnectionFactory` is in charge of creating new RTCPeerConnection instances.
    // A new RTCPeerConnection should be created every new call, but the factory is shared.
     static let factory: RTCPeerConnectionFactory = {
        RTCInitializeSSL()
        let videoEncoderFactory = RTCDefaultVideoEncoderFactory()
        let videoDecoderFactory = RTCDefaultVideoDecoderFactory()
        return RTCPeerConnectionFactory(encoderFactory: videoEncoderFactory, decoderFactory: videoDecoderFactory)
    }()
    
    weak var delegate: WebRTCClientDelegate?
    let peerConnection: RTCPeerConnection
    private let rtcAudioSession =  RTCAudioSession.sharedInstance()
    private let audioQueue = DispatchQueue(label: "audio")
    private let mediaConstraints = [kRTCMediaConstraintsOfferToReceiveAudio: kRTCMediaConstraintsValueTrue,
                                    kRTCMediaConstraintsOfferToReceiveVideo: kRTCMediaConstraintsValueTrue]
    private var videoCapturer: RTCVideoCapturer?
    private var localVideoTrack: RTCVideoTrack?
    private var remoteVideoTrack: RTCVideoTrack?
    private var localDataChannel: RTCDataChannel?
    private var remoteDataChannel: RTCDataChannel?

    @available(*, unavailable)
    override init() {
        fatalError("WebRTCClient:init is unavailable")
    }
    
    required init(iceServers: [String]) {

        let config = RTCConfiguration()
       // config.iceServers = [RTCIceServer(urlStrings: iceServers)]
        
        config.iceServers = [RTCIceServer(urlStrings:["//Turn server URL"],
                                          username:"username",
                                          credential:"password")]
        
        
        // Unified Plan is preferred over the legacy Plan B semantics
        config.sdpSemantics = .unifiedPlan
        
        // gatherContinually lets WebRTC listen for network changes and send any new candidates to the other client
        config.continualGatheringPolicy = .gatherContinually
        
        let constraints = RTCMediaConstraints(mandatoryConstraints: nil,
                                              optionalConstraints: ["DtlsSrtpKeyAgreement":kRTCMediaConstraintsValueTrue])
        self.peerConnection = WebRTCClient.factory.peerConnection(with: config, constraints: constraints, delegate: nil)
        
        super.init()
        self.createMediaSenders()
        self.configureAudioSession()
        self.peerConnection.delegate = self
    }
    
    // MARK: Signaling
    func offer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
        let constraints = RTCMediaConstraints(mandatoryConstraints: self.mediaConstraints,
                                              optionalConstraints: nil)
        self.peerConnection.offer(for: constraints) { (sdp, error) in
            guard let sdp = sdp else {
                return
            }
            
            self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
                completion(sdp)
            })
        }
    }
    
    func answer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void)  {
        let constraints = RTCMediaConstraints(mandatoryConstraints: self.mediaConstraints,
                                              optionalConstraints: nil)
        self.peerConnection.answer(for: constraints) { (sdp, error) in
            guard let sdp = sdp else {
                return
            }
            
            self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
                completion(sdp)
            })
        }
    }
    
    func set(remoteSdp: RTCSessionDescription, completion: @escaping (Error?) -> ()) {
        self.peerConnection.setRemoteDescription(remoteSdp, completionHandler: completion)
    }
    
    func set(remoteCandidate: RTCIceCandidate) {
        self.peerConnection.add(remoteCandidate)
    }
    
    // MARK: Media
    func startCaptureLocalVideo(renderer: RTCVideoRenderer, position : AVCaptureDevice.Position) {
        guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
            return
        }

        guard
            let frontCamera = (RTCCameraVideoCapturer.captureDevices().first { $0.position == position }),
            // choose highest res
            let format = (RTCCameraVideoCapturer.supportedFormats(for: frontCamera).sorted { (f1, f2) -> Bool in
                let width1 = CMVideoFormatDescriptionGetDimensions(f1.formatDescription).width
                let width2 = CMVideoFormatDescriptionGetDimensions(f2.formatDescription).width
                return width1 < width2
            }).last,
        
            // choose highest fps
            let fps = (format.videoSupportedFrameRateRanges.sorted { return $0.maxFrameRate < $1.maxFrameRate }.last) else {
            return
        }

        capturer.startCapture(with: frontCamera,
                              format: format,
                              fps: Int(fps.maxFrameRate))
        
        self.localVideoTrack?.add(renderer)
    }
    
    func startCaptureLocalTestVideo(renderer: RTCVideoRenderer, position : AVCaptureDevice.Position) {
        guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
            return
        }

        guard
            let frontCamera = (RTCCameraVideoCapturer.captureDevices().first { $0.position == position }),
            // choose highest res
            let format = (RTCCameraVideoCapturer.supportedFormats(for: frontCamera).sorted { (f1, f2) -> Bool in
                let width1 = CMVideoFormatDescriptionGetDimensions(f1.formatDescription).width
                let width2 = CMVideoFormatDescriptionGetDimensions(f2.formatDescription).width
                return width1 < width2
            }).last,
        
            // choose highest fps
            let fps = (format.videoSupportedFrameRateRanges.sorted { return $0.maxFrameRate < $1.maxFrameRate }.last) else {
            return
        }

        capturer.startCapture(with: frontCamera,
                              format: format,
                              fps: Int(fps.maxFrameRate))
        
        self.localVideoTrack?.add(renderer)
    }
    
    func renderRemoteVideo(to renderer: RTCVideoRenderer) {
        self.remoteVideoTrack?.add(renderer)
    }
    
    private func configureAudioSession() {
        self.rtcAudioSession.lockForConfiguration()
        do {
            try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
            try self.rtcAudioSession.setMode(AVAudioSession.Mode.voiceChat.rawValue)
        } catch let error {
            debugPrint("Error changeing AVAudioSession category: \(error)")
        }
        self.rtcAudioSession.unlockForConfiguration()
    }
    
    private func createMediaSenders() {
        let streamId = "stream"
        
        // Audio
        let audioTrack = self.createAudioTrack()
        self.peerConnection.add(audioTrack, streamIds: [streamId])
        
        // Video
        let videoTrack = self.createVideoTrack()
        self.localVideoTrack = videoTrack
        self.peerConnection.add(videoTrack, streamIds: [streamId])
        self.remoteVideoTrack = self.peerConnection.transceivers.first { $0.mediaType == .video }?.receiver.track as? RTCVideoTrack
        
        // Data
        if let dataChannel = createDataChannel() {
            dataChannel.delegate = self
            self.localDataChannel = dataChannel
        }
    }
    
    private func createAudioTrack() -> RTCAudioTrack {
        let audioConstrains = RTCMediaConstraints(mandatoryConstraints: nil, optionalConstraints: nil)
        let audioSource = WebRTCClient.factory.audioSource(with: audioConstrains)
        let audioTrack = WebRTCClient.factory.audioTrack(with: audioSource, trackId: "audio0")
        return audioTrack
    }
    
    private func createVideoTrack() -> RTCVideoTrack {
        let videoSource = WebRTCClient.factory.videoSource()
        
        #if targetEnvironment(simulator)
        self.videoCapturer = RTCFileVideoCapturer(delegate: videoSource)
        #else
        self.videoCapturer = RTCCameraVideoCapturer(delegate: videoSource)
        #endif
        
        let videoTrack = WebRTCClient.factory.videoTrack(with: videoSource, trackId: "video0")
        return videoTrack
    }
    
    // MARK: Data Channels
    private func createDataChannel() -> RTCDataChannel? {
        let config = RTCDataChannelConfiguration()
        guard let dataChannel = self.peerConnection.dataChannel(forLabel: "WebRTCData", configuration: config) else {
            debugPrint("Warning: Couldn't create data channel.")
            return nil
        }
        return dataChannel
    }
    
    func sendData(_ data: Data) {
        let buffer = RTCDataBuffer(data: data, isBinary: true)
        self.remoteDataChannel?.sendData(buffer)
    }
}

extension WebRTCClient: RTCPeerConnectionDelegate {
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didChange stateChanged: RTCSignalingState) {
        debugPrint("peerConnection new signaling state: \(stateChanged)")
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didAdd stream: RTCMediaStream) {
        debugPrint("peerConnection did add stream")
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didRemove stream: RTCMediaStream) {
        debugPrint("peerConnection did remote stream")
    }
    
    func peerConnectionShouldNegotiate(_ peerConnection: RTCPeerConnection) {
        debugPrint("peerConnection should negotiate")
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceConnectionState) {
        debugPrint("peerConnection new connection state: \(newState)")
        self.delegate?.webRTCClient(self, didChangeConnectionState: newState)
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceGatheringState) {
        debugPrint("peerConnection new gathering state: \(newState)")
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didGenerate candidate: RTCIceCandidate) {
        self.delegate?.webRTCClient(self, didDiscoverLocalCandidate: candidate)
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didRemove candidates: [RTCIceCandidate]) {
        debugPrint("peerConnection did remove candidate(s)")
    }
    
    func peerConnection(_ peerConnection: RTCPeerConnection, didOpen dataChannel: RTCDataChannel) {
        debugPrint("peerConnection did open data channel")
        self.remoteDataChannel = dataChannel
    }
}

// MARK:- Audio control
extension WebRTCClient {
    func muteAudio() {
        self.setAudioEnabled(false)
    }
    
    func unmuteAudio() {
        self.setAudioEnabled(true)
    }
    
    func muteVideo() {
        self.setVideoEnabled(false)
    }
    
    func unmuteVideo() {
        self.setVideoEnabled(true)
    }
    
    
    // Fallback to the default playing device: headphones/bluetooth/ear speaker
    func speakerOff() {
        self.audioQueue.async { [weak self] in
            guard let self = self else {
                return
            }
            
            self.rtcAudioSession.lockForConfiguration()
            do {
                try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
                try self.rtcAudioSession.overrideOutputAudioPort(.none)
            } catch let error {
                debugPrint("Error setting AVAudioSession category: \(error)")
            }
            self.rtcAudioSession.unlockForConfiguration()
        }
    }
    
    // Force speaker
    func speakerOn() {
        self.audioQueue.async { [weak self] in
            guard let self = self else {
                return
            }
            
            self.rtcAudioSession.lockForConfiguration()
            do {
                try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
                try self.rtcAudioSession.overrideOutputAudioPort(.speaker)
                try self.rtcAudioSession.setActive(true)
            } catch let error {
                debugPrint("Couldn't force audio to speaker: \(error)")
            }
            self.rtcAudioSession.unlockForConfiguration()
        }
    }
    
    private func setAudioEnabled(_ isEnabled: Bool) {
        let audioTracks = self.peerConnection.transceivers.compactMap { return $0.sender.track as? RTCAudioTrack }
        audioTracks.forEach { $0.isEnabled = isEnabled }
    }
    
    private func setVideoEnabled(_ isEnabled: Bool) {
        let videoTracks = self.peerConnection.transceivers.compactMap { return $0.sender.track as? RTCVideoTrack }
        videoTracks.forEach { $0.isEnabled = isEnabled }
    }
    
    func swapCameraToFront() {
        guard let localStream = peerConnection.localStreams.first,
              let currentTrack = localStream.videoTracks.first else { return }
        localStream.removeVideoTrack(currentTrack)
        
        let localVideoTrack = self.createVideoTrack()
        localStream.addVideoTrack(localVideoTrack)
        // delegate?.appClient(self, didReceiveLocalVideoTrack: localVideoTrack)
        
        peerConnection.remove(localStream)
        peerConnection.add(localStream)
    }

    func swapCameraToBack() {
        guard let localStream = peerConnection.localStreams.first,
              let currentTrack = localStream.videoTracks.first else { return }
        localStream.removeVideoTrack(currentTrack)
        localStream.addVideoTrack(self.createVideoTrack())
        peerConnection.remove(localStream)
        peerConnection.add(localStream)
    }
    
}

extension WebRTCClient: RTCDataChannelDelegate {
    func dataChannelDidChangeState(_ dataChannel: RTCDataChannel) {
        debugPrint("dataChannel did change state: \(dataChannel.readyState)")
    }
    
    func dataChannel(_ dataChannel: RTCDataChannel, didReceiveMessageWith buffer: RTCDataBuffer) {
        self.delegate?.webRTCClient(self, didReceiveData: buffer.data)
    }
}

ViewController.swift file

override func viewDidLoad() {
    super.viewDidLoad()
    let localRenderer = RTCMTLVideoView(frame: self.localVideoView?.frame ?? CGRect.zero)
    let remoteRenderer = RTCMTLVideoView(frame: self.view.frame)

    self.view.insertSubview(remoteRenderer, at: 0)
    self.view.insertSubview(localRenderer, aboveSubview: remoteRenderer)

    self.webRTCClient.startCaptureLocalVideo(renderer: localRenderer, position: .front)
    self.webRTCClient.renderRemoteVideo(to: remoteRenderer)

}
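(Side note on the renderer, based on the stasel sample this code comes from: RTCMTLVideoView is Metal-backed, and the sample falls back to RTCEAGLVideoView on non-arm64 targets such as the simulator, so you may need to do the same.)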

I am using the WebRTC project below: https://github.com/stasel/WebRTC-iOS

A more detailed app demonstrating WebRTC:


Answer 1:


You should convert each sample buffer into an RTCVideoFrame in the appropriate format, and then push it into WebRTC's video source like this:

videoSource.capturer(videoCapturer, didCapture: videoFrame)

Steps to do (a combined sketch of all three steps follows this list):

  1. Convert the sample buffer to an image:
    var cgImage: CGImage?
    guard let sourceImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    VTCreateCGImageFromCVPixelBuffer(sourceImageBuffer, options: nil, imageOut: &cgImage)
    guard let cgImage = cgImage else { return }
    let image = UIImage(cgImage: cgImage)
  2. Since the sample buffer arrives quite fast (around 15 fps), we throttle it down to 6 fps:
    let kDesiredFrameRate = 6.0
    let currentTimestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    let delta = CMTimeSubtract(currentTimestamp, lastVideoTimestamp).seconds
    let threshold = Double(1.0 / kDesiredFrameRate)
    guard delta > threshold else { return }
    lastVideoTimestamp = currentTimestamp
  3. Read the orientation of the screen:
    enum VideoRotation: Int {
        case _0 = 0
        case _90 = 90
        case _180 = 180
        case _270 = 270
    }
    var videoOrientation = VideoRotation._0
    let orientationAttachment = CMGetAttachment(sampleBuffer, key: RPVideoSampleOrientationKey as CFString, attachmentModeOut: nil) as? NSNumber
    let orientation = CGImagePropertyOrientation(rawValue: orientationAttachment?.uint32Value ?? CGImagePropertyOrientation.up.rawValue) ?? .up
    switch orientation {
    case .up, .upMirrored, .down, .downMirrored:
        videoOrientation = ._0
    case .left, .leftMirrored:
        videoOrientation = ._90
    case .right, .rightMirrored:
        videoOrientation = ._270
    @unknown default:
        break
    }
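Putting the three steps together, a minimal sketch of the extension-side handler could look like this. This is my own combination of the steps above, not verbatim code: lastVideoTimestamp is a stored property, and the transport that ships the data to the main app is only hinted at in a comment.

```
import ReplayKit
import CoreMedia
import VideoToolbox
import UIKit

class SampleHandler: RPBroadcastSampleHandler {

    private var lastVideoTimestamp = CMTime.zero
    private let kDesiredFrameRate = 6.0

    override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer,
                                      with sampleBufferType: RPSampleBufferType) {
        guard sampleBufferType == .video else { return }

        // Step 2: throttle the ReplayKit feed down to ~6 fps.
        let currentTimestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        let delta = CMTimeSubtract(currentTimestamp, lastVideoTimestamp).seconds
        guard delta > 1.0 / kDesiredFrameRate else { return }
        lastVideoTimestamp = currentTimestamp

        // Step 1: convert the pixel buffer into a compressed image.
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        var cgImage: CGImage?
        VTCreateCGImageFromCVPixelBuffer(imageBuffer, options: nil, imageOut: &cgImage)
        guard let cgImage = cgImage,
              let imageData = UIImage(cgImage: cgImage).jpegData(compressionQuality: 0.5) else { return }

        // Step 3: read the orientation attachment ReplayKit sets on the buffer.
        let orientationAttachment = CMGetAttachment(sampleBuffer,
                                                    key: RPVideoSampleOrientationKey as CFString,
                                                    attachmentModeOut: nil) as? NSNumber

        // Transport (left open): ship imageData plus the orientation to the
        // main app, e.g. through an App Group container or a local socket.
        _ = (imageData, orientationAttachment)
    }
}
```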

The next two steps are handled in the main app. (The broadcast extension runs in its own process, so the image data from the steps above has to reach the app first, for example through an App Group container or a local socket.)

  4. Convert the image back into a CVPixelBuffer:

    guard let videoImage = UIImage(data: imageData!) else { return }
    guard let cgImage = videoImage.cgImage else { return }
    guard let imageBuffer = pixelBuffer(forImage: cgImage) else { return }
    

Helper method:

```
func pixelBuffer(forImage image:CGImage) -> CVPixelBuffer? {
    let frameSize = CGSize(width: image.width, height: image.height)

    var pixelBuffer:CVPixelBuffer? = nil

    let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(frameSize.width), Int(frameSize.height), kCVPixelFormatType_32BGRA , nil, &pixelBuffer)

    guard status == kCVReturnSuccess else { return nil }

    CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags.init(rawValue: 0))
    let data = CVPixelBufferGetBaseAddress(pixelBuffer!)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
    let context = CGContext(data: data, width: Int(frameSize.width), height: Int(frameSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: bitmapInfo.rawValue)

    context?.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))

    CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))

    return pixelBuffer
}
```
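One design note on the helper: kCVPixelFormatType_32BGRA is a convenient choice here because, as far as I know, RTCCVPixelBuffer can wrap BGRA buffers directly, so WebRTC can consume the buffer without a further colour-space conversion.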
  5. Final step: convert the CVPixelBuffer to an RTCVideoFrame:

    let rotation = RTCVideoRotation(rawValue: videoOrientation.rawValue)
    let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
    let rtcVideoFrame = RTCVideoFrame(buffer: rtcPixelBuffer,
                                      rotation: rotation ?? ._0,
                                      timeStampNs: Int64(timeStampNs))
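
To actually deliver the frame, push it into the RTCVideoSource that backs the local video track. Here is a minimal sketch, assuming videoSource is the RTCVideoSource created by factory.videoSource() in createVideoTrack() and videoCapturer is the RTCVideoCapturer bound to it (the push helper itself is hypothetical):

```
import WebRTC
import CoreVideo
import QuartzCore // CACurrentMediaTime

// Hypothetical helper: hands one converted frame over to WebRTC.
func push(_ imageBuffer: CVPixelBuffer,
          rotation: RTCVideoRotation,
          into videoSource: RTCVideoSource,
          from videoCapturer: RTCVideoCapturer) {
    // A monotonically increasing capture timestamp in nanoseconds.
    let timeStampNs = Int64(CACurrentMediaTime() * Double(NSEC_PER_SEC))
    let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
    let frame = RTCVideoFrame(buffer: rtcPixelBuffer,
                              rotation: rotation,
                              timeStampNs: timeStampNs)
    // RTCVideoSource conforms to RTCVideoCapturerDelegate, so it can
    // consume frames pushed from any capturer.
    videoSource.capturer(videoCapturer, didCapture: frame)
}
```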
    


Source: https://stackoverflow.com/questions/62664114/ios-screen-sharing-using-replaykit-using-webrtc-in-swift
