Image/Text overlay in video with Swift

野趣味 2021-01-30 15:09

I am working on an image overlay for a watermark effect in a video using Swift. I am using AVFoundation for this, but somehow I am not succeeding.

Following is my code

5 Answers
  • 2021-01-30 15:36

    To supplement, here's a function that creates CATextLayers based on an array of UITextViews by copying over their rotation, scale and font. Just add these to the container layer supplied to AVVideoCompositionCoreAnimationTool (a usage sketch follows the code below):

    private static func createTextLayer(totalSize: CGSize,
                                            textView: UITextView) -> CATextLayer {
            let textLayer: CACenteredTextLayer = CACenteredTextLayer()
            textLayer.backgroundColor = UIColor.clear.cgColor // CALayer expects a CGColor
            textLayer.foregroundColor = textView.textColor?.cgColor
            textLayer.masksToBounds = false
            textLayer.isWrapped = true
    
            let scale: CGFloat = UIScreen.main.scale
    
            if let font: UIFont = textView.font {
                let upscaledFont: UIFont = font.withSize(font.pointSize * scale)
                let attributedString = NSAttributedString(
                    string: textView.text,
                    attributes: [NSAttributedString.Key.font: upscaledFont,
                                 NSAttributedString.Key.foregroundColor: textView.textColor ?? UIColor.white])
                textLayer.string = attributedString
            }
    
            // Set text alignment
            let alignment: CATextLayerAlignmentMode
            switch textView.textAlignment {
            case NSTextAlignment.left:
                alignment = CATextLayerAlignmentMode.left
            case NSTextAlignment.center:
                alignment = CATextLayerAlignmentMode.center
            default:
                alignment = CATextLayerAlignmentMode.right
            }
            textLayer.alignmentMode = alignment
    
            let originalFrame: CGRect = textView.frame
    
            // Also take scale into consideration
            let targetSize: CGSize = CGSize(width: originalFrame.width * scale,
                                            height: originalFrame.height * scale)
    
            // The CALayer positioning is inverted on the Y-axes, so apply this
            let origin: CGPoint = CGPoint(x: originalFrame.origin.x * scale,
                                          y: (totalSize.height - (originalFrame.origin.y * scale)) - targetSize.height)
    
            textLayer.frame = CGRect(x: origin.x,
                                     y: origin.y,
                                     width: targetSize.width,
                                     height: targetSize.height)
    
            // Determine the scale
            textLayer.anchorPoint = CGPoint(x: 0.5,
                                            y: 0.5)
    
            var newTransform: CATransform3D = CATransform3DMakeScale(textView.transform.xScale,
                                                                     textView.transform.yScale,
                                                                     1) // z-scale of 1; 0 would make the matrix degenerate
    
            // Apply the rotation with its sign flipped for the layer's geometry
            newTransform = CATransform3DRotate(newTransform,
                                               textView.transform.radiansFor3DTransform,
                                               0,
                                               0,
                                               1)
            textLayer.transform = newTransform
    
            return textLayer
    }
    

    Combine this with this subclassing of CATextLayer to center the text vertically:

    final class CACenteredTextLayer: CATextLayer {
        override func draw(in ctx: CGContext) {
            guard let attributedString = string as? NSAttributedString else { return }
    
            let height = self.bounds.size.height
            let boundingRect: CGRect = attributedString.boundingRect(
                with: CGSize(width: bounds.width,
                             height: CGFloat.greatestFiniteMagnitude),
                options: NSStringDrawingOptions.usesLineFragmentOrigin,
                context: nil)
            let yDiff: CGFloat = (height - boundingRect.size.height) / 2
    
            ctx.saveGState()
            ctx.translateBy(x: 0.0, y: yDiff)
            super.draw(in: ctx)
            ctx.restoreGState()
        }
    }
    
    private extension CGAffineTransform {
        var xScale: CGFloat {
            return sqrt((a*a) + (c*c))
        }
    
        var yScale: CGFloat {
            return sqrt((b*b) + (d*d))
        }
    
        var radiansFor3DTransform: CGFloat {
            // The degree/radian round trip in the original reduces to a sign flip
            return -atan2(b, a)
        }
    }
    
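    A minimal usage sketch (hedged; `textViews`, `parentlayer`, and `renderSize` are hypothetical names for your source UITextViews, the container layer handed to AVVideoCompositionCoreAnimationTool, and the video composition's render size):

        // Hypothetical usage, inside the type that declares createTextLayer(totalSize:textView:)
        for textView in textViews {
            let textLayer = createTextLayer(totalSize: renderSize,
                                            textView: textView)
            parentlayer.addSublayer(textLayer)
        }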
  • 2021-01-30 15:37

    Here's an update that's working in Swift 4:

    import UIKit
    import AVFoundation
    import AVKit
    import Photos
    
    class ViewController: UIViewController {
    
    var myurl: URL?
    
    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    
    }
    
    @IBAction func saveVideoTapper(_ sender: Any) {
    
        guard let path = Bundle.main.path(forResource: "sample_video", ofType: "mp4") else { return }
        let fileURL = URL(fileURLWithPath: path)
    
        let composition = AVMutableComposition()
        let vidAsset = AVURLAsset(url: fileURL, options: nil)
    
        // get video track
        let vtrack =  vidAsset.tracks(withMediaType: AVMediaType.video)
        let videoTrack: AVAssetTrack = vtrack[0]
        let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)
    
        let tr: CMTimeRange = CMTimeRange(start: CMTime.zero, duration: CMTime(seconds: 10.0, preferredTimescale: 600))
        composition.insertEmptyTimeRange(tr)
    
        let trackID:CMPersistentTrackID = CMPersistentTrackID(kCMPersistentTrackID_Invalid)
    
        if let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: trackID) {
    
            do {
                try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
            } catch {
                print("error")
            }
    
            compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
    
        } else {
            print("unable to add video track")
            return
        }
    
    
        // Watermark Effect
        let size = videoTrack.naturalSize
    
        let imglogo = UIImage(named: "image.png")
        let imglayer = CALayer()
        imglayer.contents = imglogo?.cgImage
        imglayer.frame = CGRect(x: 5, y: 5, width: 100, height: 100)
        imglayer.opacity = 0.6
    
        // create text Layer
        let titleLayer = CATextLayer()
        titleLayer.backgroundColor = UIColor.white.cgColor
        titleLayer.string = "Dummy text"
        titleLayer.font = UIFont(name: "Helvetica", size: 28)
        titleLayer.fontSize = 28 // CATextLayer ignores the UIFont's point size; set fontSize explicitly
        titleLayer.shadowOpacity = 0.5
        titleLayer.alignmentMode = CATextLayerAlignmentMode.center
        titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height / 6)
    
    
        let videolayer = CALayer()
        videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    
        let parentlayer = CALayer()
        parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        parentlayer.addSublayer(videolayer)
        parentlayer.addSublayer(imglayer)
        parentlayer.addSublayer(titleLayer)
    
        let layercomposition = AVMutableVideoComposition()
        layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
        layercomposition.renderSize = size
        layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
    
        // instruction for watermark
        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
        let videotrack = composition.tracks(withMediaType: .video)[0]
        let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
        instruction.layerInstructions = [layerinstruction]
        layercomposition.instructions = [instruction]
    
        //  create new file to receive data
        let docsDir = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
        let movieDestinationUrl = docsDir.appendingPathComponent("result.mov")
    
        // use AVAssetExportSession to export video
        guard let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
            print("unable to create export session")
            return
        }
        assetExport.outputFileType = AVFileType.mov
        assetExport.videoComposition = layercomposition
    
        // Check exist and remove old file
        FileManager.default.removeItemIfExisted(movieDestinationUrl)
    
        assetExport.outputURL = movieDestinationUrl
        assetExport.exportAsynchronously(completionHandler: {
            switch assetExport.status {
            case AVAssetExportSession.Status.failed:
                print("failed")
                print(assetExport.error ?? "unknown error")
            case AVAssetExportSession.Status.cancelled:
                print("cancelled")
                print(assetExport.error ?? "unknown error")
            default:
                print("Movie complete")
    
                self.myurl = movieDestinationUrl
    
                PHPhotoLibrary.shared().performChanges({
                    PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl)
                }) { saved, error in
                    if saved {
                        print("Saved")
                    }
                }
    
                self.playVideo()
    
            }
        })
    
    }
    
    
    func playVideo() {
        guard let url = myurl else { return }
        let player = AVPlayer(url: url)
        let playerLayer = AVPlayerLayer(player: player)
        playerLayer.frame = self.view.bounds
        self.view.layer.addSublayer(playerLayer)
        player.play()
        print("playing...")
    }
    
    
    
    }
    
    
    extension FileManager {
        func removeItemIfExisted(_ url: URL) {
            if fileExists(atPath: url.path) {
                do {
                    try removeItem(atPath: url.path)
                } catch {
                    print("Failed to delete file: \(error)")
                }
            }
        }
    }
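    Note that saving with PHPhotoLibrary requires photo library authorization; a minimal sketch of requesting it before the save (this assumes the NSPhotoLibraryAddUsageDescription key is set in Info.plist):

        PHPhotoLibrary.requestAuthorization { status in
            guard status == .authorized else {
                print("Photo library access denied")
                return
            }
            PHPhotoLibrary.shared().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl)
            }) { saved, error in
                print(saved ? "Saved" : "Save failed: \(String(describing: error))")
            }
        }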
    
  • 2021-01-30 15:43

    The code provided by @El Captain would work. It's only missing:

        assetExport.videoComposition = layercomposition
    

    You can add this right after the instantiation of the AVAssetExportSession
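    In context (a sketch using the names from the Swift 4 answer above):

        guard let assetExport = AVAssetExportSession(asset: composition,
                                                     presetName: AVAssetExportPresetHighestQuality) else { return }
        assetExport.outputFileType = AVFileType.mov
        assetExport.videoComposition = layercomposition // the missing line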

    NOTE: The code originally provided would only export the video track, not the audio track. If you need the audio track, you can add something like this after you configure the compositionvideoTrack:

    // Pull the audio tracks from the same source asset
    let audioTracks = vidAsset.tracks(withMediaType: .audio)
    if let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio,
                                                               preferredTrackID: kCMPersistentTrackID_Invalid) {
        for audioTrack in audioTracks {
            try? compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: CMTime.zero)
        }
    }
    
  • 2021-01-30 15:44

    From what I see in your code, you are not adding the parentlayer to the screen.

    You create a CALayer() and add videolayer, imglayer and titleLayer to it, but you never add this parent layer to the screen:

    yourView.layer.addSublayer(parentlayer)
    

    Hope this helps.

  • 2021-01-30 15:48

    @Rey Hernandez this just helped me a lot! If anyone wants further clarification on how to add an audio asset to the video, here is the code to combine them:

    let vtrack = vidAsset.tracks(withMediaType: .video)
    let videoTrack: AVAssetTrack = vtrack[0]
    let vid_duration = videoTrack.timeRange.duration
    let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)

    let atrack = vidAsset.tracks(withMediaType: .audio)
    let audioTrack: AVAssetTrack = atrack[0]
    let audio_duration = audioTrack.timeRange.duration
    let audio_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)

    do {
        let compositionvideoTrack = composition.addMutableTrack(withMediaType: .video,
                                                                preferredTrackID: kCMPersistentTrackID_Invalid)!
        try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
        compositionvideoTrack.preferredTransform = videoTrack.preferredTransform

        let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio,
                                                                preferredTrackID: kCMPersistentTrackID_Invalid)!
        // Keep the video track's preferredTransform; the original code
        // mistakenly overwrote it with the audio track's transform afterwards.
        try compositionAudioTrack.insertTimeRange(audio_timerange, of: audioTrack, at: CMTime.zero)
    } catch {
        print(error)
    }
    