How do I export UIImage array as a movie?

后端 未结 10 1858
臣服心动
臣服心动 2020-11-22 02:11

I have a serious problem: I have an NSArray with several UIImage objects. What I now want to do is to create a movie from those UIImage objects.

10条回答
  •  既然无缘
    2020-11-22 02:43

    NOTE: This is a Swift 2.1 solution (iOS8+, XCode 7.2).

    Last week I set out to write the iOS code to generate a video from images. I had a little bit of AVFoundation experience, but had never even heard of a CVPixelBuffer. I came across the answers on this page and also here. It took several days to dissect everything and put it all back together in Swift in a way that made sense to my brain. Below is what I came up with.

    NOTE: If you copy/paste all the code below into a single Swift file, it should compile. You'll just need to tweak loadImages() and the RenderSettings values.

    Part 1: Setting things up

    Here I group all the export-related settings into a single RenderSettings struct.

    import AVFoundation
    import UIKit
    import Photos
    
    struct RenderSettings {

        // Output frame dimensions, in pixels.
        var width: CGFloat = 1280
        var height: CGFloat = 720
        // Playback rate of the rendered movie.
        var fps: Int32 = 2   // 2 frames per second
        // Codec and file naming for the exported movie.
        var avCodecKey = AVVideoCodecH264
        var videoFilename = "render"
        var videoFilenameExt = "mp4"

        /// The frame size as a CGSize, derived from `width`/`height`.
        var size: CGSize {
            return CGSize(width: width, height: height)
        }

        /// Destination URL for the rendered movie, inside the Caches directory.
        /// Caches keeps the file available as long as we need it while ensuring
        /// it is excluded from device backups.
        var outputURL: NSURL {
            let fileManager = NSFileManager.defaultManager()
            guard let cachesDirURL = try? fileManager.URLForDirectory(.CachesDirectory, inDomain: .UserDomainMask, appropriateForURL: nil, create: true) else {
                fatalError("URLForDirectory() failed")
            }
            return cachesDirURL.URLByAppendingPathComponent(videoFilename).URLByAppendingPathExtension(videoFilenameExt)
        }
    }
    

    Part 2: The ImageAnimator

    The ImageAnimator class knows about your images and uses the VideoWriter class to perform the rendering. The idea is to keep the video content code separate from the low-level AVFoundation code. I also added saveToLibrary() here as a class function which gets called at the end of the chain to save the video to the Photo Library.

    class ImageAnimator {

        // Apple suggests a timescale of 600 because it's a multiple of standard video rates 24, 25, 30, 60 fps etc.
        static let kTimescale: Int32 = 600

        // Export configuration (size, fps, codec, output URL).
        let settings: RenderSettings
        // Low-level AVFoundation wrapper that performs the actual writing.
        let videoWriter: VideoWriter
        // Frames still to be written; consumed front-to-back as rendering proceeds.
        // Implicitly unwrapped so it can be assigned via loadImages() in init,
        // after the other stored properties have been initialized.
        var images: [UIImage]!

        // Index of the next frame; multiplied by the per-frame duration to
        // produce each frame's presentation time.
        var frameNum = 0

        /// Saves the rendered movie at `videoURL` into the user's Photo Library.
        /// Requests authorization first; does nothing if authorization is denied.
        class func saveToLibrary(videoURL: NSURL) {
            PHPhotoLibrary.requestAuthorization { status in
                guard status == .Authorized else { return }

                PHPhotoLibrary.sharedPhotoLibrary().performChanges({
                    PHAssetChangeRequest.creationRequestForAssetFromVideoAtFileURL(videoURL)
                    }) { success, error in
                        if !success {
                            print("Could not save video to photo library:", error)
                        }
                }
            }
        }

        /// Deletes any file at `fileURL`. Removal errors are swallowed on the
        /// assumption that the file simply did not exist.
        class func removeFileAtURL(fileURL: NSURL) {
            do {
                try NSFileManager.defaultManager().removeItemAtPath(fileURL.path!)
            }
            catch _ as NSError {
                // Assume file doesn't exist.
            }
        }

        /// Creates the animator, its VideoWriter, and loads the source images.
        init(renderSettings: RenderSettings) {
            settings = renderSettings
            videoWriter = VideoWriter(renderSettings: settings)
            images = loadImages()
        }

        /// Renders the loaded images into a movie file, kicks off saving it to
        /// the Photo Library, then invokes `completion` (VideoWriter dispatches
        /// its completion on the main queue).
        func render(completion: ()->Void) {

            // The VideoWriter will fail if a file exists at the URL, so clear it out first.
            ImageAnimator.removeFileAtURL(settings.outputURL)

            videoWriter.start()
            videoWriter.render(appendPixelBuffers) {
                ImageAnimator.saveToLibrary(self.settings.outputURL)
                completion()
            }

        }

        // Replace this logic with your own.
        // Demo loader: reads "1.jpg" ... "10.jpg" from the app bundle. Note the
        // force-unwrap — a missing image crashes; placeholder code only.
        func loadImages() -> [UIImage] {
            var images = [UIImage]()
            for index in 1...10 {
                let filename = "\(index).jpg"
                images.append(UIImage(named: filename)!)
            }
            return images
        }

        // This is the callback function for VideoWriter.render()
        // Appends as many frames as the writer will currently accept.
        // Returns true once every image has been written; returns false when the
        // writer needs to drain first (the writer re-invokes this callback later).
        func appendPixelBuffers(writer: VideoWriter) -> Bool {

            // Duration of one frame in timescale ticks. Integer division: this is
            // exact only when fps evenly divides kTimescale (600).
            let frameDuration = CMTimeMake(Int64(ImageAnimator.kTimescale / settings.fps), ImageAnimator.kTimescale)

            while !images.isEmpty {

                if writer.isReadyForData == false {
                    // Inform writer we have more buffers to write.
                    return false
                }

                let image = images.removeFirst()
                let presentationTime = CMTimeMultiply(frameDuration, Int32(frameNum))
                let success = videoWriter.addImage(image, withPresentationTime: presentationTime)
                if success == false {
                    fatalError("addImage() failed")
                }

                frameNum++
            }

            // Inform writer all buffers have been written.
            return true
        }

    }
    

    Part 3: The VideoWriter

    The VideoWriter class does all the AVFoundation heavy lifting. It's mostly a wrapper around AVAssetWriter and AVAssetWriterInput. It also contains some clever code (not written by me) that knows how to translate a UIImage into a CVPixelBuffer.

    class VideoWriter {

        // Export configuration (size, fps, codec, output URL).
        let renderSettings: RenderSettings

        // AVFoundation objects; all created in start(), hence implicitly unwrapped.
        var videoWriter: AVAssetWriter!
        var videoWriterInput: AVAssetWriterInput!
        var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!

        // True when the input can accept another pixel buffer; false before
        // start() is called or while the writer's internal queue is full.
        var isReadyForData: Bool {
            return videoWriterInput?.readyForMoreMediaData ?? false
        }

        /// Draws `image` aspect-fit (letterboxed/pillarboxed as needed) into a
        /// pixel buffer leased from `pixelBufferPool`, sized to `size`.
        /// Crashes via fatalError if a buffer cannot be obtained from the pool.
        class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {

            var pixelBufferOut: CVPixelBuffer?

            let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
            if status != kCVReturnSuccess {
                fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
            }

            let pixelBuffer = pixelBufferOut!

            // Lock while CPU-drawing into the buffer's backing memory.
            CVPixelBufferLockBaseAddress(pixelBuffer, 0)

            let data = CVPixelBufferGetBaseAddress(pixelBuffer)
            let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
            // Bitmap context draws straight into the pixel buffer's memory.
            // PremultipliedFirst matches the 32ARGB pool format set up in start().
            let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height),
                8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

            // Clear first: recycled pool buffers may contain a previous frame.
            CGContextClearRect(context, CGRectMake(0, 0, size.width, size.height))

            // Scale to fit inside the target while preserving aspect ratio.
            let horizontalRatio = size.width / image.size.width
            let verticalRatio = size.height / image.size.height
            //aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
            let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit

            let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)

            // Center the scaled image within the frame.
            let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
            let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0

            CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), image.CGImage)
            CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)

            return pixelBuffer
        }

        init(renderSettings: RenderSettings) {
            self.renderSettings = renderSettings
        }

        /// Creates the asset writer, input, and pixel buffer adaptor, then opens
        /// the writing session. Must be called before render() or addImage().
        func start() {

            let avOutputSettings: [String: AnyObject] = [
                AVVideoCodecKey: renderSettings.avCodecKey,
                AVVideoWidthKey: NSNumber(float: Float(renderSettings.width)),
                AVVideoHeightKey: NSNumber(float: Float(renderSettings.height))
            ]

            // Local helper: builds the adaptor whose pool supplies pixel buffers.
            func createPixelBufferAdaptor() {
                let sourcePixelBufferAttributesDictionary = [
                    kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
                    kCVPixelBufferWidthKey as String: NSNumber(float: Float(renderSettings.width)),
                    kCVPixelBufferHeightKey as String: NSNumber(float: Float(renderSettings.height))
                ]
                pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
                    sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
            }

            // Local helper: creates the writer and validates the output settings.
            func createAssetWriter(outputURL: NSURL) -> AVAssetWriter {
                guard let assetWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeMPEG4) else {
                    fatalError("AVAssetWriter() failed")
                }

                guard assetWriter.canApplyOutputSettings(avOutputSettings, forMediaType: AVMediaTypeVideo) else {
                    fatalError("canApplyOutputSettings() failed")
                }

                return assetWriter
            }

            videoWriter = createAssetWriter(renderSettings.outputURL)
            videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: avOutputSettings)

            if videoWriter.canAddInput(videoWriterInput) {
                videoWriter.addInput(videoWriterInput)
            }
            else {
                fatalError("canAddInput() returned false")
            }

            // The pixel buffer adaptor must be created before we start writing.
            createPixelBufferAdaptor()

            if videoWriter.startWriting() == false {
                fatalError("startWriting() failed")
            }

            videoWriter.startSessionAtSourceTime(kCMTimeZero)

            // The adaptor's pool is only non-nil after the session has started.
            precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
        }

        /// Drives the write loop: repeatedly calls `appendPixelBuffers` whenever
        /// the input is ready for data. When the callback returns true (all
        /// frames written), finishes the file and calls `completion` on the main
        /// queue.
        func render(appendPixelBuffers: (VideoWriter)->Bool, completion: ()->Void) {

            precondition(videoWriter != nil, "Call start() to initialze the writer")

            let queue = dispatch_queue_create("mediaInputQueue", nil)
            videoWriterInput.requestMediaDataWhenReadyOnQueue(queue) {
                let isFinished = appendPixelBuffers(self)
                if isFinished {
                    self.videoWriterInput.markAsFinished()
                    self.videoWriter.finishWritingWithCompletionHandler() {
                        dispatch_async(dispatch_get_main_queue()) {
                            completion()
                        }
                    }
                }
                else {
                    // Fall through. The closure will be called again when the writer is ready.
                }
            }
        }

        /// Converts `image` to a pixel buffer and appends it at the given
        /// presentation time. Returns false if the adaptor rejects the buffer.
        func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {

            precondition(pixelBufferAdaptor != nil, "Call start() to initialze the writer")

            let pixelBuffer = VideoWriter.pixelBufferFromImage(image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
            return pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
        }

    }
    

    Part 4: Make it happen

    Once everything is in place, these are your 3 magic lines:

    // Configure the export (tweak RenderSettings defaults as needed).
    let settings = RenderSettings()
    // Loads the images and wires up the VideoWriter.
    let imageAnimator = ImageAnimator(renderSettings: settings)
    // Renders the movie, saves it to the Photo Library, then runs the closure.
    imageAnimator.render() {
        print("yes")
    }
    

提交回复
热议问题