Question
I am trying to add a text overlay on video. When recording on an iPhone 5s or older device in high quality and writing text onto the frames, the audio goes missing after 1 or 2 seconds. This doesn't happen on newer devices like the iPhone 6/6s. If I remove the text-writer method it works properly on all devices, and if I reduce the video quality on the 5s it also works fine. How can I get video with audio on an iPhone 5s with the overlay text?
Here is my code
import Foundation
import AVFoundation
import AssetsLibrary
import UIKit
import CoreImage

class VideoWriter: NSObject {

    var fileWriter: AVAssetWriter!
    var videoInput: AVAssetWriterInput!
    var audioInput: AVAssetWriterInput!
    var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
    var presentationTime = kCMTimeZero
    var wod: WOD!
    var watermark = Watermark()
    var watermarkData: Dictionary<String, Any>?

    init(fileUrl: URL!, height: Int, width: Int, channels: Int, samples: Float64) {
        fileWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileType.mp4)

        // Video track: H.264 at the recording resolution.
        let videoOutputSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecH264 as AnyObject,
            AVVideoWidthKey: width as AnyObject,
            AVVideoHeightKey: height as AnyObject
        ]
        videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
        videoInput.expectsMediaDataInRealTime = true
        fileWriter.add(videoInput)

        // Pixel buffer adaptor so the watermarked CVPixelBuffer can be appended.
        let sourcePixelBufferAttributes: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            kCVPixelBufferWidthKey as String: width,
            kCVPixelBufferHeightKey as String: height
        ]
        assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: videoInput,
            sourcePixelBufferAttributes: sourcePixelBufferAttributes)

        // Audio track: AAC at the capture sample rate and channel count.
        let audioOutputSettings: [String: Any] = [
            AVFormatIDKey: Int(kAudioFormatMPEG4AAC) as AnyObject,
            AVNumberOfChannelsKey: channels as AnyObject,
            AVSampleRateKey: samples as AnyObject,
            AVEncoderBitRateKey: 128000 as AnyObject
        ]
        audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = true
        fileWriter.add(audioInput)
    }

    func write(_ sample: CMSampleBuffer, isVideo: Bool) {
        if CMSampleBufferDataIsReady(sample) {
            if fileWriter.status == AVAssetWriterStatus.unknown {
                print("Start writing, isVideo = \(isVideo), status = \(fileWriter.status.rawValue)")
                let startTime = CMSampleBufferGetPresentationTimeStamp(sample)
                fileWriter.startWriting()
                fileWriter.startSession(atSourceTime: startTime)
            }
            if fileWriter.status == AVAssetWriterStatus.failed {
                print("Error occurred, isVideo = \(isVideo), status = \(fileWriter.status.rawValue), \(fileWriter.error!.localizedDescription)")
                return
            }
            if isVideo {
                if videoInput.isReadyForMoreMediaData {
                    // Draw the text overlay into the frame's pixel buffer, then
                    // append it through the adaptor with the original timestamp.
                    let time = CMSampleBufferGetPresentationTimeStamp(sample)
                    let pixelBuffer = self.watermark.addWatermark(data: sample, values: self.watermarkData!)
                    self.assetWriterPixelBufferInput!.append(pixelBuffer, withPresentationTime: time)
                }
            } else {
                if audioInput.isReadyForMoreMediaData {
                    audioInput.append(sample)
                }
            }
        }
    }
}
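For context, this writer is presumably driven from AVCaptureVideoDataOutput / AVCaptureAudioDataOutput sample buffer delegate callbacks, roughly as in the sketch below. The CameraRecorder class, the session/output wiring it omits, and the example initializer values are assumptions for illustration, not part of the question's code.

import AVFoundation

// Sketch only: session configuration and setSampleBufferDelegate calls omitted.
final class CameraRecorder: NSObject,
                            AVCaptureVideoDataOutputSampleBufferDelegate,
                            AVCaptureAudioDataOutputSampleBufferDelegate {

    // e.g. VideoWriter(fileUrl: outputURL, height: 1080, width: 1920,
    //                  channels: 1, samples: 44100) -- placeholder values.
    let writer: VideoWriter

    init(writer: VideoWriter) {
        self.writer = writer
        super.init()
    }

    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // Video and audio data outputs share this callback; the flag routes the
        // buffer to the matching AVAssetWriterInput inside write(_:isVideo:).
        writer.write(sampleBuffer, isVideo: output is AVCaptureVideoDataOutput)
    }
}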
If, instead of going through assetWriterPixelBufferInput, we simply call videoInput.append(sample), recording works, but we lose the text overlay, as in the snippet below.
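In other words, if the video branch of write(_:isVideo:) passes the incoming buffer straight through, audio and video stay in sync but the text is never drawn:

// Replacement for the video branch in write(_:isVideo:):
if isVideo {
    if videoInput.isReadyForMoreMediaData {
        // Appending the untouched sample buffer keeps the audio,
        // but the watermark is never rendered into the frame.
        videoInput.append(sample)
    }
}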
Watermark (text overlay) sample code:
class Watermark {

    func addWatermark(data: CMSampleBuffer, values: Dictionary<String, Any>) -> CVPixelBuffer {
        let pixelBuffer = CMSampleBufferGetImageBuffer(data)
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        // ... (code that builds `image`, `timerType`, `date`, etc. omitted in the question)
        self.writeImage(image: image, timerType: timerType, date: date, name: name, wod: wod, timer: timer, rounds: rounds, reps: reps, status: status, toBuffer: pixelBuffer!)
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pixelBuffer!
    }

    func writeImage(image overlayImage: UIImage, timerType: String, date: String, name: String, wod: String, timer: String, rounds: String, reps: String, status: String, toBuffer pixelBuffer: CVPixelBuffer) {
        // Render the overlay text into a UIImage the same size as the frame.
        let textImage = self.createTextImage(image: overlayImage, timerType: timerType, date: date, userName: name, myWod: wod, timer: timer, rounds: rounds, reps: reps, status: status, size: CGSize(width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer)))
        let maskImage = CIImage(image: textImage)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let options = [kCIImageColorSpace as String: colorSpace]
        let inputImage = CIImage(cvImageBuffer: pixelBuffer, options: options)

        // Composite the text image over the camera frame.
        let filter = CIFilter(name: "CISourceOverCompositing")
        filter?.setValue(inputImage, forKey: kCIInputBackgroundImageKey)
        filter?.setValue(maskImage, forKey: kCIInputImageKey)
        let outputImage = filter?.outputImage

        var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
        bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue

        // Render the composited result back into the frame's pixel buffer.
        let context = CGContext(data: CVPixelBufferGetBaseAddress(pixelBuffer), width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: bitmapInfo)
        if context != nil {
            let ciContext = CIContext(cgContext: context!, options: nil)
            ciContext.render(outputImage!, to: pixelBuffer, bounds: outputImage!.extent, colorSpace: CGColorSpaceCreateDeviceRGB())
        }
    }
}
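One detail worth noting about the code above is that writeImage creates a fresh CGContext and CIContext for every frame. Purely as an illustration of where that per-frame work sits (not a confirmed fix for the missing audio), a variant that reuses a single CIContext and renders straight into the frame's pixel buffer could look like this; composite(overlay:onto:) and sharedCIContext are hypothetical names:

import CoreImage
import CoreVideo
import UIKit

// Hypothetical variant: one CIContext created once and reused for every frame.
let sharedCIContext = CIContext(options: nil)

func composite(overlay: UIImage, onto pixelBuffer: CVPixelBuffer) {
    guard let overlayCI = CIImage(image: overlay) else { return }
    let background = CIImage(cvImageBuffer: pixelBuffer)
    // Source-over compositing via the convenience API: text over the camera frame.
    let composited = overlayCI.composited(over: background)
    // Render the result back into the same pixel buffer.
    sharedCIContext.render(composited,
                           to: pixelBuffer,
                           bounds: background.extent,
                           colorSpace: CGColorSpaceCreateDeviceRGB())
}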
Source: https://stackoverflow.com/questions/48338067/audio-missing-when-adding-text-on-pixelbuffer