Question
I am trying to resample the 44.1 kHz input audio to 48 kHz:
- using AudioToolbox's AUAudioUnit.inputHandler to capture the input
- writing the 44.1 kHz input out to a WAV file (this is working perfectly)
- converting the 44.1 kHz audio to 48 kHz with AudioConverterFillComplexBuffer (https://developer.apple.com/documentation/audiotoolbox/1503098-audioconverterfillcomplexbuffer) and writing the converted bytes out to a file; the two formats involved are sketched just below
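For reference, self.hardwareFormat and self.convertingFormat in the code are plain linear PCM descriptions that differ only in sample rate. A sketch of what they might look like, assuming mono 16-bit interleaved samples (the actual definitions live in the sample project linked at the end):

import AudioToolbox

// Assumed: mono, 16-bit signed-integer, packed linear PCM. Only the
// sample rate differs between the capture and conversion formats.
var hardwareFormat = AudioStreamBasicDescription(
    mSampleRate: 44_100,
    mFormatID: kAudioFormatLinearPCM,
    mFormatFlags: kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked,
    mBytesPerPacket: 2,
    mFramesPerPacket: 1,
    mBytesPerFrame: 2,
    mChannelsPerFrame: 1,
    mBitsPerChannel: 16,
    mReserved: 0)

var convertingFormat = hardwareFormat
convertingFormat.mSampleRate = 48_000   // target rate; everything else unchanged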
The problem is in the third step: after writing to the file, the audio is very noisy. Here is my code:
// convert to 48kHz
var audioConverterRef: AudioConverterRef?
CheckError(AudioConverterNew(&self.hardwareFormat,
                             &self.convertingFormat,
                             &audioConverterRef), "AudioConverterNew failed")

let outputBufferSize = inNumBytes
let outputBuffer = UnsafeMutablePointer<Int16>.allocate(capacity: MemoryLayout<Int16>.size * Int(outputBufferSize))

let convertedData = AudioBufferList.allocate(maximumBuffers: 1)
convertedData[0].mNumberChannels = self.hardwareFormat.mChannelsPerFrame
convertedData[0].mDataByteSize = outputBufferSize
convertedData[0].mData = UnsafeMutableRawPointer(outputBuffer)

var ioOutputDataPackets = UInt32(inNumPackets)
CheckError(AudioConverterFillComplexBuffer(audioConverterRef!,
                                           self.coverterCallback,
                                           &bufferList,
                                           &ioOutputDataPackets,
                                           convertedData.unsafeMutablePointer,
                                           nil), "AudioConverterFillComplexBuffer error")

let convertedmData = convertedData[0].mData!
let convertedmDataByteSize = convertedData[0].mDataByteSize

// Write converted packets to file -> audio_unit_int16_48.wav
CheckError(AudioFileWritePackets(self.outputFile48000!,
                                 false,
                                 convertedmDataByteSize,
                                 nil,
                                 recordPacket,
                                 &ioOutputDataPackets,
                                 convertedmData), "AudioFileWritePackets error")
And the conversion callback body is here:
let buffers = UnsafeMutableBufferPointer<AudioBuffer>(start: &bufferList.mBuffers, count: Int(bufferList.mNumberBuffers))
let dataPtr = UnsafeMutableAudioBufferListPointer(ioData)
dataPtr[0].mNumberChannels = 1
dataPtr[0].mData = buffers[0].mData
dataPtr[0].mDataByteSize = buffers[0].mDataByteSize
ioDataPacketCount.pointee = buffers[0].mDataByteSize / UInt32(MemoryLayout<Int16>.size)
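For comparison, an input proc for AudioConverterFillComplexBuffer is usually written to hand the source bytes over exactly once per conversion call and to report zero packets on any later pull, because the converter may invoke the proc several times; the callback above returns the same bytes on every pull. A minimal sketch of that once-only pattern (ConverterInput and its field names are hypothetical, not from the project):

// Hypothetical state passed to the converter through inUserData: the captured
// 44.1 kHz buffer list plus a packet count that is zeroed once consumed.
struct ConverterInput {
    var bufferList: AudioBufferList
    var packetsLeft: UInt32
}

let inputProc: AudioConverterComplexInputDataProc = { _, ioNumberDataPackets, ioData, _, inUserData in
    let input = inUserData!.assumingMemoryBound(to: ConverterInput.self)
    guard input.pointee.packetsLeft > 0 else {
        // The source buffer was already handed over on an earlier pull;
        // report zero packets so the converter stops asking for more.
        ioNumberDataPackets.pointee = 0
        return noErr
    }
    ioData.pointee = input.pointee.bufferList      // hand over the 44.1 kHz bytes
    ioNumberDataPackets.pointee = input.pointee.packetsLeft
    input.pointee.packetsLeft = 0                  // mark the data as consumed
    return noErr
}

With this shape, the third argument to AudioConverterFillComplexBuffer points at the ConverterInput value rather than at the bare AudioBufferList. Note also that 48 kHz output needs roughly 48/44.1 ≈ 1.09 times as many bytes as the input, so an output buffer sized to inNumBytes is slightly too small.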
The sample project is here: https://drive.google.com/file/d/1GvCJ5hEqf7PsBANwUpVTRE1L7S_zQxnL/view?usp=sharing
Answer 1:
If part of your chain is still AVAudioEngine, there's sample code from Apple for offline processing of AVAudioFiles.
Here's a modified version that includes the sample-rate change:
import Cocoa
import AVFoundation
import PlaygroundSupport
let outputSampleRate = 48_000.0
let outputAudioFormat = AVAudioFormat(standardFormatWithSampleRate: outputSampleRate, channels: 2)!
// file needs to be in ~/Documents/Shared Playground Data
let localURL = playgroundSharedDataDirectory.appendingPathComponent("inputFile_44.aiff")
let outputURL = playgroundSharedDataDirectory.appendingPathComponent("outputFile_48.aiff")
let sourceFile: AVAudioFile
let format: AVAudioFormat
do {
    sourceFile = try AVAudioFile(forReading: localURL)
    format = sourceFile.processingFormat
} catch {
    fatalError("Unable to load the source audio file: \(error.localizedDescription).")
}
let sourceSettings = sourceFile.fileFormat.settings
var outputSettings = sourceSettings
outputSettings[AVSampleRateKey] = outputSampleRate
let engine = AVAudioEngine()
let player = AVAudioPlayerNode()
engine.attach(player)
// Connect the nodes.
engine.connect(player, to: engine.mainMixerNode, format: format)
// Schedule the source file.
player.scheduleFile(sourceFile, at: nil)
do {
    // The maximum number of frames the engine renders in any single render call.
    let maxFrames: AVAudioFrameCount = 4096
    try engine.enableManualRenderingMode(.offline, format: outputAudioFormat,
                                         maximumFrameCount: maxFrames)
} catch {
    fatalError("Enabling manual rendering mode failed: \(error).")
}
do {
    try engine.start()
    player.play()
} catch {
    fatalError("Unable to start audio engine: \(error).")
}
let buffer = AVAudioPCMBuffer(pcmFormat: engine.manualRenderingFormat, frameCapacity: engine.manualRenderingMaximumFrameCount)!
var outputFile: AVAudioFile?
do {
    outputFile = try AVAudioFile(forWriting: outputURL, settings: outputSettings)
} catch {
    fatalError("Unable to open output audio file: \(error).")
}
let outputLengthD = Double(sourceFile.length) * outputSampleRate / sourceFile.fileFormat.sampleRate
let outputLength = Int64(ceil(outputLengthD)) // no sample left behind
while engine.manualRenderingSampleTime < outputLength {
    do {
        let frameCount = outputLength - engine.manualRenderingSampleTime
        let framesToRender = min(AVAudioFrameCount(frameCount), buffer.frameCapacity)
        let status = try engine.renderOffline(framesToRender, to: buffer)

        switch status {
        case .success:
            // The data rendered successfully. Write it to the output file.
            try outputFile?.write(from: buffer)
        case .insufficientDataFromInputNode:
            // Applicable only when using the input node as one of the sources.
            break
        case .cannotDoInCurrentContext:
            // The engine couldn't render in the current render call.
            // Retry in the next iteration.
            break
        case .error:
            // An error occurred while rendering the audio.
            fatalError("The manual rendering failed.")
        }
    } catch {
        fatalError("The manual rendering failed: \(error).")
    }
}
// Stop the player node and engine.
player.stop()
engine.stop()
outputFile = nil // AVAudioFile won't close until it goes out of scope, so we set output file back to nil here
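If you need to stay buffer-by-buffer on the live input instead of rendering offline, AVAudioConverter wraps the same conversion with a smaller surface. A minimal sketch, assuming mono 16-bit input at 44.1 kHz (the convert44to48 helper name is mine, not from the question):

import AVFoundation

let inputFormat = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 44_100,
                                channels: 1, interleaved: true)!
let outputFormat48 = AVAudioFormat(commonFormat: .pcmFormatInt16, sampleRate: 48_000,
                                   channels: 1, interleaved: true)!
let converter = AVAudioConverter(from: inputFormat, to: outputFormat48)!

// Converts one captured buffer; the input block hands the data over exactly
// once and then reports .noDataNow so the converter stops pulling.
func convert44to48(_ inputBuffer: AVAudioPCMBuffer) -> AVAudioPCMBuffer? {
    let ratio = outputFormat48.sampleRate / inputFormat.sampleRate
    let capacity = AVAudioFrameCount((Double(inputBuffer.frameLength) * ratio).rounded(.up))
    guard let outputBuffer = AVAudioPCMBuffer(pcmFormat: outputFormat48,
                                              frameCapacity: capacity) else { return nil }

    var supplied = false
    var conversionError: NSError?
    let status = converter.convert(to: outputBuffer, error: &conversionError) { _, inputStatus in
        if supplied {
            inputStatus.pointee = .noDataNow
            return nil
        }
        supplied = true
        inputStatus.pointee = .haveData
        return inputBuffer
    }
    return status == .error ? nil : outputBuffer
}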
Source: https://stackoverflow.com/questions/60711929/change-sample-rate-with-audioconverter