So after a lot of research into getting any of the available AVAudioUnitEffect subclasses into a SceneKit scene, I've finally got a solution that I've tested and played around with.
The following subclass of AVAudioEngine will:

1. Instantiate an AVAudioEngine with certain configurations
2. Add a few methods to encapsulate error handling and effects preset loading
3. Have a wire method that puts every player and effect node into the audio engine graph
4. Create AVAudioPCMBuffer instances with a configured frame count and file format, as a helper to make calling these functions from SceneKit easier
Note: multi-channel code is not included, as I don't have a 5.1 surround sound system and am already very happy with the HRTF (Head Related Transfer Function) algorithm exposed by the AVAudioEnvironmentNode class. Beware that this algorithm is the most CPU-intensive of the rendering options, though it is a binaural format.
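If HRTF turns out to be too heavy on a particular device, the rendering algorithm can be swapped per player node for a cheaper option; the fragment below is only illustrative and not part of the class that follows:

let fallbackPlayer = AVAudioPlayerNode()
// EqualPowerPanning is far cheaper than HRTF, at the cost of losing the binaural effect
fallbackPlayer.renderingAlgorithm = AVAudio3DMixingRenderingAlgorithm.EqualPowerPanning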
Possible additions:

1. A reverb zone preset switcher, which will require disconnecting the audio engine and rewiring the environment node to a new reverb preset (large hall, small room, etc.); a rough sketch of this follows below.
2. A raycast-based echo transfer dimension built from the SceneKit SCNNode list to add more realistic effects. For example: you are at the central bar of a T junction, an enemy is screaming to the left of the top bar of the junction; the sound traverses the raycast leaving the enemy and bounces against a wall that is facing you. The AVAudioUnitDelay class has internal functions to change the early delay to create the desired echo effect, without washing the node with the same effect wherever you are.
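To make the first idea a little more concrete, here is a minimal sketch of a preset switcher, assuming it lives inside the AudioLayerEngine class shown below and uses its delay, reverb and environment members; the method name and wiring order are my own choices and untested:

func switchReverbPreset(preset: AVAudioUnitReverbPreset) {
    engine.pause()                              // stop rendering while the graph is rewired
    engine.disconnectNodeOutput(delay)          // break the existing delay -> environment link
    reverb.loadFactoryPreset(preset)            // e.g. .LargeHall or .SmallRoom
    if reverb.engine == nil {
        engine.attachNode(reverb)               // attach once, on first use
    }
    engine.connect(delay, to: reverb, format: outputBuffer.format)
    engine.connect(reverb, to: environment, format: outputBuffer.format)
    startEngine()                               // resume with the new wiring
}

Calling this with a different preset would swap the reverb tail without rebuilding the rest of the graph.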
Code here:
import Foundation
import SceneKit
import AVFoundation

// Wraps an AVAudioEngine instance plus the environment and effect nodes
// needed to feed positional audio into a SceneKit scene.
class AudioLayerEngine: AVAudioEngine {
    var engine: AVAudioEngine!
    var environment: AVAudioEnvironmentNode!
    var outputBuffer: AVAudioPCMBuffer!
    var voicePlayer: AVAudioPlayerNode!
    var multiChannelEnabled: Bool!
    // audio effects
    let delay = AVAudioUnitDelay()
    let distortion = AVAudioUnitDistortion()
    let reverb = AVAudioUnitReverb()

    override init() {
        super.init()
        engine = AVAudioEngine()
        environment = AVAudioEnvironmentNode()
        engine.attachNode(self.environment)
        voicePlayer = AVAudioPlayerNode()
        engine.attachNode(voicePlayer)
        voicePlayer.volume = 1.0
        // load the voice buffer, wire the graph, start the engine and kick off playback
        outputBuffer = loadVoice()
        wireEngine()
        startEngine()
        voicePlayer.scheduleBuffer(self.outputBuffer, completionHandler: nil)
        voicePlayer.play()
    }
    func startEngine() {
        do {
            try engine.start()
        } catch {
            print("error starting engine: \(error)")
        }
    }
    // Loads the synthesised voice file into a PCM buffer so it can be scheduled on the player node.
    func loadVoice() -> AVAudioPCMBuffer {
        let URL = NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource("art.scnassets/sounds/interface/test", ofType: "aiff")!)
        do {
            let soundFile = try AVAudioFile(forReading: URL, commonFormat: AVAudioCommonFormat.PCMFormatFloat32, interleaved: false)
            outputBuffer = AVAudioPCMBuffer(PCMFormat: soundFile.processingFormat, frameCapacity: AVAudioFrameCount(soundFile.length))
            do {
                try soundFile.readIntoBuffer(outputBuffer)
            } catch {
                print("something went wrong loading the sound file into the buffer: \(error)")
            }
            print("returning buffer")
            return outputBuffer
        } catch {
            print("could not open the sound file: \(error)")
        }
        return outputBuffer
    }
    // Wire the graph: voice player -> distortion -> delay -> environment -> hardware output
    func wireEngine() {
        loadDistortionPreset(AVAudioUnitDistortionPreset.MultiCellphoneConcert)
        engine.attachNode(distortion)
        engine.attachNode(delay)
        engine.connect(voicePlayer, to: distortion, format: self.outputBuffer.format)
        engine.connect(distortion, to: delay, format: self.outputBuffer.format)
        engine.connect(delay, to: environment, format: self.outputBuffer.format)
        engine.connect(environment, to: engine.outputNode, format: constructOutputFormatForEnvironment())
    }
    // Builds the environment -> output connection format from the hardware's sample rate and channel count.
    func constructOutputFormatForEnvironment() -> AVAudioFormat {
        let outputChannelCount = self.engine.outputNode.outputFormatForBus(1).channelCount
        let hardwareSampleRate = self.engine.outputNode.outputFormatForBus(1).sampleRate
        let environmentOutputConnectionFormat = AVAudioFormat(standardFormatWithSampleRate: hardwareSampleRate, channels: outputChannelCount)
        multiChannelEnabled = false
        return environmentOutputConnectionFormat
    }
    func loadDistortionPreset(preset: AVAudioUnitDistortionPreset) {
        distortion.loadFactoryPreset(preset)
    }
    // Creates a positional player for a SceneKit node and routes it through the
    // shared distortion unit into the environment node.
    // Note: this reuses the class's single distortion unit and reconnects it
    // straight into the environment, replacing the distortion -> delay link made in wireEngine().
    func createPlayer(node: SCNNode) -> AVAudioPlayerNode {
        let player = AVAudioPlayerNode()
        distortion.loadFactoryPreset(AVAudioUnitDistortionPreset.SpeechCosmicInterference)
        engine.attachNode(player)
        engine.attachNode(distortion)
        engine.connect(player, to: distortion, format: outputBuffer.format)
        engine.connect(distortion, to: environment, format: constructOutputFormatForEnvironment())
        // place the player at the SceneKit node's position and use the binaural HRTF algorithm
        player.position = AVAudioMake3DPoint(Float(node.position.x), Float(node.position.y), Float(node.position.z))
        player.renderingAlgorithm = AVAudio3DMixingRenderingAlgorithm.HRTF
        player.reverbBlend = 0.3
        return player
    }
}
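For completeness, here is a minimal sketch of how the class could be driven from a SceneKit scene; the scene lookup and variable names are illustrative only:

// somewhere in your scene setup
let audioEngine = AudioLayerEngine()        // wires the graph, loads the voice buffer and starts playback

// give an enemy node its own positional, distorted player
let enemyNode = scene.rootNode.childNodeWithName("enemy", recursively: true)!
let enemyVoice = audioEngine.createPlayer(enemyNode)
enemyVoice.scheduleBuffer(audioEngine.outputBuffer, completionHandler: nil)
enemyVoice.play()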