I'd like to play a synthesised sound on an iPhone. Instead of using a pre-recorded sound and a SystemSoundID to play an existing binary, I'd like to synthesise it.
Many of the audio technologies allow for data to be passed in instead of a sound file. AVAudioPlayer, for example, has:
-initWithData:error:
Initializes and returns an audio player for playing a designated memory buffer.
- (id)initWithData:(NSData *)data error:(NSError **)outError
However, I am not sure how you would pass in a data pointer, start the sound, and then keep it looping by passing in other data pointers (or repeating the same one), etc.
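Roughly, the call pattern I have in mind is something like the sketch below, assuming waveData is an NSData that already contains a complete audio file in memory (e.g. a WAV), which is what initWithData:error: appears to expect rather than raw PCM; the variable names are just placeholders.

#import <AVFoundation/AVFoundation.h>

// Sketch only: waveData is assumed to hold a complete WAV/CAF file in memory.
NSError *error = nil;
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:waveData error:&error];
if (player) {
  player.numberOfLoops = -1;   // -1 means loop indefinitely
  [player play];
} else {
  NSLog(@"Failed to create player: %@", error);
}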
Davide Vosti's link to http://lists.apple.com/archives/coreaudio-api/2008/Dec/msg00173.html no longer works, since Apple's mailing list archive seems to be unresponsive. Here is Google's cache, for completeness:
//
// AudioUnitTestAppDelegate.m
// AudioUnitTest
//
// Created by Marc Vaillant on 11/25/08.
// Copyright __MyCompanyName__ 2008. All rights reserved.
//
#import "AudioUnitTestAppDelegate.h"
#include <AudioUnit/AudioUnit.h>
//#include "MachTimer.hpp"
#include <vector>
#include <iostream>
using namespace std;
#define kOutputBus 0
#define kInputBus 1
#define SAMPLE_RATE 44100
vector<int> _pcm;
int _index;
@implementation AudioUnitTestAppDelegate
@synthesize window;
// Generates a sine tone as interleaved 16-bit stereo samples packed two per
// 32-bit int, with a short sine-shaped rise and fall to avoid clicks.
void generateTone(
    vector<int>& pcm,
    int freq,
    double lengthMS,
    int sampleRate,
    double riseTimeMS,
    double gain)
{
  int numSamples = ((double) sampleRate) * lengthMS / 1000.;
  int riseTimeSamples = ((double) sampleRate) * riseTimeMS / 1000.;

  if(gain > 1.)
    gain = 1.;
  if(gain < 0.)
    gain = 0.;

  pcm.resize(numSamples);

  for(int i = 0; i < numSamples; ++i)
  {
    double value = sin(2. * M_PI * freq * i / sampleRate);

    // Fade in over the first riseTimeSamples and fade out over the last.
    if(i < riseTimeSamples)
      value *= sin(i * M_PI / (2.0 * riseTimeSamples));
    if(i > numSamples - riseTimeSamples - 1)
      value *= sin(2. * M_PI * (i - (numSamples - riseTimeSamples) + riseTimeSamples)/ (4. * riseTimeSamples));

    pcm[i] = (int) (value * 32500.0 * gain);
    pcm[i] += (pcm[i]<<16);   // pack the sample into both 16-bit halves (left and right channel)
  }
}
// Render callback: copies the next chunk of the precomputed tone into the
// output buffers and pads with silence once the tone has been played out.
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
  cout<<"index = "<<_index<<endl;
  cout<<"numBuffers = "<<ioData->mNumberBuffers<<endl;

  int totalNumberOfSamples = _pcm.size();

  for(UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
  {
    int samplesLeft = totalNumberOfSamples - _index;
    int numSamples = ioData->mBuffers[i].mDataByteSize / 4;   // 4 bytes per stereo frame

    if(samplesLeft > 0)
    {
      if(samplesLeft < numSamples)
      {
        // Last partial chunk: copy what remains, then zero-fill the rest.
        memcpy(ioData->mBuffers[i].mData, &_pcm[_index], samplesLeft * 4);
        _index += samplesLeft;
        memset((char*) ioData->mBuffers[i].mData + samplesLeft * 4, 0, (numSamples - samplesLeft) * 4);
      }
      else
      {
        memcpy(ioData->mBuffers[i].mData, &_pcm[_index], numSamples * 4);
        _index += numSamples;
      }
    }
    else
      memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
  }

  return noErr;
}
- (void)applicationDidFinishLaunching:(UIApplication *)application
{
  // generate pcm tone: freq = 800 Hz, duration = 1 s, rise/fall time = 5 ms
  generateTone(_pcm, 800, 1000, SAMPLE_RATE, 5, 0.8);
  _index = 0;

  OSStatus status;
  AudioComponentInstance audioUnit;

  // Describe audio component
  AudioComponentDescription desc;
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_RemoteIO;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;

  // Get component
  AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

  // Get audio unit
  status = AudioComponentInstanceNew(inputComponent, &audioUnit);
  //checkStatus(status);

  UInt32 flag = 1;
  // Enable IO for playback
  status = AudioUnitSetProperty(audioUnit,
                                kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output,
                                kOutputBus,
                                &flag,
                                sizeof(flag));
  //checkStatus(status);

  // Describe format
  AudioStreamBasicDescription audioFormat;
  audioFormat.mSampleRate = SAMPLE_RATE;
  audioFormat.mFormatID = kAudioFormatLinearPCM;
  audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  audioFormat.mFramesPerPacket = 1;
  audioFormat.mChannelsPerFrame = 2;
  audioFormat.mBitsPerChannel = 16;
  audioFormat.mBytesPerPacket = 4;
  audioFormat.mBytesPerFrame = 4;

  // Apply format
  status = AudioUnitSetProperty(audioUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input,
                                kOutputBus,
                                &audioFormat,
                                sizeof(audioFormat));
  //checkStatus(status);

  // Set output callback
  AURenderCallbackStruct callbackStruct;
  callbackStruct.inputProc = playbackCallback;
  callbackStruct.inputProcRefCon = self;
  status = AudioUnitSetProperty(audioUnit,
                                kAudioUnitProperty_SetRenderCallback,
                                kAudioUnitScope_Global,
                                kOutputBus,
                                &callbackStruct,
                                sizeof(callbackStruct));

  // Initialize
  status = AudioUnitInitialize(audioUnit);

  // Start playing
  status = AudioOutputUnitStart(audioUnit);

  [window makeKeyAndVisible];
}
- (void)dealloc {
  [window release];
  [super dealloc];
}
@end
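One note on the listing: audioUnit is a local variable there, so nothing stops or disposes of the unit later. If you instead keep it in an instance variable, teardown would look roughly like this (a sketch, not part of the original posting):

// Assumes audioUnit was stored in an instance variable rather than a local.
AudioOutputUnitStop(audioUnit);
AudioUnitUninitialize(audioUnit);
AudioComponentInstanceDispose(audioUnit);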
What you probably want to do is set up an AudioQueue. It lets you fill a buffer with synthesized audio data in a callback. You would set up the AudioQueue to run in a new thread, like this:
#define BUFFER_SIZE 16384
#define BUFFER_COUNT 3
static AudioQueueRef audioQueue;
void SetupAudioQueue() {
  OSStatus err = noErr;

  // Setup the audio device: 44.1 kHz, 16-bit signed integer, 2-channel interleaved PCM.
  AudioStreamBasicDescription deviceFormat;
  deviceFormat.mSampleRate = 44100;
  deviceFormat.mFormatID = kAudioFormatLinearPCM;
  deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
  deviceFormat.mBytesPerPacket = 4;
  deviceFormat.mFramesPerPacket = 1;
  deviceFormat.mBytesPerFrame = 4;
  deviceFormat.mChannelsPerFrame = 2;
  deviceFormat.mBitsPerChannel = 16;
  deviceFormat.mReserved = 0;

  // Create a new output AudioQueue for the device.
  err = AudioQueueNewOutput(&deviceFormat, AudioQueueCallback, NULL,
                            CFRunLoopGetCurrent(), kCFRunLoopCommonModes,
                            0, &audioQueue);

  // Allocate buffers for the AudioQueue, and pre-fill them.
  for (int i = 0; i < BUFFER_COUNT; ++i) {
    AudioQueueBufferRef mBuffer;
    err = AudioQueueAllocateBuffer(audioQueue, BUFFER_SIZE, &mBuffer);
    if (err != noErr) break;
    AudioQueueCallback(NULL, audioQueue, mBuffer);
  }

  if (err == noErr) err = AudioQueueStart(audioQueue, NULL);
  if (err == noErr) CFRunLoopRun();
}
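Since SetupAudioQueue ends with CFRunLoopRun(), it blocks the calling thread, so you would invoke it from a dedicated thread. A minimal sketch of doing that (the wrapper method names are made up, and the manual autorelease pool matches the pre-ARC style of the code above):

// Hypothetical wrappers: spin up a dedicated thread whose run loop drives the queue.
- (void)startSynthesizedPlayback {
  [NSThread detachNewThreadSelector:@selector(audioQueueThread) toTarget:self withObject:nil];
}

- (void)audioQueueThread {
  NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
  SetupAudioQueue();   // does not return until the run loop is stopped
  [pool drain];
}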
Your callback function AudioQueueCallback will then be called whenever the AudioQueue needs more data. Implement it with something like:
void AudioQueueCallback(void* inUserData, AudioQueueRef inAQ,
                        AudioQueueBufferRef inBuffer) {
  void* pBuffer = inBuffer->mAudioData;
  UInt32 bytes = inBuffer->mAudioDataBytesCapacity;

  // Write at most <bytes> bytes of audio to <pBuffer> here.
  inBuffer->mAudioDataByteSize = actualNumberOfBytesWritten;

  OSStatus err = AudioQueueEnqueueBuffer(audioQueue, inBuffer, 0, NULL);
}
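To make that concrete, the fill step for the format configured above (16-bit signed, interleaved stereo) might look like the sketch below; the helper name and the global phase variable are my own, not part of the original answer.

#include <math.h>

static double gPhase = 0.0;   // running phase of the tone, in radians

// Fills frameCount interleaved stereo frames with a sine tone.
void FillWithSineTone(SInt16* samples, UInt32 frameCount,
                      double freq, double sampleRate, double gain) {
  for (UInt32 i = 0; i < frameCount; ++i) {
    SInt16 value = (SInt16)(sin(gPhase) * 32767.0 * gain);
    samples[2 * i]     = value;   // left channel
    samples[2 * i + 1] = value;   // right channel
    gPhase += 2.0 * M_PI * freq / sampleRate;
    if (gPhase > 2.0 * M_PI) gPhase -= 2.0 * M_PI;
  }
}

Inside AudioQueueCallback you would then call something like FillWithSineTone((SInt16*) pBuffer, bytes / 4, 440.0, 44100.0, 0.5); and set inBuffer->mAudioDataByteSize = bytes; before enqueueing the buffer.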