Record and play audio simultaneously in iOS

有刺的猬 2021-01-11 17:12

I am trying to play the recorded content back simultaneously while recording. Currently I am using AVAudioRecorder for recording and AVAudioPlayer for playback.

3 Answers
  • 2021-01-11 17:20

    This is achievable. Use this link and download the sample: https://code.google.com/p/ios-coreaudio-example/downloads/detail?name=Aruts.zip&can=2&q=

    That sample plays sound through the speaker but does not record it; I have implemented the recording functionality as well. The full code follows.

    In the .h file:

    #import <Foundation/Foundation.h>
    #import <AudioToolbox/AudioToolbox.h>
    
    #ifndef max
    #define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
    #endif
    
    #ifndef min
    #define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
    #endif
    
    
    @interface IosAudioController : NSObject {
        AudioComponentInstance audioUnit;
        AudioBuffer tempBuffer; // this will hold the latest data from the microphone
        ExtAudioFileRef             mAudioFileRef;
    }
    @property (readonly) ExtAudioFileRef mAudioFileRef;
    @property (readonly) AudioComponentInstance audioUnit;
    @property (readonly) AudioBuffer tempBuffer;
    
    - (void) start;
    - (void) stop;
    - (void) processAudio: (AudioBufferList*) bufferList;
    
    @end
    
    // setup a global iosAudio variable, accessible everywhere
    extern IosAudioController* iosAudio;
    

    In the .m file:

    #import "IosAudioController.h"
    #import <AudioToolbox/AudioToolbox.h>
    #import <AVFoundation/AVFoundation.h>
    #define kOutputBus 0
    #define kInputBus 1
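    // RemoteIO bus numbering: bus 0 feeds the hardware output (speaker),
    // bus 1 carries the hardware input (microphone).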
    
    IosAudioController* iosAudio;
    
    void checkStatus(OSStatus status){
        if (status) {
            printf("Status not 0! %d\n", (int)status);
    //      exit(1);
        }
    }
    
    
    
    
    static void printAudioUnitRenderActionFlags(AudioUnitRenderActionFlags * ioActionFlags)
    {
        if (*ioActionFlags == 0) {
            printf("AudioUnitRenderActionFlags(%lu) ", (unsigned long)*ioActionFlags);
            return;
        }
        printf("AudioUnitRenderActionFlags(%lu): ", (unsigned long)*ioActionFlags);
        if (*ioActionFlags & kAudioUnitRenderAction_PreRender)              printf("kAudioUnitRenderAction_PreRender ");
        if (*ioActionFlags & kAudioUnitRenderAction_PostRender)             printf("kAudioUnitRenderAction_PostRender ");
        if (*ioActionFlags & kAudioUnitRenderAction_OutputIsSilence)        printf("kAudioUnitRenderAction_OutputIsSilence ");
        if (*ioActionFlags & kAudioOfflineUnitRenderAction_Preflight)       printf("kAudioOfflineUnitRenderAction_Preflight ");
        if (*ioActionFlags & kAudioOfflineUnitRenderAction_Render)          printf("kAudioOfflineUnitRenderAction_Render ");
        if (*ioActionFlags & kAudioOfflineUnitRenderAction_Complete)        printf("kAudioOfflineUnitRenderAction_Complete ");
        if (*ioActionFlags & kAudioUnitRenderAction_PostRenderError)        printf("kAudioUnitRenderAction_PostRenderError ");
        if (*ioActionFlags & kAudioUnitRenderAction_DoNotCheckRenderArgs)   printf("kAudioUnitRenderAction_DoNotCheckRenderArgs ");
    }
    
    
    /**
     This callback is called when new audio data from the microphone is
     available.
     */
    static OSStatus recordingCallback(void *inRefCon, 
                                      AudioUnitRenderActionFlags *ioActionFlags, 
                                      const AudioTimeStamp *inTimeStamp, 
                                      UInt32 inBusNumber, 
                                      UInt32 inNumberFrames, 
                                      AudioBufferList *ioData) {
    
        double timeInSeconds = inTimeStamp->mSampleTime / 44100.00;
    
        printf("\n%fs inBusNumber: %u inNumberFrames: %u ", timeInSeconds, (unsigned int)inBusNumber, (unsigned int)inNumberFrames);
    
        printAudioUnitRenderActionFlags(ioActionFlags);
    
        // Because of the way our audio format (set up below) is chosen:
        // - we only need 1 buffer, since it is mono
        // - samples are 16 bits = 2 bytes
        // - 1 frame contains exactly 1 sample
    
        AudioBuffer buffer;
    
        buffer.mNumberChannels = 1;
        buffer.mDataByteSize = inNumberFrames * 2;
        buffer.mData = malloc( inNumberFrames * 2 );
    
        // Put the buffer in an AudioBufferList
        AudioBufferList bufferList;
        bufferList.mNumberBuffers = 1;
        bufferList.mBuffers[0] = buffer;
    
        // Then:
        // Obtain recorded samples
    
        OSStatus status;
    
        status = AudioUnitRender([iosAudio audioUnit], 
                                 ioActionFlags, 
                                 inTimeStamp, 
                                 inBusNumber, 
                                 inNumberFrames, 
                                 &bufferList);
        checkStatus(status);
    
        // Now, we have the samples we just read sitting in buffers in bufferList
        // Process the new data
        [iosAudio processAudio:&bufferList];
    
    
        // Append the freshly captured samples to the output file
        ExtAudioFileWriteAsync([iosAudio mAudioFileRef], inNumberFrames, &bufferList);
    
        // release the malloc'ed data in the buffer we created earlier
        free(bufferList.mBuffers[0].mData);
    
        return noErr;
    }
    
    
    
    
    /**
     This callback is called when the audioUnit needs new data to play through the
     speakers. If you don't have any, just don't write anything in the buffers
     */
    static OSStatus playbackCallback(void *inRefCon, 
                                     AudioUnitRenderActionFlags *ioActionFlags, 
                                     const AudioTimeStamp *inTimeStamp, 
                                     UInt32 inBusNumber, 
                                     UInt32 inNumberFrames, 
                                     AudioBufferList *ioData) {    
        // Notes: ioData contains buffers (may be more than one!)
        // Fill them up as much as you can. Remember to set the size value in each buffer to match how
        // much data is in the buffer.
    
        for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
            AudioBuffer buffer = ioData->mBuffers[i];
    
    //      NSLog(@"  Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);
    
            // copy temporary buffer data to output buffer
            UInt32 size = min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits
            memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
            buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer
    
            // uncomment to hear random noise
            /*
            UInt16 *frameBuffer = buffer.mData;
            for (int j = 0; j < inNumberFrames; j++) {
                frameBuffer[j] = rand();
            }
            */
    
        }
    
        return noErr;
    }
    
    @implementation IosAudioController
    
    @synthesize audioUnit, tempBuffer, mAudioFileRef;
    
    /**
     Initialize the audioUnit and allocate our own temporary buffer.
     The temporary buffer will hold the latest data coming in from the microphone,
     and will be copied to the output when this is requested.
     */
    - (id) init {
        self = [super init];
    
        OSStatus status;
    
        AVAudioSession *session = [AVAudioSession sharedInstance];
        NSLog(@"%f",session.preferredIOBufferDuration);
    
    
        // Describe audio component
        AudioComponentDescription desc;
        desc.componentType = kAudioUnitType_Output;
        desc.componentSubType = kAudioUnitSubType_RemoteIO;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    
        // Get component
        AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    
        // Get audio units
        status = AudioComponentInstanceNew(inputComponent, &audioUnit);
        checkStatus(status);
    
        // Enable IO for recording
        UInt32 flag = 1;
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioOutputUnitProperty_EnableIO, 
                                      kAudioUnitScope_Input, 
                                      kInputBus,
                                      &flag, 
                                      sizeof(flag));
        checkStatus(status);
    
        // Enable IO for playback
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioOutputUnitProperty_EnableIO, 
                                      kAudioUnitScope_Output, 
                                      kOutputBus,
                                      &flag, 
                                      sizeof(flag));
        checkStatus(status);
    
        // Describe format
        AudioStreamBasicDescription audioFormat;
        audioFormat.mSampleRate         = 44100.00;
        audioFormat.mFormatID           = kAudioFormatLinearPCM;
        audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        audioFormat.mFramesPerPacket    = 1;
        audioFormat.mChannelsPerFrame   = 1;
        audioFormat.mBitsPerChannel     = 16;
        audioFormat.mBytesPerPacket     = 2;
        audioFormat.mBytesPerFrame      = 2;
    
        // Apply format
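        // Note the scope/bus pairing: the format goes on the *output* scope of the
        // input bus (how mic data is delivered to us) and on the *input* scope of
        // the output bus (how we must hand data to the speaker).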
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioUnitProperty_StreamFormat, 
                                      kAudioUnitScope_Output, 
                                      kInputBus, 
                                      &audioFormat, 
                                      sizeof(audioFormat));
        checkStatus(status);
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioUnitProperty_StreamFormat, 
                                      kAudioUnitScope_Input, 
                                      kOutputBus, 
                                      &audioFormat, 
                                      sizeof(audioFormat));
        checkStatus(status);
    
    
        // Set input callback
        AURenderCallbackStruct callbackStruct;
        callbackStruct.inputProc = recordingCallback;
        callbackStruct.inputProcRefCon = self;
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioOutputUnitProperty_SetInputCallback, 
                                      kAudioUnitScope_Global, 
                                      kInputBus, 
                                      &callbackStruct, 
                                      sizeof(callbackStruct));
        checkStatus(status);
    
        // Set output callback
        callbackStruct.inputProc = playbackCallback;
        callbackStruct.inputProcRefCon = self;
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioUnitProperty_SetRenderCallback, 
                                      kAudioUnitScope_Global, 
                                      kOutputBus,
                                      &callbackStruct, 
                                      sizeof(callbackStruct));
        checkStatus(status);
    
        // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
        flag = 0;
        status = AudioUnitSetProperty(audioUnit, 
                                      kAudioUnitProperty_ShouldAllocateBuffer,
                                      kAudioUnitScope_Output, 
                                      kInputBus,
                                      &flag, 
                                      sizeof(flag));
        checkStatus(status);
    
        // set preferred buffer size
        Float32 audioBufferSize = (0.023220);
        UInt32 size = sizeof(audioBufferSize);
        status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                         size, &audioBufferSize);
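        // Note: AudioSessionSetProperty is deprecated on modern SDKs; the AVAudioSession
        // equivalent is [[AVAudioSession sharedInstance] setPreferredIOBufferDuration:0.023220 error:nil];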
    
        // Allocate our own buffer (1 channel, 16 bits per sample, thus 2 bytes per frame).
        // In practice the callbacks deliver 512 frames per buffer; if that changes, processAudio resizes it.
        tempBuffer.mNumberChannels = 1;
        tempBuffer.mDataByteSize = 512 * 2;
        tempBuffer.mData = malloc( 512 * 2 );
    
    
    
    
    
        NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
        NSString *documentsDirectory = [paths objectAtIndex:0];
        NSString *destinationFilePath = [[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory];
        NSLog(@">>> %@\n", destinationFilePath);
    
        CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
    
        OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
        CFRelease(destinationURL);
    
        NSAssert(setupErr == noErr, @"Couldn't create file for writing");
    
    
        setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
        NSAssert(setupErr == noErr, @"Couldn't set client data format on audio file");
    
    
        setupErr = ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL); // priming call: allocates the async write buffers
        NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
    
        // Initialise
        status = AudioUnitInitialize(audioUnit);
        checkStatus(status);
    
      //   [NSTimer scheduledTimerWithTimeInterval:5 target:self selector:@selector(stopRecording:) userInfo:nil repeats:NO];
    
        return self;
    }
    
    /**
     Start the audioUnit. This means data will be provided from
     the microphone, and requested for feeding to the speakers, by
     use of the provided callbacks.
     */
    - (void) start {
        OSStatus status = AudioOutputUnitStart(audioUnit);
        checkStatus(status);
    }
    
    /**
     Stop the audioUnit
     */
    - (void) stop {
        OSStatus status = AudioOutputUnitStop(audioUnit);
        checkStatus(status);
        [self stopRecording:nil];
    }
    
    /**
     Change this function to decide what is done with incoming
     audio data from the microphone.
     Right now we copy it to our own temporary buffer.
     */
    - (void) processAudio: (AudioBufferList*) bufferList{
        AudioBuffer sourceBuffer = bufferList->mBuffers[0];
    
        // fix tempBuffer size if it's the wrong size
        if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
            free(tempBuffer.mData);
            tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
            tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
        }
    
        // copy incoming audio data to temporary buffer
        memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
    }
    
    
    - (void)stopRecording:(NSTimer*)theTimer
    {
        printf("\nstopRecording\n");
        OSStatus status = ExtAudioFileDispose(mAudioFileRef);
        printf("OSStatus(ExtAudioFileDispose): %ld\n", (long)status);
    }
    
    /**
     Clean up.
     */
    - (void) dealloc {
        AudioUnitUninitialize(audioUnit);
        free(tempBuffer.mData);
        [super dealloc]; // must be called last, after we are done with our ivars
    }
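
    To drive the class, configure the audio session for simultaneous input and output before starting the unit. A minimal usage sketch (the session-setup calls are my addition, not part of the class above):

    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    [session setActive:YES error:nil];

    iosAudio = [[IosAudioController alloc] init]; // the global declared in the .h
    [iosAudio start];  // mic data starts flowing to the speaker and to output.caf
    // ... later ...
    [iosAudio stop];   // stops the unit and finalizes output.caf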
    

    This should definitely help.

    Another good way of doing this is to download Audio Touch from https://github.com/tkzic/audiograph and look at the Echo function of that application: it repeats your voice as you speak, but it does not record the audio. Add the recording functionality to it as described below.

    In MixerHostAudio.h:
    
    @property (readwrite) ExtAudioFileRef   mRecordFile;
    -(void)Record;
    -(void)StopRecord;
    
    
    
    In MixerHostAudio.m:
    

    // Add these two functions to this class

    -(void)Record{
        NSString *completeFileNameAndPath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingPathComponent:@"Record.wav"];
        // create the URL that the recording object needs to reference the file
        CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL, (const UInt8 *)[completeFileNameAndPath UTF8String], strlen([completeFileNameAndPath UTF8String]), false);

        AudioStreamBasicDescription dstFormat, clientFormat;
        memset(&dstFormat, 0, sizeof(dstFormat));
        memset(&clientFormat, 0, sizeof(clientFormat));
    
        AudioFileTypeID fileTypeId = kAudioFileWAVEType;
        UInt32 size = sizeof(AudioStreamBasicDescription);
        dstFormat.mFormatID = kAudioFormatLinearPCM;
    
        // setup the output file format
        dstFormat.mSampleRate = 44100.0; // set sample rate
    
        // create a 16-bit, 44.1 kHz stereo format
        dstFormat.mChannelsPerFrame = 2;
        dstFormat.mBitsPerChannel = 16;
        dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 4;
        dstFormat.mFramesPerPacket = 1;
        dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
    
        // get the client format directly from the mixer unit's input bus
        UInt32 asbdSize = sizeof (AudioStreamBasicDescription);
        AudioUnitGetProperty(mixerUnit,
                             kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Input,
                             0, // input bus
                             &clientFormat,
                             &asbdSize);
    
        ExtAudioFileCreateWithURL(audioFileURL, fileTypeId, &dstFormat, NULL, kAudioFileFlags_EraseFile, &mRecordFile);
        CFRelease(audioFileURL);

        printf("recording\n");
        ExtAudioFileSetProperty(mRecordFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
        // call this once; the first call allocates the async write buffers
        ExtAudioFileWriteAsync(mRecordFile, 0, NULL);
    
    
    }
    
    
    
    -(void)StopRecord{
        ExtAudioFileDispose(mRecordFile);
    }
    
    
    
    // In the micLineInCallback function, add this line at the end, just before return noErr;

      ExtAudioFileWriteAsync([THIS mRecordFile], inNumberFrames, ioData);
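
    In context, the tail of that callback would look roughly like this (a sketch: the callback name and the THIS refcon follow audiograph's conventions, which I am assuming here):

    static OSStatus micLineInCallback(void                       *inRefCon,
                                      AudioUnitRenderActionFlags *ioActionFlags,
                                      const AudioTimeStamp       *inTimeStamp,
                                      UInt32                     inBusNumber,
                                      UInt32                     inNumberFrames,
                                      AudioBufferList            *ioData)
    {
        MixerHostAudio *THIS = (MixerHostAudio *)inRefCon;
        // ... existing render/processing code ...
        ExtAudioFileWriteAsync([THIS mRecordFile], inNumberFrames, ioData);
        return noErr;
    }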
    

    Then call these functions from MixerHostViewController.m, inside the - (IBAction) playOrStop: (id) sender method, for example:
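
    A sketch of that wiring (the isPlaying flag and the audioObject property are my assumptions about the MixerHost view controller's existing state, not code from the project):

    - (IBAction) playOrStop: (id) sender {
        if (audioObject.isPlaying) {
            [audioObject StopRecord];   // finalize Record.wav
            [audioObject stopAUGraph];
        } else {
            [audioObject Record];       // create the file and prime the writer
            [audioObject startAUGraph];
        }
        // ... existing button-state handling ...
    }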

  • 2021-01-11 17:25

    You'll need to use Audio Units if you want real-time monitoring of your audio input; see the sketch after the links below.

    • Apple's Audio Unit Hosting Guide
    • Tutorial on configuring the Remote I/O Audio Unit
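
    For orientation, getting hold of the Remote I/O unit (the unit both documents configure) looks like this:

    AudioComponentDescription desc = {0};
    desc.componentType         = kAudioUnitType_Output;
    desc.componentSubType      = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    AudioComponentInstance remoteIOUnit;
    AudioComponentInstanceNew(comp, &remoteIOUnit);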
  • 2021-01-11 17:25

    The RemoteIO Audio Unit can be used for simultaneous record and play. There are plenty of examples of recording with RemoteIO (aurioTouch) and of playing with it. Just enable both unit input and unit output, and handle both buffer callbacks. See an example here.
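
    Enabling both sides of the unit looks roughly like this (remoteIOUnit is a previously created Remote I/O instance; bus 1 is the microphone input, bus 0 is the speaker output):

    UInt32 one = 1;
    // enable capture on the input element
    AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Input, 1, &one, sizeof(one));
    // enable playback on the output element
    AudioUnitSetProperty(remoteIOUnit, kAudioOutputUnitProperty_EnableIO,
                         kAudioUnitScope_Output, 0, &one, sizeof(one));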
