Sine Wave Playback Example of Audio Queues
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <stdlib.h>

#import <CoreAudio/CoreAudio.h>
#import <AudioToolbox/AudioToolbox.h>
typedef struct soundState {
    float framesBuffered;
    float sampleRate;
    float volume;
    float leftFrequency;
    float rightFrequency;
    int bytesPerFrame;
} SoundState;
void streamCallback(void* userData, AudioQueueRef audioQueue, AudioQueueBufferRef buffer) {
    SoundState* soundState = (SoundState*)userData;
    // Retrieve how many frames the buffer can hold
    int bufferFrameCapacity = buffer->mAudioDataBytesCapacity / soundState->bytesPerFrame;
    // Grab a pointer to the sample buffer
    int16_t* sampleBuffer = (int16_t*)buffer->mAudioData;
    // Fill the entire buffer with a sine wave
    for(int i = 0; i < bufferFrameCapacity; i++) {
        // The elapsed time in seconds follows from how many frames have already been generated
        float t = soundState->framesBuffered / soundState->sampleRate;
        float x1 = 2 * 3.141592f * t * soundState->leftFrequency;
        float x2 = 2 * 3.141592f * t * soundState->rightFrequency;
        // Left channel, scaled by the requested volume
        *(sampleBuffer++) = (int16_t)(32767.0f * soundState->volume * sin(x1));
        // Right channel, scaled by the requested volume
        *(sampleBuffer++) = (int16_t)(32767.0f * soundState->volume * sin(x2));
        // Increase the number of frames that have been buffered
        soundState->framesBuffered += 1;
    }
    // Record how many bytes of valid audio the buffer now holds
    buffer->mAudioDataByteSize = bufferFrameCapacity * soundState->bytesPerFrame;
    // Enqueue the buffer into the Audio Queue for playback
    AudioQueueEnqueueBuffer(audioQueue, buffer, 0, NULL);
}
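
// Note (not part of the original gist): framesBuffered is a float, so after roughly 2^24
// frames the "+= 1" increment can no longer change its value and the generated time stops
// advancing. A common alternative is to accumulate phase per channel instead, along these
// lines (leftPhase would be a hypothetical extra field in SoundState, wrapped back into
// [0, 2*pi) so it stays small):
//
//     soundState->leftPhase += 2.0f * 3.141592f * soundState->leftFrequency / soundState->sampleRate;
//     soundState->leftPhase = fmodf(soundState->leftPhase, 2.0f * 3.141592f);
//     *(sampleBuffer++) = (int16_t)(32767.0f * soundState->volume * sinf(soundState->leftPhase));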
int main(int argc, const char * argv[]) {
    @autoreleasepool {
        AudioStreamBasicDescription streamDesc;
        streamDesc.mSampleRate = 48000.0f; // Choose 48 kHz
        streamDesc.mFormatID = kAudioFormatLinearPCM;
        streamDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        streamDesc.mBitsPerChannel = 16; // Choose 16-bit signed integers
        streamDesc.mChannelsPerFrame = 2; // Choose 2 channels per frame
        streamDesc.mFramesPerPacket = 1; // 1 frame per packet; this number will be different for different formats
        streamDesc.mBytesPerFrame = streamDesc.mBitsPerChannel / 8 * streamDesc.mChannelsPerFrame;
        streamDesc.mBytesPerPacket = streamDesc.mBytesPerFrame * streamDesc.mFramesPerPacket;

        SoundState state;
        state.framesBuffered = 0.0f;
        state.sampleRate = streamDesc.mSampleRate;
        state.volume = 0.1f; // Lower the volume; do not set this value too high while wearing headphones!
        state.leftFrequency = 150.0f; // 150 Hz for left channel
        state.rightFrequency = 250.0f; // 250 Hz for right channel
        state.bytesPerFrame = streamDesc.mBytesPerFrame;

        AudioQueueRef audioQueue = NULL;
        OSStatus err = AudioQueueNewOutput(&streamDesc,
                                           &streamCallback,
                                           &state, // User data, passed to the callback as (void*)
                                           NULL, NULL, 0, // Callback run loop, run-loop mode, and reserved flags; the defaults use an internal Audio Queue thread
                                           &audioQueue);
        assert(!err);
        // Generate buffers holding at most 1/16th of a second of data
        int bufferSize = streamDesc.mBytesPerFrame * (streamDesc.mSampleRate / 16);
        AudioQueueBufferRef audioQueueBuffers[2];
        err = AudioQueueAllocateBuffer(audioQueue, bufferSize, &(audioQueueBuffers[0]));
        assert(!err);
        err = AudioQueueAllocateBuffer(audioQueue, bufferSize, &(audioQueueBuffers[1]));
        assert(!err);

        // Prime the buffers. This will also queue the buffers
        // into the Audio Queue since we are calling the stream
        // callback.
        streamCallback(&state, audioQueue, audioQueueBuffers[0]);
        streamCallback(&state, audioQueue, audioQueueBuffers[1]);

        // Start the audio queue
        AudioQueueStart(audioQueue, NULL);

        // This will keep the program alive long enough to
        // hear some sound.
        usleep(2000000);

        AudioQueueDispose(audioQueue, YES); // Destroy immediately
    }
    return EXIT_SUCCESS;
}
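
A couple of notes beyond the file itself. On macOS this can usually be built with something along the lines of clang main.m -framework Foundation -framework AudioToolbox -o sine (the file name and the exact framework flags here are assumptions; adjust them for your toolchain). Also, instead of scaling every sample by volume inside the callback, the Audio Queue can apply the gain itself; a minimal sketch, assuming the queue has been created as above, is to set its volume parameter before starting it:

    // Sketch (not in the original gist): let the Audio Queue scale its own output.
    // kAudioQueueParam_Volume is a linear gain from 0.0 (silent) to 1.0 (unity).
    AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, state.volume);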