コード例 #1
0
ファイル: iphone_audio.c プロジェクト: gameblabla/temper
int app_OpenSound()
{
  Float64 sampleRate = 44100.0;
  int i;
  UInt32 bufferBytes;
  Uint32 err;
    
  app_MuteSound();
  
  soundInit = 0;
  
  if(!config.enable_sound)
    return 0;

  in.mDataFormat.mSampleRate = sampleRate;
  in.mDataFormat.mFormatID = kAudioFormatLinearPCM;
  in.mDataFormat.mFormatFlags =
   kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  in.mDataFormat.mBytesPerPacket = 4;
  in.mDataFormat.mFramesPerPacket = 2;
  in.mDataFormat.mBytesPerFrame = 2;
  in.mDataFormat.mChannelsPerFrame = 1;
  in.mDataFormat.mBitsPerChannel = 16;
    
  // Pre-buffer before we turn on audio
  err = AudioQueueNewOutput(&in.mDataFormat, AQBufferCallback,
   NULL, NULL, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode,
   0, &in.queue);
  
  bufferBytes = IPHONE_AUDIO_BUFFER_SIZE;
  
  for(i = 0; i < IPHONE_AUDIO_BUFFERS; i++)
  {
    err = AudioQueueAllocateBuffer(in.queue, bufferBytes, &in.mBuffers[i]);
    in.mBuffers[i]->mAudioDataByteSize = IPHONE_AUDIO_BUFFER_SIZE;
    // "Prime" by calling the callback once per buffer
    AudioQueueEnqueueBuffer(in.queue, in.mBuffers[i], 0, NULL);
  }
  
  soundInit = 1;
  err = AudioQueueStart(in.queue, NULL);
  
  return 0;
}
コード例 #2
0
// Audio thread entry point: builds a signed 16-bit interleaved PCM output
// queue, primes kNumBuffers buffers through the render callback, services
// the queue on this thread's run loop until the loop is stopped, then
// tears everything down.
u32 AudioPluginOSX::AudioThread(void * arg)
{
	AudioPluginOSX * plugin = static_cast<AudioPluginOSX *>(arg);

	AudioStreamBasicDescription format;

	format.mSampleRate       = kOutputFrequency;
	format.mFormatID         = kAudioFormatLinearPCM;
	format.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	format.mBitsPerChannel   = 8 * sizeof(s16);
	format.mChannelsPerFrame = kNumChannels;
	format.mBytesPerFrame    = sizeof(s16) * kNumChannels;
	format.mFramesPerPacket  = 1;
	format.mBytesPerPacket   = format.mBytesPerFrame * format.mFramesPerPacket;
	format.mReserved         = 0;

	AudioQueueRef			queue;
	AudioQueueBufferRef		buffers[kNumBuffers];
	AudioQueueNewOutput(&format, &AudioCallback, plugin, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue);

	for (u32 i = 0; i < kNumBuffers; ++i)
	{
		AudioQueueAllocateBuffer(queue, kAudioQueueBufferLength, &buffers[i]);

		buffers[i]->mAudioDataByteSize = kAudioQueueBufferLength;

		// Prime each buffer by invoking the render callback directly.
		AudioCallback(plugin, queue, buffers[i]);
	}

	AudioQueueStart(queue, NULL);

	CFRunLoopRun();

	// Fix: buffers were previously freed AFTER AudioQueueDispose(), i.e.
	// AudioQueueFreeBuffer() was called on an already-disposed queue.
	// Stop synchronously so no callback still holds a buffer, release the
	// buffers while the queue is alive, then dispose the queue.
	AudioQueueStop(queue, true);

	for (u32 i = 0; i < kNumBuffers; ++i)
	{
		AudioQueueFreeBuffer(queue, buffers[i]);
		buffers[i] = NULL;
	}

	AudioQueueDispose(queue, false);

	return 0;
}
コード例 #3
0
bool IPhoneSoundDevice::Init()
{
    // Initialize the default audio session object to tell it
    // to allow background music, and to tell us when audio
    // gets resumed (like if a phone call comes in, iphone takes
    // over audio. If the user then ignores the phone call, the
    // audio needs to be turned on again.

    AudioSessionInitialize(NULL, NULL, wi::InterruptionListener, this);
    UInt32 category = kAudioSessionCategory_UserInterfaceSoundEffects;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
            sizeof(category), &category);
    AudioSessionSetActive(true);

    // Set up streaming

    AudioStreamBasicDescription desc;
    desc.mSampleRate = 8000;
    desc.mFormatID = kAudioFormatLinearPCM;
    desc.mFormatFlags = kAudioFormatFlagIsPacked;
    desc.mBytesPerPacket = 1;
    desc.mFramesPerPacket = 1;
    desc.mBytesPerFrame = 1;
    desc.mChannelsPerFrame = 1;
    desc.mBitsPerChannel = 8;

    OSStatus err = AudioQueueNewOutput(&desc, AudioCallback, this,
            NULL,
            kCFRunLoopCommonModes,
            0, &m_haq);
    if (err != 0) {
        return false;
    }

    for (int i = 0; i < kcBuffers; i++) {
        err = AudioQueueAllocateBuffer(m_haq, kcbBuffer, &m_apaqb[i]);
        if (err != 0) {
           return false;
        }
    }

    return true;
}
コード例 #4
0
ファイル: aqsnd.c プロジェクト: LaughingAngus/linphone-vs2008
/* Allocate the playback ("write") buffers for the filter's output audio
 * queue.  Allocation failures are logged but do not abort the loop. */
void setupWrite(MSFilter * f)
{
	AQData *d = (AQData *) f->data;
	OSStatus err;
	int i = 0;

	while (i < kNumberAudioOutDataBuffers) {
		err = AudioQueueAllocateBuffer(d->writeQueue,
					       d->writeBufferByteSize,
					       &d->writeBuffers[i]);
		if (err != noErr) {
			ms_error("setupWrite:AudioQueueAllocateBuffer %d", err);
		}
		++i;
	}
}
コード例 #5
0
ファイル: osx-audio.c プロジェクト: also/spotfm
/*
 * Initialize the player's audio machinery: the sample FIFO and its lock,
 * the internal state block, a 44.1 kHz interleaved signed 16-bit stereo
 * PCM AudioQueue, and BUFFER_COUNT pre-allocated (not yet enqueued)
 * buffers.  Playback itself is started elsewhere.
 */
void audio_init(audio_player_t *player) {
    int i;
    state_t *state;

    TAILQ_INIT(&player->af.q);
    player->af.qlen = 0;

    pthread_mutex_init(&player->af.mutex, NULL);
    pthread_cond_init(&player->af.cond, NULL);

	player->internal_state = malloc(sizeof(state_t));
	/* Fix: the malloc result was previously used unchecked. */
	if (player->internal_state == NULL) {
		fprintf(stderr, "audio_init: out of memory\n");
		return;
	}
	state = (state_t *) player->internal_state;
    bzero(state, sizeof(state_t));

    /* Interleaved signed 16-bit stereo PCM at 44.1 kHz. */
    state->desc.mFormatID = kAudioFormatLinearPCM;
    state->desc.mFormatFlags = kAudioFormatFlagIsSignedInteger	| kAudioFormatFlagIsPacked;
    state->desc.mSampleRate = 44100;
    state->desc.mChannelsPerFrame = 2;
    state->desc.mFramesPerPacket = 1;
    state->desc.mBytesPerFrame = sizeof (short) * state->desc.mChannelsPerFrame;
    state->desc.mBytesPerPacket = state->desc.mBytesPerFrame;
    state->desc.mBitsPerChannel = (state->desc.mBytesPerFrame*8) / state->desc.mChannelsPerFrame;
    state->desc.mReserved = 0;

    /* One second of audio per buffer. */
    state->buffer_size = state->desc.mBytesPerFrame * state->desc.mSampleRate;

    if (noErr != AudioQueueNewOutput(&state->desc, audio_callback, player, NULL, NULL, 0, &state->queue)) {
		fprintf(stderr, "audioqueue error\n");
		return;
    }

	/* Get notified (stopped_callback) when the queue stops running. */
	AudioQueueAddPropertyListener(state->queue, kAudioQueueProperty_IsRunning, stopped_callback, player);

    // Start some empty playback so we'll get the callbacks that fill in the actual audio.
    for (i = 0; i < BUFFER_COUNT; ++i) {
		AudioQueueAllocateBuffer(state->queue, state->buffer_size, &state->buffers[i]);
		state->free_buffers[i] = state->buffers[i];
    }
	state->free_buffer_count = BUFFER_COUNT;
	state->playing = false;
	state->should_stop = false;
}
コード例 #6
0
/* Set up the global playback state: the audio FIFO and its lock, a
 * 44.1 kHz interleaved signed 16-bit stereo PCM AudioQueue, and
 * BUFFER_COUNT primed buffers, then start the queue. */
void audio_init(audio_fifo_t *af)
{
    int i;
    AudioStreamBasicDescription *fmt = &state.desc;

    TAILQ_INIT(&af->q);
    af->qlen = 0;

    pthread_mutex_init(&af->mutex, NULL);
    pthread_cond_init(&af->cond, NULL);

    bzero(&state, sizeof(state));

    fmt->mFormatID         = kAudioFormatLinearPCM;
    fmt->mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    fmt->mSampleRate       = 44100;
    fmt->mChannelsPerFrame = 2;
    fmt->mFramesPerPacket  = 1;
    fmt->mBytesPerFrame    = sizeof(short) * fmt->mChannelsPerFrame;
    fmt->mBytesPerPacket   = fmt->mBytesPerFrame;
    fmt->mBitsPerChannel   = (fmt->mBytesPerFrame * 8) / fmt->mChannelsPerFrame;
    fmt->mReserved         = 0;

    state.buffer_size = fmt->mBytesPerFrame * kSampleCountPerBuffer;

    if (AudioQueueNewOutput(&state.desc, audio_callback, af,
                            NULL, NULL, 0, &state.queue) != noErr) {
	printf("audioqueue error\n");
	return;
    }

    // Start some empty playback so we'll get the callbacks that fill in the actual audio.
    for (i = 0; i < BUFFER_COUNT; ++i) {
        AudioQueueAllocateBuffer(state.queue, state.buffer_size, &state.buffers[i]);
        state.buffers[i]->mAudioDataByteSize = state.buffer_size;
        AudioQueueEnqueueBuffer(state.queue, state.buffers[i], 0, NULL);
    }

    if (AudioQueueStart(state.queue, NULL) != noErr)
        puts("AudioQueueStart failed");
}
コード例 #7
0
// Render an audio file offline through an AudioQueue: decode sourceURL via
// AudioQueueOfflineRender and write the captured PCM into a 16-bit CAF
// file at destinationURL.  Errors are reported via CAXException.
void DoAQOfflineRender(CFURLRef sourceURL, CFURLRef destinationURL) 
{
    // main audio queue code
	try {
		AQTestInfo myInfo;
        
		myInfo.mDone = false;
		myInfo.mFlushed = false;
		myInfo.mCurrentPacket = 0;
		
        // get the source file
        XThrowIfError(AudioFileOpenURL(sourceURL, 0x01/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile), "AudioFileOpen failed");
			
		UInt32 size = sizeof(myInfo.mDataFormat);
		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyDataFormat, &size, &myInfo.mDataFormat), "couldn't get file's data format");
		
		printf ("File format: "); myInfo.mDataFormat.Print();

        // create a new audio queue output
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat,      // The data format of the audio to play. For linear PCM, only interleaved formats are supported.
                                          AQTestBufferCallback,     // A callback function to use with the playback audio queue.
                                          &myInfo,                  // A custom data structure for use with the callback function.
                                          CFRunLoopGetCurrent(),    // The event loop on which the callback function pointed to by the inCallbackProc parameter is to be called.
                                                                    // If you specify NULL, the callback is invoked on one of the audio queue's internal threads.
                                          kCFRunLoopCommonModes,    // The run loop mode in which to invoke the callback function specified in the inCallbackProc parameter. 
                                          0,                        // Reserved for future use. Must be 0.
                                          &myInfo.mQueue),          // On output, the newly created playback audio queue object.
                                          "AudioQueueNew failed");

		UInt32 bufferByteSize;
		
		// we need to calculate how many packets we read at a time and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a second of audio based on this format
			CalculateBytesForTime(myInfo.mDataFormat, maxPacketSize, 1.0/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR) {
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			} else {
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
            }
				
			printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// if the file has a magic cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		OSStatus result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// channel layout?
		OSStatus err = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, NULL);
		AudioChannelLayout *acl = NULL;
		if (err == noErr && size > 0) {
			acl = (AudioChannelLayout *)malloc(size);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &size, acl), "get audio file's channel layout");
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, acl, size), "set channel layout on queue");
		}

		//allocate the input read buffer
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffer), "AudioQueueAllocateBuffer");

		// prepare a canonical interleaved capture format
		CAStreamBasicDescription captureFormat;
		captureFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		captureFormat.SetAUCanonical(myInfo.mDataFormat.mChannelsPerFrame, true); // interleaved
		XThrowIfError(AudioQueueSetOfflineRenderFormat(myInfo.mQueue, &captureFormat, acl), "set offline render format");			
		
		ExtAudioFileRef captureFile;
        
		// prepare a 16-bit int file format, sample channel count and sample rate
		CAStreamBasicDescription dstFormat;
		dstFormat.mSampleRate = myInfo.mDataFormat.mSampleRate;
		dstFormat.mChannelsPerFrame = myInfo.mDataFormat.mChannelsPerFrame;
		dstFormat.mFormatID = kAudioFormatLinearPCM;
		dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian
		dstFormat.mBitsPerChannel = 16;
		dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 2 * dstFormat.mChannelsPerFrame;
		dstFormat.mFramesPerPacket = 1;
		
		// create the capture file
        XThrowIfError(ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &dstFormat, acl, kAudioFileFlags_EraseFile, &captureFile), "ExtAudioFileCreateWithURL");
		
        // set the capture file's client format to be the canonical format from the queue
		XThrowIfError(ExtAudioFileSetProperty(captureFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &captureFormat), "set ExtAudioFile client format");
		
		// allocate the capture buffer, just keep it at half the size of the enqueue buffer
        // we don't ever want to pull any faster than we can push data in for render
        // this 2:1 ratio keeps the AQ Offline Render happy
		const UInt32 captureBufferByteSize = bufferByteSize / 2;
		
        AudioQueueBufferRef captureBuffer;
		AudioBufferList captureABL;
		
		XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, captureBufferByteSize, &captureBuffer), "AudioQueueAllocateBuffer");
		
        captureABL.mNumberBuffers = 1;
		captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
		captureABL.mBuffers[0].mNumberChannels = captureFormat.mChannelsPerFrame;

		// lets start playing now - stop is called in the AQTestBufferCallback when there's
		// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		AudioTimeStamp ts;
		ts.mFlags = kAudioTimeStampSampleTimeValid;
		ts.mSampleTime = 0;

		// we need to call this once asking for 0 frames
		XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, 0), "AudioQueueOfflineRender");

		// we need to enqueue a buffer after the queue has started
		AQTestBufferCallback(&myInfo, myInfo.mQueue, myInfo.mBuffer);

		while (true) {
			UInt32 reqFrames = captureBufferByteSize / captureFormat.mBytesPerFrame;
			
            XThrowIfError(AudioQueueOfflineRender(myInfo.mQueue, &ts, captureBuffer, reqFrames), "AudioQueueOfflineRender");
			
            captureABL.mBuffers[0].mData = captureBuffer->mAudioData;
			captureABL.mBuffers[0].mDataByteSize = captureBuffer->mAudioDataByteSize;
			UInt32 writeFrames = captureABL.mBuffers[0].mDataByteSize / captureFormat.mBytesPerFrame;
			
            // Fix: writeFrames is a UInt32; printing it with %ld is undefined
            // behavior on LP64.  Cast to int and use %d like the other fields.
            printf("t = %.f: AudioQueueOfflineRender:  req %d fr/%d bytes, got %d fr/%d bytes\n", ts.mSampleTime, (int)reqFrames, (int)captureBufferByteSize, (int)writeFrames, (int)captureABL.mBuffers[0].mDataByteSize);
            
			XThrowIfError(ExtAudioFileWrite(captureFile, writeFrames, &captureABL), "ExtAudioFileWrite");
			
            if (myInfo.mFlushed) break;
			
			ts.mSampleTime += writeFrames;
		}

		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		// Fix: the error message previously (and wrongly) said
		// "AudioQueueDispose(false) failed" for this AudioFileClose call.
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioFileClose failed");
		XThrowIfError(ExtAudioFileDispose(captureFile), "ExtAudioFileDispose failed");

		if (myInfo.mPacketDescs) delete [] myInfo.mPacketDescs;
		if (acl) free(acl);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
    
    return;
}
コード例 #8
0
// ____________________________________________________________________________________
// main program
// Command-line recorder: parses options (-c channels, -d data format,
// -r sample rate, -s seconds, -v verbose), records from the default input
// device into the named file via an AudioQueue input, and stops either
// after the requested duration or when the user presses <return>.
int	main(int argc, const char *argv[])
{
	const char *recordFileName = NULL;
	int i, nchannels, bufferByteSize;
	float seconds = 0;
	AudioStreamBasicDescription recordFormat;
	MyRecorder aqr;
	UInt32 size;
	CFURLRef url;
    OSStatus err = noErr;
	
	// fill structures with 0/NULL
	memset(&recordFormat, 0, sizeof(recordFormat));
	memset(&aqr, 0, sizeof(aqr));
	
	// parse arguments
	for (i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		
		if (arg[0] == '-') {
			switch (arg[1]) {
			case 'c':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%d", &nchannels) != 1)
					usage();
				recordFormat.mChannelsPerFrame = nchannels;
				break;
			case 'd':
				if (++i == argc) MissingArgument(arg);
				if (StrTo4CharCode(argv[i], &recordFormat.mFormatID) == 0)
					ParseError(arg, argv[i]);
				break;
			case 'r':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%lf", &recordFormat.mSampleRate) != 1)
					ParseError(arg, argv[i]);
				break;
			case 's':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%f", &seconds) != 1)
					ParseError(arg, argv[i]);
				break;
			case 'v':
				aqr.verbose = TRUE;
				break;
			default:
				fprintf(stderr, "unknown option: '%s'\n\n", arg);
				usage();
			}
		} else if (recordFileName != NULL) {
			fprintf(stderr, "may only specify one file to record\n\n");
			usage();
		} else
			recordFileName = arg;
	}
	if (recordFileName == NULL) // no record file path provided
		usage();
	
	// determine file format from the file name's extension
	AudioFileTypeID audioFileType = kAudioFileCAFType;	// default to CAF
	CFStringRef cfRecordFileName = CFStringCreateWithCString(NULL, recordFileName, kCFStringEncodingUTF8);
	InferAudioFileFormatFromFilename(cfRecordFileName, &audioFileType);
	CFRelease(cfRecordFileName);

	// adapt record format to hardware and apply defaults
	if (recordFormat.mSampleRate == 0.)
		MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);

	if (recordFormat.mChannelsPerFrame == 0)
		recordFormat.mChannelsPerFrame = 2;
	
	if (recordFormat.mFormatID == 0 || recordFormat.mFormatID == kAudioFormatLinearPCM) {
		// default to PCM, 16 bit int
		recordFormat.mFormatID = kAudioFormatLinearPCM;
		recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		recordFormat.mBitsPerChannel = 16;
		if (MyFileFormatRequiresBigEndian(audioFileType, recordFormat.mBitsPerChannel))
			recordFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame =
			(recordFormat.mBitsPerChannel / 8) * recordFormat.mChannelsPerFrame;
		recordFormat.mFramesPerPacket = 1;
		recordFormat.mReserved = 0;
	}

	try {
		// create the queue
		XThrowIfError(AudioQueueNewInput(
			&recordFormat,
			MyInputBufferHandler,
			&aqr /* userData */,
			NULL /* run loop */, NULL /* run loop mode */,
			0 /* flags */, &aqr.queue), "AudioQueueNewInput failed");

		// get the record format back from the queue's audio converter --
		// the file may require a more specific stream description than was necessary to create the encoder.
		size = sizeof(recordFormat);
		XThrowIfError(AudioQueueGetProperty(aqr.queue, kAudioConverterCurrentOutputStreamDescription,
			&recordFormat, &size), "couldn't get queue's format");

		// convert recordFileName from C string to CFURL
		url = CFURLCreateFromFileSystemRepresentation(NULL, (Byte *)recordFileName, strlen(recordFileName), FALSE);
		XThrowIfError(!url, "couldn't create record file");
        
		// create the audio file
        err = AudioFileCreateWithURL(url, audioFileType, &recordFormat, kAudioFileFlags_EraseFile,
                                              &aqr.recordFile);
        CFRelease(url); // release first, and then bail out on error
		XThrowIfError(err, "AudioFileCreateWithURL failed");
		

		// copy the cookie first to give the file object as much info as we can about the data going in
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);

		// allocate and enqueue buffers
		bufferByteSize = MyComputeRecordBufferSize(&recordFormat, aqr.queue, 0.5);	// enough bytes for half a second
		for (i = 0; i < kNumberRecordBuffers; ++i) {
			AudioQueueBufferRef buffer;
			XThrowIfError(AudioQueueAllocateBuffer(aqr.queue, bufferByteSize, &buffer),
				"AudioQueueAllocateBuffer failed");
			XThrowIfError(AudioQueueEnqueueBuffer(aqr.queue, buffer, 0, NULL),
				"AudioQueueEnqueueBuffer failed");
		}
		
		// record
		if (seconds > 0) {
			// user requested a fixed-length recording (specified a duration with -s)
			// to time the recording more accurately, watch the queue's IsRunning property
			XThrowIfError(AudioQueueAddPropertyListener(aqr.queue, kAudioQueueProperty_IsRunning,
				MyPropertyListener, &aqr), "AudioQueueAddPropertyListener failed");
			
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			// give the queue up to 10 seconds to report that it is running
			CFAbsoluteTime waitUntil = CFAbsoluteTimeGetCurrent() + 10;

			// wait for the started notification
			// (MyPropertyListener sets aqr.queueStartStopTime when IsRunning flips)
			while (aqr.queueStartStopTime == 0.) {
				CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.010, FALSE);
				if (CFAbsoluteTimeGetCurrent() >= waitUntil) {
					fprintf(stderr, "Timeout waiting for the queue's IsRunning notification\n");
					goto cleanup;
				}
			}
			printf("Recording...\n");
			CFAbsoluteTime stopTime = aqr.queueStartStopTime + seconds;
			CFAbsoluteTime now = CFAbsoluteTimeGetCurrent();
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, stopTime - now, FALSE);
		} else {
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			
			// and wait
			printf("Recording, press <return> to stop:\n");
			getchar();
		}

		// end recording
		printf("* recording done *\n");
		
		aqr.running = FALSE;
		XThrowIfError(AudioQueueStop(aqr.queue, TRUE), "AudioQueueStop failed");
		
		// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);
		
cleanup:
		AudioQueueDispose(aqr.queue, TRUE);
		AudioFileClose(aqr.recordFile);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return e.mError;
	}
		
	return 0;
}
コード例 #9
0
ファイル: darwin_cahal_device.c プロジェクト: bcarr092/CAHAL
/*
 * Allocate CAHAL_QUEUE_NUMBER_OF_QUEUES playback buffers for the output
 * audio queue described by in_asbd, store them in out_context, and prime
 * each one via darwin_playback_callback.
 *
 * Returns noErr on success, or the OSStatus of the first failing
 * AudioQueueAllocateBuffer call.
 */
OSStatus
darwin_configure_output_audio_queue_buffer  (
                                    AudioStreamBasicDescription*  in_asbd,
                                    cahal_playback_info*          in_playback,
                                    darwin_context*               out_context,
                                    AudioQueueRef                 io_audio_queue
                                          )
{
  UINT32 bytes_per_buffer = 0;
  OSStatus result         = noErr;
  
  if( NULL != in_asbd )
  {
    out_context->number_of_buffers = CAHAL_QUEUE_NUMBER_OF_QUEUES;
    
    /* NOTE(review): when cpc_safe_malloc fails, result remains noErr and
       the function reports success with no buffers -- confirm callers can
       cope, or map that path to an error status. */
    if  (
         CPC_ERROR_CODE_NO_ERROR
         == cpc_safe_malloc (
                 ( void** ) &out_context->audio_buffers,
                 sizeof( AudioQueueBufferRef ) * out_context->number_of_buffers
                             )
         )
    {
      result =
      darwin_compute_bytes_per_buffer  (
                                        in_asbd,
                                        CAHAL_QUEUE_BUFFER_DURATION,
                                        &bytes_per_buffer
                                        );
      
      if( noErr == result )
      {
        for( UINT32 i = 0; i < CAHAL_QUEUE_NUMBER_OF_QUEUES; i++ )
        {
          AudioQueueBufferRef buffer;
          
          result =
          AudioQueueAllocateBuffer  (
                                     io_audio_queue,
                                     bytes_per_buffer,
                                     &buffer
                                     );
          
          if( noErr == result )
          {
            out_context->audio_buffers[ i ] = buffer;
            
            darwin_playback_callback( in_playback, io_audio_queue, buffer );
          }
          else
          {
            CPC_ERROR(
                      "Error allocating a new audio queue buffer: %d",
                      result
                      );
            
            CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
            
            /* Fix: stop on the first failure.  Previously the loop kept
               going and a later successful iteration overwrote result,
               so the function could return noErr despite a failure. */
            break;
          }
        }
      }
    }
  }
  else
  {
    CPC_LOG_STRING( CPC_LOG_LEVEL_ERROR, "Invalid basic stream description" );
  }
  
  return( result );
}
コード例 #10
0
ファイル: recorder.c プロジェクト: AmesianX/baresip
/*
 * coreaudio_recorder_alloc - allocate and start a CoreAudio capture source.
 *
 * Creates an AudioQueue input in signed 16-bit packed PCM matching prm
 * (rate/channels), allocates and enqueues one buffer per ptime interval,
 * and starts the queue.  On success *stp receives the new source; on
 * failure the partially built state is dereferenced and an errno-style
 * code is returned.
 */
int coreaudio_recorder_alloc(struct ausrc_st **stp, const struct ausrc *as,
			     struct media_ctx **ctx,
			     struct ausrc_prm *prm, const char *device,
			     ausrc_read_h *rh, ausrc_error_h *errh, void *arg)
{
	AudioStreamBasicDescription fmt;
	struct ausrc_st *st;
	uint32_t sampc, bytc, i;
	OSStatus status;
	int err;

	(void)ctx;
	(void)device;
	(void)errh;

	if (!stp || !as || !prm)
		return EINVAL;

	st = mem_zalloc(sizeof(*st), ausrc_destructor);
	if (!st)
		return ENOMEM;

	st->ptime = prm->ptime;
	st->as  = as;
	st->rh  = rh;
	st->arg = arg;

	/* samples per ptime interval; 2 bytes per 16-bit sample */
	sampc = prm->srate * prm->ch * prm->ptime / 1000;
	bytc  = sampc * 2;

	err = pthread_mutex_init(&st->mutex, NULL);
	if (err)
		goto out;

	err = audio_session_enable();
	if (err)
		goto out;

	fmt.mSampleRate       = (Float64)prm->srate;
	fmt.mFormatID         = kAudioFormatLinearPCM;
	fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
		                kAudioFormatFlagIsPacked;
#ifdef __BIG_ENDIAN__
	fmt.mFormatFlags     |= kAudioFormatFlagIsBigEndian;
#endif

	fmt.mFramesPerPacket  = 1;
	fmt.mBytesPerFrame    = prm->ch * 2;
	fmt.mBytesPerPacket   = prm->ch * 2;
	fmt.mChannelsPerFrame = prm->ch;
	fmt.mBitsPerChannel   = 16;
	fmt.mReserved         = 0;  /* Fix: fmt is stack-allocated and this
				       field was previously left uninitialized */

	status = AudioQueueNewInput(&fmt, record_handler, st, NULL,
				     kCFRunLoopCommonModes, 0, &st->queue);
	if (status) {
		warning("coreaudio: AudioQueueNewInput error: %i\n", status);
		err = ENODEV;
		goto out;
	}

	/* Allocate and enqueue the capture buffers. */
	for (i=0; i<ARRAY_SIZE(st->buf); i++)  {

		status = AudioQueueAllocateBuffer(st->queue, bytc,
						  &st->buf[i]);
		if (status)  {
			err = ENOMEM;
			goto out;
		}

		AudioQueueEnqueueBuffer(st->queue, st->buf[i], 0, NULL);
	}

	status = AudioQueueStart(st->queue, NULL);
	if (status)  {
		warning("coreaudio: AudioQueueStart error %i\n", status);
		err = ENODEV;
		goto out;
	}

 out:
	if (err)
		mem_deref(st);
	else
		*stp = st;

	return err;
}
コード例 #11
0
bool UBAudioQueueRecorder::init(const QString& waveInDeviceName)
{
    if(mIsRecording)
    {
        setLastErrorMessage("Already recording ...");
        return false;
    }

    OSStatus err = AudioQueueNewInput (&sAudioFormat, UBAudioQueueRecorder::audioQueueInputCallback,
                    this, 0, 0, 0, &mQueue);

    if (err)
    {
        setLastErrorMessage(QString("Cannot acquire audio input %1").arg(err));
        mQueue = 0;
        close();
        return false;
    }

    //qDebug() << "init with waveInDeviceName ..." << waveInDeviceName;

    if (waveInDeviceName.length() > 0 && waveInDeviceName != "Default")
    {
        AudioDeviceID deviceID = deviceIDFromDeviceName(waveInDeviceName);

        if (deviceID)
        {
            QString deviceUID = deviceUIDFromDeviceID(deviceID);
            if (deviceUID.length() > 0)
            {
                CFStringRef sr = CFStringCreateWithCString(0, deviceUID.toUtf8().constData(), kCFStringEncodingUTF8);

                err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_CurrentDevice, &sr, sizeof(CFStringRef));
                if (err)
                {
                    setLastErrorMessage(QString("Cannot set audio input %1 (%2)").arg(waveInDeviceName).arg(err));
                }
                else
                {
                    qDebug() << "recording with input" << waveInDeviceName;
                }
            }
            else
            {
                setLastErrorMessage(QString("Cannot find audio input device UID with ID %1 (%2)").arg(deviceID).arg(err));
            }
        }
        else
        {
            setLastErrorMessage(QString("Cannot find audio input with name %1 (%2)").arg(waveInDeviceName).arg(err));
        }
    }

    UInt32 monitor = true;

    err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_EnableLevelMetering , &monitor, sizeof(UInt32));
    if (err)
    {
        qWarning() << QString("Cannot set recording level monitoring %1").arg(err);
    }

    int nbBuffers = 6;
    mSampleBufferSize = sAudioFormat.mSampleRate *  sAudioFormat.mChannelsPerFrame
                * 2 * mBufferLengthInMs / 1000; // 44.1 Khz * stereo * 16bit * buffer length

    for (int i = 0; i < nbBuffers; i++)
    {
        AudioQueueBufferRef outBuffer;
        err = AudioQueueAllocateBuffer(mQueue, mSampleBufferSize, &outBuffer);

        if (err)
        {
            setLastErrorMessage(QString("Cannot allocate audio buffer %1").arg(err));
            close();
            return false;
        }

        mBuffers << outBuffer;
    }

    foreach(AudioQueueBufferRef buffer, mBuffers)
    {
        err = AudioQueueEnqueueBuffer(mQueue, buffer, 0, 0);
        if (err)
        {
            setLastErrorMessage(QString("Cannot enqueue audio buffer %1").arg(err));
            close();
            return false;
        }
    }
コード例 #12
0
/*auNew---------------------------------------------------*/
/* Allocate and initialise a new Audio object.
 *
 * sizeofstarself : byte size of the concrete struct to allocate (lets
 *                  "subclasses" allocate extra trailing state).
 * callback       : user render/capture callback stored on the object.
 * isOutput       : YES for playback, NO for capture.
 * numChannels    : requested interleaved channel count (the ALSA path
 *                  may substitute the nearest supported value).
 *
 * Returns the new object, or NULL on any failure (auDestroy frees the
 * partially-built object and returns NULL).
 */
Audio* auAlloc(int sizeofstarself, auAudioCallback_t callback, BOOL isOutput, unsigned numChannels)
{
  Audio* self = (Audio*)calloc(1, sizeofstarself);

  if(self != NULL)
    {
      self->isOutput                     = isOutput;
      self->isPlaying                    = NO;
      self->audioCallback                = callback;
      self->numChannels                  = numChannels;
      self->targetMasterVolume           = 1;
      auSetMasterVolumeSmoothing(self, 0.9999);
      
#if defined __APPLE__
      int i;
      OSStatus error;
      /* Interleaved 32-bit float PCM at AU_SAMPLE_RATE. */
      self->dataFormat.mSampleRate       = AU_SAMPLE_RATE;
      self->dataFormat.mBytesPerPacket   = 4 * numChannels;
      self->dataFormat.mFramesPerPacket  = 1;
      self->dataFormat.mBytesPerFrame    = 4 * numChannels;
      self->dataFormat.mBitsPerChannel   = 32;
      self->dataFormat.mChannelsPerFrame = numChannels;
      self->dataFormat.mFormatID         = kAudioFormatLinearPCM;
      //self->dataFormat.mFormatFlags    = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
      self->dataFormat.mFormatFlags      = kLinearPCMFormatFlagIsFloat;
      
      /* NULL run loop / mode: the queue uses its own internal thread. */
      if(isOutput)
        error = AudioQueueNewOutput(&(self->dataFormat), auAudioOutputCallback, self, NULL, NULL, 0, &(self->queue));
      else
        error = AudioQueueNewInput (&(self->dataFormat), auAudioInputCallback, self, NULL, NULL, 0, &(self->queue));
      if(error) 
        {
          fprintf(stderr, "Audio.c: unable to allocate Audio queue\n"); 
          return auDestroy(self);
        }
      else //(!error)
        {
          unsigned bufferNumBytes = AU_BUFFER_NUM_FRAMES * numChannels * sizeof(auSample_t);
          for(i=0; i<AU_NUM_AUDIO_BUFFERS; i++)
            {
              error = AudioQueueAllocateBuffer(self->queue, bufferNumBytes, &((self->buffers)[i]));
              if(error) 
                {
                  self = auDestroy(self);
                  fprintf(stderr, "Audio.c: allocate buffer error\n");
                  break;
                }
            }
        }
        
#elif defined __linux__
      int error = 0;

      snd_pcm_hw_params_t  *hardwareParameters;
      snd_pcm_hw_params_alloca(&hardwareParameters);

      //untested for input stream...
      const char* name = (isOutput) ? AU_SPEAKER_DEVICE_NAME : AU_MIC_DEVICE_NAME;
      unsigned direction = (isOutput) ? SND_PCM_STREAM_PLAYBACK : SND_PCM_STREAM_CAPTURE;
      
      /* Fix: this call previously hard-coded AU_SPEAKER_DEVICE_NAME and
       * SND_PCM_STREAM_PLAYBACK, ignoring the name/direction selected
       * above, so a capture object could never open the microphone. */
      error = snd_pcm_open(&(self->device), name, direction, 0);
      if(error < 0) fprintf(stderr, "Audio.c: Unable to open audio device %s: %s\n", name, snd_strerror(error));
            
      if(error >= 0)
        {
          error = snd_pcm_hw_params_any(self->device, hardwareParameters);
            if(error < 0) fprintf(stderr, "Audio.c: Unable to get a generic hardware configuration: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params_set_access(self->device, hardwareParameters, SND_PCM_ACCESS_RW_INTERLEAVED);
          if(error < 0) fprintf(stderr, "Audio.c: Device does not support interleaved audio access: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params_set_format(self->device, hardwareParameters, /*SND_PCM_FORMAT_S16*/ SND_PCM_FORMAT_FLOAT) ;
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set sample format: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          //self->numChannels = AU_NUM_CHANNELS;
          error = snd_pcm_hw_params_set_channels_near(self->device, hardwareParameters, &self->numChannels);
          if(error < 0) fprintf(stderr, "Audio.c: unable to set the number of channels to %i: %s\n", self->numChannels, snd_strerror(error));
          else if(self->numChannels != numChannels)
            fprintf(stderr, "Audio.c: device does not support %u numChannels, %i will be used instead\n", numChannels, self->numChannels);  
        }
      if(error >= 0)
        {
          unsigned int sampleRate = AU_SAMPLE_RATE;
          error = snd_pcm_hw_params_set_rate_near(self->device, hardwareParameters, &sampleRate, 0);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set the sample rate to %u: %s\n", sampleRate, snd_strerror(error));
          else if(sampleRate != AU_SAMPLE_RATE)
            fprintf(stderr, "Audio.c: device does not support %i sample rate, %u will be used instead\n", (int)AU_SAMPLE_RATE, sampleRate);
        }
      if(error >= 0)
        {
          int dir = 0;
          self->bufferNumFrames = AU_BUFFER_NUM_FRAMES; //the buffer I give to ALSA
          error = snd_pcm_hw_params_set_period_size_near(self->device, hardwareParameters, &(self->bufferNumFrames), &dir);
          if(error < 0) fprintf(stderr, "Audio.cpp: Unable to set the sample buffer size to %lu: %s\n", self->bufferNumFrames, snd_strerror(error));
          else if(self->bufferNumFrames != AU_BUFFER_NUM_FRAMES)
            fprintf(stderr, "Audio.c: device does not support %i period size, %lu will be used instead\n", AU_BUFFER_NUM_FRAMES, self->bufferNumFrames);
        }
      if(error >= 0)
        {
          unsigned long int internalBufferNumFrames = self->bufferNumFrames * AU_NUM_AUDIO_BUFFERS; //the buffer ALSA uses internally
          error = snd_pcm_hw_params_set_buffer_size_near(self->device, hardwareParameters, &internalBufferNumFrames);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set the internal buffer size to %lu: %s\n", internalBufferNumFrames, snd_strerror(error));
          else if(internalBufferNumFrames != AU_NUM_AUDIO_BUFFERS * self->bufferNumFrames)
              fprintf(stderr, "Audio.c: device does not support %lu internal buffer size, %lu will be used instead\n", AU_NUM_AUDIO_BUFFERS * self->bufferNumFrames, internalBufferNumFrames);
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params(self->device, hardwareParameters);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to load the hardware parameters into the device: %s\n", snd_strerror(error));
        }
      if(error >= 0)
       {
         unsigned int size = sizeof(auSample_t) * self->numChannels * self->bufferNumFrames;
         self->sampleBuffer = (auSample_t*)malloc(size);
         if(self->sampleBuffer == NULL)
           {
              error = -1;
              perror("Audio.c: Unable to allocate audio buffers \n");
           }
       }
      if (error < 0) self = auDestroy(self);
#endif
    }
  else perror("Audio.c: Unable to create new Audio object");
  
  srandom((unsigned)time(NULL));

  return self;
}
コード例 #13
0
// Play a raw PCM buffer of `len` bytes through a freshly created
// AudioQueue.  Returns a nonzero OSStatus if queue setup fails;
// once playback starts this function blocks forever (as before),
// keeping the stack-allocated callback state `aqc` alive for the
// queue's internal thread.
int WavPlayer::PlayBuffer(void *pcmbuffer, unsigned long len)
{
    // Value-initialise the callback struct: previously it was left
    // uninitialised, so mFormatFlags (only assigned for 16-bit samples)
    // and mReserved contained stack garbage.
    AQCallbackStruct aqc = {};
    UInt32 err, bufferSize;
    int i;

    aqc.DataFormat.mSampleRate = SampleRate;
    aqc.DataFormat.mFormatID = kAudioFormatLinearPCM;
    if(BitsPerSample == 16)
    {
        aqc.DataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
			| kAudioFormatFlagIsPacked;
    }
    aqc.DataFormat.mBytesPerPacket = NumChannels * (BitsPerSample / 8);
    aqc.DataFormat.mFramesPerPacket = 1;
    aqc.DataFormat.mBytesPerFrame = NumChannels * (BitsPerSample / 8);
    aqc.DataFormat.mChannelsPerFrame = NumChannels;
    aqc.DataFormat.mBitsPerChannel = BitsPerSample;
    aqc.FrameCount = SampleRate / 60;   // ~one 60 Hz frame of audio per buffer
    aqc.SampleLen = (UInt32)(len);
    aqc.PlayPtr = 0;
    aqc.PCMBuffer = static_cast<unsigned char*>(pcmbuffer);

    // NULL run loop: the queue runs its callback on an internal thread.
    err = AudioQueueNewOutput(&aqc.DataFormat,
                              aqBufferCallback,
                              &aqc,
                              NULL,
                              kCFRunLoopCommonModes,
                              0,
                              &aqc.Queue);
    if(err)
    {
        return err;
    }

    bufferSize = aqc.FrameCount * aqc.DataFormat.mBytesPerPacket;

    for(i = 0; i < AUDIO_BUFFERS; i++)
    {
        err = AudioQueueAllocateBuffer(aqc.Queue, bufferSize,
                                       &aqc.Buffers[i]);
        if(err)
        {
            return err;
        }
        // Prime each buffer by invoking the callback once directly.
        aqBufferCallback(&aqc, aqc.Queue, aqc.Buffers[i]);
    }

    err = AudioQueueStart(aqc.Queue, NULL);
    if(err)
    {
        return err;
    }

    // Fix: the original spun in an empty while(true) loop, burning 100%
    // of a core (its trailing sleep(1) was unreachable).  Sleep inside
    // the loop instead; behaviour is otherwise unchanged — the function
    // still never returns once playback has started.
    while(true)
    {
        sleep(1);
    }
    return 0;
}
コード例 #14
0
ファイル: minimal.cpp プロジェクト: Dipri/imame4all
// (Re)open the AudioQueue used for emulator sound output.
// rate/bits/stereo come from the game driver; the queue is always
// 16-bit signed interleaved PCM, with one video frame's worth of
// samples (rate / fps) per buffer.
// Returns 0 on success, -1 if the output queue could not be created.
int sound_open_AudioQueue(int rate, int bits, int stereo){

    Float64 sampleRate = 44100.0;
    int i;
    int fps;
    
    // Cache the parameters so the queue can later be reopened with the
    // same settings.
    stereo_cached = stereo;
    rate_cached = rate;
    bits_cached = bits;

    // Only a fixed set of sample rates is honoured; anything else
    // falls back to 44100 Hz.
    if(rate==32000)
    	sampleRate = 32000.0;
    else if(rate==22050)
    	sampleRate = 22050.0;
    else if(rate==11025)
    	sampleRate = 11025.0;

	//SQ Roundup for games like Galaxians
    fps = ceil(Machine->drv->frames_per_second);

    // Tear down any previously opened queue before rebuilding.
    if( soundInit == 1 )
    {
    	sound_close_AudioQueue();
    }

    soundInit = 0;
    memset (&in.mDataFormat, 0, sizeof (in.mDataFormat));
    in.mDataFormat.mSampleRate = sampleRate;
    in.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    in.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger  | kAudioFormatFlagIsPacked;
    in.mDataFormat.mBytesPerPacket =  (stereo == 1 ? 4 : 2 );
    in.mDataFormat.mFramesPerPacket = 1;
    in.mDataFormat.mBytesPerFrame = (stereo ==  1? 4 : 2);
    in.mDataFormat.mChannelsPerFrame = (stereo == 1 ? 2 : 1);
    in.mDataFormat.mBitsPerChannel = 16;
	in.frameCount = rate / fps;

    /* Pre-buffer before we turn on audio */
    UInt32 err;
    // NULL run loop: the queue drives AQBufferCallback from its own
    // internal thread.
    err = AudioQueueNewOutput(&in.mDataFormat,
							  AQBufferCallback,
							  NULL,
							  NULL,
							  kCFRunLoopCommonModes,
							  0,
							  &in.queue);
    if(err)
    {
        // Fix: previously a failed queue creation fell through to
        // AudioQueueAllocateBuffer on an invalid queue handle.
        return -1;
    }

    unsigned long bufferSize;
    bufferSize = in.frameCount * in.mDataFormat.mBytesPerFrame;
    
	for (i=0; i<AUDIO_BUFFERS; i++)
	{
		err = AudioQueueAllocateBuffer(in.queue, bufferSize, &in.mBuffers[i]);
		in.mBuffers[i]->mAudioDataByteSize = bufferSize;
		// Enqueue silence so the queue has data the moment it starts.
		AudioQueueEnqueueBuffer(in.queue, in.mBuffers[i], 0, NULL);
	}

	soundInit = 1;
	err = AudioQueueStart(in.queue, NULL);

	return 0;

}
コード例 #15
0
ファイル: player.c プロジェクト: FihlaTV/BareSip-Android
/*
 * Allocate a CoreAudio playback stream for baresip.
 *
 * stp    : receives the new auplay_st on success
 * ap     : audio-player base object (ref-counted)
 * prm    : requested sample rate / channels / sample format / frame size
 * device : unused (CoreAudio default output device is always used)
 * wh,arg : write handler invoked from play_handler to pull samples
 *
 * Returns 0 on success or an errno-style code; on error the partially
 * built state is released via the destructor (mem_deref).
 */
int coreaudio_player_alloc(struct auplay_st **stp, struct auplay *ap,
			   struct auplay_prm *prm, const char *device,
			   auplay_write_h *wh, void *arg)
{
	AudioStreamBasicDescription fmt;
	struct auplay_st *st;
	uint32_t bytc, i;
	OSStatus status;
	int err;

	(void)device;

	st = mem_zalloc(sizeof(*st), auplay_destructor);
	if (!st)
		return ENOMEM;

	st->ap  = mem_ref(ap);
	st->wh  = wh;
	st->arg = arg;

	err = pthread_mutex_init(&st->mutex, NULL);
	if (err)
		goto out;

	err = audio_session_enable();
	if (err)
		goto out;

	/* Packed signed-integer PCM, interleaved, one frame per packet. */
	fmt.mSampleRate       = (Float64)prm->srate;
	fmt.mFormatID         = audio_fmt(prm->fmt);
	fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
		                kAudioFormatFlagIsPacked;
#ifdef __BIG_ENDIAN__
	fmt.mFormatFlags     |= kAudioFormatFlagIsBigEndian;
#endif
	fmt.mFramesPerPacket  = 1;
	fmt.mBytesPerFrame    = prm->ch * bytesps(prm->fmt);
	fmt.mBytesPerPacket   = prm->ch * bytesps(prm->fmt);
	fmt.mChannelsPerFrame = prm->ch;
	fmt.mBitsPerChannel   = 8*bytesps(prm->fmt);
	fmt.mReserved         = 0; /* fix: was left uninitialized (stack garbage) */

	/* NULL run loop: the queue calls play_handler on its own thread. */
	status = AudioQueueNewOutput(&fmt, play_handler, st, NULL,
				     kCFRunLoopCommonModes, 0, &st->queue);
	if (status) {
		re_fprintf(stderr, "AudioQueueNewOutput error: %i\n", status);
		err = ENODEV;
		goto out;
	}

	bytc = prm->frame_size * bytesps(prm->fmt);

	/* Pre-fill every buffer with silence and enqueue it so playback
	   has data the moment the queue starts. */
	for (i=0; i<ARRAY_SIZE(st->buf); i++)  {

		status = AudioQueueAllocateBuffer(st->queue, bytc,
						  &st->buf[i]);
		if (status)  {
			err = ENOMEM;
			goto out;
		}

		st->buf[i]->mAudioDataByteSize = bytc;

		memset(st->buf[i]->mAudioData, 0,
		       st->buf[i]->mAudioDataByteSize);

		(void)AudioQueueEnqueueBuffer(st->queue, st->buf[i], 0, NULL);
	}

	status = AudioQueueStart(st->queue, NULL);
	if (status)  {
		re_fprintf(stderr, "AudioQueueStart error %i\n", status);
		err = ENODEV;
		goto out;
	}

 out:
	if (err)
		mem_deref(st);
	else
		*stp = st;

	return err;
}
コード例 #16
0
ファイル: player.c プロジェクト: Yoonster/dhun
void playFile(const char* filePath) {

  CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation(NULL,
                                                                  (UInt8*) filePath,
                                                                  strlen (filePath),
                                                                  false);


  OSStatus result = AudioFileOpenURL(audioFileURL,
                                     fsRdPerm,
                                     0,
                                     &aqData.mAudioFile);

  CFRelease (audioFileURL);

  UInt32 dataFormatSize = sizeof (aqData.mDataFormat);

  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyDataFormat,
                       &dataFormatSize,
                       &aqData.mDataFormat);

  AudioQueueNewOutput(&aqData.mDataFormat,
                      HandleOutputBuffer,
                      &aqData,
                      CFRunLoopGetCurrent(),
                      kCFRunLoopCommonModes,
                      0,
                      &aqData.mQueue);

  UInt32 maxPacketSize;
  UInt32 propertySize = sizeof (maxPacketSize);
  AudioFileGetProperty(aqData.mAudioFile,
                       kAudioFilePropertyPacketSizeUpperBound,
                       &propertySize,
                       &maxPacketSize);


  DeriveBufferSize(&aqData.mDataFormat,
                   maxPacketSize,
                   0.5,
                   &aqData.bufferByteSize,
                   &aqData.mNumPacketsToRead);

  bool isFormatVBR = (aqData.mDataFormat.mBytesPerPacket == 0 ||
                      aqData.mDataFormat.mFramesPerPacket == 0);

  if (isFormatVBR) {
    // LOG("%s\n","VBR");
    aqData.mPacketDescs =
      (AudioStreamPacketDescription*)
      malloc (aqData.mNumPacketsToRead * sizeof (AudioStreamPacketDescription));
  } else {
    aqData.mPacketDescs = NULL;
  }

  UInt32 cookieSize = sizeof (UInt32);
  bool couldNotGetProperty =
    AudioFileGetPropertyInfo (aqData.mAudioFile,
                              kAudioFilePropertyMagicCookieData,
                              &cookieSize,
                              NULL);

  if (!couldNotGetProperty && cookieSize) {
    char* magicCookie = (char *) malloc (cookieSize);

    AudioFileGetProperty (aqData.mAudioFile,
                          kAudioFilePropertyMagicCookieData,
                          &cookieSize,
                          magicCookie);

    AudioQueueSetProperty (aqData.mQueue,
                           kAudioQueueProperty_MagicCookie,
                           magicCookie,
                           cookieSize);

    free (magicCookie);
  }

  aqData.mCurrentPacket = 0;
  aqData.mIsRunning = true;

  //LOG("%d\n", aqData.mNumPacketsToRead);
  for (int i = 0; i < kNumberBuffers; ++i) {
    AudioQueueAllocateBuffer (aqData.mQueue,
                              aqData.bufferByteSize,
                              &aqData.mBuffers[i]);

    HandleOutputBuffer (&aqData,
                        aqData.mQueue,
                        aqData.mBuffers[i]);
  }

  Float32 gain = 1.0;
  // Optionally, allow user to override gain setting here
  AudioQueueSetParameter (aqData.mQueue,
                          kAudioQueueParam_Volume,
                          gain);


  //LOG("%s\n","Starting play");


  // IMPORTANT NOTE : This value must be set
  // Before the call to HandleOutputBuffer
  //a qData.mIsRunning = true;

  AudioQueueStart (aqData.mQueue,
                   NULL);

}
コード例 #17
0
ファイル: mf_rdpsnd.c プロジェクト: 4hosi/FreeRDP
/*
 * Called when the RDPSND channel is activated: set up a CoreAudio
 * input queue capturing 44.1 kHz 16-bit stereo and start recording.
 * Captured buffers are delivered to mf_peer_rdpsnd_input_callback,
 * which forwards them over the channel via recorderState.snd_context.
 */
static void mf_peer_rdpsnd_activated(rdpsnd_server_context* context)
{
	printf("RDPSND Activated\n");
    
    printf("Let's create an audio queue for input!\n");
    
    OSStatus status;
    
    /* 16-bit signed native-endian stereo PCM: 4 bytes per frame,
       one frame per packet. */
    recorderState.dataFormat.mSampleRate = 44100.0;
    recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
    recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
    recorderState.dataFormat.mBytesPerPacket = 4;
    recorderState.dataFormat.mFramesPerPacket = 1;
    recorderState.dataFormat.mBytesPerFrame = 4;
    recorderState.dataFormat.mChannelsPerFrame = 2;
    recorderState.dataFormat.mBitsPerChannel = 16;
    
    recorderState.snd_context = context;
    
    status = AudioQueueNewInput(&recorderState.dataFormat,
                                mf_peer_rdpsnd_input_callback,
                                &recorderState,
                                NULL,
                                kCFRunLoopCommonModes,
                                0,
                                &recorderState.queue);
    
    if (status != noErr)
    {
        printf("Failed to create a new Audio Queue. Status code: %d\n", (int) status);
        /* Fix: previously execution continued and used the invalid
           queue handle in every call below. */
        return;
    }
    
    /* Read back the fully-specified format the queue actually uses. */
    UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
    
    AudioQueueGetProperty(recorderState.queue,
                          kAudioConverterCurrentInputStreamDescription,
                          &recorderState.dataFormat,
                          &dataFormatSize);
    
    /* Size capture buffers for ~50 ms of audio. */
    mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
    
    printf("Preparing a set of buffers...");
    
    for (int i = 0; i < snd_numBuffers; ++i)
    {
        AudioQueueAllocateBuffer(recorderState.queue,
                                 recorderState.bufferByteSize,
                                 &recorderState.buffers[i]);
        
        AudioQueueEnqueueBuffer(recorderState.queue,
                                recorderState.buffers[i],
                                0,
                                NULL);
    }
    
    printf("done\n");
    
    printf("recording...\n");
    
    recorderState.currentPacket = 0;
    recorderState.isRunning = true;
    
    /* NOTE(review): format index 4 is hard-coded here — presumably a
       known entry in the server format list; verify against setup. */
    context->SelectFormat(context, 4);
    context->SetVolume(context, 0x7FFF, 0x7FFF);
    
    AudioQueueStart (recorderState.queue, NULL);

}
コード例 #18
0
ファイル: aqplay.cpp プロジェクト: AdamDiment/CocoaSampleCode
// aqplay: command-line audio file player built on AudioQueue.
// Parses options (-v volume, -t max play time, -r rate scaling,
// -q time-pitch quality, -d debug prints), opens the file with the
// AudioFile API, selects a decodable format from the file's format
// list, primes an output queue, and pumps the run loop until playback
// completes or the requested duration elapses.  Errors are reported
// via CAXException thrown by XThrowIfError.
int main (int argc, const char * argv[]) 
{
#if TARGET_OS_WIN32
	InitializeQTML(0L);
#endif
	const char *fpath = NULL;
	Float32 volume = 1;
	Float32 duration = -1;        // -1: play to end of file
	Float32 currentTime = 0.0;
	Float32 rate = 0;             // 0: no rate-scaled playback
	int rQuality = 0;
	
	bool doPrint = false;
	// Simple argv scan: the one non-dash argument is the file path;
	// each option matches either its first letter or its long form.
	for (int i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		if (arg[0] != '-') {
			if (fpath != NULL) {
				fprintf(stderr, "may only specify one file to play\n");
				usage();
			}
			fpath = arg;
		} else {
			arg += 1;
			if (arg[0] == 'v' || !strcmp(arg, "-volume")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];
				sscanf(arg, "%f", &volume);
			} else if (arg[0] == 't' || !strcmp(arg, "-time")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &duration);
			} else if (arg[0] == 'r' || !strcmp(arg, "-rate")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%f", &rate);
			} else if (arg[0] == 'q' || !strcmp(arg, "-rQuality")) {
				if (++i == argc)
					MissingArgument();
				arg = argv[i];				
				sscanf(arg, "%d", &rQuality);
			} else if (arg[0] == 'h' || !strcmp(arg, "-help")) {
				usage();
			} else if (arg[0] == 'd' || !strcmp(arg, "-debug")) {
				doPrint = true;
			} else {
				fprintf(stderr, "unknown argument: %s\n\n", arg - 1);
				usage();
			}
		}
	}

	if (fpath == NULL)
		usage();
	
	if (doPrint)
		printf ("Playing file: %s\n", fpath);
	
	try {
		AQTestInfo myInfo;
		
		CFURLRef sndFile = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)fpath, strlen(fpath), false);
		// (the !sndFile argument is redundant inside this branch but
		// preserves the throw-with-message idiom)
		if (!sndFile) XThrowIfError (!sndFile, "can't parse file path");
			
		OSStatus result = AudioFileOpenURL (sndFile, 0x1/*fsRdPerm*/, 0/*inFileTypeHint*/, &myInfo.mAudioFile);
		CFRelease (sndFile);
						
		XThrowIfError(result, "AudioFileOpen failed");
		
		UInt32 size;
		XThrowIfError(AudioFileGetPropertyInfo(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, NULL), "couldn't get file's format list info");
		UInt32 numFormats = size / sizeof(AudioFormatListItem);
		AudioFormatListItem *formatList = new AudioFormatListItem [ numFormats ];

		XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyFormatList, &size, formatList), "couldn't get file's data format");
		numFormats = size / sizeof(AudioFormatListItem); // we need to reassess the actual number of formats when we get it
		if (numFormats == 1) {
				// this is the common case
			myInfo.mDataFormat = formatList[0].mASBD;
			
				// see if there is a channel layout (multichannel file)
			result = AudioFileGetPropertyInfo(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, NULL);
			if (result == noErr && myInfo.mChannelLayoutSize > 0) {
				myInfo.mChannelLayout = (AudioChannelLayout *)new char [myInfo.mChannelLayoutSize];
				XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, kAudioFilePropertyChannelLayout, &myInfo.mChannelLayoutSize, myInfo.mChannelLayout), "get audio file's channel layout");
			}
		} else {
			// Layered format (e.g. AAC inside a multi-format file):
			// pick the first layer this system has a decoder for.
			if (doPrint) {
				printf ("File has a %d layered data format:\n", (int)numFormats);
				for (unsigned int i = 0; i < numFormats; ++i)
					CAStreamBasicDescription(formatList[i].mASBD).Print();
			}
			// now we should look to see which decoders we have on the system
			XThrowIfError(AudioFormatGetPropertyInfo(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size), "couldn't get decoder id's");
			UInt32 numDecoders = size / sizeof(OSType);
			OSType *decoderIDs = new OSType [ numDecoders ];
			XThrowIfError(AudioFormatGetProperty(kAudioFormatProperty_DecodeFormatIDs, 0, NULL, &size, decoderIDs), "couldn't get decoder id's");			
			unsigned int i = 0;
			for (; i < numFormats; ++i) {
				OSType decoderID = formatList[i].mASBD.mFormatID;
				bool found = false;
				for (unsigned int j = 0; j < numDecoders; ++j) {
					if (decoderID == decoderIDs[j]) {
						found = true;
						break;
					}
				}
				if (found) break;
			}
			delete [] decoderIDs;
			
			if (i >= numFormats) {
				fprintf (stderr, "Cannot play any of the formats in this file\n");
				throw kAudioFileUnsupportedDataFormatError;
			}
			myInfo.mDataFormat = formatList[i].mASBD;
			myInfo.mChannelLayoutSize = sizeof(AudioChannelLayout);
			myInfo.mChannelLayout = (AudioChannelLayout*)new char [myInfo.mChannelLayoutSize];
			myInfo.mChannelLayout->mChannelLayoutTag = formatList[i].mChannelLayoutTag;
			myInfo.mChannelLayout->mChannelBitmap = 0;
			myInfo.mChannelLayout->mNumberChannelDescriptions = 0;
		}
		delete [] formatList;
		
		if (doPrint) {
			printf ("Playing format: "); 
			myInfo.mDataFormat.Print();
		}
		
		XThrowIfError(AudioQueueNewOutput(&myInfo.mDataFormat, AQTestBufferCallback, &myInfo, 
									CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &myInfo.mQueue), "AudioQueueNew failed");

		UInt32 bufferByteSize;		
		// we need to calculate how many packets we read at a time, and how big a buffer we need
		// we base this on the size of the packets in the file and an approximate duration for each buffer
		{
			bool isFormatVBR = (myInfo.mDataFormat.mBytesPerPacket == 0 || myInfo.mDataFormat.mFramesPerPacket == 0);
			
			// first check to see what the max size of a packet is - if it is bigger
			// than our allocation default size, that needs to become larger
			UInt32 maxPacketSize;
			size = sizeof(maxPacketSize);
			XThrowIfError(AudioFileGetProperty(myInfo.mAudioFile, 
									kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize), "couldn't get file's max packet size");
			
			// adjust buffer size to represent about a half second of audio based on this format
			CalculateBytesForTime (myInfo.mDataFormat, maxPacketSize, 0.5/*seconds*/, &bufferByteSize, &myInfo.mNumPacketsToRead);
			
			if (isFormatVBR)
				myInfo.mPacketDescs = new AudioStreamPacketDescription [myInfo.mNumPacketsToRead];
			else
				myInfo.mPacketDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
				
			if (doPrint)
				printf ("Buffer Byte Size: %d, Num Packets to Read: %d\n", (int)bufferByteSize, (int)myInfo.mNumPacketsToRead);
		}

		// (2) If the file has a cookie, we should get it and set it on the AQ
		size = sizeof(UInt32);
		result = AudioFileGetPropertyInfo (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, NULL);

		if (!result && size) {
			char* cookie = new char [size];		
			XThrowIfError (AudioFileGetProperty (myInfo.mAudioFile, kAudioFilePropertyMagicCookieData, &size, cookie), "get cookie from file");
			XThrowIfError (AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_MagicCookie, cookie, size), "set cookie on queue");
			delete [] cookie;
		}

		// set ACL if there is one
		if (myInfo.mChannelLayout)
			XThrowIfError(AudioQueueSetProperty(myInfo.mQueue, kAudioQueueProperty_ChannelLayout, myInfo.mChannelLayout, myInfo.mChannelLayoutSize), "set channel layout on queue");

		// prime the queue with some data before starting
		myInfo.mDone = false;
		myInfo.mCurrentPacket = 0;
		for (int i = 0; i < kNumberBuffers; ++i) {
			XThrowIfError(AudioQueueAllocateBuffer(myInfo.mQueue, bufferByteSize, &myInfo.mBuffers[i]), "AudioQueueAllocateBuffer failed");

			AQTestBufferCallback (&myInfo, myInfo.mQueue, myInfo.mBuffers[i]);
			
			if (myInfo.mDone) break;
		}	
			// set the volume of the queue
		XThrowIfError (AudioQueueSetParameter(myInfo.mQueue, kAudioQueueParam_Volume, volume), "set queue volume");
		
		XThrowIfError (AudioQueueAddPropertyListener (myInfo.mQueue, kAudioQueueProperty_IsRunning, MyAudioQueuePropertyListenerProc, NULL), "add listener");
		
#if !TARGET_OS_IPHONE
		// Optional time-pitch (rate-scaled) playback, desktop only.
		if (rate > 0) {
			UInt32 propValue = 1;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_EnableTimePitch, &propValue, sizeof(propValue)), "enable time pitch");
			
			propValue = rQuality ? kAudioQueueTimePitchAlgorithm_Spectral : kAudioQueueTimePitchAlgorithm_TimeDomain;
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchAlgorithm, &propValue, sizeof(propValue)), "time pitch algorithm");
			
			propValue = (rate == 1.0f ? 1 : 0); // bypass rate if 1.0
			XThrowIfError (AudioQueueSetProperty (myInfo.mQueue, kAudioQueueProperty_TimePitchBypass, &propValue, sizeof(propValue)), "bypass time pitch");
			if (rate != 1) {
				XThrowIfError (AudioQueueSetParameter (myInfo.mQueue, kAudioQueueParam_PlayRate, rate), "set playback rate");
			}
			
			if (doPrint) {
				printf ("Enable rate-scaled playback (rate = %.2f) using %s algorithm\n", rate, (rQuality ? "Spectral": "Time Domain"));
			}
		}
#endif
			// lets start playing now - stop is called in the AQTestBufferCallback when there's
			// no more to read from the file
		XThrowIfError(AudioQueueStart(myInfo.mQueue, NULL), "AudioQueueStart failed");

		// Pump the run loop in 0.25 s slices so the queue's callback can
		// run; gIsRunning is cleared via the IsRunning property listener.
		do {
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
			currentTime += .25;
			if (duration > 0 && currentTime >= duration)
				break;
			
		} while (gIsRunning);
			
		// Give the queue one more second to drain before disposal.
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1, false);

		XThrowIfError(AudioQueueDispose(myInfo.mQueue, true), "AudioQueueDispose(true) failed");
		XThrowIfError(AudioFileClose(myInfo.mAudioFile), "AudioQueueDispose(false) failed");
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
	}
	catch (...) {
		fprintf(stderr, "Unspecified exception\n");
	}
	
    return 0;
}
コード例 #19
0
ファイル: mf_rdpsnd.c プロジェクト: 10084462/FreeRDP
/*
 * RDPSND activation handler: negotiate an audio format with the client,
 * then build and start a CoreAudio input queue that captures in the
 * agreed format and forwards buffers via mf_peer_rdpsnd_input_callback.
 * Returns without starting capture if no common format exists or the
 * queue cannot be created.
 */
static void mf_peer_rdpsnd_activated(RdpsndServerContext* context)
{
	OSStatus status;
	int i, j;
	BOOL formatAgreed = FALSE;
	AUDIO_FORMAT* agreedFormat = NULL;
	
	//we should actually loop through the list of client formats here
	//and see if we can send the client something that it supports...
	
	printf("Client supports the following %d formats: \n", context->num_client_formats);
	
	for (i = 0; i < context->num_client_formats; i++)
	{
		/* TODO: improve the way we agree on a format */
		for (j = 0; j < context->num_server_formats; j++)
		{
			if ((context->client_formats[i].wFormatTag == context->server_formats[j].wFormatTag) &&
			    (context->client_formats[i].nChannels == context->server_formats[j].nChannels) &&
			    (context->client_formats[i].nSamplesPerSec == context->server_formats[j].nSamplesPerSec))
			{
				printf("agreed on format!\n");
				formatAgreed = TRUE;
				agreedFormat = (AUDIO_FORMAT*)&context->server_formats[j];
				break;
			}
		}
		if (formatAgreed == TRUE)
			break;
		
	}
	
	if (formatAgreed == FALSE)
	{
		printf("Could not agree on a audio format with the server\n");
		return;
	}
	
	/* i is the index of the matching client format. */
	context->SelectFormat(context, i);
	context->SetVolume(context, 0x7FFF, 0x7FFF);
	
	switch (agreedFormat->wFormatTag)
	{
		case WAVE_FORMAT_ALAW:
			/* NOTE(review): mapping A-law to kAudioFormatDVIIntelIMA
			   (IMA ADPCM) looks suspicious — confirm this is intended
			   rather than kAudioFormatALaw. */
			recorderState.dataFormat.mFormatID = kAudioFormatDVIIntelIMA;
			break;
			
		case WAVE_FORMAT_PCM:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
			
		default:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
	}
	
	recorderState.dataFormat.mSampleRate = agreedFormat->nSamplesPerSec;
	recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
	recorderState.dataFormat.mChannelsPerFrame = agreedFormat->nChannels;
	recorderState.dataFormat.mBitsPerChannel = agreedFormat->wBitsPerSample;
	/* Fix: frame/packet sizes were hard-coded to 4 bytes, which is only
	   correct for 16-bit stereo; derive them from the agreed format. */
	recorderState.dataFormat.mBytesPerFrame = agreedFormat->nChannels * (agreedFormat->wBitsPerSample / 8);
	recorderState.dataFormat.mBytesPerPacket = recorderState.dataFormat.mBytesPerFrame;
	recorderState.dataFormat.mFramesPerPacket = 1;
	
	recorderState.snd_context = context;
	
	status = AudioQueueNewInput(&recorderState.dataFormat,
				    mf_peer_rdpsnd_input_callback,
				    &recorderState,
				    NULL,
				    kCFRunLoopCommonModes,
				    0,
				    &recorderState.queue);
	
	if (status != noErr)
	{
		printf("Failed to create a new Audio Queue. Status code: %d\n", (int) status);
		/* Fix: previously execution continued and used the invalid
		   queue handle in every call below. */
		return;
	}
	
	/* Read back the fully-specified format the queue actually uses. */
	UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
	
	AudioQueueGetProperty(recorderState.queue,
			      kAudioConverterCurrentInputStreamDescription,
			      &recorderState.dataFormat,
			      &dataFormatSize);
	
	/* Size capture buffers for ~50 ms of audio. */
	mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
	
	for (i = 0; i < SND_NUMBUFFERS; ++i)
	{
		AudioQueueAllocateBuffer(recorderState.queue,
					 recorderState.bufferByteSize,
					 &recorderState.buffers[i]);
		
		AudioQueueEnqueueBuffer(recorderState.queue,
					recorderState.buffers[i],
					0,
					NULL);
	}
	
	recorderState.currentPacket = 0;
	recorderState.isRunning = true;
	
	AudioQueueStart (recorderState.queue, NULL);
	
}
コード例 #20
0
/* Set up sound recording: sanitize the requested sample format, create a
   recording audio queue, open the target AIFF/WAV output file and prime the
   queue with two sample buffers. The container format is chosen from the
   outputFileName extension (.aiff or .wav; no extension defaults to AIFF).
   Errors are reported via Misc::throwStdErr, after disposing any partially
   created queue/file. NOTE(review): the audioSource parameter is not used in
   this code path -- confirm whether a platform selects a device with it. */
void SoundRecorder::init(const char* audioSource,const SoundDataFormat& sFormat,const char* outputFileName)
	{
	/* Store and sanify the sound data format: */
	format.mSampleRate=double(sFormat.framesPerSecond);
	format.mFormatID=kAudioFormatLinearPCM;
	format.mFormatFlags=0x0;
	/* Round the sample size up to a whole number of bytes, with a minimum of 8 bits: */
	format.mBitsPerChannel=sFormat.bitsPerSample>8?(sFormat.bitsPerSample+7)&~0x7:8;
	/* Clamp the channel count to at least 1: */
	format.mChannelsPerFrame=sFormat.samplesPerFrame>=1?sFormat.samplesPerFrame:1;
	format.mBytesPerFrame=format.mChannelsPerFrame*(format.mBitsPerChannel/8);
	format.mFramesPerPacket=1;
	format.mBytesPerPacket=format.mFramesPerPacket*format.mBytesPerFrame;
	
	/* Determine the output file format from the file name extension: */
	AudioFileTypeID audioFileType=kAudioFileWAVEType; // Not really a default; just to make compiler happy
	const char* ext=Misc::getExtension(outputFileName);
	if(*ext=='\0'||strcasecmp(ext,".aiff")==0)
		{
		/* Adjust the sound data format for AIFF files (big-endian signed packed integer samples): */
		audioFileType=kAudioFileAIFFType;
		format.mFormatFlags=kLinearPCMFormatFlagIsBigEndian|kLinearPCMFormatFlagIsSignedInteger|kLinearPCMFormatFlagIsPacked;
		}
	else if(strcasecmp(ext,".wav")==0)
		{
		/* Adjust the sound data format for WAV files (little-endian; 8-bit WAV samples are unsigned): */
		audioFileType=kAudioFileWAVEType;
		format.mFormatFlags=kLinearPCMFormatFlagIsPacked;
		if(format.mBitsPerChannel>8)
			format.mFormatFlags|=kLinearPCMFormatFlagIsSignedInteger;
		}
	else
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Output file name %s has unrecognized extension",outputFileName);
	
	/* Create the recording audio queue: */
	if(AudioQueueNewInput(&format,handleInputBufferWrapper,this,0,kCFRunLoopCommonModes,0,&queue)!=noErr)
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while creating audio queue");
	
	/* Retrieve the fully specified audio data format from the audio queue: */
	UInt32 formatSize=sizeof(format);
	if(AudioQueueGetProperty(queue,kAudioConverterCurrentOutputStreamDescription,&format,&formatSize)!=noErr)
		{
		AudioQueueDispose(queue,true);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while retrieving audio queue sound format");
		}
	
	/* Open the target audio file: */
	CFURLRef audioFileURL=CFURLCreateFromFileSystemRepresentation(0,reinterpret_cast<const UInt8*>(outputFileName),strlen(outputFileName),false);
	if(AudioFileCreateWithURL(audioFileURL,audioFileType,&format,kAudioFileFlags_EraseFile,&audioFile)!=noErr)
		{
		AudioQueueDispose(queue,true);
		CFRelease(audioFileURL);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while opening output file %s",outputFileName);
		}
	CFRelease(audioFileURL);
	
	/* Calculate an appropriate buffer size and allocate the sound buffers: */
	int maxPacketSize=format.mBytesPerPacket;
	if(maxPacketSize==0) // Must be a variable bit rate sound format
		{
		/* Query the expected maximum packet size from the audio queue: */
		/* NOTE(review): maxPacketSize is an int but the property writes a UInt32;
		   same size on this platform -- confirm. */
		UInt32 maxVBRPacketSize=sizeof(maxPacketSize);
		if(AudioQueueGetProperty(queue,kAudioConverterPropertyMaximumOutputPacketSize,&maxPacketSize,&maxVBRPacketSize)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while calcuating sample buffer size");
			}
		}
	
	/* Calculate an appropriate buffer size based on the given duration (0.25s per buffer): */
	int numPackets=int(floor(double(format.mSampleRate)*0.25+0.5));
	bufferSize=UInt32(numPackets*maxPacketSize);
	
	/* Create the sample buffers (double-buffering): */
	for(int i=0;i<2;++i)
		{
		/* Create the sound buffer: */
		if(AudioQueueAllocateBuffer(queue,bufferSize,&buffers[i])!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while allocating sample buffer %d",i);
			}
		
		/* Add the buffer to the queue: */
		if(AudioQueueEnqueueBuffer(queue,buffers[i],0,0)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while enqueuing sample buffer %d",i);
			}
		}
	}
コード例 #21
0
    void AudioOutputDeviceCoreAudio::CreateAndStartAudioQueue() throw(Exception) {
        OSStatus res = AudioQueueNewOutput (
            &aqPlayerState.mDataFormat,
            HandleOutputBuffer,
            &aqPlayerState,
            CFRunLoopGetCurrent(),
            kCFRunLoopCommonModes,
            0,
            &aqPlayerState.mQueue
        );

        if(res) {
            String s = String("AudioQueueNewOutput: Error ") + ToString(res);
            throw Exception(s);
        }

        CFStringRef devUID = CFStringCreateWithCString (
            NULL, CurrentDevice.GetUID().c_str(), kCFStringEncodingASCII
        );
        res = AudioQueueSetProperty (
            aqPlayerState.mQueue,
            kAudioQueueProperty_CurrentDevice,
            &devUID, sizeof(CFStringRef)
        );
        CFRelease(devUID);

        if(res) {
            String s = String("Failed to set audio device: ") + ToString(res);
            throw Exception(s);
        }

        for (int i = 0; i < uiBufferNumber; ++i) {
            res = AudioQueueAllocateBuffer (
                aqPlayerState.mQueue,
                aqPlayerState.bufferByteSize,
                &aqPlayerState.mBuffers[i]
            );

            if(res) {
                String s = String("AudioQueueAllocateBuffer: Error ");
                throw Exception(s + ToString(res));
            }
        }

        res = AudioQueueAddPropertyListener (
            aqPlayerState.mQueue,
            kAudioQueueProperty_CurrentDevice,
            AudioQueueListener,
            NULL
        );
        if(res) std::cerr << "Failed to register device change listener: " << res << std::endl;

        res = AudioQueueAddPropertyListener (
            aqPlayerState.mQueue,
            kAudioQueueProperty_IsRunning,
            AudioQueueListener,
            NULL
        );
        if(res) std::cerr << "Failed to register running listener: " << res << std::endl;

        Float32 gain = 1.0;

        res = AudioQueueSetParameter (
            aqPlayerState.mQueue,
            kAudioQueueParam_Volume,
            gain
        );

        if(res) std::cerr << "AudioQueueSetParameter: Error " << res << std::endl;

        atomic_set(&(aqPlayerState.mIsRunning), 1);
        FillBuffers();
        PrimeAudioQueue();

        res = AudioQueueStart(aqPlayerState.mQueue, NULL);
        if(res) {
            String s = String("AudioQueueStart: Error ") + ToString(res);
            throw Exception(s);
        }
    }
コード例 #22
0
// Records AAC audio from the default input device into ./output.caf until the
// user presses <return>.
// NOTE(review): CheckError presumably terminates the process on failure --
// confirm; the "cleanup:" label below is never the target of a goto, so the
// teardown only runs on the normal path.
int	main(int argc, const char *argv[])
{
	MyRecorder recorder = {0};
	AudioStreamBasicDescription recordFormat = {0};
	memset(&recordFormat, 0, sizeof(recordFormat));
	
	// Configure the output data format to be AAC
	recordFormat.mFormatID = kAudioFormatMPEG4AAC;
	recordFormat.mChannelsPerFrame = 2;
	
	// get the sample rate of the default input device
	// we use this to adapt the output data format to match hardware capabilities
	MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
	
	// ProTip: Use the AudioFormat API to trivialize ASBD creation.
	//         input: at least the mFormatID; however, at this point we already have
	//                mSampleRate, mFormatID, and mChannelsPerFrame
	//         output: the remainder of the ASBD will be filled out as much as possible
	//                 given the information known about the format
	UInt32 propSize = sizeof(recordFormat);
	CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
									  &propSize, &recordFormat), "AudioFormatGetProperty failed");
	
	// create an input (recording) queue
	AudioQueueRef queue = {0};
	CheckError(AudioQueueNewInput(&recordFormat, // ASBD
								  MyAQInputCallback, // Callback
								  &recorder, // user data
								  NULL, // run loop
								  NULL, // run loop mode
								  0, // flags (always 0)
								  // &recorder.queue), // output: reference to AudioQueue object
								  &queue),
			   "AudioQueueNewInput failed");
	
	// since the queue is now initialized, we ask its Audio Converter object
	// for the ASBD it has configured itself with. The file may require a more
	// specific stream description than was necessary to create the audio queue.
	//
	// for example: certain fields in an ASBD cannot possibly be known until its
	// codec is instantiated (in this case, by the AudioQueue's Audio Converter object)
	UInt32 size = sizeof(recordFormat);
	CheckError(AudioQueueGetProperty(queue, kAudioConverterCurrentOutputStreamDescription,
									 &recordFormat, &size), "couldn't get queue's format");
	
	// create the audio file
	CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("./output.caf"), kCFURLPOSIXPathStyle, false);
	CFShow (myFileURL);
	CheckError(AudioFileCreateWithURL(myFileURL, kAudioFileCAFType, &recordFormat,
									  kAudioFileFlags_EraseFile, &recorder.recordFile), "AudioFileCreateWithURL failed");
	CFRelease(myFileURL);
	
	// many encoded formats require a 'magic cookie'. we set the cookie first
	// to give the file object as much info as we can about the data it will be receiving
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
	// allocate and enqueue buffers
	int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, 0.5);	// enough bytes for half a second
	int bufferIndex;
    for (bufferIndex = 0; bufferIndex < kNumberRecordBuffers; ++bufferIndex)
	{
		AudioQueueBufferRef buffer;
		CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer),
				   "AudioQueueAllocateBuffer failed");
		CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL),
				   "AudioQueueEnqueueBuffer failed");
	}
	
	// start the queue. this function returns immediately and begins
	// invoking the callback, as needed, asynchronously.
	recorder.running = TRUE;
	CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
	
	// and wait for the user to finish the recording
	printf("Recording, press <return> to stop:\n");
	getchar();
	
	// end recording
	printf("* recording done *\n");
	recorder.running = FALSE;
	CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
	
	// a codec may update its magic cookie at the end of an encoding session
	// so reapply it to the file now
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
cleanup:
	AudioQueueDispose(queue, TRUE);
	AudioFileClose(recorder.recordFile);
	
	return 0;
}
コード例 #23
0
ファイル: sndfile-play.c プロジェクト: tmatth/libsndfile
/*
** Play each file named on the command line through a CoreAudio audio queue.
**
** argv [1..argc-1] are the paths of the files to play. Files that cannot be
** opened or that are not mono/stereo are skipped. Unlike the original
** version, every error path now releases the SNDFILE handle and (once it
** exists) the audio queue instead of leaking them.
*/
static void
macosx_play (int argc, char *argv [])
{	MacOSXAudioData 	audio_data ;
	OSStatus	err ;
	int 		i ;
	int 		k ;

	/* 0x55 fill makes accidental use of uninitialized fields easier to spot. */
	memset (&audio_data, 0x55, sizeof (audio_data)) ;

	for (k = 1 ; k < argc ; k++)
	{	memset (&(audio_data.sfinfo), 0, sizeof (audio_data.sfinfo)) ;

		printf ("Playing %s\n", argv [k]) ;
		if (! (audio_data.sndfile = sf_open (argv [k], SFM_READ, &(audio_data.sfinfo))))
		{	puts (sf_strerror (NULL)) ;
			continue ;
			} ;

		if (audio_data.sfinfo.channels < 1 || audio_data.sfinfo.channels > 2)
		{	printf ("Error : channels = %d.\n", audio_data.sfinfo.channels) ;
			sf_close (audio_data.sndfile) ;	/* was leaked before */
			continue ;
			} ;

		/* Fill the ASBD: 16 bit signed native-endian interleaved PCM at the
		** file's sample rate and channel count. */
		audio_data.format.mSampleRate = audio_data.sfinfo.samplerate ;
		audio_data.format.mChannelsPerFrame	= audio_data.sfinfo.channels ;
		audio_data.format.mFormatID			= kAudioFormatLinearPCM ;
		audio_data.format.mFormatFlags		= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked ;
		audio_data.format.mBytesPerPacket	= audio_data.format.mChannelsPerFrame * 2 ;
		audio_data.format.mFramesPerPacket	= 1 ;
		audio_data.format.mBytesPerFrame	= audio_data.format.mBytesPerPacket ;
		audio_data.format.mBitsPerChannel	= 16 ;
		audio_data.format.mReserved			= 0 ;

		/* Create the playback queue on the current run loop. */
		if ((err = AudioQueueNewOutput (&(audio_data.format), macosx_audio_out_callback, &audio_data,
					CFRunLoopGetCurrent (), kCFRunLoopCommonModes, 0, &(audio_data.queue))) != noErr)
		{	printf ("AudioQueueNewOutput failed\n") ;
			sf_close (audio_data.sndfile) ;	/* was leaked before */
			return ;
			} ;

		/* Listen for the queue stopping so the run loop below can exit. */
		if ((err = AudioQueueAddPropertyListener (audio_data.queue, kAudioQueueProperty_IsRunning, macosx_audio_out_property_callback, &audio_data)) != noErr)
		{	printf ("AudioQueueAddPropertyListener failed\n") ;
			AudioQueueDispose (audio_data.queue, true) ;	/* was leaked before */
			sf_close (audio_data.sndfile) ;
			return ;
			} ;

		/* Create the buffers and pre-fill them with audio data. */
		for (i = 0 ; i < kNumberOfAudioBuffers ; i++)
		{	if ((err = AudioQueueAllocateBuffer (audio_data.queue, kBytesPerAudioBuffer, &audio_data.queueBuffer [i])) != noErr)
			{	printf ("AudioQueueAllocateBuffer failed\n") ;
				AudioQueueDispose (audio_data.queue, true) ;	/* was leaked before */
				sf_close (audio_data.sndfile) ;
				return ;
				} ;

			macosx_fill_buffer (&audio_data, audio_data.queueBuffer [i]) ;
			} ;

		audio_data.done_playing = SF_FALSE ;

		/* Start the queue, then run the loop until the property listener
		** signals that playback has finished. */
		if ((err = AudioQueueStart (audio_data.queue, NULL)) != noErr)
		{	printf ("AudioQueueStart failed\n") ;
			AudioQueueDispose (audio_data.queue, true) ;	/* was leaked before */
			sf_close (audio_data.sndfile) ;
			return ;
		} ;

		while (audio_data.done_playing == SF_FALSE)
			CFRunLoopRun () ;

		/*  free the buffers */
		for (i = 0 ; i < kNumberOfAudioBuffers ; i++)
		{	if ((err = AudioQueueFreeBuffer (audio_data.queue, audio_data.queueBuffer [i])) != noErr)
			{	printf ("AudioQueueFreeBuffer failed\n") ;
				AudioQueueDispose (audio_data.queue, true) ;	/* was leaked before */
				sf_close (audio_data.sndfile) ;
				return ;
				} ;
			} ;

		/*  free the queue */
		if ((err = AudioQueueDispose (audio_data.queue, true)) != noErr)
		{	printf ("AudioQueueDispose failed\n") ;
			sf_close (audio_data.sndfile) ;	/* was leaked before */
			return ;
			} ;

		sf_close (audio_data.sndfile) ;
		} ;

	return ;
} /* macosx_play, AudioQueue implementation */
コード例 #24
0
/**
 * Prepares the CoreAudio capture producer for the negotiated codec:
 * copies the audio parameters, builds the PCM capture format, creates the
 * input audio queue and pre-enqueues the capture buffers.
 * @param self   the producer (a tdav_producer_audioqueue_t).
 * @param codec  the negotiated audio codec; must have a plugin.
 * @return 0 on success, a negative value otherwise.
 */
static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
{
	OSStatus ret;
	tsk_size_t i;
	tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;

	// BUG FIX: the original condition was "!producer || !codec && codec->plugin",
	// which dereferences codec when it is NULL (short-circuit makes
	// "codec->plugin" evaluate exactly when codec is NULL) and never rejects a
	// codec without a plugin.
	if(!producer || !codec || !codec->plugin){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	// Copy the negotiated audio parameters from the codec.
	TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
	/* codec should have ptime */

	// Set audio category (recording requires PlayAndRecord on iOS)
#if TARGET_OS_IPHONE
	UInt32 category = kAudioSessionCategory_PlayAndRecord;
	AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
#endif
	// Describe the capture format: interleaved signed-integer packed PCM.
	AudioStreamBasicDescription *description = &(producer->description);
	description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
	description->mFormatID = kAudioFormatLinearPCM;
	description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
	description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
	description->mFramesPerPacket = 1;
	description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
	description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
	description->mBytesPerFrame = description->mBytesPerPacket;
	description->mReserved = 0;

	// One buffer holds exactly one ptime worth of audio.
	int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
	producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;

	// Create the record audio queue
	ret = AudioQueueNewInput(&(producer->description),
							 __handle_input_buffer,
							 producer,
							 NULL,
							 kCFRunLoopCommonModes,
							 0,
							 &(producer->queue));
	// BUG FIX: the original ignored this status and went on to allocate
	// buffers on an invalid queue (and reported success).
	if(ret){
		TSK_DEBUG_ERROR("AudioQueueNewInput failed with status=%d", (int)ret);
		return -2;
	}

	for(i = 0; i < CoreAudioRecordBuffers; i++) {
		// Create the buffer for the queue
		ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
		if (ret) {
			break;
		}

		// Clear the data
		memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
		producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;

		// Enqueue the buffer
		ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
		if (ret) {
			break;
		}
	}

	// BUG FIX: report buffer setup failures instead of silently returning 0.
	if(ret){
		TSK_DEBUG_ERROR("Failed to prepare audio queue buffers, status=%d", (int)ret);
		return -3;
	}

	return 0;
}
コード例 #25
0
ファイル: audio_queue.cpp プロジェクト: DZamataev/wos.ru_iOS
/*
 * AudioFileStream property callback: when the parser becomes ready to
 * produce packets, (re)creates the output audio queue from m_streamDesc
 * (assumed to be filled in elsewhere), allocates its buffers and registers
 * the IsRunning listener. On failure m_lastError is set and the delegate,
 * if any, is notified.
 */
void Audio_Queue::handlePropertyChange(AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    OSStatus err = noErr;
    
    AQ_TRACE("found property '%u%u%u%u'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);
    
    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets:
        {
            cleanup();
            
            // create the audio queue
            err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);
                
                m_lastError = err;
                
                if (m_delegate) {
                    m_delegate->audioQueueInitializationFailed();
                }
                
                break;
            }
            
            Stream_Configuration *configuration = Stream_Configuration::configuration();
            
            // allocate audio queue buffers
            for (unsigned int i = 0; i < configuration->bufferCount; ++i) {
                err = AudioQueueAllocateBuffer(m_outAQ, configuration->bufferSize, &m_audioQueueBuffer[i]);
                if (err) {
                    /* If allocating the buffers failed, everything else will fail, too.
                     *  Dispose the queue so that we can later on detect that this
                     *  queue in fact has not been initialized.
                     */
                    
                    AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);
                    
                    (void)AudioQueueDispose(m_outAQ, true);
                    m_outAQ = 0;
                    
                    m_lastError = err;
                    
                    if (m_delegate) {
                        m_delegate->audioQueueInitializationFailed();
                    }
                    
                    break;
                }
            }
            
            // BUG FIX: the "break" above only exits the for loop; the original
            // code then registered a property listener on the already-disposed
            // queue (m_outAQ == 0). Bail out of the case instead.
            if (err) {
                break;
            }
            
            // listen for kAudioQueueProperty_IsRunning
            err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
                m_lastError = err;
                break;
            }
            
            break;
        }
    }
}
コード例 #26
0
ファイル: audio_queue.cpp プロジェクト: jsonsnow/MusicPlayers
/*
 * (Re)initializes the playback audio queue: creates it from m_streamDesc
 * (filled in elsewhere), allocates configuration->bufferCount buffers of
 * configuration->bufferSize bytes, registers the IsRunning listener,
 * optionally enables time/pitch conversion and applies a non-default initial
 * volume. On queue or buffer failures m_lastError is set and the delegate,
 * if any, is notified.
 */
void Audio_Queue::init()
{
    OSStatus err = noErr;
    
    cleanup();
        
    // create the audio queue
    err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);
        
        m_lastError = err;
        
        if (m_delegate) {
            m_delegate->audioQueueInitializationFailed();
        }
        
        return;
    }
    
    Stream_Configuration *configuration = Stream_Configuration::configuration();
    
    // allocate audio queue buffers
    for (unsigned int i = 0; i < configuration->bufferCount; ++i) {
        err = AudioQueueAllocateBuffer(m_outAQ, configuration->bufferSize, &m_audioQueueBuffer[i]);
        if (err) {
            /* If allocating the buffers failed, everything else will fail, too.
             *  Dispose the queue so that we can later on detect that this
             *  queue in fact has not been initialized.
             */
            
            AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);
            
            (void)AudioQueueDispose(m_outAQ, true);
            m_outAQ = 0;
            
            m_lastError = err;
            
            if (m_delegate) {
                m_delegate->audioQueueInitializationFailed();
            }
            
            return;
        }
    }
    
    // listen for kAudioQueueProperty_IsRunning
    err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
    if (err) {
        AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
        m_lastError = err;
        return;
    }
    
    // Time/pitch conversion must be enabled before a play rate can be set.
    if (configuration->enableTimeAndPitchConversion) {
        UInt32 enableTimePitchConversion = 1;
        
        err = AudioQueueSetProperty (m_outAQ, kAudioQueueProperty_EnableTimePitch, &enableTimePitchConversion, sizeof(enableTimePitchConversion));
        if (err != noErr) {
            AQ_TRACE("Failed to enable time and pitch conversion. Play rate setting will fail\n");
        }
    }
    
    // Exact float compare against the 1.0 default; only a non-default
    // initial volume is pushed to the queue.
    if (m_initialOutputVolume != 1.0) {
        setVolume(m_initialOutputVolume);
    }
}
コード例 #27
0
// Plays the audio file at kPlaybackFileLocation through an output audio
// queue, priming kNumberPlaybackBuffers half-second buffers before starting.
// NOTE(review): CheckError presumably terminates the process on failure --
// confirm; the "cleanup:" label below is never the target of a goto.
int	main(int argc, const char *argv[])
{
	MyPlayer player = {0};
	
	CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, kPlaybackFileLocation, kCFURLPOSIXPathStyle, false);
	
	// open the audio file
	//	CheckError(AudioFileOpenURL(myFileURL, fsRdPerm, 0, &player.playbackFile), "AudioFileOpenURL failed");
	CheckError(AudioFileOpenURL(myFileURL, kAudioFileReadPermission, 0, &player.playbackFile), "AudioFileOpenURL failed");
	CFRelease(myFileURL);
	
	// get the audio data format from the file
	AudioStreamBasicDescription dataFormat;
	UInt32 propSize = sizeof(dataFormat);
	CheckError(AudioFileGetProperty(player.playbackFile, kAudioFilePropertyDataFormat,
									&propSize, &dataFormat), "couldn't get file's data format");
	
	// create an output (playback) queue
	AudioQueueRef queue;
	CheckError(AudioQueueNewOutput(&dataFormat, // ASBD
								   MyAQOutputCallback, // Callback
								   &player, // user data
								   NULL, // run loop
								   NULL, // run loop mode
								   0, // flags (always 0)
								   &queue), // output: reference to AudioQueue object
			   "AudioQueueNewOutput failed");
	
	
	// adjust buffer size to represent about a half second (0.5) of audio based on this format
 	UInt32 bufferByteSize;
	CalculateBytesForTime(player.playbackFile, dataFormat,  0.5, &bufferByteSize, &player.numPacketsToRead);
	
	// check if we are dealing with a VBR file. ASBDs for VBR files always have 
	// mBytesPerPacket and mFramesPerPacket as 0 since they can fluctuate at any time.
	// If we are dealing with a VBR file, we allocate memory to hold the packet descriptions
	bool isFormatVBR = (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0);
	if (isFormatVBR)
		player.packetDescs = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
	else
		player.packetDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
	
	// get magic cookie from file and set on queue
	MyCopyEncoderCookieToQueue(player.playbackFile, queue);
	
	// allocate the buffers and prime the queue with some data before starting
	AudioQueueBufferRef	buffers[kNumberPlaybackBuffers];
	player.isDone = false;
	player.packetPosition = 0;
	int i;
	for (i = 0; i < kNumberPlaybackBuffers; ++i)
	{
		CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffers[i]), "AudioQueueAllocateBuffer failed");
		
		// manually invoke callback to fill buffers with data
		MyAQOutputCallback(&player, queue, buffers[i]);
		
		// EOF (the entire file's contents fit in the buffers)
		if (player.isDone)
			break;
	}	
	
	
	//CheckError(AudioQueueAddPropertyListener(aqp.queue, kAudioQueueProperty_IsRunning, MyAQPropertyListenerCallback, &aqp), "AudioQueueAddPropertyListener(kAudioQueueProperty_IsRunning) failed");
	
	// start the queue. this function returns immediately and begins
	// invoking the callback, as needed, asynchronously.
	CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
	
	// and wait
	printf("Playing...\n");
	do
	{
		CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
	} while (!player.isDone /*|| gIsRunning*/);
	
	// isDone represents the state of the Audio File enqueuing. This does not mean the
	// Audio Queue is actually done playing yet. Since we have 3 half-second buffers in-flight,
	// continue to run for a short additional time so they can be processed
	CFRunLoopRunInMode(kCFRunLoopDefaultMode, 2, false);
	
	// end playback
	player.isDone = true;
	CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
	
cleanup:
	AudioQueueDispose(queue, TRUE);
	AudioFileClose(player.playbackFile);
	
	return 0;
}
コード例 #28
0
ファイル: audio.c プロジェクト: styxyang/codelib
/*
 * Opens ../misc/alin.wav, prints its data format, creates an output audio
 * queue driven by AQTestBufferCallback, allocates three small buffers and
 * runs the current run loop until the property-listener callback stops it.
 * NOTE(review): the buffers are allocated but never filled or enqueued here;
 * playback is presumably kicked off by AudioEnginePropertyListenerProc /
 * AQTestBufferCallback (not visible in this file) -- confirm.
 */
int
main (int argc, const char *argv[])
{
    CFURLRef audioFileURLRef;
    OSStatus ret;

    // Build a URL for the input file (the test.wav path is kept for reference).
    audioFileURLRef = CFURLCreateWithFileSystemPath (
            kCFAllocatorDefault,
			/* CFSTR ("../misc/test.wav"), */
			CFSTR ("../misc/alin.wav"),
            kCFURLPOSIXPathStyle,
            FALSE
            );

    ret = AudioFileOpenURL(
            audioFileURLRef,
            kAudioFileReadWritePermission,
            0,
            &myInfo.mAudioFile);
    if (ret != noErr) {
        printf("fail to open audio file %x\n", ret);
        return 1;
    }

    // Read the file's AudioStreamBasicDescription.
    UInt32 propSize = sizeof(myInfo.mDataFormat);
    ret = AudioFileGetProperty(
            myInfo.mAudioFile,
            kAudioFilePropertyDataFormat,
            &propSize,
            &myInfo.mDataFormat
            );
    if (ret != noErr) {
        printf("AudioFileGetProperty error code %d\n", ret);
        return 1;
    }
    printf("sample rate: %f\n"
            "mFormatID: %u\n"
            "mFormatFlags: %u\n"
            "mBytesPerPacket: %u\n"
            "mChannelsPerFrame: %u\n",
            myInfo.mDataFormat.mSampleRate,
            myInfo.mDataFormat.mFormatID,
            myInfo.mDataFormat.mFormatFlags,
            myInfo.mDataFormat.mBytesPerPacket,
            myInfo.mDataFormat.mChannelsPerFrame
          );

    // Instantiate an audio queue object
    ret = AudioQueueNewOutput(
            &myInfo.mDataFormat,
            AQTestBufferCallback,
            &myInfo,
            CFRunLoopGetCurrent(),
            kCFRunLoopCommonModes,
            0,
            &myInfo.mQueue
            );
    if (ret != noErr) {
        printf("AudioQueueNewOutput error code %d\n", ret);
        return 1;
    }

    // The listener presumably starts feeding buffers when the queue runs.
    AudioQueueAddPropertyListener(myInfo.mQueue, kAudioQueueProperty_IsRunning,
            AudioEnginePropertyListenerProc, &myInfo);
    

    /* FIXME allocate AudioQueue buffer */ 
    // 441 * 4 bytes is presumably 10 ms of 44.1 kHz 16-bit stereo -- confirm.
    int i;
    for (i = 0; i < 3; i++) {
        AudioQueueAllocateBuffer(myInfo.mQueue, 441 * 4, &myInfo.mBuffers[i]);
    }
    AudioQueueStart(myInfo.mQueue, NULL);

    printf("Run loop\n");
    // create a system sound ID to represent the sound file
    /* OSStatus error = AudioServicesCreateSystemSoundID (myURLRef, &mySSID); */

    // Register the sound completion callback.
    // Again, useful when you need to free memory after playing.
    /* AudioServicesAddSystemSoundCompletion ( */
            /* mySSID, */
            /* NULL, */
            /* NULL, */
            /* MyCompletionCallback, */
            /* (void *) myURLRef */
            /* ); */

    // Play the sound file.
    /* AudioServicesPlaySystemSound (mySSID); */

    // Invoke a run loop on the current thread to keep the application
    // running long enough for the sound to play; the sound completion
    // callback later stops this run loop.
    CFRunLoopRun ();
    return 0;
}
コード例 #29
0
/*
 * AudioFileStream property callback: when the parser becomes ready to
 * produce packets, reads the stream's data format, (re)creates the output
 * audio queue, allocates its buffers, forwards the magic cookie and
 * registers the IsRunning listener. On failure m_lastError is set and the
 * delegate, if any, is notified.
 */
void Audio_Queue::handlePropertyChange(AudioFileStreamID inAudioFileStream, AudioFileStreamPropertyID inPropertyID, UInt32 *ioFlags)
{
    OSStatus err = noErr;
    
    AQ_TRACE("found property '%lu%lu%lu%lu'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);
    
    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets:
        {
            cleanup();
            
            // the file stream parser is now ready to produce audio packets.
            // get the stream format.
            memset(&m_streamDesc, 0, sizeof(m_streamDesc));
            UInt32 asbdSize = sizeof(m_streamDesc);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &m_streamDesc);
            if (err) {
                AQ_TRACE("%s: error in kAudioFileStreamProperty_DataFormat\n", __PRETTY_FUNCTION__);
                m_lastError = err;
                break;
            }
            
            // create the audio queue
            err = AudioQueueNewOutput(&m_streamDesc, audioQueueOutputCallback, this, CFRunLoopGetCurrent(), NULL, 0, &m_outAQ);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueNewOutput\n", __PRETTY_FUNCTION__);
                
                if (m_delegate) {
                    m_delegate->audioQueueInitializationFailed();
                }
                
                m_lastError = err;
                break;
            }
            
            // allocate audio queue buffers
            for (unsigned int i = 0; i < AQ_BUFFERS; ++i) {
                err = AudioQueueAllocateBuffer(m_outAQ, AQ_BUFSIZ, &m_audioQueueBuffer[i]);
                if (err) {
                    /* If allocating the buffers failed, everything else will fail, too.
                     *  Dispose the queue so that we can later on detect that this
                     *  queue in fact has not been initialized.
                     */
                    
                    AQ_TRACE("%s: error in AudioQueueAllocateBuffer\n", __PRETTY_FUNCTION__);
                    
                    (void)AudioQueueDispose(m_outAQ, true);
                    m_outAQ = 0;
                    
                    if (m_delegate) {
                        m_delegate->audioQueueInitializationFailed();
                    }
                    
                    m_lastError = err;
                    break;
                }
            }
            
            // BUG FIX: a failed allocation only broke out of the for loop;
            // the original then called setCookiesForStream() and registered a
            // property listener on the queue it had just disposed
            // (m_outAQ == 0). Bail out of the case instead.
            if (err) {
                break;
            }
            
            setCookiesForStream(inAudioFileStream);
            
            // listen for kAudioQueueProperty_IsRunning
            err = AudioQueueAddPropertyListener(m_outAQ, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, this);
            if (err) {
                AQ_TRACE("%s: error in AudioQueueAddPropertyListener\n", __PRETTY_FUNCTION__);
                m_lastError = err;
                break;
            }
            
            break;
        }
    }
}
コード例 #30
0
ファイル: mac_audio.c プロジェクト: stevestreza/pianobar
/*
 * Property-listener callback for the AudioFileStream parser.
 *
 * Invoked by AudioFileStreamParseBytes() whenever the parser discovers a
 * property in the stream.  The only property acted on here is
 * kAudioFileStreamProperty_ReadyToProducePackets, which signals that the
 * data format is known and audio packets are about to be delivered; at that
 * point we create the output AudioQueue, allocate its buffers, forward the
 * (optional) magic cookie, and register for kAudioQueueProperty_IsRunning.
 *
 * inClientData      - the struct audioPlayer* passed to AudioFileStreamOpen
 * inAudioFileStream - the parser that fired the callback
 * inPropertyID      - which property became available
 * ioFlags           - in/out property flags (unused here)
 *
 * On any fatal error, player->failed is set so the driving loop can abort.
 */
void StreamPropertyListenerProc(void * inClientData,
                                AudioFileStreamID inAudioFileStream,
                                AudioFileStreamPropertyID inPropertyID,
                                UInt32 * ioFlags)
{
    struct audioPlayer* player = (struct audioPlayer*)inClientData;
    OSStatus err = noErr;

//    printf("found property '%c%c%c%c'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
    case kAudioFileStreamProperty_ReadyToProducePackets :
    {
        // the file stream parser is now ready to produce audio packets.
        // get the stream format.
        AudioStreamBasicDescription asbd;
        UInt32 asbdSize = sizeof(asbd);
        err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
        if (err) {
            PRINTERROR("get kAudioFileStreamProperty_DataFormat");
            player->failed = true;
            break;
        }

        //TODO: Is this really right!?!
        player->songDuration = player->waith.contentLength * 2000 / asbd.mSampleRate;
        player->samplerate = asbd.mSampleRate;

        // seconds of audio carried by one packet (frames-per-packet / Hz)
        player->packetDuration = asbd.mFramesPerPacket / asbd.mSampleRate;

        // create the audio queue
        err = AudioQueueNewOutput(&asbd, PianobarAudioQueueOutputCallback, player, NULL, NULL, 0, &player->audioQueue);
        if (err) {
            PRINTERROR("AudioQueueNewOutput");
            player->failed = true;
            break;
        }

        // allocate audio queue buffers
        for (unsigned int i = 0; i < kNumAQBufs; ++i) {
            err = AudioQueueAllocateBuffer(player->audioQueue, kAQBufSize, &player->audioQueueBuffer[i]);
            if (err) {
                PRINTERROR("AudioQueueAllocateBuffer");
                player->failed = true;
                break;
            }
        }
        // BUGFIX: the break above only left the for-loop; without this check
        // we would keep configuring a queue whose buffers failed to allocate.
        if (player->failed)
            break;

        /* The magic cookie (codec-specific configuration data) is optional:
         * many stream formats simply do not have one, in which case the
         * GetPropertyInfo call fails.  That must not abort queue setup, so
         * every cookie-related error below is treated as best-effort and we
         * always fall through to the IsRunning listener registration. */
        UInt32 cookieSize = 0;
        Boolean writable;
        err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
        if (!err && cookieSize > 0) {
            void* cookieData = calloc(1, cookieSize);
            if (cookieData) {                      // guard against OOM
                err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
                if (!err) {
                    // hand the cookie to the queue so the codec can decode
                    err = AudioQueueSetProperty(player->audioQueue, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
                    if (err) {
                        PRINTERROR("set kAudioQueueProperty_MagicCookie");
                    }
                } else {
                    PRINTERROR("get kAudioFileStreamProperty_MagicCookieData");
                }
                free(cookieData);
            }
        }

        // listen for kAudioQueueProperty_IsRunning so start/stop can be tracked
        err = AudioQueueAddPropertyListener(player->audioQueue, kAudioQueueProperty_IsRunning, AudioQueueIsRunningCallback, player);
        if (err) {
            PRINTERROR("AudioQueueAddPropertyListener");
            player->failed = true;
            break;
        }

        break;
    }
    }
}