Example #1
void set_stream()
{
    OSStatus err;
    UInt32 propertySize;

    // get the magic cookie, if any, from the converter
    err = AudioQueueGetPropertySize(d_owner->queue,
                                    kAudioConverterCompressionMagicCookie,
                                    &propertySize);

    if (err == noErr && propertySize > 0) {
        // there is valid cookie data to be fetched;  get it
        Byte *magicCookie = (Byte *)malloc(propertySize);
        err = AudioQueueGetProperty(d_owner->queue,
                                    kAudioConverterCompressionMagicCookie,
                                    magicCookie,
                                    &propertySize);
        if (err == 0)
        {
            // now set the magic cookie on the output file
            // even though some formats have cookies, some files don't take them, so we ignore the error
            /*err =*/
            AudioFileSetProperty(d_owner->stm,
                                 kAudioFilePropertyMagicCookieData,
                                 propertySize,
                                 magicCookie);
        }
        free(magicCookie);
    }

    /*
     if (err != 0)
     trace_fmt("failed to set stream, %.4s", &err);
     */
}
Example #2
void AudioEnginePropertyListenerProc (void *inUserData, AudioQueueRef inAQ, AudioQueuePropertyID inID) {
    //We are only interested in the property kAudioQueueProperty_IsRunning
    if (inID != kAudioQueueProperty_IsRunning) return;

	struct myAQStruct *myInfo = (struct myAQStruct *)inUserData;

	/* Get the current status of the AQ, running or stopped */ 
    UInt32 isQueueRunning = false;
    UInt32 size = sizeof(isQueueRunning);
    AudioQueueGetProperty(myInfo->mQueue, kAudioQueueProperty_IsRunning, &isQueueRunning, &size);

	/* The callback event is the start of the queue */
    if (isQueueRunning) {
		/* reset current packet counter */
        myInfo->mCurrentPacket = 0;

        for (int i = 0; i < 3; i++) {
			/*
			 * For the first time allocate buffers for this AQ.
			 * Buffers are reused in turns until the AQ stops 
			 */
            AudioQueueAllocateBuffer(myInfo->mQueue, bufferSizeInSamples * 4, &myInfo->mBuffers[i]);

            UInt32 bytesRead = bufferSizeInSamples * 4;
            UInt32 packetsRead = bufferSizeInSamples;

			/*
			 * Read data from audio source into the buffer of AQ
			 * supplied in this callback event. Buffers are used in turns
			 * to hide the latency
			 */
            AudioFileReadPacketData(
					myInfo->mAudioFile,
					false, /* isUseCache, set to false */
					&bytesRead,
					NULL,
					myInfo->mCurrentPacket,
					&packetsRead,
					myInfo->mBuffers[i]->mAudioData);
			/* use the actual number of bytes read, in case fewer than requested were available */
            myInfo->mBuffers[i]->mAudioDataByteSize = bytesRead;
            myInfo->mCurrentPacket += packetsRead;

            AudioQueueEnqueueBuffer(myInfo->mQueue, myInfo->mBuffers[i], 0, NULL);
        }
    } else {
		/* The callback event is the state of AQ changed to not running */
        if (myInfo->mAudioFile != NULL) {
			AudioQueueStop(myInfo->mQueue, false);
            AudioFileClose(myInfo->mAudioFile);
            myInfo->mAudioFile = NULL;

            for (int i = 0; i < 3; i++) {
                AudioQueueFreeBuffer(myInfo->mQueue, myInfo->mBuffers[i]);
                myInfo->mBuffers[i] = NULL;
            }
			CFRunLoopStop(CFRunLoopGetCurrent());
        }
    }
}
Example #3
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
static void MyCopyEncoderCookieToFile(AudioQueueRef theQueue, AudioFileID theFile)
{
	OSStatus err;
	UInt32 propertySize;
	
	// get the magic cookie, if any, from the converter		
	err = AudioQueueGetPropertySize(theQueue, kAudioConverterCompressionMagicCookie, &propertySize);
	
	if (err == noErr && propertySize > 0) {
		// there is valid cookie data to be fetched;  get it
		Byte *magicCookie = (Byte *)malloc(propertySize);
		try {
			XThrowIfError(AudioQueueGetProperty(theQueue, kAudioConverterCompressionMagicCookie, magicCookie,
				&propertySize), "get audio converter's magic cookie");
			// now set the magic cookie on the output file
            // even though some formats have cookies, some files don't take them, so we ignore the error
			/*err =*/ AudioFileSetProperty(theFile, kAudioFilePropertyMagicCookieData, propertySize, magicCookie);
		} 
		catch (CAXException e) {
			char buf[256];
			fprintf(stderr, "MyCopyEncoderCookieToFile: %s (%s)\n", e.mOperation, e.FormatError(buf));
		}
        catch (...) {
            fprintf(stderr, "MyCopyEncoderCookieToFile: Unexpected exception\n");
        }
		free(magicCookie);
	}
}
Example #4
OSStatus SetMagicCookieForFile (
    AudioQueueRef inQueue,                                      // 1
    AudioFileID   inFile                                        // 2
) {
    OSStatus result = noErr;                                    // 3
    UInt32 cookieSize;                                          // 4
 
    if (
            AudioQueueGetPropertySize (                         // 5
                inQueue,
                kAudioQueueProperty_MagicCookie,
                &cookieSize
            ) == noErr
    ) {
        char* magicCookie =
            (char *) malloc (cookieSize);                       // 6
        if (
                AudioQueueGetProperty (                         // 7
                    inQueue,
                    kAudioQueueProperty_MagicCookie,
                    magicCookie,
                    &cookieSize
                ) == noErr
        )
            result =    AudioFileSetProperty (                  // 8
                            inFile,
                            kAudioFilePropertyMagicCookieData,
                            cookieSize,
                            magicCookie
                        );
        free (magicCookie);                                     // 9
    }
    return result;                                              // 10
}
Example #5
void DeriveBufferSize (
    AudioQueueRef                audioQueue,                  // 1
    AudioStreamBasicDescription  &ASBDescription,             // 2
    Float64                      seconds,                     // 3
    UInt32                       *outBufferSize               // 4
) {
    static const int maxBufferSize = 0x50000;                 // 5
 
    int maxPacketSize = ASBDescription.mBytesPerPacket;       // 6
    if (maxPacketSize == 0) {                                 // 7
        UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
        AudioQueueGetProperty (
                audioQueue,
                kAudioConverterPropertyMaximumOutputPacketSize,
                &maxPacketSize,
                &maxVBRPacketSize
        );
    }
 
    Float64 numBytesForTime =
        ASBDescription.mSampleRate * maxPacketSize * seconds; // 8
    *outBufferSize =
    UInt32 (numBytesForTime < maxBufferSize ?
        numBytesForTime : maxBufferSize);                     // 9
}
Example #6
// ____________________________________________________________________________________
// Determine the size, in bytes, of a buffer necessary to represent the supplied number
// of seconds of audio data.
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format, AudioQueueRef queue, float seconds)
{
	int packets, frames, bytes;
	
	frames = (int)ceil(seconds * format->mSampleRate);
	
	if (format->mBytesPerFrame > 0)
		bytes = frames * format->mBytesPerFrame;
	else {
		UInt32 maxPacketSize;
		if (format->mBytesPerPacket > 0)
			maxPacketSize = format->mBytesPerPacket;	// constant packet size
		else {
			UInt32 propertySize = sizeof(maxPacketSize); 
			XThrowIfError(AudioQueueGetProperty(queue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize,
				&propertySize), "couldn't get queue's maximum output packet size");
		}
		if (format->mFramesPerPacket > 0)
			packets = frames / format->mFramesPerPacket;
		else
			packets = frames;	// worst-case scenario: 1 frame in a packet
		if (packets == 0)		// sanity check
			packets = 1;
		bytes = packets * maxPacketSize;
	}
	return bytes;
}
Example #7
void MyCopyEncoderCookieToFile(AudioQueueRef queue,
							AudioFileID theFile)
{
	OSStatus error;
	UInt32 propertySize;

	error = AudioQueueGetPropertySize(queue, kAudioConverterCompressionMagicCookie, &propertySize);

	if (error == noErr && propertySize > 0)
	{
		Byte *magicCookie = (Byte *)malloc(propertySize);
		CheckError(AudioQueueGetProperty(queue,
										kAudioQueueProperty_MagicCookie,
										magicCookie,
										&propertySize),
				"Couldn't get audio queue's magic cookie!");

		CheckError(AudioFileSetProperty(theFile,
										kAudioFilePropertyMagicCookieData,
										propertySize,
										magicCookie),
				"Couldn't set audio file's magic cookie");
		free(magicCookie);
	}
}
Example #8
 bool music_obj<audio_queue_driver>::is_playing() const
 {
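     // report playing when the queue's IsRunning property is set and playback is not paused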
     UInt32 size, is_running;
     
     AudioQueueGetPropertySize(queue_, kAudioQueueProperty_IsRunning, &size);
     AudioQueueGetProperty(queue_, kAudioQueueProperty_IsRunning, &is_running, &size);
     
     return is_running && !is_paused_;
 }
Example #9
void	MyAudioQueuePropertyListenerProc (  void *              inUserData,
										AudioQueueRef           inAQ,
										AudioQueuePropertyID    inID)
{
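	// refresh the global running flag from the queue; treat a failed query as stopped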
	UInt32 size = sizeof(gIsRunning);
	OSStatus err = AudioQueueGetProperty (inAQ, kAudioQueueProperty_IsRunning, &gIsRunning, &size);
	if (err) 
		gIsRunning = 0;
}
Example #10
static void
macosx_audio_out_property_callback (void *user_data, AudioQueueRef audio_queue, AudioQueuePropertyID prop)
{	MacOSXAudioData *audio_data = (MacOSXAudioData *) user_data ;

	if (prop == kAudioQueueProperty_IsRunning)
	{	UInt32 is_running = 0 ;
		UInt32 is_running_size = sizeof (is_running) ;

		AudioQueueGetProperty (audio_queue, kAudioQueueProperty_IsRunning, &is_running, &is_running_size) ;

		if (!is_running)
		{	audio_data->done_playing = SF_TRUE ;
			CFRunLoopStop (CFRunLoopGetCurrent ()) ;
			} ;
		} ;
} /* macosx_audio_out_property_callback */
Example #11
void MyAudioQueueIsRunningCallback(		void*					inClientData, 
										AudioQueueRef			inAQ, 
										AudioQueuePropertyID	inID)
{
	MyData* myData = (MyData*)inClientData;
	
	UInt32 running;
	UInt32 size = sizeof(running);
	OSStatus err = AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &running, &size);
	if (err) { PRINTERROR("get kAudioQueueProperty_IsRunning"); return; }
	if (!running) {
		pthread_mutex_lock(&myData->mutex);
		pthread_cond_signal(&myData->done);
		pthread_mutex_unlock(&myData->mutex);
	}
}
Example #12
MUSIKEngine::PlayState AudioQueueStreamOut::GetPlayState()
{
    if(mInfo.mQueue)
    {
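        // ask the queue whether it is running, then combine that with the pause flag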
        UInt32 IsRunning = 0;
        UInt32 size = sizeof(IsRunning);
        AudioQueueGetProperty (mInfo.mQueue, kAudioQueueProperty_IsRunning, &IsRunning, &size);
        if(  m_bPaused  && IsRunning )
            return MUSIKEngine::Paused;
        else if( !m_bPaused && IsRunning )
            return MUSIKEngine::Playing;
        else
            return MUSIKEngine::Stopped;
    }
    return MUSIKEngine::Invalid;
}
Example #13
AudioQueueLevelMeterState Audio_Queue::levels()
{
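    // enable level metering on the queue the first time levels are requested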
    if (!m_levelMeteringEnabled) {
        UInt32 enabledLevelMeter = true;
        AudioQueueSetProperty(m_outAQ,
                              kAudioQueueProperty_EnableLevelMetering,
                              &enabledLevelMeter,
                              sizeof(UInt32));
        
        m_levelMeteringEnabled = true;
    }
    
    AudioQueueLevelMeterState levelMeter;
    UInt32 levelMeterSize = sizeof(AudioQueueLevelMeterState);
    AudioQueueGetProperty(m_outAQ, kAudioQueueProperty_CurrentLevelMeterDB, &levelMeter, &levelMeterSize);
    return levelMeter;
}
Example #14
void SoundRecorder::setAudioFileMagicCookie(void)
	{
	/* Query the size of the magic cookie: */
	UInt32 magicCookieSize;
	if(AudioQueueGetPropertySize(queue,kAudioQueueProperty_MagicCookie,&magicCookieSize)==noErr)
		{
		/* Allocate a buffer for the magic cookie: */
		char* magicCookie=new char[magicCookieSize];
		
		/* Copy the magic cookie from the audio queue into the audio file: */
		if(AudioQueueGetProperty(queue,kAudioQueueProperty_MagicCookie,magicCookie,&magicCookieSize)==noErr)
			AudioFileSetProperty(audioFile,kAudioFilePropertyMagicCookieData,magicCookieSize,magicCookie);
		
		/* Delete the cookie buffer: */
		delete[] magicCookie;
		}
	}
Example #15
// ____________________________________________________________________________________
// Copy a queue's encoder's magic cookie to an audio file.
static void MyCopyEncoderCookieToFile(AudioQueueRef theQueue, AudioFileID theFile)
{
	OSStatus err;
	UInt32 propertySize;
	
	// get the magic cookie, if any, from the converter		
	err = AudioQueueGetPropertySize(theQueue, kAudioConverterCompressionMagicCookie, &propertySize);
	
	if (err == noErr && propertySize > 0) {
		// there is valid cookie data to be fetched;  get it
		Byte *magicCookie = (Byte *)malloc(propertySize);
		CheckError(AudioQueueGetProperty(theQueue, kAudioConverterCompressionMagicCookie, magicCookie,
			&propertySize), "get audio converter's magic cookie");
		// now set the magic cookie on the output file
		err = AudioFileSetProperty(theFile, kAudioFilePropertyMagicCookieData, propertySize, magicCookie);
		free(magicCookie);
	}
}
Example #16
void Audio_Queue::audioQueueIsRunningCallback(void *inClientData, AudioQueueRef inAQ, AudioQueuePropertyID inID)
{
    Audio_Queue *audioQueue = static_cast<Audio_Queue*>(inClientData);
    
    UInt32 running;
    UInt32 size = sizeof(running);
    OSStatus err = AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &running, &size);
    if (err) {
        AQ_TRACE("error in kAudioQueueProperty_IsRunning");
        audioQueue->setState(IDLE);
        audioQueue->m_lastError = err;
        return;
    }
    if (running) {
        audioQueue->setState(RUNNING);
    } else {
        audioQueue->setState(IDLE);
    }
}    
Example #17
void AudioQueuePropertyListener( void *userData, AudioQueueRef queue, AudioQueuePropertyID propertyID )
{
    if (propertyID == kAudioQueueProperty_IsRunning)
    {
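        // read the current value of the IsRunning property; a value of 0 means the queue has stopped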
        UInt32 isRunning = 0;
        UInt32 size = sizeof(isRunning);

        AudioQueueGetProperty(
            queue,
            propertyID,
            &isRunning,
            &size
        );

        if (isRunning == 0) {
            CFRunLoopStop(CFRunLoopGetCurrent());
        }
    }
}
Example #18
void AudioStreamDecoder::QueueRunningCallback(AudioQueueRef queue, AudioQueuePropertyID property)
{
	long err;

	BAIL_IF(!queue || queue != mQueue, "Invalid queue %p\n", queue);
	BAIL_IF(!mStarted, "Queue not started\n");

	UInt32 running, size = sizeof(running);
	err = AudioQueueGetProperty(mQueue, kAudioQueueProperty_IsRunning, &running, &size);
	BAIL_IF(err, "AudioQueueGetProperty returned %ld\n", err);

	if (!running)
	{
		err = SetFinished();
		BAIL_IF(err, "Finished returned %ld\n", err);
	}

bail:
	return;
}
Example #19
void Audio_Queue::audioQueueIsRunningCallback(void *inClientData, AudioQueueRef inAQ, AudioQueuePropertyID inID)
{
    Audio_Queue *audioQueue = static_cast<Audio_Queue*>(inClientData);
    
    AQ_TRACE("%s: enter\n", __PRETTY_FUNCTION__);
    
    UInt32 running;
    UInt32 output = sizeof(running);
    OSStatus err = AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &running, &output);
    if (err) {
        AQ_TRACE("%s: error in kAudioQueueProperty_IsRunning\n", __PRETTY_FUNCTION__);
        return;
    }
    if (running) {
        AQ_TRACE("audio queue running!\n");
        audioQueue->setState(RUNNING);
    } else {
        audioQueue->setState(IDLE);
    }
}    
Example #20
static void stopped_callback(void *aux, AudioQueueRef queue, AudioQueuePropertyID property) {
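	// mirror the queue's IsRunning state into the player state and invoke the matching start/stop callback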
	audio_player_t *player = (audio_player_t *) aux;
	state_t *state = (state_t *) player->internal_state;

	UInt32 is_running;
	UInt32 size = (UInt32) sizeof(is_running);
	AudioQueueGetProperty(queue, kAudioQueueProperty_IsRunning, &is_running, &size);

	state->running = state->playing = is_running ? true : false;

	if (state->playing) {
		if (player->on_start) {
			player->on_start(player);
		}
	}
	else {
		if (player->on_stop) {
			player->on_stop(player);
		}
	}
}
Example #21
int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format,
							AudioQueueRef queue,
							float seconds)
{
	int packets, frames, bytes;
	frames = (int)ceil(seconds * format->mSampleRate);

	if (format->mBytesPerFrame > 0)
	{
		bytes = frames * format->mBytesPerFrame;
	} else {
		UInt32 maxPacketSize;
		if (format->mBytesPerPacket)
			//constant packet size
			maxPacketSize = format->mBytesPerPacket;
		else
		{
			//Get the largest single packet size possible
			UInt32 propertySize = sizeof(maxPacketSize);
			CheckError(AudioQueueGetProperty(queue,
											kAudioConverterPropertyMaximumOutputPacketSize,
											&maxPacketSize,
											&propertySize),
					"Couldn't get queue's maximum output packet size");
		}
		if (format->mFramesPerPacket > 0)
			packets = frames /format->mFramesPerPacket;
		else
			//Worst case scenario: 1 frame in a packet
			packets = frames;

		//Sanity Check
		if (packets == 0)
			packets = 1;
		bytes = packets * maxPacketSize;
	}
	return bytes;
}
Example #22
usize calc_buffer_size() const
{
    int packets, frames, bytes;

    frames = (int)ceil(d_owner->seconds * d_owner->format->mSampleRate);

    if (d_owner->format->mBytesPerFrame > 0)
    {
        bytes = frames * d_owner->format->mBytesPerFrame;
    }
    else
    {
        UInt32 maxPacketSize;
        if (d_owner->format->mBytesPerPacket > 0)
        {
            maxPacketSize = d_owner->format->mBytesPerPacket;	// constant packet size
        }
        else
        {
            UInt32 propertySize = sizeof(maxPacketSize);
            if (AudioQueueGetProperty(d_owner->queue,
                                      kAudioConverterPropertyMaximumOutputPacketSize,
                                      &maxPacketSize,
                                      &propertySize))
                return 0;
        }
        if (d_owner->format->mFramesPerPacket > 0)
            packets = frames / d_owner->format->mFramesPerPacket;
        else
            packets = frames;	// worst-case scenario: 1 frame in a packet
        if (packets == 0)		// sanity check
            packets = 1;
        bytes = packets * maxPacketSize;
    }

    return bytes;
}
Example #23
		static void QueueStoppedProc(	void *                  inUserData,
										AudioQueueRef           inAQ,
										AudioQueuePropertyID    inID)
		{
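			// if the queue has stopped and a new queue was requested, dispose it and set up a fresh one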
			UInt32 isRunning;
			UInt32 propSize = sizeof(isRunning);

			BackgroundTrackMgr *THIS = (BackgroundTrackMgr*)inUserData;
			OSStatus result = AudioQueueGetProperty(inAQ, kAudioQueueProperty_IsRunning, &isRunning, &propSize);
				
			if ((!isRunning) && (THIS->mMakeNewQueueWhenStopped))
			{
				result = AudioQueueDispose(inAQ, true);
					AssertNoError("Error disposing queue", end);
				result = THIS->SetupQueue(CurFileInfo);
					AssertNoError("Error setting up new queue", end);
				result = THIS->SetupBuffers(CurFileInfo);
					AssertNoError("Error setting up new queue buffers", end);
				result = THIS->Start();
					AssertNoError("Error starting queue", end);
			}
		end:
			return;
		}
Example #24
void mf_rdpsnd_derive_buffer_size (AudioQueueRef                audioQueue,
                                   AudioStreamBasicDescription  *ASBDescription,
                                   Float64                      seconds,
                                   UInt32                       *outBufferSize)
{
	static const int maxBufferSize = 0x50000;
	
	int maxPacketSize = ASBDescription->mBytesPerPacket;
	if (maxPacketSize == 0)
	{
		UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
		AudioQueueGetProperty (audioQueue,
				       kAudioQueueProperty_MaximumOutputPacketSize,
				       // in Mac OS X v10.5, instead use
				       //   kAudioConverterPropertyMaximumOutputPacketSize
				       &maxPacketSize,
				       &maxVBRPacketSize
				       );
	}
	
	Float64 numBytesForTime =
	ASBDescription->mSampleRate * maxPacketSize * seconds;
	*outBufferSize = (UInt32) (numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
}
Example #25
// ____________________________________________________________________________________
// main program
int	main(int argc, const char *argv[])
{
	const char *recordFileName = NULL;
	int i, nchannels, bufferByteSize;
	float seconds = 0;
	AudioStreamBasicDescription recordFormat;
	MyRecorder aqr;
	UInt32 size;
	CFURLRef url;
    OSStatus err = noErr;
	
	// fill structures with 0/NULL
	memset(&recordFormat, 0, sizeof(recordFormat));
	memset(&aqr, 0, sizeof(aqr));
	
	// parse arguments
	for (i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		
		if (arg[0] == '-') {
			switch (arg[1]) {
			case 'c':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%d", &nchannels) != 1)
					usage();
				recordFormat.mChannelsPerFrame = nchannels;
				break;
			case 'd':
				if (++i == argc) MissingArgument(arg);
				if (StrTo4CharCode(argv[i], &recordFormat.mFormatID) == 0)
					ParseError(arg, argv[i]);
				break;
			case 'r':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%lf", &recordFormat.mSampleRate) != 1)
					ParseError(arg, argv[i]);
				break;
			case 's':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%f", &seconds) != 1)
					ParseError(arg, argv[i]);
				break;
			case 'v':
				aqr.verbose = TRUE;
				break;
			default:
				fprintf(stderr, "unknown option: '%s'\n\n", arg);
				usage();
			}
		} else if (recordFileName != NULL) {
			fprintf(stderr, "may only specify one file to record\n\n");
			usage();
		} else
			recordFileName = arg;
	}
	if (recordFileName == NULL) // no record file path provided
		usage();
	
	// determine file format
	AudioFileTypeID audioFileType = kAudioFileCAFType;	// default to CAF
	CFStringRef cfRecordFileName = CFStringCreateWithCString(NULL, recordFileName, kCFStringEncodingUTF8);
	InferAudioFileFormatFromFilename(cfRecordFileName, &audioFileType);
	CFRelease(cfRecordFileName);

	// adapt record format to hardware and apply defaults
	if (recordFormat.mSampleRate == 0.)
		MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);

	if (recordFormat.mChannelsPerFrame == 0)
		recordFormat.mChannelsPerFrame = 2;
	
	if (recordFormat.mFormatID == 0 || recordFormat.mFormatID == kAudioFormatLinearPCM) {
		// default to PCM, 16 bit int
		recordFormat.mFormatID = kAudioFormatLinearPCM;
		recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		recordFormat.mBitsPerChannel = 16;
		if (MyFileFormatRequiresBigEndian(audioFileType, recordFormat.mBitsPerChannel))
			recordFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame =
			(recordFormat.mBitsPerChannel / 8) * recordFormat.mChannelsPerFrame;
		recordFormat.mFramesPerPacket = 1;
		recordFormat.mReserved = 0;
	}

	try {
		// create the queue
		XThrowIfError(AudioQueueNewInput(
			&recordFormat,
			MyInputBufferHandler,
			&aqr /* userData */,
			NULL /* run loop */, NULL /* run loop mode */,
			0 /* flags */, &aqr.queue), "AudioQueueNewInput failed");

		// get the record format back from the queue's audio converter --
		// the file may require a more specific stream description than was necessary to create the encoder.
		size = sizeof(recordFormat);
		XThrowIfError(AudioQueueGetProperty(aqr.queue, kAudioConverterCurrentOutputStreamDescription,
			&recordFormat, &size), "couldn't get queue's format");

		// convert recordFileName from C string to CFURL
		url = CFURLCreateFromFileSystemRepresentation(NULL, (Byte *)recordFileName, strlen(recordFileName), FALSE);
		XThrowIfError(!url, "couldn't create record file");
        
		// create the audio file
        err = AudioFileCreateWithURL(url, audioFileType, &recordFormat, kAudioFileFlags_EraseFile,
                                              &aqr.recordFile);
        CFRelease(url); // release first, and then bail out on error
		XThrowIfError(err, "AudioFileCreateWithURL failed");
		

		// copy the cookie first to give the file object as much info as we can about the data going in
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);

		// allocate and enqueue buffers
		bufferByteSize = MyComputeRecordBufferSize(&recordFormat, aqr.queue, 0.5);	// enough bytes for half a second
		for (i = 0; i < kNumberRecordBuffers; ++i) {
			AudioQueueBufferRef buffer;
			XThrowIfError(AudioQueueAllocateBuffer(aqr.queue, bufferByteSize, &buffer),
				"AudioQueueAllocateBuffer failed");
			XThrowIfError(AudioQueueEnqueueBuffer(aqr.queue, buffer, 0, NULL),
				"AudioQueueEnqueueBuffer failed");
		}
		
		// record
		if (seconds > 0) {
			// user requested a fixed-length recording (specified a duration with -s)
			// to time the recording more accurately, watch the queue's IsRunning property
			XThrowIfError(AudioQueueAddPropertyListener(aqr.queue, kAudioQueueProperty_IsRunning,
				MyPropertyListener, &aqr), "AudioQueueAddPropertyListener failed");
			
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			CFAbsoluteTime waitUntil = CFAbsoluteTimeGetCurrent() + 10;

			// wait for the started notification
			while (aqr.queueStartStopTime == 0.) {
				CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.010, FALSE);
				if (CFAbsoluteTimeGetCurrent() >= waitUntil) {
					fprintf(stderr, "Timeout waiting for the queue's IsRunning notification\n");
					goto cleanup;
				}
			}
			printf("Recording...\n");
			CFAbsoluteTime stopTime = aqr.queueStartStopTime + seconds;
			CFAbsoluteTime now = CFAbsoluteTimeGetCurrent();
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, stopTime - now, FALSE);
		} else {
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			
			// and wait
			printf("Recording, press <return> to stop:\n");
			getchar();
		}

		// end recording
		printf("* recording done *\n");
		
		aqr.running = FALSE;
		XThrowIfError(AudioQueueStop(aqr.queue, TRUE), "AudioQueueStop failed");
		
		// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);
		
cleanup:
		AudioQueueDispose(aqr.queue, TRUE);
		AudioFileClose(aqr.recordFile);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return e.mError;
	}
		
	return 0;
}
Example #26
OSStatus
darwin_configure_input_audio_queue (
                                 cahal_device*                 in_device,
                                 cahal_recorder_info*          in_callback_info,
                                 AudioStreamBasicDescription*  io_asbd,
                                 AudioQueueRef*                out_audio_queue
                                 )
{
  OSStatus result = noErr;
  
  if( NULL != io_asbd )
  {
    result =
    AudioQueueNewInput  (
                         io_asbd,
                         darwin_recorder_callback,
                         in_callback_info,
                         NULL,
                         kCFRunLoopCommonModes,
                         0,
                         out_audio_queue
                         );
    
    if( noErr == result )
    {
      if( NULL != in_device->device_uid )
      {
        CFStringRef device_uid =
        CFStringCreateWithCString (
                                   NULL,
                                   in_device->device_uid,
                                   kCFStringEncodingASCII
                                   );
        
        CPC_LOG (
                 CPC_LOG_LEVEL_TRACE,
                 "Setting queue device to %s.",
                 in_device->device_uid
                 );
        
        result =
        AudioQueueSetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_CurrentDevice,
                               &device_uid,
                               sizeof( device_uid )
                               );
        
        if( NULL != device_uid )
        {
          CFRelease( device_uid );
        }
      }
      
      if( result )
      {
        CPC_ERROR (
                   "Error setting current device (0x%x) to %s: 0x%x",
                   kAudioQueueProperty_CurrentDevice,
                   in_device->device_uid,
                   result
                   );
        
        CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
      }
      else
      {
        UINT32 property_size = sizeof( AudioStreamBasicDescription );
        
        result =
        AudioQueueGetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_StreamDescription,
                               io_asbd,
                               &property_size
                               );
        
        if( result )
        {
          CPC_ERROR(
                    "Error accessing property 0x%x on AudioQueue: %d",
                    kAudioConverterCurrentInputStreamDescription,
                    result
                    );
          
          CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
        }
      }
    }
    else
    {
      CPC_ERROR (
                 "Error creating AudioQueue: 0x%x.",
                 result
                 );
      
      CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
    }
  }
  else
  {
    CPC_LOG_STRING  (
                     CPC_LOG_LEVEL_ERROR,
                     "Invalid basic stream description"
                     );
  }
  
  return( result );
}
Example #27
int	main(int argc, const char *argv[])
{
	MyRecorder recorder = {0};
	AudioStreamBasicDescription recordFormat = {0};
	memset(&recordFormat, 0, sizeof(recordFormat));
	
	// Configure the output data format to be AAC
	recordFormat.mFormatID = kAudioFormatMPEG4AAC;
	recordFormat.mChannelsPerFrame = 2;
	
	// get the sample rate of the default input device
	// we use this to adapt the output data format to match hardware capabilities
	MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
	
	// ProTip: Use the AudioFormat API to trivialize ASBD creation.
	//         input: at least the mFormatID; however, at this point we already have
	//                mSampleRate, mFormatID, and mChannelsPerFrame
	//         output: the remainder of the ASBD will be filled out as much as possible
	//                 given the information known about the format
	UInt32 propSize = sizeof(recordFormat);
	CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
									  &propSize, &recordFormat), "AudioFormatGetProperty failed");
	
	// create an input (recording) queue
	AudioQueueRef queue = {0};
	CheckError(AudioQueueNewInput(&recordFormat, // ASBD
								  MyAQInputCallback, // Callback
								  &recorder, // user data
								  NULL, // run loop
								  NULL, // run loop mode
								  0, // flags (always 0)
								  // &recorder.queue), // output: reference to AudioQueue object
								  &queue),
			   "AudioQueueNewInput failed");
	
	// since the queue is now initialized, we ask its Audio Converter object
	// for the ASBD it has configured itself with. The file may require a more
	// specific stream description than was necessary to create the audio queue.
	//
	// for example: certain fields in an ASBD cannot possibly be known until its
	// codec is instantiated (in this case, by the AudioQueue's Audio Converter object)
	UInt32 size = sizeof(recordFormat);
	CheckError(AudioQueueGetProperty(queue, kAudioConverterCurrentOutputStreamDescription,
									 &recordFormat, &size), "couldn't get queue's format");
	
	// create the audio file
	CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("./output.caf"), kCFURLPOSIXPathStyle, false);
	CFShow (myFileURL);
	CheckError(AudioFileCreateWithURL(myFileURL, kAudioFileCAFType, &recordFormat,
									  kAudioFileFlags_EraseFile, &recorder.recordFile), "AudioFileCreateWithURL failed");
	CFRelease(myFileURL);
	
	// many encoded formats require a 'magic cookie'. we set the cookie first
	// to give the file object as much info as we can about the data it will be receiving
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
	// allocate and enqueue buffers
	int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, 0.5);	// enough bytes for half a second
	int bufferIndex;
    for (bufferIndex = 0; bufferIndex < kNumberRecordBuffers; ++bufferIndex)
	{
		AudioQueueBufferRef buffer;
		CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer),
				   "AudioQueueAllocateBuffer failed");
		CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL),
				   "AudioQueueEnqueueBuffer failed");
	}
	
	// start the queue. this function returns immediately and begins
	// invoking the callback, as needed, asynchronously.
	recorder.running = TRUE;
	CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
	
	// and wait
	printf("Recording, press <return> to stop:\n");
	getchar();
	
	// end recording
	printf("* recording done *\n");
	recorder.running = FALSE;
	CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
	
	// a codec may update its magic cookie at the end of an encoding session
	// so reapply it to the file now
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
cleanup:
	AudioQueueDispose(queue, TRUE);
	AudioFileClose(recorder.recordFile);
	
	return 0;
}
Example #28
static void mf_peer_rdpsnd_activated(rdpsnd_server_context* context)
{
	printf("RDPSND Activated\n");
    
    
    
    printf("Let's create an audio queue for input!\n");
    
    OSStatus status;
    
    recorderState.dataFormat.mSampleRate = 44100.0;
    recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
    recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
    recorderState.dataFormat.mBytesPerPacket = 4;
    recorderState.dataFormat.mFramesPerPacket = 1;
    recorderState.dataFormat.mBytesPerFrame = 4;
    recorderState.dataFormat.mChannelsPerFrame = 2;
    recorderState.dataFormat.mBitsPerChannel = 16;
    
    recorderState.snd_context = context;
    
    status = AudioQueueNewInput(&recorderState.dataFormat,
                                mf_peer_rdpsnd_input_callback,
                                &recorderState,
                                NULL,
                                kCFRunLoopCommonModes,
                                0,
                                &recorderState.queue);
    
    if (status != noErr)
    {
        printf("Failed to create a new Audio Queue. Status code: %d\n", status);
    }
    
    
    UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
    
    AudioQueueGetProperty(recorderState.queue,
                          kAudioConverterCurrentInputStreamDescription,
                          &recorderState.dataFormat,
                          &dataFormatSize);
    
    
    mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
    
    
    printf("Preparing a set of buffers...");
    
    for (int i = 0; i < snd_numBuffers; ++i)
    {
        AudioQueueAllocateBuffer(recorderState.queue,
                                 recorderState.bufferByteSize,
                                 &recorderState.buffers[i]);
        
        AudioQueueEnqueueBuffer(recorderState.queue,
                                recorderState.buffers[i],
                                0,
                                NULL);
    }
    
    printf("done\n");
    
    printf("recording...\n");
    
    
    
    recorderState.currentPacket = 0;
    recorderState.isRunning = true;
    
    context->SelectFormat(context, 4);
    context->SetVolume(context, 0x7FFF, 0x7FFF);
    
    AudioQueueStart (recorderState.queue, NULL);

}
void SoundRecorder::init(const char* audioSource,const SoundDataFormat& sFormat,const char* outputFileName)
	{
	/* Store and sanify the sound data format: */
	format.mSampleRate=double(sFormat.framesPerSecond);
	format.mFormatID=kAudioFormatLinearPCM;
	format.mFormatFlags=0x0;
	format.mBitsPerChannel=sFormat.bitsPerSample>8?(sFormat.bitsPerSample+7)&~0x7:8;
	format.mChannelsPerFrame=sFormat.samplesPerFrame>=1?sFormat.samplesPerFrame:1;
	format.mBytesPerFrame=format.mChannelsPerFrame*(format.mBitsPerChannel/8);
	format.mFramesPerPacket=1;
	format.mBytesPerPacket=format.mFramesPerPacket*format.mBytesPerFrame;
	
	/* Determine the output file format from the file name extension: */
	AudioFileTypeID audioFileType=kAudioFileWAVEType; // Not really a default; just to make compiler happy
	const char* ext=Misc::getExtension(outputFileName);
	if(*ext=='\0'||strcasecmp(ext,".aiff")==0)
		{
		/* Adjust the sound data format for AIFF files: */
		audioFileType=kAudioFileAIFFType;
		format.mFormatFlags=kLinearPCMFormatFlagIsBigEndian|kLinearPCMFormatFlagIsSignedInteger|kLinearPCMFormatFlagIsPacked;
		}
	else if(strcasecmp(ext,".wav")==0)
		{
		/* Adjust the sound data format for WAV files: */
		audioFileType=kAudioFileWAVEType;
		format.mFormatFlags=kLinearPCMFormatFlagIsPacked;
		if(format.mBitsPerChannel>8)
			format.mFormatFlags|=kLinearPCMFormatFlagIsSignedInteger;
		}
	else
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Output file name %s has unrecognized extension",outputFileName);
	
	/* Create the recording audio queue: */
	if(AudioQueueNewInput(&format,handleInputBufferWrapper,this,0,kCFRunLoopCommonModes,0,&queue)!=noErr)
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while creating audio queue");
	
	/* Retrieve the fully specified audio data format from the audio queue: */
	UInt32 formatSize=sizeof(format);
	if(AudioQueueGetProperty(queue,kAudioConverterCurrentOutputStreamDescription,&format,&formatSize)!=noErr)
		{
		AudioQueueDispose(queue,true);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while retrieving audio queue sound format");
		}
	
	/* Open the target audio file: */
	CFURLRef audioFileURL=CFURLCreateFromFileSystemRepresentation(0,reinterpret_cast<const UInt8*>(outputFileName),strlen(outputFileName),false);
	if(AudioFileCreateWithURL(audioFileURL,audioFileType,&format,kAudioFileFlags_EraseFile,&audioFile)!=noErr)
		{
		AudioQueueDispose(queue,true);
		CFRelease(audioFileURL);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while opening output file %s",outputFileName);
		}
	CFRelease(audioFileURL);
	
	/* Calculate an appropriate buffer size and allocate the sound buffers: */
	int maxPacketSize=format.mBytesPerPacket;
	if(maxPacketSize==0) // Must be a variable bit rate sound format
		{
		/* Query the expected maximum packet size from the audio queue: */
		UInt32 maxVBRPacketSize=sizeof(maxPacketSize);
		if(AudioQueueGetProperty(queue,kAudioConverterPropertyMaximumOutputPacketSize,&maxPacketSize,&maxVBRPacketSize)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while calcuating sample buffer size");
			}
		}
	
	/* Calculate an appropriate buffer size based on the given duration: */
	int numPackets=int(floor(double(format.mSampleRate)*0.25+0.5));
	bufferSize=UInt32(numPackets*maxPacketSize);
	
	/* Create the sample buffers: */
	for(int i=0;i<2;++i)
		{
		/* Create the sound buffer: */
		if(AudioQueueAllocateBuffer(queue,bufferSize,&buffers[i])!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while allocating sample buffer %d",i);
			}
		
		/* Add the buffer to the queue: */
		if(AudioQueueEnqueueBuffer(queue,buffers[i],0,0)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while enqueuing sample buffer %d",i);
			}
		}
	}
Example #29
Example #30
static void mf_peer_rdpsnd_activated(RdpsndServerContext* context)
{
	OSStatus status;
	int i, j;
	BOOL formatAgreed = FALSE;
	AUDIO_FORMAT* agreedFormat = NULL;
	
	//we should actually loop through the list of client formats here
	//and see if we can send the client something that it supports...
	
	printf("Client supports the following %d formats: \n", context->num_client_formats);
	
	for (i = 0; i < context->num_client_formats; i++)
	{
		/* TODO: improve the way we agree on a format */
		for (j = 0; j < context->num_server_formats; j++)
		{
			if ((context->client_formats[i].wFormatTag == context->server_formats[j].wFormatTag) &&
			    (context->client_formats[i].nChannels == context->server_formats[j].nChannels) &&
			    (context->client_formats[i].nSamplesPerSec == context->server_formats[j].nSamplesPerSec))
			{
				printf("agreed on format!\n");
				formatAgreed = TRUE;
				agreedFormat = (AUDIO_FORMAT*)&context->server_formats[j];
				break;
			}
		}
		if (formatAgreed == TRUE)
			break;
		
	}
	
	if (formatAgreed == FALSE)
	{
		printf("Could not agree on a audio format with the server\n");
		return;
	}
	
	context->SelectFormat(context, i);
	context->SetVolume(context, 0x7FFF, 0x7FFF);
	
	switch (agreedFormat->wFormatTag)
	{
		case WAVE_FORMAT_ALAW:
			recorderState.dataFormat.mFormatID = kAudioFormatDVIIntelIMA;
			break;
			
		case WAVE_FORMAT_PCM:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
			
		default:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
	}
	
	recorderState.dataFormat.mSampleRate = agreedFormat->nSamplesPerSec;
	recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
	recorderState.dataFormat.mBytesPerPacket = 4;
	recorderState.dataFormat.mFramesPerPacket = 1;
	recorderState.dataFormat.mBytesPerFrame = 4;
	recorderState.dataFormat.mChannelsPerFrame = agreedFormat->nChannels;
	recorderState.dataFormat.mBitsPerChannel = agreedFormat->wBitsPerSample;
	
	
	recorderState.snd_context = context;
	
	status = AudioQueueNewInput(&recorderState.dataFormat,
				    mf_peer_rdpsnd_input_callback,
				    &recorderState,
				    NULL,
				    kCFRunLoopCommonModes,
				    0,
				    &recorderState.queue);
	
	if (status != noErr)
	{
		printf("Failed to create a new Audio Queue. Status code: %d\n", status);
	}
	
	
	UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
	
	AudioQueueGetProperty(recorderState.queue,
			      kAudioConverterCurrentInputStreamDescription,
			      &recorderState.dataFormat,
			      &dataFormatSize);
	
	
	mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
	
		
	for (i = 0; i < SND_NUMBUFFERS; ++i)
	{
		AudioQueueAllocateBuffer(recorderState.queue,
					 recorderState.bufferByteSize,
					 &recorderState.buffers[i]);
		
		AudioQueueEnqueueBuffer(recorderState.queue,
					recorderState.buffers[i],
					0,
					NULL);
	}
	
	
	recorderState.currentPacket = 0;
	recorderState.isRunning = true;
	
	AudioQueueStart (recorderState.queue, NULL);
	
}