Example 1
int coreaudio_recorder_alloc(struct ausrc_st **stp, const struct ausrc *as,
			     struct media_ctx **ctx,
			     struct ausrc_prm *prm, const char *device,
			     ausrc_read_h *rh, ausrc_error_h *errh, void *arg)
{
	AudioStreamBasicDescription fmt;
	struct ausrc_st *st;
	uint32_t sampc, bytc, i;
	OSStatus status;
	int err;

	(void)ctx;
	(void)device;
	(void)errh;

	if (!stp || !as || !prm)
		return EINVAL;

	st = mem_zalloc(sizeof(*st), ausrc_destructor);
	if (!st)
		return ENOMEM;

	st->ptime = prm->ptime;
	st->as  = as;
	st->rh  = rh;
	st->arg = arg;

	sampc = prm->srate * prm->ch * prm->ptime / 1000;
	bytc  = sampc * 2;

	err = pthread_mutex_init(&st->mutex, NULL);
	if (err)
		goto out;

	err = audio_session_enable();
	if (err)
		goto out;

	fmt.mSampleRate       = (Float64)prm->srate;
	fmt.mFormatID         = kAudioFormatLinearPCM;
	fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
		                kAudioFormatFlagIsPacked;
#ifdef __BIG_ENDIAN__
	fmt.mFormatFlags     |= kAudioFormatFlagIsBigEndian;
#endif

	fmt.mFramesPerPacket  = 1;
	fmt.mBytesPerFrame    = prm->ch * 2;
	fmt.mBytesPerPacket   = prm->ch * 2;
	fmt.mChannelsPerFrame = prm->ch;
	fmt.mBitsPerChannel   = 16;

	status = AudioQueueNewInput(&fmt, record_handler, st, NULL,
				     kCFRunLoopCommonModes, 0, &st->queue);
	if (status) {
		warning("coreaudio: AudioQueueNewInput error: %i\n", status);
		err = ENODEV;
		goto out;
	}

	for (i=0; i<ARRAY_SIZE(st->buf); i++)  {

		status = AudioQueueAllocateBuffer(st->queue, bytc,
						  &st->buf[i]);
		if (status)  {
			err = ENOMEM;
			goto out;
		}

		AudioQueueEnqueueBuffer(st->queue, st->buf[i], 0, NULL);
	}

	status = AudioQueueStart(st->queue, NULL);
	if (status)  {
		warning("coreaudio: AudioQueueStart error %i\n", status);
		err = ENODEV;
		goto out;
	}

 out:
	if (err)
		mem_deref(st);
	else
		*stp = st;

	return err;
}
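
The record_handler callback registered above is not shown in the snippet. Below is a minimal sketch of what such an AudioQueueInputCallback looks like; the signature is fixed by AudioToolbox, while the body (taking the mutex, invoking the read handler rh with 16-bit samples, re-enqueueing the buffer) is an assumption based on how the fields are initialized in coreaudio_recorder_alloc.

static void record_handler(void *userdata, AudioQueueRef inQ,
			   AudioQueueBufferRef inQB,
			   const AudioTimeStamp *inStartTime,
			   UInt32 inNumPackets,
			   const AudioStreamPacketDescription *inPacketDesc)
{
	struct ausrc_st *st = userdata;
	ausrc_read_h *rh;

	(void)inStartTime;
	(void)inNumPackets;
	(void)inPacketDesc;

	pthread_mutex_lock(&st->mutex);
	rh = st->rh;
	pthread_mutex_unlock(&st->mutex);

	if (rh) {
		/* hand the captured 16-bit samples to the application;
		   the exact read-handler signature is an assumption */
		rh(inQB->mAudioData, inQB->mAudioDataByteSize/2, st->arg);
	}

	/* return the buffer to the queue so capture continues */
	AudioQueueEnqueueBuffer(inQ, inQB, 0, NULL);
}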
Example 2
void SoundRecorder::init(const char* audioSource,const SoundDataFormat& sFormat,const char* outputFileName)
	{
	/* Store and sanitize the sound data format: */
	format.mSampleRate=double(sFormat.framesPerSecond);
	format.mFormatID=kAudioFormatLinearPCM;
	format.mFormatFlags=0x0;
	format.mBitsPerChannel=sFormat.bitsPerSample>8?(sFormat.bitsPerSample+7)&~0x7:8;
	format.mChannelsPerFrame=sFormat.samplesPerFrame>=1?sFormat.samplesPerFrame:1;
	format.mBytesPerFrame=format.mChannelsPerFrame*(format.mBitsPerChannel/8);
	format.mFramesPerPacket=1;
	format.mBytesPerPacket=format.mFramesPerPacket*format.mBytesPerFrame;
	
	/* Determine the output file format from the file name extension: */
	AudioFileTypeID audioFileType=kAudioFileWAVEType; // Not really a default; just to make compiler happy
	const char* ext=Misc::getExtension(outputFileName);
	if(*ext=='\0'||strcasecmp(ext,".aiff")==0)
		{
		/* Adjust the sound data format for AIFF files: */
		audioFileType=kAudioFileAIFFType;
		format.mFormatFlags=kLinearPCMFormatFlagIsBigEndian|kLinearPCMFormatFlagIsSignedInteger|kLinearPCMFormatFlagIsPacked;
		}
	else if(strcasecmp(ext,".wav")==0)
		{
		/* Adjust the sound data format for WAV files: */
		audioFileType=kAudioFileWAVEType;
		format.mFormatFlags=kLinearPCMFormatFlagIsPacked;
		if(format.mBitsPerChannel>8)
			format.mFormatFlags|=kLinearPCMFormatFlagIsSignedInteger;
		}
	else
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Output file name %s has unrecognized extension",outputFileName);
	
	/* Create the recording audio queue: */
	if(AudioQueueNewInput(&format,handleInputBufferWrapper,this,0,kCFRunLoopCommonModes,0,&queue)!=noErr)
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while creating audio queue");
	
	/* Retrieve the fully specified audio data format from the audio queue: */
	UInt32 formatSize=sizeof(format);
	if(AudioQueueGetProperty(queue,kAudioConverterCurrentOutputStreamDescription,&format,&formatSize)!=noErr)
		{
		AudioQueueDispose(queue,true);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while retrieving audio queue sound format");
		}
	
	/* Open the target audio file: */
	CFURLRef audioFileURL=CFURLCreateFromFileSystemRepresentation(0,reinterpret_cast<const UInt8*>(outputFileName),strlen(outputFileName),false);
	if(AudioFileCreateWithURL(audioFileURL,audioFileType,&format,kAudioFileFlags_EraseFile,&audioFile)!=noErr)
		{
		AudioQueueDispose(queue,true);
		CFRelease(audioFileURL);
		Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while opening output file %s",outputFileName);
		}
	CFRelease(audioFileURL);
	
	/* Calculate an appropriate buffer size and allocate the sound buffers: */
	int maxPacketSize=format.mBytesPerPacket;
	if(maxPacketSize==0) // Must be a variable bit rate sound format
		{
		/* Query the expected maximum packet size from the audio queue: */
		UInt32 maxVBRPacketSize=sizeof(maxPacketSize);
		if(AudioQueueGetProperty(queue,kAudioConverterPropertyMaximumOutputPacketSize,&maxPacketSize,&maxVBRPacketSize)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while calcuating sample buffer size");
			}
		}
	
	/* Calculate an appropriate buffer size based on the given duration: */
	int numPackets=int(floor(double(format.mSampleRate)*0.25+0.5));
	bufferSize=UInt32(numPackets*maxPacketSize);
	
	/* Create the sample buffers: */
	for(int i=0;i<2;++i)
		{
		/* Create the sound buffer: */
		if(AudioQueueAllocateBuffer(queue,bufferSize,&buffers[i])!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while allocating sample buffer %d",i);
			}
		
		/* Add the buffer to the queue: */
		if(AudioQueueEnqueueBuffer(queue,buffers[i],0,0)!=noErr)
			{
			AudioQueueDispose(queue,true);
			AudioFileClose(audioFile);
			Misc::throwStdErr("SoundRecorder::SoundRecorder: Error while enqueuing sample buffer %d",i);
			}
		}
	}
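
The handleInputBufferWrapper callback registered above forwards to an input handler that is not shown. The core of such a callback typically computes the packet count, appends the captured packets to the audio file, and hands the buffer back to the queue. A sketch under that assumption; in the real Vrui code this is a SoundRecorder member, and the numRecordedPackets counter is a hypothetical name:

/* Hedged sketch of the file-writing core behind handleInputBufferWrapper,
   shown as a free function over the same CoreAudio handles. */
static void handleInputBuffer(AudioFileID audioFile,
                              const AudioStreamBasicDescription *format,
                              SInt64 *numRecordedPackets,
                              AudioQueueRef queue, AudioQueueBufferRef buffer,
                              UInt32 numPacketDescs,
                              const AudioStreamPacketDescription *packetDescs)
{
	/* for CBR formats the queue reports zero packet descriptions */
	UInt32 numPackets = numPacketDescs;
	if (numPackets == 0 && format->mBytesPerPacket != 0)
		numPackets = buffer->mAudioDataByteSize / format->mBytesPerPacket;

	/* append the captured packets to the audio file */
	if (AudioFileWritePackets(audioFile, false, buffer->mAudioDataByteSize,
	                          packetDescs, *numRecordedPackets, &numPackets,
	                          buffer->mAudioData) == noErr)
		*numRecordedPackets += numPackets;

	/* hand the buffer back to the queue for reuse */
	AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
}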
Example 3
bool UBAudioQueueRecorder::init(const QString& waveInDeviceName)
{
    if(mIsRecording)
    {
        setLastErrorMessage("Already recording ...");
        return false;
    }

    OSStatus err = AudioQueueNewInput (&sAudioFormat, UBAudioQueueRecorder::audioQueueInputCallback,
                    this, 0, 0, 0, &mQueue);

    if (err)
    {
        setLastErrorMessage(QString("Cannot acquire audio input %1").arg(err));
        mQueue = 0;
        close();
        return false;
    }

    //qDebug() << "init with waveInDeviceName ..." << waveInDeviceName;

    if (waveInDeviceName.length() > 0 && waveInDeviceName != "Default")
    {
        AudioDeviceID deviceID = deviceIDFromDeviceName(waveInDeviceName);

        if (deviceID)
        {
            QString deviceUID = deviceUIDFromDeviceID(deviceID);
            if (deviceUID.length() > 0)
            {
                CFStringRef sr = CFStringCreateWithCString(0, deviceUID.toUtf8().constData(), kCFStringEncodingUTF8);

                err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_CurrentDevice, &sr, sizeof(CFStringRef));
                if (err)
                {
                    setLastErrorMessage(QString("Cannot set audio input %1 (%2)").arg(waveInDeviceName).arg(err));
                }
                else
                {
                    qDebug() << "recording with input" << waveInDeviceName;
                }
            }
            else
            {
                setLastErrorMessage(QString("Cannot find audio input device UID with ID %1 (%2)").arg(deviceID).arg(err));
            }
        }
        else
        {
            setLastErrorMessage(QString("Cannot find audio input with name %1 (%2)").arg(waveInDeviceName).arg(err));
        }
    }

    UInt32 monitor = true;

    err = AudioQueueSetProperty(mQueue, kAudioQueueProperty_EnableLevelMetering , &monitor, sizeof(UInt32));
    if (err)
    {
        qWarning() << QString("Cannot set recording level monitoring %1").arg(err);
    }

    int nbBuffers = 6;
    mSampleBufferSize = sAudioFormat.mSampleRate *  sAudioFormat.mChannelsPerFrame
                * 2 * mBufferLengthInMs / 1000; // sample rate * channels * 2 bytes (16 bit) * buffer length in ms

    for (int i = 0; i < nbBuffers; i++)
    {
        AudioQueueBufferRef outBuffer;
        err = AudioQueueAllocateBuffer(mQueue, mSampleBufferSize, &outBuffer);

        if (err)
        {
            setLastErrorMessage(QString("Cannot allocate audio buffer %1").arg(err));
            close();
            return false;
        }

        mBuffers << outBuffer;
    }

    foreach(AudioQueueBufferRef buffer, mBuffers)
    {
        err = AudioQueueEnqueueBuffer(mQueue, buffer, 0, 0);
        if (err)
        {
            setLastErrorMessage(QString("Cannot enqueue audio buffer %1").arg(err));
            close();
            return false;
        }
    }

    return true; // (snippet truncated in the original; init() presumably ends by reporting success)
}
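
The deviceUIDFromDeviceID helper used above maps a HAL device ID to its UID string. A sketch of the underlying CoreAudio call (the Qt wrapper presumably converts the CFStringRef to a QString; copyDeviceUID is a hypothetical name):

#include <CoreAudio/CoreAudio.h>

/* Ask the HAL for a device's UID. Returns a CFStringRef the caller must
   release, or NULL on failure. */
static CFStringRef copyDeviceUID(AudioDeviceID deviceID)
{
    AudioObjectPropertyAddress addr = {
        kAudioDevicePropertyDeviceUID,
        kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMaster
    };
    CFStringRef uid = NULL;
    UInt32 size = sizeof(uid);

    if (AudioObjectGetPropertyData(deviceID, &addr, 0, NULL, &size, &uid) != noErr)
        return NULL;

    return uid;
}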
Example 4
// ____________________________________________________________________________________
// main program
int	main(int argc, const char *argv[])
{
	const char *recordFileName = NULL;
	int i, nchannels, bufferByteSize;
	float seconds = 0;
	AudioStreamBasicDescription recordFormat;
	MyRecorder aqr;
	UInt32 size;
	CFURLRef url;
    OSStatus err = noErr;
	
	// fill structures with 0/NULL
	memset(&recordFormat, 0, sizeof(recordFormat));
	memset(&aqr, 0, sizeof(aqr));
	
	// parse arguments
	for (i = 1; i < argc; ++i) {
		const char *arg = argv[i];
		
		if (arg[0] == '-') {
			switch (arg[1]) {
			case 'c':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%d", &nchannels) != 1)
					usage();
				recordFormat.mChannelsPerFrame = nchannels;
				break;
			case 'd':
				if (++i == argc) MissingArgument(arg);
				if (StrTo4CharCode(argv[i], &recordFormat.mFormatID) == 0)
					ParseError(arg, argv[i]);
				break;
			case 'r':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%lf", &recordFormat.mSampleRate) != 1)
					ParseError(arg, argv[i]);
				break;
			case 's':
				if (++i == argc) MissingArgument(arg);
				if (sscanf(argv[i], "%f", &seconds) != 1)
					ParseError(arg, argv[i]);
				break;
			case 'v':
				aqr.verbose = TRUE;
				break;
			default:
				fprintf(stderr, "unknown option: '%s'\n\n", arg);
				usage();
			}
		} else if (recordFileName != NULL) {
			fprintf(stderr, "may only specify one file to record\n\n");
			usage();
		} else
			recordFileName = arg;
	}
	if (recordFileName == NULL) // no record file path provided
		usage();
	
	// determine file format
	AudioFileTypeID audioFileType = kAudioFileCAFType;	// default to CAF
	CFStringRef cfRecordFileName = CFStringCreateWithCString(NULL, recordFileName, kCFStringEncodingUTF8);
	InferAudioFileFormatFromFilename(cfRecordFileName, &audioFileType);
	CFRelease(cfRecordFileName);

	// adapt record format to hardware and apply defaults
	if (recordFormat.mSampleRate == 0.)
		MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);

	if (recordFormat.mChannelsPerFrame == 0)
		recordFormat.mChannelsPerFrame = 2;
	
	if (recordFormat.mFormatID == 0 || recordFormat.mFormatID == kAudioFormatLinearPCM) {
		// default to PCM, 16 bit int
		recordFormat.mFormatID = kAudioFormatLinearPCM;
		recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
		recordFormat.mBitsPerChannel = 16;
		if (MyFileFormatRequiresBigEndian(audioFileType, recordFormat.mBitsPerChannel))
			recordFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
		recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame =
			(recordFormat.mBitsPerChannel / 8) * recordFormat.mChannelsPerFrame;
		recordFormat.mFramesPerPacket = 1;
		recordFormat.mReserved = 0;
	}

	try {
		// create the queue
		XThrowIfError(AudioQueueNewInput(
			&recordFormat,
			MyInputBufferHandler,
			&aqr /* userData */,
			NULL /* run loop */, NULL /* run loop mode */,
			0 /* flags */, &aqr.queue), "AudioQueueNewInput failed");

		// get the record format back from the queue's audio converter --
		// the file may require a more specific stream description than was necessary to create the encoder.
		size = sizeof(recordFormat);
		XThrowIfError(AudioQueueGetProperty(aqr.queue, kAudioConverterCurrentOutputStreamDescription,
			&recordFormat, &size), "couldn't get queue's format");

		// convert recordFileName from C string to CFURL
		url = CFURLCreateFromFileSystemRepresentation(NULL, (Byte *)recordFileName, strlen(recordFileName), FALSE);
		XThrowIfError(!url, "couldn't create record file");
        
		// create the audio file
        err = AudioFileCreateWithURL(url, audioFileType, &recordFormat, kAudioFileFlags_EraseFile,
                                              &aqr.recordFile);
        CFRelease(url); // release first, and then bail out on error
		XThrowIfError(err, "AudioFileCreateWithURL failed");
		

		// copy the cookie first to give the file object as much info as we can about the data going in
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);

		// allocate and enqueue buffers
		bufferByteSize = MyComputeRecordBufferSize(&recordFormat, aqr.queue, 0.5);	// enough bytes for half a second
		for (i = 0; i < kNumberRecordBuffers; ++i) {
			AudioQueueBufferRef buffer;
			XThrowIfError(AudioQueueAllocateBuffer(aqr.queue, bufferByteSize, &buffer),
				"AudioQueueAllocateBuffer failed");
			XThrowIfError(AudioQueueEnqueueBuffer(aqr.queue, buffer, 0, NULL),
				"AudioQueueEnqueueBuffer failed");
		}
		
		// record
		if (seconds > 0) {
			// user requested a fixed-length recording (specified a duration with -s)
			// to time the recording more accurately, watch the queue's IsRunning property
			XThrowIfError(AudioQueueAddPropertyListener(aqr.queue, kAudioQueueProperty_IsRunning,
				MyPropertyListener, &aqr), "AudioQueueAddPropertyListener failed");
			
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			CFAbsoluteTime waitUntil = CFAbsoluteTimeGetCurrent() + 10;

			// wait for the started notification
			while (aqr.queueStartStopTime == 0.) {
				CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.010, FALSE);
				if (CFAbsoluteTimeGetCurrent() >= waitUntil) {
					fprintf(stderr, "Timeout waiting for the queue's IsRunning notification\n");
					goto cleanup;
				}
			}
			printf("Recording...\n");
			CFAbsoluteTime stopTime = aqr.queueStartStopTime + seconds;
			CFAbsoluteTime now = CFAbsoluteTimeGetCurrent();
			CFRunLoopRunInMode(kCFRunLoopDefaultMode, stopTime - now, FALSE);
		} else {
			// start the queue
			aqr.running = TRUE;
			XThrowIfError(AudioQueueStart(aqr.queue, NULL), "AudioQueueStart failed");
			
			// and wait
			printf("Recording, press <return> to stop:\n");
			getchar();
		}

		// end recording
		printf("* recording done *\n");
		
		aqr.running = FALSE;
		XThrowIfError(AudioQueueStop(aqr.queue, TRUE), "AudioQueueStop failed");
		
		// a codec may update its cookie at the end of an encoding session, so reapply it to the file now
		MyCopyEncoderCookieToFile(aqr.queue, aqr.recordFile);
		
cleanup:
		AudioQueueDispose(aqr.queue, TRUE);
		AudioFileClose(aqr.recordFile);
	}
	catch (CAXException e) {
		char buf[256];
		fprintf(stderr, "MyInputBufferHandler: %s (%s)\n", e.mOperation, e.FormatError(buf));
		return e.mError;
	}
		
	return 0;
}
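
MyCopyEncoderCookieToFile is called twice above: once before recording starts, and again after stopping, since a codec may update its cookie at the end of an encoding session. A sketch of the standard AudioToolbox pattern it presumably follows; error handling trimmed for brevity:

static void MyCopyEncoderCookieToFile(AudioQueueRef queue, AudioFileID file)
{
	UInt32 cookieSize = 0;
	if (AudioQueueGetPropertySize(queue, kAudioQueueProperty_MagicCookie,
	                              &cookieSize) != noErr || cookieSize == 0)
		return;	/* no magic cookie for this format (e.g. linear PCM) */

	void *cookie = malloc(cookieSize);
	if (cookie &&
	    AudioQueueGetProperty(queue, kAudioQueueProperty_MagicCookie,
	                          cookie, &cookieSize) == noErr)
		AudioFileSetProperty(file, kAudioFilePropertyMagicCookieData,
		                     cookieSize, cookie);
	free(cookie);
}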
Example 5
static void mf_peer_rdpsnd_activated(RdpsndServerContext* context)
{
	OSStatus status;
	int i, j;
	BOOL formatAgreed = FALSE;
	AUDIO_FORMAT* agreedFormat = NULL;
	
	//we should actually loop through the list of client formats here
	//and see if we can send the client something that it supports...
	
	printf("Client supports the following %d formats: \n", context->num_client_formats);
	
	for (i = 0; i < context->num_client_formats; i++)
	{
		/* TODO: improve the way we agree on a format */
		for (j = 0; j < context->num_server_formats; j++)
		{
			if ((context->client_formats[i].wFormatTag == context->server_formats[j].wFormatTag) &&
			    (context->client_formats[i].nChannels == context->server_formats[j].nChannels) &&
			    (context->client_formats[i].nSamplesPerSec == context->server_formats[j].nSamplesPerSec))
			{
				printf("agreed on format!\n");
				formatAgreed = TRUE;
				agreedFormat = (AUDIO_FORMAT*)&context->server_formats[j];
				break;
			}
		}
		if (formatAgreed == TRUE)
			break;
		
	}
	
	if (formatAgreed == FALSE)
	{
		printf("Could not agree on a audio format with the server\n");
		return;
	}
	
	context->SelectFormat(context, i);
	context->SetVolume(context, 0x7FFF, 0x7FFF);
	
	switch (agreedFormat->wFormatTag)
	{
		case WAVE_FORMAT_ALAW:
			recorderState.dataFormat.mFormatID = kAudioFormatDVIIntelIMA;
			break;
			
		case WAVE_FORMAT_PCM:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
			
		default:
			recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
			break;
	}
	
	recorderState.dataFormat.mSampleRate = agreedFormat->nSamplesPerSec;
	recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
	recorderState.dataFormat.mBytesPerPacket = 4;
	recorderState.dataFormat.mFramesPerPacket = 1;
	recorderState.dataFormat.mBytesPerFrame = 4;
	recorderState.dataFormat.mChannelsPerFrame = agreedFormat->nChannels;
	recorderState.dataFormat.mBitsPerChannel = agreedFormat->wBitsPerSample;
	
	
	recorderState.snd_context = context;
	
	status = AudioQueueNewInput(&recorderState.dataFormat,
				    mf_peer_rdpsnd_input_callback,
				    &recorderState,
				    NULL,
				    kCFRunLoopCommonModes,
				    0,
				    &recorderState.queue);
	
	if (status != noErr)
	{
		printf("Failed to create a new Audio Queue. Status code: %d\n", status);
	}
	
	
	UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
	
	AudioQueueGetProperty(recorderState.queue,
			      kAudioConverterCurrentInputStreamDescription,
			      &recorderState.dataFormat,
			      &dataFormatSize);
	
	
	mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
	
		
	for (i = 0; i < SND_NUMBUFFERS; ++i)
	{
		AudioQueueAllocateBuffer(recorderState.queue,
					 recorderState.bufferByteSize,
					 &recorderState.buffers[i]);
		
		AudioQueueEnqueueBuffer(recorderState.queue,
					recorderState.buffers[i],
					0,
					NULL);
	}
	
	
	recorderState.currentPacket = 0;
	recorderState.isRunning = true;
	
	AudioQueueStart (recorderState.queue, NULL);
	
}
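
mf_rdpsnd_derive_buffer_size follows Apple's standard buffer-sizing recipe: size each buffer for a given duration, falling back to the queue's maximum packet size for variable-rate formats. A sketch under that assumption (the 320 KB cap mirrors Apple's sample code; the actual FreeRDP helper may differ):

static void mf_rdpsnd_derive_buffer_size(AudioQueueRef audioQueue,
					 AudioStreamBasicDescription *ASBDescription,
					 Float64 seconds,
					 UInt32 *outBufferSize)
{
	static const UInt32 maxBufferSize = 0x50000;	/* 320 KB upper bound */
	UInt32 maxPacketSize = ASBDescription->mBytesPerPacket;

	if (maxPacketSize == 0)	/* variable packet size: ask the queue */
	{
		UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
		AudioQueueGetProperty(audioQueue,
				      kAudioQueueProperty_MaximumOutputPacketSize,
				      &maxPacketSize, &maxVBRPacketSize);
	}

	Float64 numBytesForTime = ASBDescription->mSampleRate * maxPacketSize * seconds;
	*outBufferSize = (UInt32)((numBytesForTime < maxBufferSize) ?
				  numBytesForTime : maxBufferSize);
}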
Example 6
static void aq_start_r(MSFilter * f)
{
	AQData *d = (AQData *) f->data;
	if (d->read_started == FALSE) {
		OSStatus aqresult;

		d->readAudioFormat.mSampleRate = d->rate;
		d->readAudioFormat.mFormatID = kAudioFormatLinearPCM;
		d->readAudioFormat.mFormatFlags =
			kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
		d->readAudioFormat.mFramesPerPacket = 1;
		d->readAudioFormat.mChannelsPerFrame = 1;
		d->readAudioFormat.mBitsPerChannel = d->bits;
		d->readAudioFormat.mBytesPerPacket = d->bits / 8;
		d->readAudioFormat.mBytesPerFrame = d->bits / 8;

		//show_format("input device", &d->devicereadFormat);
		//show_format("data from input filter", &d->readAudioFormat);

		memcpy(&d->devicereadFormat, &d->readAudioFormat,
			   sizeof(d->readAudioFormat));
		d->readBufferByteSize =
			kSecondsPerBuffer * d->devicereadFormat.mSampleRate *
			(d->devicereadFormat.mBitsPerChannel / 8) *
			d->devicereadFormat.mChannelsPerFrame;

#if 0
		aqresult = AudioConverterNew(&d->devicereadFormat,
									 &d->readAudioFormat,
									 &d->readAudioConverter);
		if (aqresult != noErr) {
			ms_error("d->readAudioConverter = %d", aqresult);
			d->readAudioConverter = NULL;
		}
#endif
		
		aqresult = AudioQueueNewInput(&d->devicereadFormat, readCallback, d,	// userData
									  NULL,	// run loop
									  NULL,	// run loop mode
									  0,	// flags
									  &d->readQueue);
		if (aqresult != noErr) {
			ms_error("AudioQueueNewInput = %d", aqresult);
		}

		if (d->uidname!=NULL){
			char uidname[256];
			CFStringGetCString(d->uidname, uidname, 256,
							   CFStringGetSystemEncoding());
			ms_message("AQ: using uidname:%s", uidname);
			aqresult =
				AudioQueueSetProperty(d->readQueue,
								  kAudioQueueProperty_CurrentDevice,
								  &d->uidname, sizeof(CFStringRef));
			if (aqresult != noErr) {
				ms_error
					("AudioQueueSetProperty on kAudioQueueProperty_CurrentDevice %d",
					 aqresult);
			}
		}

		setupRead(f);
		aqresult = AudioQueueStart(d->readQueue, NULL);	// start time. NULL means ASAP.
		if (aqresult != noErr) {
			ms_error("AudioQueueStart -read- %d", aqresult);
		}
		d->read_started = TRUE;
	}
}
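
The matching teardown is not shown. It presumably mirrors aq_start_r: stop the queue synchronously, dispose of it, and clear the flag. A minimal sketch (the real mediastreamer2 code also flushes buffered data and takes the filter's lock):

static void aq_stop_r(MSFilter *f)
{
	AQData *d = (AQData *) f->data;
	if (d->read_started == TRUE) {
		AudioQueueStop(d->readQueue, true);	/* true = stop immediately */
		AudioQueueDispose(d->readQueue, true);
		d->read_started = FALSE;
	}
}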
Example 7
static int tdav_producer_audioqueue_prepare(tmedia_producer_t* self, const tmedia_codec_t* codec)
{
    OSStatus ret;
	tsk_size_t i;
	tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;
	
	if(!producer || !codec || !codec->plugin){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	TMEDIA_PRODUCER(producer)->audio.channels = TMEDIA_CODEC_CHANNELS_AUDIO_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.rate = TMEDIA_CODEC_RATE_ENCODING(codec);
	TMEDIA_PRODUCER(producer)->audio.ptime = TMEDIA_CODEC_PTIME_AUDIO_ENCODING(codec);
	/* codec should have ptime */
	
	
	// Set audio category
#if TARGET_OS_IPHONE
	UInt32 category = kAudioSessionCategory_PlayAndRecord;
	AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
#endif
    // Create the audio stream description
    AudioStreamBasicDescription *description = &(producer->description);
    description->mSampleRate = TMEDIA_PRODUCER(producer)->audio.rate;
    description->mFormatID = kAudioFormatLinearPCM;
    description->mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    description->mChannelsPerFrame = TMEDIA_PRODUCER(producer)->audio.channels;
    description->mFramesPerPacket = 1;
    description->mBitsPerChannel = TMEDIA_PRODUCER(producer)->audio.bits_per_sample;
    description->mBytesPerPacket = description->mBitsPerChannel / 8 * description->mChannelsPerFrame;
    description->mBytesPerFrame = description->mBytesPerPacket;
    description->mReserved = 0;
    
    int packetperbuffer = 1000 / TMEDIA_PRODUCER(producer)->audio.ptime;
    producer->buffer_size = description->mSampleRate * description->mBytesPerFrame / packetperbuffer;
    
    // Create the record audio queue
    ret = AudioQueueNewInput(&(producer->description),
							 __handle_input_buffer,
							 producer,
							 NULL,
							 kCFRunLoopCommonModes,
							 0,
							 &(producer->queue));
    if(ret){
        TSK_DEBUG_ERROR("AudioQueueNewInput failed with status=%ld", (long)ret);
        return -2;
    }
    
    for(i = 0; i < CoreAudioRecordBuffers; i++) {
        // Create the buffer for the queue
        ret = AudioQueueAllocateBuffer(producer->queue, producer->buffer_size, &(producer->buffers[i]));
        if (ret) {
            break;
        }
        
        // Clear the data
        memset(producer->buffers[i]->mAudioData, 0, producer->buffer_size);
        producer->buffers[i]->mAudioDataByteSize = producer->buffer_size;
        
        // Enqueue the buffer
        ret = AudioQueueEnqueueBuffer(producer->queue, producer->buffers[i], 0, NULL);
        if (ret) {
            break;
        }
    }
	
	/* a break above leaves ret holding the failing status */
	return (ret == noErr) ? 0 : -2;
}
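
prepare() only builds and primes the queue; capture begins in the producer's start callback. A hedged sketch of what that presumably looks like (field names follow the prepare function above; the real Doubango implementation also tracks a started flag):

static int tdav_producer_audioqueue_start(tmedia_producer_t* self)
{
	OSStatus ret;
	tdav_producer_audioqueue_t* producer = (tdav_producer_audioqueue_t*)self;

	if(!producer){
		TSK_DEBUG_ERROR("Invalid parameter");
		return -1;
	}

	/* NULL start time: begin capturing as soon as possible */
	ret = AudioQueueStart(producer->queue, NULL);
	return (ret == noErr) ? 0 : -2;
}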
Example 8
static void mf_peer_rdpsnd_activated(rdpsnd_server_context* context)
{
	printf("RDPSND Activated\n");
    
    
    
    printf("Let's create an audio queue for input!\n");
    
    OSStatus status;
    
    recorderState.dataFormat.mSampleRate = 44100.0;
    recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
    recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
    recorderState.dataFormat.mBytesPerPacket = 4;
    recorderState.dataFormat.mFramesPerPacket = 1;
    recorderState.dataFormat.mBytesPerFrame = 4;
    recorderState.dataFormat.mChannelsPerFrame = 2;
    recorderState.dataFormat.mBitsPerChannel = 16;
    
    recorderState.snd_context = context;
    
    status = AudioQueueNewInput(&recorderState.dataFormat,
                                mf_peer_rdpsnd_input_callback,
                                &recorderState,
                                NULL,
                                kCFRunLoopCommonModes,
                                0,
                                &recorderState.queue);
    
    if (status != noErr)
    {
        printf("Failed to create a new Audio Queue. Status code: %d\n", status);
    }
    
    
    UInt32 dataFormatSize = sizeof (recorderState.dataFormat);
    
    AudioQueueGetProperty(recorderState.queue,
                          kAudioConverterCurrentInputStreamDescription,
                          &recorderState.dataFormat,
                          &dataFormatSize);
    
    
    mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05, &recorderState.bufferByteSize);
    
    
    printf("Preparing a set of buffers...");
    
    for (int i = 0; i < snd_numBuffers; ++i)
    {
        AudioQueueAllocateBuffer(recorderState.queue,
                                 recorderState.bufferByteSize,
                                 &recorderState.buffers[i]);
        
        AudioQueueEnqueueBuffer(recorderState.queue,
                                recorderState.buffers[i],
                                0,
                                NULL);
    }
    
    printf("done\n");
    
    printf("recording...\n");
    
    
    
    recorderState.currentPacket = 0;
    recorderState.isRunning = true;
    
    context->SelectFormat(context, 4);
    context->SetVolume(context, 0x7FFF, 0x7FFF);
    
    AudioQueueStart (recorderState.queue, NULL);

}
Example 9
/*auNew---------------------------------------------------*/
Audio* auAlloc(int sizeofstarself, auAudioCallback_t callback, BOOL isOutput, unsigned numChannels)
{
  Audio* self = (Audio*)calloc(1, sizeofstarself);

  if(self != NULL)
    {
      self->isOutput                     = isOutput;
      self->isPlaying                    = NO;
      self->audioCallback                = callback;
      self->numChannels                  = numChannels;
      self->targetMasterVolume           = 1;
      auSetMasterVolumeSmoothing(self, 0.9999);
      
#if defined __APPLE__
      int i;
      OSStatus error;
      self->dataFormat.mSampleRate       = AU_SAMPLE_RATE;
      self->dataFormat.mBytesPerPacket   = 4 * numChannels;
      self->dataFormat.mFramesPerPacket  = 1;
      self->dataFormat.mBytesPerFrame    = 4 * numChannels;
      self->dataFormat.mBitsPerChannel   = 32;
      self->dataFormat.mChannelsPerFrame = numChannels;
      self->dataFormat.mFormatID         = kAudioFormatLinearPCM;
      //self->dataFormat.mFormatFlags    = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
      self->dataFormat.mFormatFlags      = kLinearPCMFormatFlagIsFloat;
      
      if(isOutput)
        error = AudioQueueNewOutput(&(self->dataFormat), auAudioOutputCallback, self, NULL, NULL, 0, &(self->queue));
      else
        error = AudioQueueNewInput (&(self->dataFormat), auAudioInputCallback, self, NULL, NULL, 0, &(self->queue));
      if(error) 
        {
          fprintf(stderr, "Audio.c: unable to allocate Audio queue\n"); 
          return auDestroy(self);
        }
      else //(!error)
        {
          unsigned bufferNumBytes = AU_BUFFER_NUM_FRAMES * numChannels * sizeof(auSample_t);
          for(i=0; i<AU_NUM_AUDIO_BUFFERS; i++)
            {
              error = AudioQueueAllocateBuffer(self->queue, bufferNumBytes, &((self->buffers)[i]));
              if(error) 
                {
                  self = auDestroy(self);
                  fprintf(stderr, "Audio.c: allocate buffer error\n");
                  break;
                }
            }
        }
        
#elif defined __linux__
      int error = 0;

      snd_pcm_hw_params_t  *hardwareParameters;
      snd_pcm_hw_params_alloca(&hardwareParameters);

      //untested for input stream...
      const char* name = (isOutput) ? AU_SPEAKER_DEVICE_NAME : AU_MIC_DEVICE_NAME;
      snd_pcm_stream_t direction = (isOutput) ? SND_PCM_STREAM_PLAYBACK : SND_PCM_STREAM_CAPTURE;
      
      error = snd_pcm_open(&(self->device), name, direction, 0);
      if(error < 0) fprintf(stderr, "Audio.c: Unable to open audio device %s: %s\n", name, snd_strerror(error));
            
      if(error >= 0)
        {
          error = snd_pcm_hw_params_any(self->device, hardwareParameters);
            if(error < 0) fprintf(stderr, "Audio.c: Unable to get a generic hardware configuration: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params_set_access(self->device, hardwareParameters, SND_PCM_ACCESS_RW_INTERLEAVED);
          if(error < 0) fprintf(stderr, "Audio.c: Device does not support interleaved audio access: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params_set_format(self->device, hardwareParameters, /*SND_PCM_FORMAT_S16*/ SND_PCM_FORMAT_FLOAT) ;
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set sample format: %s\n", snd_strerror(error));
        }
      if(error >= 0)
        {
          //self->numChannels = AU_NUM_CHANNELS;
          error = snd_pcm_hw_params_set_channels_near(self->device, hardwareParameters, &self->numChannels);
          if(error < 0) fprintf(stderr, "Audio.c: unable to set the number of channels to %i: %s\n", self->numChannels, snd_strerror(error));
          else if(self->numChannels != numChannels)
            fprintf(stderr, "Audio.c: device does not support %u numChannels, %i will be used instead\n", numChannels, self->numChannels);  
        }
      if(error >= 0)
        {
          unsigned int sampleRate = AU_SAMPLE_RATE;
          error = snd_pcm_hw_params_set_rate_near(self->device, hardwareParameters, &sampleRate, 0);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set the sample rate to %u: %s\n", sampleRate, snd_strerror(error));
          else if(sampleRate != AU_SAMPLE_RATE)
            fprintf(stderr, "Audio.c: device does not support %i sample rate, %u will be used instead\n", (int)AU_SAMPLE_RATE, sampleRate);
        }
      if(error >= 0)
        {
          int dir = 0;
          self->bufferNumFrames = AU_BUFFER_NUM_FRAMES; //the buffer I give to ALSA
          error = snd_pcm_hw_params_set_period_size_near(self->device, hardwareParameters, &(self->bufferNumFrames), &dir);
          if(error < 0) fprintf(stderr, "Audio.cpp: Unable to set the sample buffer size to %lu: %s\n", self->bufferNumFrames, snd_strerror(error));
          else if(self->bufferNumFrames != AU_BUFFER_NUM_FRAMES)
            fprintf(stderr, "Audio.c: device does not support %i period size, %lu will be used instead\n", AU_BUFFER_NUM_FRAMES, self->bufferNumFrames);
        }
      if(error >= 0)
        {
          unsigned long int internalBufferNumFrames = self->bufferNumFrames * AU_NUM_AUDIO_BUFFERS; //the buffer ALSA uses internally
          error = snd_pcm_hw_params_set_buffer_size_near(self->device, hardwareParameters, &internalBufferNumFrames);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to set the internal buffer size to %lu: %s\n", internalBufferNumFrames, snd_strerror(error));
          else if(internalBufferNumFrames != AU_NUM_AUDIO_BUFFERS * self->bufferNumFrames)
              fprintf(stderr, "Audio.c: device does not support %lu internal buffer size, %lu will be used instead\n", AU_NUM_AUDIO_BUFFERS * self->bufferNumFrames, internalBufferNumFrames);
        }
      if(error >= 0)
        {
          error = snd_pcm_hw_params(self->device, hardwareParameters);
          if(error < 0) fprintf(stderr, "Audio.c: Unable to load the hardware parameters into the device: %s\n", snd_strerror(error));
        }
      if(error >= 0)
       {
         unsigned int size = sizeof(auSample_t) * self->numChannels * self->bufferNumFrames;
         self->sampleBuffer = (auSample_t*)malloc(size);
         if(self->sampleBuffer == NULL)
           {
              error = -1;
              perror("Audio.c: Unable to allocate audio buffers \n");
           }
       }
      if (error < 0) self = auDestroy(self);
#endif
    }
  else perror("Audio.c: Unable to create new Audio object");
  
  srandom((unsigned)time(NULL));

  return self;
}
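
On the Linux path the device is opened and configured here, but the render loop that services it lives elsewhere. A minimal sketch of one playback iteration, assuming auAudioCallback_t takes the Audio object, an interleaved sample buffer, a frame count, and a channel count (a guess; the real typedef is not shown):

static void auRenderOnce(Audio *self)
{
  /* let the user callback fill sampleBuffer with interleaved frames */
  self->audioCallback(self, self->sampleBuffer,
                      self->bufferNumFrames, self->numChannels);

  snd_pcm_sframes_t n = snd_pcm_writei(self->device, self->sampleBuffer,
                                       self->bufferNumFrames);
  if(n == -EPIPE)
    {
      /* underrun: re-prepare the device and carry on */
      snd_pcm_prepare(self->device);
    }
  else if(n < 0)
    fprintf(stderr, "Audio.c: write error: %s\n", snd_strerror((int)n));
}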
Example 10
int	main(int argc, const char *argv[])
{
	MyRecorder recorder = {0};
	AudioStreamBasicDescription recordFormat = {0};
	
	// Configure the output data format to be AAC
	recordFormat.mFormatID = kAudioFormatMPEG4AAC;
	recordFormat.mChannelsPerFrame = 2;
	
	// get the sample rate of the default input device
	// we use this to adapt the output data format to match hardware capabilities
	MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
	
	// ProTip: Use the AudioFormat API to trivialize ASBD creation.
	//         input: at least the mFormatID; however, at this point we already have
	//                mSampleRate, mFormatID, and mChannelsPerFrame
	//         output: the remainder of the ASBD will be filled out as much as possible
	//                 given the information known about the format
	UInt32 propSize = sizeof(recordFormat);
	CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL,
									  &propSize, &recordFormat), "AudioFormatGetProperty failed");
	
	// create a input (recording) queue
	AudioQueueRef queue = {0};
	CheckError(AudioQueueNewInput(&recordFormat, // ASBD
								  MyAQInputCallback, // Callback
								  &recorder, // user data
								  NULL, // run loop
								  NULL, // run loop mode
								  0, // flags (always 0)
								  // &recorder.queue), // output: reference to AudioQueue object
								  &queue),
			   "AudioQueueNewInput failed");
	
	// since the queue is now initialized, we ask its Audio Converter object
	// for the ASBD it has configured itself with. The file may require a more
	// specific stream description than was necessary to create the audio queue.
	//
	// for example: certain fields in an ASBD cannot possibly be known until its
	// codec is instantiated (in this case, by the AudioQueue's Audio Converter object)
	UInt32 size = sizeof(recordFormat);
	CheckError(AudioQueueGetProperty(queue, kAudioConverterCurrentOutputStreamDescription,
									 &recordFormat, &size), "couldn't get queue's format");
	
	// create the audio file
	CFURLRef myFileURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, CFSTR("./output.caf"), kCFURLPOSIXPathStyle, false);
	CFShow (myFileURL);
	CheckError(AudioFileCreateWithURL(myFileURL, kAudioFileCAFType, &recordFormat,
									  kAudioFileFlags_EraseFile, &recorder.recordFile), "AudioFileCreateWithURL failed");
	CFRelease(myFileURL);
	
	// many encoded formats require a 'magic cookie'. we set the cookie first
	// to give the file object as much info as we can about the data it will be receiving
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
	// allocate and enqueue buffers
	int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, 0.5);	// enough bytes for half a second
	int bufferIndex;
    for (bufferIndex = 0; bufferIndex < kNumberRecordBuffers; ++bufferIndex)
	{
		AudioQueueBufferRef buffer;
		CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &buffer),
				   "AudioQueueAllocateBuffer failed");
		CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL),
				   "AudioQueueEnqueueBuffer failed");
	}
	
	// start the queue. this function returns immediately and begins
	// invoking the callback, as needed, asynchronously.
	recorder.running = TRUE;
	CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
	
	// and wait
	printf("Recording, press <return> to stop:\n");
	getchar();
	
	// end recording
	printf("* recording done *\n");
	recorder.running = FALSE;
	CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");
	
	// a codec may update its magic cookie at the end of an encoding session
	// so reapply it to the file now
	MyCopyEncoderCookieToFile(queue, recorder.recordFile);
	
cleanup:
	AudioQueueDispose(queue, TRUE);
	AudioFileClose(recorder.recordFile);
	
	return 0;
}
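
CheckError is the error helper used throughout this example; it is commonly defined as below in Core Audio sample code: print the OSStatus as a four-character code when it is printable, otherwise as a decimal number, then exit.

#include <ctype.h>

static void CheckError(OSStatus error, const char *operation)
{
	if (error == noErr) return;
	
	char errorString[20];
	// see if it appears to be a printable four-character code
	*(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig((UInt32)error);
	if (isprint(errorString[1]) && isprint(errorString[2]) &&
		isprint(errorString[3]) && isprint(errorString[4])) {
		errorString[0] = errorString[5] = '\'';
		errorString[6] = '\0';
	} else
		// no, format it as an integer
		sprintf(errorString, "%d", (int)error);
	
	fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
	exit(1);
}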
Example 11
OSStatus
darwin_configure_input_audio_queue (
                                 cahal_device*                 in_device,
                                 cahal_recorder_info*          in_callback_info,
                                 AudioStreamBasicDescription*  io_asbd,
                                 AudioQueueRef*                out_audio_queue
                                 )
{
  OSStatus result = noErr;
  
  if( NULL != io_asbd )
  {
    result =
    AudioQueueNewInput  (
                         io_asbd,
                         darwin_recorder_callback,
                         in_callback_info,
                         NULL,
                         kCFRunLoopCommonModes,
                         0,
                         out_audio_queue
                         );
    
    if( noErr == result )
    {
      if( NULL != in_device->device_uid )
      {
        CFStringRef device_uid =
        CFStringCreateWithCString (
                                   NULL,
                                   in_device->device_uid,
                                   kCFStringEncodingASCII
                                   );
        
        CPC_LOG (
                 CPC_LOG_LEVEL_TRACE,
                 "Setting queue device to %s.",
                 in_device->device_uid
                 );
        
        result =
        AudioQueueSetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_CurrentDevice,
                               &device_uid,
                               sizeof( device_uid )
                               );
        
        if( NULL != device_uid )
        {
          CFRelease( device_uid );
        }
      }
      
      if( result )
      {
        CPC_ERROR (
                   "Error setting current device (0x%x) to %s: 0x%x",
                   kAudioQueueProperty_CurrentDevice,
                   in_device->device_uid,
                   result
                   );
        
        CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
      }
      else
      {
        UINT32 property_size = sizeof( AudioStreamBasicDescription );
        
        result =
        AudioQueueGetProperty (
                               *out_audio_queue,
                               kAudioQueueProperty_StreamDescription,
                               io_asbd,
                               &property_size
                               );
        
        if( result )
        {
          CPC_ERROR(
                    "Error accessing property 0x%x on AudioQueue: %d",
                    kAudioQueueProperty_StreamDescription,
                    result
                    );
          
          CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
        }
      }
    }
    else
    {
      CPC_ERROR (
                 "Error creating AudioQueue: 0x%x.",
                 result
                 );
      
      CPC_PRINT_CODE( CPC_LOG_LEVEL_ERROR, result );
    }
  }
  else
  {
    CPC_LOG_STRING  (
                     CPC_LOG_LEVEL_ERROR,
                     "Invalid basic stream description"
                     );
  }
  
  return( result );
}
Example 12
void Recorder::start()
{
	// Create the recording queue, registering HandleInputBuffer as the
	// input callback and aqData as its user data
	AudioQueueNewInput (
						&aqData.mDataFormat,
						HandleInputBuffer,
						&aqData,
						NULL,                        // run loop: NULL uses an internal thread
						kCFRunLoopCommonModes,
						0,                           // reserved, must be 0
						&aqData.mQueue
	);

	// Ask the queue's audio converter for the fully specified stream
	// format; the file may need more detail than the queue required
	UInt32 dataFormatSize = sizeof (aqData.mDataFormat);
	 
	AudioQueueGetProperty (
	  aqData.mQueue,
	  kAudioConverterCurrentOutputStreamDescription,
	  &aqData.mDataFormat,
	  &dataFormatSize
	);
	
	const char *filePath = "recording.wav";
	
	// Create the output file, erasing any existing file at that path
	audioFileURL =
		CFURLCreateFromFileSystemRepresentation (
			NULL,
			(const UInt8 *) filePath,
			strlen (filePath),
			false                                // not a directory
		);
	AudioFileCreateWithURL (
		audioFileURL,
		fileType,
		&aqData.mDataFormat,
		kAudioFileFlags_EraseFile,
		&aqData.mAudioFile
	);
	
	// Size each buffer to hold half a second of audio
	DeriveBufferSize (
		aqData.mQueue,
		aqData.mDataFormat,
		0.5,                                     // seconds per buffer
		&aqData.bufferByteSize
	);

	// Allocate the buffers and prime the queue with them
	for (int i = 0; i < kNumberBuffers; ++i) {
		AudioQueueAllocateBuffer (
			aqData.mQueue,
			aqData.bufferByteSize,
			&aqData.mBuffers[i]
		);
	 
		AudioQueueEnqueueBuffer (
			aqData.mQueue,
			aqData.mBuffers[i],
			0,                                   // packet descriptions (unused for CBR)
			NULL
		);
	}

	aqData.mCurrentPacket = 0;

	aqData.mIsRunning = true;

	// Start recording; a NULL start time means "as soon as possible"
	AudioQueueStart (
		aqData.mQueue,
		NULL
	);
}
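
The HandleInputBuffer callback registered in start() follows Apple's Audio Queue Services Programming Guide. A sketch assuming aqData is the guide's AQRecorderState structure (its members match their use in start()): write the captured packets to the file, advance the packet index, and re-enqueue the buffer while recording is running.

static void HandleInputBuffer(void *aqData, AudioQueueRef inAQ,
			      AudioQueueBufferRef inBuffer,
			      const AudioTimeStamp *inStartTime,
			      UInt32 inNumPackets,
			      const AudioStreamPacketDescription *inPacketDesc)
{
	AQRecorderState *pAqData = (AQRecorderState *) aqData;
	(void)inAQ;
	(void)inStartTime;

	/* for CBR formats the queue passes 0; compute the packet count */
	if (inNumPackets == 0 && pAqData->mDataFormat.mBytesPerPacket != 0)
		inNumPackets = inBuffer->mAudioDataByteSize /
			       pAqData->mDataFormat.mBytesPerPacket;

	/* append the captured packets to the file */
	if (AudioFileWritePackets(pAqData->mAudioFile, false,
				  inBuffer->mAudioDataByteSize, inPacketDesc,
				  pAqData->mCurrentPacket, &inNumPackets,
				  inBuffer->mAudioData) == noErr)
		pAqData->mCurrentPacket += inNumPackets;

	if (!pAqData->mIsRunning)
		return;	/* recording has stopped; don't re-enqueue */

	AudioQueueEnqueueBuffer(pAqData->mQueue, inBuffer, 0, NULL);
}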