Example #1
static OSStatus input_proc(AudioDeviceID device,
	const AudioTimeStamp *currentTime,
	const AudioBufferList *inputData,
	const AudioTimeStamp *inputTime,
	AudioBufferList *outputData,
	const AudioTimeStamp *outputTime,
	void *context)
{
	OSStatus err = noErr;
	phastream_t *as = ((phastream_t *) context);
	ca_dev *cadev = (ca_dev *) as->drvinfo;
/* <UOLFONE <pTime adjustment> > */
	//unsigned decodedFrameSize = as->ms.codec->decoded_framesize;
	unsigned decodedFrameSize = ph_astream_decoded_framesize_get(as);
/* </UOLFONE> */

	if (as->actual_rate != as->clock_rate) {
		decodedFrameSize *= 2;
	}

	DBG_DYNA_AUDIO_DRV("**CoreAudio: available input data: %d\n",
		inputData->mBuffers[0].mDataByteSize);

	DBG_DYNA_AUDIO_DRV("**CoreAudio: phapi framesize:%d, input converter buffer size: %d\n",
		decodedFrameSize, cadev->inputConverterBufferSize);
	
	memcpy(cadev->tmpInputBuffer + cadev->tmpInputCount, 
		inputData->mBuffers[0].mData, inputData->mBuffers[0].mDataByteSize);
	cadev->tmpInputCount += inputData->mBuffers[0].mDataByteSize;
	
	while (cadev->tmpInputCount >= cadev->inputConverterBufferSize) {
		cadev->convertedInputCount = decodedFrameSize;
		unsigned savedtmpInputCount = cadev->tmpInputCount;
		cadev->currentInputBuffer = cadev->tmpInputBuffer;
		cadev->sumDataSize = 0;
		
		err = AudioConverterFillBuffer(cadev->inputConverter, buffer_data_proc,
			cadev, &cadev->convertedInputCount, cadev->convertedInputBuffer);
		if (err != noErr) {
			DBG_DYNA_AUDIO_DRV("!!CoreAudio: error while converting\n");
		}
		DBG_DYNA_AUDIO_DRV("**CoreAudio: converted data: %d\n", cadev->convertedInputCount);
		
		// Send converted data to phapi
		cadev->cbk(as, cadev->convertedInputBuffer, cadev->convertedInputCount, NULL, 0);
		
		unsigned usedData = cadev->sumDataSize;
		DBG_DYNA_AUDIO_DRV("**CoreAudio: used data: %d\n", usedData);
		if (usedData == 0) {
			/* converter consumed nothing: bail out to avoid spinning forever */
			break;
		}
		unsigned remainingData = savedtmpInputCount - usedData;
		/* source and destination can overlap, so memmove rather than memcpy */
		memmove(cadev->tmpInputBuffer, cadev->tmpInputBuffer + usedData, remainingData);
		cadev->tmpInputCount = remainingData;
	}
	
	return noErr;
}
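The buffer_data_proc callback passed to AudioConverterFillBuffer above is not shown. As a rough sketch of what such an AudioConverterInputDataProc can look like, assuming ca_dev keeps currentInputBuffer as a char * cursor into tmpInputBuffer and byte counts in the other fields (example_input_proc is a hypothetical stand-in, not the project's real callback):

static OSStatus example_input_proc(AudioConverterRef converter,
	UInt32 *ioDataSize,	/* in: bytes requested; out: bytes provided */
	void **outData,		/* out: where the input bytes live */
	void *inUserData)
{
	ca_dev *cadev = (ca_dev *) inUserData;
	/* hand the converter whatever is left of the accumulated device data */
	UInt32 avail = cadev->tmpInputCount - cadev->sumDataSize;
	if (*ioDataSize > avail)
		*ioDataSize = avail;	/* may be 0, which ends the conversion */
	*outData = cadev->currentInputBuffer;
	cadev->currentInputBuffer += *ioDataSize;
	cadev->sumDataSize += *ioDataSize;	/* input_proc reads this back as "used data" */
	return noErr;
}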
Example #2
// Ship out to the device (usually 512 frames at 44.1 kHz for built-in
// audio and USB).  This is asynchronous and runs (implicitly) in its own
// thread.
//
static OSStatus ioProcOutput(AudioDeviceID	    device,
			     const AudioTimeStamp  *currentTime,
			     const AudioBufferList *inputData,
			     const AudioTimeStamp  *inputTime,
			     AudioBufferList	   *outputData,	// io param
			     const AudioTimeStamp  *outputTime,
			     void		   *context)
{
  Stream *s= (Stream *)context;
  Buffer *b= s->buffer;
  if (Buffer_free(b) >= s->imgBufSize)
    ioProcSignal(s->semaphore);		// restart SoundRecorder
  return AudioConverterFillBuffer(s->converter, bufferDataProc, context,
				  &outputData->mBuffers[0].mDataByteSize,
				  outputData->mBuffers[0].mData);
}
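An IOProc like ioProcOutput only starts firing once it has been registered with the HAL and the device has been started. A minimal sketch using the matching pre-10.5 calls AudioDeviceAddIOProc and AudioDeviceStart (the start_output helper is hypothetical):

static OSStatus start_output(Stream *s, AudioDeviceID device)
{
  /* register the callback, then start the device so the HAL begins
     calling ioProcOutput on its own thread */
  OSStatus err = AudioDeviceAddIOProc(device, ioProcOutput, (void *)s);
  if (err != kAudioHardwareNoError)
    return err;
  return AudioDeviceStart(device, ioProcOutput);
}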
Example #3
static sqInt sound_RecordSamplesIntoAtLength(sqInt buf, sqInt startSliceIndex, sqInt bufferSizeInBytes)
{
  if (input)
    {
      if (Buffer_avail(input->buffer) >= (512 * DeviceFrameSize))
	{
	  int    start= startSliceIndex * SqueakFrameSize / 2;
	  UInt32 count= min(input->cvtBufSize, bufferSizeInBytes - start);
	  if (kAudioHardwareNoError == AudioConverterFillBuffer(input->converter, bufferDataProc, input,
								&count, pointerForOop(buf) + start))
	    return count / (SqueakFrameSize / 2) / input->channels;	/* bytes -> sample frames */
	}
      return 0;
    }
  success(false);
  return 0;
}
Example #4
JNIEXPORT jint JNICALL Java_com_apple_audio_toolbox_AudioConverter_AudioConverterFillBuffer
  (JNIEnv *env, jclass clazz, jint inAudioConverter, jint inInputDataProc,
   jint inInputDataProcUserData, jint ioOutputDataSize, jint outOutputData)
{
	return (jint)AudioConverterFillBuffer((AudioConverterRef)inAudioConverter,
		(AudioConverterInputDataProc)inInputDataProc,
		(void *)inInputDataProcUserData,
		(UInt32 *)ioOutputDataSize,
		(void *)outOutputData);
}
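All four examples use the pre-Mac OS X 10.5 pull model, in which AudioConverterFillBuffer drives a caller-supplied input proc (the API was later deprecated in favor of AudioConverterFillComplexBuffer). For reference, the surrounding call pattern looks roughly like this; convert_once, inFmt, and outFmt are assumed names, not taken from the examples above:

#include <AudioToolbox/AudioToolbox.h>

static OSStatus convert_once(const AudioStreamBasicDescription *inFmt,
	const AudioStreamBasicDescription *outFmt,
	AudioConverterInputDataProc inputProc,
	void *state, void *outBuf, UInt32 *ioOutBytes)
{
	AudioConverterRef conv = NULL;
	OSStatus err = AudioConverterNew(inFmt, outFmt, &conv);
	if (err != noErr)
		return err;
	/* on return, *ioOutBytes holds the byte count actually produced */
	err = AudioConverterFillBuffer(conv, inputProc, state, ioOutBytes, outBuf);
	AudioConverterDispose(conv);
	return err;
}

The converter pulls input through inputProc until either the output buffer is full or the proc reports that no more data is available.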