Пример #1
0
/*
 * Convert captured input samples into the Float32 destination buffer.
 *
 * sampleBuf        - hardware sample buffer, layout described by streamFormat
 * destBuf          - destination; Float32 samples for mixable linear PCM,
 *                    otherwise a raw byte copy of the source
 * firstSampleFrame - first frame to convert
 * numSampleFrames  - number of frames to convert
 * streamFormat     - numeric representation, bit width, endianness, channels
 *
 * Always returns kIOReturnSuccess; unsupported formats are only logged.
 */
IOReturn VoodooHDAEngine::convertInputSamples(const void *sampleBuf, void *destBuf,
		UInt32 firstSampleFrame, UInt32 numSampleFrames, const IOAudioStreamFormat *streamFormat,
		__unused IOAudioStream *audioStream)
{
	UInt32	numSamplesLeft, numSamples;
	Float32	*floatDestBuf;

	// Blitters work in samples, not frames: each frame holds fNumChannels samples.
	UInt32 firstSample = firstSampleFrame * streamFormat->fNumChannels;
	numSamples = numSamplesLeft = numSampleFrames * streamFormat->fNumChannels;
	// Mask that zeroes the lowest 'noiseLevel' bits of every sample (simple noise gate).
	long int noiseMask = ~((1 << noiseLevel) - 1);

	UInt8 *sourceBuf = (UInt8 *) sampleBuf;

	// figure out what sort of blit we need to do
	if ((streamFormat->fSampleFormat == kIOAudioStreamSampleFormatLinearPCM) && streamFormat->fIsMixable) {
		// it's linear PCM, which means the target is Float32 and we will be calling a blitter, which
		// works in samples not frames
		floatDestBuf = (Float32 *) destBuf;

		if (streamFormat->fNumericRepresentation == kIOAudioStreamNumericRepresentationSignedInt) {
			// it's some kind of signed integer, which we handle as some kind of even byte length
			bool nativeEndianInts;
			nativeEndianInts = (streamFormat->fByteOrder == kIOAudioStreamByteOrderLittleEndian);

			switch (streamFormat->fBitWidth) {
				case 8: {
					SInt8 *inputBuf8 = &(((SInt8 *) sampleBuf)[firstSample]);
#if defined(__ppc__)
					Int8ToFloat32(inputBuf8, floatDestBuf, numSamplesLeft);
#elif defined(__i386__) || defined(__x86_64__)
					// Read-only '&' here: the previous '&=' wrote the noise-gated value
					// back into the (const) source buffer.
					while (numSamplesLeft-- > 0) {
						*(floatDestBuf++) = (float) (*(inputBuf8++) & (SInt8) noiseMask) * kOneOverMaxSInt8Value;
					}
#endif
					break;
				}

				case 16:
					if (nativeEndianInts) {
						if (vectorize) {
							// Index via sourceBuf (UInt8 *): pointer arithmetic on the
							// 'const void *' sampleBuf is not valid standard C++.
							NativeInt16ToFloat32((SInt16 *) &sourceBuf[2 * firstSample], floatDestBuf, numSamples);
						} else {
							SInt16 *inputBuf16 = &(((SInt16 *) sampleBuf)[firstSample]);
#if defined(__ppc__)
							SwapInt16ToFloat32(inputBuf16, floatDestBuf, numSamplesLeft, 16);
#elif defined(__i386__) || defined(__x86_64__)
							// Read-only '&' (see 8-bit case).
							while (numSamplesLeft-- > 0) {
								*(floatDestBuf++) = (float) (*(inputBuf16++) & (SInt16) noiseMask) * kOneOverMaxSInt16Value;
							}
#endif
						}
					} else
						SwapInt16ToFloat32((SInt16 *) &sourceBuf[2 * firstSample], floatDestBuf, numSamples);
					break;

				case 20:
				case 24:
					if (nativeEndianInts) {
						if (vectorize) {
							NativeInt24ToFloat32(&sourceBuf[3 * firstSample], floatDestBuf, numSamples);
						} else {
							// Multiply by 3 because 20 and 24 bit samples are packed into only
							// three bytes, so we have to index bytes, not shorts or longs
							SInt8 *inputBuf24 = &(((SInt8 *) sampleBuf)[firstSample * 3]);

#if defined(__ppc__)
							SwapInt24ToFloat32((long *) inputBuf24, floatDestBuf, numSamplesLeft, 24);
#elif defined(__i386__) || defined(__x86_64__)
							SInt32 inputSample;

							// [rdar://4311684] - Fixed 24-bit input convert routine. /thw
							// Loop handles all but the last sample: it reads 4 bytes at a
							// time and would otherwise run 1 byte past the buffer end.
							while (numSamplesLeft-- > 1) {
								inputSample = (*(UInt32 *) inputBuf24) & 0x00FFFFFF & noiseMask;
								// Sign extend if necessary
								if (inputSample > 0x7FFFFF) {
									inputSample |= 0xFF000000;
								}
								inputBuf24 += 3;
								*(floatDestBuf++) = (float) inputSample * kOneOverMaxSInt24Value;
							}
							// Convert last sample. The following line does the same work as
							// above without going over the edge of the buffer.
							inputSample = SInt32((UInt32(*(UInt16 *) inputBuf24) & 0x0000FFFF & noiseMask)
												 | (SInt32(*(inputBuf24 + 2)) << 16));
							*(floatDestBuf++) = (float) inputSample * kOneOverMaxSInt24Value;
#endif
						}
					} else
						SwapInt24ToFloat32(&sourceBuf[3 * firstSample], floatDestBuf, numSamples);
					break;

				case 32:
					if (nativeEndianInts) {
						if (vectorize) {
							NativeInt32ToFloat32((SInt32 *) &sourceBuf[4 * firstSample], floatDestBuf, numSamples);
						} else {
							SInt32 *inputBuf32 = &(((SInt32 *) sampleBuf)[firstSample]);

#if defined(__ppc__)
							SwapInt32ToFloat32(inputBuf32, floatDestBuf, numSamplesLeft, 32);
#elif defined(__i386__) || defined(__x86_64__)
							while (numSamplesLeft-- > 0) {
								*(floatDestBuf++) = (float) (*(inputBuf32++) & noiseMask) * kOneOverMaxSInt32Value;
							}
#endif
						}
					} else
						SwapInt32ToFloat32((SInt32 *) &sourceBuf[4 * firstSample], floatDestBuf, numSamples);
					break;

				default:
					errorMsg("convertInputSamples: can't handle signed integers with a bit width of %d",
							streamFormat->fBitWidth);
					break;
			}

			// Swap the left and right channel samples when the device is configured
			// with switched channels.
			if (mDevice && mDevice->mSwitchCh && (streamFormat->fNumChannels > 1)) {
				UInt32 i;
				Float32 tempSamples;

				for (i = 0; i < numSamples; i += streamFormat->fNumChannels) {
					tempSamples = floatDestBuf[i];
					floatDestBuf[i] = floatDestBuf[i + 1];
					floatDestBuf[i + 1] = tempSamples;
				}
			}

		} else if (streamFormat->fNumericRepresentation == kIOAudioStreamNumericRepresentationIEEE754Float) {
			// it is some kind of floating point format
			if ((streamFormat->fBitWidth == 32) && (streamFormat->fBitDepth == 32) &&
					(streamFormat->fByteOrder == kIOAudioStreamByteOrderLittleEndian)) {
				// it's Float32, so we are just going to copy the data
				memcpy(floatDestBuf, &((Float32 *) sampleBuf)[firstSample], numSamples * sizeof (Float32));
			} else
				errorMsg("convertInputSamples: can't handle floats with a bit width of %d, bit depth of %d, "
						"and/or the given byte order", streamFormat->fBitWidth, streamFormat->fBitDepth);
		}
	} else {
		// it's not linear PCM or it's not mixable, so just copy the data into the target buffer
		UInt32 offset = firstSampleFrame * (streamFormat->fBitWidth / 8) * streamFormat->fNumChannels;
		UInt32 size = numSampleFrames * (streamFormat->fBitWidth / 8) * streamFormat->fNumChannels;
		memcpy(destBuf, &sourceBuf[offset], size);
	}

	return kIOReturnSuccess;
}
Пример #2
0
//	Pull audio from the upstream sound component, feed it through the
//	AudioCodec encoder, and hand back one packet of encoded data in *outData
//	(left NULL when no data could be produced).  Throws a CAException on any
//	component/codec error.
void	SMACscom::GetSourceData(SoundComponentData** outData)
{
	ComponentResult theError = 0;
	UInt32	theNumberOutputPackets = 0;
	UInt32	theEncoderStatus = 0;
	UInt32	theActualOutputDataByteSize = 0;
	UInt32  maxPacketSize = 0;
#if !TARGET_MAC_OS || !IS_COMPILER_WORKING
	float * tempFloatPtr;
	//	UInt32, not unsigned long: on LP64 targets unsigned long is 8 bytes,
	//	so the old code read/wrote 4 bytes past the float it was type-punning.
	UInt32 tempULong = 0;
	float tempFloat;
#endif	
	*outData = NULL;
	
	//	make sure we have some source data to start with
	if(SMACSCDUtility::NeedMoreSourceData(mSourceData))
	{
		theError = SoundComponentGetSourceData(mSourceComponent, &mSourceData);
		ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from SoundComponentGetSourceData");
	}
	
	//	spin until we produce a packet of data or we run completely out of source data
	UInt32	theFramesProduced = 0;
	UInt32	theOutputDataByteSize = 0;
	while(SMACSCDUtility::HasData(mSourceData) && (theFramesProduced < mPacketFrameSize))
	{
		//	stuff as much input data into the encoder as we can
		UInt32 theInputDataByteSize = SMACSCDUtility::GetDataByteSize(mSourceData);
		UInt32 theNumberOfPackets = 0;
		maxPacketSize = min(kMaxInputSamples, theInputDataByteSize/sizeof(SInt16));
#if TARGET_MAC_OS && IS_COMPILER_WORKING
		NativeInt16ToFloat32( mHasAltiVec, ((short *)(mSourceData->buffer)), mFloatBuffer, maxPacketSize, 16);
#else
		//	Scalar fallback: convert SInt16 to float, then scale by 1/32768 by
		//	subtracting 15 from the float's biased exponent (0x0F << 23).  Zero
		//	is skipped because its bit pattern has no exponent to adjust
		//	(-0.0 compares equal to 0.0, so a single test suffices).
		for (UInt32 i = 0; i < maxPacketSize; i++)
		{
			tempFloat = (float)(((short *)(mSourceData->buffer))[i]);
			tempFloatPtr = &tempFloat;
			if (tempFloat != 0.0f)
			{
				tempULong = *((UInt32 *)tempFloatPtr);
				tempULong -= 0x07800000;
				tempFloatPtr = (float *)(&tempULong);
			}
			mFloatBuffer[i] = *tempFloatPtr;
		}
#endif
		theInputDataByteSize = maxPacketSize * 4; // this is all we have converted
		theError = AudioCodecAppendInputData(mEncoder, mFloatBuffer, &theInputDataByteSize, &theNumberOfPackets, NULL);
		ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from AudioCodecAppendInputData");
		
		//	update the source data with the amount of data consumed
		//	(float bytes >> 1 == the equivalent number of SInt16 source bytes)
		SMACSCDUtility::ConsumedData(mSourceData, theInputDataByteSize >> 1);
		
		//	see if we can get a packet of output data
		theActualOutputDataByteSize = mMaxPacketByteSize;
		theNumberOutputPackets = 1;
		theEncoderStatus = kAudioCodecProduceOutputPacketFailure;
		theError = AudioCodecProduceOutputPackets(mEncoder, mOutputBuffer, &theActualOutputDataByteSize, &theNumberOutputPackets, NULL, &theEncoderStatus);
		ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from AudioCodecProduceOutputPackets");
		
		if(theNumberOutputPackets == 1)
		{
			//	we produced a full packet of frames, so we're done
			theFramesProduced = mPacketFrameSize;
			theOutputDataByteSize += theActualOutputDataByteSize;
			
		}
		else
		{
			//	we didn't get the data, so get more input data if we have to
			if(SMACSCDUtility::NeedMoreSourceData(mSourceData))
			{
				theError = SoundComponentGetSourceData(mSourceComponent, &mSourceData);
				ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from SoundComponentGetSourceData");
			}
		}
	}
	
	if(theFramesProduced < mPacketFrameSize)
	{
		// Tell the encoder to pad with 0s -- this will change once the API is added
        if (mSourceIsExhausted)
        {
            UInt32 theInputDataByteSize = SMACSCDUtility::GetDataByteSize(mSourceData); // better be 0
            UInt32 theNumberOfPackets = 0;
			maxPacketSize = min(kMaxInputSamples, theInputDataByteSize/sizeof(SInt16));
#if TARGET_MAC_OS && IS_COMPILER_WORKING
			NativeInt16ToFloat32( mHasAltiVec, ((short *)(mSourceData->buffer)), mFloatBuffer, maxPacketSize, 16);
#else
			//	same scalar SInt16 -> float conversion as in the main loop above
			for (UInt32 i = 0; i < maxPacketSize; i++)
			{
				tempFloat = (float)(((short *)(mSourceData->buffer))[i]);
				tempFloatPtr = &tempFloat;
				if (tempFloat != 0.0f)
				{
					tempULong = *((UInt32 *)tempFloatPtr);
					tempULong -= 0x07800000;
					tempFloatPtr = (float *)(&tempULong);
				}
				mFloatBuffer[i] = *tempFloatPtr;
			}
#endif
			theInputDataByteSize = maxPacketSize * 4; // this is all we have converted
			theError = AudioCodecAppendInputData(mEncoder, mFloatBuffer, &theInputDataByteSize, &theNumberOfPackets, NULL);
            ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from AudioCodecAppendInputData");
		}
        
		//	we ran out of input data, but we still haven't produced enough output data
		//	so we have to try one last time to pull on the encoder in case it has
		//	some left overs that it can give us
		theActualOutputDataByteSize = mMaxPacketByteSize;
		theNumberOutputPackets = 1;
		theEncoderStatus = kAudioCodecProduceOutputPacketFailure;
		theError = AudioCodecProduceOutputPackets(mEncoder, mOutputBuffer, &theActualOutputDataByteSize, &theNumberOutputPackets, NULL, &theEncoderStatus);
		ThrowIfError(theError, (CAException)theError, "SMACscom::GetSourceData: got an error from AudioCodecProduceOutputPackets");
		
		if(theNumberOutputPackets == 1)
		{
			//	we produced a full packet of frames, so we're done
			theFramesProduced = mPacketFrameSize;
			theOutputDataByteSize += theActualOutputDataByteSize;
		}
	}
	
	//	set up the return values if any data was produced
	if(theFramesProduced > 0)
	{
		mOutputData.desc.buffer = mOutputBuffer;
		mOutputData.desc.sampleCount = theFramesProduced;
		mOutputData.bufferSize = theOutputDataByteSize;
		mOutputData.frameCount = 1;
		mOutputData.commonFrameSize = theOutputDataByteSize;
        mOutputData.desc.numChannels = mSourceData->numChannels;
		*outData = reinterpret_cast<SoundComponentData*>(&mOutputData);
	}
}
Пример #3
0
//	Convert captured input samples into the Float32 destination buffer.
//	Mixable linear PCM integer formats go through the endian-aware blitters;
//	native Float32 is copied verbatim; anything non-PCM/non-mixable is a raw
//	byte copy.  Always returns kIOReturnSuccess.
IOReturn AREngine::convertInputSamples(const void* inSourceBuffer, void* outTargetBuffer, UInt32 inFirstFrame, UInt32 inNumberFrames, const IOAudioStreamFormat* inFormat, IOAudioStream* /*inStream*/)
{
	//	non-PCM or non-mixable data is passed through as raw bytes
	if((inFormat->fSampleFormat != kIOAudioStreamSampleFormatLinearPCM) || !inFormat->fIsMixable)
	{
		SInt8* theRawBytes = (SInt8*)inSourceBuffer;
		UInt32 theBytesPerFrame = (inFormat->fBitWidth / 8) * inFormat->fNumChannels;
		memcpy(outTargetBuffer, &(theRawBytes[inFirstFrame * theBytesPerFrame]), inNumberFrames * theBytesPerFrame);
		return kIOReturnSuccess;
	}

	//	linear PCM: the destination is Float32 and the blitters count samples,
	//	not frames, so scale everything by the channel count up front
	Float32* theFloatDest = (Float32*)outTargetBuffer;
	UInt32 theSampleOffset = inFirstFrame * inFormat->fNumChannels;
	UInt32 theSampleCount = inNumberFrames * inFormat->fNumChannels;

	if(inFormat->fNumericRepresentation == kIOAudioStreamNumericRepresentationSignedInt)
	{
		//	signed integers: choose native vs. byte-swapping blitter by comparing
		//	the stream's byte order against the host's
#if TARGET_RT_BIG_ENDIAN
		bool theIntsAreNative = (inFormat->fByteOrder == kIOAudioStreamByteOrderBigEndian);
#else
		bool theIntsAreNative = (inFormat->fByteOrder == kIOAudioStreamByteOrderLittleEndian);
#endif

		switch(inFormat->fBitWidth)
		{
			case 8:
				DebugMessage("AREngine::convertInputSamples: can't handle signed integers with a bit width of 8 at the moment");
				break;

			case 16:
			{
				SInt16* theInt16Source = (SInt16*)inSourceBuffer + theSampleOffset;
				if(theIntsAreNative)
					NativeInt16ToFloat32(mHasVectorUnit, theInt16Source, theFloatDest, theSampleCount);
				else
					SwapInt16ToFloat32(mHasVectorUnit, theInt16Source, theFloatDest, theSampleCount);
				break;
			}

			case 24:
			{
				//	24-bit samples are packed into three bytes, so index bytes
				UInt8* theInt24Source = (UInt8*)inSourceBuffer + (3 * theSampleOffset);
				if(theIntsAreNative)
					NativeInt24ToFloat32(mHasVectorUnit, theInt24Source, theFloatDest, theSampleCount);
				else
					SwapInt24ToFloat32(mHasVectorUnit, theInt24Source, theFloatDest, theSampleCount);
				break;
			}

			case 32:
			{
				SInt32* theInt32Source = (SInt32*)inSourceBuffer + theSampleOffset;
				if(theIntsAreNative)
					NativeInt32ToFloat32(mHasVectorUnit, theInt32Source, theFloatDest, theSampleCount);
				else
					SwapInt32ToFloat32(mHasVectorUnit, theInt32Source, theFloatDest, theSampleCount);
				break;
			}

			default:
				DebugMessageN1("AREngine::convertInputSamples: can't handle signed integers with a bit width of %d", inFormat->fBitWidth);
				break;
		}
	}
	else if(inFormat->fNumericRepresentation == kIOAudioStreamNumericRepresentationIEEE754Float)
	{
		//	floats: only host-endian 32-bit Float32 is supported -- a plain copy
#if TARGET_RT_BIG_ENDIAN
		bool theFloatsAreUsable = (inFormat->fBitWidth == 32) && (inFormat->fBitDepth == 32) && (inFormat->fByteOrder == kIOAudioStreamByteOrderBigEndian);
#else
		bool theFloatsAreUsable = (inFormat->fBitWidth == 32) && (inFormat->fBitDepth == 32) && (inFormat->fByteOrder == kIOAudioStreamByteOrderLittleEndian);
#endif
		if(theFloatsAreUsable)
		{
			Float32* theFloatSource = (Float32*)inSourceBuffer + theSampleOffset;
			memcpy(theFloatDest, theFloatSource, theSampleCount * sizeof(Float32));
		}
		else
		{
			DebugMessageN2("AREngine::convertInputSamples: can't handle floats with a bit width of %d, bit depth of %d, and/or the given byte order", inFormat->fBitWidth, inFormat->fBitDepth);
		}
	}

	return kIOReturnSuccess;
}