FIOSAudioSoundBuffer* FIOSAudioSoundBuffer::CreateNativeBuffer(FIOSAudioDevice* IOSAudioDevice, USoundWave* InWave)
{
    FWaveModInfo WaveInfo;

    InWave->InitAudioResource(IOSAudioDevice->GetRuntimeFormat(InWave));
    if (!InWave->ResourceData || InWave->ResourceSize <= 0 || !WaveInfo.ReadWaveInfo(InWave->ResourceData, InWave->ResourceSize))
    {
        InWave->RemoveAudioResource();
        return NULL;
    }

    uint32 UncompressedBlockSize = 0;
    uint32 CompressedBlockSize = 0;
    const uint32 PreambleSize = 7;
    const uint32 BlockSize = *WaveInfo.pBlockAlign;

    switch (*WaveInfo.pFormatTag)
    {
    case SoundFormat_ADPCM:
        // (BlockSize - PreambleSize) * 2 (samples per byte) + 2 (preamble samples)
        UncompressedBlockSize = (2 + (BlockSize - PreambleSize) * 2) * sizeof(int16);
        CompressedBlockSize = BlockSize;

        if ((WaveInfo.SampleDataSize % CompressedBlockSize) != 0)
        {
            InWave->RemoveAudioResource();
            return NULL;
        }
        break;

    case SoundFormat_LPCM:
        break;

    default:
        // Unhandled format tag - don't try to build a buffer for it
        InWave->RemoveAudioResource();
        return NULL;
    }

    // Create new buffer
    FIOSAudioSoundBuffer* Buffer = new FIOSAudioSoundBuffer(IOSAudioDevice, static_cast<ESoundFormat>(*WaveInfo.pFormatTag));

    Buffer->NumChannels = InWave->NumChannels;
    Buffer->SampleRate = InWave->SampleRate;
    Buffer->UncompressedBlockSize = UncompressedBlockSize;
    Buffer->CompressedBlockSize = CompressedBlockSize;
    Buffer->BufferSize = WaveInfo.SampleDataSize;

    Buffer->SampleData = static_cast<int16*>(FMemory::Malloc(Buffer->BufferSize));
    FMemory::Memcpy(Buffer->SampleData, WaveInfo.SampleDataStart, Buffer->BufferSize);

    FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
    check(AudioDeviceManager != nullptr);

    AudioDeviceManager->TrackResource(InWave, Buffer);
    InWave->RemoveAudioResource();

    return Buffer;
}
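
// The ADPCM branch above sizes its decode buffers from the wave's block alignment. Below is a
// minimal worked example of that arithmetic, using an illustrative 512-byte block align; at
// runtime the real value is read from the wave header via WaveInfo.pBlockAlign.
//
// const uint32 ExampleBlockSize = 512;   // *WaveInfo.pBlockAlign: bytes per compressed ADPCM block
// const uint32 ExamplePreambleSize = 7;  // per-block header, which also carries 2 uncompressed samples
//
// // Every byte after the preamble holds two 4-bit ADPCM samples.
// const uint32 SamplesPerBlock = 2 + (ExampleBlockSize - ExamplePreambleSize) * 2;   // 1012 samples
// const uint32 ExampleUncompressedBlockSize = SamplesPerBlock * sizeof(int16);       // 2024 bytes
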
void USoundWaveThumbnailRenderer::Draw(UObject* Object, int32 X, int32 Y, uint32 Width, uint32 Height, FRenderTarget* /*RenderTarget*/, FCanvas* Canvas)
{
	static bool bDrawChannels = true;
	static bool bDrawAsCurve = false;

	USoundWave* SoundWave = Cast<USoundWave>(Object);
	if (SoundWave != nullptr && SoundWave->NumChannels > 0)
	{
		// check if there is any raw sound data
		if( SoundWave->RawData.GetBulkDataSize() > 0 )
		{
			FCanvasLineItem LineItem;
			LineItem.SetColor( FLinearColor::White );
			// Lock raw wave data.
			uint8* RawWaveData = ( uint8* )SoundWave->RawData.Lock( LOCK_READ_ONLY );
			int32 RawDataSize = SoundWave->RawData.GetBulkDataSize();
			FWaveModInfo WaveInfo;

			// parse the wave data
			if( WaveInfo.ReadWaveHeader( RawWaveData, RawDataSize, 0 ) )
			{
				const float SampleYScale = Height / (2.f * 32767 * (bDrawChannels ? SoundWave->NumChannels : 1));

				int16* SamplePtr = reinterpret_cast<int16*>(WaveInfo.SampleDataStart);

				uint32 SampleCount = 0;
				uint32 SampleCounts[10] = {0};

				if (SoundWave->NumChannels <= 2)
				{
					SampleCount = WaveInfo.SampleDataSize / (2 * SoundWave->NumChannels);
				}
				else
				{
					for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
					{
						SampleCounts[ChannelIndex] = SoundWave->ChannelSizes[ChannelIndex] / 2;
						SampleCount = FMath::Max(SampleCount, SampleCounts[ChannelIndex]);
					}
				}
				const uint32 SamplesPerX = (SampleCount / Width) + 1;
				float LastScaledSample[10] = {0.f};

				for (uint32 XOffset = 0; XOffset < Width-1; ++XOffset )
				{
					int64 SampleSum[10] = {0};
					if (SoundWave->NumChannels <= 2)
					{
						for (uint32 PerXSampleIndex = 0; PerXSampleIndex < SamplesPerX; ++PerXSampleIndex)
						{
							for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
							{
								const int16 SampleValue = (bDrawAsCurve ? *SamplePtr : FMath::Abs(*SamplePtr));
								SampleSum[ChannelIndex] += SampleValue;
								++SamplePtr;
							}
						}
					}
					else
					{
						for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
						{
							if (SampleCounts[ChannelIndex] >= SamplesPerX)
							{
								for (uint32 PerXSampleIndex = 0; PerXSampleIndex < SamplesPerX; ++PerXSampleIndex)
								{
									int16 SampleValue = *(SamplePtr + (SamplesPerX * XOffset) + PerXSampleIndex + SoundWave->ChannelOffsets[ChannelIndex] / 2);
									if (!bDrawAsCurve)
									{
										SampleValue = FMath::Abs(SampleValue);
									}
									SampleSum[ChannelIndex] += SampleValue;
								}
								SampleCounts[ChannelIndex] -= SamplesPerX;
							}
						}
					}
					if (bDrawChannels)
					{
						for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
						{
							const float ScaledSample = static_cast<float>(SampleSum[ChannelIndex]) / SamplesPerX * SampleYScale;
							if (bDrawAsCurve)
							{
								if (XOffset > 0)
								{
									const float YCenter = Y + ((2 * ChannelIndex) + 1) * Height / (2.f * SoundWave->NumChannels);
									LineItem.Draw( Canvas,  FVector2D(X + XOffset - 1, YCenter + LastScaledSample[ChannelIndex]), FVector2D(X + XOffset, YCenter + ScaledSample ) );
								}
								LastScaledSample[ChannelIndex] = ScaledSample;
							}
							else if (ScaledSample > 0.001f)
							{
								const float YCenter = Y + ((2 * ChannelIndex) + 1) * Height / (2.f * SoundWave->NumChannels);
								LineItem.Draw( Canvas, FVector2D(X + XOffset, YCenter - ScaledSample), FVector2D(X + XOffset, YCenter + ScaledSample) );
							}
						}
					}
					else
					{
						if (bDrawAsCurve)
						{
							float ScaledSampleSum = 0.f;
							int32 ActiveChannelCount = 0;
							for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
							{
								const float ScaledSample = static_cast<float>(SampleSum[ChannelIndex]) / SamplesPerX * SampleYScale;
								if (FMath::Abs(ScaledSample) > 0.001f)
								{
									ScaledSampleSum += ScaledSample;
									++ActiveChannelCount;
								}
							}
							const float ScaledSample = (ActiveChannelCount > 0 ? ScaledSampleSum / ActiveChannelCount : 0.f);
							if (XOffset > 0)
							{
								const float YCenter = Y + 0.5f * Height;
								LineItem.Draw( Canvas, FVector2D(X + XOffset - 1, YCenter + LastScaledSample[0]), FVector2D(X + XOffset, YCenter + ScaledSample) );
							}
							LastScaledSample[0] = ScaledSample;
						}
						else
						{
							float MaxScaledSample = 0.f;
							for (int32 ChannelIndex = 0; ChannelIndex < SoundWave->NumChannels; ++ChannelIndex)
							{
								const float ScaledSample = static_cast<float>(SampleSum[ChannelIndex]) / SamplesPerX * SampleYScale;
								MaxScaledSample = FMath::Max(MaxScaledSample, ScaledSample);
							}
							if (MaxScaledSample > 0.001f)
							{
								const float YCenter = Y + 0.5f * Height;
								LineItem.Draw( Canvas, FVector2D(X + XOffset, YCenter - MaxScaledSample), FVector2D(X + XOffset, YCenter + MaxScaledSample) );
							}
						}
					}
				}
			}

			SoundWave->RawData.Unlock();
		}
	}
}
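
The loop above collapses the PCM data into one column of pixels per X coordinate: SamplesPerX sample frames are summed per column and rescaled so a full-scale int16 fits inside a single channel lane. A small sketch of that scaling with hypothetical thumbnail dimensions:

// Illustrative values only; the real ones come from the thumbnail size and the wave header.
const uint32 ThumbWidth = 256;
const uint32 ThumbHeight = 128;
const int32  ExampleNumChannels = 2;
const uint32 ExampleSampleDataSize = 882000;                                      // ~5 s of 44.1 kHz stereo 16-bit PCM

const uint32 ExampleSampleCount = ExampleSampleDataSize / (2 * ExampleNumChannels);   // interleaved sample frames (220500)
const uint32 ExampleSamplesPerX = (ExampleSampleCount / ThumbWidth) + 1;              // frames folded into each pixel column (862)
const float  ExampleSampleYScale = ThumbHeight / (2.f * 32767 * ExampleNumChannels);  // maps full-scale int16 into half of one channel lane
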
Example #3
/**
 * Cook a multistream (normally 5.1) wave
 */
void CookSurroundWave( USoundWave* SoundWave, FName FormatName, const IAudioFormat& Format, TArray<uint8>& Output)
{
	check(!Output.Num());
#if WITH_EDITORONLY_DATA
	int32					i;
	uint32					SampleDataSize = 0;
	FWaveModInfo			WaveInfo;
	TArray<TArray<uint8> >	SourceBuffers;
	TArray<int32>			RequiredChannels;

	uint8* RawWaveData = ( uint8* )SoundWave->RawData.Lock( LOCK_READ_ONLY );

	// Front left channel is the master
	static_assert(SPEAKER_FrontLeft == 0, "Front-left speaker must be first.");

	// loop through channels to find which have data and which are required
	for (i = 0; i < SPEAKER_Count; i++)
	{
		FWaveModInfo WaveInfoInner;

		// Only mono files allowed
		if (WaveInfoInner.ReadWaveHeader(RawWaveData, SoundWave->ChannelSizes[i], SoundWave->ChannelOffsets[i])
			&& *WaveInfoInner.pChannels == 1)
		{
			if (SampleDataSize == 0)
			{
				// keep wave info/size of first channel data we find
				WaveInfo = WaveInfoInner;
				SampleDataSize = WaveInfo.SampleDataSize;
			}
			switch (i)
			{
				case SPEAKER_FrontLeft:
				case SPEAKER_FrontRight:
				case SPEAKER_LeftSurround:
				case SPEAKER_RightSurround:
					// Must have quadraphonic surround channels
					RequiredChannels.AddUnique(SPEAKER_FrontLeft);
					RequiredChannels.AddUnique(SPEAKER_FrontRight);
					RequiredChannels.AddUnique(SPEAKER_LeftSurround);
					RequiredChannels.AddUnique(SPEAKER_RightSurround);
					break;
				case SPEAKER_FrontCenter:
				case SPEAKER_LowFrequency:
					// Must have 5.1 surround channels
					for (int32 Channel = SPEAKER_FrontLeft; Channel <= SPEAKER_RightSurround; Channel++)
					{
						RequiredChannels.AddUnique(Channel);
					}
					break;
				case SPEAKER_LeftBack:
				case SPEAKER_RightBack:
					// Must have all previous channels
					for (int32 Channel = 0; Channel < i; Channel++)
					{
						RequiredChannels.AddUnique(Channel);
					}
					break;
				default:
					// unsupported channel count
					break;
			}
		}
	}

	if (SampleDataSize != 0)
	{
		int32 ChannelCount = 0;
		// Extract all the info for channels or insert blank data
		for( i = 0; i < SPEAKER_Count; i++ )
		{
			FWaveModInfo WaveInfoInner;
			if( WaveInfoInner.ReadWaveHeader( RawWaveData, SoundWave->ChannelSizes[ i ], SoundWave->ChannelOffsets[ i ] )
				&& *WaveInfoInner.pChannels == 1 )
			{
				ChannelCount++;
				TArray<uint8>& Input = *new (SourceBuffers) TArray<uint8>;
				Input.AddUninitialized(WaveInfoInner.SampleDataSize);
				FMemory::Memcpy(Input.GetData(), WaveInfoInner.SampleDataStart, WaveInfoInner.SampleDataSize);
				SampleDataSize = FMath::Min<uint32>(WaveInfoInner.SampleDataSize, SampleDataSize);
			}
			else if (RequiredChannels.Contains(i))
			{
				// Add an empty channel for cooking
				ChannelCount++;
				TArray<uint8>& Input = *new (SourceBuffers) TArray<uint8>;
				Input.AddZeroed(SampleDataSize);
			}
		}
	
		// Only allow channel counts the engine can play back (quad, 5.1, 6.1, or 7.1)
		if( ChannelCount == 4 || ChannelCount == 6 || ChannelCount == 7 || ChannelCount == 8 )
		{
			UE_LOG(LogAudioDerivedData, Log,  TEXT( "Cooking %d channels for: %s" ), ChannelCount, *SoundWave->GetFullName() );

			FSoundQualityInfo QualityInfo = { 0 };

			QualityInfo.Quality = SoundWave->CompressionQuality;
			QualityInfo.NumChannels = ChannelCount;
			QualityInfo.SampleRate = *WaveInfo.pSamplesPerSec;
			QualityInfo.SampleDataSize = SampleDataSize;
			QualityInfo.DebugName = SoundWave->GetFullName();
			//@todo tighten up the checking for empty results here
			if(Format.CookSurround(FormatName, SourceBuffers, QualityInfo, Output)) 
			{
				if (SoundWave->SampleRate != *WaveInfo.pSamplesPerSec)
				{
					UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->SampleRate during cooking %s." ), *SoundWave->GetFullName() );
					SoundWave->SampleRate = *WaveInfo.pSamplesPerSec;
				}
				if (SoundWave->NumChannels != ChannelCount)
				{
					UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->NumChannels during cooking %s." ), *SoundWave->GetFullName() );
					SoundWave->NumChannels = ChannelCount;
				}
				if (SoundWave->RawPCMDataSize != SampleDataSize * ChannelCount)
				{
					UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->RawPCMDataSize during cooking %s." ), *SoundWave->GetFullName() );
					SoundWave->RawPCMDataSize = SampleDataSize * ChannelCount;
				}
				if (SoundWave->Duration != ( float )SampleDataSize / (SoundWave->SampleRate * sizeof( int16 )))
				{
					UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->Duration during cooking %s." ), *SoundWave->GetFullName() );
					SoundWave->Duration = ( float )SampleDataSize / (SoundWave->SampleRate * sizeof( int16 ));
				}			
			}
		}
		else
		{
			UE_LOG(LogAudioDerivedData, Warning, TEXT( "No format available for a %d channel surround sound: %s" ), ChannelCount, *SoundWave->GetFullName() );
		}
	}
	else
	{
		UE_LOG(LogAudioDerivedData, Warning, TEXT( "Cooking surround sound failed: %s" ), *SoundWave->GetPathName() );
	}
	SoundWave->RawData.Unlock();
#endif
}
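
CookSurroundWave expects an empty output array and an IAudioFormat for the target codec. A hedged sketch of how a caller might drive it, assuming the codec is looked up through the target platform manager's FindAudioFormat (as the audio derived-data paths do); "OGG" is used purely as an illustrative format name:

#if WITH_EDITORONLY_DATA
// Hypothetical helper for illustration only; not part of the code shown above.
static void CookSurroundWaveExample(USoundWave* SoundWave, TArray<uint8>& OutCooked)
{
	const FName FormatName(TEXT("OGG"));   // illustrative codec name
	ITargetPlatformManagerModule* TPM = GetTargetPlatformManager();
	const IAudioFormat* AudioFormat = TPM ? TPM->FindAudioFormat(FormatName) : nullptr;
	if (AudioFormat && SoundWave)
	{
		OutCooked.Reset();   // CookSurroundWave check()s that the output starts empty
		CookSurroundWave(SoundWave, FormatName, *AudioFormat, OutCooked);
	}
}
#endif
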
Example #4
/**
 * Cook a simple mono or stereo wave
 */
static void CookSimpleWave(USoundWave* SoundWave, FName FormatName, const IAudioFormat& Format, TArray<uint8>& Output)
{
	FWaveModInfo WaveInfo;
	TArray<uint8> Input;
	check(!Output.Num());

	bool bWasLocked = false;

	// check if there is any raw sound data
	if( SoundWave->RawData.GetBulkDataSize() > 0 )
	{
		// Lock raw wave data.
		uint8* RawWaveData = ( uint8* )SoundWave->RawData.Lock( LOCK_READ_ONLY );
		bWasLocked = true;
		int32 RawDataSize = SoundWave->RawData.GetBulkDataSize();

		// parse the wave data
		if( !WaveInfo.ReadWaveHeader( RawWaveData, RawDataSize, 0 ) )
		{
			UE_LOG(LogAudioDerivedData, Warning, TEXT( "Only mono or stereo 16 bit waves allowed: %s (%d bytes)" ), *SoundWave->GetFullName(), RawDataSize );
		}
		else
		{
			Input.AddUninitialized(WaveInfo.SampleDataSize);
			FMemory::Memcpy(Input.GetData(), WaveInfo.SampleDataStart, WaveInfo.SampleDataSize);
		}
	}

	if(!Input.Num())
	{
		UE_LOG(LogAudioDerivedData, Warning, TEXT( "Can't cook %s because there is no source compressed or uncompressed PC sound data" ), *SoundWave->GetFullName() );
	}
	else
	{
		FSoundQualityInfo QualityInfo = { 0 };

		QualityInfo.Quality = SoundWave->CompressionQuality;
		QualityInfo.NumChannels = *WaveInfo.pChannels;
		QualityInfo.SampleRate = *WaveInfo.pSamplesPerSec;
		QualityInfo.SampleDataSize = Input.Num();
		QualityInfo.DebugName = SoundWave->GetFullName();

		// Cook the data.
		if(Format.Cook(FormatName, Input, QualityInfo, Output)) 
		{
			//@todo tighten up the checking for empty results here
			if (SoundWave->SampleRate != *WaveInfo.pSamplesPerSec)
			{
				UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->SampleRate during cooking %s." ), *SoundWave->GetFullName() );
				SoundWave->SampleRate = *WaveInfo.pSamplesPerSec;
			}
			if (SoundWave->NumChannels != *WaveInfo.pChannels)
			{
				UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->NumChannels during cooking %s." ), *SoundWave->GetFullName() );
				SoundWave->NumChannels = *WaveInfo.pChannels;
			}
			if (SoundWave->RawPCMDataSize != Input.Num())
			{
				UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->RawPCMDataSize during cooking %s." ), *SoundWave->GetFullName() );
				SoundWave->RawPCMDataSize = Input.Num();
			}
			if (SoundWave->Duration != ( float )SoundWave->RawPCMDataSize / (SoundWave->SampleRate * sizeof( int16 ) * SoundWave->NumChannels))
			{
				UE_LOG(LogAudioDerivedData, Warning, TEXT( "Updated SoundWave->Duration during cooking %s." ), *SoundWave->GetFullName() );
				SoundWave->Duration = ( float )SoundWave->RawPCMDataSize / (SoundWave->SampleRate * sizeof( int16 ) * SoundWave->NumChannels);
			}
		}
	}
	if (bWasLocked)
	{
		SoundWave->RawData.Unlock();
	}
}
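
After a successful cook, CookSimpleWave re-derives the asset's duration directly from the uncompressed PCM size. A quick worked example of that formula with hypothetical numbers:

// Illustrative numbers: 2 seconds of 44.1 kHz, stereo, 16-bit PCM.
// const int32 ExampleSampleRate = 44100;
// const int32 ExampleNumChannels = 2;
// const int32 ExampleRawPCMDataSize = ExampleSampleRate * ExampleNumChannels * sizeof(int16) * 2;   // 352800 bytes
//
// // Same expression CookSimpleWave uses to refresh SoundWave->Duration.
// const float ExampleDuration = (float)ExampleRawPCMDataSize / (ExampleSampleRate * sizeof(int16) * ExampleNumChannels);   // 2.0 seconds
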
Example #5
FALSoundBuffer* FALSoundBuffer::CreateNativeBuffer( FALAudioDevice* AudioDevice, USoundWave* Wave)
{
	SCOPE_CYCLE_COUNTER( STAT_AudioResourceCreationTime );

	// Can't create a buffer without any source data
	if( Wave == NULL || Wave->NumChannels == 0 )
	{
		return( NULL );
	}

	// This code is not relevant for now on HTML5, but is kept for consistency with other platforms.
	// Check to see if the decompression task has finished on the other thread.
	if (Wave->AudioDecompressor != NULL)
	{
		Wave->AudioDecompressor->EnsureCompletion();

		// Remove the decompressor
		delete Wave->AudioDecompressor;
		Wave->AudioDecompressor = NULL;
	}

	Wave->InitAudioResource(AudioDevice->GetRuntimeFormat(Wave));

	FALSoundBuffer* Buffer = NULL;

	// Find the existing buffer if any
	if( Wave->ResourceID )
	{
		Buffer = static_cast<FALSoundBuffer*>(AudioDevice->WaveBufferMap.FindRef( Wave->ResourceID ));
	}

	if( Buffer == NULL )
	{
		// Create new buffer.
		Buffer = new FALSoundBuffer( AudioDevice );

		alGenBuffers( 1, Buffer->BufferIds );

		AudioDevice->alError( TEXT( "RegisterSound" ) );

		AudioDevice->TrackResource(Wave, Buffer);

		Buffer->InternalFormat = AudioDevice->GetInternalFormat( Wave->NumChannels );
		Buffer->NumChannels = Wave->NumChannels;
		Buffer->SampleRate = Wave->SampleRate;

		if (Wave->RawPCMData)
		{
			// upload it
			Buffer->BufferSize = Wave->RawPCMDataSize;
			alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, Wave->RawPCMData, Wave->RawPCMDataSize, Buffer->SampleRate );

			// Free up the data if necessary
			if( Wave->bDynamicResource )
			{
				FMemory::Free( Wave->RawPCMData );
				Wave->RawPCMData = NULL;
				Wave->bDynamicResource = false;
			}
		}
		else
		{
			// get the raw data
			uint8* SoundData = ( uint8* )Wave->RawData.Lock( LOCK_READ_ONLY );
			// the data may be a complete wave file, in which case we skip over its header below
			int32 SoundDataSize = Wave->RawData.GetBulkDataSize();

			// is there a wave header?
			FWaveModInfo WaveInfo;
			if (WaveInfo.ReadWaveInfo(SoundData, SoundDataSize))
			{
				// if so, modify the location and size of the sound data based on header
				SoundData = WaveInfo.SampleDataStart;
				SoundDataSize = WaveInfo.SampleDataSize;
			}
			// let the Buffer know the final size
			Buffer->BufferSize = SoundDataSize;

			// upload it
			alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, SoundData, Buffer->BufferSize, Buffer->SampleRate );
			// unload it
			Wave->RawData.Unlock();
		}

		if( AudioDevice->alError( TEXT( "RegisterSound (buffer data)" ) ) || ( Buffer->BufferSize == 0 ) )
		{
			Buffer->InternalFormat = 0;
		}

		if( Buffer->InternalFormat == 0 )
		{
			UE_LOG ( LogAudio, Log,TEXT( "Audio: sound format not supported for '%s' (%d)" ), *Wave->GetName(), Wave->NumChannels );
			delete Buffer;
			Buffer = NULL;
		}
	}

	return Buffer;
}