예제 #1
0
 void OpenALChannel::addData(const ByteArray &inBytes)
 {
    if (!mDynamicStackSize)
    {
       //LOG_SOUND("Adding data with no buffers?");
       return;
    }
    mDynamicDone = false;
    ALuint buffer = mDynamicStack[0];
    mDynamicStack[0] = mDynamicStack[1];
    mDynamicStackSize--;
    QueueBuffer(buffer,inBytes);
    
    // Make sure it is still playing ...
    if (!mDynamicDone && mDynamicStackSize==1)
    {
       ALint val = 0;
       alGetSourcei(mSourceID, AL_SOURCE_STATE, &val);
       if(val != AL_PLAYING)
       {
          //LOG_SOUND("Kickstart (%d/%d)",val,mDynamicStackSize);
          
          // This is an indication that the previous buffer finished playing before we could deliver the new buffer.
          // You will hear ugly popping noises...
          alSourcePlay(mSourceID);
       }
    }
 }
예제 #2
0
 OpenALChannel::OpenALChannel(const ByteArray &inBytes,const SoundTransform &inTransform)
 {
    //LOG_SOUND("OpenALChannel dynamic %d",inBytes.Size());
    // Reset all members to a known state before talking to OpenAL.
    mSound            = 0;
    mSourceID         = 0;
    mUseStream        = false;
    mDynamicBuffer[0] = 0;
    mDynamicBuffer[1] = 0;
    mDynamicStackSize = 0;
    mSampleBuffer     = 0;
    mWasPlaying       = true;
    mStream           = 0;

    alGenBuffers(2, mDynamicBuffer);
    if (mDynamicBuffer[0])
    {
       mSampleBuffer = new short[8192*STEREO_SAMPLES];

       // grab a source ID from openAL
       alGenSources(1, &mSourceID);

       QueueBuffer(mDynamicBuffer[0],inBytes);

       // NOTE(review): mDynamicDone is assumed to be set by QueueBuffer()
       // above — confirm. If the data did not all fit in the first buffer,
       // keep the second one available for addData().
       if (!mDynamicDone)
          mDynamicStack[mDynamicStackSize++] = mDynamicBuffer[1];

       // set some basic source prefs
       alSourcef(mSourceID, AL_PITCH, 1.0f);
       alSourcef(mSourceID, AL_GAIN, inTransform.volume);
       alSource3f(mSourceID, AL_POSITION,
                  (float) cos((inTransform.pan - 1) * (1.5707)), 0,
                  (float) sin((inTransform.pan + 1) * (1.5707)));

       alSourcePlay(mSourceID);
    }
    else
    {
       //LOG_SOUND("Error creating dynamic sound buffer!");
    }

    //sgOpenChannels.push_back((intptr_t)this);
 }
예제 #3
0
ManifestorStatus_t Manifestor_Audio_c::QueueDecodeBuffer(class Buffer_c *Buffer)
{
	ManifestorStatus_t status;
	BufferStatus_t bufStatus;
	unsigned int index;
	//MANIFESTOR_DEBUG(">><<\n");
	AssertComponentState("Manifestor_Audio_c::QueueDecodeBuffer", ComponentRunning);
	//
	// Resolve the slot for this buffer and latch any pending-event flag.
	//
	bufStatus = Buffer->GetIndex(&index);
	if (bufStatus != BufferNoError)
	{
		MANIFESTOR_ERROR("Unable to lookup buffer index %x.\n", bufStatus);
		return ManifestorError;
	}
	StreamBuffer[index].Buffer = Buffer;
	StreamBuffer[index].EventPending = EventPending;
	EventPending = false;
	//
	// Collect the meta-data references attached to this buffer.
	//
	bufStatus = Buffer->ObtainMetaDataReference(Player->MetaDataParsedFrameParametersReferenceType,
						    (void **) &StreamBuffer[index].FrameParameters);
	if (bufStatus != BufferNoError)
	{
		MANIFESTOR_ERROR("Unable to access buffer parsed frame parameters %x.\n", bufStatus);
		return ManifestorError;
	}
	bufStatus = Buffer->ObtainMetaDataReference(Player->MetaDataParsedAudioParametersType,
						    (void **) &StreamBuffer[index].AudioParameters);
	if (bufStatus != BufferNoError)
	{
		MANIFESTOR_ERROR("Unable to access buffer parsed audio parameters %x.\n", bufStatus);
		return ManifestorError;
	}
	Buffer->DumpToRelayFS(ST_RELAY_TYPE_DECODED_AUDIO_BUFFER, ST_RELAY_SOURCE_AUDIO_MANIFESTOR + RelayfsIndex, (void *)Player);
	bufStatus = Buffer->ObtainMetaDataReference(Player->MetaDataAudioOutputTimingType,
						    (void **) &StreamBuffer[index].AudioOutputTiming);
	if (bufStatus != BufferNoError)
	{
		MANIFESTOR_ERROR("Unable to access buffer audio output timing parameters %x.\n", bufStatus);
		return ManifestorError;
	}
	bufStatus = Buffer->ObtainDataReference(NULL, NULL,
						(void **)(&StreamBuffer[index].Data), UnCachedAddress);
	if (bufStatus != BufferNoError)
	{
		MANIFESTOR_ERROR("Unable to obtain buffer's data reference %x.\n", bufStatus);
		return ManifestorError;
	}
	StreamBuffer[index].QueueAsCodedData = true;
	//
	// Detect a change of audio parameters (sample rate etc.) since the
	// last buffer and remember the new set if one occurred.
	//
	if (memcmp(&LastSeenAudioParameters, StreamBuffer[index].AudioParameters,
		   sizeof(LastSeenAudioParameters)) != 0)
	{
		StreamBuffer[index].UpdateAudioParameters = true;
		memcpy(&LastSeenAudioParameters, StreamBuffer[index].AudioParameters,
		       sizeof(LastSeenAudioParameters));
	}
	else
	{
		StreamBuffer[index].UpdateAudioParameters = false;
	}
	//
	// Give the sub-class a chance to inspect the buffer before display.
	//
	status = QueueBuffer(index);
	if (status != ManifestorNoError)
	{
		MANIFESTOR_ERROR("Unable to queue buffer %x.\n", status);
		return status;
	}
	//
	// Append the buffer to the singly-linked display queue consumed by
	// the playback thread, then signal the update.
	//
	OS_LockMutex(&BufferQueueLock);
	QueuedBufferCount++;
	StreamBuffer[index].NextIndex = INVALID_BUFFER_ID; // end marker
	if (BufferQueueHead == INVALID_BUFFER_ID)
		BufferQueueHead = index;
	else
		StreamBuffer[BufferQueueTail].NextIndex = index;
	BufferQueueTail = index;
	OS_UnLockMutex(&BufferQueueLock);
	OS_SetEvent(&BufferQueueUpdated);
	return ManifestorNoError;
}