Example #1
void AudioSource::AddAudioSegment(AudioSegment *newSegment, float curVolume)
{
    for (UINT i=0; i<audioFilters.Num(); i++)
    {
        if (newSegment)
            newSegment = audioFilters[i]->Process(newSegment);
    }

    if (newSegment)
    {
        MultiplyAudioBuffer(newSegment->audioData.Array(), newSegment->audioData.Num(), curVolume*sourceVolume);
        audioSegments << newSegment;
    }
}
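/**************************************************
MultiplyAudioBuffer is not defined in this excerpt.  A minimal sketch of the
behaviour assumed above (scale every float sample in place by a volume factor)
is shown below; the real helper may also include an SSE fast path.
***************************************************/
static void MultiplyAudioBuffer(float *buffer, UINT totalFloats, float mulVal)
{
    // Nothing to do when the volume is unity.
    if (mulVal == 1.0f)
        return;

    for (UINT i = 0; i < totalFloats; i++)
        buffer[i] *= mulVal;
}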
/**************************************************
Receives pushed audio data.
Parameters:
lpData  : pointer to the input sample data
size    : length of the input data in bytes
pts     : timestamp of the input data
Video   : video source whose playback callback receives the audio
bCanPlay: whether the audio may also be buffered and played through the wave-out devices
***************************************************/
void CDemandMediaAudio::PushAudio(const void *lpData, unsigned int size, int64_t pts, IBaseVideo *Video, bool bCanPlay)
{
	VideoSource *Source = dynamic_cast<VideoSource*>(Video);

	if (!m_uBlockSize || !Source)
		return;
	
	if (m_sAudioParam.iChannel <= 2)
	{
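		// Mono/stereo 16-bit path: apply the volume in place (with clamping)
		// and hand the buffer straight to the video source's playback callback.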
		if (fVolume != 1.0f)
		{
			short *Tem = (short*)lpData;
			for (unsigned int i = 0; i < size; i += 2)
			{
				long sVolume = Tem[i / 2];

				sVolume *= fVolume;

				if (sVolume > 0x7fff)
				{
					sVolume = 0x7fff;
				}
				else if (sVolume < -0x8000)
				{
					sVolume = -0x8000;
				}

				Tem[i / 2] = (short)sVolume;
			}
		}

		Source->PlayCallBackAudio((LPBYTE)lpData, size);
	}
	else
	{
		UINT totalSamples = size * 8 / m_sAudioParam.iBitPerSample;
		if (TemconvertBuffer.Num() < totalSamples)
			TemconvertBuffer.SetSize(totalSamples);

		OutputconvertBuffer.SetSize(totalSamples / m_sAudioParam.iChannel * 2);
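		// Convert the incoming samples to normalized floats according to their bit depth.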


		if (m_sAudioParam.iBitPerSample == 8)
		{
			float *tempConvert = TemconvertBuffer.Array();
			char *tempSByte = (char*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(*(tempSByte++)) / 127.0f;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 16)
		{
			float *tempConvert = TemconvertBuffer.Array();
			short *tempShort = (short*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(*(tempShort++)) / 32767.0f;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 24)
		{
			float *tempConvert = TemconvertBuffer.Array();
			BYTE *tempTriple = (BYTE*)lpData;
			TripleToLong valOut;

			while (totalSamples--)
			{
				// Reinterpret the next three bytes as a packed 24-bit sample.
				TripleToLong &valIn = (TripleToLong&)tempTriple[0];

				valOut.wVal = valIn.wVal;
				valOut.tripleVal = valIn.tripleVal;

				// Sign-extend negative 24-bit values into the top byte.
				if (valOut.tripleVal > 0x7F)
					valOut.lastByte = 0xFF;

				*(tempConvert++) = float(double(valOut.val) / 8388607.0);
				tempTriple += 3;
			}
		}
		else if (m_sAudioParam.iBitPerSample == 32)
		{
			float *tempConvert = TemconvertBuffer.Array();
			long *tempLong = (long*)lpData;

			while (totalSamples--)
			{
				*(tempConvert++) = float(double(*(tempLong++)) / 2147483647.0);
			}
		}

		float *inputTemp = TemconvertBuffer.Array();
		float *outputTemp = OutputconvertBuffer.Array();

		UINT numFloats = size * 8 / m_sAudioParam.iBitPerSample;
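
		// Downmix the interleaved float samples to stereo according to the channel count.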

		if (m_sAudioParam.iChannel == 3)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				// Drop LFE since we don't need it
				//float lfe       = inputTemp[2]*lowFreqMix;

				*(outputTemp++) = left;
				*(outputTemp++) = right;

				inputTemp += 3;
			}
		}
		else if (m_sAudioParam.iChannel == 4)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				// Drop the remaining channels; only the front pair is kept.
				//float frontCenter = inputTemp[2];
				//float lowFreq = inputTemp[3];

				*(outputTemp++) = left;
				*(outputTemp++) = right;

				inputTemp += 4;
			}
		}
		else if (m_sAudioParam.iChannel == 5)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				// Skip LFE , we don't really need it.
				//float lfe       = inputTemp[2];

				float rearLeft = inputTemp[3] * surroundMix4;
				float rearRight = inputTemp[4] * surroundMix4;

				// Same idea as with 5.1 downmix

				*(outputTemp++) = (left + rearLeft)  * attn4dotX;
				*(outputTemp++) = (right + rearRight) * attn4dotX;

				inputTemp += 5;
			}
		}
		else if (m_sAudioParam.iChannel == 6)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];
				float center = inputTemp[2] * centerMix;

				// Drop LFE since we don't need it
				//float lowFreq = inputTemp[3]*lowFreqMix;

				float rearLeft = inputTemp[4] * surroundMix;
				float rearRight = inputTemp[5] * surroundMix;

				// Downmix from 5.1 to stereo
				*(outputTemp++) = (left + center + rearLeft) * attn5dot1;
				*(outputTemp++) = (right + center + rearRight) * attn5dot1;

				inputTemp += 6;
			}
		}
		else if (m_sAudioParam.iChannel == 8)
		{
			float *endTemp = inputTemp + numFloats;

			while (inputTemp < endTemp)
			{
				float left = inputTemp[0];
				float right = inputTemp[1];

				float center = inputTemp[2] * centerMix;

				// Drop LFE since we don't need it
				//float lowFreq       = inputTemp[3]*lowFreqMix;

				float rearLeft = inputTemp[4] * surroundMix;
				float rearRight = inputTemp[5] * surroundMix;

				// Drop SPEAKER_FRONT_LEFT_OF_CENTER , SPEAKER_FRONT_RIGHT_OF_CENTER
				//float centerLeft    = inputTemp[6];
				//float centerRight   = inputTemp[7];

				// Downmix from 5.1 to stereo
				*(outputTemp++) = (left + center + rearLeft)  * attn5dot1;
				*(outputTemp++) = (right + center + rearRight) * attn5dot1;

				inputTemp += 8;
			}
		}
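
		// Channel counts without a branch above (e.g. 7) leave OutputconvertBuffer unfilled.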

		if (fVolume != 1.0f)
			MultiplyAudioBuffer(OutputconvertBuffer.Array(), OutputconvertBuffer.Num(), fVolume);

		Source->PlayCallBackAudio((LPBYTE)OutputconvertBuffer.Array(), OutputconvertBuffer.Num() * 4);

		if (bCanPlay)
		{
			bool bPlayLive = false;
			if (bLiveInstance)
			{
				AudioTimestamp audioTimestamp;
				EnterCriticalSection(&sampleBufferLock);
				sampleBuffer.AppendArray((BYTE *)(OutputconvertBuffer.Array()), OutputconvertBuffer.Num() * 4);
				audioTimestamp.count = size / m_uBlockSize;
				audioTimestamp.pts = pts;
				//sampleBufferPts.push_back(audioTimestamp);
				LeaveCriticalSection(&sampleBufferLock);
				bPlayLive = m_bPlayPcmLive;
			}
			else
			{
				EnterCriticalSection(&sampleBufferLock);
				sampleBuffer.RemoveRange(0, sampleBuffer.Num());
				LeaveCriticalSection(&sampleBufferLock);
			}

			int Len = OutputconvertBuffer.Num();
			char *OutBuffer;
			CaculateVolume((LPVOID)OutputconvertBuffer.Array(), Len, (void**)&OutBuffer);

			EnterCriticalSection(&sampleBufferLock);
			
			if (m_pAudioWaveOut && (m_bPlayPcmLocal || bPlayLive))
			{
				m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);

				if (!bSameDevice && bProjector && m_pSecWaveOut)
					m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);

			}
			else if (bProjector)
			{
				if (bSameDevice && m_pAudioWaveOut)
				{
					m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);
				}
				else if (m_pSecWaveOut)
				{
					m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len * 4);
				}
			}
			LeaveCriticalSection(&sampleBufferLock);
		}
		else
		{
			int Len = OutputconvertBuffer.Num();
			char *OutBuffer;
			CaculateVolume((LPVOID)OutputconvertBuffer.Array(), Len, (void**)&OutBuffer, true);
		}

		return;
	}

	if (bCanPlay)
	{
		bool bPlayLive = false;
		size = size / m_uBlockSize;	// round down to a whole number of sample blocks
		if (bLiveInstance)
		{
			AudioTimestamp audioTimestamp;
			EnterCriticalSection(&sampleBufferLock);
			sampleBuffer.AppendArray(static_cast<const BYTE *>(lpData), size * m_uBlockSize);
			audioTimestamp.count = size;
			audioTimestamp.pts = pts;
			//sampleBufferPts.push_back(audioTimestamp);
			LeaveCriticalSection(&sampleBufferLock);
			bPlayLive = m_bPlayPcmLive;
		}
		else
		{
			EnterCriticalSection(&sampleBufferLock);
			sampleBuffer.RemoveRange(0, sampleBuffer.Num());
			LeaveCriticalSection(&sampleBufferLock);
		}

		EnterCriticalSection(&sampleBufferLock);

		int Len = size * m_uBlockSize;
		char *OutBuffer;
		CaculateVolume((LPVOID)lpData, Len, (void**)&OutBuffer);

		if (m_pAudioWaveOut && (m_bPlayPcmLocal || bPlayLive))
		{
			m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len);

			if (!bSameDevice && bProjector && m_pSecWaveOut)
				m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len);

		}
		else if (bProjector)
		{
			if (bSameDevice && m_pAudioWaveOut)
			{
				m_pAudioWaveOut->push_pcm_data((char*)OutBuffer, Len);
			}
			else if (m_pSecWaveOut)
			{
				m_pSecWaveOut->push_pcm_data((char*)OutBuffer, Len);
			}
		}
		LeaveCriticalSection(&sampleBufferLock);
	}
	else
	{
		int Len = size;
		char *OutBuffer;
		CaculateVolume((LPVOID)lpData, Len, (void**)&OutBuffer, true);
	}
}
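// The 24-bit branch above relies on a TripleToLong helper that is not shown in this
// excerpt.  A plausible definition, assuming a little-endian union that packs the three
// sample bytes plus a sign-extension byte into one 32-bit value, would be:
union TripleToLong
{
	LONG val;			// full, sign-extended 32-bit sample
	struct
	{
		WORD wVal;		// low two bytes of the 24-bit sample
		BYTE tripleVal;	// high byte of the 24-bit sample
		BYTE lastByte;	// set to 0xFF to sign-extend negative samples
	};
};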
Example #3
UINT MMDeviceAudioSource::GetNextBuffer(float curVolume)
{
    UINT captureSize = 0;
    HRESULT err = mmCapture->GetNextPacketSize(&captureSize);
    if(FAILED(err))
    {
        RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetNextPacketSize failed"));
        return NoAudioAvailable;
    }

    float *outputBuffer = NULL;

    if(captureSize)
    {
        LPBYTE captureBuffer;
        DWORD dwFlags = 0;
        UINT numAudioFrames = 0;

        UINT64 devPosition;
        UINT64 qpcTimestamp;
        err = mmCapture->GetBuffer(&captureBuffer, &numAudioFrames, &dwFlags, &devPosition, &qpcTimestamp);
        if(FAILED(err))
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetBuffer failed"));
            return NoAudioAvailable;
        }

        QWORD newTimestamp;

        if(dwFlags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR)
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: woa woa woa, getting timestamp errors from the audio subsystem.  device = %s"), GetDeviceName().Array());
            if(!bBrokenTimestamp)
                newTimestamp = lastUsedTimestamp + numAudioFrames*1000/inputSamplesPerSec;
        }
        else
        {
            if(!bBrokenTimestamp)
                newTimestamp = qpcTimestamp/10000;

            /*UINT64 freq;
            mmClock->GetFrequency(&freq);
            Log(TEXT("position: %llu, numAudioFrames: %u, freq: %llu, newTimestamp: %llu, test: %llu"), devPosition, numAudioFrames, freq, newTimestamp, devPosition*8000/freq);*/
        }

        //have to do this crap to account for broken devices or device drivers.  absolutely unbelievable.
        if(!bFirstFrameReceived)
        {
            LARGE_INTEGER clockFreq;
            QueryPerformanceFrequency(&clockFreq);
            QWORD curTime = GetQPCTimeMS(clockFreq.QuadPart);

            if(newTimestamp < (curTime-1000) || newTimestamp > (curTime+1000))
            {
                bBrokenTimestamp = true;

                Log(TEXT("MMDeviceAudioSource::GetNextBuffer: Got bad audio timestamp offset %lld from device: '%s', timestamps for this device will be calculated.  curTime: %llu, newTimestamp: %llu"), (LONGLONG)(newTimestamp - curTime), GetDeviceName().Array(), curTime, newTimestamp);
                lastUsedTimestamp = newTimestamp = curTime;
            }
            else
                lastUsedTimestamp = newTimestamp;

            bFirstFrameReceived = true;
        }

        if(tempBuffer.Num() < numAudioFrames*2)
            tempBuffer.SetSize(numAudioFrames*2);

        outputBuffer = tempBuffer.Array();
        float *tempOut = outputBuffer;

        //------------------------------------------------------------
        // channel upmix/downmix

        if(inputChannels == 1)
        {
            UINT  numFloats   = numAudioFrames;
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = outputBuffer;
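
            // SSE2 fast path: duplicate each mono sample into an interleaved stereo
            // pair, handling four input samples (eight output floats) per iteration.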

            if(App->SSE2Available() && (UPARAM(inputTemp) & 0xF) == 0 && (UPARAM(outputTemp) & 0xF) == 0)
            {
                UINT alignedFloats = numFloats & 0xFFFFFFFC;
                for(UINT i=0; i<alignedFloats; i += 4)
                {
                    __m128 inVal   = _mm_load_ps(inputTemp+i);

                    __m128 outVal1 = _mm_unpacklo_ps(inVal, inVal);
                    __m128 outVal2 = _mm_unpackhi_ps(inVal, inVal);

                    _mm_store_ps(outputTemp+(i*2),   outVal1);
                    _mm_store_ps(outputTemp+(i*2)+4, outVal2);
                }

                numFloats  -= alignedFloats;
                inputTemp  += alignedFloats;
                outputTemp += alignedFloats*2;
            }

            while(numFloats--)
            {
                float inputVal = *inputTemp;
                *(outputTemp++) = inputVal;
                *(outputTemp++) = inputVal;

                inputTemp++;
            }
        }
        else if(inputChannels == 2) //straight up copy
        {
            if(App->SSE2Available())
                SSECopy(outputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
            else
                mcpy(outputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
        }
        else
        {
            //todo: downmix optimization, also support for other speaker configurations than ones I can merely "think" of.  ugh.
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = outputBuffer;

            if(inputChannelMask == KSAUDIO_SPEAKER_QUAD)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float rear      = (inputTemp[2]+inputTemp[3])*surroundMix;

                    *(outputTemp++) = left  - rear;
                    *(outputTemp++) = right + rear;

                    inputTemp  += 4;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_2POINT1)
            {
                UINT numFloats = numAudioFrames*3;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float lfe       = inputTemp[2]*lowFreqMix;

                    *(outputTemp++) = left  + lfe;
                    *(outputTemp++) = right + lfe;

                    inputTemp  += 3;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_4POINT1)
            {
                UINT numFloats = numAudioFrames*5;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float lfe       = inputTemp[2]*lowFreqMix;
                    float rear      = (inputTemp[3]+inputTemp[4])*surroundMix;

                    *(outputTemp++) = left  + lfe - rear;
                    *(outputTemp++) = right + lfe + rear;

                    inputTemp  += 5;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_SURROUND)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float rear      = inputTemp[3]*(surroundMix*dbMinus3);

                    *(outputTemp++) = left  + center - rear;
                    *(outputTemp++) = right + center + rear;

                    inputTemp  += 4;
                }
            }
            //don't think this will work for both
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float lowFreq   = inputTemp[3]*lowFreqMix;
                    float rear      = (inputTemp[4]+inputTemp[5])*surroundMix;

                    *(outputTemp++) = left  + center + lowFreq - rear;
                    *(outputTemp++) = right + center + lowFreq + rear;

                    inputTemp  += 6;
                }
            }
            //todo ------------------
            //not sure if my 5.1/7.1 downmixes are correct
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float lowFreq   = inputTemp[3]*lowFreqMix;
                    float sideLeft  = inputTemp[4]*dbMinus3;
                    float sideRight = inputTemp[5]*dbMinus3;

                    *(outputTemp++) = left  + center + sideLeft  + lowFreq;
                    *(outputTemp++) = right + center + sideRight + lowFreq;

                    inputTemp  += 6;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left          = inputTemp[0];
                    float right         = inputTemp[1];
                    float center        = inputTemp[2]*(centerMix*dbMinus3);
                    float lowFreq       = inputTemp[3]*lowFreqMix;
                    float rear          = (inputTemp[4]+inputTemp[5])*surroundMix;
                    float centerLeft    = inputTemp[6]*dbMinus6;
                    float centerRight   = inputTemp[7]*dbMinus6;

                    *(outputTemp++) = left  + centerLeft  + center + lowFreq - rear;
                    *(outputTemp++) = right + centerRight + center + lowFreq + rear;

                    inputTemp  += 8;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float lowFreq   = inputTemp[3]*lowFreqMix;
                    float rear      = (inputTemp[4]+inputTemp[5])*(surroundMix*dbMinus3);
                    float sideLeft  = inputTemp[6]*dbMinus6;
                    float sideRight = inputTemp[7]*dbMinus6;

                    *(outputTemp++) = left  + sideLeft + center + lowFreq - rear;
                    *(outputTemp++) = right + sideRight + center + lowFreq + rear;

                    inputTemp  += 8;
                }
            }
        }

        mmCapture->ReleaseBuffer(numAudioFrames);

        //------------------------------------------------------------
        // resample

        if(bResample)
        {
            UINT frameAdjust = UINT((double(numAudioFrames) * resampleRatio) + 1.0);
            UINT newFrameSize = frameAdjust*2;

            if(tempResampleBuffer.Num() < newFrameSize)
                tempResampleBuffer.SetSize(newFrameSize);

            SRC_DATA data;
            data.src_ratio = resampleRatio;

            data.data_in = tempBuffer.Array();
            data.input_frames = numAudioFrames;

            data.data_out = tempResampleBuffer.Array();
            data.output_frames = frameAdjust;

            data.end_of_input = 0;

            int err = src_process(resampler, &data);
            if(err)
            {
                RUNONCE AppWarning(TEXT("Was unable to resample audio"));
                return NoAudioAvailable;
            }

            if(data.input_frames_used != numAudioFrames)
            {
                RUNONCE AppWarning(TEXT("Failed to downsample buffer completely, which shouldn't actually happen because it should be using 10ms of samples"));
                return NoAudioAvailable;
            }

            numAudioFrames = data.output_frames_gen;
        }

        //-----------------------------------------------------------------------------
        // sort all audio frames into 10 millisecond increments (done because not all devices output in 10ms increments)
        // NOTE: 0.457+ - instead of using the timestamps from windows, just compare and make sure it stays within a 100ms of their timestamps

        float *newBuffer = (bResample) ? tempResampleBuffer.Array() : tempBuffer.Array();
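
        // Exactly one 10 ms packet (441 frames at 44.1 kHz) and nothing pending: emit the segment directly.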

        if(storageBuffer.Num() == 0 && numAudioFrames == 441)
        {
            lastUsedTimestamp += 10;
            if(!bBrokenTimestamp) 
            {
                QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
                if(difVal > 70)
                    lastUsedTimestamp = newTimestamp;
            }

            if(lastUsedTimestamp > lastSentTimestamp)
            {
                QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                if(adjustVal < 10)
                    lastUsedTimestamp += 10-adjustVal;

                AudioSegment &newSegment = *audioSegments.CreateNew();
                newSegment.audioData.CopyArray(newBuffer, numAudioFrames*2);
                newSegment.timestamp = lastUsedTimestamp;
                MultiplyAudioBuffer(newSegment.audioData.Array(), numAudioFrames*2, curVolume);

                lastSentTimestamp = lastUsedTimestamp;
            }
        }
        else
        {
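            // Otherwise accumulate into storageBuffer and emit 441-frame (10 ms)
            // segments once enough samples have built up.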
            UINT storedFrames = storageBuffer.Num();

            storageBuffer.AppendArray(newBuffer, numAudioFrames*2);
            if(storageBuffer.Num() >= (441*2))
            {
                lastUsedTimestamp += 10;
                if(!bBrokenTimestamp)
                {
                    QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
                    if(difVal > 70)
                        lastUsedTimestamp = newTimestamp - (QWORD(storedFrames)/2*1000/44100);
                }

                //------------------------
                // add new data

                if(lastUsedTimestamp > lastSentTimestamp)
                {
                    QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                    if(adjustVal < 10)
                        lastUsedTimestamp += 10-adjustVal;

                    AudioSegment &newSegment = *audioSegments.CreateNew();
                    newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
                    newSegment.timestamp = lastUsedTimestamp;
                    MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);

                    storageBuffer.RemoveRange(0, (441*2));

                    lastSentTimestamp = lastUsedTimestamp;
                }

                //------------------------
                // if still data pending (can happen)

                while(storageBuffer.Num() >= (441*2))
                {
                    lastUsedTimestamp += 10;

                    if(lastUsedTimestamp > lastSentTimestamp)
                    {
                        QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                        if(adjustVal < 10)
                            lastUsedTimestamp += 10-adjustVal;

                        AudioSegment &newSegment = *audioSegments.CreateNew();
                        newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
                        storageBuffer.RemoveRange(0, (441*2));
                        MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);

                        newSegment.timestamp = lastUsedTimestamp;

                        lastSentTimestamp = lastUsedTimestamp;
                    }
                }
            }
        }

        //-----------------------------------------------------------------------------

        return ContinueAudioRequest;
    }

    return NoAudioAvailable;
}
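// GetQPCTimeMS and GetQWDif are referenced above but not defined in this excerpt.
// Plausible sketches, assuming the obvious semantics (current QPC time in milliseconds,
// and the absolute difference of two unsigned 64-bit timestamps):
static QWORD GetQPCTimeMS(LONGLONG clockFreq)
{
    LARGE_INTEGER curTime;
    QueryPerformanceCounter(&curTime);

    // Convert performance-counter ticks to milliseconds.
    return QWORD(curTime.QuadPart) * 1000 / QWORD(clockFreq);
}

static inline QWORD GetQWDif(QWORD a, QWORD b)
{
    return (a > b) ? (a - b) : (b - a);
}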