Example #1
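Capture-side (near-end) path: the microphone frame is optionally down-mixed to mono, resampled to the module's working rate, run through the audio processing module (set_stream_delay_ms() followed by ProcessStream(), matching the WebRTC AudioProcessing API), and then resampled and up-mixed back to the format requested by the caller.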
	void AudioEffect::ProcessCaptureStream( int16_t* audio_samples, size_t frame_byte_size, int16_t* outSample, size_t& len_of_byte )
	{
		if(!m_bInit)
		{
			return;
		}
		if(!m_bEnable)
		{
			return;
		}

		// 44.1 kHz input: a 10 ms frame is handled as 440 samples (880 bytes) per channel.
		if(rec_resample.infreq == 44100)
		{
			frame_byte_size = 880 * rec_resample.inchannel;
		}

		// Down-mix interleaved stereo input to mono when the processing channel count is 1.
		if(rec_resample.inchannel == 2 && rec_resample.channel == 1)
		{
			AudioResample::ToMono( audio_samples, frame_byte_size / 2 );
			frame_byte_size /= 2;
		}

		// Resample the capture frame to the module's working rate (kTargetRecSampleRate).
		AudioFrame af;
		size_t outLen = 0;
		int err = 0;
		if(0 != ( err = m_recResample.Push( audio_samples,
			frame_byte_size / sizeof( int16_t ),
			af.data_,
			sizeof( af.data_ ),
			outLen ) ))
		{
			return;
		}

		// Fill a 10 ms AudioFrame at the working rate, set the capture/render delay,
		// and run the capture-side processing (ProcessStream).
		af.UpdateFrame( 0,
			GetTimeStamp(),
			af.data_,
			kTargetRecSampleRate / 100,
			kTargetRecSampleRate,
			AudioFrame::kNormalSpeech,
			AudioFrame::kVadUnknown,
			rec_resample.channel );
		m_apm->set_stream_delay_ms( m_stream_delay );
		if(0 != ( err = m_apm->ProcessStream( &af ) ))
		{
			return;
		}

		// Resample the processed frame back to the caller's output sample rate.
		size_t inLen = outLen;
		if(0 != ( err = m_recReverseResample.Push( af.data_,
			inLen,
			outSample,
			len_of_byte / 2,
			outLen ) ))
		{
			return;
		}

		// Up-mix back to interleaved stereo if the caller expects two channels.
		if(rec_resample.outchannel == 2 && rec_resample.channel == 1)
		{
			AudioResample::Tostereo( outSample, outLen );
			outLen *= 2;
		}

		// 44.1 kHz output: pad from 440 to 441 samples per channel by duplicating the last sample(s).
		if(rec_resample.outfreq == 44100)
		{
			if(rec_resample.outchannel == 1)
			{
				outSample[440] = outSample[439];
			}
			else
			{
				outSample[880] = outSample[878];
				outSample[880 + 1] = outSample[879];
			}
		}

		// Report the output size in bytes: 10 ms of 16-bit samples at the output rate and channel count.
		len_of_byte = rec_resample.outfreq / 100 * rec_resample.outchannel * 2;
	}
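
The channel-conversion helpers AudioResample::ToMono and AudioResample::Tostereo are project-specific and their implementations are not shown here. Judging only from the call sites above (interleaved 16-bit PCM, lengths given in samples, conversion done in place except for the three-argument Tostereo overload), a minimal sketch of what such helpers might look like is:

	// Hypothetical stand-ins for the project's AudioResample helpers; only the
	// behavior implied by the call sites above is reproduced here.
	#include <cstddef>
	#include <cstdint>

	struct AudioResampleSketch
	{
		// Down-mix interleaved stereo to mono in place.
		// sampleCount is the total number of int16_t samples (left + right).
		static void ToMono( int16_t* samples, size_t sampleCount )
		{
			for(size_t i = 0; i + 1 < sampleCount; i += 2)
			{
				samples[i / 2] = static_cast<int16_t>( ( samples[i] + samples[i + 1] ) / 2 );
			}
		}

		// Up-mix mono to interleaved stereo in place; the buffer must have room for
		// 2 * sampleCount samples. Iterate backwards so no source sample is
		// overwritten before it has been copied.
		static void Tostereo( int16_t* samples, size_t sampleCount )
		{
			for(size_t i = sampleCount; i-- > 0; )
			{
				samples[2 * i] = samples[i];
				samples[2 * i + 1] = samples[i];
			}
		}

		// Out-of-place variant used on the render path: expand a mono input into a
		// separate interleaved stereo output buffer.
		static void Tostereo( const int16_t* monoIn, size_t sampleCount, int16_t* stereoOut )
		{
			for(size_t i = 0; i < sampleCount; ++i)
			{
				stereoOut[2 * i] = monoIn[i];
				stereoOut[2 * i + 1] = monoIn[i];
			}
		}
	};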
Example #2
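Render-side (far-end) path: the playout frame is resampled to the module's working rate and passed to AnalyzeReverseStream() as the reverse (far-end) stream, then converted to the sample rate and channel layout requested by the caller.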
	void AudioEffect::ProcessRenderStream( int16_t* inSamples, size_t frame_byte_size, int16_t* outSample, size_t& len_of_byte )
	{
		if(!m_bInit)
		{
			return;
		}
		if(!m_bEnable)
		{
			return;
		}
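		// 44.1 kHz input: a 10 ms frame is handled as 440 samples (880 bytes) per channel.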
		if(ply_resample.infreq == 44100)
		{
			frame_byte_size = 880 * ply_resample.inchannel;
		}
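		// Down-mix interleaved stereo input to mono when the processing channel count is 1.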
		if(ply_resample.inchannel == 2 && ply_resample.channel == 1)
		{
			AudioResample::ToMono( inSamples, frame_byte_size / 2 );
			frame_byte_size /= 2;
		}

		// Resample the render frame to the module's working rate (kTargetPlySampleRate).
		size_t outLen = 0;
		int err = 0;
		AudioFrame af;
		if(0 != ( err = m_plyResample.Push( inSamples,
			frame_byte_size / sizeof( int16_t ),
			af.data_,
			sizeof( af.data_ ),
			outLen ) ))
		{
			return;
		}
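		// Fill a 10 ms AudioFrame at the working rate and hand it to the audio
		// processing module as the reverse (far-end) stream.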
		af.UpdateFrame( 0,
			GetTimeStamp(),
			af.data_,
			kTargetPlySampleRate / 100,
			kTargetPlySampleRate,
			AudioFrame::kNormalSpeech,
			AudioFrame::kVadUnknown,
			ply_resample.channel );

		if(0 != ( err = m_apm->AnalyzeReverseStream( &af ) ))
		{
			return;
		}
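		// Same input and output rate: only adjust the channel layout (or copy straight
		// through) and return.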
		if(ply_resample.infreq == ply_resample.outfreq)
		{
			if(ply_resample.inchannel == 1 && ply_resample.outchannel == 2)
			{
				AudioResample::Tostereo( inSamples, frame_byte_size / 2, outSample );
			}
			else if(inSamples != outSample)
			{
				memcpy( outSample, inSamples, len_of_byte );
			}
			len_of_byte = ply_resample.outfreq / 100 * 2 * ply_resample.outchannel;
			return;
		}

		// Otherwise resample to the caller's output rate; note the resampler is fed the
		// original inSamples rather than af.data_, since the reverse stream was only analyzed.
		size_t inLen = outLen;
		if(ply_resample.infreq != ply_resample.outfreq || inSamples != outSample)
		{
			if(0 != ( err = m_plyReverseResample.Push( inSamples,
				frame_byte_size / 2,
				outSample,
				1920,
				outLen ) ))
			{
				return;
			}
		}

		// Up-mix back to interleaved stereo if the caller expects two channels.
		if(ply_resample.inchannel == 1 && ply_resample.outchannel == 2)
		{
			AudioResample::Tostereo( outSample, outLen );
		}

		// 44.1 kHz output: pad from 440 to 441 samples per channel by duplicating the last sample(s).
		if(ply_resample.outfreq == 44100)
		{
			if(ply_resample.outchannel == 1)
			{
				outSample[440] = outSample[439];
			}
			else
			{
				outSample[880] = outSample[878];
				outSample[880 + 1] = outSample[879];
			}
		}

		// Report the output size in bytes: 10 ms of 16-bit samples at the output rate and channel count.
		len_of_byte = ply_resample.outfreq / 100 * 2 * ply_resample.outchannel;
	}
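
Taken together, the two functions follow the usual WebRTC AudioProcessing pattern: each 10 ms far-end (playout) frame goes through ProcessRenderStream() before it is rendered, and each 10 ms near-end (microphone) frame goes through ProcessCaptureStream() before it is encoded. A hypothetical driver is sketched below; the callback names, buffer sizes, and the 10 ms / 16-bit PCM framing are assumptions for illustration, not part of the original code:

	// Hypothetical glue showing how the two entry points might be driven; AudioEffect
	// is the class from the examples above, everything else (names, sizes, framing)
	// is assumed for illustration.
	#include <cstddef>
	#include <cstdint>

	static const size_t kMaxSamplesPer10ms = 48000 / 100 * 2;   // room for 48 kHz stereo

	// Called with each 10 ms far-end (playout) frame before it reaches the device.
	void OnRenderFrame( AudioEffect& fx, int16_t* pcm, size_t byteSize )
	{
		int16_t out[kMaxSamplesPer10ms];
		size_t outByteSize = sizeof( out );
		fx.ProcessRenderStream( pcm, byteSize, out, outByteSize );
		// ... hand 'out' (outByteSize bytes) to the audio device ...
	}

	// Called with each 10 ms near-end (microphone) frame before it is encoded.
	void OnCaptureFrame( AudioEffect& fx, int16_t* pcm, size_t byteSize )
	{
		int16_t out[kMaxSamplesPer10ms];
		size_t outByteSize = sizeof( out );
		fx.ProcessCaptureStream( pcm, byteSize, out, outByteSize );
		// ... encode and send 'out' (outByteSize bytes) ...
	}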