Example 1
void AudioInput::addMic(const void *data, unsigned int nsamp) {
	while (nsamp > 0) {
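		// Make sure we don't overrun the frame buffer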
		unsigned int left = qMin(nsamp, iMicLength - iMicFilled);

		imfMic(pfMicInput + iMicFilled, data, left, iMicChannels);

		iMicFilled += left;
		nsamp -= left;

		if (nsamp > 0) {
			if (eMicFormat == SampleFloat)
				data = reinterpret_cast<const float *>(data) + left * iMicChannels;
			else
				data = reinterpret_cast<const short *>(data) + left * iMicChannels;
		}

		if (iMicFilled == iMicLength) {
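			// Frame complete: reset the fill count and process the buffered samples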
			iMicFilled = 0;

			// Resample the frame if needed
			float *ptr = srsMic ? pfOutput : pfMicInput;
			if (srsMic) {
				spx_uint32_t inlen = iMicLength;
				spx_uint32_t outlen = iFrameSize;
				speex_resampler_process_float(srsMic, 0, pfMicInput, &inlen, pfOutput, &outlen);
			}
			// Convert the float samples to 16-bit PCM
			const float mul = 32768.f;
			for (int j=0;j<iFrameSize;++j)
				psMic[j] = static_cast<short>(ptr[j] * mul);

			if (iEchoChannels > 0) {
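				// With echo cancellation enabled, pull the current speaker (echo)
				// frame from the Speex jitter buffer into psSpeaker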
				JitterBufferPacket jbp;
				jbp.data = reinterpret_cast<char *>(psSpeaker);
				jbp.len = iFrameSize * sizeof(short);
				jbp.timestamp = 0;
				jbp.span = 0;
				jbp.sequence = 0;
				jbp.user_data = 0;

				spx_int32_t offs;

				jitter_buffer_get(jb, &jbp, 10, &offs);
				// Advance the jitter buffer clock by one tick (one frame period)
				jitter_buffer_tick(jb);
			}
			encodeAudioFrame();
		}
	}
}
Example 2
void AudioInput::addMic(const void *data, unsigned int nsamp) {
	while (nsamp > 0) {
		unsigned int left = min(nsamp, iMicLength - iMicFilled);

		imfMic(pfMicInput + iMicFilled, data, left, iMicChannels);

		iMicFilled += left;
		nsamp -= left;

		if (nsamp > 0) {
			if (eMicFormat == SampleFloat)
				data = reinterpret_cast<const float *>(data) + left * iMicChannels;
			else
				data = reinterpret_cast<const short *>(data) + left * iMicChannels;
		}

		if (iMicFilled == iMicLength) {
			iMicFilled = 0;

			float *ptr = srsMic ? pfOutput : pfMicInput;
			if (srsMic) {
				spx_uint32_t inlen = iMicLength;
				spx_uint32_t outlen = iFrameSize;
				speex_resampler_process_float(srsMic, 0, pfMicInput, &inlen, pfOutput, &outlen);
			}
			const float mul = 32768.f;
			for (int j=0;j<iFrameSize;++j)
				psMic[j] = static_cast<short>(ptr[j] * mul);

			if (iEchoChannels > 0) {
				short *echo = NULL;

				// Fetch the echo data
				{
					MutexLocker l(&qmEcho);

					if (qlEchoFrames.empty()) {
						iJitterSeq = 0;
						iMinBuffered = 1000;
					} else {
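						// Compensate for drift between the microphone and the echo source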
						iMinBuffered = min(iMinBuffered, qlEchoFrames.size());

						if ((iJitterSeq > 100) && (iMinBuffered > 1)) {
							iJitterSeq = 0;
							iMinBuffered = 1000;
							delete [] qlEchoFrames.front();
							qlEchoFrames.pop_front();
						}
						echo = qlEchoFrames.front();
						qlEchoFrames.pop_front();
					}
				}

				if (echo) {
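					// We have echo data for the current frame, remember that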
					if (psSpeaker)
						delete [] psSpeaker;
					psSpeaker = echo;
				}
			}
			encodeAudioFrame();
		}
	}
}
Example 3
void AudioInput::addMic(const void *data, unsigned int nsamp) {
	while (nsamp > 0) {
		// Make sure we don't overrun the frame buffer
		const unsigned int left = qMin(nsamp, iMicLength - iMicFilled);

		// Append mix into pfMicInput frame buffer (converts 16bit pcm->float if necessary)
		imfMic(pfMicInput + iMicFilled, data, left, iMicChannels);

		iMicFilled += left;
		nsamp -= left;

		// If samples are left, offset the data pointer to the first unconsumed one for the next iteration
		if (nsamp > 0) {
			if (eMicFormat == SampleFloat)
				data = reinterpret_cast<const float *>(data) + left * iMicChannels;
			else
				data = reinterpret_cast<const short *>(data) + left * iMicChannels;
		}

		if (iMicFilled == iMicLength) {
			// Frame complete
			iMicFilled = 0;

			// If needed resample frame
			float *ptr = srsMic ? pfOutput : pfMicInput;

			if (srsMic) {
				spx_uint32_t inlen = iMicLength;
				spx_uint32_t outlen = iFrameSize;
				speex_resampler_process_float(srsMic, 0, pfMicInput, &inlen, pfOutput, &outlen);
			}

			// Convert float to 16bit PCM
			const float mul = 32768.f;
			for (int j = 0; j < iFrameSize; ++j)
				psMic[j] = static_cast<short>(qBound(-32768.f, (ptr[j] * mul), 32767.f));

			// If we have echo cancellation enabled...
			if (iEchoChannels > 0) {
				short *echo = NULL;

				{
					QMutexLocker l(&qmEcho);

					if (qlEchoFrames.isEmpty()) {
						iJitterSeq = 0;
						iMinBuffered = 1000;
					} else {
						// Compensate for drift between the microphone and the echo source
						iMinBuffered = qMin(iMinBuffered, qlEchoFrames.count());

						if ((iJitterSeq > 100) && (iMinBuffered > 1)) {
							iJitterSeq = 0;
							iMinBuffered = 1000;
							delete [] qlEchoFrames.takeFirst();
						}
						echo = qlEchoFrames.takeFirst();
					}
				}

				if (echo) {
					// We have echo data for the current frame, remember that
					delete [] psSpeaker;
					psSpeaker = echo;
				}
			}

			// Encode and send frame
			encodeAudioFrame();
		}
	}
}