void JackAudioSystem::initializeOutput() {
	QMutexLocker lock(&qmWait);

	if (!jasys->bJackIsGood) {
		return;
	}

	AudioOutputPtr ao = g.ao;
	JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());

	allocOutputBuffer(iBufferSize);

	if (jao) {
		jao->qmMutex.lock();
	}

	for (unsigned int i = 0; i < iOutPorts; ++i) {
		char name[10];
		snprintf(name, 10, "output_%u", i + 1);

		out_ports[i] = jack_port_register(client, name, JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0);
		if (out_ports[i] == NULL) {
			qWarning("JackAudioSystem: unable to register 'output' port");
			break;
		}
	}

	bOutputIsGood = true;

	if (jao) {
		jao->qmMutex.unlock();
	}
}
void JackAudioSystem::destroyOutput() {
	AudioOutputPtr ao = g.ao;
	JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());

	if (jao) {
		jao->qmMutex.lock();
	}

	delete [] output_buffer;
	output_buffer = NULL;

	for (unsigned int i = 0; i < iOutPorts; ++i) {
		if (out_ports[i] != NULL) {
			int err = jack_port_unregister(client, out_ports[i]);
			if (err != 0)  {
				qWarning("JackAudioSystem: unable to unregister out port - jack_port_unregister() returned %i", err);
			}
			out_ports[i] = NULL;
		}
	}

	bOutputIsGood = false;

	if (jao) {
		jao->qmMutex.unlock();
	}
}
Example #3
void LoopPlayer::addFrame(const QByteArray &packet, int seq) {
	if (DOUBLE_RAND < g.s.dPacketLoss) {
		qWarning("Drop");
		return;
	}

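	// More than 100 ms since the last fetch means the loop stream went idle; treat this packet as the start of a fresh stream (no artificial delay below).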
	bool restart = (qtLastFetch.elapsed() > 100);

	{
		QMutexLocker l(&qmLock);

		double time = qtTicker.elapsed();

		double r;
		if (restart)
			r = 0.0;
		else
			r = DOUBLE_RAND * g.s.dMaxPacketDelay;

		qmPackets.insert(static_cast<float>(time + r), Packet(seq, packet));
	}

	// Restart check
	if (qtLastFetch.elapsed() > 100) {
		AudioOutputPtr ao = g.ao;
		if (ao) {
			ao->addFrameToBuffer(this, QByteArray(), 0);
		}
	}

}
Example #4
void ClientUser::remove(unsigned int uiSession) {
	QWriteLocker lock(&c_qrwlUsers);
	ClientUser *p = c_qmUsers.take(uiSession);
	if (p && p->cChannel)
		p->cChannel->removeUser(p);
	if (p) {
		AudioOutputPtr ao = g.ao;
		if (ao)
			ao->removeBuffer(p);
	}
}
Example #5
void ClientUser::remove(unsigned int uiSession) {
	QWriteLocker lock(&c_qrwlUsers);
	ClientUser *p = c_qmUsers.take(uiSession);
	if (p) {
		if (p->cChannel)
			p->cChannel->removeUser(p);

		AudioOutputPtr ao = g.ao;
		if (ao)
			ao->removeBuffer(p);

		if (p->tsState != Settings::Passive) {
			QWriteLocker writeLock(&c_qrwlTalking);
			c_qlTalking.removeAll(p);
		}
	}
}
Example #6
void LoopPlayer::fetchFrames() {
	QMutexLocker l(&qmLock);

	AudioOutputPtr ao = g.ao;
	if (!ao || qmPackets.isEmpty())
		return;

	double cmp = qtTicker.elapsed();

	QMultiMap<float, Packet>::iterator i = qmPackets.begin();

	while (i != qmPackets.end()) {
		if (i.key() > cmp)
			break;
		ao->addFrameToBuffer(this, i.value().second, i.value().first);
		i = qmPackets.erase(i);
	}

	qtLastFetch.restart();
}
Example #7
void JackAudioSystem::allocOutputBuffer(jack_nframes_t frames) {
	iBufferSize = frames;
	AudioOutputPtr ao = g.ao;
	JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());

	if (jao) {
		jao->qmMutex.lock();
	}
	if (output_buffer) {
		delete [] output_buffer;
		output_buffer = NULL;
	}
	// Use the nothrow form of new so an allocation failure yields NULL instead of
	// throwing std::bad_alloc, which makes the check below meaningful.
	output_buffer = new (std::nothrow) jack_default_audio_sample_t[frames * iOutPorts];
	if (output_buffer == NULL) {
		bJackIsGood = false;
	}

	if (jao) {
		jao->qmMutex.unlock();
	}
}
Example #8
File: Audio.cpp Project: uvbs/V8
// Add to the audio playback queue
void LoopUser::addFrame(const QByteArray &packet) {
    if (DOUBLE_RAND < g_struct.s.dPacketLoss) {
        Trace("Drop");
        return;
    }

    bool restart = (qtLastFetch.elapsed()*CLOCKS_PER_SEC > 100);

    {
        MutexLocker l(&qmLock);

        double time = qtTicker.elapsed();

        double r;
        if (restart)
            r = 0.0;
        else
            r = DOUBLE_RAND * g_struct.s.dMaxPacketDelay;

        qmPackets.insert(make_pair(static_cast<float>(time + r), packet));

    }

    // Restart check
    if (qtLastFetch.elapsed()*CLOCKS_PER_SEC > 100) {
        AudioOutputPtr ao = g_struct.ao;
        if (ao) {
            MessageHandler::UDPMessageType msgType = MessageHandler::UDPVoiceAACPlus/*MessageHandler::UDPVoiceCELT*/;//static_cast<MessageHandler::UDPMessageType>((packet.at(0) >> 5) & 0x7);
            for (int i=0; i<PER_FRAME_OF_SAMEPLE; i++)
            {
                ao->addFrameToBuffer(this, QByteArray(), 0, msgType);
                char buf[512] = {0};
                sprintf(buf,"LoopUser::addFrame QByteArray()  t=%d\n",qtLastFetch.elapsed()*CLOCKS_PER_SEC );
                //OutputDebugStringA(buf);
            }
        }
    }

}
Example #9
File: Audio.cpp Project: uvbs/V8
// Fetch frames from the audio queue
void LoopUser::fetchFrames() {
    MutexLocker l(&qmLock);

    AudioOutputPtr ao = g_struct.ao;
    if (!ao || qmPackets.empty()) {
        return;
    }

    double cmp = qtTicker.elapsed();

    std::multimap<float, QByteArray>::iterator i = qmPackets.begin();

    while (i != qmPackets.end()) {
        // Timing check disabled in this port; every queued frame is drained at once:
        // if (i->first > cmp) {
        //     char buf[512] = {0};
        //     sprintf(buf, "LoopUser::fetchFrames() cmp=%f\n", i->first);
        //     OutputDebugStringA(buf);
        //     break;
        // }

        const QByteArray &data = i->second;
        PacketDataStream pds((char*)&data[0], data.size());

        unsigned int msgFlags = 0;
        int iSeq = static_cast<unsigned int>(pds.next8());

        QByteArray qba;
        pds.dataBlock(pds.left(), qba);

        ao->addFrameToBuffer(this, qba, iSeq, MessageHandler::UDPVoiceAACPlus/*MessageHandler::UDPVoiceCELT*/);
        i = qmPackets.erase(i);
    }

    qtLastFetch.restart();
}
Example #10
void PulseAudioSystem::write_callback(pa_stream *s, size_t bytes, void *userdata) {
	PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);
	Q_ASSERT(s == pas->pasOutput);

	AudioOutputPtr ao = g.ao;
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(ao.get());

	unsigned char buffer[bytes];

	if (! pao) {
		// Transitioning, but most likely transitions back, so just zero.
		memset(buffer, 0, bytes);
		pa_stream_write(s, buffer, bytes, NULL, 0, PA_SEEK_RELATIVE);
		pas->wakeup();
		return;
	}

	const pa_sample_spec *pss = pa_stream_get_sample_spec(s);
	const pa_channel_map *pcm = pa_stream_get_channel_map(pas->pasOutput);
	if (!pa_sample_spec_equal(pss, &pao->pss) || !pa_channel_map_equal(pcm, &pao->pcm)) {
		pao->pss = *pss;
		pao->pcm = *pcm;
		if (pss->format == PA_SAMPLE_FLOAT32NE)
			pao->eSampleFormat = PulseAudioOutput::SampleFloat;
		else
			pao->eSampleFormat = PulseAudioOutput::SampleShort;
		pao->iMixerFreq = pss->rate;
		pao->iChannels = pss->channels;
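		// Map each PulseAudio channel position onto Mumble's speaker-mask constants so the mixer knows the spatial role of every channel.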
		unsigned int chanmasks[pss->channels];
		for (int i=0;i<pss->channels;++i) {
			unsigned int cm = 0;
			switch (pcm->map[i]) {
				case PA_CHANNEL_POSITION_LEFT:
					cm = SPEAKER_FRONT_LEFT;
					break;
				case PA_CHANNEL_POSITION_RIGHT:
					cm = SPEAKER_FRONT_RIGHT;
					break;
				case PA_CHANNEL_POSITION_CENTER:
					cm = SPEAKER_FRONT_CENTER;
					break;
				case PA_CHANNEL_POSITION_REAR_LEFT:
					cm = SPEAKER_BACK_LEFT;
					break;
				case PA_CHANNEL_POSITION_REAR_RIGHT:
					cm = SPEAKER_BACK_RIGHT;
					break;
				case PA_CHANNEL_POSITION_REAR_CENTER:
					cm = SPEAKER_BACK_CENTER;
					break;
				case PA_CHANNEL_POSITION_LFE:
					cm = SPEAKER_LOW_FREQUENCY;
					break;
				case PA_CHANNEL_POSITION_SIDE_LEFT:
					cm = SPEAKER_SIDE_LEFT;
					break;
				case PA_CHANNEL_POSITION_SIDE_RIGHT:
					cm = SPEAKER_SIDE_RIGHT;
					break;
				case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER:
					cm = SPEAKER_FRONT_LEFT_OF_CENTER;
					break;
				case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER:
					cm = SPEAKER_FRONT_RIGHT_OF_CENTER;
					break;
				default:
					cm = 0;
					break;
			}
			chanmasks[i] = cm;
		}
		pao->initializeMixer(chanmasks);
	}

	const unsigned int iSampleSize = pao->iSampleSize;
	const unsigned int samples = static_cast<unsigned int>(bytes) / iSampleSize;
	bool oldAttenuation = pas->bAttenuating;

	// do we have some mixed output?
	if (pao->mix(buffer, samples)) {
		// attenuate if instructed to or it's in settings
		pas->bAttenuating = (g.bAttenuateOthers || g.s.bAttenuateOthers);

	} else {
		memset(buffer, 0, bytes);

		// attenuate if instructed to (self-activated)
		pas->bAttenuating = g.bAttenuateOthers;
	}

	// if the attenuation state has changed
	if (oldAttenuation != pas->bAttenuating) {
		pas->setVolumes();
	}

	pa_stream_write(s, buffer, iSampleSize * samples, NULL, 0, PA_SEEK_RELATIVE);
}
Example #11
void PulseAudioSystem::eventCallback(pa_mainloop_api *api, pa_defer_event *) {
	api->defer_enable(pade, false);

	if (! bSourceDone || ! bSinkDone || ! bServerDone)
		return;

	AudioInputPtr ai = g.ai;
	AudioOutputPtr ao = g.ao;
	AudioInput *raw_ai = ai.get();
	AudioOutput *raw_ao = ao.get();
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(raw_ai);
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(raw_ao);

	if (raw_ao) {
		QString odev = outputDevice();
		pa_stream_state ost = pasOutput ? pa_stream_get_state(pasOutput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pao && (ost == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pao) {
			switch (ost) {
				case PA_STREAM_TERMINATED: {
						if (pasOutput)
							pa_stream_unref(pasOutput);

						pa_sample_spec pss = qhSpecMap.value(odev);
						pa_channel_map pcm = qhChanMap.value(odev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						if ((pss.channels == 0) || (! g.s.doPositionalAudio()))
							pss.channels = 1;

						pasOutput = pa_stream_new(pacContext, mumble_sink_input, &pss, (pss.channels == 1) ? NULL : &pcm);
						pa_stream_set_state_callback(pasOutput, stream_callback, this);
						pa_stream_set_write_callback(pasOutput, write_callback, this);
					}
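				// fall through: the freshly created stream still needs to be connected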
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (g.s.iOutputDelay != iDelayCache) {
							do_stop = true;
						} else if (g.s.doPositionalAudio() != bPositionalCache) {
							do_stop = true;
						} else if (odev != qsOutputCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping output");
			pa_stream_disconnect(pasOutput);
			iSinkId = -1;
		} else if (do_start) {
			qWarning("PulseAudio: Starting output: %s", qPrintable(odev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasOutput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pao->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen * (g.s.iOutputDelay+1);
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			iDelayCache = g.s.iOutputDelay;
			bPositionalCache = g.s.doPositionalAudio();
			qsOutputCache = odev;

			pa_stream_connect_playback(pasOutput, qPrintable(odev), &buff, PA_STREAM_ADJUST_LATENCY, NULL, NULL);
			pa_context_get_sink_info_by_name(pacContext, qPrintable(odev), sink_info_callback, this);
		}
	}

	if (raw_ai) {
		QString idev = inputDevice();
		pa_stream_state ist = pasInput ? pa_stream_get_state(pasInput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pai && (ist == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai) {
			switch (ist) {
				case PA_STREAM_TERMINATED: {
						if (pasInput)
							pa_stream_unref(pasInput);

						pa_sample_spec pss = qhSpecMap.value(idev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						pss.channels = 1;

						pasInput = pa_stream_new(pacContext, "Microphone", &pss, NULL);
						pa_stream_set_state_callback(pasInput, stream_callback, this);
						pa_stream_set_read_callback(pasInput, read_callback, this);
					}
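				// fall through: the freshly created stream still needs to be connected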
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (idev != qsInputCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping input");
			pa_stream_disconnect(pasInput);
		} else if (do_start) {
			qWarning("PulseAudio: Starting input %s",qPrintable(idev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasInput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			qsInputCache = idev;

			pa_stream_connect_record(pasInput, qPrintable(idev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}

	if (raw_ai) {
		QString odev = outputDevice();
		QString edev = qhEchoMap.value(odev);
		pa_stream_state est = pasSpeaker ? pa_stream_get_state(pasSpeaker) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if ((! pai || ! g.s.doEcho()) && (est == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai && g.s.doEcho()) {
			switch (est) {
				case PA_STREAM_TERMINATED: {
						if (pasSpeaker)
							pa_stream_unref(pasSpeaker);

						pa_sample_spec pss = qhSpecMap.value(edev);
						pa_channel_map pcm = qhChanMap.value(edev);
						if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
							pss.format = PA_SAMPLE_FLOAT32NE;
						if (pss.rate == 0)
							pss.rate = SAMPLE_RATE;
						if ((pss.channels == 0) || (! g.s.bEchoMulti))
							pss.channels = 1;

						pasSpeaker = pa_stream_new(pacContext, mumble_echo, &pss, (pss.channels == 1) ? NULL : &pcm);
						pa_stream_set_state_callback(pasSpeaker, stream_callback, this);
						pa_stream_set_read_callback(pasSpeaker, read_callback, this);
					}
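				// fall through: the freshly created stream still needs to be connected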
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
						if (g.s.bEchoMulti != bEchoMultiCache) {
							do_stop = true;
						} else if (edev != qsEchoCache) {
							do_stop = true;
						}
						break;
					}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping echo");
			pa_stream_disconnect(pasSpeaker);
		} else if (do_start) {
			qWarning("PulseAudio: Starting echo: %s", qPrintable(edev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasSpeaker);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			bEchoMultiCache = g.s.bEchoMulti;
			qsEchoCache = edev;

			pa_stream_connect_record(pasSpeaker, qPrintable(edev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}
}
Example #12
void AudioInput::encodeAudioFrame() {
	int iArg;
	int i;
	float sum;
	short max;

	short *psSource;

	iFrameCounter++;

	if (! bRunning)
		return;

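	// Seed the energy sums at 1 so the dB conversions below never take log10f of zero.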
	sum=1.0f;
	max = 1;
	for (i=0;i<iFrameSize;i++) {
		sum += static_cast<float>(psMic[i] * psMic[i]);
		max = std::max(static_cast<short>(abs(psMic[i])), max);
	}
	dPeakMic = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	dMaxMic = max;

	if (psSpeaker && (iEchoChannels > 0)) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);
	resetAudioProcessor();

	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_AGC_GAIN, &iArg);
	float gainValue = static_cast<float>(iArg);
	iArg = g.s.iNoiseSuppress - iArg;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

	if (sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	// clean microphone level: peak of filtered signal attenuated by AGC gain
	dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);
	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);

	bool bIsSpeech = false;

	if (level > g.s.fVADmax)
		bIsSpeech = true;
	else if (level > g.s.fVADmin && bPreviousVoice)
		bIsSpeech = true;

	if (! bIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			bIsSpeech = true;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continuous)
		bIsSpeech = true;
	else if (g.s.atTransmit == Settings::PushToTalk)
		bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);

	ClientUser *p = ClientUser::get(g.uiSession);
	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
		bIsSpeech = false;
	}

	if (bIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 500)
			iFrameCounter = 0;
	}

	if (p) {
		if (! bIsSpeech)
			p->setTalking(Settings::Passive);
		else if (g.iTarget == 0)
			p->setTalking(Settings::Talking);
		else
			p->setTalking(Settings::Shouting);
	}

	if (g.s.bTxAudioCue && g.uiSession != 0) {
		AudioOutputPtr ao = g.ao;
		if (bIsSpeech && ! bPreviousVoice && ao)
			ao->playSample(g.s.qsTxAudioCueOn);
		else if (ao && !bIsSpeech && bPreviousVoice)
			ao->playSample(g.s.qsTxAudioCueOff);
	}

	if (! bIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;

		if (g.s.iaeIdleAction != Settings::Nothing && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {

			if (g.s.iaeIdleAction == Settings::Deafen && !g.s.bDeaf) {
				tIdle.restart();
				emit doDeaf();
			} else if (g.s.iaeIdleAction == Settings::Mute && !g.s.bMute) {
				tIdle.restart();
				emit doMute();
			}
		}

		spx_int32_t increment = 0;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
		return;
	} else {
		spx_int32_t increment = 12;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
	}

	if (bIsSpeech && !bPreviousVoice) {
		bResetEncoder = true;
	}

	tIdle.restart();

	EncodingOutputBuffer buffer;
	Q_ASSERT(buffer.size() >= static_cast<size_t>(iAudioQuality / 100 * iAudioFrames / 8));
	
	int len = 0;

	bool encoded = true;
	if (!selectCodec())
		return;

	if (umtType == MessageHandler::UDPVoiceCELTAlpha || umtType == MessageHandler::UDPVoiceCELTBeta) {
		len = encodeCELTFrame(psSource, buffer);
		if (len <= 0) {
			iBitrate = 0;
			qWarning() << "encodeCELTFrame failed" << iBufferedFrames << iFrameSize << len;
			return;
		}
		++iBufferedFrames;
	} else if (umtType == MessageHandler::UDPVoiceOpus) {
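		// Opus: accumulate frames until a full packet's worth (iAudioFrames) is buffered or speech ends, then encode them in one go.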
		encoded = false;
		opusBuffer.insert(opusBuffer.end(), psSource, psSource + iFrameSize);
		++iBufferedFrames;

		if (!bIsSpeech || iBufferedFrames >= iAudioFrames) {
			if (iBufferedFrames < iAudioFrames) {
				// Stuff frame to framesize if speech ends and we don't have enough audio
				// this way we are guaranteed to have a valid framecount and won't cause
				// a codec configuration switch by suddenly using a wildly different
				// framecount per packet.
				const int missingFrames = iAudioFrames - iBufferedFrames;
				opusBuffer.insert(opusBuffer.end(), iFrameSize * missingFrames, 0);
				iBufferedFrames += missingFrames;
				iFrameCounter += missingFrames;
			}
			
			Q_ASSERT(iBufferedFrames == iAudioFrames);

			len = encodeOpusFrame(&opusBuffer[0], iBufferedFrames * iFrameSize, buffer);
			opusBuffer.clear();
			if (len <= 0) {
				iBitrate = 0;
				qWarning() << "encodeOpusFrame failed" << iBufferedFrames << iFrameSize << len;
				iBufferedFrames = 0; // These are lost. Make sure not to mess up our sequence counter next flushCheck.
				return;
			}
			encoded = true;
		}
	}

	if (encoded) {
		flushCheck(QByteArray(reinterpret_cast<char *>(&buffer[0]), len), !bIsSpeech);
	}

	if (! bIsSpeech)
		iBitrate = 0;

	bPreviousVoice = bIsSpeech;
}
Example #13
void AudioInput::encodeAudioFrame() {
	int iArg;
	ClientPlayer *p=ClientPlayer::get(g.uiSession);
	int i;
	float sum;
	short max;

	short *psSource;

	iFrameCounter++;

	if (! bRunning) {
		return;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psMic[i] * psMic[i]);
	dPeakMic=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
	if (dPeakMic < -96.0f)
		dPeakMic = -96.0f;

	max = 1;
	for (i=0;i<iFrameSize;i++)
		max = static_cast<short>(abs(psMic[i]) > max ? abs(psMic[i]) : max);
	dMaxMic = max;

	if (g.bEchoTest) {
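		// Echo test: FFT the mic frame and locate the dominant frequency bin; the bin only counts if no other bin reaches half its power.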
		STACKVAR(float, fft, iFrameSize);
		STACKVAR(float, power, iFrameSize);
		float scale = 1.f / static_cast<float>(iFrameSize);
		for (i=0;i<iFrameSize;i++)
			fft[i] = static_cast<float>(psMic[i]) * scale;
		mumble_drft_forward(&fftTable, fft);
		float mp = 0.0f;
		int bin = 0;
		power[0]=power[1]=0.0f;
		for (i=2;i < iFrameSize / 2;i++) {
			power[i] = sqrtf(fft[2*i]*fft[2*i]+fft[2*i-1]*fft[2*i-1]);
			if (power[i] > mp) {
				bin = i;
				mp = power[i];
			}
		}
		for (i=2;i< iFrameSize / 2;i++) {
			if (power[i] * 2 > mp) {
				if (i != bin)
					bin = 0;
			}
		}
		iBestBin = bin * 2;
	}

	if (iEchoChannels > 0) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
		if (dPeakSpeaker < -96.0f)
			dPeakSpeaker = -96.0f;
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);

	if (bResetProcessor) {
		if (sppPreprocess)
			speex_preprocess_state_destroy(sppPreprocess);
		if (sesEcho)
			speex_echo_state_destroy(sesEcho);

		sppPreprocess = speex_preprocess_state_init(iFrameSize, SAMPLE_RATE);

		iArg = 1;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_VAD, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DENOISE, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DEREVERB, &iArg);

		iArg = 30000;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_TARGET, &iArg);

		float v = 30000.0f / static_cast<float>(g.s.iMinLoudness);
		iArg = lroundf(floorf(20.0f * log10f(v)));
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_MAX_GAIN, &iArg);

		iArg = g.s.iNoiseSuppress;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

		if (iEchoChannels > 0) {
			sesEcho = speex_echo_state_init(iFrameSize, iFrameSize*10);
			iArg = SAMPLE_RATE;
			speex_echo_ctl(sesEcho, SPEEX_SET_SAMPLING_RATE, &iArg);
			speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_ECHO_STATE, sesEcho);

			jitter_buffer_reset(jb);
			qWarning("AudioInput: ECHO CANCELLER ACTIVE");
		} else {
			sesEcho = NULL;
		}

		iFrames = 0;
		speex_bits_reset(&sbBits);

		bResetProcessor = false;
	}

	int iIsSpeech;

	if (sesEcho) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		iIsSpeech=speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		iIsSpeech=speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal=20.0f*log10f(micLevel / 32768.0f);
	if (dPeakSignal < -96.0f)
		dPeakSignal = -96.0f;

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakMic / 96.0f);

	if (level > g.s.fVADmax)
		iIsSpeech = 1;
	else if (level > g.s.fVADmin && bPreviousVoice)
		iIsSpeech = 1;
	else
		iIsSpeech = 0;

	if (! iIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			iIsSpeech=1;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continous)
		iIsSpeech = 1;
	else if (g.s.atTransmit == Settings::PushToTalk)
		iIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	iIsSpeech = iIsSpeech || (g.iPushToTalk > 0) || (g.iAltSpeak > 0);

	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && p->bMute) || g.bPushToMute) {
		iIsSpeech = 0;
	}

	if (iIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 200)
			iFrameCounter = 0;
	}

	if (p)
		p->setTalking(iIsSpeech, (g.iAltSpeak > 0));

	if (g.s.bPushClick && (g.s.atTransmit == Settings::PushToTalk)) {
		AudioOutputPtr ao = g.ao;
		if (iIsSpeech && ! bPreviousVoice && ao)
			ao->playSine(400.0f,1200.0f,5);
		else if (ao && !iIsSpeech && bPreviousVoice)
			ao->playSine(620.0f,-1200.0f,5);
	}
	if (! iIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;
		if (g.s.iIdleTime && ! g.s.bMute && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
			emit doMute();
			tIdle.restart();
		}
		return;
	}

	bPreviousVoice = iIsSpeech;

	tIdle.restart();

	if (! iIsSpeech) {
		memset(psMic, 0, sizeof(short) * iFrameSize);
	}

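	// On the first frame of a packet, embed the player's position in-band: pack a marker value (13), the payload size, then the raw bytes.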
	if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && (iFrames == 0) && g.p->fetch()) {
		QByteArray q;
		QDataStream ds(&q, QIODevice::WriteOnly);
		ds << g.p->fPosition[0];
		ds << g.p->fPosition[1];
		ds << g.p->fPosition[2];

		speex_bits_pack(&sbBits, 13, 5);
		speex_bits_pack(&sbBits, q.size(), 4);

		const unsigned char *d=reinterpret_cast<const unsigned char*>(q.data());
		for (i=0;i<q.size();i++) {
			speex_bits_pack(&sbBits, d[i], 8);
		}
	}

	speex_encode_int(esEncState, psSource, &sbBits);
	iFrames++;

	speex_encoder_ctl(esEncState, SPEEX_GET_BITRATE, &iBitrate);

	flushCheck();
}
Example #14
void AudioInput::encodeAudioFrame() {
	int iArg;
	ClientUser *p=ClientUser::get(g.uiSession);
	int i;
	float sum;
	short max;

	short *psSource;

	iFrameCounter++;

	if (! bRunning)
		return;

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psMic[i] * psMic[i]);
	dPeakMic = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);

	max = 1;
	for (i=0;i<iFrameSize;i++)
		max = static_cast<short>(abs(psMic[i]) > max ? abs(psMic[i]) : max);
	dMaxMic = max;

	if (psSpeaker && (iEchoChannels > 0)) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);
	resetAudioProcessor();

	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_AGC_GAIN, &iArg);
	float gainValue = static_cast<float>(iArg);
	iArg = g.s.iNoiseSuppress - iArg;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

	if (sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	// clean microphone level: peak of filtered signal attenuated by AGC gain
	dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);
	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);

	bool bIsSpeech = false;

	if (level > g.s.fVADmax)
		bIsSpeech = true;
	else if (level > g.s.fVADmin && bPreviousVoice)
		bIsSpeech = true;

	if (! bIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			bIsSpeech = true;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continous)
		bIsSpeech = true;
	else if (g.s.atTransmit == Settings::PushToTalk)
		bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);

	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
		bIsSpeech = false;
	}

	if (bIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 500)
			iFrameCounter = 0;
	}

	if (p) {
		if (! bIsSpeech)
			p->setTalking(Settings::Passive);
		else if (g.iTarget == 0)
			p->setTalking(Settings::Talking);
		else
			p->setTalking(Settings::Shouting);
	}

	if (g.s.bTxAudioCue && g.uiSession != 0) {
		AudioOutputPtr ao = g.ao;
		if (bIsSpeech && ! bPreviousVoice && ao)
			ao->playSample(g.s.qsTxAudioCueOn);
		else if (ao && !bIsSpeech && bPreviousVoice)
			ao->playSample(g.s.qsTxAudioCueOff);
	}

	if (! bIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;
		if (g.s.iIdleTime && ! g.s.bDeaf && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
			emit doDeaf();
			tIdle.restart();
		}
		spx_int32_t increment = 0;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
		return;
	} else {
		spx_int32_t increment = 12;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
	}

	tIdle.restart();
	/*
		int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
		qWarning() << "Set Callback" << r;
	*/

	unsigned char buffer[512];
	int len;

	if (umtType != MessageHandler::UDPVoiceSpeex) {
		len = encodeCELTFrame(psSource, buffer);
		if (len == 0)
			return;
	} else {
		len = encodeSpeexFrame(psSource, buffer);
	}

	flushCheck(QByteArray(reinterpret_cast<const char *>(buffer), len), ! bIsSpeech);

	if (! bIsSpeech)
		iBitrate = 0;

	bPreviousVoice = bIsSpeech;
}
Example #15
int JackAudioSystem::process_callback(jack_nframes_t nframes, void *arg) {
	JackAudioSystem * const jas = static_cast<JackAudioSystem*>(arg);

	if (jas && jas->bJackIsGood) {
		AudioInputPtr ai = g.ai;
		AudioOutputPtr ao = g.ao;
		JackAudioInput * const jai = dynamic_cast<JackAudioInput *>(ai.get());
		JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());

		if (jai && jai->isRunning() && jai->iMicChannels > 0 && !jai->isFinished()) {
			// Named locker: holds jai->qmMutex until the end of this block.
			QMutexLocker locker(&jai->qmMutex);
			void *input = jack_port_get_buffer(jas->in_port, nframes);
			if (input != NULL) {
				jai->addMic(input, nframes);
			}
		}

		if (jao && jao->isRunning() && jao->iChannels > 0 && !jao->isFinished()) {
			// Named locker: holds jao->qmMutex until the end of this block.
			QMutexLocker locker(&jao->qmMutex);

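			// Fetch JACK's per-channel output buffers for this cycle; bail out with an error if any port has none.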
			jack_default_audio_sample_t *port_buffers[JACK_MAX_OUTPUT_PORTS];
			for (unsigned int i = 0; i < jao->iChannels; ++i) {

				port_buffers[i] = (jack_default_audio_sample_t*)jack_port_get_buffer(jas->out_ports[i], nframes);
				if (port_buffers[i] == NULL) {
					return 1;
				}
			}

			jack_default_audio_sample_t * const buffer = jas->output_buffer;
			memset(buffer, 0, sizeof(jack_default_audio_sample_t) * nframes * jao->iChannels);

			jao->mix(buffer, nframes);

			if (jao->iChannels == 1) {

				memcpy(port_buffers[0], buffer, sizeof(jack_default_audio_sample_t) * nframes);
			} else {

				// de-interleave channels
				for (unsigned int i = 0; i < nframes * jao->iChannels; ++i) {
					port_buffers[i % jao->iChannels][i / jao->iChannels] = buffer[i];
				}
			}
		}
	}

	return 0;
}
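For reference, JACK runs a process callback like the one above once per period; it has to be installed before the client is activated. Below is a minimal sketch of that wiring, assuming process_callback is a static member and jas points to the live JackAudioSystem instance; the client name "mumble" is a placeholder, and Mumble's real setup code is not part of this example.

#include <jack/jack.h>

// Open a JACK client, install the realtime callback, and start the graph.
static jack_client_t *startJack(JackAudioSystem *jas) {
	jack_status_t status;
	jack_client_t *client = jack_client_open("mumble", JackNullOption, &status);
	if (!client)
		return NULL;
	// jas is handed back to process_callback() through the void *arg parameter.
	jack_set_process_callback(client, JackAudioSystem::process_callback, jas);
	// From here on, process_callback() runs on JACK's realtime thread once per period.
	jack_activate(client);
	return client;
}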