void AudioInput::encodeAudioFrame() {
	int iArg;
	//ClientUser *p=ClientUser::get(g.uiSession);
	int i;
	float sum;
	short max;
	short *psSource;

	iFrameCounter++;

	if (! bRunning) {
		return;
	}

	/*
	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psMic[i] * psMic[i]);
	iLevel = sqrtf(sum / static_cast<float>(iFrameSize)) * 9/32768.0f;
	dPeakMic=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
	if (dPeakMic < -96.0f)
		dPeakMic = -96.0f;

	max = 1;
	for (i=0;i<iFrameSize;i++)
		max = static_cast<short>(abs(psMic[i]) > max ? abs(psMic[i]) : max);
	dMaxMic = max;

	if (psSpeaker && (iEchoChannels > 0)) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
		if (dPeakSpeaker < -96.0f)
			dPeakSpeaker = -96.0f;
	} else {
		dPeakSpeaker = 0.0;
	}
	*/

	MutexLocker l(&qmSpeex);

	// Note: this assignment disables the reset block below; as written it never runs.
	bResetProcessor = false;
	if (bResetProcessor) {
		if (sppPreprocess)
			speex_preprocess_state_destroy(sppPreprocess);
		if (sesEcho)
			speex_echo_state_destroy(sesEcho);

		sppPreprocess = speex_preprocess_state_init(iFrameSize, iSampleRate);

		iArg = 1;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_VAD, &iArg);
		//speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DENOISE, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DEREVERB, &iArg);

		iArg = 30000;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_TARGET, &iArg);

		float v = 30000.0f / static_cast<float>(g_struct.s.iMinLoudness);
		iArg = (floorf(20.0f * log10f(v)));
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_MAX_GAIN, &iArg);

		iArg = g_struct.s.iNoiseSuppress;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

		if (iEchoChannels > 0) {
			sesEcho = speex_echo_state_init_mc(iFrameSize, iFrameSize*10, 1, bEchoMulti ? iEchoChannels : 1);
			iArg = iSampleRate;
			speex_echo_ctl(sesEcho, SPEEX_ECHO_SET_SAMPLING_RATE, &iArg);
			speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_ECHO_STATE, sesEcho);
			Trace("AudioInput: ECHO CANCELLER ACTIVE");
		} else {
			sesEcho = NULL;
		}

		bResetProcessor = false;
	}

	int iIsSpeech=1;
	psSource = psMic;

	/*
	// Echo cancellation and audio cleanup
	if (bEcho && sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		iIsSpeech=speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		iIsSpeech=speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}
	*/

	/*
	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal=20.0f*log10f(micLevel / 32768.0f);
	if (dPeakSignal < -96.0f)
		dPeakSignal = -96.0f;

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	float level = (g_struct.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakMic / 96.0f);
	if (level > g_struct.s.fVADmax)
		iIsSpeech = 1;
	else if (level > g_struct.s.fVADmin && bPreviousVoice)
		iIsSpeech = 1;
	else
		iIsSpeech = 0;

	if (! iIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g_struct.s.iVoiceHold)
			iIsSpeech=1;
	} else {
		iHoldFrames = 0;
	}
	*/

	//tIdle.restart();

	/*
	int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
	qWarning() << "Set Callback" << r;
	*/

	// Encode with Speex or CELT
	unsigned char buffer[512];
	int len;

	if (umtType != MessageHandler::UDPVoiceSpeex) {
		if (cCodec == NULL) {
			cCodec = new CELTCodec;
			umtType = MessageHandler::UDPVoiceCELT;
			ceEncoder = cCodec->encoderCreate();
		} else if (cCodec && ! bPreviousVoice) {
			cCodec->encoder_ctl(ceEncoder, CELT_RESET_STATE);
		}
		cCodec->encoder_ctl(ceEncoder, CELT_SET_PREDICTION(0));
		cCodec->encoder_ctl(ceEncoder, CELT_SET_BITRATE(iAudioQuality));
		len = cCodec->encode(ceEncoder, psSource, SAMPLE_RATE / 100, buffer, 512);
		iBitrate = len * 100 * 8;
	} else {
		int vbr = 0;
		speex_encoder_ctl(esSpeex, SPEEX_GET_VBR_MAX_BITRATE, &vbr);
		if (vbr != iAudioQuality) {
			vbr = iAudioQuality;
			speex_encoder_ctl(esSpeex, SPEEX_SET_VBR_MAX_BITRATE, &vbr);
		}
		if (! bPreviousVoice)
			speex_encoder_ctl(esSpeex, SPEEX_RESET_STATE, NULL);
		speex_encode_int(esSpeex, psSource, &sbBits);
		len = speex_bits_write(&sbBits, reinterpret_cast<char *>(buffer), 127);
		iBitrate = len * 50 * 8;
		speex_bits_reset(&sbBits);
	}

	QByteArray qba;
	for (int i=0; i<len; i++) {
		qba.push_back(buffer[i]);
	}
	flushCheck(qba, false);

	if (! iIsSpeech)
		iBitrate = 0;

	bPreviousVoice = iIsSpeech;
}
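// A minimal, self-contained sketch of the level metering done (or commented out) in the
// variants above: the RMS of one 16-bit frame is converted to dBFS and clamped at -96 dB.
// The function name peakLevelDb is illustrative and not part of the original code.
#include <cmath>
#include <algorithm>

static float peakLevelDb(const short *frame, int frameSize) {
	float sum = 1.0f;                               // small bias avoids log10f(0) on silence
	for (int i = 0; i < frameSize; ++i)
		sum += static_cast<float>(frame[i] * frame[i]);
	float rms = std::sqrt(sum / static_cast<float>(frameSize));
	float db = 20.0f * std::log10(rms / 32768.0f);  // 32768 = full scale for 16-bit samples
	return std::max(db, -96.0f);                    // same floor as dPeakMic/dPeakSpeaker above
}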
int Calculator::qualityOfCards(Card* hand1, Card* hand2, Card* flop0, Card* flop1, Card* flop2, Card* turn, Card* river) {
	cards[0] = hand1;
	cards[1] = hand2;
	cards[2] = flop0;
	cards[3] = flop1;
	cards[4] = flop2;
	cards[5] = turn;
	cards[6] = river;

	/* HIGHCARD, PAIR, TWOPAIR, TRIPS, STRAIGHT, FLUSH, FULLHOUSE, QUADS, STRAIGHTFLUSH, ROYALFLUSH */

	bubbleSortByValue();

	int flushCheckValue = flushCheck();
	if (flushCheckValue >= 0 && straightCheck(flushCheckValue) >= 0) {
		return STRAIGHTFLUSH;
	}

	keyCards.Empty();
	if (quadsCheck() >= 0) {
		fillKeyCards();
		return QUADS;
	}

	keyCards.Empty();
	int tripsCheckValue = tripsCheck(-1);
	if ((tripsCheckValue >= 0) && pairCheck(tripsCheckValue) != -1) {
		fillKeyCards();
		return FULLHOUSE;
	}

	keyCards.Empty();
	if (flushCheck() >= 0) {
		return FLUSH;
	}

	keyCards.Empty();
	if (straightCheck(-1) >= 0) {
		return STRAIGHT;
	}

	keyCards.Empty();
	if (tripsCheck(-1) >= 0) {
		fillKeyCards();
		return TRIPS;
	}

	keyCards.Empty();
	if (pairCheck(pairCheck(-1)) >= 0) {
		fillKeyCards();
		return TWOPAIR;
	}

	keyCards.Empty();
	if (pairCheck(-1) >= 0) {
		fillKeyCards();
		return PAIR;
	}

	keyCards.Empty();
	//if (returnValue == HIGHCARD)
	fillKeyCards();
	return HIGHCARD;
}
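// Sketch of the ordering assumption behind the checks above: the category constants must
// increase with hand strength so that the first successful check (run from strongest to
// weakest) is also the best answer, and so two hands can be compared numerically. The enum
// below is an assumed layout taken from the comment in the function; the real project
// presumably declares these constants elsewhere, possibly with different values.
enum HandCategory {
	HIGHCARD, PAIR, TWOPAIR, TRIPS, STRAIGHT,
	FLUSH, FULLHOUSE, QUADS, STRAIGHTFLUSH, ROYALFLUSH
};

static_assert(STRAIGHTFLUSH > QUADS && QUADS > FULLHOUSE && FULLHOUSE > FLUSH && FLUSH > STRAIGHT,
              "category values must grow with hand strength for the early returns to be correct");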
void AudioInput::encodeAudioFrame() {
	int iArg;
	int i;
	float sum;
	short max;
	short *psSource;

	iFrameCounter++;

	if (! bRunning)
		return;

	sum=1.0f;
	max = 1;
	for (i=0;i<iFrameSize;i++) {
		sum += static_cast<float>(psMic[i] * psMic[i]);
		max = std::max(static_cast<short>(abs(psMic[i])), max);
	}
	dPeakMic = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	dMaxMic = max;

	if (psSpeaker && (iEchoChannels > 0)) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);

	resetAudioProcessor();

	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_AGC_GAIN, &iArg);
	float gainValue = static_cast<float>(iArg);
	iArg = g.s.iNoiseSuppress - iArg;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

	if (sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	// clean microphone level: peak of filtered signal attenuated by AGC gain
	dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);

	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);

	bool bIsSpeech = false;

	if (level > g.s.fVADmax)
		bIsSpeech = true;
	else if (level > g.s.fVADmin && bPreviousVoice)
		bIsSpeech = true;

	if (! bIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			bIsSpeech = true;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continuous)
		bIsSpeech = true;
	else if (g.s.atTransmit == Settings::PushToTalk)
		bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);

	ClientUser *p = ClientUser::get(g.uiSession);
	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
		bIsSpeech = false;
	}

	if (bIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 500)
			iFrameCounter = 0;
	}

	if (p) {
		if (! bIsSpeech)
			p->setTalking(Settings::Passive);
		else if (g.iTarget == 0)
			p->setTalking(Settings::Talking);
		else
			p->setTalking(Settings::Shouting);
	}

	if (g.s.bTxAudioCue && g.uiSession != 0) {
		AudioOutputPtr ao = g.ao;
		if (bIsSpeech && ! bPreviousVoice && ao)
			ao->playSample(g.s.qsTxAudioCueOn);
		else if (ao && !bIsSpeech && bPreviousVoice)
			ao->playSample(g.s.qsTxAudioCueOff);
	}

	if (! bIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;

		if (g.s.iaeIdleAction != Settings::Nothing && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
			if (g.s.iaeIdleAction == Settings::Deafen && !g.s.bDeaf) {
				tIdle.restart();
				emit doDeaf();
			} else if (g.s.iaeIdleAction == Settings::Mute && !g.s.bMute) {
				tIdle.restart();
				emit doMute();
			}
		}

		spx_int32_t increment = 0;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
		return;
	} else {
		spx_int32_t increment = 12;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
	}

	if (bIsSpeech && !bPreviousVoice) {
		bResetEncoder = true;
	}

	tIdle.restart();

	EncodingOutputBuffer buffer;
	Q_ASSERT(buffer.size() >= static_cast<size_t>(iAudioQuality / 100 * iAudioFrames / 8));

	int len = 0;
	bool encoded = true;

	if (!selectCodec())
		return;

	if (umtType == MessageHandler::UDPVoiceCELTAlpha || umtType == MessageHandler::UDPVoiceCELTBeta) {
		len = encodeCELTFrame(psSource, buffer);
		if (len <= 0) {
			iBitrate = 0;
			qWarning() << "encodeCELTFrame failed" << iBufferedFrames << iFrameSize << len;
			return;
		}
		++iBufferedFrames;
	} else if (umtType == MessageHandler::UDPVoiceOpus) {
		encoded = false;
		opusBuffer.insert(opusBuffer.end(), psSource, psSource + iFrameSize);
		++iBufferedFrames;

		if (!bIsSpeech || iBufferedFrames >= iAudioFrames) {
			if (iBufferedFrames < iAudioFrames) {
				// Stuff frame to framesize if speech ends and we don't have enough audio.
				// This way we are guaranteed to have a valid framecount and won't cause
				// a codec configuration switch by suddenly using a wildly different
				// framecount per packet.
				const int missingFrames = iAudioFrames - iBufferedFrames;
				opusBuffer.insert(opusBuffer.end(), iFrameSize * missingFrames, 0);
				iBufferedFrames += missingFrames;
				iFrameCounter += missingFrames;
			}
			Q_ASSERT(iBufferedFrames == iAudioFrames);

			len = encodeOpusFrame(&opusBuffer[0], iBufferedFrames * iFrameSize, buffer);
			opusBuffer.clear();
			if (len <= 0) {
				iBitrate = 0;
				qWarning() << "encodeOpusFrame failed" << iBufferedFrames << iFrameSize << len;
				iBufferedFrames = 0; // These are lost. Make sure not to mess up our sequence counter next flushCheck.
				return;
			}
			encoded = true;
		}
	}

	if (encoded) {
		flushCheck(QByteArray(reinterpret_cast<char *>(&buffer[0]), len), !bIsSpeech);
	}

	if (! bIsSpeech)
		iBitrate = 0;

	bPreviousVoice = bIsSpeech;
}
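// Minimal sketch of the Opus buffering strategy used above: frames are appended to a local
// buffer and only handed to the encoder once a full packet's worth has accumulated, padding
// with silence when speech stops early so every packet carries the same frame count. The
// names and constants (accumulateFrame, kFrameSize, kFramesPerPacket) are illustrative
// assumptions, not values from the original code.
#include <vector>
#include <cstddef>

static const int kFrameSize = 480;        // assumed 10 ms at 48 kHz
static const int kFramesPerPacket = 2;    // assumed number of frames per packet

static std::vector<short> pendingSamples;
static int pendingFrames = 0;

// Returns true when the buffer holds exactly kFramesPerPacket frames and is ready to encode;
// the caller then encodes pendingSamples and clears both the buffer and the frame counter.
static bool accumulateFrame(const short *frame, bool isSpeech) {
	pendingSamples.insert(pendingSamples.end(), frame, frame + kFrameSize);
	++pendingFrames;
	if (isSpeech && pendingFrames < kFramesPerPacket)
		return false;                     // keep buffering while speech continues
	if (pendingFrames < kFramesPerPacket) {
		// Speech ended early: pad with zero samples so the packet keeps a constant frame count.
		int missing = kFramesPerPacket - pendingFrames;
		pendingSamples.insert(pendingSamples.end(), static_cast<size_t>(missing) * kFrameSize, 0);
		pendingFrames += missing;
	}
	return true;
}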
void AudioInput::encodeAudioFrame() {
	int iArg;
	ClientPlayer *p=ClientPlayer::get(g.uiSession);
	int i;
	float sum;
	short max;
	short *psSource;

	iFrameCounter++;

	if (! bRunning) {
		return;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psMic[i] * psMic[i]);
	dPeakMic=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
	if (dPeakMic < -96.0f)
		dPeakMic = -96.0f;

	max = 1;
	for (i=0;i<iFrameSize;i++)
		max = static_cast<short>(abs(psMic[i]) > max ? abs(psMic[i]) : max);
	dMaxMic = max;

	if (g.bEchoTest) {
		STACKVAR(float, fft, iFrameSize);
		STACKVAR(float, power, iFrameSize);
		float scale = 1.f / static_cast<float>(iFrameSize);
		for (i=0;i<iFrameSize;i++)
			fft[i] = static_cast<float>(psMic[i]) * scale;
		mumble_drft_forward(&fftTable, fft);
		float mp = 0.0f;
		int bin = 0;
		power[0]=power[1]=0.0f;
		for (i=2;i < iFrameSize / 2;i++) {
			power[i] = sqrtf(fft[2*i]*fft[2*i]+fft[2*i-1]*fft[2*i-1]);
			if (power[i] > mp) {
				bin = i;
				mp = power[i];
			}
		}
		for (i=2;i< iFrameSize / 2;i++) {
			if (power[i] * 2 > mp) {
				if (i != bin)
					bin = 0;
			}
		}
		iBestBin = bin * 2;
	}

	if (iEchoChannels > 0) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker=20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f);
		if (dPeakSpeaker < -96.0f)
			dPeakSpeaker = -96.0f;
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);

	if (bResetProcessor) {
		if (sppPreprocess)
			speex_preprocess_state_destroy(sppPreprocess);
		if (sesEcho)
			speex_echo_state_destroy(sesEcho);

		sppPreprocess = speex_preprocess_state_init(iFrameSize, SAMPLE_RATE);

		iArg = 1;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_VAD, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DENOISE, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DEREVERB, &iArg);

		iArg = 30000;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_TARGET, &iArg);

		float v = 30000.0f / static_cast<float>(g.s.iMinLoudness);
		iArg = lroundf(floorf(20.0f * log10f(v)));
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_MAX_GAIN, &iArg);

		iArg = g.s.iNoiseSuppress;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

		if (iEchoChannels > 0) {
			sesEcho = speex_echo_state_init(iFrameSize, iFrameSize*10);
			iArg = SAMPLE_RATE;
			speex_echo_ctl(sesEcho, SPEEX_SET_SAMPLING_RATE, &iArg);
			speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_ECHO_STATE, sesEcho);
			jitter_buffer_reset(jb);
			qWarning("AudioInput: ECHO CANCELLER ACTIVE");
		} else {
			sesEcho = NULL;
		}

		iFrames = 0;
		speex_bits_reset(&sbBits);

		bResetProcessor = false;
	}

	int iIsSpeech;

	if (sesEcho) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		iIsSpeech=speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		iIsSpeech=speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal=20.0f*log10f(micLevel / 32768.0f);
	if (dPeakSignal < -96.0f)
		dPeakSignal = -96.0f;

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakMic / 96.0f);

	if (level > g.s.fVADmax)
		iIsSpeech = 1;
	else if (level > g.s.fVADmin && bPreviousVoice)
		iIsSpeech = 1;
	else
		iIsSpeech = 0;

	if (! iIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			iIsSpeech=1;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continous)
		iIsSpeech = 1;
	else if (g.s.atTransmit == Settings::PushToTalk)
		iIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	iIsSpeech = iIsSpeech || (g.iPushToTalk > 0) || (g.iAltSpeak > 0);

	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && p->bMute) || g.bPushToMute) {
		iIsSpeech = 0;
	}

	if (iIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 200)
			iFrameCounter = 0;
	}

	if (p)
		p->setTalking(iIsSpeech, (g.iAltSpeak > 0));

	if (g.s.bPushClick && (g.s.atTransmit == Settings::PushToTalk)) {
		AudioOutputPtr ao = g.ao;
		if (iIsSpeech && ! bPreviousVoice && ao)
			ao->playSine(400.0f,1200.0f,5);
		else if (ao && !iIsSpeech && bPreviousVoice)
			ao->playSine(620.0f,-1200.0f,5);
	}

	if (! iIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;
		if (g.s.iIdleTime && ! g.s.bMute && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
			emit doMute();
			tIdle.restart();
		}
		return;
	}

	bPreviousVoice = iIsSpeech;

	tIdle.restart();

	if (! iIsSpeech) {
		memset(psMic, 0, sizeof(short) * iFrameSize);
	}

	if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && (iFrames == 0) && g.p->fetch()) {
		QByteArray q;
		QDataStream ds(&q, QIODevice::WriteOnly);
		ds << g.p->fPosition[0];
		ds << g.p->fPosition[1];
		ds << g.p->fPosition[2];

		speex_bits_pack(&sbBits, 13, 5);
		speex_bits_pack(&sbBits, q.size(), 4);

		const unsigned char *d=reinterpret_cast<const unsigned char*>(q.data());
		for (i=0;i<q.size();i++) {
			speex_bits_pack(&sbBits, d[i], 8);
		}
	}

	speex_encode_int(esEncState, psSource, &sbBits);
	iFrames++;

	speex_encoder_ctl(esEncState, SPEEX_GET_BITRATE, &iBitrate);

	flushCheck();
}
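// A compact sketch of the speexdsp echo-cancellation wiring shared by the variants above:
// an echo state with a ten-frame tail is registered with the preprocessor (so residual echo
// is suppressed as well), VAD and denoising are enabled, and each captured frame is cancelled
// against the frame most recently played back. kFrameSize, kSampleRate and the function names
// are illustrative assumptions, not values from the original code.
#include <speex/speex_echo.h>
#include <speex/speex_preprocess.h>

static const int kFrameSize = 320;      // assumed 20 ms frames at 16 kHz
static const int kSampleRate = 16000;   // assumed

static SpeexEchoState *echoState = 0;
static SpeexPreprocessState *preprocState = 0;

static void setupEchoCanceller() {
	echoState = speex_echo_state_init(kFrameSize, kFrameSize * 10);   // ~10 frames of echo tail
	int arg = kSampleRate;
	speex_echo_ctl(echoState, SPEEX_ECHO_SET_SAMPLING_RATE, &arg);

	preprocState = speex_preprocess_state_init(kFrameSize, kSampleRate);
	arg = 1;
	speex_preprocess_ctl(preprocState, SPEEX_PREPROCESS_SET_VAD, &arg);
	speex_preprocess_ctl(preprocState, SPEEX_PREPROCESS_SET_DENOISE, &arg);
	speex_preprocess_ctl(preprocState, SPEEX_PREPROCESS_SET_ECHO_STATE, echoState);
}

// Removes the speaker signal from the mic frame, then denoises; returns the VAD decision.
static int processFrame(const spx_int16_t *mic, const spx_int16_t *speaker, spx_int16_t *clean) {
	speex_echo_cancellation(echoState, mic, speaker, clean);
	return speex_preprocess_run(preprocState, clean);
}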
void AudioInput::encodeAudioFrame() {
	int iArg;
	//ClientUser *p=ClientUser::get(g.uiSession);
	int i;
	float sum;
	short max;
	short *psSource;

	iFrameCounter++;

	if (! bRunning) {
		return;
	}

	MutexLocker l(&qmSpeex);

	// Note: this assignment disables the reset block below; as written it never runs.
	bResetProcessor = false;
	if (bResetProcessor) {
		if (sppPreprocess)
			speex_preprocess_state_destroy(sppPreprocess);
		if (sesEcho)
			speex_echo_state_destroy(sesEcho);

		sppPreprocess = speex_preprocess_state_init(iFrameSize, iSampleRate);

		iArg = 1;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_VAD, &iArg);
		//speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DENOISE, &iArg);
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_DEREVERB, &iArg);

		iArg = 30000;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_TARGET, &iArg);

		float v = 30000.0f / static_cast<float>(g_struct.s.iMinLoudness);
		iArg = (floorf(20.0f * log10f(v)));
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_MAX_GAIN, &iArg);

		iArg = g_struct.s.iNoiseSuppress;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

		if (iEchoChannels > 0) {
			sesEcho = speex_echo_state_init_mc(iFrameSize, iFrameSize*10, 1, bEchoMulti ? iEchoChannels : 1);
			iArg = iSampleRate;
			speex_echo_ctl(sesEcho, SPEEX_ECHO_SET_SAMPLING_RATE, &iArg);
			speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_ECHO_STATE, sesEcho);
			Trace("AudioInput: ECHO CANCELLER ACTIVE");
		} else {
			sesEcho = NULL;
		}

		bResetProcessor = false;
	}

	int iIsSpeech=1;
	psSource = psMic;

	/*
	// Echo cancellation and audio cleanup
	if (bEcho && sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		iIsSpeech=speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		iIsSpeech=speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}
	*/

	/*
	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal=20.0f*log10f(micLevel / 32768.0f);
	if (dPeakSignal < -96.0f)
		dPeakSignal = -96.0f;

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	float level = (g_struct.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakMic / 96.0f);
	if (level > g_struct.s.fVADmax)
		iIsSpeech = 1;
	else if (level > g_struct.s.fVADmin && bPreviousVoice)
		iIsSpeech = 1;
	else
		iIsSpeech = 0;

	if (! iIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g_struct.s.iVoiceHold)
			iIsSpeech=1;
	} else {
		iHoldFrames = 0;
	}
	*/

	//tIdle.restart();

	/*
	int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
	qWarning() << "Set Callback" << r;
	*/

	// Encode with Speex or CELT
	unsigned char buffer[512];
	int len;

	if (umtType == MessageHandler::UDPVoiceCELT) {
		if (cCodec == NULL) {
			cCodec = CELTCodec::instance();
			ceEncoder = cCodec->encoderCreate();
		} else if (cCodec && ! bPreviousVoice) {
			cCodec->encoder_ctl(ceEncoder, CELT_RESET_STATE);
		}
		cCodec->encoder_ctl(ceEncoder, CELT_SET_PREDICTION(0));
		cCodec->encoder_ctl(ceEncoder, CELT_SET_BITRATE(iAudioQuality));
		len = cCodec->encode(ceEncoder, psSource, SAMPLE_RATE / 50, buffer, 512);
		iBitrate = len * 50 * 8;

		/*////////////////////////////////////////////////////////////////////////
		if (m_de_cdDecoder == NULL) {
			m_de_cdDecoder = cCodec->decoderCreate();
		}
		celt_int16 fout2[2560]={0};
		if (cCodec) {
			int len3 = cCodec->decode(m_de_cdDecoder, buffer, len, fout2, SAMPLE_RATE / 50);
			len3++;
			UINT dwDataWrote;
			if( FAILED(g_pWaveFile.Write( SAMPLE_RATE / 50*2*2, (BYTE*)fout2, &dwDataWrote ) )) {
				int a=0;
				a++;
			} else {
				OutputDebugString(L"plushuwav g_pWaveFile.Write 3");
			}
		}
		///////////////////////////////////////////////////////////////////////*/
	} else {
		assert(0);
	}

	QByteArray qba;
	for (int i=0; i<len; i++) {
		qba.push_back(buffer[i]);
	}
	flushCheck(qba, false);

	if (! iIsSpeech)
		iBitrate = 0;

	bPreviousVoice = iIsSpeech;
}
void AudioInput::encodeAudioFrame() {
	int iArg;
	ClientUser *p=ClientUser::get(g.uiSession);
	int i;
	float sum;
	short max;
	short *psSource;

	iFrameCounter++;

	if (! bRunning)
		return;

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psMic[i] * psMic[i]);
	dPeakMic = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);

	max = 1;
	for (i=0;i<iFrameSize;i++)
		max = static_cast<short>(abs(psMic[i]) > max ? abs(psMic[i]) : max);
	dMaxMic = max;

	if (psSpeaker && (iEchoChannels > 0)) {
		sum=1.0f;
		for (i=0;i<iFrameSize;i++)
			sum += static_cast<float>(psSpeaker[i] * psSpeaker[i]);
		dPeakSpeaker = qMax(20.0f*log10f(sqrtf(sum / static_cast<float>(iFrameSize)) / 32768.0f), -96.0f);
	} else {
		dPeakSpeaker = 0.0;
	}

	QMutexLocker l(&qmSpeex);

	resetAudioProcessor();

	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_AGC_GAIN, &iArg);
	float gainValue = static_cast<float>(iArg);
	iArg = g.s.iNoiseSuppress - iArg;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_NOISE_SUPPRESS, &iArg);

	if (sesEcho && psSpeaker) {
		speex_echo_cancellation(sesEcho, psMic, psSpeaker, psClean);
		speex_preprocess_run(sppPreprocess, psClean);
		psSource = psClean;
	} else {
		speex_preprocess_run(sppPreprocess, psMic);
		psSource = psMic;
	}

	sum=1.0f;
	for (i=0;i<iFrameSize;i++)
		sum += static_cast<float>(psSource[i] * psSource[i]);
	float micLevel = sqrtf(sum / static_cast<float>(iFrameSize));
	dPeakSignal = qMax(20.0f*log10f(micLevel / 32768.0f), -96.0f);

	spx_int32_t prob = 0;
	speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_GET_PROB, &prob);
	fSpeechProb = static_cast<float>(prob) / 100.0f;

	// clean microphone level: peak of filtered signal attenuated by AGC gain
	dPeakCleanMic = qMax(dPeakSignal - gainValue, -96.0f);

	float level = (g.s.vsVAD == Settings::SignalToNoise) ? fSpeechProb : (1.0f + dPeakCleanMic / 96.0f);

	bool bIsSpeech = false;

	if (level > g.s.fVADmax)
		bIsSpeech = true;
	else if (level > g.s.fVADmin && bPreviousVoice)
		bIsSpeech = true;

	if (! bIsSpeech) {
		iHoldFrames++;
		if (iHoldFrames < g.s.iVoiceHold)
			bIsSpeech = true;
	} else {
		iHoldFrames = 0;
	}

	if (g.s.atTransmit == Settings::Continous)
		bIsSpeech = true;
	else if (g.s.atTransmit == Settings::PushToTalk)
		bIsSpeech = g.s.uiDoublePush && ((g.uiDoublePush < g.s.uiDoublePush) || (g.tDoublePush.elapsed() < g.s.uiDoublePush));

	bIsSpeech = bIsSpeech || (g.iPushToTalk > 0);

	if (g.s.bMute || ((g.s.lmLoopMode != Settings::Local) && p && (p->bMute || p->bSuppress)) || g.bPushToMute || (g.iTarget < 0)) {
		bIsSpeech = false;
	}

	if (bIsSpeech) {
		iSilentFrames = 0;
	} else {
		iSilentFrames++;
		if (iSilentFrames > 500)
			iFrameCounter = 0;
	}

	if (p) {
		if (! bIsSpeech)
			p->setTalking(Settings::Passive);
		else if (g.iTarget == 0)
			p->setTalking(Settings::Talking);
		else
			p->setTalking(Settings::Shouting);
	}

	if (g.s.bTxAudioCue && g.uiSession != 0) {
		AudioOutputPtr ao = g.ao;
		if (bIsSpeech && ! bPreviousVoice && ao)
			ao->playSample(g.s.qsTxAudioCueOn);
		else if (ao && !bIsSpeech && bPreviousVoice)
			ao->playSample(g.s.qsTxAudioCueOff);
	}

	if (! bIsSpeech && ! bPreviousVoice) {
		iBitrate = 0;

		if (g.s.iIdleTime && ! g.s.bDeaf && ((tIdle.elapsed() / 1000000ULL) > g.s.iIdleTime)) {
			emit doDeaf();
			tIdle.restart();
		}

		spx_int32_t increment = 0;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
		return;
	} else {
		spx_int32_t increment = 12;
		speex_preprocess_ctl(sppPreprocess, SPEEX_PREPROCESS_SET_AGC_INCREMENT, &increment);
	}

	tIdle.restart();

	/*
	int r = celt_encoder_ctl(ceEncoder, CELT_SET_POST_MDCT_CALLBACK(celtBack, NULL));
	qWarning() << "Set Callback" << r;
	*/

	unsigned char buffer[512];
	int len;

	if (umtType != MessageHandler::UDPVoiceSpeex) {
		len = encodeCELTFrame(psSource, buffer);
		if (len == 0)
			return;
	} else {
		len = encodeSpeexFrame(psSource, buffer);
	}

	flushCheck(QByteArray(reinterpret_cast<const char *>(buffer), len), ! bIsSpeech);

	if (! bIsSpeech)
		iBitrate = 0;

	bPreviousVoice = bIsSpeech;
}
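// Sketch of the two-threshold voice-activity decision with hold frames that every variant
// above applies: a frame starts speech only above fVADmax, keeps an ongoing stream alive
// above fVADmin, and a short hold period bridges brief dips. It omits the later gating
// (push-to-talk, mute, target checks) that the full functions apply afterwards; the
// threshold and hold values below are illustrative, not the project's defaults.
struct VadState {
	bool previousVoice = false;
	int holdFrames = 0;
};

static bool vadDecision(float level, VadState &st,
                        float vadMin = 0.3f, float vadMax = 0.6f, int voiceHold = 20) {
	bool isSpeech = false;
	if (level > vadMax)
		isSpeech = true;                        // loud enough to open the gate
	else if (level > vadMin && st.previousVoice)
		isSpeech = true;                        // keep the gate open while already talking
	if (!isSpeech) {
		if (++st.holdFrames < voiceHold)
			isSpeech = true;                    // hold over short pauses
	} else {
		st.holdFrames = 0;
	}
	st.previousVoice = isSpeech;
	return isSpeech;
}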