void JackAudioSystem::initializeInput() { QMutexLocker lock(&qmWait); if (!jasys->bJackIsGood) { return; } AudioInputPtr ai = g.ai; JackAudioInput * const jai = dynamic_cast<JackAudioInput *>(ai.get()); if (jai) { jai->qmMutex.lock(); } in_port = jack_port_register(client, "input", JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0); if (in_port == NULL) { qWarning("JackAudioSystem: unable to register 'input' port"); return; } bInputIsGood = true; if (jai) { jai->qmMutex.unlock(); } }
void AudioInput::setMaxBandwidth(int bitspersec) { if (bitspersec == g.iMaxBandwidth) return; int frames; int bitrate; adjustBandwidth(bitspersec, bitrate, frames); g.iMaxBandwidth = bitspersec; if (bitspersec != -1) { if ((bitrate != g.s.iQuality) || (frames != g.s.iFramesPerPacket)) g.mw->msgBox(tr("Server maximum network bandwidth is only %1 kbit/s. Audio quality auto-adjusted to %2 kbit/s (%3 ms)").arg(bitspersec / 1000).arg(bitrate / 1000).arg(frames*10)); } AudioInputPtr ai = g.ai; if (ai) { g.iAudioBandwidth = getNetworkBandwidth(bitrate, frames); ai->iAudioQuality = bitrate; ai->iAudioFrames = frames; return; } ai.reset(); Audio::stopInput(); Audio::startInput(); }
// Unregister the JACK "input" port and mark input as unusable.
// Holds the JackAudioInput's qmMutex (if one exists) so the process
// callback does not read the port while it is being torn down.
//
// Fix: the original returned on jack_port_unregister() failure while
// still holding jai->qmMutex (deadlock) and without clearing
// bInputIsGood; release the lock on every exit path.
void JackAudioSystem::destroyInput() {
	AudioInputPtr ai = g.ai;
	JackAudioInput * const jai = dynamic_cast<JackAudioInput *>(ai.get());
	if (jai) {
		jai->qmMutex.lock();
	}

	if (in_port != NULL) {
		int err = jack_port_unregister(client, in_port);
		if (err != 0) {
			qWarning("JackAudioSystem: unable to unregister in port - jack_port_unregister() returned %i", err);
			// Unlock before bailing; leaking the lock would wedge the
			// process callback and any later init/destroy call.
			if (jai) {
				jai->qmMutex.unlock();
			}
			return;
		}
	}

	bInputIsGood = false;

	if (jai) {
		jai->qmMutex.unlock();
	}
}
// PulseAudio capture callback for both the microphone stream and the
// echo (speaker-monitor) stream. Peeks the next fragment, feeds it to
// the active PulseAudioInput (reconfiguring the mixer if the sample
// spec changed), and then drops the fragment from the stream.
void PulseAudioSystem::read_callback(pa_stream *s, size_t bytes, void *userdata) {
	PulseAudioSystem *pas = reinterpret_cast<PulseAudioSystem *>(userdata);
	size_t length = bytes;
	const void *data = NULL;
	pa_stream_peek(s, &data, &length);
	// NOTE: only the third branch returns. data==NULL with length>0 is
	// a "hole" in the stream and data==NULL with length==0 is an empty
	// queue; both are logged and deliberately fall through so the
	// common drop/bookkeeping below still runs.
	if (data == NULL && length > 0) {
		qWarning("PulseAudio: pa_stream_peek reports no data at current read index.");
	} else if (data == NULL && length == 0) {
		qWarning("PulseAudio: pa_stream_peek reports empty memblockq.");
	} else if (data == NULL || length == 0) {
		qWarning("PulseAudio: invalid pa_stream_peek state encountered.");
		return;
	}
	AudioInputPtr ai = g.ai;
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(ai.get());
	if (! pai) {
		// No PulseAudio input active: discard the fragment and poke the
		// event loop so stream state gets re-evaluated.
		if (length > 0) {
			pa_stream_drop(s);
		}
		pas->wakeup();
		return;
	}
	const pa_sample_spec *pss = pa_stream_get_sample_spec(s);
	if (s == pas->pasInput) {
		// Microphone stream: re-init the mixer if the server changed
		// the negotiated sample spec since last time.
		if (!pa_sample_spec_equal(pss, &pai->pssMic)) {
			pai->pssMic = *pss;
			pai->iMicFreq = pss->rate;
			pai->iMicChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eMicFormat = PulseAudioInput::SampleFloat;
			else
				pai->eMicFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			// length is in bytes; addMic takes sample frames.
			pai->addMic(data, static_cast<unsigned int>(length) / pai->iMicSampleSize);
		}
	} else if (s == pas->pasSpeaker) {
		// Echo stream (speaker monitor) — same dance as the mic above.
		if (!pa_sample_spec_equal(pss, &pai->pssEcho)) {
			pai->pssEcho = *pss;
			pai->iEchoFreq = pss->rate;
			pai->iEchoChannels = pss->channels;
			if (pss->format == PA_SAMPLE_FLOAT32NE)
				pai->eEchoFormat = PulseAudioInput::SampleFloat;
			else
				pai->eEchoFormat = PulseAudioInput::SampleShort;
			pai->initializeMixer();
		}
		if (data != NULL) {
			pai->addEcho(data, static_cast<unsigned int>(length) / pai->iEchoSampleSize);
		}
	}
	// Always advance the read index, even over holes (data==NULL).
	if (length > 0) {
		pa_stream_drop(s);
	}
}
// Deferred-event handler: reconciles the desired audio configuration
// (settings + active AudioInput/AudioOutput) with the actual state of
// the three PulseAudio streams (playback, microphone record, echo
// record), creating, connecting, or disconnecting streams as needed.
// Runs only once source/sink/server enumeration has all completed.
void PulseAudioSystem::eventCallback(pa_mainloop_api *api, pa_defer_event *) {
	api->defer_enable(pade, false);

	if (! bSourceDone || ! bSinkDone || ! bServerDone)
		return;

	AudioInputPtr ai = g.ai;
	AudioOutputPtr ao = g.ao;
	AudioInput *raw_ai = ai.get();
	AudioOutput *raw_ao = ao.get();
	// pai/pao are non-null only when the active input/output actually
	// is the PulseAudio implementation.
	PulseAudioInput *pai = dynamic_cast<PulseAudioInput *>(raw_ai);
	PulseAudioOutput *pao = dynamic_cast<PulseAudioOutput *>(raw_ao);

	// ---- Playback (output) stream ----
	if (raw_ao) {
		QString odev = outputDevice();
		pa_stream_state ost = pasOutput ? pa_stream_get_state(pasOutput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pao && (ost == PA_STREAM_READY)) {
			// Output is no longer PulseAudio but the stream still runs.
			do_stop = true;
		} else if (pao) {
			switch (ost) {
				case PA_STREAM_TERMINATED: {
					if (pasOutput)
						pa_stream_unref(pasOutput);

					// Build a sample spec from the enumerated device
					// info, with sane fallbacks.
					pa_sample_spec pss = qhSpecMap.value(odev);
					pa_channel_map pcm = qhChanMap.value(odev);
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					if ((pss.channels == 0) || (! g.s.doPositionalAudio()))
						pss.channels = 1;

					pasOutput = pa_stream_new(pacContext, mumble_sink_input, &pss, (pss.channels == 1) ? NULL : &pcm);
					pa_stream_set_state_callback(pasOutput, stream_callback, this);
					pa_stream_set_write_callback(pasOutput, write_callback, this);
				}
				// Intentional fallthrough: a freshly created stream is
				// unconnected, so both states want a (re)start.
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					// Restart if any cached config the stream was built
					// with has changed since it was connected.
					if (g.s.iOutputDelay != iDelayCache) {
						do_stop = true;
					} else if (g.s.doPositionalAudio() != bPositionalCache) {
						do_stop = true;
					} else if (odev != qsOutputCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping output");
			pa_stream_disconnect(pasOutput);
			iSinkId = -1;
		} else if (do_start) {
			qWarning("PulseAudio: Starting output: %s", qPrintable(odev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasOutput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			// One mix block, scaled to the device's rate and channels.
			const unsigned int iBlockLen = ((pao->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen * (g.s.iOutputDelay+1);
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			// Remember the config this stream was started with so
			// PA_STREAM_READY above can detect changes.
			iDelayCache = g.s.iOutputDelay;
			bPositionalCache = g.s.doPositionalAudio();
			qsOutputCache = odev;

			pa_stream_connect_playback(pasOutput, qPrintable(odev), &buff, PA_STREAM_ADJUST_LATENCY, NULL, NULL);
			pa_context_get_sink_info_by_name(pacContext, qPrintable(odev), sink_info_callback, this);
		}
	}

	// ---- Microphone (record) stream ----
	if (raw_ai) {
		QString idev = inputDevice();
		pa_stream_state ist = pasInput ? pa_stream_get_state(pasInput) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if (! pai && (ist == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai) {
			switch (ist) {
				case PA_STREAM_TERMINATED: {
					if (pasInput)
						pa_stream_unref(pasInput);

					pa_sample_spec pss = qhSpecMap.value(idev);
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					// Microphone capture is always mono.
					pss.channels = 1;

					pasInput = pa_stream_new(pacContext, "Microphone", &pss, NULL);
					pa_stream_set_state_callback(pasInput, stream_callback, this);
					pa_stream_set_read_callback(pasInput, read_callback, this);
				}
				// Intentional fallthrough (see output stream above).
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					if (idev != qsInputCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping input");
			pa_stream_disconnect(pasInput);
		} else if (do_start) {
			qWarning("PulseAudio: Starting input %s",qPrintable(idev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasInput);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			qsInputCache = idev;

			pa_stream_connect_record(pasInput, qPrintable(idev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}

	// ---- Echo (speaker-monitor record) stream ----
	// Records what the output device plays, for echo cancellation.
	if (raw_ai) {
		QString odev = outputDevice();
		QString edev = qhEchoMap.value(odev);
		pa_stream_state est = pasSpeaker ? pa_stream_get_state(pasSpeaker) : PA_STREAM_TERMINATED;
		bool do_stop = false;
		bool do_start = false;

		if ((! pai || ! g.s.doEcho()) && (est == PA_STREAM_READY)) {
			do_stop = true;
		} else if (pai && g.s.doEcho()) {
			switch (est) {
				case PA_STREAM_TERMINATED: {
					if (pasSpeaker)
						pa_stream_unref(pasSpeaker);

					pa_sample_spec pss = qhSpecMap.value(edev);
					pa_channel_map pcm = qhChanMap.value(edev);
					if ((pss.format != PA_SAMPLE_FLOAT32NE) && (pss.format != PA_SAMPLE_S16NE))
						pss.format = PA_SAMPLE_FLOAT32NE;
					if (pss.rate == 0)
						pss.rate = SAMPLE_RATE;
					if ((pss.channels == 0) || (! g.s.bEchoMulti))
						pss.channels = 1;

					pasSpeaker = pa_stream_new(pacContext, mumble_echo, &pss, (pss.channels == 1) ? NULL : &pcm);
					pa_stream_set_state_callback(pasSpeaker, stream_callback, this);
					pa_stream_set_read_callback(pasSpeaker, read_callback, this);
				}
				// Intentional fallthrough (see output stream above).
				case PA_STREAM_UNCONNECTED:
					do_start = true;
					break;
				case PA_STREAM_READY: {
					if (g.s.bEchoMulti != bEchoMultiCache) {
						do_stop = true;
					} else if (edev != qsEchoCache) {
						do_stop = true;
					}
					break;
				}
				default:
					break;
			}
		}
		if (do_stop) {
			qWarning("PulseAudio: Stopping echo");
			pa_stream_disconnect(pasSpeaker);
		} else if (do_start) {
			qWarning("PulseAudio: Starting echo: %s", qPrintable(edev));
			pa_buffer_attr buff;
			const pa_sample_spec *pss = pa_stream_get_sample_spec(pasSpeaker);
			const size_t sampleSize = (pss->format == PA_SAMPLE_FLOAT32NE) ? sizeof(float) : sizeof(short);
			const unsigned int iBlockLen = ((pai->iFrameSize * pss->rate) / SAMPLE_RATE) * pss->channels * static_cast<unsigned int>(sampleSize);
			buff.tlength = iBlockLen;
			buff.minreq = iBlockLen;
			buff.maxlength = -1;
			buff.prebuf = -1;
			buff.fragsize = iBlockLen;

			bEchoMultiCache = g.s.bEchoMulti;
			qsEchoCache = edev;

			pa_stream_connect_record(pasSpeaker, qPrintable(edev), &buff, PA_STREAM_ADJUST_LATENCY);
		}
	}
}
void AudioStats::on_Tick_timeout() { AudioInputPtr ai = g.ai; if (ai.get() == NULL || ! ai->sppPreprocess) return; bool nTalking = ai->isTransmitting(); QString txt; txt.sprintf("%06.2f dB",ai->dPeakMic); qlMicLevel->setText(txt); txt.sprintf("%06.2f dB",ai->dPeakSpeaker); qlSpeakerLevel->setText(txt); txt.sprintf("%06.2f dB",ai->dPeakSignal); qlSignalLevel->setText(txt); spx_int32_t ps_size = 0; speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_PSD_SIZE, &ps_size); STACKVAR(spx_int32_t, noise, ps_size); STACKVAR(spx_int32_t, ps, ps_size); speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_PSD, ps); speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_NOISE_PSD, noise); float s = 0.0f; float n = 0.0001f; int start = (ps_size * 300) / SAMPLE_RATE; int stop = (ps_size * 2000) / SAMPLE_RATE; for (int i=start;i<stop;i++) { s += sqrtf(static_cast<float>(ps[i])); n += sqrtf(static_cast<float>(noise[i])); } txt.sprintf("%06.3f",s / n); qlMicSNR->setText(txt); spx_int32_t v; speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_AGC_GAIN, &v); float fv = powf(10.0f, (static_cast<float>(v) / 20.0f)); txt.sprintf("%03.0f%%",100.0f / fv); qlMicVolume->setText(txt); txt.sprintf("%03.0f%%",ai->fSpeechProb * 100.0f); qlSpeechProb->setText(txt); txt.sprintf("%04.1f kbit/s",static_cast<float>(ai->iBitrate) / 1000.0f); qlBitrate->setText(txt); if (nTalking != bTalking) { bTalking = nTalking; QFont f = qlSpeechProb->font(); f.setBold(bTalking); qlSpeechProb->setFont(f); } if (g.uiDoublePush > 1000000) txt = tr(">1000 ms"); else txt.sprintf("%04llu ms",g.uiDoublePush / 1000); qlDoublePush->setText(txt); abSpeech->iBelow = iroundf(g.s.fVADmin * 32767.0f); abSpeech->iAbove = iroundf(g.s.fVADmax * 32767.0f); if (g.s.vsVAD == Settings::Amplitude) { abSpeech->iValue = iroundf((32767.f/96.0f) * (96.0f + ai->dPeakMic)); } else { abSpeech->iValue = iroundf(ai->fSpeechProb * 32767.0f); } abSpeech->update(); anwNoise->update(); if (aewEcho) 
aewEcho->updateGL(); }
// Draws the noise/signal spectrum widget: a filled blue polygon for
// the estimated noise PSD with the instantaneous signal PSD drawn as
// a red polyline over it.
void AudioNoiseWidget::paintEvent(QPaintEvent *) {
	QPainter paint(this);
	QPalette pal;
	paint.fillRect(rect(), pal.color(QPalette::Background));

	AudioInputPtr ai = g.ai;
	if (ai.get() == NULL || ! ai->sppPreprocess)
		return;

	QPolygonF poly;

	// Snapshot the preprocessor PSDs under qmSpeex; the audio thread
	// updates sppPreprocess concurrently.
	ai->qmSpeex.lock();
	spx_int32_t ps_size = 0;
	speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_PSD_SIZE, &ps_size);
	STACKVAR(spx_int32_t, noise, ps_size);
	STACKVAR(spx_int32_t, ps, ps_size);
	speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_PSD, ps);
	speex_preprocess_ctl(ai->sppPreprocess, SPEEX_PREPROCESS_GET_NOISE_PSD, noise);
	ai->qmSpeex.unlock();

	// Scale factors: one PSD bin per sx pixels, full widget height sy.
	qreal sx, sy;
	sx = (static_cast<float>(width()) - 1.0f) / static_cast<float>(ps_size);
	sy = static_cast<float>(height()) - 1.0f;

	poly << QPointF(0.0f, height() - 1);
	float fftmul = 1.0 / (32768.0);
	// Noise curve: fourth root compresses the dynamic range, then
	// normalize, amplify, clamp to [0,1], and flip (y grows downward).
	for (int i=0; i < ps_size; i++) {
		qreal xp, yp;
		xp = i * sx;
		yp = sqrtf(sqrtf(static_cast<float>(noise[i]))) - 1.0f;
		yp = yp * fftmul;
		yp = qMin<qreal>(yp * 3000.0f, 1.0f);
		yp = (1 - yp) * sy;
		poly << QPointF(xp, yp);
	}
	// Close the polygon along the bottom edge so it can be filled.
	poly << QPointF(width() - 1, height() - 1);
	poly << QPointF(0.0f, height() - 1);

	paint.setPen(Qt::blue);
	paint.setBrush(Qt::blue);
	paint.drawPolygon(poly);

	poly.clear();

	// Signal curve: same transform, drawn as an open polyline.
	for (int i=0;i < ps_size; i++) {
		qreal xp, yp;
		xp = i * sx;
		yp = sqrtf(sqrtf(static_cast<float>(ps[i]))) - 1.0f;
		yp = yp * fftmul;
		yp = qMin(yp * 3000.0, 1.0);
		yp = (1 - yp) * sy;
		poly << QPointF(xp, yp);
	}

	paint.setPen(Qt::red);
	paint.drawPolyline(poly);
}
// JACK realtime process callback: feeds captured samples to the
// active JackAudioInput and renders the mixed output into the JACK
// output ports (de-interleaving for multichannel). Returns 0 on
// success, 1 if an output port buffer is unavailable.
//
// Fix: the original wrote `QMutexLocker(&jai->qmMutex);` — an unnamed
// temporary that locks and unlocks immediately, so the critical
// sections actually ran without the lock. The lockers are now named
// so they live until the end of their blocks.
int JackAudioSystem::process_callback(jack_nframes_t nframes, void *arg) {
	JackAudioSystem * const jas = static_cast<JackAudioSystem*>(arg);

	if (jas && jas->bJackIsGood) {
		AudioInputPtr ai = g.ai;
		AudioOutputPtr ao = g.ao;
		JackAudioInput * const jai = dynamic_cast<JackAudioInput *>(ai.get());
		JackAudioOutput * const jao = dynamic_cast<JackAudioOutput *>(ao.get());

		if (jai && jai->isRunning() && jai->iMicChannels > 0 && !jai->isFinished()) {
			QMutexLocker inputLock(&jai->qmMutex);
			void *input = jack_port_get_buffer(jas->in_port, nframes);
			if (input != NULL) {
				jai->addMic(input, nframes);
			}
		}

		if (jao && jao->isRunning() && jao->iChannels > 0 && !jao->isFinished()) {
			QMutexLocker outputLock(&jao->qmMutex);
			jack_default_audio_sample_t *port_buffers[JACK_MAX_OUTPUT_PORTS];
			for (unsigned int i = 0; i < jao->iChannels; ++i) {
				port_buffers[i] = (jack_default_audio_sample_t*)jack_port_get_buffer(jas->out_ports[i], nframes);
				if (port_buffers[i] == NULL) {
					return 1;
				}
			}

			jack_default_audio_sample_t * const buffer = jas->output_buffer;
			memset(buffer, 0, sizeof(jack_default_audio_sample_t) * nframes * jao->iChannels);
			jao->mix(buffer, nframes);

			if (jao->iChannels == 1) {
				memcpy(port_buffers[0], buffer, sizeof(jack_default_audio_sample_t) * nframes);
			} else {
				// de-interleave channels
				for (unsigned int i = 0; i < nframes * jao->iChannels; ++i) {
					port_buffers[i % jao->iChannels][i / jao->iChannels] = buffer[i];
				}
			}
		}
	}
	return 0;
}