// Selects an audio format usable by the configured devices and prepares the
// engine's working buffer for the current mode:
//  - file playback:   buffer lengths come from the loaded file; just re-announce
//  - tone generation: synthesise the tone into a freshly sized buffer
//  - live recording:  size a zeroed buffer and create the QAudioInput
// Emits buffer/data length change signals as it goes and, on failure, a
// mode-specific errorMessage(). Returns true when a device setup was performed.
bool Engine::initialize() {
    bool result = false;
    // Snapshot the current format so we can detect whether selectFormat()
    // actually changed it.
    QAudioFormat format = m_format;
    if (selectFormat()) {
        // NOTE(review): when selectFormat() succeeds but leaves the format
        // unchanged, nothing is rebuilt and result stays false — presumably
        // callers treat that as "already initialized"; confirm at call sites.
        if (m_format != format) {
            // Format changed: tear down any existing audio devices first.
            resetAudioDevices();
            if (m_file) {
                // Playing from a file: buffer is managed elsewhere, only the
                // observers need to be told the (new) lengths.
                emit bufferLengthChanged(bufferLength());
                emit dataLengthChanged(dataLength());
                emit bufferChanged(0, 0, m_buffer);
                setRecordPosition(bufferLength());
                result = true;
            } else {
                // Size the buffer for BufferDurationUs worth of audio in the
                // newly selected format, zero-filled.
                m_bufferLength = audioLength(m_format, BufferDurationUs);
                m_buffer.resize(m_bufferLength);
                m_buffer.fill(0);
                emit bufferLengthChanged(bufferLength());
                if (m_generateTone) {
                    if (0 == m_tone.endFreq) {
                        // Default the sweep end frequency to the lower of the
                        // configured maximum and the format's Nyquist limit.
                        const qreal nyquist = nyquistFrequency(m_format);
                        m_tone.endFreq = qMin(qreal(SpectrumHighFreq), nyquist);
                    }
                    // Call function defined in utils.h, at global scope
                    ::generateTone(m_tone, m_format, m_buffer);
                    m_dataLength = m_bufferLength;
                    emit dataLengthChanged(dataLength());
                    emit bufferChanged(0, m_dataLength, m_buffer);
                    setRecordPosition(m_bufferLength);
                    result = true;
                } else {
                    // Live recording: empty buffer plus an input device that
                    // ticks audioNotify() every NotifyIntervalMs.
                    emit bufferChanged(0, 0, m_buffer);
                    m_audioInput = new QAudioInput(m_audioInputDevice, m_format, this);
                    m_audioInput->setNotifyInterval(NotifyIntervalMs);
                    result = true;
                }
            }
            // An output device is required in every mode.
            m_audioOutput = new QAudioOutput(m_audioOutputDevice, m_format, this);
            m_audioOutput->setNotifyInterval(NotifyIntervalMs);
        }
    } else {
        // No usable format found: report an error tailored to the mode.
        if (m_file)
            emit errorMessage(tr("Audio format not supported"),
                              formatToString(m_format));
        else if (m_generateTone)
            emit errorMessage(tr("No suitable format found"), "");
        else
            emit errorMessage(tr("No common input / output format found"), "");
    }

    ENGINE_DEBUG << "Engine::initialize" << "m_bufferLength" << m_bufferLength;
    ENGINE_DEBUG << "Engine::initialize" << "m_dataLength" << m_dataLength;
    ENGINE_DEBUG << "Engine::initialize" << "format" << m_format;

    return result;
}
// Periodic tick from the active QAudioInput/QAudioOutput (every
// NotifyIntervalMs). Advances the record/play position and, when enough data
// is available, runs level and spectrum analysis on the most recent window.
void Engine::audioNotify() {
    switch (m_mode) {
    case QAudio::AudioInput: {
            // Record position is derived from elapsed processed time, clamped
            // to the buffer so we never report past its end.
            const qint64 recordPosition = qMin(m_bufferLength, audioLength(m_format, m_audioInput->processedUSecs()));
            setRecordPosition(recordPosition);
            // Analyse the trailing window of captured data once there is
            // enough of it for each analyser.
            const qint64 levelPosition = m_dataLength - m_levelBufferLength;
            if (levelPosition >= 0)
                calculateLevel(levelPosition, m_levelBufferLength);
            if (m_dataLength >= m_spectrumBufferLength) {
                const qint64 spectrumPosition = m_dataLength - m_spectrumBufferLength;
                calculateSpectrum(spectrumPosition);
            }
            emit bufferChanged(0, m_dataLength, m_buffer);
        }
        break;
    case QAudio::AudioOutput: {
            const qint64 playPosition = audioLength(m_format, m_audioOutput->processedUSecs());
            setPlayPosition(qMin(bufferLength(), playPosition));
            // Start offsets of the analysis windows that end at the play head.
            const qint64 levelPosition = playPosition - m_levelBufferLength;
            const qint64 spectrumPosition = playPosition - m_spectrumBufferLength;
            if (m_file) {
                // Refill m_buffer from the analysis file whenever playback has
                // moved past the currently buffered region, or the buffered
                // region is too short for either analysis window.
                if (levelPosition > m_bufferPosition ||
                    spectrumPosition > m_bufferPosition ||
                    qMax(m_levelBufferLength, m_spectrumBufferLength) > m_dataLength) {
                    m_bufferPosition = 0;
                    m_dataLength = 0;
                    // Data needs to be read into m_buffer in order to be analysed
                    const qint64 readPos = qMax(qint64(0), qMin(levelPosition, spectrumPosition));
                    const qint64 readEnd = qMin(m_analysisFile->size(), qMax(levelPosition + m_levelBufferLength, spectrumPosition + m_spectrumBufferLength));
                    // Read a little extra (one waveform window) beyond what the
                    // analysers strictly need.
                    const qint64 readLen = readEnd - readPos + audioLength(m_format, WaveformWindowDuration);
                    qDebug() << "Engine::audioNotify [1]" << "analysisFileSize" << m_analysisFile->size() << "readPos" << readPos << "readLen" << readLen;
                    // Seek past the WAV header to the requested audio offset.
                    if (m_analysisFile->seek(readPos + m_analysisFile->headerLength())) {
                        m_buffer.resize(readLen);
                        m_bufferPosition = readPos;
                        m_dataLength = m_analysisFile->read(m_buffer.data(), readLen);
                        qDebug() << "Engine::audioNotify [2]" << "bufferPosition" << m_bufferPosition << "dataLength" << m_dataLength;
                    } else {
                        qDebug() << "Engine::audioNotify [2]" << "file seek error";
                    }
                    emit bufferChanged(m_bufferPosition, m_dataLength, m_buffer);
                }
            } else {
                // Non-file playback: stop once everything has been played.
                if (playPosition >= m_dataLength)
                    stopPlayback();
            }
            // Run the analysers only when their whole window lies inside the
            // currently buffered region.
            if (levelPosition >= 0 && levelPosition + m_levelBufferLength < m_bufferPosition + m_dataLength)
                calculateLevel(levelPosition, m_levelBufferLength);
            if (spectrumPosition >= 0 && spectrumPosition + m_spectrumBufferLength < m_bufferPosition + m_dataLength)
                calculateSpectrum(spectrumPosition);
        }
        break;
    }
}
// Replaces the attribute's backing buffer, notifying the change arbiter about
// the removal of the old buffer before the swap and the addition of the new
// one after it. The notification order (remove -> assign -> add) is relied on
// by backend observers; do not reorder.
void QAbstractAttribute::setBuffer(QAbstractBuffer *buffer) {
    Q_D(QAbstractAttribute);
    // No-op when the buffer is unchanged.
    if (d->m_buffer == buffer)
        return;
    // Tell observers the old buffer is being detached from this node.
    if (d->m_buffer && d->m_changeArbiter) {
        QScenePropertyChangePtr change(new QScenePropertyChange(NodeRemoved, QSceneChange::Node, id()));
        change->setPropertyName("buffer");
        change->setValue(QVariant::fromValue(d->m_buffer->id()));
        d->notifyObservers(change);
    }
    // We need to add it as a child of the current node if it has been declared inline
    // Or not previously added as a child of the current node so that
    // 1) The backend gets notified about it's creation
    // 2) When the current node is destroyed, it gets destroyed as well
    if (buffer && !buffer->parent())
        buffer->setParent(this);
    d->m_buffer = buffer;
    // Emit bufferChanged() without triggering the arbiter's automatic
    // notification machinery — the explicit changes above/below cover it.
    const bool blocked = blockNotifications(true);
    emit bufferChanged();
    blockNotifications(blocked);
    // Tell observers the new buffer is now attached.
    if (d->m_buffer && d->m_changeArbiter) {
        QScenePropertyChangePtr change(new QScenePropertyChange(NodeAdded, QSceneChange::Node, id()));
        change->setPropertyName("buffer");
        change->setValue(QVariant::fromValue(buffer->id()));
        d->notifyObservers(change);
    }
}
// Switches the input widget to another IRC buffer. The outgoing buffer's edit
// state is stashed in d.states and the incoming buffer's previously stashed
// state (if any) is restored, then bufferChanged() is emitted.
void TextInput::setBuffer(IrcBuffer* buffer)
{
    // Nothing to do when the buffer is already current.
    if (d.buffer == buffer)
        return;

    // Re-wire the parser from the old buffer to the new one.
    unbind(d.buffer, d.parser);
    bind(buffer, d.parser);

    // Preserve the state of the buffer we are leaving.
    if (d.buffer)
        d.states.insert(d.buffer, saveState());

    d.buffer = buffer;

    // Bring back whatever state the new buffer had when we last left it.
    if (buffer)
        restoreState(d.states.value(buffer));

    emit bufferChanged(buffer);
}
void BufferWidget::doTabChanged(int i) { Q_EMIT bufferChanged(); Pane *p = dynamic_cast<Pane *>(tabWidget->widget(i)); if(p != nullptr) { QTextCursor curs = p->getBuffer()->textCursor(); p->getBuffer()->setFocus(Qt::OtherFocusReason); Q_EMIT pathChanged(p->getBuffer()->getPath()); Q_EMIT encodingChanged(p->getBuffer()->getEncoding()); } else { Q_EMIT pathChanged(""); Q_EMIT encodingChanged("UTF-8"); } }
void AudioCdRecord::checkProgressLine( const QString & str ) { QRegExp reg; int pos; bool ok; reg.setPattern( "(\\d+)(?:\\s*)of\\s+\\d+\\s+(KB|MB|GB)" ); pos = reg.indexIn( str ); if( pos > -1 ) p->written_size_int = reg.cap(1).toInt(&ok); reg.setPattern( "(\\d+)(?:\\s*)(KB|MB|GB)" ); pos = reg.indexIn( str ); if( pos > -1 ) p->image_size_int = reg.cap(1).toInt(&ok); reg.setPattern( "fifo\\s+(\\d+)(?:\\s*)%" ); pos = reg.indexIn( str ); if( pos > -1 ) p->ring_buffer_percent_int = reg.cap(1).toInt(&ok); reg.setPattern( "buf\\s+(\\d+)(?:\\s*)%" ); pos = reg.indexIn( str ); if( pos > -1 ) p->buffer_percent_int = reg.cap(1).toInt(&ok); reg.setPattern( "(\\d+\\.\\d)(?:\\s*)x\\.$" ); pos = reg.indexIn( str ); if( pos > -1 ) p->written_speed_int = reg.cap(1).toDouble(&ok); if( p->image_size_int > 0 ) p->process_precent = 100 * p->written_size_int / p->image_size_int; emit ringBufferChanged( p->ring_buffer_percent_int ); emit bufferChanged( p->buffer_percent_int ); emit writeSizeChenged( p->written_size_int ); emit writeSpeedChanged( p->written_speed_int ); emit percentChanged( p->process_precent ); }
// Adopts `buffer` as the transmission's current buffer and notifies
// listeners via bufferChanged(). No ownership transfer is visible here —
// the caller presumably keeps the buffer alive (verify against callers).
void ViAudioTransmission::setBuffer(ViBuffer *buffer)
{
    mBuffer = buffer;
    emit bufferChanged();
}
// Constructs a buffer node: zero inputs, one output, with a player sized to
// the simulation's block size and sample rate.
BufferNode::BufferNode(std::shared_ptr<Simulation> simulation)
    : Node(Lav_OBJTYPE_BUFFER_NODE, simulation, 0, 1),
      player(simulation->getBlockSize(), simulation->getSr())
{
    appendOutputConnection(0, 1);
    // Re-run bufferChanged() whenever the buffer property is replaced.
    // Capturing `this` explicitly: the callback is stored on a property owned
    // by this node, so it cannot outlive the node.
    getProperty(Lav_BUFFER_BUFFER).setPostChangedCallback([this]() { bufferChanged(); });
    end_callback = std::make_shared<Callback<void()>>();
}
// Updates the buffering status text and notifies listeners.
//
// Fix: the source string used "%4" as its only place marker. It worked only
// because QString::arg() substitutes the lowest-numbered marker present, but
// it misleads translators and lupdate tooling; "%1" is the correct marker.
// NOTE: this changes the tr() lookup key, so existing translation catalogs
// need this entry refreshed. (Also dropped the redundant QString() wrapper.)
void GrlMedia::bufferingProgress(int progress)
{
    lb_buffer = tr("Buffering %1%").arg(progress);
    emit bufferChanged(lb_buffer);
}