/*!
  Returns the number of frames represented by \a byteCount in this
  format.  Note that some rounding may occur if \a byteCount is not an
  exact multiple of the number of bytes per frame.  Each frame has one
  sample per channel.

  \sa framesForDuration()
*/
qint32 AudioFormat::framesForBytes(qint32 byteCount) const
{
    int const frameSize = bytesPerFrame();
    if (frameSize <= 0)
        return 0;               // invalid format: no frames
    return byteCount / frameSize;
}
/*!
  Returns the number of microseconds represented by \a bytes in this
  format.  Returns 0 if this format is not valid.  Note that some
  rounding may occur if \a bytes is not an exact multiple of the number
  of bytes per frame.

  NOTE(review): the scaling constant kHz is declared elsewhere; confirm
  it produces microseconds here as documented.

  \sa bytesForDuration()
*/
qint64 AudioFormat::durationForBytes(qint32 bytes) const
{
    if (!isValid() || bytes <= 0)
        return 0;

    // Truncating division first so only whole frames are counted.
    qint32 const wholeFrames = bytes / bytesPerFrame();
    return qint64(kHz * wholeFrames) / sampleRate();
}
void store (char const * source, size_t numFrames, qint16 * dest) { qint16 const * begin (reinterpret_cast<qint16 const *> (source)); for ( qint16 const * i = begin; i != begin + numFrames * (bytesPerFrame () / sizeof (qint16)); i += bytesPerFrame () / sizeof (qint16)) { switch (m_channel) { case Mono: *dest++ = *i; break; case Right: *dest++ = *(i + 1); break; case Both: // should be able to happen but if it // does we'll take left Q_ASSERT (Both == m_channel); case Left: *dest++ = *i; break; } } }
/*!
  QIODevice read callback: fills \a data with up to \a maxSize bytes of
  generated audio samples and returns the number of bytes produced.

  Runs a three-state machine:
   - Synchronizing: emits leading silence (m_silentFrames), then
     switches to Active;
   - Active: synthesizes the tone waveform from the itone[] symbol
     table (a fixed tone when m_tuning), applies a fade-out via m_amp
     near end of transmission, then keys a CW ID from icw[] when one is
     present (icw[0] > 0);
   - Idle: produces nothing.

  NOTE(review): the function-local statics j0 and toneFrequency0 make
  this non-reentrant and shared across instances - confirm only one
  Modulator streams at a time.
*/
qint64 Modulator::readData (char * data, qint64 maxSize)
{
  static int j0=-1;
  static double toneFrequency0;
  double toneFrequency;
  if(maxSize==0) return 0;
  Q_ASSERT (!(maxSize % qint64 (bytesPerFrame ()))); // no torn frames
  Q_ASSERT (isOpen ());
  qint64 numFrames (maxSize / bytesPerFrame ());
  qint16 * samples (reinterpret_cast<qint16 *> (data));
  qint16 * end (samples + numFrames * (bytesPerFrame () / sizeof (qint16)));
  qint64 framesGenerated (0);
  // qDebug () << "Modulator: " << numFrames << " requested, m_ic = " << m_ic << ", tune mode is " << m_tuning;
  // qDebug() << "C" << maxSize << numFrames << bytesPerFrame();
  switch (m_state)
    {
    case Synchronizing:
      {
        if (m_silentFrames)     // send silence up to first second
          {
            framesGenerated = qMin (m_silentFrames, numFrames);
            // NOTE(review): this fills the whole requested buffer with
            // silence but reports only framesGenerated frames - the
            // surplus zeroed frames are not counted as produced.
            for ( ; samples != end; samples = load (0, samples))
              {
                // silence
              }
            m_silentFrames -= framesGenerated;
            return framesGenerated * bytesPerFrame ();
          }
        Q_EMIT stateChanged ((m_state = Active));
        m_cwLevel = false;
        m_ramp = 0;             // prepare for CW wave shaping
      }
      // fall through

    case Active:
      {
        // Current symbol index; pinned to 0 while tuning so the symbol
        // never advances.
        unsigned isym (m_tuning ? 0 : m_ic / (4.0 * m_nsps)); // Actual fsample=48000
        if (isym >= m_symbolsLength && icw[0] > 0) // start CW condition
          {
            // Output the CW ID
            m_dphi = m_twoPi * m_frequency / m_frameRate;
            unsigned const ic0 = m_symbolsLength * 4 * m_nsps;
            unsigned j (0);
            while (samples != end)
              {
                j = (m_ic - ic0) / m_nspd + 1; // symbol of this sample
                bool level {bool (icw[j])};
                m_phi += m_dphi;
                if (m_phi > m_twoPi) m_phi -= m_twoPi;
                // SOFT_KEYING shapes the keying envelope with the ramp
                // value; hard keying switches between full scale and 0.
                qint16 sample ((SOFT_KEYING ? qAbs (m_ramp - 1) : (m_ramp ? 32767 : 0)) * qSin (m_phi));
                if (int (j) <= icw[0] && j < NUM_CW_SYMBOLS) // stop condition
                  {
                    samples = load (postProcessSample (sample), samples);
                    ++framesGenerated;
                    ++m_ic;
                  }
                else
                  {
                    // CW ID complete - stop transmitting.
                    Q_EMIT stateChanged ((m_state = Idle));
                    return framesGenerated * bytesPerFrame ();
                  }
                // adjust ramp
                if ((m_ramp != 0 && m_ramp != std::numeric_limits<qint16>::min ())
                    || level != m_cwLevel)
                  {
                    // either ramp has terminated at max/min or
                    // direction has changed
                    m_ramp += RAMP_INCREMENT; // ramp
                  }
                // if (m_cwLevel != level)
                //   {
                //     qDebug () << "@m_ic:" << m_ic << "icw[" << j << "] =" << icw[j] << "@" << framesGenerated << "in numFrames:" << numFrames;
                //   }
                m_cwLevel = level;
              }
            return framesGenerated * bytesPerFrame ();
          }

        double const baud (12000.0 / m_nsps);
        // fade out parameters (no fade out for tuning)
        unsigned const i0 = m_tuning ? 999 * m_nsps : (m_symbolsLength - 0.017) * 4.0 * m_nsps;
        unsigned const i1 = m_tuning ? 999 * m_nsps : m_symbolsLength * 4.0 * m_nsps;
        for (unsigned i = 0; i < numFrames && m_ic <= i1; ++i)
          {
            isym = m_tuning ? 0 : m_ic / (4.0 * m_nsps); //Actual fsample=48000
            if (isym != m_isym0)
              {
                // symbol changed - recompute tone frequency and the
                // per-sample phase increment
                // qDebug () << "@m_ic:" << m_ic << "itone[" << isym << "] =" << itone[isym] << "@" << i << "in numFrames:" << numFrames;
                if(m_toneSpacing==0.0)
                  {
                    toneFrequency0=m_frequency + itone[isym]*baud;
                  }
                else
                  {
                    toneFrequency0=m_frequency + itone[isym]*m_toneSpacing;
                  }
                m_dphi = m_twoPi * toneFrequency0 / m_frameRate;
                m_isym0 = isym;
              }
            // Optional pseudo-random frequency spread, re-drawn every
            // 480 frames; the sum of two uniform deviates gives a
            // roughly triangular spread centred on toneFrequency0.
            int j=m_ic/480;
            if(m_fSpread>0.0 and j!=j0)
              {
                float x1=(float)rand()/RAND_MAX;
                float x2=(float)rand()/RAND_MAX;
                toneFrequency = toneFrequency0 + 0.5*m_fSpread*(x1+x2-1.0);
                m_dphi = m_twoPi * toneFrequency / m_frameRate;
                j0=j;
              }
            m_phi += m_dphi;
            if (m_phi > m_twoPi) m_phi -= m_twoPi;
            if (m_ic > i0) m_amp = 0.98 * m_amp; // exponential fade out
            if (m_ic > i1) m_amp = 0.0;          // past end: full mute
            samples = load (postProcessSample (m_amp * qSin (m_phi)), samples);
            ++framesGenerated;
            ++m_ic;
          }

        if (m_amp == 0.0) // TODO G4WJS: compare double with zero might not be wise
          {
            if (icw[0] == 0)
              {
                // no CW ID to send
                Q_EMIT stateChanged ((m_state = Idle));
                return framesGenerated * bytesPerFrame ();
              }
            m_phi = 0.0;
          }
        // done for this chunk - continue on next call
        return framesGenerated * bytesPerFrame ();
      }
      // fall through

    case Idle:
      break;
    }
  Q_ASSERT (Idle == m_state);
  return 0;
}
/*!
  Returns the number of bytes required for \a frameCount frames of this
  format.  Returns 0 if this format is not valid.

  \sa bytesForDuration()
*/
qint32 AudioFormat::bytesForFrames(qint32 frameCount) const
{
    qint32 const frameSize = bytesPerFrame();
    return frameCount * frameSize;
}
/*!
  Returns the number of bytes required for this audio format for
  \a duration microseconds.  Returns 0 if this format is not valid.
  Note that some rounding may occur if \a duration is not an exact
  fraction of the sampleRate().

  \sa durationForBytes()
*/
qint32 AudioFormat::bytesForDuration(qint64 duration) const
{
    qint32 const frames = framesForDuration(duration);
    return frames * bytesPerFrame();
}