Example #1
qint64 audioLength(const QAudioFormat &format, qint64 microSeconds)
{
   qint64 result = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8))
       * microSeconds / 1000000;
   // Round the byte count down to a whole frame (channels * bytes per sample).
   result -= result % (format.channelCount() * (format.sampleSize() / 8));
   return result;
}
Example #2
void Generator::generateData(const QAudioFormat &format, qint64 durationUs, int sampleRate)
{
	const int channelBytes = format.sampleSize() / 8;

	qint64 length = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8))
	                * durationUs / 1000000;

	m_buffer.resize(length);
	unsigned char *ptr = reinterpret_cast<unsigned char *>(m_buffer.data());
	int sampleIndex = 0;

	PHDBG() << "Type :" << format.sampleType() << " Size : " << format.sampleSize() << " Channel Count : " << format.channelCount();

	// Note: the "sampleRate" parameter is used as the tone frequency here, and
	// samples are always written as 16-bit signed little-endian values.
	while (length) {
		const qreal x = qSin(2 * M_PI * sampleRate * qreal(sampleIndex % format.sampleRate()) / format.sampleRate());
		for (int i = 0; i < format.channelCount(); ++i) {
			qint16 value = static_cast<qint16>(x * 32767);
			qToLittleEndian<qint16>(value, ptr);

			ptr += channelBytes;
			length -= channelBytes;
		}
		++sampleIndex;
	}
}
Example #3
bool WaveFileWriter::writeHeader(const QAudioFormat &format)
{
    // check if format is supported
    if (format.byteOrder() == QAudioFormat::BigEndian || format.sampleType() != QAudioFormat::SignedInt)
        return false;

    CombinedHeader header;
    memset(&header, 0, HeaderLength);

#ifndef Q_LITTLE_ENDIAN
    // only implemented for LITTLE ENDIAN
    return false;
#else
    // RIFF header
    memcpy(header.riff.descriptor.id, "RIFF", 4);
    header.riff.descriptor.size = 0; // this will be updated with correct duration:
                                     // m_dataLength + HeaderLength - 8
    // WAVE header
    memcpy(header.riff.type, "WAVE", 4);
    memcpy(header.wave.descriptor.id, "fmt ", 4);
    header.wave.descriptor.size = quint32(16);
    header.wave.audioFormat = quint16(1);
    header.wave.numChannels = quint16(format.channelCount());
    header.wave.sampleRate = quint32(format.sampleRate());
    header.wave.byteRate = quint32(format.sampleRate() * format.channelCount() * format.sampleSize() / 8);
    header.wave.blockAlign = quint16(format.channelCount() * format.sampleSize() / 8);
    header.wave.bitsPerSample = quint16(format.sampleSize());

    // DATA header
    memcpy(header.data.descriptor.id,"data", 4);
    header.data.descriptor.size = 0; // this will be updated with correct data length: m_dataLength

    return (file.write(reinterpret_cast<const char *>(&header), HeaderLength) == HeaderLength);
#endif
}
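The two descriptor sizes written as zero above are meant to be patched once the amount of PCM data (m_dataLength) is known. Below is a minimal sketch of such a finalisation step reusing the same CombinedHeader layout; the method name finalizeHeader() is an assumption, not part of the original class.

// Hypothetical companion to writeHeader(): rewrite the RIFF and data chunk
// sizes once m_dataLength (bytes of PCM payload written) is known.
bool WaveFileWriter::finalizeHeader()
{
    CombinedHeader header;
    if (!file.seek(0)
        || file.read(reinterpret_cast<char *>(&header), HeaderLength) != HeaderLength)
        return false;

    header.riff.descriptor.size = quint32(m_dataLength + HeaderLength - 8);
    header.data.descriptor.size = quint32(m_dataLength);

    return file.seek(0)
        && file.write(reinterpret_cast<const char *>(&header), HeaderLength) == HeaderLength;
}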
Example #4
File: utils.cpp Project: AlexSeiden/dLite
// Returns position in bytes, given position in microSeconds.
qint64 audioLength(const QAudioFormat &format, qint64 microSeconds)
{
    // format.sampleRate() is in Hz, format.sampleSize() in bits
   qint64 result = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8))
       * microSeconds / 1000000;
    // Round the byte count down to a whole frame (channels * bytes per sample)
    // so the position always starts at channel 0.
    result -= result % (format.channelCount() * (format.sampleSize() / 8));
   return result;
}
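For a concrete sense of the arithmetic, here is a small illustrative caller (the format values are arbitrary): one second of 44100 Hz, stereo, 16-bit PCM works out to 44100 * 2 * 2 = 176400 bytes, which is already a whole number of frames.

#include <QAudioFormat>

// Illustrative usage of the audioLength() helper shown above.
qint64 oneSecondOfCdAudioInBytes()
{
    QAudioFormat format;
    format.setSampleRate(44100);   // Hz
    format.setChannelCount(2);     // stereo
    format.setSampleSize(16);      // bits per sample
    return audioLength(format, 1000000); // 1,000,000 us -> 176400 bytes
}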
Example #5
File: aafm.cpp Project: Vasyaaa/auFM
void Generator::generateData(const QAudioFormat &format, qint64 durationUs, int sampleRate)
{
    const int channelBytes = format.sampleSize() / 8; // bytes per sample
    const int sampleBytes = format.channelCount() * channelBytes; // bytes per frame (all channels)

    qint64 length = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8))
                        * durationUs / 1000000;

    Q_ASSERT(length % sampleBytes == 0);
    Q_UNUSED(sampleBytes) // suppress warning in release builds

    m_buffer.resize(length);
    unsigned char *ptr = reinterpret_cast<unsigned char *>(m_buffer.data());
    int sampleIndex = 0;

    while (length)
    {
        const qreal x = qSin(2 * M_PI * sampleRate * qreal(sampleIndex % format.sampleRate()) / format.sampleRate());
        for (int i=0; i<format.channelCount(); ++i)
        {
            if (format.sampleSize() == 8 && format.sampleType() == QAudioFormat::UnSignedInt)
            {
                const quint8 value = static_cast<quint8>((1.0 + x) / 2 * 255);
                *reinterpret_cast<quint8*>(ptr) = value;
            }
            else if (format.sampleSize() == 8 && format.sampleType() == QAudioFormat::SignedInt)
            {
                const qint8 value = static_cast<qint8>(x * 127);
                *reinterpret_cast<quint8*>(ptr) = value;
            }
            else if (format.sampleSize() == 16 && format.sampleType() == QAudioFormat::UnSignedInt)
            {
                quint16 value = static_cast<quint16>((1.0 + x) / 2 * 65535);
                if (format.byteOrder() == QAudioFormat::LittleEndian)
                    qToLittleEndian<quint16>(value, ptr);
                else
                    qToBigEndian<quint16>(value, ptr);
            }
            else if (format.sampleSize() == 16 && format.sampleType() == QAudioFormat::SignedInt)
            {
                qint16 value = static_cast<qint16>(x * 32767);
                if (format.byteOrder() == QAudioFormat::LittleEndian)
                    qToLittleEndian<qint16>(value, ptr);
                else
                    qToBigEndian<qint16>(value, ptr);
            }

            ptr += channelBytes; // advance by one channel's sample
            length -= channelBytes;
        }
        ++sampleIndex;
    }
}
Example #6
QString formatToString(const QAudioFormat &format)
{
    QString result;

    if (QAudioFormat() != format) {
        if (format.codec() == "audio/pcm") {
            Q_ASSERT(format.sampleType() == QAudioFormat::SignedInt);

            const QString formatEndian = (format.byteOrder() == QAudioFormat::LittleEndian)
                ?   QString("LE") : QString("BE");

            QString formatType;
            switch (format.sampleType()) {
            case QAudioFormat::SignedInt:
                formatType = "signed";
                break;
            case QAudioFormat::UnSignedInt:
                formatType = "unsigned";
                break;
            case QAudioFormat::Float:
                formatType = "float";
                break;
            case QAudioFormat::Unknown:
                formatType = "unknown";
                break;
            }

            QString formatChannels = QString("%1 channels").arg(format.channelCount());
            switch (format.channelCount()) {
            case 1:
                formatChannels = "mono";
                break;
            case 2:
                formatChannels = "stereo";
                break;
            }

            result = QString("%1 Hz %2 bit %3 %4 %5")
                .arg(format.sampleRate())
                .arg(format.sampleSize())
                .arg(formatType)
                .arg(formatEndian)
                .arg(formatChannels);
        } else {
            result = format.codec();
        }
    }

    return result;
}
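An illustrative call of the function above; the resulting string follows the "%1 Hz %2 bit %3 %4 %5" pattern it builds.

#include <QAudioFormat>
#include <QDebug>

void printExampleFormat()
{
    QAudioFormat f;
    f.setCodec("audio/pcm");
    f.setSampleRate(44100);
    f.setSampleSize(16);
    f.setSampleType(QAudioFormat::SignedInt);
    f.setByteOrder(QAudioFormat::LittleEndian);
    f.setChannelCount(2);
    qDebug().noquote() << formatToString(f); // "44100 Hz 16 bit signed LE stereo"
}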
Example #7
ViAudioFormat::ViAudioFormat(const QAudioFormat &other)
{
	if(other.sampleType() == QAudioFormat::SignedInt)
	{
		setSampleType(ViAudioFormat::SignedInt);
	}
	else if(other.sampleType() == QAudioFormat::UnSignedInt)
	{
		setSampleType(ViAudioFormat::UnSignedInt);
	}
	else if(other.sampleType() == QAudioFormat::Float)
	{
		setSampleType(ViAudioFormat::Float);
	}
	else
	{
		setSampleType(ViAudioFormat::Unknown);
	}

	if(other.byteOrder() == QAudioFormat::BigEndian)
	{
		setByteOrder(ViAudioFormat::BigEndian);
	}
	else
	{
		setByteOrder(ViAudioFormat::LittleEndian);
	}
	
	mQuality = ViAudioFormat::Average;
	mSampleSize = other.sampleSize();
	mSampleRate = other.sampleRate();
	mChannelCount = other.channelCount();
	mCodec = NULL;
}
Example #8
void WaveRenderArea::setAudioFormat(const QAudioFormat &format)
{
  Q_D(WaveRenderArea);
  Q_ASSERT(format.channelCount() == 1);
  d->audioFormat = format;
  d->maxAmplitude = AudioInputDevice::maxAmplitudeForFormat(d->audioFormat);
}
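AudioInputDevice::maxAmplitudeForFormat() is not part of this snippet; the sketch below shows the kind of lookup such a helper typically performs, keyed on sample size and sample type. Only the call above comes from the original code, the body here is an assumption.

// Hypothetical helper: largest absolute sample value representable in the format.
quint32 AudioInputDevice::maxAmplitudeForFormat(const QAudioFormat &format)
{
    switch (format.sampleSize()) {
    case 8:
        return format.sampleType() == QAudioFormat::UnSignedInt ? 255 : 127;
    case 16:
        return format.sampleType() == QAudioFormat::UnSignedInt ? 65535 : 32767;
    case 32:
        if (format.sampleType() == QAudioFormat::Float)
            return 0x7fffffff; // normalised floats treated as full scale
        return format.sampleType() == QAudioFormat::UnSignedInt ? 0xffffffff
                                                                : 0x7fffffff;
    default:
        return 0; // unsupported sample size
    }
}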
Example #9
void AudioReflector::processLocalAudio(unsigned int sampleTime, const QByteArray& samples, const QAudioFormat& format) {
    bool processLocalAudio = true; // Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingProcessLocalAudio)
    if (processLocalAudio) {
        const int NUM_CHANNELS_INPUT = 1;
        const int NUM_CHANNELS_OUTPUT = 2;
        const int EXPECTED_SAMPLE_RATE = 24000;
        if (format.channelCount() == NUM_CHANNELS_INPUT && format.sampleRate() == EXPECTED_SAMPLE_RATE) {
            QAudioFormat outputFormat = format;
            outputFormat.setChannelCount(NUM_CHANNELS_OUTPUT);
            QByteArray stereoInputData(samples.size() * NUM_CHANNELS_OUTPUT, 0);
            int numberOfSamples = (samples.size() / sizeof(int16_t));
            int16_t* monoSamples = (int16_t*)samples.data();
            int16_t* stereoSamples = (int16_t*)stereoInputData.data();
            
            for (int i = 0; i < numberOfSamples; i++) {
                stereoSamples[i* NUM_CHANNELS_OUTPUT] = monoSamples[i] * _localAudioAttenuationFactor;
                stereoSamples[(i * NUM_CHANNELS_OUTPUT) + 1] = monoSamples[i] * _localAudioAttenuationFactor;
            }
            _localAudioDelays.clear();
            _localEchoesSuppressed.clear();
            echoAudio(LOCAL_AUDIO, sampleTime, stereoInputData, outputFormat);
            _localEchoesCount = _localAudioDelays.size();
            _localEchoesSuppressedCount = _localEchoesSuppressed.size();
        }
    }
}
Example #10
AudioStreamBasicDescription toAudioStreamBasicDescription(QAudioFormat const& audioFormat)
{
    AudioStreamBasicDescription sf;

    sf.mFormatFlags         = kAudioFormatFlagIsPacked;
    sf.mSampleRate          = audioFormat.sampleRate();
    sf.mFramesPerPacket     = 1;
    sf.mChannelsPerFrame    = audioFormat.channelCount();
    sf.mBitsPerChannel      = audioFormat.sampleSize();
    sf.mBytesPerFrame       = sf.mChannelsPerFrame * (sf.mBitsPerChannel / 8);
    sf.mBytesPerPacket      = sf.mFramesPerPacket * sf.mBytesPerFrame;
    sf.mFormatID            = kAudioFormatLinearPCM;

    switch (audioFormat.sampleType()) {
    case QAudioFormat::SignedInt:
        sf.mFormatFlags |= kAudioFormatFlagIsSignedInteger;
        break;
    case QAudioFormat::UnSignedInt: /* default */
        break;
    case QAudioFormat::Float:
        sf.mFormatFlags |= kAudioFormatFlagIsFloat;
        break;
    case QAudioFormat::Unknown:
    default:
        break;
    }

    if (audioFormat.byteOrder() == QAudioFormat::BigEndian)
        sf.mFormatFlags |= kAudioFormatFlagIsBigEndian;

    return sf;
}
Example #11
/* channelCount() API property test. */
void tst_QAudioFormat::checkChannelCount()
{
    // channels is the old name for channelCount, so
    // they should always be equal
    QAudioFormat audioFormat;
    QVERIFY(audioFormat.channelCount() == -1);
    QVERIFY(audioFormat.channels() == -1);

    audioFormat.setChannelCount(123);
    QVERIFY(audioFormat.channelCount() == 123);
    QVERIFY(audioFormat.channels() == 123);

    audioFormat.setChannels(5);
    QVERIFY(audioFormat.channelCount() == 5);
    QVERIFY(audioFormat.channels() == 5);
}
Example #12
pa_sample_spec audioFormatToSampleSpec(const QAudioFormat &format)
{
    pa_sample_spec  spec;

    spec.rate = format.sampleRate();
    spec.channels = format.channelCount();

    if (format.sampleSize() == 8) {
        spec.format = PA_SAMPLE_U8;
    } else if (format.sampleSize() == 16) {
        switch (format.byteOrder()) {
        case QAudioFormat::BigEndian: spec.format = PA_SAMPLE_S16BE; break;
        case QAudioFormat::LittleEndian: spec.format = PA_SAMPLE_S16LE; break;
        }
    } else if (format.sampleSize() == 24) {
        switch (format.byteOrder()) {
        case QAudioFormat::BigEndian: spec.format = PA_SAMPLE_S24BE; break;
        case QAudioFormat::LittleEndian: spec.format = PA_SAMPLE_S24LE; break;
        }
    } else if (format.sampleSize() == 32) {
        switch (format.byteOrder()) {
        case QAudioFormat::BigEndian:
            format.sampleType() == QAudioFormat::Float ? spec.format = PA_SAMPLE_FLOAT32BE : spec.format = PA_SAMPLE_S32BE;
            break;
        case QAudioFormat::LittleEndian:
            format.sampleType() == QAudioFormat::Float ? spec.format = PA_SAMPLE_FLOAT32LE : spec.format = PA_SAMPLE_S32LE;
            break;
        }
    } else {
        spec.format = PA_SAMPLE_INVALID;
    }

    return spec;
}
Example #13
void xmppClient::slotAudioModeChanged(QIODevice::OpenMode mode)
{
    QXmppCall *call = qobject_cast<QXmppCall*>(sender());
    Q_ASSERT(call);
    QXmppRtpAudioChannel *channel = call->audioChannel();

    // prepare audio format
    QAudioFormat format;
    format.setSampleRate(channel->payloadType().clockrate());
    format.setChannelCount(channel->payloadType().channels());
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    // the size in bytes of the audio buffers to/from sound devices
    // 160 ms seems to be the minimum to work consistently on Linux/Mac/Windows
    const int bufferSize = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8) * 160) / 1000;

    if (mode & QIODevice::ReadOnly) {
        // initialise audio output
        QAudioOutput *audioOutput = new QAudioOutput(format, this);
        audioOutput->setBufferSize(bufferSize);
        audioOutput->start(channel);
    }

    if (mode & QIODevice::WriteOnly) {
        // initialise audio input
        QAudioInput *audioInput = new QAudioInput(format, this);
        audioInput->setBufferSize(bufferSize);
        audioInput->start(channel);
    }
}
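Plugging typical telephony-style values into the buffer-size formula above makes the 160 ms figure concrete (the numbers are illustrative only):

// 16000 Hz * 1 channel * 2 bytes per sample * 160 ms / 1000 = 5120 bytes
const int sampleRate     = 16000;
const int channelCount   = 1;
const int bytesPerSample = 16 / 8;
const int bufferSize     = (sampleRate * channelCount * bytesPerSample * 160) / 1000; // 5120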
Example #14
SLDataFormat_PCM QOpenSLESEngine::audioFormatToSLFormatPCM(const QAudioFormat &format)
{
    SLDataFormat_PCM format_pcm;
    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = format.channelCount();
    format_pcm.samplesPerSec = format.sampleRate() * 1000; // OpenSL ES expects milliHertz
    format_pcm.bitsPerSample = format.sampleSize();
    format_pcm.containerSize = format.sampleSize();
    format_pcm.channelMask = (format.channelCount() == 1 ?
                                  SL_SPEAKER_FRONT_CENTER :
                                  SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
    format_pcm.endianness = (format.byteOrder() == QAudioFormat::LittleEndian ?
                                 SL_BYTEORDER_LITTLEENDIAN :
                                 SL_BYTEORDER_BIGENDIAN);
    return format_pcm;

}
Example #15
void Generator::generateData(const QAudioFormat &format, qint64 durationUs, int frequency)
{
	const int channelBytes = format.sampleSize() / 8;
	const int sampleBytes = format.channelCount() * channelBytes;
	
    qint64 length = (format.sampleRate() * format.channelCount() * (format.sampleSize() / 8))
                        * durationUs / 1000000; // durationUs is in microseconds

    Q_ASSERT(length % sampleBytes == 0);
    Q_UNUSED(sampleBytes) // suppress warning in release builds

    m_buffer.resize(length);
    unsigned char *ptr = reinterpret_cast<unsigned char *>(m_buffer.data());    
	
	generateData(format, ptr, length);

}
Example #16
void tst_QWaveDecoder::file()
{
    QFETCH(QString, file);
    QFETCH(tst_QWaveDecoder::Corruption, corruption);
    QFETCH(int, channels);
    QFETCH(int, samplesize);
    QFETCH(int, samplerate);
    QFETCH(QAudioFormat::Endian, byteorder);

    QFile stream;
    stream.setFileName(file);
    stream.open(QIODevice::ReadOnly);

    QVERIFY(stream.isOpen());

    QWaveDecoder waveDecoder(&stream);
    QSignalSpy validFormatSpy(&waveDecoder, SIGNAL(formatKnown()));
    QSignalSpy parsingErrorSpy(&waveDecoder, SIGNAL(parsingError()));

    if (corruption == NotAWav) {
        QSKIP("Not all failures detected correctly yet");
        QTRY_COMPARE(parsingErrorSpy.count(), 1);
        QCOMPARE(validFormatSpy.count(), 0);
    } else if (corruption == NoSampleData) {
        QTRY_COMPARE(validFormatSpy.count(), 1);
        QCOMPARE(parsingErrorSpy.count(), 0);
        QVERIFY(waveDecoder.audioFormat().isValid());
        QVERIFY(waveDecoder.size() == 0);
        QVERIFY(waveDecoder.duration() == 0);
    } else if (corruption == FormatDescriptor) {
        QTRY_COMPARE(parsingErrorSpy.count(), 1);
        QCOMPARE(validFormatSpy.count(), 0);
    } else if (corruption == FormatString) {
        QTRY_COMPARE(parsingErrorSpy.count(), 1);
        QCOMPARE(validFormatSpy.count(), 0);
        QVERIFY(!waveDecoder.audioFormat().isValid());
    } else if (corruption == DataDescriptor) {
        QTRY_COMPARE(parsingErrorSpy.count(), 1);
        QCOMPARE(validFormatSpy.count(), 0);
        QVERIFY(waveDecoder.size() == 0);
    } else if (corruption == None) {
        QTRY_COMPARE(validFormatSpy.count(), 1);
        QCOMPARE(parsingErrorSpy.count(), 0);
        QVERIFY(waveDecoder.audioFormat().isValid());
        QVERIFY(waveDecoder.size() > 0);
        QVERIFY(waveDecoder.duration() == 250);
        QAudioFormat format = waveDecoder.audioFormat();
        QVERIFY(format.isValid());
        QVERIFY(format.channelCount() == channels);
        QVERIFY(format.sampleSize() == samplesize);
        QVERIFY(format.sampleRate() == samplerate);
        if (format.sampleSize() != 8) {
            QVERIFY(format.byteOrder() == byteorder);
        }
    }

    stream.close();
}
Example #17
void SpectrumAnalyser::calculate(const QByteArray &buffer,
                         const QAudioFormat &format)
{
    // QThread::currentThread is marked 'for internal use only', but
    // we're only using it for debug output here, so it's probably OK :)
    SPECTRUMANALYSER_DEBUG << "SpectrumAnalyser::calculate"
                           << QThread::currentThread()
                           << "state" << m_state;

    if (isReady()) {
        Q_ASSERT(isPCMS16LE(format));

        const int bytesPerSample = format.sampleSize() * format.channelCount() / 8;

#ifdef DUMP_SPECTRUMANALYSER
        m_count++;
        const QString pcmFileName = m_outputDir.filePath(QString("spectrum_%1.pcm").arg(m_count, 4, 10, QChar('0')));
        QFile pcmFile(pcmFileName);
        pcmFile.open(QIODevice::WriteOnly);
        const int bufferLength = m_numSamples * bytesPerSample;
        pcmFile.write(buffer, bufferLength);

        m_textStream << "TimeDomain " << m_count << "\n";
        const qint16* input = reinterpret_cast<const qint16*>(buffer);
        for (int i=0; i<m_numSamples; ++i) {
            m_textStream << i << "\t" << *input << "\n";
            input += format.channels();
        }
#endif

        m_state = Busy;

        // Invoke SpectrumAnalyserThread::calculateSpectrum using QMetaObject.  If
        // m_thread is in a different thread from the current thread, the
        // calculation will be done in the child thread.
        // Once the calculation is finished, a calculationChanged signal will be
        // emitted by m_thread.
        const bool b = QMetaObject::invokeMethod(m_thread, "calculateSpectrum",
                                  Qt::AutoConnection,
                                  Q_ARG(QByteArray, buffer),
                                  Q_ARG(int, format.sampleRate()),
                                  Q_ARG(int, bytesPerSample));
        Q_ASSERT(b);
        Q_UNUSED(b) // suppress warnings in release builds

#ifdef DUMP_SPECTRUMANALYSER
        m_textStream << "FrequencySpectrum " << m_count << "\n";
        FrequencySpectrum::const_iterator x = m_spectrum.begin();
        for (int i=0; i<m_numSamples; ++i, ++x)
            m_textStream << i << "\t"
                         << x->frequency << "\t"
                         << x->amplitude<< "\t"
                         << x->phase << "\n";
#endif
    }
}
Example #18
bool QAudioDeviceInfoInternal::isFormatSupported(const QAudioFormat& format) const
{
    QAudioDeviceInfoInternal *self = const_cast<QAudioDeviceInfoInternal*>(this);

    return format.isValid()
            && format.codec() == QString::fromLatin1("audio/pcm")
            && self->supportedSampleRates().contains(format.sampleRate())
            && self->supportedChannelCounts().contains(format.channelCount())
            && self->supportedSampleSizes().contains(format.sampleSize());
}
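From the caller's side the same checks are reached through the public QAudioDeviceInfo API; a short illustrative example (the concrete format values are arbitrary):

#include <QAudioDeviceInfo>
#include <QAudioFormat>

// Ask the default input device whether it can capture CD-quality PCM.
bool canRecordCdQualityPcm()
{
    QAudioFormat format;
    format.setCodec("audio/pcm");
    format.setSampleRate(44100);
    format.setChannelCount(2);
    format.setSampleSize(16);
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    return QAudioDeviceInfo::defaultInputDevice().isFormatSupported(format);
}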
Example #19
void TSoundOutputDeviceImp::play(const TSoundTrackP &st, TINT32 s0, TINT32 s1,
                                 bool loop, bool scrubbing) {
  if (!doSetStreamFormat(st->getFormat())) return;

  MyData *myData = new MyData();

  myData->imp              = shared_from_this();
  myData->totalPacketCount = s1 - s0;
  myData->fileByteCount    = (s1 - s0) * st->getSampleSize();
  myData->entireFileBuffer = new char[myData->fileByteCount];

  memcpy(myData->entireFileBuffer, st->getRawData() + s0 * st->getSampleSize(),
         myData->fileByteCount);

  m_isPlaying       = true;
  myData->isLooping = loop;

  QAudioFormat format;
  QAudioDeviceInfo info(QAudioDeviceInfo::defaultOutputDevice());

  format.setSampleSize(st->getBitPerSample());
  format.setCodec("audio/pcm");
  format.setChannelCount(st->getChannelCount());
  format.setByteOrder(QAudioFormat::LittleEndian);
  format.setSampleType(st->getFormat().m_signedSample
                           ? QAudioFormat::SignedInt
                           : QAudioFormat::UnSignedInt);
  format.setSampleRate(st->getSampleRate());
  QList<QAudioFormat::Endian> sbos        = info.supportedByteOrders();
  QList<int> sccs                         = info.supportedChannelCounts();
  QList<int> ssrs                         = info.supportedSampleRates();
  QList<QAudioFormat::SampleType> sstypes = info.supportedSampleTypes();
  QList<int> ssss                         = info.supportedSampleSizes();
  QStringList supCodes                    = info.supportedCodecs();
  if (!info.isFormatSupported((format))) {
    format                                 = info.nearestFormat(format);
    int newChannels                        = format.channelCount();
    int newBitsPerSample                   = format.sampleSize();
    int newSampleRate                      = format.sampleRate();
    QAudioFormat::SampleType newSampleType = format.sampleType();
    QAudioFormat::Endian newBo             = format.byteOrder();
  }
  int test = st->getSampleCount() / st->getSampleRate();
  QByteArray *data =
      new QByteArray(myData->entireFileBuffer, myData->fileByteCount);
  QBuffer *newBuffer = new QBuffer;
  newBuffer->setBuffer(data);
  newBuffer->open(QIODevice::ReadOnly);
  newBuffer->seek(0);
  if (m_audioOutput == NULL) {
    m_audioOutput = new QAudioOutput(format, NULL);
  }
  m_audioOutput->start(newBuffer);
  m_audioOutput->setVolume(m_volume);
}
Example #20
QDebug operator<<(QDebug dbg, const QAudioFormat &f)
{
    dbg.nospace() << "QAudioFormat(" << f.sampleRate();
    dbg.nospace() << "Hz, " << f.sampleSize();
    dbg.nospace() << "bit, channelCount=" << f.channelCount();
    dbg.nospace() << ", sampleType=" << f.sampleType();
    dbg.nospace() << ", byteOrder=" << f.byteOrder();
    dbg.nospace() << ", codec=" << f.codec();
    dbg.nospace() << ")";

    return dbg.space();
}
Example #21
File: lockin2.cpp Project: antigol/lockin2
bool Lockin2::isFormatSupported(const QAudioFormat &format)
{
    if (format.codec() != "audio/pcm") {
        return false;
    }

    if (format.channelCount() != 2) {
        return false;
    }

    return true;
}
Example #22
CircularBuffer::CircularBuffer(const QAudioFormat& format, qint64 samples, QObject* parent)
    : QIODevice(parent),
      m_format(format),
      m_bytesPerSample((format.sampleSize() / 8) * format.channelCount())
{
    m_readPtr = 0;
    m_writePtr = 0;
    m_bufferSize = 0;
    m_bufferCapacity = samples;

    m_buffer.resize(m_bufferCapacity);
    m_buffer.fill(0);
    open(QIODevice::ReadWrite);
}
Example #23
void AudioFileWav::addHeader(const QAudioFormat& audioFormat) {
    QDataStream stream(&_file);

    stream.setByteOrder(QDataStream::LittleEndian);

    // RIFF
    stream.writeRawData("RIFF", 4);
    stream << quint32(0);
    stream.writeRawData("WAVE", 4);

    // fmt chunk: chunk size 16, audio format 1 = PCM
    stream.writeRawData("fmt ", 4);
    stream << quint32(16);
    stream << quint16(1);
    stream << quint16(audioFormat.channelCount());
    stream << quint32(audioFormat.sampleRate());
    stream << quint32(audioFormat.sampleRate() * audioFormat.channelCount() * audioFormat.sampleSize() / 8); // bytes per second
    stream << quint16(audioFormat.channelCount() * audioFormat.sampleSize() / 8); // block align
    stream << quint16(audioFormat.sampleSize()); // bits Per Sample
    // data chunk header (size patched once the data length is known)
    stream.writeRawData("data", 4);
    stream << quint32(0);
}
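As in the earlier WaveFileWriter example, the two quint32(0) placeholders need to be overwritten once recording stops. A minimal sketch of that step follows, assuming _file is a seekable QFile and dataSize is the number of PCM bytes written; the method name and dataSize are hypothetical.

// Hypothetical finalisation: patch the RIFF size (offset 4) and the data
// chunk size (offset 40) that addHeader() wrote as zero.
void AudioFileWav::patchHeaderSizes(quint32 dataSize) {
    QDataStream stream(&_file);
    stream.setByteOrder(QDataStream::LittleEndian);

    _file.seek(4);                    // RIFF chunk size field
    stream << quint32(36 + dataSize); // "WAVE" + fmt chunk + data header + payload

    _file.seek(40);                   // data chunk size field
    stream << quint32(dataSize);
}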
Example #24
/*!
    Returns the closest QAudioFormat to the supplied \a settings that the system supports.

    These settings are provided by the platform/audio plugin being used.

    They are also dependent on the \l {QAudio}::Mode being used.
*/
QAudioFormat QAudioDeviceInfo::nearestFormat(const QAudioFormat &settings) const
{
    if (isFormatSupported(settings))
        return settings;

    QAudioFormat nearest = settings;

    QList<QString> testCodecs = supportedCodecs();
    QList<int> testChannels = supportedChannelCounts();
    QList<QAudioFormat::Endian> testByteOrders = supportedByteOrders();
    QList<QAudioFormat::SampleType> testSampleTypes;
    QList<QAudioFormat::SampleType> sampleTypesAvailable = supportedSampleTypes();
    QMap<int,int> testSampleRates;
    QList<int> sampleRatesAvailable = supportedSampleRates();
    QMap<int,int> testSampleSizes;
    QList<int> sampleSizesAvailable = supportedSampleSizes();

    // Get sorted lists for checking
    if (testCodecs.contains(settings.codec())) {
        testCodecs.removeAll(settings.codec());
        testCodecs.insert(0, settings.codec());
    }
    testChannels.removeAll(settings.channelCount());
    testChannels.insert(0, settings.channelCount());
    testByteOrders.removeAll(settings.byteOrder());
    testByteOrders.insert(0, settings.byteOrder());

    if (sampleTypesAvailable.contains(settings.sampleType()))
        testSampleTypes.append(settings.sampleType());
    if (sampleTypesAvailable.contains(QAudioFormat::SignedInt))
        testSampleTypes.append(QAudioFormat::SignedInt);
    if (sampleTypesAvailable.contains(QAudioFormat::UnSignedInt))
        testSampleTypes.append(QAudioFormat::UnSignedInt);
    if (sampleTypesAvailable.contains(QAudioFormat::Float))
        testSampleTypes.append(QAudioFormat::Float);

    if (sampleSizesAvailable.contains(settings.sampleSize()))
        testSampleSizes.insert(0,settings.sampleSize());
    sampleSizesAvailable.removeAll(settings.sampleSize());
    foreach (int size, sampleSizesAvailable) {
        int larger  = (size > settings.sampleSize()) ? size : settings.sampleSize();
        int smaller = (size > settings.sampleSize()) ? settings.sampleSize() : size;
        bool isMultiple = ( 0 == (larger % smaller));
        int diff = larger - smaller;
        testSampleSizes.insert((isMultiple ? diff : diff+100000), size);
    }
Example #25
qint64 AudioOutput::currentSerialPlayedUsec()
{
    if(serial != aw->serial){
        this->reset();
        //debug
        audioDevice = this->start();
        serial = aw->serial;
        return 0;
    }
    qint64 currentSerialProcessedUsec = this->processedUSecs();
    int bytesInBuffer = this->bufferSize() - this->bytesFree();
    QAudioFormat format = this->format();
    int sampleSizes = format.sampleSize();
    int channels = format.channelCount();
    int sampleRates = format.sampleRate();
    qint64 playedUsec = currentSerialProcessedUsec - (qint64)(1000000) * 8 * bytesInBuffer/sampleSizes/channels/sampleRates;
    return playedUsec;
}
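To see what the correction term in the last line removes, take 16-bit stereo at 48 kHz with 19200 bytes still queued in the device buffer (illustrative values): the fraction works out to exactly 100 ms.

// 1,000,000 us * 8 * 19200 bytes / (16 bits * 2 channels * 48000 Hz) = 100,000 us
const qint64 bytesInBuffer = 19200;
const qint64 pendingUsec   = qint64(1000000) * 8 * bytesInBuffer / 16 / 2 / 48000; // 100000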
Example #26
void SoundEngine::sweepInputFile(const qint64 &audioLength, const QAudioFormat &format)
{
    emit reset();

    int channelCount=format.channelCount();
    QByteArray startBuffer(4410*channelCount,0);
    m_backingTrack->m_inputFile->seek(m_backingTrack->m_inputFile->headerLength());

    while (!m_backingTrack->m_inputFile->atEnd())
    {
        m_backingTrack->m_inputFile->read(startBuffer.data(), 3528 * channelCount);
        m_backingTrack->calculateLevel(startBuffer, 3528 * channelCount);
        emit drawingBackEnabled(m_backingTrack->m_level);
    }

    emit timeLinePosition(0);

}
Example #27
void Generator::generateData(const QAudioFormat& format, unsigned char* ptr, qint64 length)
{
	const int channelBytes = format.sampleSize() / 8;
	if (length <= 0)
		return;
    QMutexLocker lock(m_mutex);

	QVector<qreal> channels;
	channels.resize(2); // this generator assumes mono or stereo output
	
//	LOG_INFO() << "generating Sounds" << QDateTime::currentDateTime() << length;
	initializeSounds();
	
    while (length) {
        generateTone(channels[0], channels[1], m_sampleIndex);
        //const qreal x = soundFunc(2 * M_PI * frequency * qreal(sampleIndex  ) / format.sampleRate());
        for (int i=0; i<format.channelCount(); ++i) {
            if (format.sampleSize() == 8 && format.sampleType() == QAudioFormat::UnSignedInt) {
                const quint8 value = static_cast<quint8>((1.0 + channels[i]) / 2 * 255);
                *reinterpret_cast<quint8*>(ptr) = value;
            } else if (format.sampleSize() == 8 && format.sampleType() == QAudioFormat::SignedInt) {
                const qint8 value = static_cast<qint8>(channels[i] * 127);
                *reinterpret_cast<quint8*>(ptr) = value;
            } else if (format.sampleSize() == 16 && format.sampleType() == QAudioFormat::UnSignedInt) {
                quint16 value = static_cast<quint16>((1.0 + channels[i]) / 2 * 65535);
                if (format.byteOrder() == QAudioFormat::LittleEndian)
                    qToLittleEndian<quint16>(value, ptr);
                else
                    qToBigEndian<quint16>(value, ptr);
            } else if (format.sampleSize() == 16 && format.sampleType() == QAudioFormat::SignedInt) {
                qint16 value = static_cast<qint16>(channels[i] * 32767);
                if (format.byteOrder() == QAudioFormat::LittleEndian)
                    qToLittleEndian<qint16>(value, ptr);
                else
                    qToBigEndian<qint16>(value, ptr);
            }

            ptr += channelBytes;
            length -= channelBytes;
        }
        ++m_sampleIndex;
    }
	
}
Example #28
void TaudioOUT::createOutputDevice() {
  m_deviceInfo = QAudioDeviceInfo::defaultOutputDevice();
  QList<QAudioDeviceInfo> devList = QAudioDeviceInfo::availableDevices(QAudio::AudioOutput);
  for (int i = 0; i < devList.size(); ++i) { // find device with name or keep default one
    if (devList[i].deviceName() == m_audioParams->OUTdevName) {
      m_deviceInfo = devList[i];
      break;
    }
  }
  m_devName = m_deviceInfo.deviceName();
  QAudioFormat format;
    format.setChannelCount(1);
    format.setSampleRate(m_sampleRate);
    format.setSampleType(QAudioFormat::SignedInt);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
  if (!m_deviceInfo.isFormatSupported(format)) {
    qDebug() << "Output Format 44100/16 stereo is not supported";
    format = m_deviceInfo.nearestFormat(format);
    qDebug() << "Format is" << format.sampleRate() << format.channelCount() << format.sampleSize();
  }
  m_sampleRate = format.sampleRate();

  if (m_audioOUT) {
    delete m_audioOUT;
    delete m_buffer;
  }
  m_audioOUT = new QAudioOutput(m_deviceInfo, format, this);
  m_audioOUT->setBufferSize(m_bufferFrames * 2);

  m_buffer = new TaudioBuffer(this);
  m_buffer->open(QIODevice::ReadOnly);
  m_buffer->setBufferSize(m_audioOUT->bufferSize());

  m_maxSamples = m_sampleRate * 1.5;

  qDebug() << "OUT:" << m_deviceInfo.deviceName() << m_audioOUT->format().sampleRate() << m_maxSamples;

  connect(m_buffer, &TaudioBuffer::feedAudio, this, &TaudioOUT::outCallBack, Qt::DirectConnection);
  connect(m_audioOUT, &QAudioOutput::stateChanged, this, &TaudioOUT::stateChangedSlot);  
}
Example #29
void SpectrumAnalyser::calculate(const QByteArray &buffer,
                         const QAudioFormat &format)
{
    // QThread::currentThread is marked 'for internal use only', but
    // we're only using it for debug output here, so it's probably OK :)
    SPECTRUMANALYSER_DEBUG << "SpectrumAnalyser::calculate"
                           << QThread::currentThread()
                           << "state" << m_state;

	SPECTRUMANALYSER_DEBUG << "buffer size =" << buffer.size();

    if (isReady()) {
        Q_ASSERT(isPCMS16LE(format));

        const int bytesPerSample = format.sampleSize() * format.channelCount() / 8;

        m_state = Busy;

        // Invoke SpectrumAnalyserThread::calculateSpectrum using QMetaObject.  If
        // m_thread is in a different thread from the current thread, the
        // calculation will be done in the child thread.
        // Once the calculation is finished, a calculationChanged signal will be
        // emitted by m_thread.
        const bool b = QMetaObject::invokeMethod(m_thread, "calculateSpectrum",
                                  Qt::AutoConnection,
                                  Q_ARG(QByteArray, buffer),
                                  Q_ARG(int, format.sampleRate()),
                                  Q_ARG(int, bytesPerSample));
        Q_ASSERT(b);
        Q_UNUSED(b) // suppress warnings in release builds

#ifdef DUMP_SPECTRUMANALYSER
        m_textStream << "FrequencySpectrum " << m_count << "\n";
        FrequencySpectrum::const_iterator x = m_spectrum.begin();
        for (int i=0; i<m_numSamples; ++i, ++x)
            m_textStream << i << "\t"
                         << x->frequency << "\t"
                         << x->amplitude<< "\t"
                         << x->phase << "\n";
#endif
    }
}
Example #30
TaudioIN::TaudioIN(QObject *parent) :
  QObject(parent),
  m_inDevice(0),
  m_buffer(0),
  m_pitchFinder(0)
{
  QList<QAudioDeviceInfo> devList = QAudioDeviceInfo::availableDevices(QAudio::AudioInput);
  for (int i = 0; i < devList.count(); ++i)
    qDebug() << i << devList[i].deviceName();
  
  QAudioDeviceInfo defaultIn = QAudioDeviceInfo::defaultInputDevice();
  QAudioFormat format;
    format.setChannelCount(1);
    format.setSampleRate(48000);
    format.setSampleType(QAudioFormat::SignedInt);
    format.setSampleSize(16);
    format.setCodec("audio/pcm");
    format.setByteOrder(QAudioFormat::LittleEndian);
  if (!defaultIn.isFormatSupported(format)) {
    qDebug() << "Format 48000/16 mono is not suported";
    format = defaultIn.nearestFormat(format);
    qDebug() << "Format is" << format.sampleRate() << format.channelCount() << format.sampleSize();
  }
  
  m_lastNote = new Tnote();
  
  m_audioIN = new QAudioInput(defaultIn, format, this);
  m_audioIN->setBufferSize(2048);
  m_pitchFinder = new TpitchFinder();
  m_pitchFinder->setMinimalDuration(0.1f);
  m_pitchFinder->setSplitByVolChange(false);
  m_pitchFinder->setSplitVolume(0.0);
  m_pitchFinder->setSkipStillerVal(0.0);
  m_pitchFinder->aGl()->equalLoudness = true;
  m_pitchFinder->setSampleRate(m_audioIN->format().sampleRate()); // framesPerChunk is determined here
  connect(m_pitchFinder, &TpitchFinder::pitchInChunk, this, &TaudioIN::pitchDetected);
  connect(m_pitchFinder, &TpitchFinder::noteStarted, this, &TaudioIN::noteStartedSlot);
  connect(m_pitchFinder, &TpitchFinder::noteFinished, this, &TaudioIN::noteFinishedSlot);
}