Example #1
// returns the audio level for each channel, normalized to the range [0, 1]
QVector<qreal> getBufferLevels(const QAudioBuffer& buffer)
{
    QVector<qreal> values;

    if (!buffer.format().isValid() || buffer.format().byteOrder() != QAudioFormat::LittleEndian)
        return values;

    if (buffer.format().codec() != "audio/pcm")
        return values;

    int channelCount = buffer.format().channelCount();
    values.fill(0, channelCount);
    qreal peak_value = getPeakValue(buffer.format());
    if (qFuzzyCompare(peak_value, qreal(0)))
        return values;

    switch (buffer.format().sampleType()) {
    case QAudioFormat::Unknown:
    case QAudioFormat::UnSignedInt:
        if (buffer.format().sampleSize() == 32)
            values = getBufferLevels(buffer.constData<quint32>(), buffer.frameCount(), channelCount);
        if (buffer.format().sampleSize() == 16)
            values = getBufferLevels(buffer.constData<quint16>(), buffer.frameCount(), channelCount);
        if (buffer.format().sampleSize() == 8)
            values = getBufferLevels(buffer.constData<quint8>(), buffer.frameCount(), channelCount);
        for (int i = 0; i < values.size(); ++i)
            values[i] = qAbs(values.at(i) - peak_value / 2) / (peak_value / 2);
        break;
    case QAudioFormat::Float:
        if (buffer.format().sampleSize() == 32) {
            values = getBufferLevels(buffer.constData<float>(), buffer.frameCount(), channelCount);
            for (int i = 0; i < values.size(); ++i)
                values[i] /= peak_value;
        }
        break;
    case QAudioFormat::SignedInt:
        if (buffer.format().sampleSize() == 32)
            values = getBufferLevels(buffer.constData<qint32>(), buffer.frameCount(), channelCount);
        if (buffer.format().sampleSize() == 16)
            values = getBufferLevels(buffer.constData<qint16>(), buffer.frameCount(), channelCount);
        if (buffer.format().sampleSize() == 8)
            values = getBufferLevels(buffer.constData<qint8>(), buffer.frameCount(), channelCount);
        for (int i = 0; i < values.size(); ++i)
            values[i] /= peak_value;
        break;
    }

    return values;
}
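The getPeakValue() helper and the per-type getBufferLevels() overload used above are not shown in this example. A minimal sketch of the template overload, assuming it simply returns the peak absolute sample value seen in each channel:

template <class T>
QVector<qreal> getBufferLevels(const T *buffer, int frames, int channels)
{
    QVector<qreal> max_values;
    max_values.fill(0, channels);

    // track the largest absolute sample seen per channel
    for (int i = 0; i < frames; ++i) {
        for (int j = 0; j < channels; ++j) {
            qreal value = qAbs(qreal(buffer[i * channels + j]));
            if (value > max_values.at(j))
                max_values.replace(j, value);
        }
    }
    return max_values;
}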
// process an audio buffer for the FFT calculations
void MainWindow::processBuffer(QAudioBuffer buffer){
  qreal peakValue;
  int duration;

  if(buffer.frameCount() < 512)
    return;

  // accumulate the left and right mean levels
  levelLeft = levelRight = 0;
  // only stereo frames are handled; mono buffers are skipped
  if(buffer.format().channelCount() != 2)
    return;

  sample.resize(buffer.frameCount());
  // audio is signed int
  if(buffer.format().sampleType() == QAudioFormat::SignedInt){
    QAudioBuffer::S16S *data = buffer.data<QAudioBuffer::S16S>();  // assumes 16-bit stereo frames
    // peak value changes according to the sample size
    if (buffer.format().sampleSize() == 32)
      peakValue=INT_MAX;
    else if (buffer.format().sampleSize() == 16)
      peakValue=SHRT_MAX;
    else
      peakValue=CHAR_MAX;

    // scale everything to [0,1]
    for(int i=0; i<buffer.frameCount(); i++){
      // for visualization purposes, we only need one of the
      // left/right channels
      sample[i] = data[i].left/peakValue;
      levelLeft+= abs(data[i].left)/peakValue;
      levelRight+= abs(data[i].right)/peakValue;
    }
  }

  // audio is unsigned int
  else if(buffer.format().sampleType() == QAudioFormat::UnSignedInt){
    QAudioBuffer::S16U *data = buffer.data<QAudioBuffer::S16U>();
    if (buffer.format().sampleSize() == 32)
      peakValue=UINT_MAX;
    else if (buffer.format().sampleSize() == 16)
      peakValue=USHRT_MAX;
    else
      peakValue=UCHAR_MAX;
    for(int i=0; i<buffer.frameCount(); i++){
      sample[i] = data[i].left/peakValue;
      levelLeft+= abs(data[i].left)/peakValue;
      levelRight+= abs(data[i].right)/peakValue;
    }
  }

  // audio is float type
  else if(buffer.format().sampleType() == QAudioFormat::Float){
    QAudioBuffer::S32F *data = buffer.data<QAudioBuffer::S32F>();
    peakValue = 1.00003;
    for(int i=0; i<buffer.frameCount(); i++){
      sample[i] = data[i].left/peakValue;
      // drop invalid samples: NaN is the only value that compares
      // unequal to itself
      if(sample[i] != sample[i]){
        sample[i] = 0;
      }
      else{
        levelLeft+= abs(data[i].left)/peakValue;
        levelRight+= abs(data[i].right)/peakValue;
      }
    }
  }
  // if the probe is listening to the audio, run the FFT;
  // the calculator will signal when it is done
  if(probe->isActive()){
    // durationForFrames() reports microseconds, so convert to milliseconds
    duration = buffer.format().durationForFrames(buffer.frameCount())/1000;
    //qDebug() << "duration =" << duration;
    calculator->calc(sample, duration);
  }
  // tells anyone interested about left and right mean levels
  emit levels(levelLeft/buffer.frameCount(),levelRight/buffer.frameCount());
}
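processBuffer() is fed by the QAudioProbe it checks with probe->isActive(). A minimal wiring sketch, assuming probe is a QAudioProbe* member and player is the QMediaPlayer (or any other QMediaObject) being monitored; everything except the Qt classes is an assumption:

  // hypothetical setup code, e.g. in the MainWindow constructor
  probe = new QAudioProbe(this);
  connect(probe, &QAudioProbe::audioBufferProbed,
          this, &MainWindow::processBuffer);
  // setSource() returns false if the media object does not support probing
  if(!probe->setSource(player))
    qWarning() << "audio probing is not supported by this media object";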
void AudioBuffer::init(QAudioBuffer &qtbuffer){
    qDebug() << "void AudioBuffer::init(...) called";
    QAudioFormat audioFormat = qtbuffer.format();
    this->hzFreq = audioFormat.sampleRate();
    this->durationInMs = qtbuffer.duration()/1000;   // duration() reports microseconds
    QAudioFormat::SampleType sampleType
            = audioFormat.sampleType();
    int frameCount = qtbuffer.frameCount();
    int nChannels = audioFormat.channelCount();
    int bytesPerFrame = audioFormat.bytesPerFrame();
    int bytesPerValue = bytesPerFrame/nChannels;
    void *firstData = qtbuffer.data();
    this->bufferSize = frameCount;
    this->buffer = QSharedPointer<SharedBuffer>(
                new SharedBuffer);
    this->buffer->buffer = new int[this->bufferSize];
    this->mean = 0;
    // samples are divided by this factor so the running sums below stay in range
    const int tempReducFactor = 500;
    qint64 meanOfSquare = 0;
    for(int i=0; i<frameCount; i++){
        int currentValue = 0;
        for(int j=0; j<nChannels; j++){
            int currentPos
                    = i*bytesPerFrame
                    + j*bytesPerValue;
            void *valPos = (void *)(((quint8 *)firstData) + currentPos);
            if(sampleType == QAudioFormat::SignedInt){
                // signed samples must be read through signed types
                if(bytesPerValue == 1){
                    qint8 val = *((qint8*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 2){
                    qint16 val = *((qint16*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 4){
                    qint32 val = *((qint32*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 8){
                    qint64 val = *((qint64*)valPos);
                    currentValue += val;
                }
            }else if(sampleType == QAudioFormat::UnSignedInt){
                // unsigned samples must be read through unsigned types
                if(bytesPerValue == 1){
                    quint8 val = *((quint8*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 2){
                    quint16 val = *((quint16*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 4){
                    quint32 val = *((quint32*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 8){
                    quint64 val = *((quint64*)valPos);
                    currentValue += val;
                }
            }else if(sampleType == QAudioFormat::Float){
                // 4-byte samples are float, 8-byte samples are double
                if(bytesPerValue == 4){
                    float val = *((float*)valPos);
                    currentValue += val;
                }else if(bytesPerValue == 8){
                    double val = *((double*)valPos);
                    currentValue += val;
                }
            }
        }
        currentValue /= nChannels;
        currentValue /= tempReducFactor;
        this->buffer->buffer[i] = currentValue;
        this->mean += currentValue;
        meanOfSquare += currentValue*currentValue;
        if(meanOfSquare < 0){
            // a negative sum means the qint64 accumulator overflowed
            Q_ASSERT(false);
        }
    }
    this->mean /= frameCount;
    meanOfSquare /= frameCount;
    // Var(X) = E[X^2] - (E[X])^2, then undo the reduction factor
    qint64 squaredMean = this->mean * this->mean;
    this->var = meanOfSquare - squaredMean;
    this->var *= tempReducFactor * tempReducFactor;
    this->mean *= tempReducFactor;
    this->sd = qSqrt(this->var);
    qDebug() << "void AudioBuffer::init(...) end";
}
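A minimal usage sketch for init(), assuming the buffer arrives through a QAudioProbe slot; the Analyzer class and handler name are hypothetical, and AudioBuffer/SharedBuffer come from the surrounding project:

void Analyzer::onAudioBufferProbed(const QAudioBuffer &probed){
    QAudioBuffer local = probed;   // copy because init() takes a non-const reference
    AudioBuffer stats;
    stats.init(local);
    // stats now holds the per-buffer mean, variance and standard deviation
}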