Example 1
SoundSource::OpenResult SoundSourceMp3::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    DEBUG_ASSERT(!hasChannelCount());
    DEBUG_ASSERT(!hasSamplingRate());

    DEBUG_ASSERT(!m_file.isOpen());
    if (!m_file.open(QIODevice::ReadOnly)) {
        qWarning() << "Failed to open file:" << m_file.fileName();
        return OpenResult::FAILED;
    }

    // Get a pointer to the file using memory mapped IO
    m_fileSize = m_file.size();
    m_pFileData = m_file.map(0, m_fileSize);
    // NOTE(uklotzde): If the file disappears unexpectedly while mapped
    // a SIGBUS error might occur that is not handled and will terminate
    // Mixxx immediately. This behavior is documented in the manpage of
    // mmap(). It has already appeared due to hardware errors and is
    // described in the following bug report:
    // https://bugs.launchpad.net/mixxx/+bug/1452005

    // Transfer it to the mad stream-buffer:
    mad_stream_options(&m_madStream, MAD_OPTION_IGNORECRC);
    mad_stream_buffer(&m_madStream, m_pFileData, m_fileSize);
    DEBUG_ASSERT(m_pFileData == m_madStream.this_frame);

    DEBUG_ASSERT(m_seekFrameList.empty());
    m_avgSeekFrameCount = 0;
    m_curFrameIndex = getMinFrameIndex();
    int headerPerSamplingRate[kSamplingRateCount];
    for (int i = 0; i < kSamplingRateCount; ++i) {
        headerPerSamplingRate[i] = 0;
    }

    // Decode all the headers and calculate audio properties

    unsigned long sumBitrate = 0;

    mad_header madHeader;
    mad_header_init(&madHeader);

    SINT maxChannelCount = getChannelCount();
    do {
        if (!decodeFrameHeader(&madHeader, &m_madStream, true)) {
            if (isStreamValid(m_madStream)) {
                // Skip frame
                continue;
            } else {
                // Abort decoding
                break;
            }
        }

        // Grab data from madHeader
        const unsigned int madSampleRate = madHeader.samplerate;

        // TODO(XXX): Replace DEBUG_ASSERT with static_assert
        // MAD must not change its enum values!
        DEBUG_ASSERT(MAD_UNITS_8000_HZ == 8000);
        const mad_units madUnits = static_cast<mad_units>(madSampleRate);

        const long madFrameLength = mad_timer_count(madHeader.duration, madUnits);
        if (0 >= madFrameLength) {
            qWarning() << "Skipping MP3 frame with invalid length"
                    << madFrameLength
                    << "in:" << m_file.fileName();
            // Skip frame
            continue;
        }

        const SINT madChannelCount = MAD_NCHANNELS(&madHeader);
        if (isValidChannelCount(maxChannelCount) && (madChannelCount != maxChannelCount)) {
            qWarning() << "Differing number of channels"
                    << madChannelCount << "<>" << maxChannelCount
                    << "in some MP3 frame headers:"
                    << m_file.fileName();
        }
        maxChannelCount = math_max(madChannelCount, maxChannelCount);

        const int samplingRateIndex = getIndexBySamplingRate(madSampleRate);
        if (samplingRateIndex >= kSamplingRateCount) {
            qWarning() << "Invalid sample rate:" << m_file.fileName()
                    << madSampleRate;
            // Abort
            mad_header_finish(&madHeader);
            return OpenResult::FAILED;
        }
        // Count valid frames, grouped by their sampling rate
        headerPerSamplingRate[samplingRateIndex]++;

        addSeekFrame(m_curFrameIndex, m_madStream.this_frame);

        // Accumulate data from the header
        sumBitrate += madHeader.bitrate;

        // Update current stream position
        m_curFrameIndex += madFrameLength;

        DEBUG_ASSERT(m_madStream.this_frame);
        DEBUG_ASSERT(0 <= (m_madStream.this_frame - m_pFileData));
    } while (quint64(m_madStream.this_frame - m_pFileData) < m_fileSize);

    mad_header_finish(&madHeader);

    if (MAD_ERROR_NONE != m_madStream.error) {
        // Recoverable errors should have been handled in the decoding loop above
        DEBUG_ASSERT(!MAD_RECOVERABLE(m_madStream.error));
        if (MAD_ERROR_BUFLEN != m_madStream.error) {
            qWarning() << "Unrecoverable MP3 header error:"
                    << mad_stream_errorstr(&m_madStream);
            // Abort
            return OpenResult::FAILED;
        }
    }

    if (m_seekFrameList.empty()) {
        // This is not a working MP3 file.
        qWarning() << "SSMP3: This is not a working MP3 file:"
                << m_file.fileName();
        // Abort
        return OpenResult::FAILED;
    }

    int mostCommonSamplingRateIndex = kSamplingRateCount; // invalid
    int mostCommonSamplingRateCount = 0;
    int differentRates = 0;
    for (int i = 0; i < kSamplingRateCount; ++i) {
        // Count the sampling rates that actually occurred and
        // find the most common one
        if (0 < headerPerSamplingRate[i]) {
            differentRates++;
            if (mostCommonSamplingRateCount < headerPerSamplingRate[i]) {
                mostCommonSamplingRateCount = headerPerSamplingRate[i];
                mostCommonSamplingRateIndex = i;
            }
        }
    }

    if (differentRates > 1) {
        qWarning() << "Differing sampling rate in some headers:"
                   << m_file.fileName();
        for (int i = 0; i < kSamplingRateCount; ++i) {
            if (0 < headerPerSamplingRate[i]) {
                qWarning() << headerPerSamplingRate[i] << "MP3 headers with sampling rate" << getSamplingRateByIndex(i);
            }
        }

        qWarning() << "MP3 files with varying sample rate are not supported!";
        qWarning() << "Since this happens most likely due to a corrupt file";
        qWarning() << "Mixxx tries to plays it with the most common sample rate for this file";
    }

    if (mostCommonSamplingRateIndex < kSamplingRateCount) {
        setSamplingRate(getSamplingRateByIndex(mostCommonSamplingRateIndex));
    } else {
        qWarning() << "No single valid sampling rate in header";
        // Abort
        return OpenResult::FAILED;
    }

    // Initialize the AudioSource
    setChannelCount(maxChannelCount);
    setFrameCount(m_curFrameIndex);

    // Calculate average values
    m_avgSeekFrameCount = getFrameCount() / m_seekFrameList.size();
    const unsigned long avgBitrate = sumBitrate / m_seekFrameList.size();
    setBitrate(avgBitrate / 1000);

    // Terminate m_seekFrameList
    addSeekFrame(m_curFrameIndex, 0);

    // Reset positions
    m_curFrameIndex = getMinFrameIndex();

    // Restart decoding at the beginning of the audio stream
    m_curFrameIndex = restartDecoding(m_seekFrameList.front());
    if (m_curFrameIndex != m_seekFrameList.front().frameIndex) {
        qWarning() << "Failed to start decoding:" << m_file.fileName();
        // Abort
        return OpenResult::FAILED;
    }

    return OpenResult::SUCCEEDED;
}
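
Note: tryOpen() above builds a seek table via addSeekFrame() while scanning the headers. The following is only an illustrative sketch of how such a sorted (frame index, byte position) table is typically searched when seeking; SeekEntry and findSeekEntry are hypothetical names, not part of the Mixxx sources.

#include <algorithm>
#include <cstdint>
#include <vector>

struct SeekEntry {
    int64_t frameIndex;        // index of the first audio frame of the MP3 frame
    const uint8_t* pFrameData; // byte position of the MP3 frame in the mapped file
};

// Return the last entry whose frameIndex is <= targetFrameIndex, i.e. the
// MP3 frame where decoding has to be restarted before skipping ahead.
const SeekEntry* findSeekEntry(const std::vector<SeekEntry>& seekFrameList,
                               int64_t targetFrameIndex) {
    if (seekFrameList.empty()) {
        return nullptr;
    }
    const auto it = std::upper_bound(
            seekFrameList.begin(), seekFrameList.end(), targetFrameIndex,
            [](int64_t frameIndex, const SeekEntry& entry) {
                return frameIndex < entry.frameIndex;
            });
    if (it == seekFrameList.begin()) {
        return nullptr; // target lies before the first indexed frame
    }
    return &*(it - 1);
}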
Example 2
Result SoundSourceFFmpeg::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    unsigned int i;
    AVDictionary *l_iFormatOpts = NULL;

    const QByteArray qBAFilename(getLocalFileNameBytes());
    qDebug() << "New SoundSourceFFmpeg :" << qBAFilename;

    DEBUG_ASSERT(!m_pFormatCtx);
    m_pFormatCtx = avformat_alloc_context();

#if LIBAVCODEC_VERSION_INT < 3622144 // 55.69.0
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, qBAFilename.constData(), NULL,
                            &l_iFormatOpts)!=0) {
        qDebug() << "av_open_input_file: cannot open" << qBAFilename;
        return ERR;
    }

#if LIBAVCODEC_VERSION_INT > 3544932 // 54.23.100
    av_dict_free(&l_iFormatOpts);
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, NULL)<0) {
        qDebug() << "av_find_stream_info: cannot open" << qBAFilename;
        return ERR;
    }

    //debug only (Enable if needed)
    //av_dump_format(m_pFormatCtx, 0, qBAFilename.constData(), false);

    // Find the first audio stream
    m_iAudioStream=-1;

    for (i=0; i<m_pFormatCtx->nb_streams; i++)
        if (m_pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
            m_iAudioStream=i;
            break;
        }
    if (m_iAudioStream == -1) {
        qDebug() << "ffmpeg: cannot find an audio stream: cannot open"
                 << qBAFilename;
        return ERR;
    }

    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx=m_pFormatCtx->streams[m_iAudioStream]->codec;

    // Find the decoder for the audio stream
    if (!(m_pCodec=avcodec_find_decoder(m_pCodecCtx->codec_id))) {
        qDebug() << "ffmpeg: cannot find a decoder for" << qBAFilename;
        return ERR;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL)<0) {
        qDebug() << "ffmpeg:  cannot open" << qBAFilename;
        return ERR;
    }

    m_pResample = new EncoderFfmpegResample(m_pCodecCtx);
    m_pResample->open(m_pCodecCtx->sample_fmt, AV_SAMPLE_FMT_FLT);

    setChannelCount(m_pCodecCtx->channels);
    setFrameRate(m_pCodecCtx->sample_rate);
    setFrameCount((m_pFormatCtx->duration * m_pCodecCtx->sample_rate) / AV_TIME_BASE);

    qDebug() << "ffmpeg: Samplerate: " << getFrameRate() << ", Channels: " <<
             getChannelCount() << "\n";
    if (getChannelCount() > 2) {
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return ERR;
    }

    return OK;
}
Example 3
SoundSource::OpenResult SoundSourceFFmpeg::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    AVDictionary *l_iFormatOpts = nullptr;

    const QString localFileName(getLocalFileName());
    qDebug() << "New SoundSourceFFmpeg :" << localFileName;

    DEBUG_ASSERT(!m_pFormatCtx);
    m_pFormatCtx = avformat_alloc_context();

    if (m_pFormatCtx == nullptr) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: Can't allocate memory";
        return OpenResult::FAILED;
    }

    // TODO(): Why is this required? Shouldn't it be a runtime check?
#if LIBAVCODEC_VERSION_INT < 3622144 // 55.69.0
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // libav replaces open() with ff_win32_open(), which accepts a
    // UTF-8 path; see avformat/os_support.h.
    // The old method of defining a URL_PROTOCOL is deprecated.
#if defined(_WIN32) && !defined(__MINGW32CE__)
    const QByteArray qBAFilename(
            avformat_version() >= ((52<<16)+(0<<8)+0) ?
            getLocalFileName().toUtf8() :
            getLocalFileName().toLocal8Bit());
#else
    const QByteArray qBAFilename(getLocalFileName().toLocal8Bit());
#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, qBAFilename.constData(), nullptr,
                            &l_iFormatOpts) != 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    // TODO(): Why is this required? Shouldn't it be a runtime check?
#if LIBAVCODEC_VERSION_INT > 3544932 // 54.23.100
    av_dict_free(&l_iFormatOpts);
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, nullptr) < 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    //debug only (Enable if needed)
    //av_dump_format(m_pFormatCtx, 0, qBAFilename.constData(), false);

    // Find the first audio stream
    m_iAudioStream = -1;

    for (unsigned int i = 0; i < m_pFormatCtx->nb_streams; i++)
        if (m_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            m_iAudioStream = i;
            break;
        }

    if (m_iAudioStream == -1) {
        qDebug() <<
                 "SoundSourceFFmpeg::tryOpen: cannot find an audio stream: cannot open"
                 << localFileName;
        return OpenResult::FAILED;
    }

    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx = m_pFormatCtx->streams[m_iAudioStream]->codec;

    // Find the decoder for the audio stream
    if (!(m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id))) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot find a decoder for" <<
                localFileName;
        return OpenResult::UNSUPPORTED_FORMAT;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, nullptr)<0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    m_pResample = std::make_unique<EncoderFfmpegResample>(m_pCodecCtx);
    m_pResample->openMixxx(m_pCodecCtx->sample_fmt, AV_SAMPLE_FMT_FLT);

    setChannelCount(m_pCodecCtx->channels);
    setSamplingRate(m_pCodecCtx->sample_rate);
    setFrameCount(static_cast<qint64>(round(
            static_cast<double>(m_pFormatCtx->duration) *
            m_pCodecCtx->sample_rate / AV_TIME_BASE)));

    qDebug() << "SoundSourceFFmpeg::tryOpen: Sampling rate: " << getSamplingRate() <<
             ", Channels: " <<
             getChannelCount() << "\n";
    if (getChannelCount() > 2) {
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return OpenResult::FAILED;
    }

    return OpenResult::SUCCEEDED;
}
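
Note: both FFmpeg examples estimate the frame count from AVFormatContext::duration, which is expressed in AV_TIME_BASE units (microseconds). A minimal standalone sketch of that conversion, with kAvTimeBase mirroring FFmpeg's AV_TIME_BASE:

#include <cmath>
#include <cstdint>

// duration is in AV_TIME_BASE units (1/1,000,000 s), so
// duration * sampleRate / AV_TIME_BASE yields frames (samples per channel).
int64_t estimateFrameCount(int64_t durationInAvTimeBase, int sampleRate) {
    constexpr int64_t kAvTimeBase = 1000000; // same value as AV_TIME_BASE
    return static_cast<int64_t>(
            std::round(double(durationInAvTimeBase) * sampleRate / kAvTimeBase));
}

// Example: a 3 minute track at 44100 Hz has duration = 180 * 1000000,
// which yields 180 * 44100 = 7938000 frames.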
Example 4
TSoundTrackFormat TSoundTrack::getFormat() const {
  return TSoundTrackFormat(getSampleRate(), getBitPerSample(),
                           getChannelCount(), isSampleSigned());
}
Example 5
SINT SoundSourceM4A::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer) {
    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));

    const SINT numberOfFramesTotal = math_min(
            numberOfFrames, getMaxFrameIndex() - m_curFrameIndex);
    const SINT numberOfSamplesTotal = frames2samples(numberOfFramesTotal);

    CSAMPLE* pSampleBuffer = sampleBuffer;
    SINT numberOfSamplesRemaining = numberOfSamplesTotal;
    while (0 < numberOfSamplesRemaining) {

        if (!m_sampleBuffer.isEmpty()) {
            // Consume previously decoded sample data
            const SampleBuffer::ReadableChunk readableChunk(
                    m_sampleBuffer.readFromHead(numberOfSamplesRemaining));
            if (pSampleBuffer) {
                SampleUtil::copy(pSampleBuffer, readableChunk.data(), readableChunk.size());
                pSampleBuffer += readableChunk.size();
            }
            m_curFrameIndex += samples2frames(readableChunk.size());
            DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
            DEBUG_ASSERT(numberOfSamplesRemaining >= readableChunk.size());
            numberOfSamplesRemaining -= readableChunk.size();
            if (0 == numberOfSamplesRemaining) {
                break; // exit loop
            }
        }
        // All previously decoded sample data has been consumed now
        DEBUG_ASSERT(m_sampleBuffer.isEmpty());

        if (0 == m_inputBufferLength) {
            // Fill input buffer from file
            if (isValidSampleBlockId(m_curSampleBlockId)) {
                // Read data for next sample block into input buffer
                u_int8_t* pInputBuffer = &m_inputBuffer[0];
                u_int32_t inputBufferLength = m_inputBuffer.size(); // in/out parameter
                if (!MP4ReadSample(m_hFile, m_trackId, m_curSampleBlockId,
                        &pInputBuffer, &inputBufferLength,
                        nullptr, nullptr, nullptr, nullptr)) {
                    qWarning()
                            << "Failed to read MP4 input data for sample block"
                            << m_curSampleBlockId << "(" << "min ="
                            << kSampleBlockIdMin << "," << "max ="
                            << m_maxSampleBlockId << ")";
                    break; // abort
                }
                ++m_curSampleBlockId;
                m_inputBufferLength = inputBufferLength;
                m_inputBufferOffset = 0;
            }
        }
        DEBUG_ASSERT(0 <= m_inputBufferLength);
        if (0 == m_inputBufferLength) {
            break; // EOF
        }

        // NOTE(uklotzde): The sample buffer for NeAACDecDecode2 has to
        // be big enough for a whole block of decoded samples, which
        // contains up to m_framesPerSampleBlock frames. Otherwise
        // we need to use a temporary buffer.
        CSAMPLE* pDecodeBuffer; // in/out parameter
        SINT decodeBufferCapacity;
        const SINT decodeBufferCapacityMin = frames2samples(m_framesPerSampleBlock);
        if (pSampleBuffer && (decodeBufferCapacityMin <= numberOfSamplesRemaining)) {
            // Decode samples directly into sampleBuffer
            pDecodeBuffer = pSampleBuffer;
            decodeBufferCapacity = numberOfSamplesRemaining;
        } else {
            // Decode next sample block into temporary buffer
            const SINT writeToTailCount = math_max(
                    numberOfSamplesRemaining, decodeBufferCapacityMin);
            const SampleBuffer::WritableChunk writableChunk(
                    m_sampleBuffer.writeToTail(writeToTailCount));
            pDecodeBuffer = writableChunk.data();
            decodeBufferCapacity = writableChunk.size();
        }
        DEBUG_ASSERT(decodeBufferCapacityMin <= decodeBufferCapacity);

        NeAACDecFrameInfo decFrameInfo;
        void* pDecodeResult = NeAACDecDecode2(
                m_hDecoder, &decFrameInfo,
                &m_inputBuffer[m_inputBufferOffset],
                m_inputBufferLength,
                reinterpret_cast<void**>(&pDecodeBuffer),
                decodeBufferCapacity * sizeof(*pDecodeBuffer));
        // Verify the decoding result
        if (0 != decFrameInfo.error) {
            qWarning() << "AAC decoding error:"
                    << decFrameInfo.error
                    << NeAACDecGetErrorMessage(decFrameInfo.error)
                    << getUrlString();
            break; // abort
        }
        DEBUG_ASSERT(pDecodeResult == pDecodeBuffer); // verify the in/out parameter

        // Verify the decoded sample data for consistency
        if (getChannelCount() != decFrameInfo.channels) {
            qWarning() << "Corrupt or unsupported AAC file:"
                    << "Unexpected number of channels" << decFrameInfo.channels
                    << "<>" << getChannelCount();
            break; // abort
        }
        if (getSamplingRate() != SINT(decFrameInfo.samplerate)) {
            qWarning() << "Corrupt or unsupported AAC file:"
                    << "Unexpected sampling rate" << decFrameInfo.samplerate
                    << "<>" << getSamplingRate();
            break; // abort
        }

        // Consume input data
        m_inputBufferLength -= decFrameInfo.bytesconsumed;
        m_inputBufferOffset += decFrameInfo.bytesconsumed;

        // Consume decoded output data
        const SINT numberOfSamplesDecoded = decFrameInfo.samples;
        DEBUG_ASSERT(numberOfSamplesDecoded <= decodeBufferCapacity);
        SINT numberOfSamplesRead;
        if (pDecodeBuffer == pSampleBuffer) {
            numberOfSamplesRead = math_min(numberOfSamplesDecoded, numberOfSamplesRemaining);
            pSampleBuffer += numberOfSamplesRead;
        } else {
            m_sampleBuffer.readFromTail(decodeBufferCapacity - numberOfSamplesDecoded);
            const SampleBuffer::ReadableChunk readableChunk(
                    m_sampleBuffer.readFromHead(numberOfSamplesRemaining));
            numberOfSamplesRead = readableChunk.size();
            if (pSampleBuffer) {
                SampleUtil::copy(pSampleBuffer, readableChunk.data(), numberOfSamplesRead);
                pSampleBuffer += numberOfSamplesRead;
            }
        }
        // The decoder might decode more samples than actually needed
        // at the end of the file! When the end of the file has been
        // reached decoding can be restarted by seeking to a new
        // position.
        DEBUG_ASSERT(numberOfSamplesDecoded >= numberOfSamplesRead);
        m_curFrameIndex += samples2frames(numberOfSamplesRead);
        DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
        DEBUG_ASSERT(numberOfSamplesRemaining >= numberOfSamplesRead);
        numberOfSamplesRemaining -= numberOfSamplesRead;
    }

    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(numberOfSamplesTotal >= numberOfSamplesRemaining);
    return samples2frames(numberOfSamplesTotal - numberOfSamplesRemaining);
}
Example 6
 virtual void onSeek(sf::Time timeOffset)
 {
     m_currentSample = static_cast<std::size_t>(timeOffset.asSeconds() * getSampleRate() * getChannelCount());
 }
Example 7
void MyStream::onSeek(sf::Time timeOffset)
    {
        // compute the corresponding sample index according to the sample rate and channel count
        m_currentSample = static_cast<std::size_t>(timeOffset.asSeconds() * getSampleRate() * getChannelCount());
    }
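
Note: Examples 6 and 7 are the onSeek() callback of SFML's custom sf::SoundStream pattern. For context, a minimal sketch of the matching onGetData() callback, assuming the stream keeps its decoded samples in a std::vector<sf::Int16> member m_samples (as in the SFML tutorial); the chunk size is an arbitrary example value.

bool MyStream::onGetData(Chunk& data)
{
    // number of samples to stream every time the function is called;
    // one second of mono audio at 44100 Hz is an arbitrary choice
    const std::size_t samplesPerChunk = 44100;

    // point the chunk at the next block of samples
    data.samples = m_samples.data() + m_currentSample;

    if (m_currentSample + samplesPerChunk <= m_samples.size())
    {
        // stream a full chunk and advance the read position
        data.sampleCount = samplesPerChunk;
        m_currentSample += samplesPerChunk;
        return true; // keep streaming
    }
    else
    {
        // stream the remaining samples and stop afterwards
        data.sampleCount = m_samples.size() - m_currentSample;
        m_currentSample = m_samples.size();
        return false; // end of stream
    }
}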
Example 8
unsigned int sfSoundStream_getChannelCount(const sfSoundStream* soundStream)
{
    CSFML_CALL_RETURN(soundStream, getChannelCount(), 0);
}
Example 9
void SoundSourceFLAC::flacMetadata(const FLAC__StreamMetadata* metadata) {
    // https://xiph.org/flac/api/group__flac__stream__decoder.html#ga43e2329c15731c002ac4182a47990f85
    // "...one STREAMINFO block, followed by zero or more other metadata blocks."
    // "...by default the decoder only calls the metadata callback for the STREAMINFO block..."
    // "...always before the first audio frame (i.e. write callback)."
    switch (metadata->type) {
    case FLAC__METADATA_TYPE_STREAMINFO:
    {
        const SINT channelCount = metadata->data.stream_info.channels;
        if (isValidChannelCount(channelCount)) {
            if (hasChannelCount()) {
                // already set before -> check for consistency
                if (getChannelCount() != channelCount) {
                    qWarning() << "Unexpected channel count:"
                            << channelCount << " <> " << getChannelCount();
                }
            } else {
                // not set before
                setChannelCount(channelCount);
            }
        } else {
            qWarning() << "Invalid channel count:"
                    << channelCount;
        }
        const SINT samplingRate = metadata->data.stream_info.sample_rate;
        if (isValidSamplingRate(samplingRate)) {
            if (hasSamplingRate()) {
                // already set before -> check for consistency
                if (getSamplingRate() != samplingRate) {
                    qWarning() << "Unexpected sampling rate:"
                            << samplingRate << " <> " << getSamplingRate();
                }
            } else {
                // not set before
                setSamplingRate(samplingRate);
            }
        } else {
            qWarning() << "Invalid sampling rate:"
                    << samplingRate;
        }
        const SINT frameCount = metadata->data.stream_info.total_samples;
        DEBUG_ASSERT(isValidFrameCount(frameCount));
        if (isEmpty()) {
            // not set before
            setFrameCount(frameCount);
        } else {
            // already set before -> check for consistency
            if (getFrameCount() != frameCount) {
                qWarning() << "Unexpected frame count:"
                        << frameCount << " <> " << getFrameCount();
            }
        }
        const unsigned bitsPerSample = metadata->data.stream_info.bits_per_sample;
        DEBUG_ASSERT(kBitsPerSampleDefault != bitsPerSample);
        if (kBitsPerSampleDefault == m_bitsPerSample) {
            // not set before
            m_bitsPerSample = bitsPerSample;
            m_sampleScaleFactor = CSAMPLE_PEAK
                    / CSAMPLE(FLAC__int32(1) << bitsPerSample);
        } else {
            // already set before -> check for consistency
            if (bitsPerSample != m_bitsPerSample) {
                qWarning() << "Unexpected bits per sample:"
                        << bitsPerSample << " <> " << m_bitsPerSample;
            }
        }
        m_maxBlocksize = metadata->data.stream_info.max_blocksize;
        if (0 >= m_maxBlocksize) {
            qWarning() << "Invalid max. blocksize" << m_maxBlocksize;
        }
        const SINT sampleBufferCapacity =
                m_maxBlocksize * getChannelCount();
        m_sampleBuffer.resetCapacity(sampleBufferCapacity);
        break;
    }
    default:
        // Ignore all other metadata types
        break;
    }
}
Example 10
FLAC__StreamDecoderWriteStatus SoundSourceFLAC::flacWrite(
        const FLAC__Frame* frame, const FLAC__int32* const buffer[]) {
    const SINT numChannels = frame->header.channels;
    if (getChannelCount() > numChannels) {
        qWarning() << "Corrupt or unsupported FLAC file:"
                << "Invalid number of channels in FLAC frame header"
                << frame->header.channels << "<>" << getChannelCount();
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }
    if (getSamplingRate() != SINT(frame->header.sample_rate)) {
        qWarning() << "Corrupt or unsupported FLAC file:"
                << "Invalid sample rate in FLAC frame header"
                << frame->header.sample_rate << "<>" << getSamplingRate();
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }
    const SINT numReadableFrames = frame->header.blocksize;
    if (numReadableFrames > m_maxBlocksize) {
        qWarning() << "Corrupt or unsupported FLAC file:"
                << "Block size in FLAC frame header exceeds the maximum block size"
                << frame->header.blocksize << ">" << m_maxBlocksize;
        return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
    }

    // According to the API docs the decoder will always report the current
    // position in "FLAC samples" (= "Mixxx frames") for convenience
    DEBUG_ASSERT(frame->header.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
    m_curFrameIndex = frame->header.number.sample_number;

    // Decode buffer should be empty before decoding the next frame
    DEBUG_ASSERT(m_sampleBuffer.isEmpty());
    const SampleBuffer::WritableChunk writableChunk(
            m_sampleBuffer.writeToTail(frames2samples(numReadableFrames)));

    const SINT numWritableFrames = samples2frames(writableChunk.size());
    DEBUG_ASSERT(numWritableFrames <= numReadableFrames);
    if (numWritableFrames < numReadableFrames) {
        qWarning() << "Sample buffer has not enough free space for all decoded FLAC samples:"
                << numWritableFrames << "<" << numReadableFrames;
    }

    CSAMPLE* pSampleBuffer = writableChunk.data();
    DEBUG_ASSERT(getChannelCount() <= numChannels);
    switch (getChannelCount()) {
    case 1: {
        // optimized code for 1 channel (mono)
        for (SINT i = 0; i < numWritableFrames; ++i) {
            *pSampleBuffer++ = buffer[0][i] * m_sampleScaleFactor;
        }
        break;
    }
    case 2: {
        // optimized code for 2 channels (stereo)
        for (SINT i = 0; i < numWritableFrames; ++i) {
            *pSampleBuffer++ = buffer[0][i] * m_sampleScaleFactor;
            *pSampleBuffer++ = buffer[1][i] * m_sampleScaleFactor;
        }
        break;
    }
    default: {
        // generic code for multiple channels
        for (SINT i = 0; i < numWritableFrames; ++i) {
            for (SINT j = 0; j < getChannelCount(); ++j) {
                *pSampleBuffer++ = buffer[j][i] * m_sampleScaleFactor;
            }
        }
    }
    }

    return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
}
Example 11
SINT SoundSourceFLAC::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer,
        SINT sampleBufferSize, bool readStereoSamples) {
    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(getSampleBufferSize(numberOfFrames, readStereoSamples) <= sampleBufferSize);

    const SINT numberOfFramesTotal =
            math_min(numberOfFrames, getMaxFrameIndex() - m_curFrameIndex);
    const SINT numberOfSamplesTotal = frames2samples(numberOfFramesTotal);

    CSAMPLE* outBuffer = sampleBuffer;
    SINT numberOfSamplesRemaining = numberOfSamplesTotal;
    while (0 < numberOfSamplesRemaining) {
        // If our buffer from libflac is empty (either because we explicitly cleared
        // it or because we've simply used all the samples), ask for a new buffer
        if (m_sampleBuffer.isEmpty()) {
            // Save the current frame index
            const SINT curFrameIndexBeforeProcessing = m_curFrameIndex;
            // Documentation of FLAC__stream_decoder_process_single():
            // "Depending on what was decoded, the metadata or write callback
            // will be called with the decoded metadata block or audio frame."
            // See also: https://xiph.org/flac/api/group__flac__stream__decoder.html#ga9d6df4a39892c05955122cf7f987f856
            if (!FLAC__stream_decoder_process_single(m_decoder)) {
                qWarning() << "Failed to decode FLAC file"
                        << m_file.fileName();
                break; // abort
            }
            // After seeking, we might need to skip some samples if the decoder
            // reported a loss of sync for some (malformed?) files
            if (curFrameIndexBeforeProcessing != m_curFrameIndex) {
                if (curFrameIndexBeforeProcessing > m_curFrameIndex) {
                    qWarning() << "Trying to adjust frame index"
                            << m_curFrameIndex << "<>" << curFrameIndexBeforeProcessing
                            << "while decoding FLAC file"
                            << m_file.fileName();
                    skipSampleFrames(curFrameIndexBeforeProcessing - m_curFrameIndex);
                } else {
                    qWarning() << "Unexpected frame index"
                            << m_curFrameIndex << "<>" << curFrameIndexBeforeProcessing
                            << "while decoding FLAC file"
                            << m_file.fileName();
                    break; // abort
                }
            }
            DEBUG_ASSERT(curFrameIndexBeforeProcessing == m_curFrameIndex);
        }
        if (m_sampleBuffer.isEmpty()) {
            break; // EOF
        }

        const SampleBuffer::ReadableChunk readableChunk(
                m_sampleBuffer.readFromHead(numberOfSamplesRemaining));
        const SINT framesToCopy = samples2frames(readableChunk.size());
        if (outBuffer) {
            if (readStereoSamples && (kChannelCountStereo != getChannelCount())) {
                if (kChannelCountMono == getChannelCount()) {
                    SampleUtil::copyMonoToDualMono(outBuffer,
                            readableChunk.data(),
                            framesToCopy);
                } else {
                    SampleUtil::copyMultiToStereo(outBuffer,
                            readableChunk.data(),
                            framesToCopy, getChannelCount());
                }
                outBuffer += framesToCopy * kChannelCountStereo;
            } else {
                SampleUtil::copy(outBuffer, readableChunk.data(), readableChunk.size());
                outBuffer += readableChunk.size();
            }
        }
        m_curFrameIndex += framesToCopy;
        numberOfSamplesRemaining -= readableChunk.size();
    }

    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(numberOfSamplesTotal >= numberOfSamplesRemaining);
    return samples2frames(numberOfSamplesTotal - numberOfSamplesRemaining);
}
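
Note: readSampleFrames() above optionally reformats the decoded samples to stereo via SampleUtil helpers. A rough sketch of what a helper like SampleUtil::copyMonoToDualMono is assumed to do (illustration only, not Mixxx's actual implementation):

// Duplicate each mono input frame into both channels of an interleaved
// stereo output buffer.
static void copyMonoToDualMono(float* pDest, const float* pSrc, int numFrames) {
    for (int i = 0; i < numFrames; ++i) {
        const float monoSample = pSrc[i];
        *pDest++ = monoSample; // left channel
        *pDest++ = monoSample; // right channel
    }
}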
Example 12
bool TNetDevice::prepareDrawData()
{
    static int old_smp_to_draw=0;
    static int old_chan_count=0;
    double t,w,center_delta_x,center_delta_y,center_delta;
    QMutexLocker locker(&mutex);
    int cur_freq=getCurChannel();
    int chan_count=getChannelCount();
    int smp_to_draw=getSweepSmpCountToDraw();
    int rvtp_smp_to_draw=getRvtpSmpCountToDraw();
    w=getXYRange()*2;
    t=w/(chan_count+1);
    if(!isNewSmpExist())
        return false;
    if(smp_to_draw>0)
    {
        //client->getNetDevice()->calcSpectr();
        //cout<<"sweep_smp_to_draw="<<smp_to_draw<<endl;
        setInfoReady(true);
        if((smp_to_draw!=old_smp_to_draw)||(old_chan_count!=chan_count))
        {
            old_chan_count=chan_count;
            old_smp_to_draw=smp_to_draw;
            for(int chan=0;chan<chan_count;chan++)
            {
                drawXBuf[chan].resize(smp_to_draw);
                drawYBuf[chan].resize(smp_to_draw);
                drawARawBuf[chan].resize(smp_to_draw);
            }
            if(rvtp_smp_to_draw)
            {
                drawRvtpXBuf.resize(rvtp_smp_to_draw);
                drawRvtpYBuf.resize(rvtp_smp_to_draw);
            }
            drawSweepXBuf.resize(smp_to_draw);
            drawSweepYBuf.resize(smp_to_draw);
            drawSweepARawBuf.resize(smp_to_draw);
            drawSweepRVTPBuf.resize(smp_to_draw);
            drawSweepRVTPBufS.resize(smp_to_draw);
            drawSweepOV1Buf.resize(smp_to_draw);
            drawSweepOV2Buf.resize(smp_to_draw);
            drawSweepASDBuf.resize(smp_to_draw);
        }

        for(int i=0;i<smp_to_draw;i++)
        {
            for(int chan=0;chan<chan_count;chan++)
            {
                center_delta=(chan-chan_count/2)*t;
                if(chan_count%2==0)
                {
                    center_delta+=t/2;
                }
                if(viewType==VT_XY)
                {
                    center_delta_x=center_delta;
                    center_delta_y=0;
                }
                else
                {
                    center_delta_x=center_delta_y=center_delta;
                }
                drawXBuf[chan][i]=getSweepChanXBuf(chan,i)+center_delta_x;
                drawYBuf[chan][i]=getSweepChanYBuf(chan,i)+center_delta_y;
                drawARawBuf[chan][i]=getSweepChanARawBuf(chan,i)+center_delta_y;
            }
            drawSweepXBuf[i]=getSweepChanXBuf(cur_freq,i);//+Cntr;
            drawSweepYBuf[i]=getSweepChanYBuf(cur_freq,i);//-Cntr;
            drawSweepARawBuf[i]=getSweepChanARawBuf(cur_freq,i);
            drawSweepRVTPBufS[i]=getSweepRVTPBuf(i)*(1<<17)-(1<<16);
            drawSweepRVTPBuf[i]=getSweepRVTPBuf(i)+0.1;
            drawSweepOV1Buf[i]=getSweepOV1Buf(i)+1.1;
            drawSweepOV2Buf[i]=getSweepOV2Buf(i)+2.1;
            drawSweepASDBuf[i]=getSweepASDBuf(i)+3.1;
        }
        for(int j=0;j<rvtp_smp_to_draw;j++)
        {
            drawRvtpXBuf[j]=getRvtpChanXBuf(cur_freq,j);
            drawRvtpYBuf[j]=getRvtpChanYBuf(cur_freq,j);
        }
        setSignalsReady(true);
        return true;
    }
    return false;
}
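
Note: prepareDrawData() spreads the chan_count traces symmetrically across a range of width w = 2 * getXYRange() with step t = w / (chan_count + 1). A small standalone check of those offsets, using the same arithmetic with values chosen purely for illustration:

#include <cstdio>

int main() {
    const double w = 10.0;    // hypothetical drawing range width
    const int chan_count = 4; // even channel count
    const double t = w / (chan_count + 1);
    for (int chan = 0; chan < chan_count; ++chan) {
        // same centering arithmetic as in prepareDrawData()
        double center_delta = (chan - chan_count / 2) * t;
        if (chan_count % 2 == 0) {
            center_delta += t / 2; // recenters the offsets for even counts
        }
        std::printf("chan %d -> offset %+.1f\n", chan, center_delta);
    }
    // prints -3.0, -1.0, +1.0, +3.0, i.e. offsets symmetric around zero
    return 0;
}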