Example #1
0
int InboundAudioStream::parseData(ReceivedMessage& message) {
    // Parses one inbound audio packet: tracks the sequence number, lets the
    // subclass consume the stream-property header, then routes the payload
    // into the ring buffer (or writes silence for silent/mismatched packets).
    // Returns the message read position after parsing.

    // parse sequence number and track it
    quint16 sequence;
    message.readPrimitive(&sequence);
    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence,
                                                                                                       message.getSourceID());
    QString codecInPacket = message.readString();

    packetReceivedUpdateTimingStats();

    int networkSamples;

    // parse the info after the seq number and before the audio data (the stream properties)
    int prePropertyPosition = message.getPosition();
    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkSamples);
    // seek past exactly the property bytes the subclass consumed
    message.seek(prePropertyPosition + propertyBytes);

    // handle this packet based on its arrival status.
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Early: {
            // Packet is early; write droppable silent samples for each of the skipped packets.
            // NOTE: we assume that each dropped packet contains the same number of samples
            // as the packet we just received.
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
            writeSamplesForDroppedPackets(packetsDropped * networkSamples);

            // fall through to OnTime case
        }
        case SequenceNumberStats::OnTime: {
            // Packet is on time; parse its data to the ringbuffer
            if (message.getType() == PacketType::SilentAudioFrame) {
                // FIXME - Some codecs need to know about these silent frames... and can produce better output
                writeDroppableSilentSamples(networkSamples);
            } else {
                // note: PCM and no codec are identical
                bool selectedPCM = _selectedCodecName == "pcm" || _selectedCodecName == "";
                bool packetPCM = codecInPacket == "pcm" || codecInPacket == "";
                if (codecInPacket == _selectedCodecName || (packetPCM && selectedPCM)) {
                    auto afterProperties = message.readWithoutCopy(message.getBytesLeftToRead());
                    parseAudioData(message.getType(), afterProperties);
                } else {
                    qDebug() << "Codec mismatch: expected" << _selectedCodecName << "got" << codecInPacket << "writing silence";
                    writeDroppableSilentSamples(networkSamples);
                    // inform others of the mismatch -- but only if the sender can still be
                    // resolved; nodeWithUUID() may return a null pointer for an unknown or
                    // recently-departed node, and emitting with a null node would pass an
                    // invalid pointer to every connected slot.
                    auto sendingNode = DependencyManager::get<NodeList>()->nodeWithUUID(message.getSourceID());
                    if (sendingNode) {
                        emit mismatchedAudioCodec(sendingNode, _selectedCodecName, codecInPacket);
                    }
                }
            }
            break;
        }
        default: {
            // For now, late packets are ignored.  It may be good in the future to insert the late audio packet data
            // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // if this stream was starved, check if we're still starved.
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        _isStarved = false;
    }
    // if the ringbuffer exceeds the desired size by more than the threshold specified,
    // drop the oldest frames so the ringbuffer is down to the desired size.
    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());

        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;

        qCDebug(audiostream, "Dropped %d frames", framesToDrop);
        qCDebug(audiostream, "Reset current jitter frames");
    }

    framesAvailableChanged();

    return message.getPosition();
}
Example #2
0
int InboundAudioStream::parseData(ReceivedMessage& message) {
    // Parses one inbound audio packet: tracks the sequence number, lets the
    // subclass consume the stream-property header, then routes the payload into
    // the ring buffer -- or synthesizes silence / lets the codec interpolate
    // lost data. Returns the message read position after parsing.

    // parse sequence number and track it
    quint16 sequence;
    message.readPrimitive(&sequence);
    SequenceNumberStats::ArrivalInfo arrivalInfo =
        _incomingSequenceNumberStats.sequenceNumberReceived(sequence, message.getSourceID());
    QString codecInPacket = message.readString();

    packetReceivedUpdateTimingStats();

    int networkFrames;

    // parse the info after the seq number and before the audio data (the stream properties)
    int prePropertyPosition = message.getPosition();
    int propertyBytes = parseStreamProperties(message.getType(), message.readWithoutCopy(message.getBytesLeftToRead()), networkFrames);

    // seek past exactly the property bytes the subclass consumed
    message.seek(prePropertyPosition + propertyBytes);

    // handle this packet based on its arrival status.
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Unreasonable: {
            // Sequence number is too far from what we expect to classify; treat
            // this as one lost packet so the codec can interpolate/fade.
            lostAudioData(1);
            break;
        }
        case SequenceNumberStats::Early: {
            // Packet is early. Treat the packets as if all the packets between the last
            // OnTime packet and this packet were lost. If we're using a codec this will 
            // also result in allowing the codec to interpolate lost data. Then
            // fall through to the "on time" logic to actually handle this packet
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
            lostAudioData(packetsDropped);

            // fall through to OnTime case
        }
        case SequenceNumberStats::OnTime: {
            // Packet is on time; parse its data to the ringbuffer
            if (message.getType() == PacketType::SilentAudioFrame
                || message.getType() == PacketType::ReplicatedSilentAudioFrame) {
                // If we received a SilentAudioFrame from our sender, we might want to drop
                // some of the samples in order to catch up to our desired jitter buffer size.
                writeDroppableSilentFrames(networkFrames);

            } else {
                // note: PCM and no codec are identical
                bool selectedPCM = _selectedCodecName == "pcm" || _selectedCodecName == "";
                bool packetPCM = codecInPacket == "pcm" || codecInPacket == "";
                if (codecInPacket == _selectedCodecName || (packetPCM && selectedPCM)) {
                    auto afterProperties = message.readWithoutCopy(message.getBytesLeftToRead());
                    parseAudioData(message.getType(), afterProperties);
                    // a matching packet ends any mismatch streak
                    _mismatchedAudioCodecCount = 0;

                } else {
                    _mismatchedAudioCodecCount++;
                    qDebug(audio) << "Codec mismatch: expected" << _selectedCodecName << "got" << codecInPacket;

                    if (packetPCM) {
                        // If there are PCM packets in-flight after the codec is changed, use them.
                        auto afterProperties = message.readWithoutCopy(message.getBytesLeftToRead());
                        _ringBuffer.writeData(afterProperties.data(), afterProperties.size());
                    } else {
                        // Since the data in the stream is using a codec that we aren't prepared for,
                        // we need to let the codec know that we don't have data for it, this will
                        // allow the codec to interpolate missing data and produce a fade to silence.
                        lostAudioData(1);
                    }

                    // Only notify the sender after a sustained run of mismatches,
                    // so a few stale in-flight packets don't trigger a renegotiation.
                    if (_mismatchedAudioCodecCount > MAX_MISMATCHED_AUDIO_CODEC_COUNT) {
                        _mismatchedAudioCodecCount = 0;

                        // inform others of the mismatch
                        auto sendingNode = DependencyManager::get<NodeList>()->nodeWithLocalID(message.getSourceID());
                        if (sendingNode) {
                            emit mismatchedAudioCodec(sendingNode, _selectedCodecName, codecInPacket);
                            qDebug(audio) << "Codec mismatch threshold exceeded, SelectedAudioFormat(" << _selectedCodecName << " ) sent";
                        }
                    }
                }
            }
            break;
        }
        default: {
            // For now, late packets are ignored.  It may be good in the future to insert the late audio packet data
            // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // if this stream was starved, check if we're still starved.
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        qCInfo(audiostream, "Starve ended");
        _isStarved = false;
    }
    // if the ringbuffer exceeds the desired size by more than the threshold specified,
    // drop the oldest frames so the ringbuffer is down to the desired size.
    if (framesAvailable > _desiredJitterBufferFrames + MAX_FRAMES_OVER_DESIRED) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());
        
        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;

        qCInfo(audiostream, "Dropped %d frames", framesToDrop);
        qCInfo(audiostream, "Reset current jitter frames");
    }

    framesAvailableChanged();

    return message.getPosition();
}
Example #3
0
int InboundAudioStream::parseData(const QByteArray& packet) {

    // Parses one raw audio packet buffer: header, sequence number, stream
    // properties, then the audio payload. Returns the number of bytes consumed.
    PacketType packetType = packetTypeForPacket(packet);
    QUuid senderUUID = uuidFromPacketHeader(packet);

    // step past the packet header first
    int headerBytes = numBytesForPacketHeader(packet);
    const char* cursor = packet.constData() + headerBytes;
    int bytesConsumed = headerBytes;

    // the sequence number follows the header; record it for arrival-order stats
    quint16 sequence = *(reinterpret_cast<const quint16*>(cursor));
    bytesConsumed += sizeof(quint16);
    SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID);

    frameReceivedUpdateTimingStats();

    // TODO: handle generalized silent packet here?????

    // the stream properties sit between the sequence number and the audio payload
    int numAudioSamples;
    bytesConsumed += parseStreamProperties(packetType, packet.mid(bytesConsumed), numAudioSamples);

    // Handle the packet according to its arrival classification. Late packets are
    // ignored for now; they could eventually backfill frames not yet mixed.
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Early: {
            // pad with droppable silence for every packet that was skipped over
            int droppedPacketCount = arrivalInfo._seqDiffFromExpected;
            writeSamplesForDroppedPackets(droppedPacketCount * numAudioSamples);
            // fall through to OnTime case
        }
        case SequenceNumberStats::OnTime: {
            bytesConsumed += parseAudioData(packetType, packet.mid(bytesConsumed), numAudioSamples);
            break;
        }
        default: {
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // a starved stream recovers once enough frames have accumulated
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        _isStarved = false;
    }
    // trim the ring buffer back down when it has overshot the desired size
    // by more than the configured threshold
    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());

        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;
    }

    framesAvailableChanged();

    return bytesConsumed;
}
int InboundAudioStream::parseData(NLPacket& packet) {

    // Parses one inbound audio packet: tracks the sequence number, lets the
    // subclass consume the stream-property header, then routes the payload into
    // the ring buffer. Returns the packet read position after parsing.

    // read and track the sequence number that leads every audio packet
    quint16 sequence;
    packet.readPrimitive(&sequence);
    SequenceNumberStats::ArrivalInfo arrivalInfo =
        _incomingSequenceNumberStats.sequenceNumberReceived(sequence, packet.getSourceID());

    packetReceivedUpdateTimingStats();

    int numNetworkSamples;

    // the stream properties sit between the sequence number and the audio data;
    // let the subclass parse them, then seek past however many bytes it consumed
    int positionBeforeProperties = packet.pos();
    int bytesOfProperties = parseStreamProperties(packet.getType(), packet.read(packet.bytesLeftToRead()), numNetworkSamples);
    packet.seek(positionBeforeProperties + bytesOfProperties);

    // dispatch on how this packet arrived relative to the expected sequence
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Early: {
            // An early arrival means some packets were skipped; pad with droppable
            // silence, assuming each missing packet carried the same sample count
            // as the one we just received.
            int skippedPackets = arrivalInfo._seqDiffFromExpected;
            writeSamplesForDroppedPackets(skippedPackets * numNetworkSamples);

            // fall through to OnTime case
        }
        case SequenceNumberStats::OnTime: {
            // on-time packet: silent frames become silence, everything else is parsed
            if (packet.getType() == PacketType::SilentAudioFrame) {
                writeDroppableSilentSamples(numNetworkSamples);
            } else {
                parseAudioData(packet.getType(), packet.read(packet.bytesLeftToRead()), numNetworkSamples);
            }
            break;
        }
        default: {
            // Late packets are currently ignored.  They could eventually be used to
            // backfill a missing frame in the ring buffer if it hasn't been mixed yet.
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // a starved stream recovers once enough frames have accumulated
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        _isStarved = false;
    }
    // trim the ring buffer back down when it has overshot the desired size
    // by more than the configured threshold
    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());

        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;
    }

    framesAvailableChanged();

    return packet.pos();
}