/// Conceals packets that never arrived by writing substitute samples into the
/// ring buffer. The concealment strategy is chosen by _repetitionWithFade:
/// either repeat the last received frame with a fade, or insert droppable
/// silence. Returns the number of samples written.
int InboundAudioStream::writeSamplesForDroppedPackets(int networkSamples) {
    return _repetitionWithFade
        ? writeLastFrameRepeatedWithFade(networkSamples)
        : writeDroppableSilentSamples(networkSamples);
}
/// Parses one inbound audio packet: tracks its sequence number, conceals any
/// packets skipped before it, decodes (or silences) its payload into the ring
/// buffer, then performs starvation/overflow housekeeping on the ring buffer.
/// @param message  the received network message, read cursor at start of payload
/// @return the message's final read position (bytes consumed)
int InboundAudioStream::parseData(ReceivedMessage& message) {
    // parse sequence number and track it
    quint16 sequence;
    message.readPrimitive(&sequence);
    SequenceNumberStats::ArrivalInfo arrivalInfo =
        _incomingSequenceNumberStats.sequenceNumberReceived(sequence, message.getSourceID());

    // the sender stamps each packet with the codec it encoded with; compared below
    // against our selected codec before attempting to decode
    QString codecInPacket = message.readString();

    packetReceivedUpdateTimingStats();

    int networkSamples;

    // parse the info after the seq number and before the audio data (the stream properties)
    // parseStreamProperties only reports how many bytes it consumed, so remember the
    // position and seek forward past the properties afterward
    int prePropertyPosition = message.getPosition();
    int propertyBytes = parseStreamProperties(message.getType(),
        message.readWithoutCopy(message.getBytesLeftToRead()), networkSamples);
    message.seek(prePropertyPosition + propertyBytes);

    // handle this packet based on its arrival status.
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Early: {
            // Packet is early; write droppable silent samples for each of the skipped packets.
            // NOTE: we assume that each dropped packet contains the same number of samples
            // as the packet we just received.
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
            writeSamplesForDroppedPackets(packetsDropped * networkSamples);

            // fall through to OnTime case (intentional: the early packet itself still
            // needs to be written after the gap is concealed)
        }
        case SequenceNumberStats::OnTime: {
            // Packet is on time; parse its data to the ringbuffer
            if (message.getType() == PacketType::SilentAudioFrame) {
                // FIXME - Some codecs need to know about these silent frames... and can
                // produce better output
                writeDroppableSilentSamples(networkSamples);
            } else {
                // note: PCM and no codec are identical
                bool selectedPCM = _selectedCodecName == "pcm" || _selectedCodecName == "";
                bool packetPCM = codecInPacket == "pcm" || codecInPacket == "";
                if (codecInPacket == _selectedCodecName || (packetPCM && selectedPCM)) {
                    auto afterProperties = message.readWithoutCopy(message.getBytesLeftToRead());
                    parseAudioData(message.getType(), afterProperties);
                } else {
                    // codec mismatch: we can't decode this payload, so substitute silence
                    // for this frame instead of feeding garbage to the decoder
                    qDebug() << "Codec mismatch: expected" << _selectedCodecName
                        << "got" << codecInPacket << "writing silence";
                    writeDroppableSilentSamples(networkSamples);

                    // inform others of the mismatch so the sender can be renegotiated
                    auto sendingNode = DependencyManager::get<NodeList>()->nodeWithUUID(message.getSourceID());
                    emit mismatchedAudioCodec(sendingNode, _selectedCodecName, codecInPacket);
                }
            }
            break;
        }
        default: {
            // For now, late packets are ignored. It may be good in the future to insert the late audio packet data
            // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // if this stream was starved, check if we're still starved.
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        _isStarved = false;
    }
    // if the ringbuffer exceeds the desired size by more than the threshold specified,
    // drop the oldest frames so the ringbuffer is down to the desired size.
    // NOTE(review): the trigger threshold uses _maxFramesOverDesired but the drop target
    // is desired + DESIRED_JITTER_BUFFER_FRAMES_PADDING — presumably deliberate hysteresis
    // so we don't drop on every packet; confirm against the stream tuning docs.
    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());

        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;

        qCDebug(audiostream, "Dropped %d frames", framesToDrop);
        // "Resetted" (sic) — grammatical typo in the log text, left unchanged here
        qCDebug(audiostream, "Resetted current jitter frames");
    }

    framesAvailableChanged();

    return message.getPosition();
}
int InboundAudioStream::parseData(const QByteArray& packet) { PacketType packetType = packetTypeForPacket(packet); QUuid senderUUID = uuidFromPacketHeader(packet); // parse header int numBytesHeader = numBytesForPacketHeader(packet); const char* dataAt = packet.constData() + numBytesHeader; int readBytes = numBytesHeader; // parse sequence number and track it quint16 sequence = *(reinterpret_cast<const quint16*>(dataAt)); dataAt += sizeof(quint16); readBytes += sizeof(quint16); SequenceNumberStats::ArrivalInfo arrivalInfo = _incomingSequenceNumberStats.sequenceNumberReceived(sequence, senderUUID); packetReceivedUpdateTimingStats(); int networkSamples; // parse the info after the seq number and before the audio data (the stream properties) readBytes += parseStreamProperties(packetType, packet.mid(readBytes), networkSamples); // handle this packet based on its arrival status. switch (arrivalInfo._status) { case SequenceNumberStats::Early: { // Packet is early; write droppable silent samples for each of the skipped packets. // NOTE: we assume that each dropped packet contains the same number of samples // as the packet we just received. int packetsDropped = arrivalInfo._seqDiffFromExpected; writeSamplesForDroppedPackets(packetsDropped * networkSamples); // fall through to OnTime case } case SequenceNumberStats::OnTime: { // Packet is on time; parse its data to the ringbuffer if (packetType == PacketTypeSilentAudioFrame) { writeDroppableSilentSamples(networkSamples); } else { readBytes += parseAudioData(packetType, packet.mid(readBytes), networkSamples); } break; } default: { // For now, late packets are ignored. It may be good in the future to insert the late audio packet data // into the ring buffer to fill in the missing frame if it hasn't been mixed yet. break; } } int framesAvailable = _ringBuffer.framesAvailable(); // if this stream was starved, check if we're still starved. 
if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) { _isStarved = false; } // if the ringbuffer exceeds the desired size by more than the threshold specified, // drop the oldest frames so the ringbuffer is down to the desired size. if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) { int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING); _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples()); _framesAvailableStat.reset(); _currentJitterBufferFrames = 0; _oldFramesDropped += framesToDrop; } framesAvailableChanged(); return readBytes; }
/// Conceals dropped packets by inserting droppable silent samples into the
/// ring buffer; returns the number of samples actually written.
int InboundAudioStream::writeSamplesForDroppedPackets(int numSamples) {
    const int samplesWritten = writeDroppableSilentSamples(numSamples);
    return samplesWritten;
}
/// Parses one inbound audio packet: tracks its sequence number, conceals any
/// packets skipped before it, writes its payload (or silence) into the ring
/// buffer, then performs starvation/overflow housekeeping on the ring buffer.
/// @param packet  the received packet, read cursor at start of payload
/// @return the packet's final read position (bytes consumed)
int InboundAudioStream::parseData(NLPacket& packet) {
    // parse sequence number and track it
    quint16 sequence;
    packet.readPrimitive(&sequence);
    SequenceNumberStats::ArrivalInfo arrivalInfo =
        _incomingSequenceNumberStats.sequenceNumberReceived(sequence, packet.getSourceID());

    packetReceivedUpdateTimingStats();

    int networkSamples;

    // parse the info after the seq number and before the audio data (the stream properties)
    // parseStreamProperties only reports how many bytes it consumed, so remember the
    // position and seek forward past the properties afterward
    int prePropertyPosition = packet.pos();
    int propertyBytes = parseStreamProperties(packet.getType(),
        packet.read(packet.bytesLeftToRead()), networkSamples);
    packet.seek(prePropertyPosition + propertyBytes);

    // handle this packet based on its arrival status.
    switch (arrivalInfo._status) {
        case SequenceNumberStats::Early: {
            // Packet is early; write droppable silent samples for each of the skipped packets.
            // NOTE: we assume that each dropped packet contains the same number of samples
            // as the packet we just received.
            int packetsDropped = arrivalInfo._seqDiffFromExpected;
            writeSamplesForDroppedPackets(packetsDropped * networkSamples);

            // fall through to OnTime case (intentional: the early packet itself still
            // needs to be written after the gap is concealed)
        }
        case SequenceNumberStats::OnTime: {
            // Packet is on time; parse its data to the ringbuffer
            if (packet.getType() == PacketType::SilentAudioFrame) {
                writeDroppableSilentSamples(networkSamples);
            } else {
                parseAudioData(packet.getType(), packet.read(packet.bytesLeftToRead()), networkSamples);
            }
            break;
        }
        default: {
            // For now, late packets are ignored. It may be good in the future to insert the late audio packet data
            // into the ring buffer to fill in the missing frame if it hasn't been mixed yet.
            break;
        }
    }

    int framesAvailable = _ringBuffer.framesAvailable();
    // if this stream was starved, check if we're still starved.
    if (_isStarved && framesAvailable >= _desiredJitterBufferFrames) {
        _isStarved = false;
    }
    // if the ringbuffer exceeds the desired size by more than the threshold specified,
    // drop the oldest frames so the ringbuffer is down to the desired size.
    // NOTE(review): trigger uses _maxFramesOverDesired, drop target uses
    // DESIRED_JITTER_BUFFER_FRAMES_PADDING — presumably deliberate hysteresis; confirm.
    if (framesAvailable > _desiredJitterBufferFrames + _maxFramesOverDesired) {
        int framesToDrop = framesAvailable - (_desiredJitterBufferFrames + DESIRED_JITTER_BUFFER_FRAMES_PADDING);
        _ringBuffer.shiftReadPosition(framesToDrop * _ringBuffer.getNumFrameSamples());

        _framesAvailableStat.reset();
        _currentJitterBufferFrames = 0;

        _oldFramesDropped += framesToDrop;
    }

    framesAvailableChanged();

    return packet.pos();
}