Example 1
int AudioMixerClientData::parseData(const QByteArray& packet) {
    PacketType packetType = packetTypeForPacket(packet);
    if (packetType == PacketTypeMicrophoneAudioWithEcho
        || packetType == PacketTypeMicrophoneAudioNoEcho
        || packetType == PacketTypeSilentAudioFrame) {

        // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
        AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
        
        // read the first byte after the header to see if this is a stereo or mono buffer
        quint8 channelFlag = packet.at(numBytesForPacketHeader(packet));
        bool isStereo = channelFlag == 1;
        
        if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
            // there's a mismatch in the buffer channels for the incoming and current buffer
            // so delete our current buffer and create a new one
            _ringBuffers.removeOne(avatarRingBuffer);
            avatarRingBuffer->deleteLater();
            avatarRingBuffer = NULL;
        }

        if (!avatarRingBuffer) {
            // we don't have an AvatarAudioRingBuffer yet, so add it
            avatarRingBuffer = new AvatarAudioRingBuffer(isStereo);
            _ringBuffers.push_back(avatarRingBuffer);
        }

        // ask the AvatarAudioRingBuffer instance to parse the data
        avatarRingBuffer->parseData(packet);
    } else {
        // this is injected audio

        // grab the stream identifier for this injected audio
        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet), NUM_BYTES_RFC4122_UUID));

        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;

        for (int i = 0; i < _ringBuffers.size(); i++) {
            if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
                && static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
                matchingInjectedRingBuffer = static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i]);
            }
        }

        if (!matchingInjectedRingBuffer) {
            // we don't have a matching injected audio ring buffer, so add it
            matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier);
            _ringBuffers.push_back(matchingInjectedRingBuffer);
        }

        matchingInjectedRingBuffer->parseData(packet);
    }

    return 0;
}
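Example 1 locates the injected stream with a linear scan and C-style casts. Below is a minimal sketch of the same lookup factored into a helper, assuming _ringBuffers is a QList<PositionalAudioRingBuffer*> (implied by the removeOne() call above); the name findInjectedRingBuffer is hypothetical and not part of the original class.

// Hypothetical helper: find the injected ring buffer for a stream, or NULL if absent.
// Returns at the first match, assuming each injected stream has a unique identifier.
InjectedAudioRingBuffer* AudioMixerClientData::findInjectedRingBuffer(const QUuid& streamIdentifier) const {
    foreach (PositionalAudioRingBuffer* ringBuffer, _ringBuffers) {
        if (ringBuffer->getType() == PositionalAudioRingBuffer::Injector
            && static_cast<InjectedAudioRingBuffer*>(ringBuffer)->getStreamIdentifier() == streamIdentifier) {
            return static_cast<InjectedAudioRingBuffer*>(ringBuffer);
        }
    }
    return NULL;
}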
Example 2
int AudioMixerClientData::parseData(const QByteArray& packet) {
    PacketType packetType = packetTypeForPacket(packet);
    if (packetType == PacketTypeMicrophoneAudioWithEcho
        || packetType == PacketTypeMicrophoneAudioNoEcho
        || packetType == PacketTypeSilentAudioFrame) {

        // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
        AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();

        if (!avatarRingBuffer) {
            // we don't have an AvatarAudioRingBuffer yet, so add it
            avatarRingBuffer = new AvatarAudioRingBuffer();
            _ringBuffers.push_back(avatarRingBuffer);
        }

        // ask the AvatarAudioRingBuffer instance to parse the data
        avatarRingBuffer->parseData(packet);
    } else {
        // this is injected audio

        // grab the stream identifier for this injected audio
        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet), NUM_BYTES_RFC4122_UUID));

        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;

        for (int i = 0; i < _ringBuffers.size(); i++) {
            if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
                && static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
                matchingInjectedRingBuffer = static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i]);
            }
        }

        if (!matchingInjectedRingBuffer) {
            // we don't have a matching injected audio ring buffer, so add it
            matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier);
            _ringBuffers.push_back(matchingInjectedRingBuffer);
        }

        matchingInjectedRingBuffer->parseData(packet);
    }

    return 0;
}
Example 3
int AudioMixerClientData::parseData(const QByteArray& packet) {

    // parse sequence number for this packet
    int numBytesPacketHeader = numBytesForPacketHeader(packet);
    const char* sequenceAt = packet.constData() + numBytesPacketHeader;
    quint16 sequence = *(reinterpret_cast<const quint16*>(sequenceAt));

    PacketType packetType = packetTypeForPacket(packet);
    if (packetType == PacketTypeMicrophoneAudioWithEcho
        || packetType == PacketTypeMicrophoneAudioNoEcho
        || packetType == PacketTypeSilentAudioFrame) {

        _incomingAvatarAudioSequenceNumberStats.sequenceNumberReceived(sequence);

        // grab the AvatarAudioRingBuffer from the vector (or create it if it doesn't exist)
        AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
        
        // read the first byte after the header to see if this is a stereo or mono buffer
        quint8 channelFlag = packet.at(numBytesForPacketHeader(packet) + sizeof(quint16));
        bool isStereo = channelFlag == 1;
        
        if (avatarRingBuffer && avatarRingBuffer->isStereo() != isStereo) {
            // there's a mismatch in the buffer channels for the incoming and current buffer
            // so delete our current buffer and create a new one
            _ringBuffers.removeOne(avatarRingBuffer);
            avatarRingBuffer->deleteLater();
            avatarRingBuffer = NULL;
        }

        if (!avatarRingBuffer) {
            // we don't have an AvatarAudioRingBuffer yet, so add it
            avatarRingBuffer = new AvatarAudioRingBuffer(isStereo, AudioMixer::getUseDynamicJitterBuffers());
            _ringBuffers.push_back(avatarRingBuffer);
        }

        // ask the AvatarAudioRingBuffer instance to parse the data
        avatarRingBuffer->parseData(packet);
    } else if (packetType == PacketTypeInjectAudio) {
        // this is injected audio

        // grab the stream identifier for this injected audio
        QUuid streamIdentifier = QUuid::fromRfc4122(packet.mid(numBytesForPacketHeader(packet) + sizeof(quint16), NUM_BYTES_RFC4122_UUID));

        if (!_incomingInjectedAudioSequenceNumberStatsMap.contains(streamIdentifier)) {
            _incomingInjectedAudioSequenceNumberStatsMap.insert(streamIdentifier, SequenceNumberStats(INCOMING_SEQ_STATS_HISTORY_LENGTH));
        }
        _incomingInjectedAudioSequenceNumberStatsMap[streamIdentifier].sequenceNumberReceived(sequence);

        InjectedAudioRingBuffer* matchingInjectedRingBuffer = NULL;

        for (int i = 0; i < _ringBuffers.size(); i++) {
            if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector
                && static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i])->getStreamIdentifier() == streamIdentifier) {
                matchingInjectedRingBuffer = static_cast<InjectedAudioRingBuffer*>(_ringBuffers[i]);
            }
        }

        if (!matchingInjectedRingBuffer) {
            // we don't have a matching injected audio ring buffer, so add it
            matchingInjectedRingBuffer = new InjectedAudioRingBuffer(streamIdentifier, AudioMixer::getUseDynamicJitterBuffers());
            _ringBuffers.push_back(matchingInjectedRingBuffer);
        }

        matchingInjectedRingBuffer->parseData(packet);
    } else if (packetType == PacketTypeAudioStreamStats) {

        const char* dataAt = packet.data();

        // skip over header, appendFlag, and num stats packed
        dataAt += (numBytesPacketHeader + sizeof(quint8) + sizeof(quint16));

        // read the downstream audio stream stats
        memcpy(&_downstreamAudioStreamStats, dataAt, sizeof(AudioStreamStats));
    }

    return 0;
}
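Example 3 obtains the sequence number by dereferencing a reinterpret_cast pointer into the packet body. That read is only well-defined when the bytes after the header happen to be 2-byte aligned, which an arbitrary QByteArray offset does not guarantee. A sketch of an equivalent, alignment-safe read (memcpy is already in scope, since the function uses it for the stream stats):

// Alignment-safe equivalent of the reinterpret_cast read above:
// copy the two bytes after the header into a local quint16.
quint16 sequence;
memcpy(&sequence, packet.constData() + numBytesPacketHeader, sizeof(sequence));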
Example 4
QString AudioMixerClientData::getAudioStreamStatsString() const {
    QString result;
    AudioStreamStats streamStats = _downstreamAudioStreamStats;
    result += "DOWNSTREAM.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
        + " current: ?"
        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
        + " silents_dropped: ?"
        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
        + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
        + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
        + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
        + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
        + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
        + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);

    AvatarAudioRingBuffer* avatarRingBuffer = getAvatarAudioRingBuffer();
    if (avatarRingBuffer) {
        AudioStreamStats streamStats = getAudioStreamStatsOfStream(avatarRingBuffer);
        result += " UPSTREAM.mic.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
            + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
            + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
            + " starves:" + QString::number(streamStats._ringBufferStarveCount)
            + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
            + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
            + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
            + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
            + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
            + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
            + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
            + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
            + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
            + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
            + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
    } else {
        // no avatar ring buffer yet; append a marker instead of discarding the downstream stats
        result += " mic unknown";
    }
    
    for (int i = 0; i < _ringBuffers.size(); i++) {
        if (_ringBuffers[i]->getType() == PositionalAudioRingBuffer::Injector) {
            AudioStreamStats streamStats = getAudioStreamStatsOfStream(_ringBuffers[i]);
            result += " UPSTREAM.inj.desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
                + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
                + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
                + " starves:" + QString::number(streamStats._ringBufferStarveCount)
                + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
                + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
                + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
                + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
                + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
                + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
                + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
                + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
                + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
                + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
                + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
        }
    }
    return result;
}
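The three stat blocks in Example 4 build the same field list three times, differing only in the prefix and in the two values the downstream stats lack. A minimal sketch of a formatting helper that would collapse the repetition and remove the shadowed streamStats locals; formatStreamStats is hypothetical, and every field name is taken from the code above:

// Hypothetical helper: formats one AudioStreamStats exactly as the blocks above do.
static QString formatStreamStats(const QString& prefix, const AudioStreamStats& streamStats) {
    return prefix + ".desired:" + QString::number(streamStats._ringBufferDesiredJitterBufferFrames)
        + " current:" + QString::number(streamStats._ringBufferCurrentJitterBufferFrames)
        + " available:" + QString::number(streamStats._ringBufferFramesAvailable)
        + " starves:" + QString::number(streamStats._ringBufferStarveCount)
        + " not_mixed:" + QString::number(streamStats._ringBufferConsecutiveNotMixedCount)
        + " overflows:" + QString::number(streamStats._ringBufferOverflowCount)
        + " silents_dropped:" + QString::number(streamStats._ringBufferSilentFramesDropped)
        + " lost%:" + QString::number(streamStats._packetStreamStats.getLostRate() * 100.0f, 'f', 2)
        + " lost%_30s:" + QString::number(streamStats._packetStreamWindowStats.getLostRate() * 100.0f, 'f', 2)
        + " min_gap:" + formatUsecTime(streamStats._timeGapMin)
        + " max_gap:" + formatUsecTime(streamStats._timeGapMax)
        + " avg_gap:" + formatUsecTime(streamStats._timeGapAverage)
        + " min_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMin)
        + " max_gap_30s:" + formatUsecTime(streamStats._timeGapWindowMax)
        + " avg_gap_30s:" + formatUsecTime(streamStats._timeGapWindowAverage);
}

The downstream block would still need to special-case its two "?" placeholders, since _downstreamAudioStreamStats does not carry the current jitter frames or dropped silent frames in this version.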