int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    // setup a data stream to read from this packet
    QDataStream packetStream(packetAfterSeqNum);

    // skip the stream identifier
    packetStream.skipRawData(NUM_BYTES_RFC4122_UUID);

    // pull the loopback flag and set our boolean
    uchar shouldLoopback;
    packetStream >> shouldLoopback;
    _shouldLoopbackForNode = (shouldLoopback == 1);

    // use parsePositionalData in parent PositionalAudioRingBuffer class to pull common positional data
    packetStream.skipRawData(parsePositionalData(packetAfterSeqNum.mid(packetStream.device()->pos())));

    // pull out the radius for this injected source - if it's zero this is a point source
    packetStream >> _radius;

    quint8 attenuationByte = 0;
    packetStream >> attenuationByte;
    _attenuationRatio = attenuationByte / (float)MAX_INJECTOR_VOLUME;

    int numAudioBytes = packetAfterSeqNum.size() - packetStream.device()->pos();
    numAudioSamples = numAudioBytes / sizeof(int16_t);

    return packetStream.device()->pos();
}
int InjectedAudioRingBuffer::parseData(const QByteArray& packet) {
    // setup a data stream to read from this packet
    QDataStream packetStream(packet);
    packetStream.skipRawData(numBytesForPacketHeader(packet));

    // push past the stream identifier
    packetStream.skipRawData(NUM_BYTES_RFC4122_UUID);

    // pull the loopback flag and set our boolean
    uchar shouldLoopback;
    packetStream >> shouldLoopback;
    _shouldLoopbackForNode = (shouldLoopback == 1);

    // use parsePositionalData in parent PositionalAudioRingBuffer class to pull common positional data
    packetStream.skipRawData(parsePositionalData(packet.mid(packetStream.device()->pos())));

    // pull out the radius for this injected source - if it's zero this is a point source
    packetStream >> _radius;

    quint8 attenuationByte = 0;
    packetStream >> attenuationByte;
    _attenuationRatio = attenuationByte / (float) MAX_INJECTOR_VOLUME;

    packetStream.skipRawData(writeData(packet.data() + packetStream.device()->pos(),
                                       packet.size() - packetStream.device()->pos()));

    return packetStream.device()->pos();
}
int PositionalAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
    unsigned char* currentBuffer = sourceBuffer + sizeof(PACKET_HEADER);

    currentBuffer += parsePositionalData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
    currentBuffer += parseAudioSamples(currentBuffer, numBytes - (currentBuffer - sourceBuffer));

    return currentBuffer - sourceBuffer;
}
int AvatarAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    int readBytes = 0;

    if (type == PacketType::SilentAudioFrame) {
        const char* dataAt = packetAfterSeqNum.constData();
        SilentSamplesBytes numSilentSamples = *(reinterpret_cast<const quint16*>(dataAt));
        readBytes += sizeof(SilentSamplesBytes);
        numAudioSamples = (int) numSilentSamples;

        // read the positional data
        readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));
    } else {
        _shouldLoopbackForNode = (type == PacketType::MicrophoneAudioWithEcho);

        // read the channel flag
        ChannelFlag channelFlag = packetAfterSeqNum.at(readBytes);
        bool isStereo = channelFlag == 1;
        readBytes += sizeof(ChannelFlag);

        // if isStereo value has changed, restart the ring buffer with new frame size
        if (isStereo != _isStereo) {
            _ringBuffer.resizeForFrameSize(isStereo
                                           ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
                                           : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

            // restart the codec
            if (_codec) {
                if (_decoder) {
                    _codec->releaseDecoder(_decoder);
                }
                _decoder = _codec->createDecoder(AudioConstants::SAMPLE_RATE,
                                                 isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
            }
            qCDebug(audio) << "resetting AvatarAudioStream... codec:" << _selectedCodecName << "isStereo:" << isStereo;

            _isStereo = isStereo;
        }

        // read the positional data
        readBytes += parsePositionalData(packetAfterSeqNum.mid(readBytes));

        // calculate how many samples are in this packet
        int numAudioBytes = packetAfterSeqNum.size() - readBytes;
        numAudioSamples = numAudioBytes / sizeof(int16_t);
    }

    return readBytes;
}
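// A minimal sketch, not from the original code, of the post-sequence-number payload the
// SilentAudioFrame branch above expects. It assumes the writer also copies the quint16
// count in host byte order (mirroring the reinterpret_cast on the read side);
// buildSilentFramePayload and its positionalData argument are hypothetical stand-ins,
// and the packet header plus sequence number are omitted.
#include <QByteArray>
#include <QtGlobal>

QByteArray buildSilentFramePayload(quint16 numSilentSamples, const QByteArray& positionalData) {
    QByteArray payload;

    // the reader does *(reinterpret_cast<const quint16*>(dataAt)), so append the raw
    // two bytes of the count without any byte-order conversion
    payload.append(reinterpret_cast<const char*>(&numSilentSamples), sizeof(numSilentSamples));

    // opaque positional block, consumed by parsePositionalData() on the other end
    payload.append(positionalData);

    return payload;
}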
int PositionalAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
    unsigned char* currentBuffer = sourceBuffer + numBytesForPacketHeader(sourceBuffer);
    currentBuffer += sizeof(uint16_t); // the source ID

    currentBuffer += parseListenModeData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
    currentBuffer += parsePositionalData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));
    currentBuffer += parseAudioSamples(currentBuffer, numBytes - (currentBuffer - sourceBuffer));

    return currentBuffer - sourceBuffer;
}
int InjectedAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
    // setup a data stream to read from this packet
    QDataStream packetStream(packetAfterSeqNum);

    // skip the stream identifier
    packetStream.skipRawData(NUM_BYTES_RFC4122_UUID);

    // read the channel flag
    bool isStereo;
    packetStream >> isStereo;

    // if isStereo value has changed, restart the ring buffer with new frame size
    if (isStereo != _isStereo) {
        _ringBuffer.resizeForFrameSize(isStereo
                                       ? AudioConstants::NETWORK_FRAME_SAMPLES_STEREO
                                       : AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
        _isStereo = isStereo;
    }

    // pull the loopback flag and set our boolean
    uchar shouldLoopback;
    packetStream >> shouldLoopback;
    _shouldLoopbackForNode = (shouldLoopback == 1);

    // use parsePositionalData in parent PositionalAudioRingBuffer class to pull common positional data
    packetStream.skipRawData(parsePositionalData(packetAfterSeqNum.mid(packetStream.device()->pos())));

    // pull out the radius for this injected source - if it's zero this is a point source
    packetStream >> _radius;

    quint8 attenuationByte = 0;
    packetStream >> attenuationByte;
    _attenuationRatio = attenuationByte / (float)MAX_INJECTOR_VOLUME;

    packetStream >> _ignorePenumbra;

    int numAudioBytes = packetAfterSeqNum.size() - packetStream.device()->pos();
    numAudioSamples = numAudioBytes / sizeof(int16_t);

    return packetStream.device()->pos();
}
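// A minimal sketch, not from the original code, of the matching injector-side serialization,
// assuming both ends use QDataStream with the same version and floating-point defaults.
// writeInjectorStreamProperties and its positionalData argument are hypothetical; the packet
// header, sequence number, and trailing 16-bit audio samples are omitted.
#include <QByteArray>
#include <QDataStream>
#include <QIODevice>
#include <QUuid>

QByteArray writeInjectorStreamProperties(const QUuid& streamIdentifier, bool isStereo,
                                         bool shouldLoopback, const QByteArray& positionalData,
                                         float radius, quint8 attenuationByte, bool ignorePenumbra) {
    QByteArray payload;
    QDataStream packetStream(&payload, QIODevice::WriteOnly);

    // 16 raw RFC 4122 bytes, matching the NUM_BYTES_RFC4122_UUID skip on the read side
    QByteArray uuidBytes = streamIdentifier.toRfc4122();
    packetStream.writeRawData(uuidBytes.constData(), uuidBytes.size());

    packetStream << isStereo;                           // channel flag
    packetStream << (uchar)(shouldLoopback ? 1 : 0);    // loopback flag

    // opaque positional block, consumed by parsePositionalData() on the other end
    packetStream.writeRawData(positionalData.constData(), positionalData.size());

    packetStream << radius;                             // zero means a point source
    packetStream << attenuationByte;                    // scaled against MAX_INJECTOR_VOLUME by the reader
    packetStream << ignorePenumbra;

    return payload;
}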
int InjectedAudioRingBuffer::parseData(unsigned char* sourceBuffer, int numBytes) {
    unsigned char* currentBuffer = sourceBuffer + sizeof(PACKET_HEADER_INJECT_AUDIO);

    // pull stream identifier from the packet
    memcpy(&_streamIdentifier, currentBuffer, sizeof(_streamIdentifier));
    currentBuffer += sizeof(_streamIdentifier);

    // use parsePositionalData in parent PositionalAudioRingBuffer class to pull common positional data
    currentBuffer += parsePositionalData(currentBuffer, numBytes - (currentBuffer - sourceBuffer));

    // pull out the radius for this injected source - if it's zero this is a point source
    memcpy(&_radius, currentBuffer, sizeof(_radius));
    currentBuffer += sizeof(_radius);

    unsigned int attenuationByte = *(currentBuffer++);
    _attenuationRatio = attenuationByte / (float) MAX_INJECTOR_VOLUME;

    currentBuffer += parseAudioSamples(currentBuffer, numBytes - (currentBuffer - sourceBuffer));

    return currentBuffer - sourceBuffer;
}
int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
    // skip the packet header (includes the source UUID)
    int readBytes = numBytesForPacketHeader(packet);

    readBytes += parsePositionalData(packet.mid(readBytes));

    if (packetTypeForPacket(packet) == PacketTypeSilentAudioFrame) {
        // this source had no audio to send us, but this counts as a packet
        // write silence equivalent to the number of silent samples they just sent us
        int16_t numSilentSamples;
        memcpy(&numSilentSamples, packet.data() + readBytes, sizeof(int16_t));
        readBytes += sizeof(int16_t);

        addSilentFrame(numSilentSamples);
    } else {
        // there is audio data to read
        readBytes += writeData(packet.data() + readBytes, packet.size() - readBytes);
    }

    return readBytes;
}