Code Example #1
File: AudioMixer.cpp  Project: ChristophHaag/hifi
void AudioMixer::handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
    auto nodeList = DependencyManager::get<NodeList>();

    if (sendingNode->isAllowedEditor()) {
        glm::vec3 position;
        float radius;

        auto newPacket = NLPacket::create(PacketType::MuteEnvironment, sizeof(position) + sizeof(radius));

        // read the position and radius from the sent packet
        message->readPrimitive(&position);
        message->readPrimitive(&radius);

        // write them to our packet
        newPacket->writePrimitive(position);
        newPacket->writePrimitive(radius);

        nodeList->eachNode([&](const SharedNodePointer& node){
            if (node->getType() == NodeType::Agent && node->getActiveSocket() &&
                node->getLinkedData() && node != sendingNode) {
                nodeList->sendUnreliablePacket(*newPacket, *node);
            }
        });
    }
}
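The example above relays a fixed-size payload by reading two primitives out of the received message and writing them into a fresh packet sized as sizeof(position) + sizeof(radius). The standalone sketch below shows the same pack/unpack idea with a raw byte buffer; the Vec3 struct and the helper names are stand-ins, not the hifi NLPacket API.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Vec3 { float x, y, z; };   // stand-in for glm::vec3

// append the raw bytes of a trivially copyable value, analogous to NLPacket::writePrimitive
template <typename T>
void writePrimitive(std::vector<uint8_t>& buf, const T& value) {
    const auto* bytes = reinterpret_cast<const uint8_t*>(&value);
    buf.insert(buf.end(), bytes, bytes + sizeof(T));
}

// copy the raw bytes back out and return the next read offset, analogous to readPrimitive
template <typename T>
size_t readPrimitive(const std::vector<uint8_t>& buf, size_t offset, T& value) {
    std::memcpy(&value, buf.data() + offset, sizeof(T));
    return offset + sizeof(T);
}

int main() {
    std::vector<uint8_t> packet;
    writePrimitive(packet, Vec3{ 1.0f, 2.0f, 3.0f });   // position
    writePrimitive(packet, 5.0f);                       // radius

    Vec3 position{};
    float radius = 0.0f;
    size_t offset = readPrimitive(packet, 0, position);
    readPrimitive(packet, offset, radius);
    assert(position.x == 1.0f && radius == 5.0f);
    return 0;
}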
Code Example #2
void OctreeHeadlessViewer::queryOctree() {
    char serverType = getMyNodeType();
    PacketType packetType = getMyQueryMessageType();

    if (_hasViewFrustum) {
        ConicalViewFrustums views { _viewFrustum };
        _octreeQuery.setConicalViews(views);
    } else {
        _octreeQuery.clearConicalViews();
    }

    auto nodeList = DependencyManager::get<NodeList>();

    auto node = nodeList->soloNodeOfType(serverType);
    if (node && node->getActiveSocket()) {
        auto queryPacket = NLPacket::create(packetType);

        // encode the query data
        auto packetData = reinterpret_cast<unsigned char*>(queryPacket->getPayload());
        int packetSize = _octreeQuery.getBroadcastData(packetData);
        queryPacket->setPayloadSize(packetSize);

        // send the query packet to the octree server
        nodeList->sendUnreliablePacket(*queryPacket, *node);
    }
}
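queryOctree() above fills the packet by letting the query object serialize itself directly into the payload buffer and then recording the number of bytes written as the payload size. Below is a minimal standalone sketch of that encode-then-size pattern; ToyQuery and its fields are invented for illustration and are not the hifi OctreeQuery API.

#include <cstdint>
#include <cstdio>
#include <cstring>

// a toy query that serializes two fields into a caller-provided buffer
struct ToyQuery {
    uint8_t maxOctreePPS = 30;
    float   voxelSizeScale = 1.0f;

    int getBroadcastData(unsigned char* destination) const {
        unsigned char* start = destination;
        std::memcpy(destination, &maxOctreePPS, sizeof(maxOctreePPS));
        destination += sizeof(maxOctreePPS);
        std::memcpy(destination, &voxelSizeScale, sizeof(voxelSizeScale));
        destination += sizeof(voxelSizeScale);
        return static_cast<int>(destination - start);   // bytes actually written
    }
};

int main() {
    unsigned char payload[64];
    ToyQuery query;
    int payloadSize = query.getBroadcastData(payload);  // encode the query data
    std::printf("payload size: %d bytes\n", payloadSize);
    return 0;
}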
Code Example #3
File: DomainHandler.cpp  Project: disigma/hifi
void DomainHandler::sendDisconnectPacket() {
    // The DomainDisconnect packet is not verified - we're relying on the eventual addition of DTLS to the
    // domain-server connection to stop griefing here
    
    // construct the disconnect packet once (an empty packet but sourced with our current session UUID)
    static auto disconnectPacket = NLPacket::create(PacketType::DomainDisconnectRequest, 0);
    
    // send the disconnect packet to the current domain server
    auto nodeList = DependencyManager::get<NodeList>();
    nodeList->sendUnreliablePacket(*disconnectPacket, _sockAddr);
}
Code Example #4
void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber,
                                             const Transform& transform, glm::vec3 avatarBoundingBoxCorner, glm::vec3 avatarBoundingBoxScale,
                                             PacketType packetType, QString codecName) {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;
    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
    if (audioMixer && audioMixer->getActiveSocket()) {
        Locker lock(_mutex);
        auto audioPacket = NLPacket::create(packetType);

        // FIXME - this is not a good way to determine stereoness with codecs.... 
        quint8 isStereo = bytes == AudioConstants::NETWORK_FRAME_BYTES_STEREO ? 1 : 0;

        // write sequence number
        auto sequence = sequenceNumber++;
        audioPacket->writePrimitive(sequence);

        // write the codec
        audioPacket->writeString(codecName);

        if (packetType == PacketType::SilentAudioFrame) {
            // pack num silent samples
            quint16 numSilentSamples = isStereo ?
                AudioConstants::NETWORK_FRAME_SAMPLES_STEREO :
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
            audioPacket->writePrimitive(numSilentSamples);
        } else {
            // set the mono/stereo byte
            audioPacket->writePrimitive(isStereo);
        }

        // pack the three float positions
        audioPacket->writePrimitive(transform.getTranslation());
        // pack the orientation
        audioPacket->writePrimitive(transform.getRotation());

        audioPacket->writePrimitive(avatarBoundingBoxCorner);
        audioPacket->writePrimitive(avatarBoundingBoxScale);


        if (audioPacket->getType() != PacketType::SilentAudioFrame) {
            // audio samples have already been packed (written to networkAudioSamples)
            int leadingBytes = audioPacket->getPayloadSize();
            audioPacket->setPayloadSize(leadingBytes + bytes);
            memcpy(audioPacket->getPayload() + leadingBytes, audioData, bytes);
        }
        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
        nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
    }
}
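As the FIXME above notes, emitAudioPacket() infers the channel count purely from the byte size of the frame it was handed. Below is a minimal sketch of that heuristic, assuming a 240-samples-per-channel network frame of 16-bit PCM (the actual value of AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL may differ).

#include <cstdint>
#include <cstdio>

int main() {
    const size_t NETWORK_FRAME_SAMPLES_PER_CHANNEL = 240;   // assumed frame length
    const size_t NETWORK_FRAME_BYTES_PER_CHANNEL = NETWORK_FRAME_SAMPLES_PER_CHANNEL * sizeof(int16_t);
    const size_t NETWORK_FRAME_BYTES_STEREO = 2 * NETWORK_FRAME_BYTES_PER_CHANNEL;

    size_t bytes = NETWORK_FRAME_BYTES_STEREO;               // size of the buffer handed to emitAudioPacket
    uint8_t isStereo = (bytes == NETWORK_FRAME_BYTES_STEREO) ? 1 : 0;
    std::printf("isStereo flag: %u\n", static_cast<unsigned>(isStereo));
    return 0;
}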
Code Example #5
void AudioMixerClientData::optionallyReplicatePacket(ReceivedMessage& message, const Node& node) {

    // first, make sure that this is a packet from a node we are supposed to replicate
    if (node.isReplicated()) {

        // now make sure it's a packet type that we want to replicate

        // first check if it is an original type that we should replicate
        PacketType mirroredType = PacketTypeEnum::getReplicatedPacketMapping().value(message.getType());

        if (mirroredType == PacketType::Unknown) {
            // if it wasn't check if it is a replicated type that we should re-replicate
            if (PacketTypeEnum::getReplicatedPacketMapping().key(message.getType()) != PacketType::Unknown) {
                mirroredType = message.getType();
            } else {
                qCDebug(audio) << "Packet passed to optionallyReplicatePacket was not a replicatable type - returning";
                return;
            }
        }

        std::unique_ptr<NLPacket> packet;
        auto nodeList = DependencyManager::get<NodeList>();

        // enumerate the downstream audio mixers and send them the replicated version of this packet
        nodeList->unsafeEachNode([&](const SharedNodePointer& downstreamNode) {
            if (AudioMixer::shouldReplicateTo(node, *downstreamNode)) {
                // construct the packet only once, if we have any downstream audio mixers to send to
                if (!packet) {
                    // construct an NLPacket to send to the replicant that has the contents of the received packet
                    packet = NLPacket::create(mirroredType);

                    if (!isReplicatedPacket(message.getType())) {
                        // since this packet will be non-sourced, we add the replicated node's ID here
                        packet->write(node.getUUID().toRfc4122());
                    }

                    packet->write(message.getMessage());
                }
                
                nodeList->sendUnreliablePacket(*packet, *downstreamNode);
            }
        });
    }

}
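optionallyReplicatePacket() decides the outgoing type with a forward lookup (original type to replicated type) and, failing that, a reverse lookup that recognizes packets that are already replicated. Here is a standalone sketch of that two-way lookup using std::map in place of the Qt hash returned by getReplicatedPacketMapping(); the enum values are illustrative only.

#include <cstdio>
#include <map>

enum class PacketType { Unknown, MicrophoneAudioNoEcho, ReplicatedMicrophoneAudioNoEcho };

int main() {
    // original type -> replicated type
    const std::map<PacketType, PacketType> replicatedMapping = {
        { PacketType::MicrophoneAudioNoEcho, PacketType::ReplicatedMicrophoneAudioNoEcho }
    };

    PacketType received = PacketType::ReplicatedMicrophoneAudioNoEcho;
    PacketType mirroredType = PacketType::Unknown;

    auto forward = replicatedMapping.find(received);
    if (forward != replicatedMapping.end()) {
        mirroredType = forward->second;          // original type: send its replicated twin
    } else {
        for (const auto& entry : replicatedMapping) {
            if (entry.second == received) {      // already-replicated type: re-replicate unchanged
                mirroredType = received;
                break;
            }
        }
    }

    std::printf("replicatable: %s\n", mirroredType != PacketType::Unknown ? "yes" : "no");
    return 0;
}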
Code Example #6
void AbstractAudioInterface::emitAudioPacket(const void* audioData, size_t bytes, quint16& sequenceNumber, const Transform& transform, PacketType packetType) {
    static std::mutex _mutex;
    using Locker = std::unique_lock<std::mutex>;
    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
    if (audioMixer && audioMixer->getActiveSocket()) {
        Locker lock(_mutex);
        static std::unique_ptr<NLPacket> audioPacket = NLPacket::create(PacketType::Unknown);
        quint8 isStereo = bytes == AudioConstants::NETWORK_FRAME_BYTES_STEREO ? 1 : 0;
        audioPacket->setType(packetType);
        // reset the audio packet so we can start writing
        audioPacket->reset();
        // write sequence number
        audioPacket->writePrimitive(sequenceNumber++);
        if (audioPacket->getType() == PacketType::SilentAudioFrame) {
            // pack num silent samples
            quint16 numSilentSamples = isStereo ?
                AudioConstants::NETWORK_FRAME_SAMPLES_STEREO :
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
            audioPacket->writePrimitive(numSilentSamples);
        } else {
            // set the mono/stereo byte
            audioPacket->writePrimitive(isStereo);
        }

        // pack the three float positions
        audioPacket->writePrimitive(transform.getTranslation());
        // pack the orientation
        audioPacket->writePrimitive(transform.getRotation());

        if (audioPacket->getType() != PacketType::SilentAudioFrame) {
            // audio samples have already been packed (written to networkAudioSamples)
            audioPacket->setPayloadSize(audioPacket->getPayloadSize() + bytes);
            static const int leadingBytes = sizeof(quint16) + sizeof(glm::vec3) + sizeof(glm::quat) + sizeof(quint8);
            memcpy(audioPacket->getPayload() + leadingBytes, audioData, bytes);
        }
        nodeList->flagTimeForConnectionStep(LimitedNodeList::ConnectionStep::SendAudioPacket);
        nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
    }
}
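This older variant reuses one static packet and relies on a fixed header size (leadingBytes) as the offset where the audio samples begin. The arithmetic behind that constant, spelled out with standard types of the same widths as quint16, quint8, glm::vec3, and glm::quat:

#include <cstdint>
#include <cstdio>

int main() {
    const size_t sequenceBytes    = sizeof(uint16_t);   // sequence number
    const size_t stereoFlagBytes  = sizeof(uint8_t);    // mono/stereo byte
    const size_t positionBytes    = 3 * sizeof(float);  // glm::vec3 translation
    const size_t orientationBytes = 4 * sizeof(float);  // glm::quat rotation

    const size_t leadingBytes = sequenceBytes + stereoFlagBytes + positionBytes + orientationBytes;
    std::printf("header bytes before the audio samples: %zu\n", leadingBytes);   // 31
    return 0;
}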
Code Example #7
File: ScriptEngine.cpp  Project: CiaranLaval/hifi
void ScriptEngine::run() {
    // TODO: can we add a short circuit for _stoppingAllScripts here? What does it mean to not start running if
    // we're in the process of stopping?

    if (!_isInitialized) {
        init();
    }
    _isRunning = true;
    _isFinished = false;
    emit runningStateChanged();

    QScriptValue result = evaluate(_scriptContents);

    QElapsedTimer startTime;
    startTime.start();

    int thisFrame = 0;

    auto nodeList = DependencyManager::get<NodeList>();
    auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();

    qint64 lastUpdate = usecTimestampNow();

    while (!_isFinished) {
        int usecToSleep = (thisFrame++ * SCRIPT_DATA_CALLBACK_USECS) - startTime.nsecsElapsed() / 1000; // nsec to usec
        if (usecToSleep > 0) {
            usleep(usecToSleep);
        }

        if (_isFinished) {
            break;
        }

        QCoreApplication::processEvents();

        if (_isFinished) {
            break;
        }

        if (!_isFinished && entityScriptingInterface->getEntityPacketSender()->serversExist()) {
            // release the queue of edit entity messages.
            entityScriptingInterface->getEntityPacketSender()->releaseQueuedMessages();

            // since we're in non-threaded mode, call process so that the packets are sent
            if (!entityScriptingInterface->getEntityPacketSender()->isThreaded()) {
                entityScriptingInterface->getEntityPacketSender()->process();
            }
        }

        if (!_isFinished && _isAvatar && _avatarData) {

            const int SCRIPT_AUDIO_BUFFER_SAMPLES = floor(((SCRIPT_DATA_CALLBACK_USECS * AudioConstants::SAMPLE_RATE)
                                                           / (1000 * 1000)) + 0.5);
            const int SCRIPT_AUDIO_BUFFER_BYTES = SCRIPT_AUDIO_BUFFER_SAMPLES * sizeof(int16_t);

            QByteArray avatarByteArray = _avatarData->toByteArray();
            auto avatarPacket = NLPacket::create(PacketType::AvatarData, avatarByteArray.size());

            avatarPacket->write(avatarByteArray);

            nodeList->broadcastToNodes(std::move(avatarPacket), NodeSet() << NodeType::AvatarMixer);

            if (_isListeningToAudioStream || _avatarSound) {
                // if we have an avatar audio stream then send it out to our audio-mixer
                bool silentFrame = true;

                int16_t numAvailableSamples = SCRIPT_AUDIO_BUFFER_SAMPLES;
                const int16_t* nextSoundOutput = NULL;

                if (_avatarSound) {

                    const QByteArray& soundByteArray = _avatarSound->getByteArray();
                    nextSoundOutput = reinterpret_cast<const int16_t*>(soundByteArray.data()
                                                                       + _numAvatarSoundSentBytes);

                    int numAvailableBytes = (soundByteArray.size() - _numAvatarSoundSentBytes) > SCRIPT_AUDIO_BUFFER_BYTES
                        ? SCRIPT_AUDIO_BUFFER_BYTES
                        : soundByteArray.size() - _numAvatarSoundSentBytes;
                    numAvailableSamples = numAvailableBytes / sizeof(int16_t);


                    // check if all of the numAvailableSamples to be sent are silence
                    for (int i = 0; i < numAvailableSamples; ++i) {
                        if (nextSoundOutput[i] != 0) {
                            silentFrame = false;
                            break;
                        }
                    }

                    _numAvatarSoundSentBytes += numAvailableBytes;
                    if (_numAvatarSoundSentBytes == soundByteArray.size()) {
                        // we're done with this sound object - so set our pointer back to NULL
                        // and our sent bytes back to zero
                        _avatarSound = NULL;
                        _numAvatarSoundSentBytes = 0;
                    }
                }

                auto audioPacket = NLPacket::create(silentFrame
                                                    ? PacketType::SilentAudioFrame
                                                    : PacketType::MicrophoneAudioNoEcho);

                // seek past the sequence number, will be packed when destination node is known
                audioPacket->seek(sizeof(quint16));

                if (silentFrame) {
                    if (!_isListeningToAudioStream) {
                        // if we have a silent frame and we're not listening then just send nothing and break out of here
                        break;
                    }

                    // write the number of silent samples so the audio-mixer can uphold timing
                    audioPacket->writePrimitive(SCRIPT_AUDIO_BUFFER_SAMPLES);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                } else if (nextSoundOutput) {
                    // assume scripted avatar audio is mono and set channel flag to zero
                    audioPacket->writePrimitive((quint8) 0);

                    // use the orientation and position of this avatar for the source of this audio
                    audioPacket->writePrimitive(_avatarData->getPosition());
                    glm::quat headOrientation = _avatarData->getHeadOrientation();
                    audioPacket->writePrimitive(headOrientation);

                    // write the raw audio data
                    audioPacket->write(reinterpret_cast<const char*>(nextSoundOutput), numAvailableSamples * sizeof(int16_t));
                }

                // write audio packet to AudioMixer nodes
                auto nodeList = DependencyManager::get<NodeList>();
                nodeList->eachNode([this, &nodeList, &audioPacket](const SharedNodePointer& node){
                    // only send to nodes of type AudioMixer
                    if (node->getType() == NodeType::AudioMixer) {
                        // pack sequence number
                        quint16 sequence = _outgoingScriptAudioSequenceNumbers[node->getUUID()]++;
                        audioPacket->seek(0);
                        audioPacket->writePrimitive(sequence);

                        // send audio packet
                        nodeList->sendUnreliablePacket(*audioPacket, *node);
                    }
                });
            }
        }

        qint64 now = usecTimestampNow();
        float deltaTime = (float) (now - lastUpdate) / (float) USECS_PER_SECOND;

        if (hasUncaughtException()) {
            int line = uncaughtExceptionLineNumber();
            qCDebug(scriptengine) << "Uncaught exception at (" << _fileNameString << ") line" << line << ":" << uncaughtException().toString();
            emit errorMessage("Uncaught exception at (" + _fileNameString + ") line" + QString::number(line) + ":" + uncaughtException().toString());
            clearExceptions();
        }

        if (!_isFinished) {
            emit update(deltaTime);
        }
        lastUpdate = now;

    }

    stopAllTimers(); // make sure all our timers are stopped if the script is ending
    emit scriptEnding();

    // kill the avatar identity timer
    delete _avatarIdentityTimer;

    if (entityScriptingInterface->getEntityPacketSender()->serversExist()) {
        // release the queue of edit entity messages.
        entityScriptingInterface->getEntityPacketSender()->releaseQueuedMessages();

        // since we're in non-threaded mode, call process so that the packets are sent
        if (!entityScriptingInterface->getEntityPacketSender()->isThreaded()) {
            // wait here till the edit packet sender is completely done sending
            while (entityScriptingInterface->getEntityPacketSender()->hasPacketsToSend()) {
                entityScriptingInterface->getEntityPacketSender()->process();
                QCoreApplication::processEvents();
            }
        } else {
            // FIXME - do we need to have a similar "wait here" loop for non-threaded packet senders?
        }
    }

    emit finished(_fileNameString);

    _isRunning = false;
    emit runningStateChanged();

    emit doneRunning();

    _doneRunningThisScript = true;
}
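Inside the script loop above, a frame is only sent as a SilentAudioFrame when every 16-bit sample in it is zero. A standalone sketch of that all-zero scan:

#include <cstdint>
#include <cstdio>

int main() {
    const int16_t frame[] = { 0, 0, 0, 0, 0, 0 };        // one toy frame of samples
    const int numSamples = static_cast<int>(sizeof(frame) / sizeof(frame[0]));

    bool silentFrame = true;
    for (int i = 0; i < numSamples; ++i) {
        if (frame[i] != 0) {                              // any non-zero sample makes the frame audible
            silentFrame = false;
            break;
        }
    }
    std::printf("silent frame: %s\n", silentFrame ? "yes" : "no");
    return 0;
}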
Code Example #8
File: AudioInjector.cpp  Project: disigma/hifi
uint64_t AudioInjector::injectNextFrame() {
    
    if (_state == AudioInjector::State::Finished) {
        qDebug() << "AudioInjector::injectNextFrame called but AudioInjector has finished and was not restarted. Returning.";
        return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
    }
    
    // if we haven't setup the packet to send then do so now
    static int positionOptionOffset = -1;
    static int volumeOptionOffset = -1;
    static int audioDataOffset = -1;
    
    if (!_currentPacket) {
        if (_currentSendOffset < 0 ||
            _currentSendOffset >= _audioData.size()) {
            _currentSendOffset = 0;
        }
        
        // make sure we actually have samples downloaded to inject
        if (_audioData.size()) {
            
            _outgoingSequenceNumber = 0;
            _nextFrame = 0;
            
            if (!_frameTimer) {
                _frameTimer = std::unique_ptr<QElapsedTimer>(new QElapsedTimer);
            }
            
            _frameTimer->restart();
            
            _currentPacket = NLPacket::create(PacketType::InjectAudio);
            
            // setup the packet for injected audio
            QDataStream audioPacketStream(_currentPacket.get());
            
            // pack some placeholder sequence number for now
            audioPacketStream << (quint16) 0;
            
            // pack stream identifier (a generated UUID)
            audioPacketStream << QUuid::createUuid();
            
            // pack the stereo/mono type of the stream
            audioPacketStream << _options.stereo;
            
            // pack the flag for loopback
            uchar loopbackFlag = (uchar) true;
            audioPacketStream << loopbackFlag;
            
            // pack the position for injected audio
            positionOptionOffset = _currentPacket->pos();
            audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.position),
                                           sizeof(_options.position));
            
            // pack our orientation for injected audio
            audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.orientation),
                                           sizeof(_options.orientation));
            
            // pack zero for radius
            float radius = 0;
            audioPacketStream << radius;
            
            // pack 255 for attenuation byte
            volumeOptionOffset = _currentPacket->pos();
            quint8 volume = MAX_INJECTOR_VOLUME;
            audioPacketStream << volume;
            
            audioPacketStream << _options.ignorePenumbra;
            
            audioDataOffset = _currentPacket->pos();
            
        } else {
            // no samples to inject, return immediately
            qDebug() << "AudioInjector::injectNextFrame() called with no samples to inject. Returning.";
            return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
        }
    }
    
    int bytesToCopy = std::min((_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL,
                               _audioData.size() - _currentSendOffset);
    
    //  Measure the loudness of this frame
    _loudness = 0.0f;
    for (int i = 0; i < bytesToCopy; i += sizeof(int16_t)) {
        _loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + _currentSendOffset + i)) /
        (AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
    }
    _loudness /= (float)(bytesToCopy / sizeof(int16_t));
    
    _currentPacket->seek(0);
    
    // pack the sequence number
    _currentPacket->writePrimitive(_outgoingSequenceNumber);
    
    _currentPacket->seek(positionOptionOffset);
    _currentPacket->writePrimitive(_options.position);
    _currentPacket->writePrimitive(_options.orientation);
    
    quint8 volume = MAX_INJECTOR_VOLUME * _options.volume;
    _currentPacket->seek(volumeOptionOffset);
    _currentPacket->writePrimitive(volume);
    
    _currentPacket->seek(audioDataOffset);
    
    // copy the next NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL bytes to the packet
    _currentPacket->write(_audioData.data() + _currentSendOffset, bytesToCopy);
    
    // set the correct size used for this packet
    _currentPacket->setPayloadSize(_currentPacket->pos());
    
    // grab our audio mixer from the NodeList, if it exists
    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
    
    if (audioMixer) {
        // send off this audio packet
        nodeList->sendUnreliablePacket(*_currentPacket, *audioMixer);
        _outgoingSequenceNumber++;
    }
    
    _currentSendOffset += bytesToCopy;
    
    if (_currentSendOffset >= _audioData.size()) {
        // we're at the end of the audio data to send
        if (_options.loop) {
            // we were asked to loop, set our send offset to 0
            _currentSendOffset = 0;
        } else {
            // we weren't asked to loop, so say that we're done now
            finish();
            return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
        }
    }
    
    if (_currentSendOffset == bytesToCopy) {
        // ask AudioInjectorManager to call us right away again to
        // immediately send the first two frames so the mixer can start using the audio right away
        return NEXT_FRAME_DELTA_IMMEDIATELY;
    } else {
        return (++_nextFrame * AudioConstants::NETWORK_FRAME_USECS) - _frameTimer->nsecsElapsed() / 1000;
    }
    
}
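Both injector variants compute a per-frame loudness as the mean absolute sample value, normalized by half of AudioConstants::MAX_SAMPLE_VALUE. A standalone sketch of that measure over a small hard-coded frame (32767 is assumed to match MAX_SAMPLE_VALUE for 16-bit PCM):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

int main() {
    const float MAX_SAMPLE_VALUE = 32767.0f;               // assumed 16-bit PCM full scale
    std::vector<int16_t> frame = { 1000, -2000, 0, 500, -500, 3000 };

    float loudness = 0.0f;
    for (int16_t sample : frame) {
        loudness += std::abs(sample) / (MAX_SAMPLE_VALUE / 2.0f);   // |sample| scaled by half full scale
    }
    loudness /= static_cast<float>(frame.size());                   // average over the frame
    std::printf("frame loudness: %f\n", loudness);
    return 0;
}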
Code Example #9
File: AudioInjector.cpp  Project: toloudis/hifi
int64_t AudioInjector::injectNextFrame() {
    if (_state == AudioInjector::State::Finished) {
        qDebug() << "AudioInjector::injectNextFrame called but AudioInjector has finished and was not restarted. Returning.";
        return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
    }

    // if we haven't setup the packet to send then do so now
    static int positionOptionOffset = -1;
    static int volumeOptionOffset = -1;
    static int audioDataOffset = -1;

    if (!_currentPacket) {
        if (_currentSendOffset < 0 ||
                _currentSendOffset >= _audioData.size()) {
            _currentSendOffset = 0;
        }

        // make sure we actually have samples downloaded to inject
        if (_audioData.size()) {

            int sampleSize = (_options.stereo ? 2 : 1) * sizeof(AudioConstants::AudioSample);
            auto numSamples = static_cast<int>(_audioData.size() / sampleSize);
            auto targetSize = numSamples * sampleSize;
            if (targetSize != _audioData.size()) {
                qDebug() << "Resizing audio that doesn't end at multiple of sample size, resizing from "
                         << _audioData.size() << " to " << targetSize;
                _audioData.resize(targetSize);
            }

            _outgoingSequenceNumber = 0;
            _nextFrame = 0;

            if (!_frameTimer) {
                _frameTimer = std::unique_ptr<QElapsedTimer>(new QElapsedTimer);
            }

            _frameTimer->restart();

            _currentPacket = NLPacket::create(PacketType::InjectAudio);

            // setup the packet for injected audio
            QDataStream audioPacketStream(_currentPacket.get());

            // pack some placeholder sequence number for now
            audioPacketStream << (quint16) 0;

            // pack stream identifier (a generated UUID)
            audioPacketStream << QUuid::createUuid();

            // pack the stereo/mono type of the stream
            audioPacketStream << _options.stereo;

            // pack the flag for loopback
            uchar loopbackFlag = (uchar)true;
            audioPacketStream << loopbackFlag;

            // pack the position for injected audio
            positionOptionOffset = _currentPacket->pos();
            audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.position),
                                           sizeof(_options.position));

            // pack our orientation for injected audio
            audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.orientation),
                                           sizeof(_options.orientation));

            // pack zero for radius
            float radius = 0;
            audioPacketStream << radius;

            // pack 255 for attenuation byte
            volumeOptionOffset = _currentPacket->pos();
            quint8 volume = MAX_INJECTOR_VOLUME;
            audioPacketStream << volume;

            audioPacketStream << _options.ignorePenumbra;

            audioDataOffset = _currentPacket->pos();

        } else {
            // no samples to inject, return immediately
            qDebug() << "AudioInjector::injectNextFrame() called with no samples to inject. Returning.";
            return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
        }
    }

    if (!_frameTimer->isValid()) {
        // in the case where we have been restarted, the frame timer will be invalid and we need to start it back over here
        _frameTimer->restart();
    }

    int totalBytesLeftToCopy = (_options.stereo ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL;
    if (!_options.loop) {
        // If we aren't looping, let's make sure we don't read past the end
        totalBytesLeftToCopy = std::min(totalBytesLeftToCopy, _audioData.size() - _currentSendOffset);
    }

    //  Measure the loudness of this frame
    _loudness = 0.0f;
    for (int i = 0; i < totalBytesLeftToCopy; i += sizeof(int16_t)) {
        _loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + ((_currentSendOffset + i) % _audioData.size()))) /
                     (AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
    }
    _loudness /= (float)(totalBytesLeftToCopy / sizeof(int16_t));

    _currentPacket->seek(0);

    // pack the sequence number
    _currentPacket->writePrimitive(_outgoingSequenceNumber);

    _currentPacket->seek(positionOptionOffset);
    _currentPacket->writePrimitive(_options.position);
    _currentPacket->writePrimitive(_options.orientation);

    quint8 volume = MAX_INJECTOR_VOLUME * _options.volume;
    _currentPacket->seek(volumeOptionOffset);
    _currentPacket->writePrimitive(volume);

    _currentPacket->seek(audioDataOffset);

    while (totalBytesLeftToCopy > 0) {
        int bytesToCopy = std::min(totalBytesLeftToCopy, _audioData.size() - _currentSendOffset);

        _currentPacket->write(_audioData.data() + _currentSendOffset, bytesToCopy);
        _currentSendOffset += bytesToCopy;
        totalBytesLeftToCopy -= bytesToCopy;
        if (_options.loop && _currentSendOffset >= _audioData.size()) {
            _currentSendOffset = 0;
        }
    }

    // set the correct size used for this packet
    _currentPacket->setPayloadSize(_currentPacket->pos());

    // grab our audio mixer from the NodeList, if it exists
    auto nodeList = DependencyManager::get<NodeList>();
    SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);

    if (audioMixer) {
        // send off this audio packet
        nodeList->sendUnreliablePacket(*_currentPacket, *audioMixer);
        _outgoingSequenceNumber++;
    }

    if (_currentSendOffset >= _audioData.size() && !_options.loop) {
        finish();
        return NEXT_FRAME_DELTA_ERROR_OR_FINISHED;
    }

    if (!_hasSentFirstFrame) {
        _hasSentFirstFrame = true;
        // ask AudioInjectorManager to call us right away again to
        // immediately send the first two frames so the mixer can start using the audio right away
        return NEXT_FRAME_DELTA_IMMEDIATELY;
    }

    const int MAX_ALLOWED_FRAMES_TO_FALL_BEHIND = 7;
    int64_t currentTime = _frameTimer->nsecsElapsed() / 1000;
    auto currentFrameBasedOnElapsedTime = currentTime / AudioConstants::NETWORK_FRAME_USECS;

    if (currentFrameBasedOnElapsedTime - _nextFrame > MAX_ALLOWED_FRAMES_TO_FALL_BEHIND) {
        // If we are falling behind by more frames than our threshold, let's skip the frames ahead
        qDebug() << this << "injectNextFrame() skipping ahead, fell behind by " << (currentFrameBasedOnElapsedTime - _nextFrame) << " frames";
        _nextFrame = currentFrameBasedOnElapsedTime;
        _currentSendOffset = _nextFrame * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL * (_options.stereo ? 2 : 1) % _audioData.size();
    }

    int64_t playNextFrameAt = ++_nextFrame * AudioConstants::NETWORK_FRAME_USECS;

    return std::max(INT64_C(0), playNextFrameAt - currentTime);
}
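The return value at the end of injectNextFrame() is the time to wait until the next frame's deadline, computed from the frame counter and the elapsed timer, and clamped at zero so a late frame is sent immediately. A standalone sketch of that pacing arithmetic; the 10 ms frame length is an assumption for illustration, not necessarily the value of AudioConstants::NETWORK_FRAME_USECS.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t NETWORK_FRAME_USECS = 10000;   // assumed 10 ms network frame
    int64_t nextFrame = 41;                      // frames scheduled so far
    int64_t currentTime = 415000;                // usecs elapsed since the injector started

    int64_t playNextFrameAt = ++nextFrame * NETWORK_FRAME_USECS;          // deadline of the next frame
    int64_t delay = std::max(INT64_C(0), playNextFrameAt - currentTime);  // never return a negative delay
    std::printf("wait %lld usec before injecting the next frame\n", static_cast<long long>(delay));
    return 0;
}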
Code Example #10
void AudioInjector::injectToMixer() {
    if (_currentSendOffset < 0 ||
        _currentSendOffset >= _audioData.size()) {
        _currentSendOffset = 0;
    }

    auto nodeList = DependencyManager::get<NodeList>();

    // make sure we actually have samples downloaded to inject
    if (_audioData.size()) {

        auto audioPacket = NLPacket::create(PacketType::InjectAudio);

        // setup the packet for injected audio
        QDataStream audioPacketStream(audioPacket.get());

        // pack some placeholder sequence number for now
        audioPacketStream << (quint16) 0;

        // pack stream identifier (a generated UUID)
        audioPacketStream << QUuid::createUuid();

        // pack the stereo/mono type of the stream
        audioPacketStream << _options.stereo;

        // pack the flag for loopback
        uchar loopbackFlag = (uchar) true;
        audioPacketStream << loopbackFlag;

        // pack the position for injected audio
        int positionOptionOffset = audioPacket->pos();
        audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.position),
                                  sizeof(_options.position));

        // pack our orientation for injected audio
        audioPacketStream.writeRawData(reinterpret_cast<const char*>(&_options.orientation),
                                  sizeof(_options.orientation));

        // pack zero for radius
        float radius = 0;
        audioPacketStream << radius;

        // pack 255 for attenuation byte
        int volumeOptionOffset = audioPacket->pos();
        quint8 volume = MAX_INJECTOR_VOLUME * _options.volume;
        audioPacketStream << volume;

        audioPacketStream << _options.ignorePenumbra;

        int audioDataOffset = audioPacket->pos();

        QElapsedTimer timer;
        timer.start();
        int nextFrame = 0;

        bool shouldLoop = _options.loop;

        // loop to send off our audio in NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL byte chunks
        quint16 outgoingInjectedAudioSequenceNumber = 0;

        while (_currentSendOffset < _audioData.size() && !_shouldStop) {

            int bytesToCopy = std::min(((_options.stereo) ? 2 : 1) * AudioConstants::NETWORK_FRAME_BYTES_PER_CHANNEL,
                                       _audioData.size() - _currentSendOffset);

            //  Measure the loudness of this frame
            _loudness = 0.0f;
            for (int i = 0; i < bytesToCopy; i += sizeof(int16_t)) {
                _loudness += abs(*reinterpret_cast<int16_t*>(_audioData.data() + _currentSendOffset + i)) /
                (AudioConstants::MAX_SAMPLE_VALUE / 2.0f);
            }
            _loudness /= (float)(bytesToCopy / sizeof(int16_t));
            
            audioPacket->seek(0);
            
            // pack the sequence number
            audioPacket->writePrimitive(outgoingInjectedAudioSequenceNumber);
            
            audioPacket->seek(positionOptionOffset);
            audioPacket->writePrimitive(_options.position);
            audioPacket->writePrimitive(_options.orientation);

            volume = MAX_INJECTOR_VOLUME * _options.volume;
            audioPacket->seek(volumeOptionOffset);
            audioPacket->writePrimitive(volume);

            audioPacket->seek(audioDataOffset);

            // copy the next NETWORK_BUFFER_LENGTH_BYTES_PER_CHANNEL bytes to the packet
            audioPacket->write(_audioData.data() + _currentSendOffset, bytesToCopy);

            // set the correct size used for this packet
            audioPacket->setPayloadSize(audioPacket->pos());

            // grab our audio mixer from the NodeList, if it exists
            SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
            
            if (audioMixer) {
                // send off this audio packet
                nodeList->sendUnreliablePacket(*audioPacket, *audioMixer);
                outgoingInjectedAudioSequenceNumber++;
            }

            _currentSendOffset += bytesToCopy;

            // send two packets before the first sleep so the mixer can start playback right away

            if (_currentSendOffset != bytesToCopy && _currentSendOffset < _audioData.size()) {

                // process events in case we have been told to stop and be deleted
                QCoreApplication::processEvents();

                if (_shouldStop) {
                    break;
                }

                // not the first packet and not done
                // sleep for the appropriate time
                int usecToSleep = (++nextFrame * AudioConstants::NETWORK_FRAME_USECS) - timer.nsecsElapsed() / 1000;

                if (usecToSleep > 0) {
                    usleep(usecToSleep);
                }
            }

            if (shouldLoop && _currentSendOffset >= _audioData.size()) {
                _currentSendOffset = 0;
            }
        }
    }

    setIsFinished(true);
    _isPlaying = !_isFinished; // Which can be false if a restart was requested
}