Example #1
void Agent::executeScript() {
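    // build the script engine for this agent script from the downloaded contents and payload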
    _scriptEngine = scriptEngineFactory(ScriptEngine::AGENT_SCRIPT, _scriptContents, _payload);

    DependencyManager::get<RecordingScriptingInterface>()->setScriptEngine(_scriptEngine);

    // setup an Avatar for the script to use
    auto scriptedAvatar = DependencyManager::get<ScriptableAvatar>();

    connect(_scriptEngine.data(), SIGNAL(update(float)),
            scriptedAvatar.data(), SLOT(update(float)), Qt::ConnectionType::QueuedConnection);
    scriptedAvatar->setForceFaceTrackerConnected(true);

    // call model URL setters with empty URLs so our avatar, if used, will have the default models
    scriptedAvatar->setSkeletonModelURL(QUrl());

    // force lazy initialization of the head data for the scripted avatar
    // since it is referenced below by computeLoudness and getAudioLoudness
    scriptedAvatar->getHeadOrientation();

    // give this AvatarData object to the script engine
    _scriptEngine->registerGlobalObject("Avatar", scriptedAvatar.data());

    // give scripts access to the Users object
    _scriptEngine->registerGlobalObject("Users", DependencyManager::get<UsersScriptingInterface>().data());

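    // when the recording deck starts playing, optionally capture a recording basis so playback
    // is applied relative to the avatar's current transform; clear it when playback stops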
    auto player = DependencyManager::get<recording::Deck>();
    connect(player.data(), &recording::Deck::playbackStateChanged, [=] {
        if (player->isPlaying()) {
            auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
            if (recordingInterface->getPlayFromCurrentLocation()) {
                scriptedAvatar->setRecordingBasis();
            }
        } else {
            scriptedAvatar->clearRecordingBasis();
        }
    });

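    // play back recorded avatar frames by deserializing each frame into the scripted avatar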
    using namespace recording;
    static const FrameType AVATAR_FRAME_TYPE = Frame::registerFrameType(AvatarData::FRAME_NAME);
    Frame::registerFrameHandler(AVATAR_FRAME_TYPE, [scriptedAvatar](Frame::ConstPointer frame) {
        auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
        bool useFrameSkeleton = recordingInterface->getPlayerUseSkeletonModel();

        // FIXME - the ability to switch the avatar URL is not actually supported when playing back from a recording
        if (!useFrameSkeleton) {
            static std::once_flag warning;
            std::call_once(warning, [] {
                qWarning() << "Recording.setPlayerUseSkeletonModel(false) is not currently supported.";
            });
        }

        AvatarData::fromFrame(frame->data, *scriptedAvatar);
    });

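    // play back recorded audio frames: gate, measure loudness, encode, and send them as audio
    // packets (the avatar pointer is captured by value so the handler keeps it alive)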
    static const FrameType AUDIO_FRAME_TYPE = Frame::registerFrameType(AudioConstants::getAudioFrameName());
    Frame::registerFrameHandler(AUDIO_FRAME_TYPE, [this, scriptedAvatar](Frame::ConstPointer frame) {
        static quint16 audioSequenceNumber{ 0 };

        QByteArray audio(frame->data);

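        // run the noise gate over the raw 16-bit samples in place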
        if (_isNoiseGateEnabled) {
            int16_t* samples = reinterpret_cast<int16_t*>(audio.data());
            int numSamples = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL;
            _audioGate.render(samples, samples, numSamples);
        }

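        // update the avatar's audio loudness from the gated samples; the gate
        // state machine below reads it back via getAudioLoudness()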
        computeLoudness(&audio, scriptedAvatar);

        // state machine to detect gate opening and closing
        bool audioGateOpen = (scriptedAvatar->getAudioLoudness() != 0.0f);
        bool openedInLastBlock = !_audioGateOpen && audioGateOpen;  // the gate just opened
        bool closedInLastBlock = _audioGateOpen && !audioGateOpen;  // the gate just closed
        _audioGateOpen = audioGateOpen;
        Q_UNUSED(openedInLastBlock);

        // the codec must be flushed to silence before sending silent packets,
        // so delay the transition to silent packets by one packet after becoming silent.
        auto packetType = PacketType::MicrophoneAudioNoEcho;
        if (!audioGateOpen && !closedInLastBlock) {
            packetType = PacketType::SilentAudioFrame;
        }

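        // stamp the packet with the avatar's world position and head orientation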
        Transform audioTransform;
        auto headOrientation = scriptedAvatar->getHeadOrientation();
        audioTransform.setTranslation(scriptedAvatar->getWorldPosition());
        audioTransform.setRotation(headOrientation);

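        // encode with the active codec if one is set, otherwise send the samples as-is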
        QByteArray encodedBuffer;
        if (_encoder) {
            _encoder->encode(audio, encodedBuffer);
        } else {
            encodedBuffer = audio;
        }

        AbstractAudioInterface::emitAudioPacket(encodedBuffer.data(), encodedBuffer.size(), audioSequenceNumber, false, 
            audioTransform, scriptedAvatar->getWorldPosition(), glm::vec3(0),
            packetType, _selectedCodecName);
    });

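    // expose the other avatars in the domain to the script as AvatarList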
    auto avatarHashMap = DependencyManager::set<AvatarHashMap>();
    _scriptEngine->registerGlobalObject("AvatarList", avatarHashMap.data());

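    // feed incoming avatar packets to the AvatarHashMap so AvatarList stays current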
    auto& packetReceiver = DependencyManager::get<NodeList>()->getPacketReceiver();
    packetReceiver.registerListener(PacketType::BulkAvatarData, avatarHashMap.data(), "processAvatarDataPacket");
    packetReceiver.registerListener(PacketType::KillAvatar, avatarHashMap.data(), "processKillAvatar");
    packetReceiver.registerListener(PacketType::AvatarIdentity, avatarHashMap.data(), "processAvatarIdentityPacket");

    // register ourselves to the script engine
    _scriptEngine->registerGlobalObject("Agent", this);

    _scriptEngine->registerGlobalObject("SoundCache", DependencyManager::get<SoundCache>().data());
    _scriptEngine->registerGlobalObject("AnimationCache", DependencyManager::get<AnimationCache>().data());

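    // let scripts construct WebSocket servers via the WebSocketServer constructor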
    QScriptValue webSocketServerConstructorValue = _scriptEngine->newFunction(WebSocketServerClass::constructor);
    _scriptEngine->globalObject().setProperty("WebSocketServer", webSocketServerConstructorValue);

    auto entityScriptingInterface = DependencyManager::get<EntityScriptingInterface>();

    _scriptEngine->registerGlobalObject("EntityViewer", &_entityViewer);

    _scriptEngine->registerGetterSetter("location", LocationScriptingInterface::locationGetter,
        LocationScriptingInterface::locationSetter);

    auto recordingInterface = DependencyManager::get<RecordingScriptingInterface>();
    _scriptEngine->registerGlobalObject("Recording", recordingInterface.data());

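    // wire entity scripting to the viewer's local entity tree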
    entityScriptingInterface->init();

    _entityViewer.init();

    entityScriptingInterface->setEntityTree(_entityViewer.getTree());

    DependencyManager::set<AssignmentParentFinder>(_entityViewer.getTree());

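    // start the avatar audio timer with a queued invocation (it may live on another thread)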
    QMetaObject::invokeMethod(&_avatarAudioTimer, "start");

    // Agents should run at 45 Hz
    static const int AVATAR_DATA_HZ = 45;
    static const int AVATAR_DATA_IN_MSECS = MSECS_PER_SECOND / AVATAR_DATA_HZ;
    QTimer* avatarDataTimer = new QTimer(this);
    connect(avatarDataTimer, &QTimer::timeout, this, &Agent::processAgentAvatar);
    avatarDataTimer->setSingleShot(false);
    avatarDataTimer->setInterval(AVATAR_DATA_IN_MSECS);
    avatarDataTimer->setTimerType(Qt::PreciseTimer);
    avatarDataTimer->start();

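    // run() blocks here until the script finishes or is stopped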
    _scriptEngine->run();

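    // the script is done; unregister the frame handlers registered above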
    Frame::clearFrameHandler(AUDIO_FRAME_TYPE);
    Frame::clearFrameHandler(AVATAR_FRAME_TYPE);

    DependencyManager::destroy<RecordingScriptingInterface>();

    setFinished(true);
}
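
For context, computeLoudness() is a member defined elsewhere in Agent. A minimal sketch of what it might look like, assuming loudness is the mean absolute amplitude of the 16-bit network samples and that ScriptableAvatar exposes a setAudioLoudness() setter (hypothetical here) paired with the getAudioLoudness() getter read by the gate state machine:

// Sketch only; not the actual implementation. Assumes <cstdlib> for std::abs
// and a hypothetical ScriptableAvatar::setAudioLoudness() setter.
void Agent::computeLoudness(const QByteArray* decodedBuffer, QSharedPointer<ScriptableAvatar> scriptableAvatar) {
    float loudness = 0.0f;
    if (decodedBuffer && !decodedBuffer->isEmpty()) {
        auto samples = reinterpret_cast<const int16_t*>(decodedBuffer->constData());
        int numSamples = decodedBuffer->size() / (int)sizeof(int16_t);
        for (int i = 0; i < numSamples; ++i) {
            loudness += (float)std::abs(samples[i]);   // accumulate absolute amplitude
        }
        loudness /= (float)numSamples;                 // mean absolute amplitude
    }
    scriptableAvatar->setAudioLoudness(loudness);      // read back via getAudioLoudness() above
}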