// Deletes every node queued in m_nodesToDelete. Must run on the main thread;
// the context lock is held for the whole drain so the audio thread cannot
// observe half-deleted nodes.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protect(*this);
    {
        AutoLocker locker(this);

        // size() is re-evaluated on every iteration, so entries appended to
        // the queue while we are draining it are processed as well.
        while (size_t n = m_nodesToDelete.size()) {
            // Pop from the back to avoid shifting the remaining elements.
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
// Custom JS binding for AudioNode.connect(destination[, output[, input]]).
// The destination node is mandatory; both indices default to 0 when omitted.
JSC::JSValue JSAudioNode::connect(JSC::ExecState* exec)
{
    if (exec->argumentCount() < 1)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

    AudioNode* destination = toAudioNode(exec->argument(0));
    if (!destination)
        return throwError(exec, createSyntaxError(exec, "Invalid destination node"));

    unsigned outputIndex = exec->argumentCount() > 1 ? exec->argument(1).toInt32(exec) : 0;
    unsigned inputIndex = exec->argumentCount() > 2 ? exec->argument(2).toInt32(exec) : 0;

    // Forward to the implementation object; it validates the indices.
    AudioNode* thisNode = static_cast<AudioNode*>(impl());
    if (!thisNode->connect(destination, outputIndex, inputIndex))
        return throwError(exec, createSyntaxError(exec, "Invalid index parameter"));

    return JSC::jsUndefined();
}
// Custom V8 binding for AudioNode.connect(destination[, output[, input]]).
// Indices default to 0; arguments that fail integer conversion are rejected.
v8::Handle<v8::Value> V8AudioNode::connectCallback(const v8::Arguments& args)
{
    if (args.Length() < 1)
        return throwError("Not enough arguments", V8Proxy::SyntaxError);

    AudioNode* destination = toNative(args[0]->ToObject());
    if (!destination)
        return throwError("Invalid destination node", V8Proxy::SyntaxError);

    unsigned outputIndex = 0;
    unsigned inputIndex = 0;
    bool converted = false;
    if (args.Length() > 1) {
        outputIndex = toInt32(args[1], converted);
        if (!converted)
            return throwError("Invalid index parameters", V8Proxy::SyntaxError);
    }
    if (args.Length() > 2) {
        inputIndex = toInt32(args[2], converted);
        if (!converted)
            return throwError("Invalid index parameters", V8Proxy::SyntaxError);
    }

    // Forward to the implementation object; it validates the port indices.
    AudioNode* thisNode = toNative(args.Holder());
    if (!thisNode->connect(destination, outputIndex, inputIndex))
        return throwError("Invalid index parameter", V8Proxy::SyntaxError);

    return v8::Undefined();
}
// Main-thread task that forwards an AudioContext state change to the
// context's OnStateChanged() machinery. The task carries either the context
// directly or an AudioNodeStream from which the context is recovered.
NS_IMETHODIMP
StateChangeTask::Run()
{
    MOZ_ASSERT(NS_IsMainThread());

    // Nothing to do if neither a context nor a stream was supplied.
    if (!mAudioContext && !mAudioNodeStream) {
        return NS_OK;
    }

    if (mAudioNodeStream) {
        // Recover the context via the stream's engine; either lookup can fail
        // if the node or its context has already gone away — bail silently.
        AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread();
        if (!node) {
            return NS_OK;
        }
        mAudioContext = node->Context();
        if (!mAudioContext) {
            return NS_OK;
        }
    }

    mAudioContext->OnStateChanged(mPromise, mNewState);
    // We can't call Release() on the AudioContext on the MSG thread, so we
    // unref it here, on the main thread.
    mAudioContext = nullptr;

    return NS_OK;
}
// Deletes every node queued in m_nodesToDelete while holding the context
// lock. Main-thread only.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    // Note: deleting an AudioNode can cause m_nodesToDelete to grow.
    // size() is re-read each iteration, so newly queued nodes are handled too.
    while (size_t n = m_nodesToDelete.size()) {
        // Pop from the back to avoid shifting remaining elements.
        AudioNode* node = m_nodesToDelete[n - 1];
        m_nodesToDelete.removeLast();

        // Before deleting the node, clear out any AudioNodeInputs from m_dirtyAudioNodeInputs.
        unsigned numberOfInputs = node->numberOfInputs();
        for (unsigned i = 0; i < numberOfInputs; ++i)
            m_dirtyAudioNodeInputs.remove(node->input(i));

        // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
        unsigned numberOfOutputs = node->numberOfOutputs();
        for (unsigned i = 0; i < numberOfOutputs; ++i)
            m_dirtyAudioNodeOutputs.remove(node->output(i));

        // Finally, delete it.
        delete node;
    }
    m_isDeletionScheduled = false;
}
void ProcessorGraph::createDefaultNodes() { // add output node -- sends output to the audio card AudioProcessorGraph::AudioGraphIOProcessor* on = new AudioProcessorGraph::AudioGraphIOProcessor(AudioProcessorGraph::AudioGraphIOProcessor::audioOutputNode); // add record node -- sends output to disk RecordNode* recn = new RecordNode(); recn->setNodeId(RECORD_NODE_ID); // add audio node -- takes all inputs and selects those to be used for audio monitoring AudioNode* an = new AudioNode(); an->setNodeId(AUDIO_NODE_ID); // add message center MessageCenter* msgCenter = new MessageCenter(); msgCenter->setNodeId(MESSAGE_CENTER_ID); addNode(on, OUTPUT_NODE_ID); addNode(recn, RECORD_NODE_ID); addNode(an, AUDIO_NODE_ID); addNode(msgCenter, MESSAGE_CENTER_ID); }
// Deletes queued nodes. Runs while owning the graph (or after the audio
// thread has finished); deletion is capped per call to bound the amount of
// work performed on the realtime audio thread.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isGraphOwner() || isAudioThreadFinished());

    // Note: deleting an AudioNode can cause m_nodesToDelete to grow.
    size_t nodesDeleted = 0;
    while (size_t n = m_nodesToDelete.size()) {
        // Pop from the back to avoid shifting remaining elements.
        AudioNode* node = m_nodesToDelete[n - 1];
        m_nodesToDelete.removeLast();

        // Before deleting the node, clear out any AudioNodeInputs from m_dirtyAudioNodeInputs.
        unsigned numberOfInputs = node->numberOfInputs();
        for (unsigned i = 0; i < numberOfInputs; ++i)
            m_dirtyAudioNodeInputs.remove(node->input(i));

        // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
        unsigned numberOfOutputs = node->numberOfOutputs();
        for (unsigned i = 0; i < numberOfOutputs; ++i)
            m_dirtyAudioNodeOutputs.remove(node->output(i));

        // Finally, delete it.
        delete node;

        // Don't delete too many nodes per render quantum since we don't want to do too much work in the realtime audio thread.
        if (++nodesDeleted > MaxNodesToDeletePerQuantum)
            break;
    }
}
// Removes the connection from this node's output slot aOutputNodeIndex to
// input slot aInputIndex of the destination node, provided that input really
// originates from this node. Returns true when a connection was removed.
bool AudioNode::DisconnectFromOutputIfConnected<AudioNode>(uint32_t aOutputNodeIndex, uint32_t aInputIndex)
{
    WEB_AUDIO_API_LOG("%f: %s %u Disconnect()", Context()->CurrentTime(), NodeType(), Id());

    // Fix: validate the indices BEFORE dereferencing. The original indexed
    // mOutputNodes[aOutputNodeIndex] first and asserted afterwards, reading
    // out of bounds in exactly the case the assertions are meant to catch.
    MOZ_ASSERT(aOutputNodeIndex < mOutputNodes.Length());
    AudioNode* destination = mOutputNodes[aOutputNodeIndex];
    MOZ_ASSERT(aInputIndex < destination->InputNodes().Length());

    // An upstream node may be starting to play on the graph thread, and the
    // engine for a downstream node may be sending a PlayingRefChangeHandler
    // ADDREF message to this (main) thread. Wait for a round trip before
    // releasing nodes, to give engines receiving sound now time to keep their
    // nodes alive.
    class RunnableRelease final : public Runnable {
    public:
        explicit RunnableRelease(already_AddRefed<AudioNode> aNode)
            : mozilla::Runnable("RunnableRelease")
            , mNode(aNode)
        {
        }

        NS_IMETHOD Run() override
        {
            // Drop the reference on the main thread.
            mNode = nullptr;
            return NS_OK;
        }

    private:
        RefPtr<AudioNode> mNode;
    };

    InputNode& input = destination->mInputNodes[aInputIndex];
    if (input.mInputNode != this) {
        // This input comes from some other node; leave it alone.
        return false;
    }

    // Remove one instance of 'dest' from mOutputNodes. There could be
    // others, and it's not correct to remove them all since some of them
    // could be for different output ports.
    RefPtr<AudioNode> output = mOutputNodes[aOutputNodeIndex].forget();
    mOutputNodes.RemoveElementAt(aOutputNodeIndex);

    // Destroying the InputNode here sends a message to the graph thread
    // to disconnect the streams, which should be sent before the
    // RunAfterPendingUpdates() call below.
    destination->mInputNodes.RemoveElementAt(aInputIndex);
    output->NotifyInputsChanged();

    if (mStream) {
        nsCOMPtr<nsIRunnable> runnable = new RunnableRelease(output.forget());
        mStream->RunAfterPendingUpdates(runnable.forget());
    }
    return true;
}
// Custom JS binding for AudioNode.disconnect([output]); the output index
// defaults to 0 when omitted.
JSC::JSValue JSAudioNode::disconnect(JSC::ExecState* exec)
{
    unsigned outputIndex = exec->argumentCount() > 0 ? exec->argument(0).toInt32(exec) : 0;
    static_cast<AudioNode*>(impl())->disconnect(outputIndex);
    return JSC::jsUndefined();
}
void AudioContext::handleDeferredFinishDerefs() { ASSERT(isAudioThread() && isGraphOwner()); for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) { AudioNode* node = m_deferredFinishDerefList[i]; node->finishDeref(AudioNode::RefTypeConnection); } m_deferredFinishDerefList.clear(); }
void AudioNodeOutput::propagateChannelCount() { ASSERT(context()->isAudioThread() && context()->isGraphOwner()); if (isChannelCountKnown()) { // Announce to any nodes we're connected to that we changed our channel count for its input. for (InputsIterator i = m_inputs.begin(); i != m_inputs.end(); ++i) { AudioNodeInput* input = *i; AudioNode* connectionNode = input->node(); connectionNode->checkNumberOfChannelsForInput(input); } } }
void AudioContext::handleStoppableSourceNodes() { ASSERT(isGraphOwner()); // Find AudioBufferSourceNodes to see if we can stop playing them. for (unsigned i = 0; i < m_referencedNodes.size(); ++i) { AudioNode* node = m_referencedNodes.at(i).get(); if (node->handler().nodeType() == AudioHandler::NodeTypeAudioBufferSource) { AudioBufferSourceNode* sourceNode = static_cast<AudioBufferSourceNode*>(node); sourceNode->audioBufferSourceHandler().handleStoppableSourceNode(); } } }
// Handler for a MediaStreamAudioDestinationNode: mixes the node's input into
// m_mixBus and exposes it as the audio track of a newly created MediaStream.
MediaStreamAudioDestinationHandler::MediaStreamAudioDestinationHandler(AudioNode& node, size_t numberOfChannels)
    : AudioBasicInspectorHandler(NodeTypeMediaStreamAudioDestination, node, node.context()->sampleRate(), numberOfChannels)
    , m_mixBus(AudioBus::create(numberOfChannels, ProcessingSizeInFrames))
{
    // Create a live local audio source that backs the destination stream.
    m_source = MediaStreamSource::create("WebAudio-" + createCanonicalUUIDString(), MediaStreamSource::TypeAudio, "MediaStreamAudioDestinationNode", false, true, MediaStreamSource::ReadyStateLive, true);
    MediaStreamSourceVector audioSources;
    audioSources.append(m_source.get());
    // No video track -- this stream carries audio only.
    MediaStreamSourceVector videoSources;
    m_stream = MediaStream::create(node.context()->executionContext(), MediaStreamDescriptor::create(audioSources, videoSources));
    MediaStreamCenter::instance().didCreateMediaStreamAndTracks(m_stream->descriptor());

    // Tell the source its channel count and sample rate before processing starts.
    m_source->setAudioFormat(numberOfChannels, node.context()->sampleRate());

    initialize();
}
// Removes the connection(s) from this node's output port aOutput to
// aDestination's input port aInput. Throws IndexSizeError for out-of-range
// ports and InvalidAccessError when no matching connection existed.
void AudioNode::Disconnect(AudioNode& aDestination, uint32_t aOutput, uint32_t aInput, ErrorResult& aRv)
{
    if (aOutput >= NumberOfOutputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }
    if (aInput >= aDestination.NumberOfInputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    bool wasConnected = false;

    // Iterate backwards -- presumably because DisconnectFromOutputIfConnected
    // can remove elements from these arrays, which keeps the remaining
    // indices valid. TODO(review): confirm against the helper.
    for (int32_t outputIndex = mOutputNodes.Length() - 1; outputIndex >= 0; --outputIndex) {
        for (int32_t inputIndex = aDestination.mInputNodes.Length() - 1; inputIndex >= 0; --inputIndex) {
            InputNode& input = aDestination.mInputNodes[inputIndex];
            if (input.mOutputPort == aOutput && input.mInputPort == aInput) {
                wasConnected |= DisconnectFromOutputIfConnected(aDestination, outputIndex, inputIndex);
                // Only one input per output can match; move to the next output.
                break;
            }
        }
    }

    if (!wasConnected) {
        aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR);
        return;
    }

    // This disconnection may have disconnected a panner and a source.
    Context()->UpdatePannerSource();
}
// Custom V8 binding for AudioNode.disconnect([output]); the output index
// defaults to 0 and must convert cleanly to an integer.
v8::Handle<v8::Value> V8AudioNode::disconnectCallback(const v8::Arguments& args)
{
    unsigned outputIndex = 0;
    if (args.Length() > 0) {
        bool converted = false;
        outputIndex = toInt32(args[0], converted);
        if (!converted)
            return throwError("Invalid index parameters", V8Proxy::SyntaxError);
    }

    // Forward to the implementation; it validates the index.
    AudioNode* thisNode = toNative(args.Holder());
    if (!thisNode->disconnect(outputIndex))
        return throwError("Invalid index parameter", V8Proxy::SyntaxError);

    return v8::Undefined();
}
void ProcessorGraph::createDefaultNodes() { // add output node -- sends output to the audio card AudioProcessorGraph::AudioGraphIOProcessor* on = new AudioProcessorGraph::AudioGraphIOProcessor(AudioProcessorGraph::AudioGraphIOProcessor::audioOutputNode); // add record node -- sends output to disk RecordNode* recn = new RecordNode(); recn->setNodeId(RECORD_NODE_ID); // add audio node -- takes all inputs and selects those to be used for audio monitoring AudioNode* an = new AudioNode(); an->setNodeId(AUDIO_NODE_ID); // add audio resampling node -- resamples continuous signals to 44.1kHz AudioResamplingNode* arn = new AudioResamplingNode(); arn->setNodeId(RESAMPLING_NODE_ID); addNode(on, OUTPUT_NODE_ID); addNode(recn, RECORD_NODE_ID); addNode(an, AUDIO_NODE_ID); addNode(arn, RESAMPLING_NODE_ID); // connect audio subnetwork for (int n = 0; n < 2; n++) { addConnection(AUDIO_NODE_ID, n, RESAMPLING_NODE_ID, n); addConnection(RESAMPLING_NODE_ID, n, OUTPUT_NODE_ID, n); } addConnection(AUDIO_NODE_ID, midiChannelIndex, RESAMPLING_NODE_ID, midiChannelIndex); std::cout << "Default nodes created." << std::endl; }
// Connects output port aOutput of this node to input port aInput of
// aDestination. A duplicate connection is a silent no-op. Throws
// IndexSizeError for out-of-range ports and SyntaxError for cross-context
// connections.
void AudioNode::Connect(AudioNode& aDestination, uint32_t aOutput, uint32_t aInput, ErrorResult& aRv)
{
    if (aOutput >= NumberOfOutputs() || aInput >= aDestination.NumberOfInputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (Context() != aDestination.Context()) {
        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
        return;
    }

    if (FindIndexOfNodeWithPorts(aDestination.mInputNodes, this, aInput, aOutput) != nsTArray<AudioNode::InputNode>::NoIndex) {
        // connection already exists.
        return;
    }

    // The MediaStreamGraph will handle cycle detection. We don't need to do it
    // here.

    // Record the edge on both endpoints: forward on this node, back-edge on
    // the destination.
    mOutputNodes.AppendElement(&aDestination);
    InputNode* input = aDestination.mInputNodes.AppendElement();
    input->mInputNode = this;
    input->mInputPort = aInput;
    input->mOutputPort = aOutput;
    AudioNodeStream* destinationStream = aDestination.mStream;
    if (mStream && destinationStream) {
        // Connect streams in the MediaStreamGraph
        MOZ_ASSERT(aInput <= UINT16_MAX, "Unexpected large input port number");
        MOZ_ASSERT(aOutput <= UINT16_MAX, "Unexpected large output port number");
        input->mStreamPort = destinationStream->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK, static_cast<uint16_t>(aInput), static_cast<uint16_t>(aOutput));
    }
    aDestination.NotifyInputsChanged();

    // This connection may have connected a panner and a source.
    Context()->UpdatePannerSource();
}
/*----------------------------------------------------------------------------*/
// Hot-unplug callback: clears whichever cached member node handle matches the
// node that was removed from the device.
void SoftKineticCamera::onNodeDisconnected(Device device, Device::NodeRemovedData data)
{
    if (data.node.is<AudioNode>()) {
        if (data.node.as<AudioNode>() == m_anode)
            m_anode.unset();
    }
    if (data.node.is<ColorNode>()) {
        if (data.node.as<ColorNode>() == m_cnode)
            m_cnode.unset();
    }
    if (data.node.is<DepthNode>()) {
        if (data.node.as<DepthNode>() == m_dnode)
            m_dnode.unset();
    }
    printf("Node disconnected\n");
}
/*----------------------------------------------------------------------------*/
// Hot-unplug callback: clears whichever cached global node handle matches the
// node that was removed from the device.
void onNodeDisconnected(Device device, Device::NodeRemovedData data)
{
    if (data.node.is<AudioNode>()) {
        if (data.node.as<AudioNode>() == g_anode)
            g_anode.unset();
    }
    if (data.node.is<ColorNode>()) {
        if (data.node.as<ColorNode>() == g_cnode)
            g_cnode.unset();
    }
    if (data.node.is<DepthNode>()) {
        if (data.node.as<DepthNode>() == g_dnode)
            g_dnode.unset();
    }
    printf("Node disconnected\n");
}
// Factory for an AudioNodeStream driven by aEngine. Falls back to the
// context's graph when the caller does not supply one, and copies the node's
// channel mixing parameters onto the stream when an AudioNode exists.
/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine, Flags aFlags, MediaStreamGraph* aGraph)
{
    MOZ_ASSERT(NS_IsMainThread());

    // MediaRecorders use an AudioNodeStream, but no AudioNode
    AudioNode* node = aEngine->NodeMainThread();

    MediaStreamGraph* graph = aGraph ? aGraph : aCtx->Graph();
    RefPtr<AudioNodeStream> stream = new AudioNodeStream(aEngine, aFlags, graph->GraphRate());
    // Start suspended if the context wants new streams suspended.
    stream->mSuspendedCount += aCtx->ShouldSuspendNewStream();
    if (node) {
        stream->SetChannelMixingParametersImpl(node->ChannelCount(), node->ChannelCountModeValue(), node->ChannelInterpretationValue());
    }
    graph->AddStream(stream);
    return stream.forget();
}
// nsIMemoryReporter callback: emits one heap report per live AudioNode
// (bucketed by node type in the report path) plus one aggregate report for
// the AudioContext object itself. aAnonymize is unused here.
NS_IMETHODIMP
AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, bool aAnonymize)
{
    const nsLiteralCString nodeDescription("Memory used by AudioNode DOM objects (Web Audio).");
    for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) {
        AudioNode* node = iter.Get()->GetKey();
        int64_t amount = node->SizeOfIncludingThis(MallocSizeOf);
        nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes", node->NodeType());
        aHandleReport->Callback(EmptyCString(), domNodePath, KIND_HEAP, UNITS_BYTES, amount, nodeDescription, aData);
    }

    int64_t amount = SizeOfIncludingThis(MallocSizeOf);
    MOZ_COLLECT_REPORT(
        "explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES, amount,
        "Memory used by AudioContext objects (Web Audio).");

    return NS_OK;
}
// Factory for an AudioNodeStream driven by aEngine. The caller must supply a
// graph (release-asserted). Copies the node's channel mixing parameters onto
// the stream when an AudioNode exists.
/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine, Flags aFlags, MediaStreamGraph* aGraph)
{
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_RELEASE_ASSERT(aGraph);

    // MediaRecorders use an AudioNodeStream, but no AudioNode
    AudioNode* node = aEngine->NodeMainThread();

    RefPtr<AudioNodeStream> stream = new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate(), aCtx->GetOwnerGlobal()->AbstractMainThreadFor(TaskCategory::Other));
    // Start suspended if the context wants new streams suspended.
    stream->mSuspendedCount += aCtx->ShouldSuspendNewStream();
    if (node) {
        stream->SetChannelMixingParametersImpl(node->ChannelCount(), node->ChannelCountModeValue(), node->ChannelInterpretationValue());
    }
    aGraph->AddStream(stream);
    return stream.forget();
}
// Handler for a MediaStreamAudioSourceNode: pulls audio from the given
// MediaStream/track through the supplied AudioSourceProvider.
MediaStreamAudioSourceHandler::MediaStreamAudioSourceHandler(AudioNode& node, MediaStream& mediaStream, MediaStreamTrack* audioTrack, PassOwnPtr<AudioSourceProvider> audioSourceProvider)
    : AudioHandler(NodeTypeMediaStreamAudioSource, node, node.context()->sampleRate())
    , m_mediaStream(mediaStream)
    , m_audioTrack(audioTrack)
    , m_audioSourceProvider(audioSourceProvider)
    , m_sourceNumberOfChannels(0) // unknown until the source format is seen
{
    // Default to stereo. This could change depending on the format of the
    // MediaStream's audio track.
    addOutput(2);

    initialize();
}
/*----------------------------------------------------------------------------*/ int main(int argc, char* argv[]) { ros::init (argc, argv, "pub_pcl"); ros::NodeHandle nh; pub = nh.advertise<PointCloud> ("points2", 1); point_cloud::PointCloud detector("pcl_class1"); //creates PointCloudLab class object int user_input = 1; g_context = Context::create("localhost"); g_context.deviceAddedEvent().connect(&onDeviceConnected); g_context.deviceRemovedEvent().connect(&onDeviceDisconnected); // Get the list of currently connected devices vector<Device> da = g_context.getDevices(); // We are only interested in the first device if (da.size() >= 1) { g_bDeviceFound = true; da[0].nodeAddedEvent().connect(&onNodeConnected); da[0].nodeRemovedEvent().connect(&onNodeDisconnected); vector<Node> na = da[0].getNodes(); printf("Found %u nodes\n",na.size()); for (int n = 0; n < (int)na.size(); n++) configureNode(na[n]); } while(user_input==1) { g_context.startNodes(); g_context.run(); g_context.stopNodes(); cout << "Please enter an 1 or 0: "; cin >> user_input; } if (g_cnode.isSet()) g_context.unregisterNode(g_cnode); if (g_dnode.isSet()) g_context.unregisterNode(g_dnode); if (g_anode.isSet()) g_context.unregisterNode(g_anode); if (g_pProjHelper) delete g_pProjHelper; return 0; }
// Factory for an AudioNodeStream driven by aEngine. Asserts that any
// associated node's context sample rate matches the graph rate, and copies
// the node's channel mixing parameters onto the stream.
/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine, Flags aFlags)
{
    MOZ_ASSERT(NS_IsMainThread());

    // MediaRecorders use an AudioNodeStream, but no AudioNode
    AudioNode* node = aEngine->NodeMainThread();
    MOZ_ASSERT(!node || aGraph->GraphRate() == node->Context()->SampleRate());
    dom::AudioContext::AudioContextId contextIdForStream = node ? node->Context()->Id() : NO_AUDIO_CONTEXT;

    nsRefPtr<AudioNodeStream> stream = new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate(), contextIdForStream);
    if (aEngine->HasNode()) {
        // Reuse the cached main-thread node instead of calling
        // aEngine->NodeMainThread() three more times.
        stream->SetChannelMixingParametersImpl(node->ChannelCount(),
                                               node->ChannelCountModeValue(),
                                               node->ChannelInterpretationValue());
    }
    aGraph->AddStream(stream);
    return stream.forget();
}
// Connects this node's output port aOutput to aDestination's input port
// aInput. Note: this stores at most one connection per port slot --
// ReplaceElementAt overwrites any previous entry at the same index.
void AudioNode::Connect(AudioNode& aDestination, uint32_t aOutput, uint32_t aInput, ErrorResult& aRv)
{
    if (aOutput >= MaxNumberOfOutputs() || aInput >= aDestination.MaxNumberOfInputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    // Cross-context connections are not allowed.
    if (Context() != aDestination.Context()) {
        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
        return;
    }

    // XXX handle cycle detection per spec

    // Record the forward edge on this node...
    Output output(&aDestination, aInput);
    mOutputs.EnsureLengthAtLeast(aOutput + 1);
    mOutputs.ReplaceElementAt(aOutput, output);

    // ...and the matching back edge on the destination.
    Input input(this, aOutput);
    aDestination.mInputs.EnsureLengthAtLeast(aInput + 1);
    aDestination.mInputs.ReplaceElementAt(aInput, input);
}
/*----------------------------------------------------------------------------*/ void SoftKineticCamera::configureAudioNode() { m_anode.newSampleReceivedEvent().connect(&onNewAudioSample); AudioNode::Configuration config = m_anode.getConfiguration(); config.sampleRate = 44100; try { m_context.requestControl(m_anode,0); m_anode.setConfiguration(config); m_anode.setInputMixerLevel(0.5f); } catch (ArgumentException& e) { printf("Argument Exception: %s\n",e.what()); } catch (UnauthorizedAccessException& e) { printf("Unauthorized Access Exception: %s\n",e.what()); } catch (ConfigurationException& e) { printf("Configuration Exception: %s\n",e.what()); } catch (StreamingException& e) { printf("Streaming Exception: %s\n",e.what()); } catch (TimeoutException&) { printf("TimeoutException\n"); } }
// Handler for a MediaElementAudioSourceNode: captures audio from an
// HTMLMediaElement. A CORS access check against the element's current src is
// performed up front; see m_maybePrintCORSMessage below.
MediaElementAudioSourceHandler::MediaElementAudioSourceHandler(AudioNode& node, HTMLMediaElement& mediaElement)
    : AudioHandler(NodeTypeMediaElementAudioSource, node, node.context()->sampleRate())
    , m_mediaElement(mediaElement)
    , m_sourceNumberOfChannels(0)
    , m_sourceSampleRate(0)
    , m_passesCurrentSrcCORSAccessCheck(passesCurrentSrcCORSAccessCheck(mediaElement.currentSrc()))
    // Only consider printing a CORS message when the check failed.
    , m_maybePrintCORSMessage(!m_passesCurrentSrcCORSAccessCheck)
    , m_currentSrcString(mediaElement.currentSrc().string())
{
    ASSERT(isMainThread());
    // Default to stereo. This could change depending on what the media element
    // .src is set to.
    addOutput(2);

    initialize();
}
// Removes the connection(s) from this node's output port aOutput to
// aDestination's input port aInput. Throws IndexSizeError for out-of-range
// ports and InvalidAccessError when nothing matching was connected.
void AudioNode::Disconnect(AudioNode& aDestination, uint32_t aOutput, uint32_t aInput, ErrorResult& aRv)
{
    if (aOutput >= NumberOfOutputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }
    if (aInput >= aDestination.NumberOfInputs()) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    bool wasConnected = false;

    // Iterate backwards -- presumably because the helper can remove output
    // entries, which keeps the remaining indices valid. TODO(review): confirm.
    for (int32_t outputIndex = mOutputNodes.Length() - 1; outputIndex >= 0; --outputIndex) {
        // Only outputs that point at aDestination are candidates.
        if (mOutputNodes[outputIndex] != &aDestination) {
            continue;
        }
        // Disconnect every input of aDestination matching both port numbers.
        wasConnected |= DisconnectMatchingDestinationInputs<AudioNode>(outputIndex, [aOutput, aInput](const InputNode& aInputNode) {
            return aInputNode.mOutputPort == aOutput && aInputNode.mInputPort == aInput;
        });
    }

    if (!wasConnected) {
        aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR);
        return;
    }

    // This disconnection may have disconnected a panner and a source.
    Context()->UpdatePannerSource();
}
/*----------------------------------------------------------------------------*/
// Binds the first node of each supported kind (depth, color, audio) that we
// encounter, configures it, and registers it with the context. Nodes of a
// kind we have already bound are ignored.
void SoftKineticCamera::configureNode(Node node)
{
    if (node.is<DepthNode>()) {
        if (!m_dnode.isSet()) {
            m_dnode = node.as<DepthNode>();
            configureDepthNode();
            m_context.registerNode(node);
        }
    }

    if (node.is<ColorNode>()) {
        if (!m_cnode.isSet()) {
            m_cnode = node.as<ColorNode>();
            configureColorNode();
            m_context.registerNode(node);
        }
    }

    if (node.is<AudioNode>()) {
        if (!m_anode.isSet()) {
            m_anode = node.as<AudioNode>();
            configureAudioNode();
            m_context.registerNode(node);
        }
    }
}