Example #1
    PeakCompNode::PeakCompNode(float sampleRate) : lab::AudioBasicProcessorNode(sampleRate)
    {
        m_processor.reset(new PeakCompNodeInternal(sampleRate));

        internalNode = static_cast<PeakCompNodeInternal*>(m_processor.get());
        
        setNodeType(lab::NodeType::NodeTypePeakComp);

        addInput(std::unique_ptr<AudioNodeInput>(new lab::AudioNodeInput(this)));
        addOutput(std::unique_ptr<AudioNodeOutput>(new lab::AudioNodeOutput(this, 2))); // 2 channels (stereo)
        
        initialize();
    }
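
Most of the audio-node constructors collected here share one skeleton: allocate any internal processor, register inputs, register an output with a fixed channel count, tag the node type, and finish with initialize(). The following is a minimal, self-contained sketch of that registration pattern using hypothetical Node, Input and Output types; it is not the LabSound or Blink API.

#include <memory>
#include <string>
#include <vector>

// Hypothetical minimal types, for illustration only.
struct Input  { explicit Input(std::string n) : name(std::move(n)) {} std::string name; };
struct Output { Output(std::string n, int ch) : name(std::move(n)), channels(ch) {} std::string name; int channels; };

class Node {
public:
    virtual ~Node() = default;
protected:
    void addInput(std::unique_ptr<Input> in)    { m_inputs.push_back(std::move(in)); }
    void addOutput(std::unique_ptr<Output> out) { m_outputs.push_back(std::move(out)); }
    void initialize()                           { m_initialized = true; }
private:
    std::vector<std::unique_ptr<Input>>  m_inputs;
    std::vector<std::unique_ptr<Output>> m_outputs;
    bool m_initialized = false;
};

// A gain-like node: one input and one stereo output, registered in the constructor.
class SimpleGainNode : public Node {
public:
    SimpleGainNode() {
        addInput(std::make_unique<Input>("in"));
        addOutput(std::make_unique<Output>("out", 2)); // stereo by default
        initialize();
    }
};

int main() { SimpleGainNode node; return 0; }
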
void IPLFloodFill::init()
{
    // init
    _result         = NULL;

    // basic settings
    setClassName("IPLFloodFill");
    setTitle("Flood Fill");
    setCategory(IPLProcess::CATEGORY_LOCALOPERATIONS);
    setOpenCVSupport(IPLOpenCVSupport::OPENCV_ONLY);
    setDescription("");

    // inputs and outputs
    addInput("Image", IPL_IMAGE_COLOR);
    addOutput("Magnitude", IPL_IMAGE_GRAYSCALE);
    addOutput("Edge", IPL_IMAGE_GRAYSCALE);
    addOutput("Gradient", IPL_IMAGE_GRAYSCALE);

    // properties
    addProcessPropertyInt("threshold", "Threshold", "", 1, IPL_WIDGET_SLIDER, 1, 255);
    addProcessPropertyDouble("highThreshold", "High Threshold", "Thresholds for the hysteresis procedure", 0.6, IPL_WIDGET_SLIDER, 0.0, 1.0);
}
MediaStreamAudioSourceHandler::MediaStreamAudioSourceHandler(AudioNode& node, MediaStream& mediaStream, MediaStreamTrack* audioTrack, PassOwnPtr<AudioSourceProvider> audioSourceProvider)
    : AudioHandler(NodeTypeMediaStreamAudioSource, node, node.context()->sampleRate())
    , m_mediaStream(mediaStream)
    , m_audioTrack(audioTrack)
    , m_audioSourceProvider(audioSourceProvider)
    , m_sourceNumberOfChannels(0)
{
    // Default to stereo. This could change depending on the format of the
    // MediaStream's audio track.
    addOutput(2);

    initialize();
}
MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context, HTMLMediaElement* mediaElement)
    : AudioSourceNode(context, context->sampleRate())
    , m_mediaElement(mediaElement)
    , m_sourceNumberOfChannels(0)
    , m_sourceSampleRate(0)
{
    // Default to stereo. This could change depending on what the media element .src is set to.
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    setNodeType(NodeTypeMediaElementAudioSource);

    initialize();
}
AudioChannelMerger::AudioChannelMerger(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
{
    // Create a fixed number of inputs (able to handle the maximum number of channels we deal with).
    for (unsigned i = 0; i < NumberOfInputs; ++i)
        addInput(adoptPtr(new AudioNodeInput(this)));

    addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
    
    setNodeType(NodeTypeChannelMerger);
    
    initialize();
}
MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* context, MediaStream* mediaStream, AudioSourceProvider* audioSourceProvider)
    : AudioSourceNode(context, context->sampleRate())
    , m_mediaStream(mediaStream)
    , m_audioSourceProvider(audioSourceProvider)
    , m_sourceNumberOfChannels(0)
{
    // Default to stereo. This could change depending on the format of the MediaStream's audio track.
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    setNodeType(NodeTypeMediaStreamAudioSource);

    initialize();
}
pdsp::BiquadBase::BiquadBase(){
    addInput("signal", input_signal);
    addOutput("signal", output);
    addInput("freq", input_frequency);
    addInput("Q", input_Q);
    addInput("gain", input_gain);
    updateOutputNodes();
    
    input_gain.setDefaultValue(0.0f);
    input_Q.setDefaultValue(0.707f);
    input_frequency.setDefaultValue(8000.0f);
    
    BiquadBase::cookCoefficients(0.0f, 0.0f, 0.0f, 0.0f);
}
Example #8
AudioGainNode::AudioGainNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_lastGain(1.0)
    , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
{
    m_gain = AudioGain::create(context, "gain", 1.0, 0.0, 1.0);

    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 1)));
    
    setNodeType(NodeTypeGain);
    
    initialize();
}
Example #9
pdsp::TriggerGeiger::TriggerGeiger(){
    
    addInput("jitter",   in_jitter_ms );
    addInput("distance", in_distance_ms );
    addOutput("trig", output_trig);
    updateOutputNodes();
    
    in_distance_ms.setDefaultValue(200.0f);
    in_jitter_ms.setDefaultValue(0.0f);

    if(dynamicConstruction){
        prepareToPlay(globalBufferSize, globalSampleRate);
    }
}
Example #10
GainNode::GainNode(float sampleRate)
    : AudioNode(sampleRate)
    , m_lastGain(1.0)
    , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
{
    m_gain = std::make_shared<AudioParam>("gain", 1.0, 0.0, 10000.0);

    addInput(std::unique_ptr<AudioNodeInput>(new AudioNodeInput(this)));
    addOutput(std::unique_ptr<AudioNodeOutput>(new AudioNodeOutput(this, 1)));
    
    setNodeType(NodeTypeGain);
    
    initialize();
}
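
The m_sampleAccurateGainValues buffer in the gain-node constructors above exists so that, when the gain parameter is automated at audio rate, a per-sample gain curve can be multiplied into the block instead of a single scalar. Below is a minimal, self-contained sketch of that idea with plain std::vector buffers; the helpers are hypothetical and not part of the WebAudio rendering code.

#include <cstddef>
#include <vector>

// Apply a gain curve sample by sample; one gain value per frame.
// This mirrors the idea behind m_sampleAccurateGainValues.
void applySampleAccurateGain(std::vector<float>& samples, const std::vector<float>& gains)
{
    const std::size_t n = samples.size() < gains.size() ? samples.size() : gains.size();
    for (std::size_t i = 0; i < n; ++i)
        samples[i] *= gains[i];
}

// Fallback when the gain is not automated: one scalar for the whole block.
void applyConstantGain(std::vector<float>& samples, float gain)
{
    for (float& s : samples)
        s *= gain;
}

int main()
{
    std::vector<float> block(128, 1.0f);
    std::vector<float> ramp(128);
    for (std::size_t i = 0; i < ramp.size(); ++i)
        ramp[i] = static_cast<float>(i) / static_cast<float>(ramp.size()); // 0 -> 1 ramp
    applySampleAccurateGain(block, ramp);
    return 0;
}
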
bool DonghaiConverter::handleInput(struct input_event *input) {
    switch (input->type) {
        case EV_MSC:
            return handleMscInput(input);
        case EV_KEY:
            return handleKeyInput(input);
        case EV_SYN:
            return handleSynInput(input);
        default:
            addOutput(input, false);
            break;
    }
    return true;
}
ChannelMergerNode::ChannelMergerNode(AudioContext* context, float sampleRate, unsigned numberOfInputs)
    : AudioNode(context, sampleRate)
    , m_desiredNumberOfOutputChannels(DefaultNumberOfOutputChannels)
{
    // Create the requested number of inputs.
    for (unsigned i = 0; i < numberOfInputs; ++i)
        addInput(std::make_unique<AudioNodeInput>(this));

    addOutput(std::make_unique<AudioNodeOutput>(this, 1));
    
    setNodeType(NodeTypeChannelMerger);
    
    initialize();
}
Example #13
QgsBufferAlgorithm::QgsBufferAlgorithm()
{
  addParameter( new QgsProcessingParameterFeatureSource( QStringLiteral( "INPUT" ), QObject::tr( "Input layer" ) ) );
  addParameter( new QgsProcessingParameterNumber( QStringLiteral( "DISTANCE" ), QObject::tr( "Distance" ), QgsProcessingParameterNumber::Double, 10 ) );
  addParameter( new QgsProcessingParameterNumber( QStringLiteral( "SEGMENTS" ), QObject::tr( "Segments" ), QgsProcessingParameterNumber::Integer, 5, false, 1 ) );

  addParameter( new QgsProcessingParameterEnum( QStringLiteral( "END_CAP_STYLE" ), QObject::tr( "End cap style" ), QStringList() << QObject::tr( "Round" ) << QObject::tr( "Flat" ) << QObject::tr( "Square" ), false ) );
  addParameter( new QgsProcessingParameterEnum( QStringLiteral( "JOIN_STYLE" ), QObject::tr( "Join style" ), QStringList() << QObject::tr( "Round" ) << QObject::tr( "Miter" ) << QObject::tr( "Bevel" ), false ) );
  addParameter( new QgsProcessingParameterNumber( QStringLiteral( "MITRE_LIMIT" ), QObject::tr( "Miter limit" ), QgsProcessingParameterNumber::Double, 2, false, 1 ) );

  addParameter( new QgsProcessingParameterBoolean( QStringLiteral( "DISSOLVE" ), QObject::tr( "Dissolve result" ), false ) );
  addParameter( new QgsProcessingParameterFeatureSink( QStringLiteral( "OUTPUT_LAYER" ), QObject::tr( "Buffered" ), QgsProcessingParameterDefinition::TypeVectorPolygon ) );
  addOutput( new QgsProcessingOutputVectorLayer( QStringLiteral( "OUTPUT_LAYER" ), QObject::tr( "Buffered" ), QgsProcessingParameterDefinition::TypeVectorPolygon ) );
}
Example #14
pdsp::Decimator::Decimator(){
    
    phase = 1.0f;
    addInput("signal", input_signal);
    addInput("freq", input_freq);
    addOutput("output", output);
    updateOutputNodes();

    input_freq.setDefaultValue(44100.0f);

    if(dynamicConstruction){
        prepareToPlay(globalBufferSize, globalSampleRate);
    }
}
Example #15
/*
 * Discover this function's kernel interface, including I/O and functions
 * needed.
 */
void FunctionInterface::analyze()
{
	string msg = "Analyzing function " + NAME(function);
	DEBUG(TOOL, msg);

	status = IN_PROGRESS;

	//Add inputs to our list
	SgInitializedNamePtrList inputs = function->get_parameterList()->get_args();
	SgInitializedNamePtrList::const_iterator inputIt;
	for(inputIt = inputs.begin(); inputIt != inputs.end(); inputIt++)
	{
		msg = "\tfound input " + NAME(*inputIt);
		DEBUG(TOOL, msg);
		addInput(*inputIt);

		FunctionTraversal::checkVariableType((*inputIt)->get_type(), this);
	}

	//Add return value to our list (denoted by special marker name "__retval__")
	SgType* returnType = function->get_orig_return_type();
	if(!isSgTypeVoid(returnType))
	{
		msg = "\tfound output of type " + get_name(returnType);
		DEBUG(TOOL, msg);
		SgInitializedName* returnVal = buildInitializedName(RETVAL_NAME, returnType);
		addOutput(returnVal);

		FunctionTraversal::checkVariableType(returnType, this);
	}

	//Traverse function
	FunctionTraversal ft(this);
	ft.traverse(function, preorder);

	//Incorporate sub-calls
	//TODO do side-effect checking here for sub-calls?
	set<FunctionInterface*>::const_iterator funcIt;
	for(funcIt = calledFunctions.begin(); funcIt != calledFunctions.end(); funcIt++)
	{
		if((*funcIt)->getStatus() == NOT_ANALYZED)
			(*funcIt)->analyze();

		combineGlobalInputs((*funcIt)->getGlobalInputs());
		combineGlobalOutputs((*funcIt)->getGlobalOutputs());
		combineCalledFunctions((*funcIt)->getCalledFunctions());
	}

	status = ANALYZED;
}
void QgsProcessingModelAlgorithm::updateDestinationParameters()
{
  //delete existing destination parameters
  QMutableListIterator<const QgsProcessingParameterDefinition *> it( mParameters );
  while ( it.hasNext() )
  {
    const QgsProcessingParameterDefinition *def = it.next();
    if ( def->isDestination() )
    {
      delete def;
      it.remove();
    }
  }
  // also delete outputs
  qDeleteAll( mOutputs );
  mOutputs.clear();

  // rebuild
  QMap< QString, ChildAlgorithm >::const_iterator childIt = mChildAlgorithms.constBegin();
  for ( ; childIt != mChildAlgorithms.constEnd(); ++childIt )
  {
    QMap<QString, QgsProcessingModelAlgorithm::ModelOutput> outputs = childIt->modelOutputs();
    QMap<QString, QgsProcessingModelAlgorithm::ModelOutput>::const_iterator outputIt = outputs.constBegin();
    for ( ; outputIt != outputs.constEnd(); ++outputIt )
    {
      if ( !childIt->isActive() || !childIt->algorithm() )
        continue;

      // child algorithm has a destination parameter set, copy it to the model
      const QgsProcessingParameterDefinition *source = childIt->algorithm()->parameterDefinition( outputIt->childOutputName() );
      if ( !source )
        continue;

      QgsProcessingParameterDefinition *param = QgsProcessingParameters::parameterFromVariantMap( source->toVariantMap() );
      param->setName( outputIt->childId() + ':' + outputIt->name() );
      param->setDescription( outputIt->description() );
      addParameter( param );

      if ( const QgsProcessingDestinationParameter *destParam = dynamic_cast< const QgsProcessingDestinationParameter *>( param ) )
      {
        QgsProcessingOutputDefinition *output = destParam->toOutputDefinition();
        if ( output )
        {
          addOutput( output );
        }
      }
    }
  }
}
Example #17
void DProcess::run()
{
	DString buffer;
	DString cmode;
	DStringList::iterator it;
	char * buf;
	int bufsize = 80 * sizeof( char );

	// construct exe line
	buffer = m_exe;
	for ( it = m_args.begin() ; it != m_args.end() ; ++it )
	{
		if ( !buffer.isEmpty() )
		{
			buffer.append( " " );
		}
		buffer.append( *it );
	}

	if ( m_com_mode == READ_ONLY)
	{
		cmode = "r";
	}
	else
	{
		cmode = "w";
	}
	m_file = popen( buffer.c_str(), cmode.c_str() );

	if ( !m_file )
	{
		return;
	}
	
	if ( m_com_mode == READ_ONLY)
	{
		buf = new char[ bufsize ];

		while ( fgets( buf, bufsize, m_file ) != 0 )
		{
			addOutput( buf );
		}
		delete[]( buf );
	}
	
	pclose( m_file );

	m_file = 0;
}
Example #18
pdsp::Amp::Amp(){
    addInput("signal", input_signal);
    addInput("mod", input_mod);
    addOutput("signal", output);
    updateOutputNodes();

    input_mod.setDefaultValue(0.0f);

    meter.store(0.0f);
    meterOut.store(0.0f);

    if(dynamicConstruction){
        prepareToPlay(globalBufferSize, globalSampleRate);
    }
}
void NetProduction::CreateLayers(){
    for (int i=0; i < Layers; ++i){
        QString Name="Layer"+QString::number(i+1);
        Model *Layer = new LakeEnvironment(Name, this);  // new allocates the layer on the heap
        Layer->deepInitialize();

        Parameter<double> *AverageDepth = Layer->seekOneChild<Parameter<double>*>("AvgDepth");
        PullVariable<double> *light = Layer->seekOneChild<PullVariable<double>*>("Light");
        addOutput(i, light);

        double Layerheight = MaxDepth/Layers - i;
        AverageDepth->setValue(Layerheight);
    }
}
void IOutputParser::appendOutputParser(IOutputParser *parser)
{
    if (!parser)
        return;
    if (m_parser) {
        m_parser->appendOutputParser(parser);
        return;
    }

    m_parser = parser;
    connect(parser, SIGNAL(addOutput(QString,ProjectExplorer::BuildStep::OutputFormat)),
            this, SLOT(outputAdded(QString,ProjectExplorer::BuildStep::OutputFormat)), Qt::DirectConnection);
    connect(parser, SIGNAL(addTask(ProjectExplorer::Task)),
            this, SLOT(taskAdded(ProjectExplorer::Task)), Qt::DirectConnection);
}
Example #21
bool IosDeployStep::init()
{
    QTC_CHECK(m_transferStatus == NoTransfer);
    m_device = ProjectExplorer::DeviceKitInformation::device(target()->kit());
    IosRunConfiguration * runConfig = qobject_cast<IosRunConfiguration *>(
                this->target()->activeRunConfiguration());
    QTC_CHECK(runConfig);
    m_bundlePath = runConfig->bundleDir().toString();
    if (m_device.isNull()) {
        emit addOutput(tr("Error: no device available, deploy failed."),
                       BuildStep::ErrorMessageOutput);
        return false;
    }
    return true;
}
Example #22
void IPLGabor::init()
{
    // init
    _result0    = NULL;
    _result1    = NULL;
    _result2    = NULL;

    // basic settings
    setClassName("IPLGabor");
    setTitle("Gabor Filter");
    setCategory(IPLProcess::CATEGORY_LOCALOPERATIONS);

    // inputs and outputs
    addInput("Image", IPL_IMAGE_COLOR);
    addOutput("Even Gabor ", IPL_IMAGE_COLOR);
    addOutput("Odd Gabor ", IPL_IMAGE_COLOR);
    addOutput("Power Gabor ", IPL_IMAGE_COLOR);

    // properties
    addProcessPropertyInt("window", "Window", "", 5, IPL_WIDGET_SLIDER_ODD, 3, 15);
    addProcessPropertyInt("wavelength", "Wavelength", "", 5, IPL_WIDGET_SLIDER, 1, 15);
    addProcessPropertyDouble("direction", "Direction", "", 0, IPL_WIDGET_SLIDER, 0, 2*PI);
    addProcessPropertyDouble("deviation", "Std. Deviation", "", 5, IPL_WIDGET_SLIDER, 1, 10);
}
GainNode::GainNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_lastGain(1.0)
    , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
{
    ScriptWrappable::init(this);
    m_gain = AudioParam::create(context, 1.0);

    addInput();
    addOutput(AudioNodeOutput::create(this, 1));

    setNodeType(NodeTypeGain);

    initialize();
}
Example #24
	LNSMul::LNSMul(Target * target, int wE, int wF) :
		Operator(target), wE(wE), wF(wF)
	{
		ostringstream name;
		/* The name has the format: LNSMul_wE_wF where: 
			wE = width of the integral part of the exponent
			wF = width of the fractional part of the exponent */
		name << "LNSMul_" << wE << "_" << wF; 
		setName(name.str());
		setCopyrightString("Jérémie Detrey, Florent de Dinechin (2003-2004), Sylvain Collange (2008)");
		addInput ("nA", wE + wF + 3);
		addInput ("nB", wE + wF + 3);
		addOutput("nR", wE + wF + 3);
		
		addConstant("wE", "positive", wE);
		addConstant("wF", "positive", wF);
		
		//vhdl << tab << declare("eRn", wE+wF+1) << " <= (nA(wE+wF-1) & nA(wE+wF-1 downto 0)) + (nB(wE+wF-1) & nB(wE+wF-1 downto 0));\n";

		IntAdder *my_adder = new IntAdder(target, wE+wF+1);
		oplist.push_back(my_adder);
		vhdl << tab << declare("X", wE+wF+1) << "<= nA(wE+wF-1) & nA(wE+wF-1 downto 0);\n";
		vhdl << tab << declare("Y", wE+wF+1) << "<= nB(wE+wF-1) & nB(wE+wF-1 downto 0);\n";
		inPortMap   (my_adder, "X", "X");
		inPortMap   (my_adder, "Y", "Y");
		inPortMapCst(my_adder, "Cin", "'0'");
		outPortMap (my_adder, "R","eRn");
		vhdl << instance(my_adder, "my_add");	
		
		vhdl << tab << declare("sRn") << " <= nA(wE+wF) xor nB(wE+wF);\n";
		vhdl << tab << declare("xRn", 2) << " <= \"00\" when eRn(wE+wF downto wE+wF-1) = \"10\" else\n"
			<< tab << "	 \"10\" when eRn(wE+wF downto wE+wF-1) = \"01\" else\n"
			<< tab << "	 \"01\";\n";
		vhdl << tab << declare("nRn", wE+wF+3) << " <= xRn & sRn & eRn(wE+wF-1 downto 0);\n";

		vhdl << tab << declare("xA", 2) << " <= nA(wE+wF+2 downto wE+wF+1);\n";
		vhdl << tab << declare("xB", 2) << " <= nB(wE+wF+2 downto wE+wF+1);\n";
		vhdl << tab << declare("xAB", 4) << " <= xA & xB when xA >= xB else\n"
			<< tab << "	 xB & xA;\n";
		vhdl
			<< tab << "with xAB select\n"
			<< tab << tab << "nR(wE+wF+2 downto wE+wF+1) <= xRn  when \"0101\",\n"
			<< tab << "	                                    \"00\" when \"0000\" | \"0100\",\n"
			<< tab << "	                                    \"10\" when \"1001\" | \"1010\",\n"
			<< tab << "	                                    \"11\" when others;\n"
			<< tab << "\n"
			<< tab << "nR(wE+wF downto 0) <= nRn(wE+wF downto 0);\n";
	}
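
The LNSMul datapath reduces to a single adder because, in a logarithmic number system, a product is formed by adding the fixed-point logarithms and XOR-ing the sign bits; the xA/xB bits only classify exceptional values (zero, infinity, NaN). The following is a small software model of that idea, using a double in place of the wE.wF fixed-point exponent; it is an illustration only, not FloPoCo code.

#include <cmath>
#include <cstdio>

// A toy LNS value: a sign flag plus log2 of the magnitude.
struct Lns {
    bool   negative;
    double log2mag; // stands in for the wE.wF fixed-point exponent
};

Lns encode(double x)  { return { x < 0.0, std::log2(std::fabs(x)) }; }
double decode(Lns v)  { return (v.negative ? -1.0 : 1.0) * std::exp2(v.log2mag); }

// Multiplication in LNS: add the logs, XOR the signs (cf. eRn and sRn above).
// Zero/infinity handling is omitted here; the VHDL handles it via the exception bits.
Lns multiply(Lns a, Lns b)
{
    return { a.negative != b.negative, a.log2mag + b.log2mag };
}

int main()
{
    Lns p = multiply(encode(-3.0), encode(2.5));
    std::printf("%f\n", decode(p)); // approximately -7.5
    return 0;
}
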
pdsp::PRNoiseGen::PRNoiseGen(){

    addInput("clock", input_trig_clock);
    addInput("reseed", input_trig_seed);
    addOutput("signal", output);
    updateOutputNodes();

    seedMult = rand();
    //seed = seedMult * time(NULL);
    seed = randomInt();
    pnRegister = seed;

    if(dynamicConstruction){
        prepareToPlay(globalBufferSize, globalSampleRate);
    }
}
StereoPannerHandler::StereoPannerHandler(AudioNode& node, float sampleRate, AudioParamHandler& pan)
    : AudioHandler(NodeTypeStereoPanner, node, sampleRate)
    , m_pan(pan)
    , m_sampleAccuratePanValues(ProcessingSizeInFrames)
{
    addInput();
    addOutput(2);

    // The node-specific default mixing rules declare that StereoPannerNode
    // can handle mono to stereo and stereo to stereo conversion.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    initialize();
}
ChannelMergerHandler::ChannelMergerHandler(AudioNode& node, float sampleRate, unsigned numberOfInputs)
    : AudioHandler(NodeTypeChannelMerger, node, sampleRate)
{
    // These properties are fixed for the node and cannot be changed by user.
    m_channelCount = 1;
    m_channelCountMode = Explicit;

    // Create the requested number of inputs.
    for (unsigned i = 0; i < numberOfInputs; ++i)
        addInput();

    // Create the output with the requested number of channels.
    addOutput(numberOfInputs);

    initialize();
}
Example #28
ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_normalize(true)
{
    addInput(std::make_unique<AudioNodeInput>(this));
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    // Node-specific default mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    setNodeType(NodeTypeConvolver);
    
    initialize();
}
Example #29
pdsp::ValueSequencer::ValueSequencer(){
        
    addOutput("signal", output);
    resetOutputToDefault();

    setSlewTime(0.0f);
    
    connectToSlewControl = false;

    messageBuffer = nullptr;
    slewControl = nullptr;

    if(dynamicConstruction){
        prepareToPlay(globalBufferSize, globalSampleRate);
    }
}
ChannelMergerNode::ChannelMergerNode(float sampleRate, unsigned numberOfInputs) : AudioNode(sampleRate)
{
    numberOfInputs = std::max(1U, std::min(numberOfInputs, AudioContext::maxNumberOfChannels));
    
    // Create the requested number of inputs.
    for (uint32_t i = 0; i < numberOfInputs; ++i)
    {
        addInput(std::unique_ptr<AudioNodeInput>(new AudioNodeInput(this)));
    }

    addOutput(std::unique_ptr<AudioNodeOutput>(new AudioNodeOutput(this, 1)));
    
    setNodeType(NodeTypeChannelMerger);
    
    initialize();
}
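
Each ChannelMerger variant above wires N mono inputs into one multi-channel output, so the merge itself is essentially a copy: input k becomes channel k of the output. Below is a self-contained sketch of that operation using plain vectors instead of the engines' bus types; the helper and its padding behaviour are assumptions for illustration only.

#include <cstddef>
#include <vector>

// Merge N mono buffers into one N-channel buffer (one channel per input).
// Inputs shorter than `frames` are padded with silence.
std::vector<std::vector<float>> mergeChannels(const std::vector<std::vector<float>>& monoInputs,
                                              std::size_t frames)
{
    std::vector<std::vector<float>> output(monoInputs.size(), std::vector<float>(frames, 0.0f));
    for (std::size_t ch = 0; ch < monoInputs.size(); ++ch) {
        const std::size_t n = monoInputs[ch].size() < frames ? monoInputs[ch].size() : frames;
        for (std::size_t i = 0; i < n; ++i)
            output[ch][i] = monoInputs[ch][i];
    }
    return output;
}

int main()
{
    std::vector<std::vector<float>> inputs = {
        std::vector<float>(128, 0.5f),  // input 0 -> channel 0
        std::vector<float>(128, -0.5f), // input 1 -> channel 1
    };
    auto merged = mergeChannels(inputs, 128);
    return static_cast<int>(merged.size()) - 2; // 0 on success
}
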