Beispiel #1
0
/* Function: rtOneStep ========================================================
 *
 * Abstract:
 *      Perform one step of the model.
 *
 *      Per-step sequence: check the error status, compute the next sample
 *      hit, run model outputs, upload signals to the external-mode host,
 *      run the model update, advance the discrete task sample hits, and
 *      finally integrate continuous states when the model has a
 *      continuous sample time.
 */
static void rt_OneStep(RT_MODEL *S)
{
  real_T tnext;  /* time of the next sample hit; used as solver stop time */

  /***********************************************
   * Check and see if error status has been set  *
   ***********************************************/
  if (rtmGetErrorStatus(S) != NULL) {
    /* Non-NULL error status: ask the enclosing execution loop to stop. */
    GBLbuf.stopExecutionFlag = 1;
    return;
  }

  /* enable interrupts here */
  tnext = rt_SimGetNextSampleHit();
  rtsiSetSolverStopTime(rtmGetRTWSolverInfo(S),tnext);
  /* Major time step: compute outputs first, then update states. */
  outputs(S, 0);
  rtExtModeSingleTaskUpload(S);  /* stream signal data to external mode */
  update(S, 0);
  rt_SimUpdateDiscreteTaskSampleHits(rtmGetNumSampleTimes(S),
    rtmGetTimingData(S),
    rtmGetSampleHitPtr(S),
    rtmGetTPtr(S));
  /* Integrate continuous states only when the base rate is continuous. */
  if (rtmGetSampleTime(S,0) == CONTINUOUS_SAMPLE_TIME) {
    rt_UpdateContinuousStates(S);
  }

  rtExtModeCheckEndTrigger();
}                                      /* end rtOneStep */
Beispiel #2
0
// Produce the HTML status fragment for one output line (or, for
// invalidLine(), an overview shown when no hardware was detected).
// The caller is expected to have opened the HTML document; this method
// appends the closing </BODY></HTML> tags.
QString DMXUSB::outputInfo(quint32 output)
{
    QString info;

    if (output == QLCIOPlugin::invalidLine())
    {
        // Overview request: only report when no widget was detected.
        if (m_outputs.size() == 0)
        {
            info.append(QString("<BR><B>%1</B>").arg(tr("No output support available.")));
            info.append(QString("<P>"));
            info.append(tr("Make sure that you have your hardware firmly plugged in. "
                           "NOTE: FTDI VCP interface is not supported by this plugin."));
            info.append(QString("</P>"));
        }
    }
    else if (output < quint32(m_outputs.size()))
    {
        // Per-line status: heading, OK message, then widget-specific info.
        info.append(QString("<H3>%1</H3>").arg(outputs()[output]));
        info.append(QString("<P>"));
        info.append(tr("Device is operating correctly."));
        info.append(QString("</P>"));
        QString extra = m_outputs[output]->additionalInfo();
        if (!extra.isEmpty())
            info.append(extra);
    }

    info.append(QString("</BODY>"));
    info.append(QString("</HTML>"));

    return info;
}
Beispiel #3
0
void Views::copyView(const std::string& viewname,
		     const std::string& copyname)
{
    vpz::View view = get(viewname);
    vpz::View copy = get(viewname);
    copy.setName(copyname);
    std::string copyoutputname;
    int number = 1;

    do {
	copyoutputname = view.output() + "_";
	copyoutputname += boost::lexical_cast< std::string >(number);
	++number;
    }while (outputs().exist(copyoutputname));

    copyOutput(view.output(), copyoutputname);

    switch (copy.type()) {
    case vpz::View::TIMED:
	addTimedView(copy.name(), copy.timestep(), copyoutputname);
	break;
    case vpz::View::EVENT:
	addEventView(copy.name(), copyoutputname);
	break;
    case vpz::View::FINISH:
	addFinishView(copy.name(), copyoutputname);
	break;
    }
}
Beispiel #4
0
    // Serialize a GLTFEffect into its JSON representation: a named object
    // holding an "instanceTechnique" with the technique id and the list of
    // parameters that carry a "value" entry.
    shared_ptr <GLTF::JSONObject> serializeEffect(GLTFEffect* effect, void *context)
    {
        shared_ptr <GLTF::JSONObject> result(new GLTF::JSONObject());
        shared_ptr <GLTF::JSONObject> techniqueInstance(new GLTF::JSONObject());

        result->setString("name", effect->getName());
        result->setValue("instanceTechnique", techniqueInstance);
        techniqueInstance->setString("technique", effect->getTechniqueID());

        // Collect every effect parameter that has a "value" entry.
        shared_ptr<JSONArray> parameterList(new JSONArray());
        shared_ptr <JSONObject> values = effect->getValues();
        std::vector <std::string> keys = values->getAllKeys();
        for (size_t k = 0 ; k < keys.size() ; k++) {
            shared_ptr <JSONObject> param = static_pointer_cast <JSONObject> (values->getValue(keys[k]));
            shared_ptr <JSONObject> paramValue = static_pointer_cast <JSONObject> (param->getValue("value"));
            if (!paramValue)
                continue;   // parameters without a value are skipped
            shared_ptr<JSONObject> entry(new JSONObject());
            entry->setValue("value", paramValue);
            entry->setString("parameter", keys[k]);
            parameterList->appendValue(entry);
        }
        techniqueInstance->setValue("values", parameterList);

        return result;
    }
Beispiel #5
0
// Backpropagate the stored Errors through the convolution windows,
// accumulating a per-unit gradient contribution onto a grid shaped like
// Excitations (one plane per feature map z).
dmatrix3 ConvLayer::backpropagation() const
{
    dmatrix3 outputs(Excitations.size(), dmatrix2
                    (Excitations[0].size(), dvec
                    (Excitations[0][0].size(), 0.0)));
    ivec step;
    step.reserve(4);
    
    int index;
    
    // Walk every error cell (y, x) of every plane z.  Steps[index] holds
    // the window bounds [i_begin, i_end) x [j_begin, j_end) that error
    // cell covered during the forward pass; `index` advances in the same
    // order the windows were recorded.
    for(int z=0;z<Errors.size();z++) {
        index = 0;
        for(int y=0;y<Errors[0].size();y++) {
            for(int x=0;x<Errors[0][0].size();x++, index++) {
                step = Steps[index];
                for(int i=step[0];i<step[1];i++) {
                    for(int j=step[2];j<step[3];j++) {
                        // NOTE(review): sigmoid_p is applied to the product
                        // excitation * error; confirm this is not meant to
                        // be sigmoid_p(excitation) * error (the usual
                        // chain-rule form).
                        outputs[z][i][j] += sigmoid_p(
                                Excitations[z][i][j] *
                                Errors[z][y][x]);
                    }
                }
            }
        }
    }
    return outputs;
}
 // The imputation method is a "collapsed Gibbs sampler" that integrates out
 // latent data from preceding layers (i.e. preceding nodes are activated
 // probabilistically), but conditions on the latent data from the current
 // layer and the layer above.
 void GFFPS::impute_hidden_layer_outputs(RNG &rng) {
   int number_of_hidden_layers = model_->number_of_hidden_layers();
   if (number_of_hidden_layers == 0) return;
   ensure_space_for_latent_data();
   clear_latent_data();
   // Three same-shaped scratch buffers: activation probabilities, their
   // complements, and a general workspace.  Copy-construction from
   // allocation_probs fixes each buffer's per-layer dimensions.
   std::vector<Vector> allocation_probs =
       model_->activation_probability_workspace();
   std::vector<Vector> complementary_allocation_probs = allocation_probs;
   std::vector<Vector> workspace = allocation_probs;
   for (int i = 0; i < model_->dat().size(); ++i) {
     const Ptr<RegressionData> &data_point(model_->dat()[i]);
     // Reference into the stored imputations for observation i; the calls
     // below fill it in place.
     Nnet::HiddenNodeValues &outputs(imputed_hidden_layer_outputs_[i]);
     model_->fill_activation_probabilities(data_point->x(), allocation_probs);
     // Impute the terminal layer first, then sweep top-down through the
     // remaining hidden layers.
     impute_terminal_layer_inputs(rng, data_point->y(), outputs.back(),
                                  allocation_probs.back(),
                                  complementary_allocation_probs.back());
     for (int layer = number_of_hidden_layers - 1; layer > 0; --layer) {
       // This for-loop intentionally skips layer 0, because the inputs to the
       // first hidden layer are the observed predictors.
       imputers_[layer].impute_inputs(
           rng,
           outputs,
           allocation_probs[layer - 1],
           complementary_allocation_probs[layer - 1],
           workspace[layer - 1]);
     }
     imputers_[0].store_initial_layer_latent_data(outputs[0], data_point);
   }
 }
Beispiel #7
0
// Build the creation change sent to the Qt3D backend: a snapshot of this
// render target's attachment (output) node ids.
Qt3DCore::QNodeCreatedChangeBasePtr QRenderTarget::createNodeCreationChange() const
{
    auto change = Qt3DCore::QNodeCreatedChangePtr<QRenderTargetData>::create(this);
    change->data.outputIds = qIdsForNodes(outputs());
    return change;
}
Beispiel #8
0
 void eval(int num, array **arrays)
 {
     std::vector<af_array> outputs(num);
     for (int i = 0; i < num; i++) {
         outputs[i] = arrays[i]->get();
     }
     AF_THROW(af_eval_multiple(num, &outputs[0]));
 }
Beispiel #9
0
// Propagate a new realtime process context to this node.  All ports are
// updated first (inputs, then outputs) so the rt_on_context_update() hook
// observes fully-updated ports.
void node::rt_context_update (rt_process_context& ctx)
{
    for (auto& in : inputs ())
        in.rt_context_update (ctx);
    for (auto& out : outputs ())
        out.rt_context_update (ctx);
    rt_on_context_update (ctx);
}
Beispiel #10
0
/* Parse an additive-precedence expression: a level-9 operand followed by
 * any number of '+' / '-' operators, folded left-associatively into an
 * EORB_CPP_node tree.  A lookahead character that is not '+' or '-' is
 * pushed back onto the input and the accumulated tree is returned. */
static EORB_CPP_node *read_expr_8 (void)
{
   EORB_CPP_node *l;   /* left operand / accumulated tree */
   EORB_CPP_node *r;   /* right operand of the current operator */
   char c;             /* lookahead character */

#ifdef DEBUG_EXPR

   if (debugging)
   {
      outputs("~E8:");
   }
#endif
   l = read_expr_9();
   while (1)
   {
      c = getnhsexpand();   /* next non-space, macro-expanded character */
      switch (c)
      {
         case '+':
         case '-':
#ifdef DEBUG_EXPR

            if (debugging)
            {
               outputc(c);
            }
#endif
            r = read_expr_9();
            l = newnode(l, c, r);
            break;
         default:
#ifdef DEBUG_EXPR

            if (debugging)
            {
               outputs("~");
            }
#endif
            Push(c);          /* not our operator: return it to the stream */
            return (l);
            break;            /* unreachable after the return above */
      }
   }
}
	// Refresh the cached fluid quantities after the fluid state changed:
	// getFreeEnergy() fills N (site densities) along with
	// Adiel_rhoExplicitTilde and Adiel; the densities are then transformed
	// with J() and stored in Ntilde.
	void updateCached()
	{	ScalarFieldArray N;
		FluidMixture::Outputs outputs(&N, 0, &Adiel_rhoExplicitTilde, 0, &Adiel);
		
		fluidMixture->getFreeEnergy(outputs); //Fluid free energy including coupling
		Ntilde.resize(N.size());
		for(unsigned i=0; i<N.size(); i++)
			Ntilde[i] = J(N[i]);
	}
	// Accumulate the loss gradient over every sample in `view` into
	// gradient_vector: zero it, then for each sample run a forward pass
	// followed by error backpropagation, scaling each contribution by
	// 1/view.size().
	// NOTE(review): outputs() appears to provide shared forward-pass
	// storage consumed by back_propagation_error -- confirm it is safe to
	// reuse across iterations.
	void gradient(View& view, const Eigen::VectorXd& parameters, Eigen::VectorXd& gradient_vector)
	{
		// TODO: Check concept for InputIterator

		//double N = std::distance(first_input, last_input);
		double scaling_factor = 1. / view.size();
		gradient_vector.setZero();

		// DEBUG
		//std::cout << gradient_ << std::endl;

		for (unsigned int i = 0; i < view.size(); i++) {
			// forward pass for sample i, then backpropagate its error
			forward_propagation(parameters, view.first(i), outputs());
			back_propagation_error(parameters, view.first(i), outputs(), view.second(i), gradient_vector, scaling_factor);
		}

		// DEBUG
		//std::cout << gradient_ << std::endl;
	}
Beispiel #13
0
//! Distribute a list of signals over n buses, cycling through the inputs
//! when nbus exceeds the number of input lines.
siglist split(const siglist& inputs, int nbus)
{
	const int width = (int)inputs.size();

	siglist result(nbus);

	for (int bus = 0; bus < nbus; bus++) {
		result[bus] = inputs[bus % width];
	}
	return result;
}
// Entry task: drives the three pipeline stages (inputs, processing,
// outputs) once, each with its own value.
// NOTE(review): myval1 is not declared here -- presumably a file-scope
// variable; confirm against the full source.
task main()
{
	int myval2 = 2; // defined as local to task main()(preferred)
	int myval3 = 3;
	
	inputs(myval1);
	processing(myval2);
	outputs(myval3);


}
// Single-layer linear feedforward pass: each output node is the weighted
// sum of all inputs, using the row-major weight matrix _weights.
std::vector<double> nevil::basic_feedforward_nn::update(const std::vector<double> &inputs)
{
  assert ((_num_input_nodes == inputs.size())
    && "Error: matrix size and input size don't match!");

  std::vector<double> result(_num_output_nodes, 0);
  for (std::size_t row = 0; row < _num_output_nodes; ++row)
  {
    for (std::size_t col = 0; col < _num_input_nodes; ++col)
      result[row] += _weights[(row * _num_input_nodes) + col] * inputs[col];
  }
  return result;
}
Beispiel #16
0
// Compute one error term per training instance: the Euclidean distance
// between the network output and the target, scaled by
// 1/sqrt(#training instances) so that the squared norm of the returned
// vector equals the mean squared error over the training set.
Vector<double> MeanSquaredError::calculate_terms(void) const {
// Control sentence

#ifndef NDEBUG

  check();

#endif

  // Neural network stuff

  const MultilayerPerceptron* multilayer_perceptron_pointer =
      neural_network_pointer->get_multilayer_perceptron_pointer();

  const unsigned inputs_number =
      multilayer_perceptron_pointer->get_inputs_number();
  const unsigned outputs_number =
      multilayer_perceptron_pointer->get_outputs_number();

  // Data set stuff

  const Instances& instances = data_set_pointer->get_instances();

  const unsigned training_instances_number =
      instances.count_training_instances_number();

  // Mean squared error stuff

  Vector<double> performance_terms(training_instances_number);

  // Per-instance buffers, reused across iterations.
  Vector<double> inputs(inputs_number);
  Vector<double> outputs(outputs_number);
  Vector<double> targets(outputs_number);

  for (unsigned i = 0; i < training_instances_number; i++) {
    // Input vector

    inputs = data_set_pointer->get_training_input_instance(i);

    // Output vector

    outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

    // Target vector

    targets = data_set_pointer->get_training_target_instance(i);

    // Error

    performance_terms[i] = outputs.calculate_distance(targets);
  }

  // Scale so the sum of squared terms equals the mean squared error.
  return (performance_terms / sqrt((double)training_instances_number));
}
Beispiel #17
0
    // Sum all input streams element-wise into output port 0.
    // The first pass writes inputs[0] + inputs[1] into out; each later
    // pass adds the next input onto out in place (in0 is re-pointed at
    // out after the first pass).
    // NOTE(review): with a single input port nothing is written to out
    // before produce() -- confirm the block always has >= 2 inputs.
    void work(void)
    {
        float *out = outputs()[0]->buffer();
        const float *in0 = inputs()[0]->buffer();
        const size_t elems = this->workInfo().minElements;

        for (size_t i = 1; i < inputs().size(); i++)
        {
            const float *in = inputs()[i]->buffer();
            for (size_t n = 0; n < elems; n++)
            {
                out[n] = in0[n] + in[n];
            }
            in0 = out; //setup for next loop
            inputs()[i]->consume(elems);
        }

        inputs()[0]->consume(elems);
        outputs()[0]->produce(elems);
    }
Beispiel #18
0
// HTML status fragment for one output line.  Only line 0 exists for this
// plugin, so only it gets a heading; the closing document tags are always
// appended.
QString GPIOPlugin::outputInfo(quint32 output)
{
    QString info;

    if (output == 0)
        info.append(QString("<H3>%1</H3>").arg(outputs()[output]));

    info.append(QString("</BODY>"));
    info.append(QString("</HTML>"));

    return info;
}
Beispiel #19
0
// Train the MLP classifier from the sample file named by `input`.
// Returns an error message on failure, or "" on success.
std::string TrainProcessor::processInput(const std::string &input) {
	cv::Mat samples, categories;
	if (!fillSamples(input, samples, categories)) {
		return "Unable to load samples from file";
	}

	// Network width comes from the PCA projection; one output per class.
	const int inputSize = myCvPCA.eigenvalues.rows;
	const int outputSize = myClassesList.length();

	// Project raw samples into PCA space to form the training inputs.
	cv::Mat inputs;
	myCvPCA.project(samples, inputs);

	// One-hot target rows: 1.0 in the column of the sample's class.
	cv::Mat outputs(samples.rows, outputSize, CV_32FC1);
	outputs = 0.0f;
	for (int i = 0; i < categories.rows; ++i) {
		char cat = categories.at<unsigned char>(i, 0);
		int index = (int) myClassesList.find(cat);
		outputs.at<float>(i, index) = 1.0f;
	}

	// Total layer count: explicit setting, or scaled from the input size
	// (never fewer than 3: input, one hidden, output).
	int layers = (myLayersCount > 0) ? myLayersCount : std::max(3, (int)(inputSize * myLayersScale));

	std::cout << std::endl;
	std::cout << "Layers number = " << layers << std::endl;

	cv::Mat layerSizes(1, layers, CV_32SC1);
	--layers;   // from here on, `layers` is the index of the output layer

	std::cout << "Layer sizes: " << inputSize;
	layerSizes.at<int>(0, 0) = inputSize;

	// Hidden layer sizes interpolate linearly from input width to output
	// width, tapered by myLayersSizeScale toward the input end.
	for (int i = 1; i < layers; ++i) {
		const float scale = myLayersSizeScale + (1.0f - myLayersSizeScale) * (i-1) / (layers-1);
		const int sz = (int)(scale * (inputSize + (outputSize - inputSize) * i / layers));
		std::cout << " " << sz;
		layerSizes.at<int>(0, i) = sz;
	}

	std::cout << " " << outputSize << std::endl;
	layerSizes.at<int>(0, layers) = outputSize;

	std::cout << std::endl;
	double timer = (double)cv::getTickCount();

	// Symmetric sigmoid activations; default training parameters.
	myCvMLP.create(layerSizes, CvANN_MLP::SIGMOID_SYM, 1.0, 1.0);
	myCvMLP.train(inputs, outputs, cv::Mat(), cv::Mat(), CvANN_MLP_TrainParams(), 0);

	timer = (double)cv::getTickCount() - timer;
	std::cout << "Training time = " << (timer / cv::getTickFrequency()) << " s" << std::endl;
	std::cout << std::endl;

	return "";
}
Beispiel #20
0
// Advance every oscillator one step for the given stimulus: compute the
// new feeding/linking compartments from neighbor outputs, derive the
// internal activity, threshold it into a binary output, optionally apply
// fast linking, then commit the new state and dynamic thresholds.
void pcnn::calculate_states(const pcnn_stimulus & stimulus) {
	std::vector<double> feeding(size(), 0.0);
	std::vector<double> linking(size(), 0.0);
	std::vector<double> outputs(size(), 0.0);

	for (unsigned int index = 0; index < size(); index++) {
		pcnn_oscillator & current_oscillator = m_oscillators[index];
		std::vector<unsigned int> neighbors;
		get_neighbors(index, neighbors);

		double feeding_influence = 0.0;
		double linking_influence = 0.0;

		// Neighbor contributions, weighted by the synaptic constants M / W.
		for (std::vector<unsigned int>::const_iterator iter = neighbors.begin(); iter != neighbors.end(); iter++) {
			const double output_neighbor = m_oscillators[(*iter)].output;

			feeding_influence += output_neighbor * m_params.M;
			linking_influence += output_neighbor * m_params.W;
		}

		feeding_influence *= m_params.VF;
		linking_influence *= m_params.VL;

		// Leaky integration: decay the previous compartment value (AF/AL),
		// add the stimulus (feeding only) and the neighbor influence.
		feeding[index] = m_params.AF * current_oscillator.feeding + stimulus[index] + feeding_influence;
		linking[index] = m_params.AL * current_oscillator.linking + linking_influence;

		/* calculate internal activity */
		double internal_activity = feeding[index] * (1.0 + m_params.B * linking[index]);

		/* calculate output of the oscillator */
		if (internal_activity > current_oscillator.threshold) {
			outputs[index] = OUTPUT_ACTIVE_STATE;
		}
		else {
			outputs[index] = OUTPUT_INACTIVE_STATE;
		}
	}

	/* fast linking */
	if (m_params.FAST_LINKING) {
		fast_linking(feeding, linking, outputs);
	}

	/* update states of oscillators: the threshold decays by AT and jumps
	   by VT whenever the oscillator fired this step */
	for (unsigned int index = 0; index < size(); index++) {
		pcnn_oscillator & oscillator = m_oscillators[index];

		oscillator.feeding = feeding[index];
		oscillator.linking = linking[index];
		oscillator.output = outputs[index];
		oscillator.threshold = m_params.AT * oscillator.threshold + m_params.VT * outputs[index];
	}
}
Beispiel #21
0
// Mean squared error over the generalization (validation) set: the
// average sum-of-squares error between network outputs and targets.
// Returns 0.0 when there are no generalization instances.
double MeanSquaredError::calculate_generalization_performance(void) const {
// Control sentence (if debug)

#ifndef NDEBUG

  check();

#endif

  const MultilayerPerceptron* multilayer_perceptron_pointer =
      neural_network_pointer->get_multilayer_perceptron_pointer();

  const unsigned inputs_number =
      multilayer_perceptron_pointer->get_inputs_number();
  const unsigned outputs_number =
      multilayer_perceptron_pointer->get_outputs_number();

  const Instances& instances = data_set_pointer->get_instances();

  const unsigned generalization_instances_number =
      instances.count_generalization_instances_number();

  if (generalization_instances_number == 0) {
    return (0.0);
  } else {
    // Per-instance buffers, reused across iterations.
    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double generalization_objective = 0.0;

    for (unsigned i = 0; i < generalization_instances_number; i++) {
      // Input vector

      inputs = data_set_pointer->get_generalization_input_instance(i);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_generalization_target_instance(i);

      // Sum of squares error

      generalization_objective += outputs.calculate_sum_squared_error(targets);
    }

    // Average over the generalization instances.
    return (generalization_objective / (double)generalization_instances_number);
  }
}
Beispiel #22
0
// Recompute this XNOR gate's output: -1 (invalid sentinel) when the
// inputs are not valid, otherwise NOT(XOR of all input values).
void Xnor::updateLogic( ) {
  char res = false;   // char doubles as tri-state: 0, 1, or -1 for invalid
  if( !isValid( ) ) {
    res = -1;
  }
  else {
    for( QNEPort *input : inputs( ) ) {
      res = res ^ input->value( );   // running XOR over all inputs
    }
    res =  !res;   // negate the XOR to get XNOR (normalizes to 0/1)
  }
  outputs( ).first( )->setValue(res);
}
Beispiel #23
0
void
MatchesExecutor::execute(uint32_t docId)
{
    size_t output = 0;
    for (uint32_t i = 0; i < _handles.size(); ++i) {
        const TermFieldMatchData *tfmd = _md->resolveTermField(_handles[i]);
        if (tfmd->getDocId() == docId) {
            output = 1;
            break;
        }
    }
    outputs().set_number(0, static_cast<feature_t>(output));
}
// beep for a quarter of a second
// Configures pins 0 and 1 as outputs and toggles pin 0 at ~500 Hz
// (1 ms high, 1 ms low) for 250 cycles.
// NOTE(review): outputs()/pinOn take a pin mask via pin(), but pinOff(1)
// is called with a bare value -- confirm the intended argument convention.
void beep(void) {
  outputs(pin(0) | pin(1));
  pinOff(1);
  
  byte i = 0;
  while (i < 250) {
    delay(1);
    pinOn(pin(0));
    delay(1);
    pinOff(pin(0));
    i++;
  }
}
/** Convert a String to Integer Array in C/C++
 *
 * @reference   https://www.geeksforgeeks.org/convert-a-string-to-integer-array-in-c-c/
 *
 * Given a string str containing numbers separated with ", ", convert it
 * into an integer array.  Commas start a new number, digits extend the
 * current one, and every other character is ignored.  An empty string
 * yields the single element 0.
 */
auto ConvertToIntArray(const std::string &str) {
    std::vector<int> results{0};

    for (std::size_t i = 0; i < str.size(); ++i) {
        const char symbol = str[i];
        if (symbol == ',') {
            results.push_back(0);   // begin accumulating the next number
        } else if (isdigit(symbol)) {
            results.back() = results.back() * 10 + (symbol - '0');
        }
        // anything else (spaces, signs, ...) is skipped
    }

    return results;
}
Beispiel #26
0
// Pretty-print a single IR node at the given indent level: its outputs,
// "=", the op (PythonOp / CppOp / generic kind with attributes), its
// inputs, an optional scope, then any nested blocks.  When `groups` is
// non-null, subgraph-bearing nodes print as "<kind>_<n>" and are collected
// into `groups` for later expansion instead of being inlined.
std::ostream& printNode(std::ostream & out, size_t level, const Node * n, std::vector<const Node*> * groups) {
  auto outputs = n->outputs();
  indent(out, level) << const_value_list_with_types(outputs);
  out << " = ";
  IR_IFM_CONST(n,PythonOp)
    // PythonOp: ^name(scalar_args...)
    out << "^" << value->name();
    out << "(";
    int i = 0;
    for (auto& scalar : value->scalar_args) {
      if (i++ > 0)
        out << ", ";
      printPyObject(out, scalar);
    }
    out << ")";
  IR_ELSEIFM_CONST(CppOp)
    out << "CppOp[" << value->name() << "]";
  IR_ELSE()
    if(n->hasAttribute(kSubgraph)) {
      if(groups) {
        // Defer the subgraph body: print a reference and remember the node.
        out << n->kind().toString() << "_" << groups->size();
        groups->push_back(n);
      } else {
        out << n->kind().toString() << "[" << *n->g(kSubgraph) << "]";
      }
    } else {
      out << n->kind().toString();
      if(n->hasAttributes()) {
        printAttributes(out,n);
      }
    }
  IR_END()
  out << "(" << n->inputs() << ")";
  std::string scopeName = n->scopeName();
  if (scopeName.empty()) {
    out << "\n";
  }
  else {
    out << ", ";
    out << "scope: " << scopeName << "\n";
  }
  // Recursively print nested blocks with their inputs and outputs.
  for(size_t i = 0; i < n->blocks().size(); ++i) {
    auto b = n->blocks()[i];
    indent(out, level + 1) << "block" << i << "(" << const_value_list_with_types(b->inputs(), false) << ") {\n";
    for(auto n : b->nodes()) {
      printNode(out, level + 2, n, groups);
    }
    indent(out, level + 2) << "-> (" << b->outputs() << ")\n";
    indent(out, level + 1) << "}\n";
  }
  return out;
}
Beispiel #27
0
// HTML status fragment for one output line.  Only valid line 0 gets a
// heading; the closing document tags are always appended.
QString SPIPlugin::outputInfo(quint32 output)
{
    QString info;

    if (output != QLCIOPlugin::invalidLine() && output == 0)
        info.append(QString("<H3>%1</H3>").arg(outputs()[output]));

    info.append(QString("</BODY>"));
    info.append(QString("</HTML>"));

    return info;
}
// One backpropagation step for a single (input, target) pair:
// feedforward, compute the output deltas, backpropagate them to the
// hidden layer, then apply delta-rule weight updates with learning
// rate eta.
void NeuralNetwork::Train(FloatsVector& inputs, FloatsVector& desiredOutputs)
{
    FloatsVector outputs(outputNeuronCount);
    FloatsVector hiddenOutputs(hiddenNeuronCount);
    FloatsVector outputDeltas(outputNeuronCount);
    FloatsVector hiddenDeltas(hiddenNeuronCount);
    const float eta = 0.3;   // learning rate
    
    assert(inputs.size() == inputNeuronCount);
    assert(desiredOutputs.size() == outputNeuronCount);
    
    // Feedforward the current values
    Propagate(inputs, outputs, hiddenOutputs);
    
    // Calculate the deltas from the output
    for (int k = 0; k < outputNeuronCount; k++)
    {
        outputDeltas[k] = desiredOutputs[k] - outputs[k];
    }
    
    // Propagate the deltas back to the hidden layer
    for (int j = 0; j < hiddenNeuronCount; j++)
    {
        for (int k = 0; k < outputNeuronCount; k++) {
            hiddenDeltas[j] += outputWeights[k * hiddenNeuronCount + j] * outputDeltas[k];
        }
    }
    
    // Now update the weights for the input->hidden weights.
    // Delta rule: dw_ij = eta * delta_j * f'(o_j) * x_i.  The previous
    // version omitted the input term inputs[i], so every weight feeding
    // hidden unit j received the same update regardless of the input.
    for (int i = 0; i < inputNeuronCount; i++)
    {
        for (int j = 0; j < hiddenNeuronCount; j++)
        {
            float deltaWeight = eta * hiddenDeltas[j] * hiddenOutputs[j] * (1.0 - hiddenOutputs[j]) * inputs[i];
            inputWeights[j * inputNeuronCount + i] += deltaWeight;
        }
    }
    
    // And update the hidden->output weights (dw_jk = eta * delta_k * f'(o_k) * h_j)
    for (int j = 0; j < hiddenNeuronCount; j++)
    {
        for (int k = 0; k < outputNeuronCount; k++)
        {
            float deltaWeight = eta * outputDeltas[k] * outputs[k] * (1.0 - outputs[k]) * hiddenOutputs[j];
            outputWeights[k * hiddenNeuronCount + j] += deltaWeight;
        }
    }
}
Beispiel #29
0
// Clone every input, node, and output of `src` into this block.
// `outer_map` translates values that `src` captures from outside itself;
// values defined inside `src` are remapped through local_map as their
// clones are created, so later uses resolve to the cloned values.
void Block::cloneFrom(Block * src, std::function<Value*(Value*)> outer_map) {
  std::unordered_map<Value*, Value*> local_map;
  // Resolve a source value to its clone: locally-defined values first,
  // anything else through the caller-supplied outer_map.
  auto env = [&](Value * v) {
    auto it = local_map.find(v);
    if(it != local_map.end())
      return it->second;
    return outer_map(v);
  };
  // Mirror the block inputs, preserving metadata.
  for(auto input : src->inputs()) {
    local_map[input] = this->addInput()->copyMetadata(input);
  }
  auto graph = owningGraph();
  for(auto node : src->nodes()) {
    auto new_node = this->appendNode(graph->createClone(node, env));
    // Record each cloned output so later nodes resolve to the clones.
    for(size_t i = 0; i < node->outputs().size(); ++i) {
      local_map[node->outputs()[i]] = new_node->outputs()[i];
      new_node->outputs()[i]->copyMetadata(node->outputs()[i]);
    }
  }
  // Register the block outputs through env so they point at the clones.
  for(auto output : src->outputs()) {
    this->registerOutput(env(output));
  }
}
Beispiel #30
0
// Unpack the training patterns into parallel input/output containers and
// delegate to the raw-vector train() overload.  The weight-update type is
// currently unused but kept for interface compatibility.
Adaline::TrainResult Adaline::train(vector<AdalineTrainingPattern> &ts, double error, int nEpochs, double learningFactor, Adaline::WeightUpdateType wut)
{
	(void)wut;

	const size_t patternCount = ts.size();
	vector<vector<double> > inputSet(patternCount);
	vector<double> targetSet(patternCount);

	for(size_t p = 0; p < patternCount; p++){
		inputSet[p] = ts[p].getInputs();
		targetSet[p] = ts[p].getOutput();
	}
	return train(inputSet, targetSet, error, nEpochs, learningFactor);
}