Example #1
    void Min::backward(const tensor_type& data_input, const tensor_type& gradient_output)
    {
      // The min is sensitive only to the coefficient that attained it, so the
      // gradient is routed to that single position; all other entries stay zero.
      gradient_input.resizeLike(data_input);
      gradient_input.setZero();
      
      if (dimension) {
        // Row-wise min: indices[row] holds the argmin column recorded in forward().
        for (int row = 0; row != data_input.rows(); ++row)
          gradient_input.row(row)[indices[row]] = gradient_output.col(0)[row];
      } else {
        // Column-wise min: indices[col] holds the argmin row recorded in forward().
        for (int col = 0; col != data_input.cols(); ++col)
          gradient_input.col(col)[indices[col]] = gradient_output.col(0)[col];
      }
    }
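The backward pass is a scatter: the whole incoming gradient for each row (or column) lands on the one position where the minimum was found. A minimal standalone sketch of the same routing for the row-wise (dimension != 0) case, assuming Eigen as the backing library; all names here are illustrative, not part of the original class:

    #include <Eigen/Dense>
    #include <vector>
    
    // Hypothetical free-function version of the dimension != 0 branch above.
    Eigen::MatrixXd min_backward_rowwise(const Eigen::MatrixXd& input,
                                         const Eigen::VectorXd& grad_output,
                                         const std::vector<int>& argmin_cols)
    {
      // Start from zeros, then route each row's incoming gradient to the
      // column where the minimum was found during the forward pass.
      Eigen::MatrixXd grad_input = Eigen::MatrixXd::Zero(input.rows(), input.cols());
      for (int row = 0; row != input.rows(); ++row)
        grad_input(row, argmin_cols[row]) = grad_output[row];
      return grad_input;
    }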
Example #2
    void Min::forward(const tensor_type& data_input)
    {
      if (dimension) {
        // Reduce across columns: one minimum per row.
        data_output.resize(data_input.rows(), 1);
        indices.resize(data_input.rows());
        
        for (size_type row = 0; row != data_input.rows(); ++row) {
          int col_min = 0;
          data_output.col(0)[row] = data_input.row(row).minCoeff(&col_min);
          indices[row] = col_min;  // remember the argmin for backward()
        }
      } else {
        // Reduce across rows: one minimum per column.
        data_output.resize(data_input.cols(), 1);
        indices.resize(data_input.cols());
        
        for (size_type col = 0; col != data_input.cols(); ++col) {
          int row_min = 0;
          data_output.col(0)[col] = data_input.col(col).minCoeff(&row_min);
          indices[col] = row_min;  // remember the argmin for backward()
        }
      }
    }
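To make concrete what forward() records, here is a small hypothetical driver for the dimension != 0 branch, assuming Eigen (which the minCoeff(&index) call suggests):

    #include <Eigen/Dense>
    #include <iostream>
    
    int main()
    {
      Eigen::MatrixXd x(2, 3);
      x << 3.0, 1.0, 2.0,
           0.5, 4.0, 0.1;
      
      // Same reduction as the dimension != 0 branch: one min per row,
      // with the argmin column recorded for use in backward().
      for (int row = 0; row != x.rows(); ++row) {
        int col_min = 0;
        const double value = x.row(row).minCoeff(&col_min);
        std::cout << "row " << row << ": min " << value
                  << " at column " << col_min << "\n";
      }
      // Prints: row 0: min 1 at column 1
      //         row 1: min 0.1 at column 2
      return 0;
    }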
Example #3
    void SoftMax::forward(const tensor_type& data_input)
    {
      // Streaming reduction for the log-normalizer: logsum accumulates
      // log(sum_i exp(x_i)) one coefficient at a time.
      
      const double neg_infty = - std::numeric_limits<double>::infinity();
      
      double logsum = neg_infty;
      for (difference_type i = 0; i != data_input.rows(); ++i) {
        const double value = data_input.col(0)[i];
        
        if (logsum == neg_infty)
          logsum = value;
        else if (value > neg_infty) {
          // log(exp(a) + exp(b)) = max(a, b) + log1p(exp(min(a, b) - max(a, b))),
          // with the larger term factored out so the exp never overflows.
          if (logsum >= value)
            logsum = logsum + utils::mathop::log1p(std::exp(value - logsum));
          else
            logsum = value + utils::mathop::log1p(std::exp(logsum - value));
        }
      }
      
      // Normalize in log-space, then exponentiate: softmax(x) = exp(x - logsum).
      data_output = (data_input.array() - logsum).exp();
    }
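The loop is a streaming log-sum-exp: it applies log(exp(a) + exp(b)) = max(a, b) + log1p(exp(min(a, b) - max(a, b))) one coefficient at a time, so the running total never overflows. A self-contained sketch of the same recurrence, substituting std::log1p for the project's utils::mathop::log1p:

    #include <algorithm>
    #include <cmath>
    #include <limits>
    #include <vector>
    
    double log_sum_exp(const std::vector<double>& values)
    {
      const double neg_infty = -std::numeric_limits<double>::infinity();
      
      double logsum = neg_infty;
      for (const double value : values) {
        if (logsum == neg_infty)
          logsum = value;                      // first term seen so far
        else if (value > neg_infty)            // skip terms with exp(value) == 0
          logsum = std::max(logsum, value)
                 + std::log1p(std::exp(-std::abs(logsum - value)));
      }
      return logsum;
    }

For example, log_sum_exp({0.0, 0.0}) returns log(2); subtracting it from each input and exponentiating, as the last line of forward() does, yields the softmax probabilities (0.5, 0.5).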