Example #1
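// Applies an additive update: reads the current prediction for this projection
// and queues (input, prediction + delta) as a training sample in batch_.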
void TensorFlowRepresentation::update(const ProjectionPtr projection, const Vector &delta)
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
    Vector v;
    read(projection, &v, NULL);
    batch_.push_back({vp->vector, v+delta});
  }
  else
    throw Exception("representation/tensorflow requires a projector returning a VectorProjection");
}
Example #2
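// Forward pass of a single-hidden-layer neural network: computes hidden and output
// activations from the projected input, then scales the outputs to [output_min_, output_max_].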
double ANNRepresentation::read(const ProjectionPtr &projection, Vector *result, Vector *stddev) const
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
    double hidden[MAX_NODES], output[MAX_NODES];
    size_t hidden_params = inputs_+bias_+recurrent_;
    
    // Calculate hidden activation
    for (size_t hh=0; hh < hiddens_; ++hh)
    {
      const double *w = &weights_[hidden_params*hh];
      double act = 0;
      if (bias_)      act += w[inputs_];                  // Bias
      if (recurrent_) act += state_[hh]*w[inputs_+bias_]; // Recurrence
      
      for (size_t ii=0; ii < inputs_; ++ii)
        act += w[ii]*vp->vector[ii];
        
      hidden[hh] = activate(act);
    }
    
    // TODO: Remember state
    // memcpy(state_.data(), hidden, hiddens_*sizeof(double));
    
    // Calculate output activation
    for (size_t oo=0; oo < outputs_; ++oo)
    {
      const double *w = &weights_[hidden_params*hiddens_+(hiddens_+bias_)*oo];
      double act = 0;
      if (bias_) act += w[hiddens_]; // Bias
      
      for (size_t hh=0; hh < hiddens_; ++hh)
        act += w[hh]*hidden[hh];
        
      output[oo] = activate(act);
    }
    
    // Scale outputs from the 0..1 activation range to output_min_..output_max_
    result->resize(outputs_);
    for (size_t oo=0; oo < outputs_; ++oo)
      (*result)[oo] = output_min_[oo]+output[oo]*(output_max_[oo]-output_min_[oo]);
  }
  else
    throw Exception("representation/parameterized/ann requires a projector returning a VectorProjection");
    
  if (stddev) stddev->clear();
  
  return (*result)[0];
}
Example #3
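// Copies the projected input vector into row counter_ of the input tensor
// and advances the row counter.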
void TensorFlowRepresentation::enqueue(const ProjectionPtr &projection)
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
    auto input_map = input_.tensor<float, 2>();
    for (size_t jj=0; jj < input_map.dimension(1); ++jj)
      input_map(counter_, jj) = vp->vector[jj];
    counter_++;
  }
  else
    throw Exception("representation/tensorflow requires a projector returning a VectorProjection");
}
Example #4
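// Copies the projected input vector and its training target into row counter_ of the
// input and target tensors, then advances the row counter.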
void TensorFlowRepresentation::enqueue(const ProjectionPtr &projection, const Vector &target)
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
    auto input_map = input_.tensor<float, 2>();
    for (size_t ii=0; ii < input_map.dimension(1); ++ii)
      input_map(counter_, ii) = vp->vector[ii];

    auto target_map = target_.tensor<float, 2>();
    for (size_t ii=0; ii < target_map.dimension(1); ++ii)
      target_map(counter_, ii) = target[ii];

    counter_++;
  }
  else
    throw Exception("representation/tensorflow requires a projector returning a VectorProjection");
}
Example #5
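// Queues a training sample: stores the target directly when the product of the
// learning-rate vector alpha is one, otherwise blends the current prediction
// towards the target (soft update).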
void TensorFlowRepresentation::write(const ProjectionPtr projection, const Vector &target, const Vector &alpha)
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
    if (prod(alpha) == 1)
    {
      batch_.push_back({vp->vector, target});
    }
    else
    {
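      // Soft update: move the current prediction towards the target by the per-output learning rate alpha.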
      Vector v;
      read(projection, &v, NULL);
      batch_.push_back({vp->vector, (1-alpha)*v + alpha*target});
    }
  }
  else
    throw Exception("representation/tensorflow requires a projector returning a VectorProjection");
}
Example #6
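// Runs the prediction graph on a single projected input and copies the network
// output into *result.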
double TensorFlowRepresentation::read(const ProjectionPtr &projection, Vector *result, Vector *stddev) const
{
  VectorProjection *vp = dynamic_cast<VectorProjection*>(projection.get());
  
  if (vp)
  {
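    // Build a 1xN float input tensor from the projected feature vector.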
    Tensor input(tensorflow::DT_FLOAT, TensorShape({1, (int)vp->vector.size()}));
    auto input_map = input.tensor<float, 2>();
    for (size_t ii=0; ii < vp->vector.size(); ++ii)
      input_map(0, ii) = vp->vector[ii];

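    // Scalar boolean tensor signalling inference mode (learning phase off).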
    Tensor learning(tensorflow::DT_BOOL, TensorShape());
    learning.scalar<bool>()() = false;

    std::vector<std::pair<std::string, tensorflow::Tensor>> inputs = {{input_layer_, input}};
    if (!learning_phase_.empty())
      inputs.push_back({learning_phase_, learning});
    
    std::vector<Tensor> outputs;
    
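    // Run the graph up to the output layer and collect the fetched tensors.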
    Status run_status = session_->Run(inputs, {output_layer_}, {}, &outputs);
    if (!run_status.ok()) {
      ERROR(run_status.ToString());
      throw Exception("Could not run prediction graph");
    }
    
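    // Copy the single output row into the result vector.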
    auto output_map = outputs[0].tensor<float, 2>();
    result->resize(output_map.dimension(1));
    for (size_t ii=0; ii < result->size(); ++ii)
      (*result)[ii] = output_map(0, ii);
  }
  else
    throw Exception("representation/tensorflow requires a projector returning a VectorProjection");
    
  if (stddev) *stddev = Vector();
  
  return (*result)[0];
}