Example #1
MatrixPtr GenerateClusteredData::operator()() {
  auto matrix = std::make_shared<Matrix>();
  matrix->reserve(nbrInds);
  Variables variables;
  
  for (size_t var = 0; var < nbrClusters * clustSize; ++var) {
    variables ^= Variable( boost::lexical_cast<std::string>(var),
                           plIntegerType(0, cardinality-1) );
  }

  Clustering clustering; clustering.reserve(nbrClusters);
  for ( size_t clust = 0; clust < nbrClusters; ++clust ) {
    Cluster cluster;
    for ( size_t item = 0; item < clustSize; ++item ) {
      cluster.push_back( clust*clustSize + item ); 
    }
    clustering.push_back( cluster );
  }
  
  plJointDistribution jointDist = createClusteringJointDist( variables, clustering);
  plValues values( variables );
  // std::cout << jointDist << std::endl << jointDist.get_computable_object_list() << std::endl;
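  // Draw nbrInds joint samples; each draw fills `values`, which is copied into one row of the matrix.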
  for (size_t ind = 0; ind < nbrInds; ++ind) {
    jointDist.draw(values);   
    std::vector<int> row(variables.size()); 
    for (size_t var = 0; var < variables.size(); ++var) {
      row[var] = values[variables[var]];  
    }
    matrix->push_back(row);
  }

  //std::cout << jointDist << std::endl;
  return Transpose(*matrix);
}
Example #2
/**
 * Applies the required resizes to nodes in the specified axis, rerouting edges
 * around the resized nodes.
 * @param dim axis
 * @param targets the target rectangles (in both axes)
 * @param nodes to be moved and/or resized
 * @param edges to be rerouted around nodes
 * @param clusters root of the cluster hierarchy, passed through to the
 * topology constraints
 * @param resizes ResizeInfo for specific nodes
 * @param vs canonical list of variables passed into the solver.  Note that
 * the first nodes.size() variables correspond one-to-one to the nodes.
 * Note also that new variables for the dummy nodes will be appended to this
 * list and will need to be cleaned up later.
 * @param cs canonical list of constraints over variables.  Note that new
 * non-overlap constraints may be appended to the end of this list.
 */
static void resizeAxis(vpsc::Dim dim, const Rectangles& targets,
        Nodes& nodes, Edges& edges,  RootCluster *clusters, ResizeMap& resizes,
        Variables& vs, Constraints& cs)
{
    COLA_ASSERT(vs.size()>=nodes.size());

    //  - create copy tn of topologyNodes with resize rects replaced with
    //    three nodes: one for the lhs of rect, one for centre and one for rhs.
    //    lhs node goes at position of replaced node, the others are appended
    //    to end of tn.
    //  - set desired positions of each lhs node to be the left side
    //    of resized rect and symmetric for rhs node, centre node's desired
    //    pos is at the centre
    Nodes tn(nodes.size());

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(nodes,edges));

    transform(nodes.begin(),nodes.end(),tn.begin(),
            TransformNode(dim, targets,resizes,vs));
    feach(resizes, CreateLeftRightDummyNodes(dim,targets,tn,vs));
    COLA_ASSERT(tn.size()==nodes.size()+2*resizes.size());
    COLA_ASSERT(vs.size()>=tn.size());

    // update topologyRoutes with references to resized nodes replaced with
    // correct references to lhs/rhs nodes
    feach(edges,SubstituteNodes(dim,resizes,tn));

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(tn,edges));

    // move nodes and reroute
    topology::TopologyConstraints t(dim, tn, edges, clusters, vs, cs);
    COLA_ASSERT(checkDesired(dim,tn,targets,resizes));
#ifndef NDEBUG
    unsigned loopCtr=0;
#endif
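    // Iterate the topology solver until solve() reports no more work; the debug-only counter guards against runaway iteration.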
    while(t.solve()) { COLA_ASSERT(++loopCtr<1000); }
    //COLA_ASSERT(checkFinal(tn,targets,resizes));
    
    // reposition and resize original nodes
    feach(nodes,CopyPositions(dim,tn,resizes));

    // revert topologyRoutes back to original nodes
    feach(edges,RevertNodes(nodes));

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(nodes,edges));

    // clean up
    feach(tn,DeleteTempNode());
}
Example #3
IncSolver::IncSolver(Variables const &vs, Constraints const &cs)
    : m(cs.size()),
      cs(cs),
      n(vs.size()), 
      vs(vs),
      needsScaling(false)
{
    for(unsigned i=0;i<n;++i) {
        vs[i]->in.clear();
        vs[i]->out.clear();

        // Set needsScaling if any variables have a scale other than 1.
        needsScaling |= (vs[i]->scale != 1);
    }
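    // Wire each constraint into the out-list of its left variable and the in-list of its right variable, and propagate the scaling flag.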
    for(unsigned i=0;i<m;++i) {
        Constraint *c=cs[i];
        c->left->out.push_back(c);
        c->right->in.push_back(c);
        c->needsScaling = needsScaling;
    }
    bs=new Blocks(vs);
#ifdef LIBVPSC_LOGGING
    printBlocks();
    //COLA_ASSERT(!constraintGraphIsCyclic(n,vs));
#endif

    inactive=cs;
    for(Constraints::iterator i=inactive.begin();i!=inactive.end();++i) {
        (*i)->active=false;
    }
}
Example #4
 EqualityConstraintSet(Variables vs)
 {
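     // Start with each variable in its own singleton group at offset 0.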
     for (size_t i = 0; i < vs.size(); ++i)
     {
         std::map<Variable *, double> varSet;
         varSet[vs[i]] = 0;
         variableGroups.push_back(varSet);
     }
 }
Example #5
void Deconvolution<T>::forward_impl(const Variables &inputs,
                                    const Variables &outputs) {
  using namespace ::nbla::eigen;
  // Getting variable pointers
  const T *y = inputs[0]->get_data_pointer<T>(this->ctx_);
  const T *w = inputs[1]->get_data_pointer<T>(this->ctx_);
  T *col = col_.cast_data_and_get_pointer<T>(this->ctx_);
  T *x = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
  const T *b = nullptr;
  if (inputs.size() == 3) {
    b = inputs[2]->get_data_pointer<T>(this->ctx_);
  }

  // Sample loop
  for (int n = 0; n < outer_size_; ++n) {

    // Matrix multiplication per group: col = W^T * y_n (the transposed-convolution core)
    const T *y_n = y + n * inner_size_o_;
    for (int g = 0; g < group_; ++g) {
      ConstMatrixMap<T> mw(w + g * row_w_ * col_w_, row_w_, col_w_);
      ConstMatrixMap<T> my(y_n + g * row_y_ * col_y_, row_y_, col_y_);
      MatrixMap<T> mcol(col + g * row_col_ * col_col_, row_col_, col_col_);
      mcol = mw.transpose() * my;
    }

    // col2im: scatter-add the column buffer back into the output image x_n
    T *x_n = x + n * inner_size_i_;
    memset(x_n, 0, sizeof(*x_n) * inner_size_i_);
    if (spatial_dims_ == 2) {
      col2im<T>(col, channels_i_, spatial_shape_i_.data(), kernel_.data(),
                pad_.data(), stride_.data(), dilation_.data(), x_n);
    } else {
      col2im_nd<T>(col, channels_i_, spatial_dims_, spatial_shape_i_.data(),
                   kernel_.data(), pad_.data(), stride_.data(),
                   dilation_.data(), x_n);
    }

    // adding bias
    if (inputs.size() == 3) {
      MatrixMap<T> mx(x_n, channels_i_, inner_size_i_ / channels_i_);
      mx.colwise() += ConstColVectorMap<T>(b, channels_i_);
    }
  }
}
Example #6
DistValueMat createNBProbTables( const Variables& variables,  const Variable& latentVar) {
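  // For each observed variable, build one probability table per latent state: result[var][i] holds P(X_var | Z = i).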
  DistValueMat probTableX;    
  for (size_t var = 0; var < variables.size(); ++var) {
    DistValueVec X_Z;
    for (size_t i = 0; i < latentVar.cardinality(); ++i) {
      const DistValues X_Z_i = createNBUniVarProbTab( variables[var].cardinality() );
      X_Z.push_back(X_Z_i);
    }
    probTableX.push_back(X_Z);
  }    
  return probTableX;
}
Example #7
plComputableObjectList createNBCndTable(Variables& variables, Variable& latentVar) {
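  // Assemble the naive-Bayes conditional component: the product of one conditional table P(X_i | Z) per observed variable.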
  plComputableObjectList dataCndTable;
  
  DistValueMat probTableXZ = createNBProbTables(variables, latentVar);
  
  for (size_t x = 0; x < variables.size(); ++x) { 
    plDistributionTable distTab_Xi_Z(variables[x], latentVar); 
    for (size_t h = 0; h < latentVar.cardinality(); ++h) {
      distTab_Xi_Z.push( plProbTable(variables[x], probTableXZ[x][h]), (int)h );
    }
    dataCndTable *= distTab_Xi_Z; // adds the conditional table to result 
  }
    
  return dataCndTable;
}
Example #8
Variables FunctionRef::call(const Variables &params) const {
	StackGuard guard(*_luaState);

	const int savedTop = lua_gettop(_luaState);
	pushSelf();

	Stack stack(*_luaState);
	stack.pushVariables(params);

	if (lua_pcall(&stack.getLuaState(), params.size(), LUA_MULTRET, 0) != 0) {
		throw Common::Exception("Failed to call Lua function:\n\t%s", lua_tostring(&stack.getLuaState(), -1));
	}

	const int retsCount = stack.getSize() - savedTop;
	return stack.getVariablesFromTop(retsCount);
}
Example #9
 void increaseStack() {
     if (SP == stack.size()) {
         stack.resize(SP * 2);
     }
 }
Example #10
void Deconvolution<T>::setup_impl(const Variables &inputs,
                                  const Variables &outputs) {
  // Shape check
  Shape_t shape_out = inputs[0]->shape();
  Shape_t shape_weights = inputs[1]->shape();
  NBLA_CHECK(base_axis_ < shape_out.size() - 1, error_code::unclassified,
             "base_axis must be less than ndim - 1 of inputs[0]. "
             "base_axis: %d >= ndim of inputs[0] - 1: %d.",
             base_axis_, shape_out.size() - 1);
  spatial_dims_ = shape_out.size() - base_axis_ - 1;
  NBLA_CHECK(shape_weights.size() == 2 + spatial_dims_, error_code::value,
             "Weights must be a (2 + spatial dims)-D tensor.");
  // Storing shape variables
  channels_i_ = shape_weights[1] * group_;
  channels_o_ = shape_weights[0];
  channels_g_ = shape_weights[1];
  inner_size_k_ = channels_g_;
  const int channels_i_mod_group = channels_i_ % group_;
  NBLA_CHECK(channels_i_mod_group == 0, error_code::value,
             "Number of input channel needs to be divisible by group. "
             "Input channel: %d, group: %d",
             channels_i_, group_);
  const int channels_o_mod_group = channels_o_ % group_;
  NBLA_CHECK(channels_o_mod_group == 0, error_code::value,
             "Number of output channel needs to be divisible by group. "
             "Output channel: %d, group: %d",
             channels_o_, group_);
  NBLA_CHECK(channels_i_ / group_ == channels_g_, error_code::value,
             "Number of grouped channel mismatch."
             "Input: %d != Weights[1]: %d",
             channels_i_ / group_, channels_g_);
  NBLA_CHECK(pad_.size() == spatial_dims_, error_code::value,
             "pad size mismatch. pad size: %d != spatial dims: %d.",
             pad_.size(), spatial_dims_);
  NBLA_CHECK(stride_.size() == spatial_dims_, error_code::value,
             "stride size mismatch. stride size: %d != spatial dims: %d.",
             stride_.size(), spatial_dims_);
  NBLA_CHECK(dilation_.size() == spatial_dims_, error_code::value,
             "dilation size mismatch. dilation size: %d != spatial dims: %d.",
             dilation_.size(), spatial_dims_);
  for (int i = 0; i < spatial_dims_; ++i) {
    kernel_.push_back(shape_weights[2 + i]);
    inner_size_k_ *= kernel_[i];
    spatial_shape_o_.push_back(shape_out[base_axis_ + 1 + i]);
    const int k = dilation_[i] * (kernel_[i] - 1) + 1;
    const int size_i = stride_[i] * (spatial_shape_o_[i] - 1) + k - 2 * pad_[i];
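    // size_i inverts the convolution output-size formula: i = stride*(o - 1) + dilation*(k - 1) + 1 - 2*pad.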
    NBLA_CHECK(
        size_i > 0, error_code::value,
        "Invalid configuration of deconvolution at %d-th spatial dimension. "
        "{input:%d, kernel:%d, pad:%d, stride:%d, dilation:%d}.",
        i, size_i, kernel_[i], pad_[i], stride_[i], dilation_[i]);
    spatial_shape_i_.push_back(size_i);
  }

  // Reshaping output
  Shape_t shape_data;
  outer_size_ = 1;
  for (int i = 0; i < base_axis_; ++i) { // Fill shapes up to base axis
    shape_data.push_back(shape_out[i]);
    outer_size_ *= shape_out[i];
  }
  shape_data.push_back(channels_i_); // output channels
  inner_size_i_ = channels_i_;
  inner_size_o_ = channels_o_;
  for (int i = 0; i < spatial_dims_; ++i) {
    shape_data.push_back(spatial_shape_i_[i]);
    inner_size_i_ *= spatial_shape_i_[i];
    inner_size_o_ *= spatial_shape_o_[i];
  }
  outputs[0]->reshape(shape_data, true);

  // Reshaping col buffer
  // Actual memory is not allocated until it is used.
  col_.reshape(Shape_t{inner_size_k_ * group_, inner_size_o_ / channels_o_},
               true);

  // Check bias shape when a bias term is given
  if (inputs.size() == 3) {
    NBLA_CHECK(inputs[2]->shape().size() == 1, error_code::value,
               "Bias(inputs[2]) must be a 1d tensor.");
    NBLA_CHECK(inputs[2]->shape()[0] == channels_i_, error_code::value,
               "Shape of bias(inputs[2]) and weights(inputs[1]) mismatch. "
               "bias shape[0]: %d != weights shape[1] * group: %d.",
               inputs[2]->shape()[0], channels_i_);
  }

  // Set variables for convolution by matrix multiplication
  // In 2D case:
  // K: in maps, H: in height, W: in width
  // K': out maps, H': out height, W': out width
  // M: kernel height, N: kernel width
  row_w_ = channels_o_ / group_;          // K'
  col_w_ = inner_size_k_;                 // KMN
  row_col_ = col_w_;                      // KMN
  col_col_ = inner_size_o_ / channels_o_; // H'W'
  row_y_ = channels_o_ / group_;          // K'
  col_y_ = col_col_;                      // H'W'
}
Example #11
void Deconvolution<T>::backward_impl(const Variables &inputs,
                                     const Variables &outputs,
                                     const vector<bool> &propagate_down,
                                     const vector<bool> &accum) {

  if (!(propagate_down[0] || propagate_down[1] ||
        (inputs.size() == 3 && propagate_down[2]))) {
    return;
  }

  using namespace ::nbla::eigen;
  const T *dx = outputs[0]->get_grad_pointer<T>(this->ctx_);
  const T *y = nullptr;
  const T *w = nullptr;
  T *dy = nullptr, *dw = nullptr, *db = nullptr, *col = nullptr;
  std::unique_ptr<ColVectorMap<T>> mdb;

  if (propagate_down[0] || propagate_down[1]) {
    col = col_.cast_data_and_get_pointer<T>(this->ctx_);
  }
  if (propagate_down[0]) {
    w = inputs[1]->get_data_pointer<T>(this->ctx_);
    dy = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
  }
  if (propagate_down[1]) {
    if (!accum[1])
      inputs[1]->grad()->zero();
    y = inputs[0]->get_data_pointer<T>(this->ctx_);
    dw = inputs[1]->cast_grad_and_get_pointer<T>(this->ctx_);
  }
  if (inputs.size() == 3 && propagate_down[2]) {
    if (!accum[2])
      inputs[2]->grad()->zero();
    db = inputs[2]->cast_grad_and_get_pointer<T>(this->ctx_);
    mdb.reset(new ColVectorMap<T>(db, channels_i_));
  }

  // Sample loop
  for (int n = 0; n < outer_size_; ++n) {
    const T *dx_n = dx + n * inner_size_i_;

    if (propagate_down[0] || propagate_down[1]) {
      // im2col
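      // Unfold the output gradient dx_n into column form; the same col buffer feeds both the image and weight gradient paths.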
      if (spatial_dims_ == 2) {
        im2col<T>(dx_n, channels_i_, spatial_shape_i_.data(), kernel_.data(),
                  pad_.data(), stride_.data(), dilation_.data(), col);
      } else {
        im2col_nd<T>(dx_n, channels_i_, spatial_dims_, spatial_shape_i_.data(),
                     kernel_.data(), pad_.data(), stride_.data(),
                     dilation_.data(), col);
      }
    }

    if (propagate_down[0]) {
      // Backprop to image
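      // dy_n = W * col: the gradient w.r.t. the deconvolution input is a standard convolution of dx_n.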
      T *dy_n = dy + n * inner_size_o_;
      for (int g = 0; g < group_; ++g) {
        ConstMatrixMap<T> mcol(col + g * row_col_ * col_col_, row_col_,
                               col_col_);
        ConstMatrixMap<T> mw(w + g * row_w_ * col_w_, row_w_, col_w_);
        MatrixMap<T> mdy(dy_n + g * row_y_ * col_y_, row_y_, col_y_);
        if (accum[0])
          mdy += mw * mcol;
        else
          mdy = mw * mcol;
      }
    }

    if (propagate_down[1]) {
      // Backprop to weights
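      // dw += y * col^T, accumulated over samples and groups.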
      const T *y_n = y + n * inner_size_o_;
      for (int g = 0; g < group_; ++g) {
        ConstMatrixMap<T> mcol(col + g * row_col_ * col_col_, row_col_,
                               col_col_);
        ConstMatrixMap<T> my(y_n + g * row_y_ * col_y_, row_y_, col_y_);
        MatrixMap<T> mdw(dw + g * row_w_ * col_w_, row_w_, col_w_);
        mdw += my * mcol.transpose();
      }
    }

    if (inputs.size() == 3 && propagate_down[2]) {
      // Backprop to bias
      ConstMatrixMap<T> mdx(dx_n, channels_i_, inner_size_i_ / channels_i_);
      *mdb += mdx.rowwise().sum();
    }
  }
}