Example #1
0
MatrixPtr GenerateClusteredData::operator()() {
  auto matrix = std::make_shared<Matrix>();
  matrix->reserve(nbrInds);
  Variables variables;
  
  for (size_t var = 0; var < nbrClusters * clustSize; ++var) {
    variables ^= Variable( boost::lexical_cast<std::string>(var),
                           plIntegerType(0, cardinality-1) );
  }

  Clustering clustering; clustering.reserve(nbrClusters);
  for ( size_t clust = 0; clust < nbrClusters; ++clust ) {
    Cluster cluster;
    for ( size_t item = 0; item < clustSize; ++item ) {
      cluster.push_back( clust*clustSize + item ); 
    }
    clustering.push_back( cluster );
  }
  
  plJointDistribution jointDist = createClusteringJointDist( variables, clustering);
  plValues values( variables );
  // std::cout << jointDist << std::endl << jointDist.get_computable_object_list() << std::endl;
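  // Draw nbrInds samples from the joint distribution, one row of variable values per individual.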
  for (size_t ind = 0; ind < nbrInds; ++ind) {
    jointDist.draw(values);   
    std::vector<int> row(variables.size()); 
    for (size_t var = 0; var < variables.size(); ++var) {
      row[var] = values[variables[var]];  
    }
    matrix->push_back(row);
  }

  //std::cout << jointDist << std::endl;
  return Transpose(*matrix);
}
Example #2
0
Variables Stack::getVariables() const {
	Variables vars;
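	// Lua stack indices are 1-based, so walk from index 1 up to the stack size.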
	for (int i = 1; i <= getSize(); ++i) {
		vars.push_back(getVariableAt(i));
	}
	return vars;
}
Example #3
0
void
madara::knowledge::containers::NativeDoubleVector::set_name (
  const std::string & var_name,
  Variables & knowledge, int size)
{
  if (context_ != knowledge.get_context () || name_ != var_name)
  {
    context_ = knowledge.get_context ();

    ContextGuard context_guard (*context_);
    MADARA_GUARD_TYPE guard (mutex_);

    madara_logger_ptr_log (logger::global_logger.get (),
      logger::LOG_MAJOR,
      "NativeDoubleVector::set_name: setting name to %s\n",
      var_name.c_str ());

    name_ = var_name;
    
    vector_ = knowledge.get_ref (var_name, settings_);

    if (size > 0)
      resize (size_t (size));
  }
}
Example #4
0
/*
 * Class:     ai_madara_knowledge_Variables
 * Method:    jni_compile
 * Signature: (JLjava/lang/String;)J
 */
jlong JNICALL Java_ai_madara_knowledge_Variables_jni_1compile(
    JNIEnv* env, jobject, jlong cptr, jstring expression)
{
  const char* nativeExpression = env->GetStringUTFChars(expression, 0);

  Variables* vars = (Variables*)cptr;
  CompiledExpression* result(0);

  if (vars)
  {
    result = new CompiledExpression(vars->compile(nativeExpression));

    env->ReleaseStringUTFChars(expression, nativeExpression);
  }
  else
  {
    // user has tried to use a deleted object. Clean up and throw

    env->ReleaseStringUTFChars(expression, nativeExpression);

    madara::utility::java::throw_dead_obj_exception(env,
        "Variables::compile: "
        "Variables objects are released already");
  }

  return (jlong)result;
}
Example #5
0
Variables FunctionRef::call(const Variable &v1, const Variable &v2, const Variable &v3) const {
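	// Pack the three arguments in order and forward to the list-based overload.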
	Variables params;
	params.push_back(v1);
	params.push_back(v2);
	params.push_back(v3);
	return call(params);
}
Example #6
0
void VariablesTest::test_count_targets_number(void)
{
   message += "test_count_targets_number\n";

   Variables v;

   assert_true(v.count_targets_number() == 0, LOG);
}
Example #7
0
void VariablesTest::test_get_variables_number(void)
{
   message += "test_get_variables_number\n";

   Variables v;

   assert_true(v.get_variables_number() == 0, LOG);
}
Example #8
0
Aurora::Lua::Variables Aurora::Lua::Stack::getVariablesFromTop(int count) const {
	Variables vars;
	const int start = std::max(0, getSize() - count) + 1;
	for (int i = start; i <= getSize(); ++i) {
		vars.push_back(getVariableAt(i));
	}
	return vars;
}
Example #9
0
int InterpreterDBG::nextCmd(int line, Variables& v, list<pair<string, int> >& stk) {
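  // Push the current call stack and variable bindings to the debugger front end,
  // then block until it replies with the next command.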

  sendStackInfo(stk);

  sendVariables(v.getGlobals(), stk, true);
  sendVariables(v.getLocals(), stk, false);

  return receiveCmd();
}
Example #10
0
void CheckUnusedVar::checkFunctionVariableUsage()
{
    if (!_settings->isEnabled("style"))
        return;

    // Parse all executing scopes..
    const SymbolDatabase *symbolDatabase = _tokenizer->getSymbolDatabase();

    for (std::list<Scope>::const_iterator scope = symbolDatabase->scopeList.begin(); scope != symbolDatabase->scopeList.end(); ++scope) {
        // only check functions
        if (scope->type != Scope::eFunction)
            continue;

        // varId, usage {read, write, modified}
        Variables variables;

        checkFunctionVariableUsage_iterateScopes(&*scope, variables);


        // Check usage of all variables in the current scope..
        for (Variables::VariableMap::const_iterator it = variables.varUsage().begin(); it != variables.varUsage().end(); ++it) {
            const Variables::VariableUsage &usage = it->second;
            const std::string &varname = usage._name->str();

            // variable has been marked as unused so ignore it
            if (usage._name->isUnused())
                continue;

            // skip things that are only partially implemented to prevent false positives
            if (usage._type == Variables::pointerPointer ||
                usage._type == Variables::pointerArray ||
                usage._type == Variables::referenceArray)
                continue;

            // variable has had memory allocated for it, but hasn't done
            // anything with that memory other than, perhaps, freeing it
            if (usage.unused() && !usage._modified && usage._allocateMemory)
                allocatedButUnusedVariableError(usage._name, varname);

            // variable has not been written, read, or modified
            else if (usage.unused() && !usage._modified)
                unusedVariableError(usage._name, varname);

            // variable has not been written but has been modified
            else if (usage._modified && !usage._write && !usage._allocateMemory)
                unassignedVariableError(usage._name, varname);

            // variable has been written but not read
            else if (!usage._read && !usage._modified)
                unreadVariableError(usage._name, varname);

            // variable has been read but not written
            else if (!usage._write && !usage._allocateMemory)
                unassignedVariableError(usage._name, varname);
        }
    }
}
Example #11
0
void VariablesTest::test_arrange_targets_indices(void)
{
   message += "test_arrange_targets_indices\n";

   Variables v;

   Vector<size_t> targets_indices = v.arrange_targets_indices();

   assert_true(targets_indices.size() == 0, LOG);
}
Example #12
0
void VariablesTest::test_arrange_names(void)
{
   message += "test_get_names\n";

   Variables v;

   Vector<std::string> names = v.arrange_names();

   assert_true(names.size() == 0, LOG);
}
Example #13
0
const char* build_get_variable(const char* name)
{
  Variables::iterator i = g_build_variables.find(name);
  if(i != g_build_variables.end())
  {
    return (*i).second.c_str();
  }
  globalErrorStream() << "undefined build variable: " << makeQuoted(name) << "\n";
  return "";
}
Example #14
0
void VariablesTest::test_arrange_units(void)
{
   message += "test_arrange_units\n";

   Variables v;

   Vector<std::string> units = v.arrange_units();

   assert_true(units.size() == 0, LOG);
}
Example #15
0
void VariablesTest::test_arrange_descriptions(void)
{
   message += "test_arrange_descriptions\n";

   Variables v;

   Vector<std::string> descriptions = v.arrange_descriptions();

   assert_true(descriptions.size() == 0, LOG);
}
Example #16
0
static void filterFields (const StringSet &avoid, Variables &fields) {
	for (int i = 0; i < fields.length (); i++) {
		const VariableDef &cur = fields.at (i);
		
		if (avoid.contains (cur.type)) {
			fields.remove (i);
			i--; // revisit this slot, which now holds the next element
		}
	}
}
Example #17
0
/**
 * Applies the required resizes to nodes in the specified axis, rerouting edges
 * around the resized nodes.
 * @param dim axis
 * @param targets the target rectangles (in both axes)
 * @param nodes to be moved and/or resized
 * @param edges to be rerouted around nodes
 * @param resizes ResizeInfo for specific nodes
 * @param vs canonical list of variables passed into solver.  Note that
 * the first nodes.size() variables are used for each corresponding node.  
 * Note also that new variables for the dummy nodes will be appended to this
 * list and will need to be cleaned up later.
 * @param cs canonical list of constraints over variables.  Note that new
 * non-overlap constraints may be appended to the end of this list.
 */
static void resizeAxis(vpsc::Dim dim, const Rectangles& targets,
        Nodes& nodes, Edges& edges,  RootCluster *clusters, ResizeMap& resizes,
        Variables& vs, Constraints& cs)
{
    COLA_ASSERT(vs.size()>=nodes.size());

    //  - create copy tn of topologyNodes with resize rects replaced with
    //    three nodes: one for the lhs of rect, one for centre and one for rhs.
    //    lhs node goes at position of replaced node, the others are appended
    //    to end of tn.
    //  - set desired positions of each lhs node to be the left side
    //    of resized rect and symmetric for rhs node, centre node's desired
    //    pos it at the centre
    Nodes tn(nodes.size());

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(nodes,edges));

    transform(nodes.begin(),nodes.end(),tn.begin(),
            TransformNode(dim, targets,resizes,vs));
    feach(resizes, CreateLeftRightDummyNodes(dim,targets,tn,vs));
    COLA_ASSERT(tn.size()==nodes.size()+2*resizes.size());
    COLA_ASSERT(vs.size()>=tn.size());

    // update topologyRoutes with references to resized nodes replaced with
    // correct references to lhs/rhs nodes
    feach(edges,SubstituteNodes(dim,resizes,tn));

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(tn,edges));

    // move nodes and reroute
    topology::TopologyConstraints t(dim, tn, edges, clusters, vs, cs);
    COLA_ASSERT(checkDesired(dim,tn,targets,resizes));
#ifndef NDEBUG
    unsigned loopCtr=0;
#endif
    while(t.solve()) { COLA_ASSERT(++loopCtr<1000); }
    //COLA_ASSERT(checkFinal(tn,targets,resizes));
    
    // reposition and resize original nodes
    feach(nodes,CopyPositions(dim,tn,resizes));

    // revert topologyRoutes back to original nodes
    feach(edges,RevertNodes(nodes));

    COLA_ASSERT(assertConvexBends(edges));
    COLA_ASSERT(assertNoSegmentRectIntersection(nodes,edges));

    // clean up
    feach(tn,DeleteTempNode());
}
Example #18
0
void VariablesTest::test_set(void)
{
   message += "test_set\n";

   Variables v;

   // One variable; by default it is neither an input nor a target

   v.set(1);

   assert_true(v.count_inputs_number() == 0, LOG);
   assert_true(v.count_targets_number() == 0, LOG);
}
Example #19
0
void VariablesTest::test_from_XML(void)
{
   message += "test_from_XML\n";

   Variables v;

   // Test

   v.set(3);

   tinyxml2::XMLDocument* document = v.to_XML();

   v.from_XML(*document);

   delete document;
}
Example #20
0
void VariablesTest::test_get_display(void)
{
   message += "test_get_display\n";

   Variables v;

   v.set_display(true);

   assert_true(v.get_display() == true, LOG);

   v.set_display(false);

   assert_true(v.get_display() == false, LOG);
}
Example #21
0
Variables* Variables::clone() const
{
    // owner should always be true
    Variables* newVector = new Variables(true);

    for (int i = 0; i < items.size(); i++)
    {
        Variable* newItem = new Variable(*items.at(i));
        newVector->addItem(newItem);
    }

    return newVector;
}
Example #22
0
madara::knowledge::containers::NativeDoubleVectorStaged::
    NativeDoubleVectorStaged(const std::string& name, Variables& knowledge,
        int size, const KnowledgeUpdateSettings& settings)
  : context_(knowledge.get_context()), has_changed_(false)
{
  madara_logger_ptr_log(logger::global_logger.get(), logger::LOG_MAJOR,
      "NativeDoubleVectorStaged::constructor called for %s[%d]\n", name.c_str(),
      size);

  vector_ = knowledge.get_ref(name, settings);
  if (size >= 0)
  {
    resize(size);
  }
}
Example #23
0
Real TaylorApproximation::value(const Variables& vars)
{
  short bdo = sharedDataRep->buildDataOrder;
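  // buildDataOrder is a bit mask: bit 0 = function value, bit 1 = gradient,
  // bit 2 = Hessian.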
  if (bdo == 1)
    return approxData.anchor_function();
  else { // build up approx value from constant and derivative terms
    Real approx_val = (bdo & 1) ? approxData.anchor_function() : 0.;
    if (bdo & 6) {
      const RealVector&       x = vars.continuous_variables();
      const RealVector&      x0 = approxData.anchor_continuous_variables();
      const RealVector&    grad = approxData.anchor_gradient();
      const RealSymMatrix& hess = approxData.anchor_hessian();
      size_t num_v = sharedDataRep->numVars;
      for (size_t i=0; i<num_v; i++) {
        Real dist_i = x[i] - x0[i];
        if (bdo & 2) // include gradient terms
          approx_val += grad[i] * dist_i;
        if (bdo & 4) // include Hessian terms
          for (size_t j=0; j<num_v; j++)
            approx_val += dist_i * hess(i,j) * (x[j] - x0[j])/2.;
      }
    }
    return approx_val;
  }
}
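// The loop above evaluates the second-order Taylor expansion
//   f(x) ~= f(x0) + grad(x0) . (x - x0) + (x - x0)^T H(x0) (x - x0) / 2,
// with each term gated by a buildDataOrder bit. A minimal standalone sketch
// of the same arithmetic, using plain std::vector in place of Dakota's
// RealVector/RealSymMatrix (the names below are illustrative, not part of
// the library):
#include <vector>
#include <cstddef>

double taylor_value(const std::vector<double>& x,    // evaluation point
                    const std::vector<double>& x0,   // expansion point
                    double f0,                       // f(x0)
                    const std::vector<double>& grad, // gradient at x0
                    const std::vector<double>& hess) // dense row-major n*n Hessian
{
  const std::size_t n = x.size();
  double val = f0;
  for (std::size_t i = 0; i < n; ++i) {
    const double d_i = x[i] - x0[i];
    val += grad[i] * d_i;                                  // first-order term
    for (std::size_t j = 0; j < n; ++j)
      val += d_i * hess[i * n + j] * (x[j] - x0[j]) / 2.0; // second-order term
  }
  return val;
}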
Example #24
0
void PostfixExprEvaluator::processToken(Token &token,
	                                    std::stack<Token> &operands,
	                                    Variables &vars)
{
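	// Resolve variables recursively, push numeric literals, and fold
	// unary/binary operators against the operand stack.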
	switch (token.type)
	{
		case TokenType::TOK_VARIABLE:
			token = vars.at(token.value.valueString);
			processToken(token, operands, vars);
			break;
		case TokenType::TOK_LPAREN:
		case TokenType::TOK_RPAREN:
			break;
		case TokenType::TOK_FLOAT:
		case TokenType::TOK_INT:
			operands.push(token);
			break;
		case TokenType::TOK_UNARYOP:
			processUnaryOp(token, operands);
			break;
		case TokenType::TOK_BINARYOP:
			processBinaryOp(token, operands);
			break;
	}  
}
Example #25
0
IncSolver::IncSolver(Variables const &vs, Constraints const &cs)
    : m(cs.size()),
      cs(cs),
      n(vs.size()), 
      vs(vs),
      needsScaling(false)
{
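    // Reset each variable's incoming/outgoing constraint lists before they
    // are rebuilt below.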
    for(unsigned i=0;i<n;++i) {
        vs[i]->in.clear();
        vs[i]->out.clear();

        // Set needsScaling if any variables have a scale other than 1.
        needsScaling |= (vs[i]->scale != 1);
    }
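    // Register each constraint with the adjacency lists of its left and
    // right variables.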
    for(unsigned i=0;i<m;++i) {
        Constraint *c=cs[i];
        c->left->out.push_back(c);
        c->right->in.push_back(c);
        c->needsScaling = needsScaling;
    }
    bs=new Blocks(vs);
#ifdef LIBVPSC_LOGGING
    printBlocks();
    //COLA_ASSERT(!constraintGraphIsCyclic(n,vs));
#endif

    inactive=cs;
    for(Constraints::iterator i=inactive.begin();i!=inactive.end();++i) {
        (*i)->active=false;
    }
}
Example #26
0
    void VisitArraySubscriptExpr(clang::ArraySubscriptExpr *AS) {
      // find array name and size
      clang::DeclRefExpr *DR;
      if (!(DR = clang::dyn_cast<clang::DeclRefExpr>(
                      AS->getBase()->IgnoreImpCasts()))) {
          printf("Can't handle complex array expressions:\n\t%s\n",
                  toString(AS));
          exit(1);
      }
  
      // Keep the returned std::string alive: taking c_str() of the temporary
      // from getNameAsString() would leave arr dangling.
      const std::string arrName = DR->getDecl()->getNameAsString();
      const char *arr = arrName.c_str();
      int size = variables.arraySize(arr);
  
      clang::Expr *ind = AS->getIdx();
      // Both size and index are literals. No need for abstraction:
      if (clang::IntegerLiteral *IL =
              clang::dyn_cast<clang::IntegerLiteral>(ind)) {
          int val = (int)*IL->getValue().getRawData();
          if (val < 0 || val >= size) {
              printf("** Index out of bounds error: Array %s, size %d, index "
                      "%d\n", arr, size, val);
              printf("\t%s\n", toString(AS));
              error = true;
          }
      // Size is a literal but the index is an abstract value:
      } else if (clang::DeclRefExpr *DR =
              clang::dyn_cast<clang::DeclRefExpr>(ind->IgnoreImpCasts())) {
          char *varInd = variables.find(
                  DR->getDecl()->getNameAsString().c_str());
          if (!blkApronCtx->isIndexInBound(varInd, size)) {
              printf("** Index out of bounds error: Index %s, Array %s, size "
                      "%d\n", toString(DR), arr, size);
              error = true;
          }
      } else {
          printf("Can't handle compound expressions as array indexes:\n\t%s\n",
                  toString(AS));
          exit(1);
      }
//      else if (clang::ImplicitCastExpr *CE =
//              clang::dyn_cast<clang::ImplicitCastExpr>(ind)) {
//        Visit(CE);
//        char *indVar = variables.getLastVar();
//        analysisCtx->addArrayIndex(indVar, size);
//      }
//      variables.toggleCollectingVars(false);
    }
Example #27
0
madara::knowledge::containers::FlexMap::FlexMap(const std::string& name,
    Variables& knowledge, const KnowledgeUpdateSettings& settings,
    const std::string& delimiter)
  : BaseContainer(name, settings),
    context_(knowledge.get_context()),
    delimiter_(delimiter)
{
}
Example #28
0
 EqualityConstraintSet(Variables vs)
 {
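     // Start each variable in its own singleton group, at offset 0.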
     for (size_t i = 0; i < vs.size(); ++i)
     {
         std::map<Variable *, double> varSet;
         varSet[vs[i]] = 0;
         variableGroups.push_back(varSet);
     }
 }
Example #29
0
void
madara::knowledge::containers::IntegerVector::set_name (
  const std::string & var_name,
  Variables & knowledge, int size)
{
  if (context_ != knowledge.get_context () || name_ != var_name)
  {
    context_ = knowledge.get_context ();

    ContextGuard context_guard (*context_);
    MADARA_GUARD_TYPE guard (mutex_);

    name_ = var_name;

    vector_.clear ();
    resize (size);
  }
}
Example #30
0
void Deconvolution<T>::forward_impl(const Variables &inputs,
                                    const Variables &outputs) {
  using namespace ::nbla::eigen;
  // Getting variable pointers
  const T *y = inputs[0]->get_data_pointer<T>(this->ctx_);
  const T *w = inputs[1]->get_data_pointer<T>(this->ctx_);
  T *col = col_.cast_data_and_get_pointer<T>(this->ctx_);
  T *x = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
  const T *b = nullptr; // bias data; set only when a bias input is provided
  if (inputs.size() == 3) {
    b = inputs[2]->get_data_pointer<T>(this->ctx_);
  }

  // Sample loop
  for (int n = 0; n < outer_size_; ++n) {

    // matrix multiplication
    const T *y_n = y + n * inner_size_o_;
    for (int g = 0; g < group_; ++g) {
      ConstMatrixMap<T> mw(w + g * row_w_ * col_w_, row_w_, col_w_);
      ConstMatrixMap<T> my(y_n + g * row_y_ * col_y_, row_y_, col_y_);
      MatrixMap<T> mcol(col + g * row_col_ * col_col_, row_col_, col_col_);
      mcol = mw.transpose() * my;
    }

    // col2im for w * x
    T *x_n = x + n * inner_size_i_;
    memset(x_n, 0, sizeof(*x_n) * inner_size_i_);
    if (spatial_dims_ == 2) {
      col2im<T>(col, channels_i_, spatial_shape_i_.data(), kernel_.data(),
                pad_.data(), stride_.data(), dilation_.data(), x_n);
    } else {
      col2im_nd<T>(col, channels_i_, spatial_dims_, spatial_shape_i_.data(),
                   kernel_.data(), pad_.data(), stride_.data(),
                   dilation_.data(), x_n);
    }

    // adding bias
    if (inputs.size() == 3) {
      MatrixMap<T> mx(x_n, channels_i_, inner_size_i_ / channels_i_);
      mx.colwise() += ConstColVectorMap<T>(b, channels_i_);
    }
  }
}