Example #1
0
void base::freeself_kind(kind k)
{
	switch (k) {
		case BASE: freeself_base(); break;
		case INTEGER: as_integer().freeself_integer(); break;
		case VECTOR: as_vector().freeself_vector(); break;
		case NUMBER_PARTITION: as_number_partition().freeself_number_partition(); break;
		case PERMUTATION: as_permutation().freeself_permutation(); break;
		case MATRIX: as_matrix().freeself_matrix(); break;
		case LONGINTEGER: as_longinteger().freeself_longinteger(); break;
		case MEMORY: as_memory().freeself_memory(); break;
		//case PERM_GROUP: as_perm_group().freeself_perm_group(); break;
		//case PERM_GROUP_STAB_CHAIN: as_perm_group_stab_chain().freeself_perm_group_stab_chain(); break;
		case UNIPOLY: as_unipoly().freeself_unipoly(); break;
		case SOLID: as_solid().freeself_solid(); break;
		case BITMATRIX: as_bitmatrix().freeself_bitmatrix(); break;
		//case PC_PRESENTATION: as_pc_presentation().freeself_pc_presentation(); break;
		//case PC_SUBGROUP: as_pc_subgroup().freeself_pc_subgroup(); break;
		//case GROUP_WORD: as_group_word().freeself_group_word(); break;
		//case GROUP_TABLE: as_group_table().freeself_group_table(); break;
		// case ACTION: as_action().freeself_action(); break;
		case GEOMETRY: as_geometry().freeself_geometry(); break;
		case HOLLERITH: as_hollerith().freeself_hollerith(); break;
		case GROUP_SELECTION: as_group_selection().freeself_group_selection(); break;
		case BT_KEY: as_bt_key().freeself_bt_key(); break;
		case DATABASE: as_database().freeself_database(); break;
		case BTREE: as_btree().freeself_btree(); break;
		case DESIGN_PARAMETER_SOURCE: as_design_parameter_source().freeself_design_parameter_source(); break;
		case DESIGN_PARAMETER: as_design_parameter().freeself_design_parameter(); break;
		default: cout << "base::freeself_kind(), unknown kind: k= " << kind_ascii(k) << "\n";
		}
}
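The switch above is a hand-rolled dispatch on a runtime kind tag: each case downcasts through an as_*() accessor and forwards to the matching freeself_*() routine. For comparison only, here is a minimal, self-contained sketch (not code from this library; every name in it is hypothetical) of the same kind-dispatch idea expressed with std::variant, where the visit replaces the enum tag and the explicit downcasts:

// Hypothetical sketch: tag dispatch via std::variant instead of an enum kind.
#include <iostream>
#include <type_traits>
#include <variant>
#include <vector>

struct IntegerObj { int value; };
struct VectorObj  { std::vector<int> data; };

using AnyObj = std::variant<IntegerObj, VectorObj>;

void freeself(AnyObj& obj) {
    std::visit([](auto& o) {
        using T = std::decay_t<decltype(o)>;
        if constexpr (std::is_same_v<T, IntegerObj>) {
            o.value = 0;          // analogous to freeself_integer()
        } else {
            o.data.clear();       // analogous to freeself_vector()
        }
    }, obj);
}

int main() {
    AnyObj a = VectorObj{{1, 2, 3}};
    freeself(a);
    std::cout << std::get<VectorObj>(a).data.size() << "\n";  // prints 0
}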
Example #2
0
    plane( const point_type& a, const vector_type& u, const vector_type& v )
        : m_a( a )
        , m_u( u )
        , m_v( v )
        , m_n( normalize( cross_product( m_u, m_v ) ) )
        , m_d( dot_product( m_n, as_vector( m_a ) ) )
    {}
Example #3
0
	plane( const point_type& a, const point_type& b, const point_type& c )
		: m_a( a )
		, m_u( b-a )
		, m_v( c-a )
		, m_n( normalize( cross_product( m_u, m_v ) ) )
		, m_d( dot_product( m_n, as_vector( m_a ) ) )
	{}
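Both plane constructors above store the plane in normal form: a unit normal m_n and the offset m_d = dot_product(m_n, as_vector(m_a)), so any point p on the plane satisfies dot(n, p) - d == 0. A minimal standalone sketch of that computation, using a hypothetical Vec3 type rather than the library's point_type/vector_type:

// Hypothetical Vec3 sketch: n = normalize((b-a) x (c-a)), d = dot(n, a);
// every point p on the plane then satisfies dot(n, p) - d == 0.
#include <cmath>
#include <initializer_list>
#include <iostream>

struct Vec3 { double x, y, z; };

Vec3 sub(Vec3 a, Vec3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
double dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
Vec3 cross(Vec3 a, Vec3 b) { return {a.y * b.z - a.z * b.y,
                                     a.z * b.x - a.x * b.z,
                                     a.x * b.y - a.y * b.x}; }
Vec3 normalize(Vec3 v)     { double l = std::sqrt(dot(v, v));
                             return {v.x / l, v.y / l, v.z / l}; }

int main() {
    Vec3 a{0, 0, 1}, b{1, 0, 1}, c{0, 1, 1};    // three points on the plane z = 1
    Vec3 n = normalize(cross(sub(b, a), sub(c, a)));
    double d = dot(n, a);
    for (Vec3 p : {a, b, c})
        std::cout << dot(n, p) - d << "\n";     // ~0 for every defining point
}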
Example #4
0
    typename result_of::swap<S, I, J>::type
    swap(const S &s, const I &i, const J &j) {
	typedef typename result_of::begin<S>::type S_;
	typedef typename result_of::distance<S_, I>::type M;
	typedef typename result_of::distance<S_, J>::type N;

	BOOST_AUTO(const &_, replace_at<M>(s, deref(j)));
	return (replace_at<N>(as_vector(_), deref(i)));
    }
Example #5
0
bool Segment3D::is_point_projection_in_segment(Point3D pt_3d) {

    Vector3D pt_vector = Segment3D( start_pt(), pt_3d ).as_vector();
    double scal_prod = as_vector().scalar_prod( pt_vector );
    double segm_length = length();
    return 0 <= scal_prod && scal_prod <= segm_length * segm_length;
}
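The member function above tests whether the orthogonal projection of pt_3d onto the segment's supporting line falls between the endpoints: with u = end - start and w = pt - start, that is exactly the condition 0 <= dot(u, w) <= |u|^2. A minimal standalone sketch of the same check with a hypothetical Vec3 type instead of Segment3D/Vector3D:

// Hypothetical minimal types; demonstrates the projection-in-segment test
// 0 <= dot(u, w) <= |u|^2  with  u = end - start, w = pt - start.
#include <iostream>

struct Vec3 { double x, y, z; };

Vec3 sub(Vec3 a, Vec3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
double dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

bool projection_in_segment(Vec3 start, Vec3 end, Vec3 pt) {
    Vec3 u = sub(end, start);          // segment direction (as_vector() above)
    Vec3 w = sub(pt, start);           // start -> query point
    double s = dot(u, w);              // scal_prod in the member function
    return 0.0 <= s && s <= dot(u, u); // dot(u, u) == segm_length^2
}

int main() {
    Vec3 a{0, 0, 0}, b{2, 0, 0};
    std::cout << projection_in_segment(a, b, {1, 5, 0}) << "\n"; // 1: projects inside
    std::cout << projection_in_segment(a, b, {3, 0, 0}) << "\n"; // 0: beyond the end
}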
Example #6
0
 inline plane_orientation classify_point_to_plane(const Point& p, const Hyperplane& plane, const NumberComparisonPolicy& cmp)
 {
     using access = hyperplane_access_traits<typename std::decay<Hyperplane>::type>;
     
     // Compute signed distance of point from plane
     auto dist = scalar_projection(as_vector(p), access::get_normal_vector(plane)) - access::get_distance_to_origin(plane);
     
     // Classify p based on the signed distance
     using number_t = typename std::decay<decltype(dist)>::type;
     auto zero = constants::zero<number_t>();
     if (cmp.greater_than(dist, zero))
         return plane_orientation::in_front_of_plane;
     if (cmp.less_than(dist, zero))
         return plane_orientation::in_back_of_plane;
     
     return plane_orientation::coplanar_with_plane;
 }
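classify_point_to_plane reduces to the sign of dist = dot(p, n) - d for a plane in normal form with unit normal n; the comparison policy only decides when dist counts as zero. A small numeric sketch of that classification with hypothetical minimal types and a plain epsilon in place of the NumberComparisonPolicy:

// Hypothetical sketch: classify a point by its signed distance to a plane
// given in normal form dot(n, x) = d, with n a unit vector.
#include <iostream>

struct Vec3 { double x, y, z; };
double dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

enum class orientation { in_front_of_plane, in_back_of_plane, coplanar_with_plane };

orientation classify(Vec3 p, Vec3 n, double d, double eps = 1e-9) {
    double dist = dot(p, n) - d;                 // signed distance to the plane
    if (dist >  eps) return orientation::in_front_of_plane;
    if (dist < -eps) return orientation::in_back_of_plane;
    return orientation::coplanar_with_plane;
}

int main() {
    Vec3 n{0, 0, 1};                             // plane z = 2
    double d = 2;
    std::cout << (classify({0, 0, 3}, n, d) == orientation::in_front_of_plane) << "\n";   // 1
    std::cout << (classify({0, 0, 2}, n, d) == orientation::coplanar_with_plane) << "\n"; // 1
}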
Example #7
0
  /*********************************************
   * Build computation graph for one sentence
   * 
   * sent: Sent instance
   *********************************************/
  Expression BuildSentGraph(const Sent& sent, const unsigned sidx,
			    ComputationGraph& cg,
			    const int latval){
    builder.new_graph(cg);
    builder.start_new_sequence();
    // define expression
    Expression i_R = input(cg, p_R->dim, as_vector(p_R->values));
    Expression i_bias = input(cg, p_bias->dim, as_vector(p_bias->values));
    Expression i_context = input(cg, p_context->dim, as_vector(p_context->values));
    Expression i_L = input(cg, p_L->dim, as_vector(p_L->values));
    Expression i_lbias = input(cg, p_lbias->dim, as_vector(p_lbias->values));
    // Initialize cvec
    Expression cvec;
    if (sidx == 0)
      cvec = i_context;
    else
      cvec = input(cg, {(unsigned)final_h.size()}, final_h);
    // compute the prob for the given latval
    Expression i_Tk = const_lookup(cg, p_T, latval);
    Expression lv_neglogprob = pickneglogsoftmax(((i_L * cvec) + i_lbias), latval);
    vector<Expression> negloglik;
    Expression i_negloglik, i_x_t, i_h_t, i_y_t;
    unsigned slen = sent.size() - 1;
    for (unsigned t = 0; t < slen; t++){
      // get word representation
      i_x_t = const_lookup(cg, p_W, sent[t]);
      vector<Expression> vecexp;
      vecexp.push_back(i_x_t);
      vecexp.push_back(cvec);
      i_x_t = concatenate(vecexp);
      // compute hidden state
      i_h_t = builder.add_input(i_Tk * i_x_t);
      // compute prediction
      i_y_t = (i_R * i_h_t) + i_bias;
      // get prediction error
      i_negloglik = pickneglogsoftmax(i_y_t, sent[t+1]);
      // push back
      negloglik.push_back(i_negloglik);
    }
    // store the final hidden state of this sequence
    vector<float> temp_h = as_vector(i_h_t.value());
    final_hlist.push_back(temp_h);
    Expression res = (sum(negloglik) + lv_neglogprob) * (-1.0);
    return res;
  }
Example #8
0
void base::read_memory(memory &m, INT debug_depth)
{
	enum kind k;
	INT i;
	char c;
	
	m.read_char(&c);
	k = (enum kind) c;
	c_kind(k);
	switch (k) {
		case BASE:
			break;
		case INTEGER:
			m.read_int(&i);
			m_i_i(i);
			break;
		case VECTOR:
			as_vector().read_mem(m, debug_depth);
			break;
		case NUMBER_PARTITION:
			as_number_partition().read_mem(m, debug_depth);
			break;
		case PERMUTATION:
			as_permutation().read_mem(m, debug_depth);
			break;
		case MATRIX:
			as_matrix().read_mem(m, debug_depth);
			break;
		case LONGINTEGER:
			// as_longinteger().read_mem(m, debug_depth);
			cout << "base::read_mem() no read_mem for LONGINTEGER" << endl;
			break;
		case MEMORY:
			as_memory().read_mem(m, debug_depth);
			break;
		case HOLLERITH:
			as_hollerith().read_mem(m, debug_depth);
			break;
		//case PERM_GROUP:
			//as_perm_group().read_mem(m, debug_depth);
			//break;
		//case PERM_GROUP_STAB_CHAIN:
			//as_perm_group_stab_chain().read_mem(m, debug_depth);
			//break;
		case UNIPOLY:
			as_unipoly().read_mem(m, debug_depth);
			break;
		case SOLID:
			as_vector().read_mem(m, debug_depth);
			break;
		case BITMATRIX:
			as_bitmatrix().read_mem(m, debug_depth);
			break;
		//case PC_PRESENTATION:
			//as_pc_presentation().read_mem(m, debug_depth);
			//break;
		//case PC_SUBGROUP:
			//as_pc_subgroup().read_mem(m, debug_depth);
			//break;
		//case GROUP_WORD:
			//as_group_word().read_mem(m, debug_depth);
			//break;
		//case GROUP_TABLE:
			//as_group_table().read_mem(m, debug_depth);
			//break;
#if 0
		case ACTION:
			as_action().read_mem(m, debug_depth);
			break;
#endif
		case GEOMETRY:
			as_geometry().read_mem(m, debug_depth);
			break;
		case GROUP_SELECTION:
			as_group_selection().read_mem(m, debug_depth);
			break;
		case DESIGN_PARAMETER:
			as_design_parameter().read_mem(m, debug_depth);
			break;
		case DESIGN_PARAMETER_SOURCE:
			as_design_parameter_source().read_mem(m, debug_depth);
			break;
		default:
			cout << "base::read_memory() no read_mem for " << kind_ascii(k) << endl;
			exit(1);
		}
}
Example #9
0
Vector3D Segment3D::as_versor() {

    return as_vector().versor();
};
Example #10
0
void constrain_marginals_bp (
    MatrixXf & joint,
    const VectorXf & prior_dom,
    const VectorXf & prior_cod,
    VectorXf & temp_dom,
    VectorXf & temp_cod,
    float tol,
    size_t max_steps,
    bool logging)
{
  // Enforce simultaneous constraints on a joint PMF
  //
  //   /\x. sum y. J(y,x) = p(x)
  //   /\y. sum x. J(y,x) = q(y)

  ASSERT_EQ(prior_dom.size(), joint.cols());
  ASSERT_EQ(prior_cod.size(), joint.rows());
  ASSERT_LT(0, prior_dom.minCoeff());
  ASSERT_LT(0, prior_cod.minCoeff());

  if (logging) LOG("  constraining marginals via full BP");

  const size_t X = joint.cols();
  const size_t Y = joint.rows();

  const Vector<float> p = as_vector(prior_dom);
  const Vector<float> q = as_vector(prior_cod);

  Vector<float> J = as_vector(joint);
  Vector<float> sum_y_J = as_vector(temp_dom);
  Vector<float> sum_x_J = as_vector(temp_cod);

  float stepsize = 0;
  size_t steps = 0;
  while (steps < max_steps) {
    ++steps;
    if (logging) cout << "   step " << steps << "/" << max_steps << flush;

    stepsize = 0;

    // constrain sum y. J(y,x) = p(x) first,
    // in case joint is initialized with a conditional

    for (size_t x = 0; x < X; ++x) {
      Vector<float> J_x = J.block(Y, x);
      sum_y_J[x] = sum(J_x);
    }
    ASSERT_LT(0, min(sum_y_J)); // XXX error here
    imax(stepsize, sqrtf(max_dist_squared(sum_y_J, p)));
    idiv_store_rhs(p, sum_y_J);
    for (size_t x = 0; x < X; ++x) {
      Vector<float> J_x = J.block(Y, x);
      J_x *= sum_y_J[x];
    }

    sum_x_J.zero();
    for (size_t x = 0; x < X; ++x) {
      Vector<float> J_x = J.block(Y, x);
      sum_x_J += J_x;
    }
    ASSERT_LT(0, min(sum_x_J));
    imax(stepsize, sqrtf(max_dist_squared(sum_x_J, q)));
    idiv_store_rhs(q, sum_x_J);
    for (size_t x = 0; x < X; ++x) {
      Vector<float> J_x = J.block(Y, x);
      J_x *= sum_x_J;
    }

    if (logging) LOG(", stepsize = " << stepsize);
    if (stepsize < tol) break;
  }
}
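The loop above is iterative proportional fitting: each pass rescales the columns of J so that sum_y J(y,x) matches p(x), then rescales along the rows so that sum_x J(y,x) matches q(y), and stops once the largest marginal error drops below tol. A minimal standalone sketch of the same scheme on a row-major matrix stored in a plain std::vector (all names hypothetical, none of the Vector/block helpers above):

// Hypothetical sketch of iterative proportional fitting on a Y x X matrix.
#include <cmath>
#include <cstdio>
#include <vector>

void fit_marginals(std::vector<double>& J, int Y, int X,
                   const std::vector<double>& p,   // target column sums, size X
                   const std::vector<double>& q,   // target row sums, size Y
                   double tol = 1e-6, int max_steps = 100) {
    for (int step = 0; step < max_steps; ++step) {
        double err = 0;
        // Column pass: enforce sum_y J(y,x) = p(x).
        for (int x = 0; x < X; ++x) {
            double s = 0;
            for (int y = 0; y < Y; ++y) s += J[y * X + x];
            err = std::max(err, std::fabs(s - p[x]));
            for (int y = 0; y < Y; ++y) J[y * X + x] *= p[x] / s;
        }
        // Row pass: enforce sum_x J(y,x) = q(y).
        for (int y = 0; y < Y; ++y) {
            double s = 0;
            for (int x = 0; x < X; ++x) s += J[y * X + x];
            err = std::max(err, std::fabs(s - q[y]));
            for (int x = 0; x < X; ++x) J[y * X + x] *= q[y] / s;
        }
        if (err < tol) break;
    }
}

int main() {
    std::vector<double> J = {0.25, 0.25, 0.25, 0.25};   // 2 x 2, uniform start
    fit_marginals(J, 2, 2, {0.7, 0.3}, {0.6, 0.4});
    std::printf("%.3f %.3f\n%.3f %.3f\n", J[0], J[1], J[2], J[3]);
}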
Example #11
0
 line( const Segment& segment )
     : m_u(get_start(segment))
     , m_v( normalize(get_end(segment) - get_start(segment) ) )
     , m_n(left_normal(m_v))
     , m_d(scalar_projection(as_vector(m_u), m_n))
 {}
Example #12
0
 line( const point_type& a, const point_type& b )
     : m_u(a)
     , m_v(normalize( b - a ))
     , m_n(left_normal( m_v ))
     , m_d(scalar_projection(as_vector( a ), m_n))
 {}
Example #13
0
 line( const point_type& u, const Vector& v )
     : m_u(u)
     , m_v(normalize(v))
     , m_n(left_normal( m_v ))
     , m_d(scalar_projection(as_vector(u), m_n))
 {}
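All three line constructors store the line in normal form: a unit direction m_v through m_u, its left normal m_n, and m_d as the projection of m_u onto m_n, so dot(n, p) - d is zero exactly when p lies on the line and its sign says which side p is on. A minimal 2D sketch with hypothetical types (the left_normal convention used here is an assumption):

// Hypothetical 2D sketch of the normal form these constructors build:
// n = left_normal(v), d = dot(n, u); dot(n, p) - d is the signed distance of p.
#include <cmath>
#include <iostream>

struct Vec2 { double x, y; };

double dot(Vec2 a, Vec2 b) { return a.x * b.x + a.y * b.y; }
Vec2 sub(Vec2 a, Vec2 b)   { return {a.x - b.x, a.y - b.y}; }
Vec2 normalize(Vec2 v)     { double l = std::sqrt(dot(v, v)); return {v.x / l, v.y / l}; }
Vec2 left_normal(Vec2 v)   { return {-v.y, v.x}; }   // 90-degree counter-clockwise turn

int main() {
    Vec2 a{0, 0}, b{2, 0};                 // the x-axis as a line through a and b
    Vec2 v = normalize(sub(b, a));
    Vec2 n = left_normal(v);
    double d = dot(n, a);
    std::cout << dot(n, Vec2{5, 0}) - d << "\n";   //  0: on the line
    std::cout << dot(n, Vec2{1, 3}) - d << "\n";   //  3: left of the direction a->b
    std::cout << dot(n, Vec2{1, -2}) - d << "\n";  // -2: right of it
}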
Example #14
0
  /************************************************
   * Build CG of a given doc with a latent sequence
   *
   * doc: 
   * cg: computation graph
   * latseq: latent sequence from decoding
   * obsseq: latent sequence from observation
   * flag: what we expected to get from this function
   ************************************************/
  Expression BuildGraph(const Doc& doc, ComputationGraph& cg,
			LatentSeq latseq, LatentSeq obsseq,
			const string& flag){
    builder.new_graph(cg);
    // define expression
    Expression i_R = parameter(cg, p_R);
    Expression i_bias = parameter(cg, p_bias);
    Expression i_context = parameter(cg, p_context);
    Expression i_L = parameter(cg, p_L);
    Expression i_lbias = parameter(cg, p_lbias);
    vector<Expression> negloglik, neglogprob;
    // -----------------------------------------
    // check hidden variable list
    assert(latseq.size() <= doc.size());
    // -----------------------------------------
    // iterate over latent sequences
    // get LV-related transformation matrix
    Expression i_h_t;
    for (unsigned k = 0; k < doc.size(); k++){
      // using latent size as constraint
      builder.start_new_sequence();
      // for each sentence in this doc
      Expression cvec;
      auto& sent = doc[k];
      // start a new sequence for each sentence
      if (k == 0){
	cvec = i_context;
      } else {
	cvec = input(cg, {(unsigned)final_h.size()}, final_h);
      }
      // latent variable distribution
      int latval = 0;
      if (obsseq[k] >=0){
      	latval = obsseq[k];
	Expression k_neglogprob = pickneglogsoftmax((i_L * cvec) + i_lbias, latval);
	neglogprob.push_back(k_neglogprob);
      } else {
      	latval = latseq[k];
      }
      // build RNN for the current sentence
      Expression i_x_t, i_h_t, i_y_t, i_negloglik;
      Expression i_Tk = lookup(cg, p_T, latval);
      unsigned slen = sent.size() - 1;
      for (unsigned t = 0; t < slen; t++){
	// get word representation
	i_x_t = lookup(cg, p_W, sent[t]);
	vector<Expression> vecexp;
	vecexp.push_back(i_x_t);
	vecexp.push_back(cvec);
	i_x_t = concatenate(vecexp);
	// compute hidden state
	i_h_t = builder.add_input(i_Tk * i_x_t);
	// compute prediction
	i_y_t = (i_R * i_h_t) + i_bias;
	// get prediction error
	i_negloglik = pickneglogsoftmax(i_y_t, sent[t+1]);
	// add back
	negloglik.push_back(i_negloglik);
      }
      final_h.clear();
      final_h = as_vector(i_h_t.value());
    }
    // get result
    Expression res;
    if ((flag != "INFER") && (flag != "OBJ")){
      cerr << "Unrecognized flag: " << flag << endl;
      abort();
    } else if ((neglogprob.size() > 0) && (flag == "OBJ")){
      res = sum(negloglik) + sum(neglogprob);
    } else {
      res = sum(negloglik);
    }
    return res;
  }
Example #15
0
void base::write_memory(memory &m, INT debug_depth)
{
	enum kind k;
	INT i;
	char c;
	
	k = s_kind();
	i = (INT) k;
	c = (char) k;
	if (!ONE_BYTE_INT(i)) {
		cout << "write_memory(): kind not 1 byte" << endl;
		exit(1);
		}
	m.write_char(c);
	if (debug_depth > 0) {
		cout << "base::write_memory() object of kind = " << kind_ascii(k) << endl;
		}
	switch (k) {
		case BASE:
			break;
		case INTEGER:
			m.write_int(s_i_i());
			break;
		case VECTOR:
			as_vector().write_mem(m, debug_depth);
			break;
		case NUMBER_PARTITION:
			as_number_partition().write_mem(m, debug_depth);
			break;
		case PERMUTATION:
			as_permutation().write_mem(m, debug_depth);
			break;
		case MATRIX:
			as_matrix().write_mem(m, debug_depth);
			break;
		case LONGINTEGER:
			// as_longinteger().write_mem(m, debug_depth);
			cout << "base::write_mem() no write_mem for LONGINTEGER" << endl;
			break;
		case MEMORY:
			as_memory().write_mem(m, debug_depth);
			break;
		case HOLLERITH:
			as_hollerith().write_mem(m, debug_depth);
			break;
		//case PERM_GROUP:
			//as_perm_group().write_mem(m, debug_depth);
			//break;
		//case PERM_GROUP_STAB_CHAIN:
			//as_perm_group_stab_chain().write_mem(m, debug_depth);
			//break;
		case UNIPOLY:
			as_unipoly().write_mem(m, debug_depth);
			break;
		case SOLID:
			as_solid().write_mem(m, debug_depth);
			break;
		case BITMATRIX:
			as_bitmatrix().write_mem(m, debug_depth);
			break;
		//case PC_PRESENTATION:
			//as_pc_presentation().write_mem(m, debug_depth);
			//break;
		//case PC_SUBGROUP:
			//as_pc_subgroup().write_mem(m, debug_depth);
			//break;
		//case GROUP_WORD:
			//as_group_word().write_mem(m, debug_depth);
			//break;
		//case GROUP_TABLE:
			//as_group_table().write_mem(m, debug_depth);
			//break;
#if 0
		case ACTION:
			as_action().write_mem(m, debug_depth);
			break;
#endif
		case GEOMETRY:
			as_geometry().write_mem(m, debug_depth);
			break;
		case GROUP_SELECTION:
			as_group_selection().write_mem(m, debug_depth);
			break;
		case DESIGN_PARAMETER:
			as_design_parameter().write_mem(m, debug_depth);
			break;
		case DESIGN_PARAMETER_SOURCE:
			as_design_parameter_source().write_mem(m, debug_depth);
			break;
		default:
			cout << "base::write_memory() no write_mem for " << kind_ascii(k) << endl;
			exit(1);
		}
}
Example #16
0
  /************************************************
   * Build CG of a given doc with a latent sequence
   *
   * doc: 
   * cg: computation graph
   * latseq: latent sequence from decoding
   * obsseq: latent sequence from observation
   * flag: what we expected to get from this function
   *       "PROB": compute the probability of the last sentence 
   *               given the latent value
   *       "ERROR": compute the prediction error of entire doc
   *       "INFER": compute prediction error on words with 
   *                inferred latent variables
   ************************************************/
  Expression BuildRelaGraph(const Doc& doc, ComputationGraph& cg,
			    LatentSeq latseq, LatentSeq obsseq){
    builder.new_graph(cg);
    // define expression
    Expression i_R = parameter(cg, p_R);
    Expression i_bias = parameter(cg, p_bias);
    Expression i_context = parameter(cg, p_context);
    Expression i_L = parameter(cg, p_L);
    Expression i_lbias = parameter(cg, p_lbias);
    vector<Expression> negloglik, neglogprob;
    // -----------------------------------------
    // check hidden variable list
    assert(latseq.size() <= doc.size());
    // -----------------------------------------
    // iterate over latent sequences
    // get LV-related transformation matrix
    Expression i_h_t;
    vector<Expression> obj;
    for (unsigned k = 0; k < doc.size(); k++){
      auto& sent = doc[k];
      // start a new sequence for each sentence
      Expression cvec;
      if (k == 0){
	cvec = i_context;
      } else {
	cvec = input(cg, {(unsigned)final_h.size()}, final_h);
      }
      // two parts of the objective function
      Expression sent_objpart1;
      vector<Expression> sent_objpart2;
      for (int latval = 0; latval < nlatvar; latval ++){
	builder.start_new_sequence();
	// latent variable distribution
	vector<Expression> l_negloglik;
	Expression l_neglogprob = pickneglogsoftmax((i_L * cvec) + i_lbias, latval); 
	// build RNN for the current sentence
	Expression i_x_t, i_h_t, i_y_t, i_negloglik;
	Expression i_Tk = lookup(cg, p_T, latval);
	// for each word
	unsigned slen = sent.size() - 1;
	for (unsigned t = 0; t < slen; t++){
	  // get word representation
	  i_x_t = const_lookup(cg, p_W, sent[t]);
	  vector<Expression> vecexp;
	  vecexp.push_back(i_x_t);
	  vecexp.push_back(cvec);
	  i_x_t = concatenate(vecexp);
	  // compute hidden state
	  i_h_t = builder.add_input(i_Tk * i_x_t);
	  // compute prediction
	  i_y_t = (i_R * i_h_t) + i_bias;
	  // get prediction error
	  i_negloglik = pickneglogsoftmax(i_y_t, sent[t+1]);
	  // add back
	  l_negloglik.push_back(i_negloglik);
	}
	// update context vector
	if (latval == (nlatvar - 1)){
	  final_h.clear();
	  final_h = as_vector(i_h_t.value());
	}
	// - log P(Y, Z) given Y and a specific Z value
	Expression pxz = sum(l_negloglik) + l_neglogprob;
	sent_objpart2.push_back(pxz * (-1.0));
	if (obsseq[k] == latval){
	  sent_objpart1 = pxz * (-1.0);
	}
      }
      // if the latent variable is observed
      if (obsseq[k] >= 0){
	Expression sent_obj = logsumexp(sent_objpart2) - sent_objpart1;
	obj.push_back(sent_obj);
	// cout << as_scalar(sent_obj.value()) << endl;
      }
    }
    // get the objectve for entire doc
    if (obj.size() > 0){
      // if at least one observed latent value
      return sum(obj);
    } else {
      // otherwise
      Expression zero = input(cg, 0.0);
      return zero;
    }
  }
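For an observed latent value, sent_obj above is logsumexp over the joint log-scores of all latent values minus the joint log-score of the observed one, which equals the negative log posterior probability of that observed value. A small numeric sketch of this identity (hypothetical scores, no dynet involved):

// Hypothetical numeric sketch: with s[z] = log P(x, z), the value
// logsumexp(s) - s[z_obs] equals -log P(z_obs | x), which is what sent_obj computes.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

double logsumexp(const std::vector<double>& s) {
    double m = s[0];
    for (double v : s) m = std::max(m, v);
    double acc = 0;
    for (double v : s) acc += std::exp(v - m);
    return m + std::log(acc);
}

int main() {
    std::vector<double> s = {-4.2, -3.1, -5.0};     // log joint scores for 3 latent values
    int z_obs = 1;
    double obj = logsumexp(s) - s[z_obs];

    // Same number computed directly from the normalized posterior.
    double posterior = std::exp(s[z_obs] - logsumexp(s));
    std::printf("%f  %f\n", obj, -std::log(posterior));   // identical
}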
Example #17
0
	line( const Segment& segment )
		: m_u(get_start(segment))
		, m_v( normalize(get_end(segment) - get_start(segment) ) )
		, m_n( left_normal(m_v))
		, m_d( dot_product(m_n, as_vector(m_u)))
	{}
Example #18
0
	line( const point_type& a, const point_type& b )
		: m_u( a )
		, m_v( normalize( b - a ) )
		, m_n( left_normal( m_v ) )
		, m_d( dot_product( m_n, as_vector( a ) ) )
	{}
Example #19
0
    line( const point_type& u, const vector_type& v )
        : m_u( u )
        , m_v( normalize( v ) )
        , m_n( left_normal( m_v ) )
        , m_d( dot_product( m_n, as_vector( u ) ) )
    {}
Example #20
0
INT base::calc_size_on_file()
{
	enum kind k;
	INT i, size;
	char c;
	
	k = s_kind();
	i = (INT) k;
	c = (char) k;
	if (!ONE_BYTE_INT(i)) {
		cout << "write_memory(): kind not 1 byte" << endl;
		exit(1);
		}
	size = 1;
	switch (k) {
		case BASE:
			break;
		case INTEGER:
			size += 4;
			break;
		case VECTOR:
			size += as_vector().csf();
			break;
		case NUMBER_PARTITION:
			size += as_number_partition().csf();
			break;
		case PERMUTATION:
			size += as_permutation().csf();
			break;
		case MATRIX:
			size += as_matrix().csf();
			break;
		case LONGINTEGER:
			// size += as_longinteger().csf();
			cout << "base::write_mem() no csf for LONGINTEGER" << endl;
			break;
		case MEMORY:
			size += as_memory().csf();
			break;
		case HOLLERITH:
			size += as_hollerith().csf();
			break;
		//case PERM_GROUP:
			//size += as_perm_group().csf();
			//break;
		//case PERM_GROUP_STAB_CHAIN:
			//size += as_perm_group_stab_chain().csf();
			//break;
		case UNIPOLY:
			size += as_unipoly().csf();
			break;
		case SOLID:
			size += as_vector().csf();
			break;
		case BITMATRIX:
			size += as_bitmatrix().csf();
			break;
		//case PC_PRESENTATION:
			//size += as_pc_presentation().csf();
			//break;
		//case PC_SUBGROUP:
			//size += as_pc_subgroup().csf();
			//break;
		//case GROUP_WORD:
			//size += as_group_word().csf();
			//break;
		//case GROUP_TABLE:
			//size += as_group_table().csf();
			//break;
#if 0
		case ACTION:
			size += as_action().csf();
			break;
#endif
		case GEOMETRY:
			size += as_geometry().csf();
			break;
		case GROUP_SELECTION:
			size += as_group_selection().csf();
			break;
		case DESIGN_PARAMETER:
			size += as_design_parameter().csf();
			break;
		case DESIGN_PARAMETER_SOURCE:
			size += as_design_parameter_source().csf();
			break;
		default:
			cout << "base::calc_size_on_file() no csf() for " << kind_ascii(k) << endl;
			exit(1);
		}
	return size;
}