Example #1
0
/**
 * Recursively imports an Assimp node hierarchy into the scene graph.
 *
 * Creates a scene node for `node` (keyed by the Assimp node name), applies the
 * decomposed local transform (scale / rotation / translation), records the
 * import options, registers the node in `nodes`, then recurses into children.
 *
 * @param scene   scene the nodes are created in
 * @param node    current Assimp node to import
 * @param parent  parent scene node (NULL for the root)
 * @param level   recursion depth (incremented per child level)
 * @param options import option flags copied onto every created node
 * @return the scene node created for `node`
 */
Node *Model::node_map_create(Scene &scene, const aiNode &node, Node *parent, int level, unsigned int options)
{
  glm::mat4 transform_local;
  std::string key(node.mName.data);
  Node *node_internal = scene.node_create(key, parent);

  {
    aiVector3t<float> scaling;
    aiQuaterniont<float> rotation;
    aiVector3t<float> position;

    // Split the node's local matrix into its scale/rotation/translation parts.
    node.mTransformation.Decompose(scaling, rotation, position);
    glm::vec3 scale_vec(scaling.x, scaling.y, scaling.z);
    glm::vec3 position_vec(position.x, position.y, position.z);
    // BUG FIX: glm::quat's constructor takes (w, x, y, z), while Assimp stores
    // (x, y, z, w).  The previous code passed the components in Assimp order
    // (and then re-scrambled them a second time below), permuting the rotation.
    glm::quat rotation_quat(rotation.w, rotation.x, rotation.y, rotation.z);
    ai_mat_copy(&node.mTransformation, transform_local);

   // node_internal->transform_local_original_set(transform_local);
   // node_internal->transform_local_current_set(scene, transform_local);

    node_internal->import_options = options;

    node_internal->transform_scale_set(scale_vec);
    node_internal->transform_rotate_set(rotation_quat);
    node_internal->transform_translate_set(position_vec);
    POLL_DEBUG(std::cout, "model transform " << node_internal->name_get() << " = " << glm::to_string(position_vec));
  }

  nodes[key] = node_internal;

  // Import the children, increasing the depth counter per level.
  for (size_t i = 0; i < node.mNumChildren; i++) {
    node_map_create(scene, *node.mChildren[i], node_internal, level + 1, options);
  }

  return node_internal;
}
// Copies `allfeatures` into `pfeatures` and normalizes each copied feature
// vector by the per-dimension bandwidths estimated from the copy itself.
// The caller's `allfeatures` is left unmodified.
void WeightMatrix_iter::scale_features(std::vector< std::vector<double> >& allfeatures,
				    std::vector< std::vector<double> >& pfeatures)
{
    // Work on a private copy so the input matrix stays untouched.
    copy(allfeatures, pfeatures);

    // Estimate the bandwidth per feature dimension, then rescale every row.
    std::vector<double> deltas;
    EstimateBandwidth(pfeatures, deltas);

    std::vector< std::vector<double> >::iterator row;
    for (row = pfeatures.begin(); row != pfeatures.end(); ++row) {
	scale_vec(*row, deltas);
    }
}
Example #3
0
	// Applies a uniform scale on all three axes of the CoP/CoM marker.
	void CopmVisual::setScale(float scale)
	{
		copm_marker_->setScale(Ogre::Vector3(scale, scale, scale));
	}
Example #4
0
// Scales all 4 components of `a` by `factor` in place
// (thin wrapper over the generic scale_vec with length 4).
void scale_vec4(vec4 a, ft factor)
{
   scale_vec(a,factor,4);
}
Example #5
0
// Scales all 3 components of `a` by `factor` in place
// (thin wrapper over the generic scale_vec with length 3).
void scale_vec3(vec3 a, ft factor)
{
   scale_vec(a,factor,3);
}
void WeightMatrix_iter::weight_matrix_parallel(std::vector< std::vector<double> >& allfeatures,
			   bool exhaustive)
{

  
    printf("running parallel version\n");
    std::vector< std::vector<double> > pfeatures;
    copy(allfeatures, pfeatures);

    _nrows = pfeatures.size();
    _ncols = pfeatures[0].size();

    EstimateBandwidth(pfeatures, _deltas);
    
    _wtmat.resize(_nrows);
    _degree.resize(_nrows, 0.0);
    
    size_t NCORES = 8;
   
    std::map<double, unsigned int> sorted_features; // all features are unique, check learn or iterative learn algo
    std::vector< std::map<double, unsigned int> > sorted_features_p(NCORES); // all features are unique, check learn or iterative learn algo
    int part = 0;  
    for(size_t i=0; i < pfeatures.size(); i++){
	std::vector<double>& tmpvec = pfeatures[i];
	scale_vec(tmpvec, _deltas);
	double nrm = vec_norm(tmpvec);
	sorted_features.insert(std::make_pair(nrm, i));
	
	(sorted_features_p[part]).insert(std::make_pair(nrm, i));
	part = (part+1) % NCORES;
    }
    
    std::vector< boost::thread* > threads;
    std::vector< std::vector< std::vector<RowVal> > > tmpmat(sorted_features_p.size());
    std::vector< std::vector<double> > degrees(sorted_features_p.size());
    for(size_t pp=0; pp < sorted_features_p.size(); pp++){
      tmpmat[pp].resize(_nrows);
      degrees[pp].resize(_nrows, 0.0);
      //compute_weight_partial(sorted_features_p[i], sorted_features, pfeatures);
      threads.push_back(new boost::thread(&WeightMatrix_iter::compute_weight_partial, this, sorted_features_p[pp], sorted_features, pfeatures, boost::ref(tmpmat[pp]), boost::ref(degrees[pp])));
    }
    
    printf("Sync all threads \n");
    for (size_t ti=0; ti<threads.size(); ti++) 
      (threads[ti])->join();
    printf("all threads done\n");
    
    for(size_t pp=0; pp < tmpmat.size(); pp++){
	for(size_t i=0; i < tmpmat[pp].size(); i++){
	    if(tmpmat[pp][i].size()>0){
	       _wtmat[i].insert(_wtmat[i].end(),tmpmat[pp][i].begin(), tmpmat[pp][i].end());
	       (_degree[i]) += degrees[pp][i];
	    }
	}
    }    
    
    if(_nrows != _wtmat.size()){
	printf(" number of rows and wtmat size mismatch\n");
    }
    _nzd_indicator.resize(_nrows, 0);
    _trn_indicator.resize(_nrows, 0);
    for (size_t i=0; i < _nrows; i++){
	
	if (_degree[i] > 0){
	    _nzd_indicator[i] = 1;
	    _nzd_map.insert(std::make_pair(i, _nzd_count) );
	    _nzd_invmap.insert(std::make_pair(_nzd_count, i) );
	    _nzd_count++;
	}
	  
    }
    
    compute_Wnorm();
//     //** _nzd_count = non-zero degree count == number of columns 
//     //** _nnz = number of nonzeros
//     
//     _nnz = _Wnorm_i.size();
//     cholmod_solver.initialize(_nzd_count, _nnz, _Wnorm_i.data(), _Wnorm_j.data(), _Wnorm_v.data());
    
    /* C debug*
    FILE* fp=fopen("semi-supervised/tmp_wtmatrix_parallel.txt", "wt");
    for(size_t rr = 0; rr < _wtmat.size(); rr++){
	double luu = 0;
	double w_l = 0;
	for(size_t cc=0; cc < _wtmat[rr].size(); cc++){
	    size_t cidx = _wtmat[rr][cc].j;
	    double val = _wtmat[rr][cc].v;
	    //val = exp(-val/(0.5*_thd*_thd));
	    fprintf(fp, "%u %u %lf\n", rr, cidx, val);
	}
    }
    fclose(fp);
    //*C*/
    
    
}
// Sequentially builds the sparse symmetric affinity matrix _wtmat: for every
// pair of (bandwidth-scaled) features whose squared distance is below _thd^2,
// inserts weight exp(-dist/(0.5*_thd^2)) into both rows and accumulates the
// per-row degree in _degree.  Pairs are enumerated in order of increasing
// vector norm, which allows the early-exit pruning in the inner loop.
// NOTE(review): `exhaustive` is not read in this body -- confirm intent.
void WeightMatrix_iter::weight_matrix(std::vector< std::vector<double> >& allfeatures,
			   bool exhaustive)
{

  
    // Work on a private copy so the caller's features stay unmodified.
    std::vector< std::vector<double> > pfeatures;
    copy(allfeatures, pfeatures);

    _nrows = pfeatures.size();
    _ncols = pfeatures[0].size();

    EstimateBandwidth(pfeatures, _deltas);
    
    _wtmat.resize(_nrows);
    _degree.resize(_nrows, 0.0);
    
    unsigned long nnz=0;
   
    // Feature index keyed by its norm; map iteration is thus by increasing
    // norm.  NOTE(review): duplicate norms would be silently dropped by
    // insert(); upstream code assumes all features (hence norms) are unique.
    std::map<double, unsigned int> sorted_features;
    for(size_t i=0; i < pfeatures.size(); i++){
	std::vector<double>& tmpvec = pfeatures[i];
	scale_vec(tmpvec, _deltas);
	// The distance math below treats this as the SQUARED norm
	// (dist = nrm_i + nrm_j - 2*dot) -- confirm vec_norm's contract.
	double nrm = vec_norm(tmpvec);
	sorted_features.insert(std::make_pair(nrm, i));
    }
    std::map<double, unsigned int>::iterator sit_i;    
    std::map<double, unsigned int>::iterator sit_j;    
    
    
    // For each feature i, scan only features j with larger norm; every
    // qualifying pair is inserted symmetrically into rows i and j.
    for(sit_i = sorted_features.begin() ; sit_i != sorted_features.end(); sit_i++){
	
	sit_j = sit_i;
	sit_j++;
	size_t i = sit_i->second;
	unsigned int nchecks = 0;
	for(; sit_j != sorted_features.end(); sit_j++){
	    size_t j = sit_j->second;
	    double dot_ij = vec_dot(pfeatures[i], pfeatures[j]);  
	    // Squared Euclidean distance via ||i||^2 + ||j||^2 - 2*(i . j).
	    double dist = sit_i->first + sit_j->first - 2*dot_ij;
	    // Lower bound (||i|| - ||j||)^2 <= dist, valid for this j and all
	    // later (larger-norm) ones since the map is ordered by norm.
	    double min_dist = sit_i->first + sit_j->first - 2*sqrt(sit_i->first)*sqrt(sit_j->first);
	    
	    nchecks++;
	    
	    if (dist < (_thd*_thd)){
		double val =  exp(-dist/(0.5*_thd*_thd));
		
		// Symmetric insertion: the weight contributes to both rows.
		_wtmat[i].push_back(RowVal(j, val));
		(_degree[i]) += val;
		_wtmat[j].push_back(RowVal(i, val));
		(_degree[j]) += val;
		
		nnz += 2;
	    }
	    // Once even the lower bound exceeds the threshold, no remaining
	    // feature can be within range -- stop scanning this row.
	    if (min_dist>(_thd*_thd)){
	      break;
	    }
	}
    }
    printf("total nonzeros: %lu\n",nnz);

    /* C debug*
    FILE* fp=fopen("semi-supervised/tmp_wtmatrix.txt", "wt");
    for(size_t rr = 0; rr < _wtmat.size(); rr++){
	double luu = 0;
	double w_l = 0;
	for(size_t cc=0; cc < _wtmat[rr].size(); cc++){
	    size_t cidx = _wtmat[rr][cc].j;
	    double val = _wtmat[rr][cc].v;
	    //val = exp(-val/(0.5*_thd*_thd));
	    fprintf(fp, "%u %u %lf\n", rr, cidx, val);
	}
    }
    fclose(fp);
    //*C*/
    
    
}