void harp::spec_desisim::values ( vector_double & data ) const {
  data.resize ( nglobal_ );
  data.clear();

  fitsfile * fp;
  fits::open_read ( fp, path_ );

  // read the object flux
  fits::img_seek ( fp, objhdu_ );
  fits::img_read ( fp, data, false );

  // read the sky flux and sum
  vector_double skyflux ( data.size() );
  fits::img_seek ( fp, skyhdu_ );
  fits::img_read ( fp, skyflux, false );

  fits::close ( fp );

  for ( size_t i = 0; i < data.size(); ++i ) {
    data[i] += skyflux[i];
  }

  return;
}
// The error function F(x):
void myFunction( const vector_double &x, const vector_double &y, vector_double &out_f)
{
	out_f.resize(1);
	// 1 - cos(x+1) * cos(x*y+1)
	out_f[0] = 1 - cos(x[0]+1) * cos(x[0]*x[1]+1);
}
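// Usage sketch (hedged, not part of the original source): myFunction matches the
// functor signature used by mrpt::math::CLevenbergMarquardt; the execute()
// overload and header below follow the MRPT Levenberg-Marquardt sample and may
// differ across MRPT versions.
#include <mrpt/math/CLevenbergMarquardt.h>
using mrpt::math::CLevenbergMarquardt;

void example_minimize()
{
	CLevenbergMarquardt lm;
	CLevenbergMarquardt::TResultInfo info;

	vector_double optimal_x, initial_x, increments_x, y; // y is unused by myFunction
	initial_x.resize(2);
	initial_x[0] = 1.4;  initial_x[1] = 2.5;   // starting point
	increments_x.resize(2);
	increments_x[0] = increments_x[1] = 1e-4;  // finite-difference steps for the Jacobian

	lm.execute(optimal_x, initial_x, myFunction, increments_x, y, info);
	// info.final_sqr_err holds the squared error at the minimum found.
}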
/*---------------------------------------------------------------
					getAsVector
---------------------------------------------------------------*/
void CPose2D::getAsVector(vector_double &v) const
{
	v.resize(3);
	v[0] = m_coords[0];
	v[1] = m_coords[1];
	v[2] = m_phi;
}
void harp::spec_sim::inv_variance ( vector_double & data ) const {
  data.resize ( size_ );
  data.clear();
  return;
}
void ffff(const vector_double &x, const CQuaternionDouble &Q, vector_double &OUT)
{
	OUT.resize(3);
	CQuaternionDouble q(x[0],x[1],x[2],x[3]);
	q.normalize();
	q.rpy(OUT[2],OUT[1],OUT[0]);
}
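// Usage note (hedged): with x = {1,0,0,0} (the unit quaternion) the normalized q
// is the identity rotation and all three angles come out zero.  The call
// q.rpy(OUT[2],OUT[1],OUT[0]) assumes MRPT's rpy(roll,pitch,yaw) argument order,
// so OUT ends up laid out as {yaw, pitch, roll}.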
/*---------------------------------------------------------------
					getHistogramNormalized
---------------------------------------------------------------*/
void CHistogram::getHistogramNormalized( vector_double &x, vector_double &hits ) const
{
	const size_t N = m_bins.size();
	linspace(m_min, m_max, N, x);

	hits.resize(N);
	const double K = m_binSizeInv / m_count;
	for (size_t i=0; i<N; i++)
		hits[i] = K * m_bins[i];
}
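// Usage sketch (hedged): assumes the CHistogram(min,max,nBins) constructor and
// add() method.  Because each bin count is scaled by 1/(binSize*count), the
// output approximates a probability density: sum_i hits[i] * binSize ~= 1.
void example_histogram()
{
	CHistogram hist(0.0, 1.0, 100);      // 100 bins over [0,1]
	for (int i=0; i<1000; i++)
		hist.add( i/1000.0 );            // feed samples

	vector_double centers, density;
	hist.getHistogramNormalized(centers, density);
	// Integrating density over the bins recovers ~1.0.
}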
void harp::spec_desisim::lambda ( vector_double & lambda_vals ) const {
  lambda_vals.resize ( nlambda_ );
  for ( size_t i = 0; i < nlambda_; ++i ) {
    lambda_vals[i] = crval + cdelt * (double)i;
  }
  return;
}
/** Returns a 1x7 vector with [x y z qr qx qy qz] */
void CPose3DQuat::getAsVector(vector_double &v) const
{
	v.resize(7);
	v[0] = m_coords[0];
	v[1] = m_coords[1];
	v[2] = m_coords[2];
	v[3] = m_quat[0];
	v[4] = m_quat[1];
	v[5] = m_quat[2];
	v[6] = m_quat[3];
}
void harp::spec_sim::lambda ( vector_double & lambda_vals ) const {
  lambda_vals.resize ( nlambda_ );
  double incr = ( last_lambda_ - first_lambda_ ) / (double)( nlambda_ - 1 );
  for ( size_t j = 0; j < nlambda_; ++j ) {
    lambda_vals[j] = first_lambda_ + incr * (double)j;
  }
  return;
}
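// Worked example (standalone sketch, not HARP code): dividing by (n - 1) makes
// the grid endpoint-inclusive, so first maps to j = 0 and last to j = n-1.
// E.g. first = 4000, last = 4005, n = 6 yields 4000, 4001, ..., 4005.
#include <vector>
#include <cstddef>

std::vector<double> uniform_grid(double first, double last, std::size_t n)
{
	std::vector<double> vals(n);
	const double incr = (last - first) / (double)(n - 1); // requires n >= 2
	for (std::size_t j = 0; j < n; ++j)
		vals[j] = first + incr * (double)j;
	return vals;
}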
void harp::eigen_decompose ( matrix_double const & invcov, vector_double & D, matrix_double & W, bool regularize ) {

  D.resize ( invcov.size1() );
  W.resize ( invcov.size1(), invcov.size2() );

  matrix_double temp ( invcov );

  int nfound;
  boost::numeric::ublas::vector < int > support ( 2 * invcov.size1() );

  boost::numeric::bindings::lapack::syevr ( 'V', 'A', boost::numeric::bindings::lower ( temp ), 0.0, 0.0, 0, 0, 0.0, nfound, D, W, support );

  if ( regularize ) {

    double min = 1.0e100;
    double max = -1.0e100;
    for ( size_t i = 0; i < D.size(); ++i ) {
      if ( D[i] < min ) { min = D[i]; }
      if ( D[i] > max ) { max = D[i]; }
    }
    double rcond = min / max;

    // pick some delta that is bigger than machine precision, but still tiny
    double epsilon = 10.0 * std::numeric_limits < double > :: epsilon();

    if ( rcond < epsilon ) {
      double reg = max * epsilon - min;
      //cerr << "REG offset = " << reg << " for min / max = " << min << " / " << max << endl;
      for ( size_t i = 0; i < D.size(); ++i ) {
        D[i] += reg;
      }
    }

  }

  return;
}
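// Regularization arithmetic (illustrative numbers only): with max = 1.0 and
// min = 1e-18, rcond = 1e-18 is below epsilon ~= 2.2e-15, so the shift is
// reg = max*epsilon - min ~= 2.2e-15.  After D[i] += reg the smallest
// eigenvalue equals max*epsilon exactly, restoring rcond = epsilon while
// perturbing the large eigenvalues only at the machine-precision level.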
void harp::column_norm ( matrix_double const & mat, vector_double & S ) {
  S.resize( mat.size1() );
  S.clear();

  for ( size_t i = 0; i < mat.size2(); ++i ) {
    for ( size_t j = 0; j < mat.size1(); ++j ) {
      S[ j ] += mat( j, i );
    }
  }

  // Invert
  for ( size_t i = 0; i < S.size(); ++i ) {
    S[i] = 1.0 / S[i];
  }

  return;
}
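// Worked example (illustrative): for mat = [[1,3],[2,4]] the double loop
// accumulates row sums S = {1+3, 2+4} = {4, 6}, and the final pass stores
// their reciprocals S = {0.25, 0.1667}, ready to be applied as per-row
// normalization factors.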
void harp::spec_desisim::sky ( vector_double & data ) const {
  data.resize ( nglobal_ );
  data.clear();

  fitsfile * fp;
  fits::open_read ( fp, path_ );

  // read the sky flux
  fits::img_seek ( fp, skyhdu_ );
  fits::img_read ( fp, data, false );

  fits::close ( fp );

  return;
}
void harp::spec_sim::values ( vector_double & data ) const {
  double PI = std::atan2 ( 0.0, -1.0 );

  data.resize ( size_ );

  size_t bin = 0;
  size_t halfspace = (size_t)( atmspace_ / 2 );

  for ( size_t i = 0; i < nspec_; ++i ) {
    bool dosky = ( i % skymod_ == 0 );
    size_t objoff = 2 * i;

    for ( size_t j = 0; j < nlambda_; ++j ) {
      double val = 0.0;

      // slowly varying continuum built from three incommensurate sine terms
      val += background_ * sin ( 3.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) )
           * sin ( 7.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) )
           * sin ( 11.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) );
      val += 2.0 * background_;

      // regularly spaced atmospheric peaks
      if ( ( ( j + halfspace ) % atmspace_ ) == 0 ) {
        val += atmpeak_;
      }

      // object peaks, only for non-sky spectra, offset per spectrum
      if ( ! dosky ) {
        if ( ( ( j + objoff ) % objspace_ ) == 0 ) {
          val += objpeak_;
        }
      }

      data[ bin ] = val;
      ++bin;
    }
  }

  return;
}
void harp::sparse_mv_trans ( matrix_double_sparse const & AT, vector_double const & in, vector_double & out ) {
  // FIXME: for now, we just use the (unthreaded) boost sparse matrix-vector
  // product.  If this operation dominates the cost in any way, we can add a
  // threaded implementation here.

  size_t nrows = AT.size1();
  size_t ncols = AT.size2();

  if ( in.size() != nrows ) {
    std::ostringstream o;
    o << "length of input vector (" << in.size() << ") does not match number of rows in transposed matrix (" << nrows << ")";
    HARP_THROW( o.str().c_str() );
  }

  out.resize ( ncols );

  boost::numeric::ublas::axpy_prod ( in, AT, out, true );

  return;
}
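// Usage sketch (standalone, plain uBLAS types): axpy_prod(v, A, out, true)
// computes out = A^T * v, which is why sparse_mv_trans expects the matrix to be
// stored already in its transposed layout.
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/operation.hpp>
namespace ublas = boost::numeric::ublas;

void example_sparse_mv_trans()
{
	ublas::compressed_matrix<double> AT(3, 2); // 3 rows = length of the input vector
	AT(0,0) = 1.0;  AT(1,1) = 2.0;  AT(2,0) = 3.0;

	ublas::vector<double> in(3), out(2);
	in[0] = 1.0;  in[1] = 1.0;  in[2] = 1.0;

	ublas::axpy_prod(in, AT, out, true);   // out = trans(AT) * in = {4, 2}
}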
void harp::spec_sim::sky_truth ( vector_double & data ) const {
  double PI = std::atan2 ( 0.0, -1.0 );
  size_t halfspace = (size_t)( atmspace_ / 2 );

  // count the non-sky spectra, plus one slot for the shared sky spectrum
  size_t nreduced = 0;
  for ( size_t i = 0; i < nspec_; ++i ) {
    if ( i % skymod_ != 0 ) {
      ++nreduced;
    }
  }
  ++nreduced;

  size_t nbins = nreduced * nlambda_;
  data.resize ( nbins );

  // object-only truth for each non-sky spectrum
  size_t bin = 0;
  for ( size_t i = 0; i < nspec_; ++i ) {
    if ( i % skymod_ != 0 ) {
      size_t objoff = 2 * i;
      for ( size_t j = 0; j < nlambda_; ++j ) {
        double val = 0.0;
        if ( ( ( j + objoff ) % objspace_ ) == 0 ) {
          val += objpeak_;
        }
        data[ bin ] = val;
        ++bin;
      }
    }
  }

  // the shared sky spectrum: continuum plus atmospheric peaks
  for ( size_t j = 0; j < nlambda_; ++j ) {
    double val = 0.0;
    val += background_ * sin ( 3.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) )
         * sin ( 7.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) )
         * sin ( 11.0 * (double)j / ( (double)atmspace_ * 2.0 * PI ) );
    val += 2.0 * background_;
    if ( ( ( j + halfspace ) % atmspace_ ) == 0 ) {
      val += atmpeak_;
    }
    data[ bin ] = val;
    ++bin;
  }

  return;
}
/*---------------------------------------------------------------
				Evaluate each gap
---------------------------------------------------------------*/
void CHolonomicND::evaluateGaps(
	const vector_double	&obstacles,
	const double		maxObsRange,
	const TGapArray		&gaps,
	const int			TargetSector,
	const double		TargetDist,
	vector_double		&out_gaps_evaluation )
{
	out_gaps_evaluation.resize( gaps.size() );

	double	targetAng = M_PI*(-1 + 2*(0.5+TargetSector)/double(obstacles.size()));
	double	target_x  = TargetDist*cos(targetAng);
	double	target_y  = TargetDist*sin(targetAng);

	for (unsigned int i=0;i<gaps.size();i++)
	{
		// Shortcut for easier reference:
		const TGap *gap = &gaps[i];

		double d;
		d = min( obstacles[ gap->representative_sector ],
		         min( maxObsRange, 0.95f*TargetDist ) );

		// The representative coordinates (in the TP-Space) of the gap:
		double	phi = M_PI*(-1 + 2*(0.5+gap->representative_sector)/double(obstacles.size()));
		double	x = d*cos(phi);
		double	y = d*sin(phi);

		// Factor 1: distance reachable through this PTG:
		// -----------------------------------------------------
		double factor1;
		/* if (gap->representative_sector == TargetSector )
			factor1 = min(TargetDist,obstacles[gap->representative_sector]) / TargetDist;
		else
		{
			if (TargetDist>1)
				factor1 = obstacles[gap->representative_sector] / TargetDist;
			else
				factor1 = obstacles[gap->representative_sector];
		} */

		// Compute the mean distance reachable through this gap:
		double	meanDist = 0;
		for (int j=gap->ini;j<=gap->end;j++)
			meanDist += obstacles[j];
		meanDist /= ( gap->end - gap->ini + 1 );

		if (abs(gap->representative_sector-TargetSector)<=1 && TargetDist<1)
			factor1 = min(TargetDist,meanDist) / TargetDist;
		else
			factor1 = meanDist;

		// Factor 2: distance in sectors:
		// -------------------------------------------
		double dif = fabs(((double)( TargetSector - gap->representative_sector )));
		// if (dif> (0.5f*obstacles.size()) ) dif = obstacles.size() - dif;
		// Only if the target and the gap do NOT straddle the alpha = "-pi"/"pi" boundary
		if (dif > (0.5f*obstacles.size()) &&
		    (TargetSector-0.5f*obstacles.size())*(gap->representative_sector-0.5f*obstacles.size())<0 )
			dif = obstacles.size() - dif;

		double factor2 = exp(-square( dif / (obstacles.size()/4) ));

		// Factor 4 (anti-oscillation): avoid flip-flopping between 2 or more nearly identical paths:
		// -------------------------------------------
		double dist = (double)(abs(last_selected_sector - gap->representative_sector));
		// if (dist> (0.5f*obstacles.size()) ) dist = obstacles.size() - dist;
		double factor_AntiCab;

		if (last_selected_sector==-1)
			factor_AntiCab = 0;
		else
			factor_AntiCab = (dist > 0.10f*obstacles.size()) ? 0.0f : 1.0f;

		// Factor 3: minimum distance between the segment and the target;
		// moving away from the target is penalized:
		// -----------------------------------------------------
		double closestX,closestY;
		double dist_eucl = math::minimumDistanceFromPointToSegment(
			target_x, target_y,
			0,0, x,y,
			closestX,closestY );

		double factor3 = ( maxObsRange - min(maxObsRange, dist_eucl) ) / maxObsRange;

		ASSERT_(factorWeights.size()==4);

		if ( obstacles[gap->representative_sector] < TOO_CLOSE_OBSTACLE )  // Too close to obstacles
			out_gaps_evaluation[i] = 0;
		else
			out_gaps_evaluation[i] = (
				factorWeights[0] * factor1 +
				factorWeights[1] * factor2 +
				factorWeights[2] * factor3 +
				factorWeights[3] * factor_AntiCab ) / (math::sum(factorWeights));
	} // for each gap
}
// extracted from Particles3Dcomm.cpp
//
void Collective::read_particles_restart(
    const VCtopology3D* vct,
    int species_number,
    vector_double& u,
    vector_double& v,
    vector_double& w,
    vector_double& q,
    vector_double& x,
    vector_double& y,
    vector_double& z,
    vector_double& t) const
{
#ifdef NO_HDF5
  eprintf("Require HDF5 to read from restart file.");
#else
  if (vct->getCartesian_rank() == 0 && species_number == 0)
  {
    printf("LOADING PARTICLES FROM RESTART FILE in %s/restart.hdf\n",
      getRestartDirName().c_str());
  }
  stringstream ss;
  ss << vct->getCartesian_rank();
  string name_file = getRestartDirName() + "/restart" + ss.str() + ".hdf";

  // hdf stuff
  hid_t file_id, dataspace;
  hid_t datatype, dataset_id;
  herr_t status;
  size_t size;
  hsize_t dims_out[1]; /* dataset dimensions */
  int status_n;

  // open the hdf file
  file_id = H5Fopen(name_file.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
  if (file_id < 0) {
    eprintf("couldn't open file: %s\n"
      "\tRESTART NOT POSSIBLE", name_file.c_str());
    //cout << "couldn't open file: " << name_file << endl;
    //cout << "RESTART NOT POSSIBLE" << endl;
  }

  // find the last cycle
  int lastcycle = 0;
  dataset_id = H5Dopen2(file_id, "/last_cycle", H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &lastcycle);
  status = H5Dclose(dataset_id);

  stringstream species_name;
  species_name << species_number;

  ss.str(""); ss << "/particles/species_" << species_number << "/x/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  datatype  = H5Dget_type(dataset_id);
  size      = H5Tget_size(datatype);
  dataspace = H5Dget_space(dataset_id); /* dataspace handle */
  // get how many particles there are on this processor for this species
  status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL);
  const int nop = dims_out[0]; // number of particles in this process

  //Particles3Dcomm::resize_SoA(nop);
  {
    // allocate space for particles including padding
    const int padded_nop = roundup_to_multiple(nop, DVECWIDTH);
    u.reserve(padded_nop);
    v.reserve(padded_nop);
    w.reserve(padded_nop);
    q.reserve(padded_nop);
    x.reserve(padded_nop);
    y.reserve(padded_nop);
    z.reserve(padded_nop);
    t.reserve(padded_nop);
    // define size of particle data
    u.resize(nop);
    v.resize(nop);
    w.resize(nop);
    q.resize(nop);
    x.resize(nop);
    y.resize(nop);
    z.resize(nop);
    t.resize(nop);
  }

  // get x
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &x[0]);
  // close the data set
  status = H5Dclose(dataset_id);

  // get y
  ss.str(""); ss << "/particles/species_" << species_number << "/y/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &y[0]);
  status = H5Dclose(dataset_id);

  // get z
  ss.str(""); ss << "/particles/species_" << species_number << "/z/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &z[0]);
  status = H5Dclose(dataset_id);

  // get u
  ss.str(""); ss << "/particles/species_" << species_number << "/u/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &u[0]);
  status = H5Dclose(dataset_id);

  // get v
  ss.str(""); ss << "/particles/species_" << species_number << "/v/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &v[0]);
  status = H5Dclose(dataset_id);

  // get w
  ss.str(""); ss << "/particles/species_" << species_number << "/w/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &w[0]);
  status = H5Dclose(dataset_id);

  // get q
  ss.str(""); ss << "/particles/species_" << species_number << "/q/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &q[0]);
  // if ID is not saved, read in q as ID
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &t[0]);
  status = H5Dclose(dataset_id);

  /* get ID
  ss.str(""); ss << "/particles/species_" << species_number << "/ID/cycle_" << lastcycle;
  dataset_id = H5Dopen2(file_id, ss.str().c_str(), H5P_DEFAULT); // HDF 1.8.8
  status = H5Dread(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &t[0]);
  status = H5Dclose(dataset_id);
  */

  status = H5Fclose(file_id);
#endif
}
/*---------------------------------------------------------------
				Evaluate each gap
---------------------------------------------------------------*/
void CHolonomicND::evaluateGaps(
	const vector_double	&obstacles,
	const double		maxObsRange,
	const TGapArray		&gaps,
	const unsigned int	target_sector,
	const double		target_dist,
	vector_double		&out_gaps_evaluation )
{
	out_gaps_evaluation.resize( gaps.size() );

	const double targetAng = M_PI*(-1 + 2*(0.5+target_sector)/double(obstacles.size()));
	const double target_x  = target_dist*cos(targetAng);
	const double target_y  = target_dist*sin(targetAng);

	for (unsigned int i=0;i<gaps.size();i++)
	{
		// Shortcut:
		const TGap *gap = &gaps[i];

		const double d = min3(
			obstacles[ gap->representative_sector ],
			maxObsRange,
			0.95*target_dist );

		// The TP-Space representative coordinates for this gap:
		const double phi = M_PI*(-1 + 2*(0.5+gap->representative_sector)/double(obstacles.size()));
		const double x = d*cos(phi);
		const double y = d*sin(phi);

		// Factor #1: Maximum reachable distance with this PTG:
		// -----------------------------------------------------
		// It computes the average free distance of the gap:
		double meanDist = 0;
		for (unsigned int j=gap->ini;j<=gap->end;j++)
			meanDist += obstacles[j];
		meanDist /= ( gap->end - gap->ini + 1 );

		double factor1;
		if (mrpt::utils::abs_diff(gap->representative_sector, target_sector)<=1 && target_dist<1)
			factor1 = std::min(target_dist, meanDist) / target_dist;
		else
			factor1 = meanDist;

		// Factor #2: Distance to target in "sectors"
		// -------------------------------------------
		unsigned int dif = mrpt::utils::abs_diff(target_sector, gap->representative_sector);
		// Handle the -PI,PI circular topology:
		if (dif > 0.5*obstacles.size())
			dif = obstacles.size() - dif;
		const double factor2 = exp(-square( dif / (obstacles.size()*0.25) ));

		// Factor #3: Punish paths that take us far away from the target:
		// -----------------------------------------------------
		double closestX, closestY;
		const double dist_eucl = math::minimumDistanceFromPointToSegment(
			target_x, target_y,   // Point
			0,0, x,y,             // Segment
			closestX, closestY    // Out
			);
		const double factor3 = ( maxObsRange - std::min(maxObsRange, dist_eucl) ) / maxObsRange;

		// Factor #4: Stabilizing factor (hysteresis) to avoid quick switches among very similar paths:
		// ------------------------------------------------------------------------------------------
		double factor_AntiCab;
		if (m_last_selected_sector != std::numeric_limits<unsigned int>::max() )
		{
			const unsigned int dist = mrpt::utils::abs_diff(m_last_selected_sector, gap->representative_sector);
			factor_AntiCab = (dist > unsigned(0.1*obstacles.size())) ? 0.0 : 1.0;
		}
		else
		{
			factor_AntiCab = 0;
		}

		ASSERT_(options.factorWeights.size()==4);

		if ( obstacles[gap->representative_sector] < options.TOO_CLOSE_OBSTACLE )  // Too close to obstacles
			out_gaps_evaluation[i] = 0;
		else
			out_gaps_evaluation[i] = (
				options.factorWeights[0] * factor1 +
				options.factorWeights[1] * factor2 +
				options.factorWeights[2] * factor3 +
				options.factorWeights[3] * factor_AntiCab ) / (math::sum(options.factorWeights));
	} // for each gap
}
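// Scoring recap (illustrative numbers only): each viable gap receives a convex
// combination of the four factors,
//   eval = (w0*f1 + w1*f2 + w2*f3 + w3*f4) / (w0+w1+w2+w3),
// so with weights {1,1,1,1} and factors f1=0.8, f2=0.5, f3=0.9, f4=1.0 the gap
// scores 0.8.  A gap whose representative sector is blocked closer than
// TOO_CLOSE_OBSTACLE scores 0 regardless of the factors.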
/*---------------------------------------------------------------
					HornMethod
---------------------------------------------------------------*/
double scanmatching::HornMethod(
	const vector_double	&inVector,
	vector_double		&outVector,   // The output vector
	bool				forceScaleToUnity )
{
	MRPT_START

	vector_double input;
	input.resize( inVector.size() );
	input = inVector;
	outVector.resize( 7 );

	// Compute the centroids
	TPoint3D cL(0,0,0), cR(0,0,0);
	const size_t nMatches = input.size()/6;
	ASSERT_EQUAL_(input.size()%6, 0)

	for( unsigned int i = 0; i < nMatches; i++ )
	{
		cL.x += input[i*6+3];
		cL.y += input[i*6+4];
		cL.z += input[i*6+5];

		cR.x += input[i*6+0];
		cR.y += input[i*6+1];
		cR.z += input[i*6+2];
	}

	ASSERT_ABOVE_(nMatches,0)
	const double F = 1.0/nMatches;
	cL *= F;
	cR *= F;

	CMatrixDouble33 S; // Zeroed by default

	// Subtract the centroid and compute the S matrix of cross products
	for( unsigned int i = 0; i < nMatches; i++ )
	{
		input[i*6+3] -= cL.x;
		input[i*6+4] -= cL.y;
		input[i*6+5] -= cL.z;

		input[i*6+0] -= cR.x;
		input[i*6+1] -= cR.y;
		input[i*6+2] -= cR.z;

		S.get_unsafe(0,0) += input[i*6+3]*input[i*6+0];
		S.get_unsafe(0,1) += input[i*6+3]*input[i*6+1];
		S.get_unsafe(0,2) += input[i*6+3]*input[i*6+2];

		S.get_unsafe(1,0) += input[i*6+4]*input[i*6+0];
		S.get_unsafe(1,1) += input[i*6+4]*input[i*6+1];
		S.get_unsafe(1,2) += input[i*6+4]*input[i*6+2];

		S.get_unsafe(2,0) += input[i*6+5]*input[i*6+0];
		S.get_unsafe(2,1) += input[i*6+5]*input[i*6+1];
		S.get_unsafe(2,2) += input[i*6+5]*input[i*6+2];
	}

	// Construct the N matrix
	CMatrixDouble44 N; // Zeroed by default

	N.set_unsafe( 0,0, S.get_unsafe(0,0) + S.get_unsafe(1,1) + S.get_unsafe(2,2) );
	N.set_unsafe( 0,1, S.get_unsafe(1,2) - S.get_unsafe(2,1) );
	N.set_unsafe( 0,2, S.get_unsafe(2,0) - S.get_unsafe(0,2) );
	N.set_unsafe( 0,3, S.get_unsafe(0,1) - S.get_unsafe(1,0) );

	N.set_unsafe( 1,0, N.get_unsafe(0,1) );
	N.set_unsafe( 1,1, S.get_unsafe(0,0) - S.get_unsafe(1,1) - S.get_unsafe(2,2) );
	N.set_unsafe( 1,2, S.get_unsafe(0,1) + S.get_unsafe(1,0) );
	N.set_unsafe( 1,3, S.get_unsafe(2,0) + S.get_unsafe(0,2) );

	N.set_unsafe( 2,0, N.get_unsafe(0,2) );
	N.set_unsafe( 2,1, N.get_unsafe(1,2) );
	N.set_unsafe( 2,2, -S.get_unsafe(0,0) + S.get_unsafe(1,1) - S.get_unsafe(2,2) );
	N.set_unsafe( 2,3, S.get_unsafe(1,2) + S.get_unsafe(2,1) );

	N.set_unsafe( 3,0, N.get_unsafe(0,3) );
	N.set_unsafe( 3,1, N.get_unsafe(1,3) );
	N.set_unsafe( 3,2, N.get_unsafe(2,3) );
	N.set_unsafe( 3,3, -S.get_unsafe(0,0) - S.get_unsafe(1,1) + S.get_unsafe(2,2) );

	// q is the quaternion corresponding to the largest eigenvalue of the N matrix (last column of Z)
	CMatrixDouble44 Z, D;
	vector_double v;

	N.eigenVectors( Z, D );
	Z.extractCol( Z.getColCount()-1, v );

	ASSERTDEB_( fabs( sqrt( v[0]*v[0] + v[1]*v[1] + v[2]*v[2] + v[3]*v[3] ) - 1.0 ) < 0.1 );

	// Make q_r > 0
	if( v[0] < 0 ){ v[0] *= -1; v[1] *= -1; v[2] *= -1; v[3] *= -1; }

	// Create a pose rotation with the quaternion
	CPose3DQuat q;
	for( unsigned int i = 0; i < 4; i++ )  // insert the quaternion part
		outVector[i+3] = q[i+3] = v[i];

	// Compute the scale
	double num = 0.0;
	double den = 0.0;
	for( unsigned int i = 0; i < nMatches; i++ )
	{
		num += square( input[i*6+0] ) + square( input[i*6+1] ) + square( input[i*6+2] );
		den += square( input[i*6+3] ) + square( input[i*6+4] ) + square( input[i*6+5] );
	} // end-for

	// The scale:
	double s = std::sqrt( num/den );

	// Enforce scale to be 1
	if( forceScaleToUnity )
		s = 1.0;

	TPoint3D pp;
	q.composePoint( cL.x, cL.y, cL.z, pp.x, pp.y, pp.z );
	pp *= s;

	outVector[0] = cR.x - pp.x;	// X
	outVector[1] = cR.y - pp.y;	// Y
	outVector[2] = cR.z - pp.z;	// Z

	return s; // return scale

	MRPT_END
}
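// Usage sketch (hedged): the input layout is inferred from the indexing above;
// each match contributes six doubles [xR yR zR xL yL zL], and the returned pose
// [x y z qr qx qy qz] maps the "L" points onto the "R" points.
void example_horn()
{
	vector_double pairs, pose7;
	pairs.resize(6*3);   // three point matches
	// ... fill pairs[i*6+0..2] with the i-th "R" point,
	//     pairs[i*6+3..5] with the i-th "L" point ...

	const double scale = scanmatching::HornMethod(pairs, pose7, true /*force s=1*/);
	// pose7 = [x y z qr qx qy qz]; scale == 1.0 here because it was forced to unity.
}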