template <typename PointT, typename PointNT> void
pcl::SurfelSmoothing<PointT, PointNT>::smoothCloudIteration (PointCloudInPtr &output_positions,
                                                             NormalCloudPtr &output_normals)
{
  PCL_INFO ("SurfelSmoothing: cloud smoothing iteration starting ...\n");

  tree_->setInputCloud (interm_cloud_);

  output_positions = PointCloudInPtr (new PointCloudIn);
  output_positions->points.resize (interm_cloud_->points.size ());
  output_normals = NormalCloudPtr (new NormalCloud);
  output_normals->points.resize (interm_cloud_->points.size ());

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  std::vector<float> diffs (interm_cloud_->points.size ());

  Eigen::Vector4f total_residual = Eigen::Vector4f::Zero ();
  for (size_t i = 0; i < interm_cloud_->points.size (); ++i)
  {
    Eigen::Vector4f smoothed_point = Eigen::Vector4f::Zero ();
    Eigen::Vector4f smoothed_normal = Eigen::Vector4f::Zero ();

    // get neighbors
    tree_->radiusSearch (i, scale_, nn_indices, nn_distances);

    float theta_normalization_factor = 0.0;
    Eigen::Vector4f e_residual = Eigen::Vector4f::Zero ();
    for (std::vector<int>::iterator nn_index_it = nn_indices.begin (); nn_index_it != nn_indices.end (); ++nn_index_it)
    {
      float dist = pcl::squaredEuclideanDistance (interm_cloud_->points[i], interm_cloud_->points[*nn_index_it]);
      float theta_i = exp ( (-1) * dist / scale_squared_);
      theta_normalization_factor += theta_i;

      smoothed_normal += theta_i * interm_normals_->points[*nn_index_it].getNormalVector4fMap ();
      e_residual += theta_i * (interm_cloud_->points[i].getVector4fMap () - interm_cloud_->points[*nn_index_it].getVector4fMap ());
    }
    smoothed_normal /= theta_normalization_factor;
    e_residual /= theta_normalization_factor;
    smoothed_point = interm_cloud_->points[i].getVector4fMap () - e_residual.dot (smoothed_normal) * smoothed_normal;
    /// smoothed_point = interm_cloud_->points[point_i].getVector3fMap () - e_residual;
    total_residual += e_residual;

    output_positions->points[i].getVector4fMap () = smoothed_point;
    output_normals->points[i].getNormalVector4fMap () = smoothed_normal;

    // Calculate difference
    diffs[i] = smoothed_normal.dot (smoothed_point - interm_cloud_->points[i].getVector4fMap ());
  }

  std::cerr << "Total residual after an iteration: " << total_residual << std::endl;
  PCL_INFO ("SurfelSmoothing done iteration\n");
}
bool Segment::operator<(const Segment& other) const
{
  // Empty segments sort before everything else; two empty segments are
  // equivalent. (Unconditionally returning true here would make an empty
  // segment compare less than itself and break strict weak ordering.)
  if (is_empty()) return !other.is_empty();
  else if (other.is_empty()) return false;
  std::vector<double> diffs(4);
  diffs[0] = l_ - other.l_;
  diffs[1] = u_ - other.u_;
  diffs[2] = flag2double(other.li_) - flag2double(li_);
  diffs[3] = flag2double(ui_) - flag2double(other.ui_);
  return less_than_helper(diffs);
}
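// less_than_helper is not part of the snippet above. A minimal sketch of a
// plausible implementation, assuming the intended semantics are "decide by
// the first nonzero difference" (hypothetical; the real helper may differ):
#include <vector>

static bool less_than_helper(const std::vector<double>& diffs)
{
  for (std::size_t i = 0; i < diffs.size(); ++i) {
    if (diffs[i] < 0) return true;   // first deciding entry says "less"
    if (diffs[i] > 0) return false;  // first deciding entry says "greater"
  }
  return false;                      // all entries zero: equivalent, not less
}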
double get_rotational_diffusion_coefficient(
    const algebra::Rotation3Ds &displacements, double dt) {
  // per-step rotation angles: angle of the relative rotation between
  // consecutive orientations
  Floats diffs(displacements.size() - 1);
  for (unsigned int i = 1; i < displacements.size(); ++i) {
    algebra::Rotation3D orot = displacements[i - 1];
    algebra::Rotation3D crot = displacements[i];
    algebra::Rotation3D diff = crot / orot;
    diffs[i - 1] = algebra::get_axis_and_angle(diff).second;
  }
  double mean =
      std::accumulate(diffs.begin(), diffs.end(), 0.0) / diffs.size();
  double stdsum = 0;
  for (unsigned int i = 0; i < diffs.size(); ++i) {
    stdsum += algebra::get_squared(diffs[i] - mean);
  }
  double sigma = stdsum / diffs.size();  // variance of the step angles
  return sigma / (6.0 * dt);
}
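The estimator the function above implements, written out (this is a reading of the code, not a statement of the library's documented formula): with $\theta_i$ the angle of the relative rotation between consecutive frames and $\Delta t$ the time step,

$$
D_{\mathrm{rot}} \;=\; \frac{\sigma^2}{6\,\Delta t},
\qquad
\sigma^2 \;=\; \frac{1}{n}\sum_{i=1}^{n}\bigl(\theta_i-\bar\theta\bigr)^2 ,
$$

the $6 = 2\cdot 3$ matching a mean-square displacement of $2\,d\,D\,t$ over $d=3$ rotational degrees of freedom.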
Foam::Tuple2<Foam::label, Foam::scalar> Foam::lduAddressing::band() const
{
    const labelgpuList& owner = lowerAddr();
    const labelgpuList& neighbour = upperAddr();

    labelgpuList cellBandwidth(size(), 0);
    labelgpuList diffs(neighbour.size(), 0);

    thrust::transform
    (
        neighbour.begin(),
        neighbour.end(),
        owner.begin(),
        diffs.begin(),
        subtractOperatorFunctor<label,label,label>()
    );

    thrust::transform
    (
        diffs.begin(),
        diffs.end(),
        thrust::make_permutation_iterator
        (
            cellBandwidth.begin(),
            neighbour.begin()
        ),
        thrust::make_permutation_iterator
        (
            cellBandwidth.begin(),
            neighbour.begin()
        ),
        maxBinaryFunctionFunctor<label,label,label>()
    );

    label bandwidth = max(cellBandwidth);

    // Do not use field algebra because of conversion label to scalar
    scalar profile = thrust::reduce
    (
        cellBandwidth.begin(),
        cellBandwidth.end()
    );

    return Tuple2<label, scalar>(bandwidth, profile);
}
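// The two functors passed to thrust::transform above are not shown in this
// snippet. Minimal sketches of what they presumably look like (hypothetical;
// the real definitions live elsewhere in the gpu port):
template<class A, class B, class R>
struct subtractOperatorFunctor
{
    __host__ __device__
    R operator()(const A& a, const B& b) const { return a - b; }
};

template<class A, class B, class R>
struct maxBinaryFunctionFunctor
{
    __host__ __device__
    R operator()(const A& a, const B& b) const { return a > b ? a : b; }
};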
template <typename PointT, typename PointNT> void
pcl::SurfelSmoothing<PointT, PointNT>::extractSalientFeaturesBetweenScales (PointCloudInPtr &cloud2,
                                                                            NormalCloudPtr &cloud2_normals,
                                                                            boost::shared_ptr<std::vector<int> > &output_features)
{
  if (interm_cloud_->points.size () != cloud2->points.size () ||
      cloud2->points.size () != cloud2_normals->points.size ())
  {
    PCL_ERROR ("[pcl::SurfelSmoothing::extractSalientFeaturesBetweenScales]: Number of points in the clouds does not match.\n");
    return;
  }

  std::vector<float> diffs (cloud2->points.size ());
  for (size_t i = 0; i < cloud2->points.size (); ++i)
    diffs[i] = cloud2_normals->points[i].getNormalVector4fMap ().dot (cloud2->points[i].getVector4fMap () -
                                                                      interm_cloud_->points[i].getVector4fMap ());

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  output_features->resize (cloud2->points.size ());
  for (size_t point_i = 0; point_i < cloud2->points.size (); ++point_i)
  {
    // Get neighbors
    tree_->radiusSearch (point_i, scale_, nn_indices, nn_distances);

    bool largest = true;
    bool smallest = true;
    for (std::vector<int>::iterator nn_index_it = nn_indices.begin (); nn_index_it != nn_indices.end (); ++nn_index_it)
    {
      // skip the query point itself: comparing diffs[point_i] against its own
      // value takes the else branch and would force smallest to false
      if (*nn_index_it == static_cast<int> (point_i))
        continue;
      if (diffs[point_i] < diffs[*nn_index_it])
        largest = false;
      else
        smallest = false;
    }

    if (largest == true || smallest == true)
      (*output_features)[point_i] = point_i;
  }
}
std::vector<std::vector<double> > interpolate(const std::vector<double> &p1,
                                              const std::vector<double> &p2,
                                              double step_size)
{
    std::vector<std::vector<double> > interpolation;
    double dist = getLineDistance(p1, p2);
    if (dist > step_size) {
        double fraction = step_size / dist;
        std::vector<double> diffs(p1.size());
        std::vector<double> point(p1.size());
        for (unsigned int i = 0; i < p1.size(); i++) {
            diffs[i] = getAngleBetween(p1[i], p2[i]);
        }
        for (unsigned int i = 1; fraction * i < 1.0; i++) {
            for (unsigned int j = 0; j < point.size(); j++) {
                point[j] = p1[j] + fraction * i * getDirectionMultiplier(p1[j], p2[j]) * diffs[j];
            }
            interpolation.push_back(point);
        }
    }
    return interpolation;
}
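// The three helpers used by interpolate() are not shown. Hedged sketches,
// assuming plain revolute joints measured in radians (hypothetical; the real
// project may treat joint limits and wrapping differently):
#include <cmath>
#include <vector>

// Euclidean distance between two configurations
static double getLineDistance(const std::vector<double>& p1,
                              const std::vector<double>& p2)
{
    double s = 0.0;
    for (unsigned int i = 0; i < p1.size(); i++)
        s += (p2[i] - p1[i]) * (p2[i] - p1[i]);
    return std::sqrt(s);
}

// magnitude of the shortest angular path from a to b, in [0, pi]
static double getAngleBetween(double a, double b)
{
    double d = std::fmod(std::fabs(b - a), 2.0 * M_PI);
    return d > M_PI ? 2.0 * M_PI - d : d;
}

// sign (+1/-1) of the shortest angular path from a to b
static double getDirectionMultiplier(double a, double b)
{
    double d = std::fmod(b - a, 2.0 * M_PI);
    if (d > M_PI)  d -= 2.0 * M_PI;
    if (d < -M_PI) d += 2.0 * M_PI;
    return d >= 0.0 ? 1.0 : -1.0;
}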
int64 problem101(int64 n) {
    if (n <= 0) {
        return 0;
    }

    // diffs[t][x]: t-th forward difference of the sequence, starting at x
    vector<vector<int64>> diffs((uint32)n + 1, vector<int64>((uint32)n + 1, -1));
    vector<int64> terms((uint32)n + 1);

    // row 0: the generating polynomial u(x) = 1 - x + x^2 - ... + x^n
    for (int64 x = 1; x <= n; x++) {
        int64 sum = 0;
        for (int64 i = 0; i <= n; i++) {
            if (i % 2 == 0) {
                sum += power(x, i);
            } else {
                sum -= power(x, i);
            }
        }
        diffs[0][x] = sum;
    }

    // higher-order forward differences
    for (int64 term = 1; term < n; term++) {
        for (int64 x = 1; x <= n - term; x++) {
            diffs[term][x] = diffs[term - 1][x + 1] - diffs[term - 1][x];
        }
    }

    // extrapolate each fitted polynomial one step past its fitting window
    // via Newton's forward differences and sum the first incorrect terms
    int64 diffResult = 0;
    for (int64 term = 1; term <= n; term++) {
        int64 expectedTerm = diffs[0][term];
        for (int64 i = term - 1; i >= 1; i--) {
            expectedTerm += diffs[term - i][i];
        }
        diffResult += expectedTerm;
    }

    return diffResult;
}
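// Usage sketch (Project Euler 101 uses the degree-10 polynomial, so n = 10;
// the expected output is that problem's published answer, not restated here):
#include <cstdio>

int main() {
    printf("%lld\n", (long long)problem101(10));
    return 0;
}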
Results* join_clusters2_restart
(double *x,            // array/matrix of data
 SymNoDiag *W,         // lower triangle of weight matrix
 unsigned int Px,      // problem size
 double lambda,        // starting point in regularization path
 double join_thresh,   // tolerance for equality of points
 double opt_thresh,    // tolerance for optimality
 double lambda_factor, // increase of lambda after optimality
 double smooth,        // smoothing parameter
 int maxit,
 int linesearch_freq,  // how often to do a linesearch? if 0, never. if n>0,
                       // do n-1 linesearch steps for every decreasing step
                       // size step. set this to 2 if unsure.
 int linesearch_points,// how many points to check along the gradient
                       // direction. set to 10 if unsure.
 int check_splits,
 int target_cluster,
 int verbose
 ){
  unsigned int N = W->N;
  //W->print();
  double old_lambda=0;
  std::vector<int> rows,rowsj;
  std::vector<int>::iterator rowit,ri,rj;
  std::list< std::vector<int> > clusters,tocheck;
  std::list< std::vector<int> >::iterator it,cj;
  unsigned int i,k,j;
  int tried_restart;
  for(i=0;i<N;i++){
    rows.assign(1,i);
    clusters.push_back(rows);
  }
  double *old_alpha = new double[N*Px];
  double *alpha = new double[N*Px];
  double *xbar = new double[N*Px];
  double *dir = new double[N*Px];
  for(i=0;i<N*Px;i++){
    alpha[i]=xbar[i]=x[i];
  }
  Matrix amat(alpha,N,Px),xmat(x,N,Px);
  SymNoDiag diffs(N);
  diffs.calc_diffs(clusters,amat,nrm2);
  //store initial trivial solution
  Results *results = new Results(N,Px,opt_thresh);
  if(target_cluster==0)results->add(alpha,0,0);
  double weight,diff,step;
  while(clusters.size()>1){
    double grad=opt_thresh;
    int iteration=1;
    tried_restart=0;
    //if we use the general (slower) algorithm for any weights, then
    //split the clusters to individual points
    if(check_splits){
      clusters.clear();
      //reassign original clusters
      for(i=0;i<N;i++){
        rows.assign(1,i);
        clusters.push_back(rows);
      }
      //recopy original xbar
      for(i=0;i<N*Px;i++){
        xbar[i]=x[i];
      }
    }
    while(grad>=opt_thresh){
      //first calc gradients
      grad = 0;
      for(it=clusters.begin();it!=clusters.end();it++){
        rows = *it;
        i = rows[0];
        for(k=0;k<Px;k++){
          dir[i+k*N] = xbar[i+k*N] - alpha[i+k*N];
        }
        for(cj=clusters.begin();cj!=clusters.end();cj++){
          if(it!=cj){
            rowsj = *cj;
            j=rowsj[0];
            weight=0;
            diff = *diffs(i,j);
            if(diff!=0){
              if(smooth!=0){
                diff *= diff;     //now squared l2 norm
                diff += smooth;   //add smoothing parameter under sqrt
                diff = sqrt(diff);//put sqrt back
              }
              for(ri=rows.begin();ri!=rows.end();ri++){
                for(rj=rowsj.begin();rj!=rowsj.end();rj++){
                  weight += W->getval(*ri,*rj);
                }
              }
              //weight *= lambda / diff / ((double)(N-1)) / ((double)rows.size());
              weight *= lambda / diff / ((double)rows.size());
              for(k=0;k<Px;k++){
                dir[i+k*N] += weight * (alpha[j+k*N]-alpha[i+k*N]);
              }
            }
          }
        }
        grad += nrm2(Array(dir+i,N,Px));
      }
      //store this iteration
      //results->add(alpha,lambda,grad);
      //then take a step
      if(linesearch_freq==0 || (iteration % linesearch_freq)==0 ){
        //Decreasing step size
        //TDH and pierre 18 jan 2011 try sqrt dec step size
        step=1/((double)iteration);
        //step=1/sqrt((double)iteration);
        if(verbose>=2)printf("grad %f step %f it %d\n",grad,step,iteration);
        take_step(clusters,alpha,dir,N,Px,step);
      }else{
        double cost_here,cost_step;
        std::map<double,double> cost_steps;
        std::map<double,double>::iterator step1,step2;
        for(i=0;i<N*Px;i++)old_alpha[i]=alpha[i]; //copy alpha
        //compare current cost to cost after stepping in gradient direction
        cost_here=cost_step=calc_cost(clusters,amat,xmat,W,diffs,lambda);
        step = 0;
        cost_steps.insert(std::pair<double,double>(cost_here,0));
        while(cost_step<=cost_here){
          take_step(clusters,alpha,dir,N,Px,1);
          step += 1;
          diffs.calc_diffs(clusters,amat,nrm2);
          cost_step=calc_cost(clusters,amat,xmat,W,diffs,lambda);
          if(verbose>=2)
            printf("cost %.10f step %f cost_here %f\n",cost_step,step,cost_here);
          cost_steps.insert(std::pair<double,double>(cost_step,step));
        }
        for(int cuts=0;cuts<linesearch_points;cuts++){
          step1=step2=cost_steps.begin();
          step2++;
          step = (step1->second + step2->second)/2;
          for(i=0;i<N*Px;i++){
            alpha[i]=old_alpha[i];
          }
          take_step(clusters,alpha,dir,N,Px,step);
          diffs.calc_diffs(clusters,amat,nrm2);
          cost_step=calc_cost(clusters,amat,xmat,W,diffs,lambda);
          if(verbose>=2)printf("cost %.10f step %f %d\n",cost_step,step,cuts);
          cost_steps.insert(std::pair<double,double>(cost_step,step));
        }
        cost_steps.clear();
      }
      if(iteration++ > maxit){
        if(tried_restart){
          printf("max iteration %d exit\n",maxit);
          // these buffers were allocated with new[], so delete[] is required
          // (plain delete here would be undefined behavior)
          delete [] old_alpha;
          delete [] alpha;
          delete [] xbar;
          delete [] dir;
          return results;
        }else{
          if(verbose>=1)printf("max iterations, trying restart from x\n");
          tried_restart=1;
          iteration=1;
          for(i=0;i<N*Px;i++)alpha[i]=x[i];
        }
      }
      //calculate differences
      diffs.calc_diffs(clusters,amat,nrm2);
      //check for joins
      JoinPair tojoin;
      while(dojoin(tojoin=check_clusters_thresh(&clusters,diffs,join_thresh))){
        //if(verbose>=1)
        //  printf("join: %d %d\n",tojoin.first->front(),tojoin.second->front());
        int ni=tojoin.first->size();
        int nj=tojoin.second->size();
        i=tojoin.first->front();
        j=tojoin.second->front();
        tojoin.first->insert(tojoin.first->end(),
                             tojoin.second->begin(),
                             tojoin.second->end());
        for(k=0;k<Px;k++){
          alpha[i+k*N] = (alpha[i+k*N]*ni + alpha[j+k*N]*nj)/(ni+nj);
          xbar[i+k*N] = (xbar[i+k*N]*ni + xbar[j+k*N]*nj)/(ni+nj);
        }
        clusters.erase(tojoin.second);
        iteration=1;
        if(clusters.size()>1){
          diffs.calc_diffs(clusters,amat,nrm2); //inefficient
        }else{
          grad=0; //so we can escape from the last optimization loop
        }
      }
    } //while(grad>=opt_thresh)
    if(verbose>=1)
      printf("solution iteration %d lambda %f nclusters %d\n",
             iteration,lambda,(int)clusters.size());
    if(target_cluster == 0){
      //for each cluster, there may be several points. we store the
      //alpha value just in the row of the first point. thus here we
      //copy this value to the other rows before copying the optimal
      //alpha to results.
      for(it=clusters.begin();it!=clusters.end();it++){
        rows = *it;
        if(rows.size()>1){
          for(i=1;i<rows.size();i++){
            for(k=0;k<Px;k++){
              alpha[rows[i]+k*N] = alpha[rows[0]+k*N];
            }
          }
        }
      }
      results->add(alpha,lambda,grad);
    }
    //haven't yet reached the target number of clusters, multiply
    //lambda by lambda_factor and continue along the path
    if((int)clusters.size()>target_cluster){
      old_lambda=lambda;
      lambda *= lambda_factor;
    }
    //if we have passed the target cluster number then decrease
    //lambda and go look for it!
    if((int)clusters.size()<target_cluster){
      if(verbose>=1){
        printf("missed target %d, going back for it\n",target_cluster);
      }
      lambda = (lambda+old_lambda)/2;
      clusters.clear();
      //reassign original clusters
      for(i=0;i<N;i++){
        rows.assign(1,i);
        clusters.push_back(rows);
      }
      //recopy original xbar
      for(i=0;i<N*Px;i++){
        xbar[i]=x[i];
      }
    }
    //this is the number of clusters that we were looking for,
    //save and quit!
    if((int)clusters.size()==target_cluster){
      for(it=clusters.begin();it!=clusters.end();it++){
        rows = *it;
        if(rows.size()>1){
          for(i=1;i<rows.size();i++){
            for(k=0;k<Px;k++){
              alpha[rows[i]+k*N] = alpha[rows[0]+k*N];
            }
          }
        }
      }
      results->add(alpha,lambda,grad);
      if(verbose>=1)printf("got target cluster %d exit\n",target_cluster);
      delete [] old_alpha;
      delete [] alpha;
      delete [] xbar;
      delete [] dir;
      return results;
    }
  }
  //TODO: consolidate cleanup... just use data structures that
  //automatically clean themselves up when the function exits.
  delete [] old_alpha;
  delete [] alpha;
  delete [] xbar;
  delete [] dir;
  return results;
}
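// Re the TODO above: the repeated delete[] blocks on each exit path would
// disappear if the four work buffers were std::vectors. A sketch of the idea
// only, not a drop-in patch for the function above:
#include <vector>

static Results* join_clusters2_restart_sketch(double *x, SymNoDiag *W,
                                              unsigned int Px, double opt_thresh)
{
  unsigned int N = W->N;
  std::vector<double> old_alpha(N*Px), alpha(N*Px), xbar(N*Px), dir(N*Px);
  // ... run the same algorithm, passing alpha.data() etc. where the raw
  // pointers were used; every return path then releases the buffers
  // automatically, and the cleanup consolidation comes for free.
  return new Results(N, Px, opt_thresh);
}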
int main(int argc, char **argv)
{
    if (argc < 2) {  /* was argc < 1, which can never be true: argv[0] always counts */
        printf("usage: %s <item> [<item> ...]\n", argv[0]);
        printf("An <item> can be a file path, a directory path, or a pid.\n");
        return 1;
    }
    int itemCount = argc - 1;
    struct kevent events[itemCount];
    t_watchInfo watchInfos[itemCount];
    int timeoutTime = -1;
    itemCount = 0;
    for (int i = 1; i < argc; i++) {
        char *item = argv[i];
        if (strcmp(item, "-t") == 0) {
            timeoutTime = atoi(argv[++i]);
        }
        else if (isdigit(item[0])) {
            int pid = atoi(item);
            EV_SET(&events[itemCount], pid, EVFILT_PROC,
                   EV_ADD | EV_ENABLE | EV_CLEAR, NOTE_EXIT, 0, NULL);
            ++itemCount;
        }
        else {
            struct stat sb;
            if (stat(item, &sb) == -1) {
                die("stat");
            }
            if (S_ISDIR(sb.st_mode)) {
                watchInfos[itemCount].dir = parseDir(item);
            }
            else {
                watchInfos[itemCount].dir = NULL;
            }
            watchInfos[itemCount].path = item;
            int fd = open(item, O_RDONLY);
            if (fd == -1) {
                die("open");
            }
            EV_SET(&events[itemCount], fd, EVFILT_VNODE,
                   EV_ADD | EV_ENABLE | EV_CLEAR,
                   NOTE_RENAME | NOTE_WRITE, 0, &watchInfos[itemCount]);
            ++itemCount;
        }
    }
    struct timespec timeout;
    memset(&timeout, 0, sizeof(struct timespec));
    timeout.tv_sec = timeoutTime;
    int result = kevent(kqueue(), events, itemCount, events, itemCount,
                        timeoutTime >= 0 ? &timeout : NULL);
    if (result > 0) {
        for (int i = 0; i < result; ++i) {
            t_watchInfo *hit = events[i].udata;
            if (hit == NULL) {
                fprintf(stdout, "proc %d\n", (int) events[i].ident);
            }
            else if (hit->dir != NULL) {
                t_dirContents *dirNow = parseDir(hit->path);
                t_dirContents *dirDiffs = diffs(hit->dir, dirNow);
                if (dirDiffs->count > 0) {
                    fprintf(stdout, "%s %s%s%s\n",
                            ((hit->dir != NULL && dirNow != NULL && hit->dir->count > dirNow->count)
                             || (hit->dir != NULL && dirNow == NULL)) ? "-" : "+",
                            hit->path,
                            (hit->path[strlen(hit->path)-1] == '/') ? "" : "/",
                            dirDiffs->entries[0]);
                }
            }
            else {
                fprintf(stdout, "%s\n", hit->path);
            }
        }
        return 0;
    }
    else if (result == 0) {
        fprintf(stdout, "timeout\n");
    }
    else {
        fprintf(stderr, "result: %d\n", result);
        die("kevent");
    }
    return 1;
}
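/* The watcher above uses two structs whose definitions are not in this
 * snippet. Hedged sketches consistent with the fields referenced here
 * (hypothetical layouts; the real headers may carry more state): */
typedef struct s_dirContents {
    int    count;     /* number of directory entries */
    char **entries;   /* entry names */
} t_dirContents;

typedef struct s_watchInfo {
    char          *path;  /* the watched path, as given on the command line */
    t_dirContents *dir;   /* directory snapshot at setup time, NULL for files */
} t_watchInfo;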
template <typename PointT, typename PointNT> void
pcl::SmoothedSurfacesKeypoint<PointT, PointNT>::detectKeypoints (PointCloudT &output)
{
  // Calculate differences for each cloud
  std::vector<std::vector<float> > diffs (scales_.size ());

  // The cloud with the smallest scale has no differences
  std::vector<float> aux_diffs (input_->points.size (), 0.0f);
  diffs[scales_[0].second] = aux_diffs;

  cloud_trees_[scales_[0].second]->setInputCloud (clouds_[scales_[0].second]);
  for (size_t scale_i = 1; scale_i < clouds_.size (); ++scale_i)
  {
    size_t cloud_i = scales_[scale_i].second,
           cloud_i_minus_one = scales_[scale_i - 1].second;
    diffs[cloud_i].resize (input_->points.size ());
    PCL_INFO ("cloud_i %u cloud_i_minus_one %u\n", cloud_i, cloud_i_minus_one);
    for (size_t point_i = 0; point_i < input_->points.size (); ++point_i)
      diffs[cloud_i][point_i] = cloud_normals_[cloud_i]->points[point_i].getNormalVector3fMap ().dot (
          clouds_[cloud_i]->points[point_i].getVector3fMap () - clouds_[cloud_i_minus_one]->points[point_i].getVector3fMap ());

    // Setup kdtree for this cloud
    cloud_trees_[cloud_i]->setInputCloud (clouds_[cloud_i]);
  }

  // Find minima and maxima in differences inside the input cloud
  typename pcl::search::Search<PointT>::Ptr input_tree = cloud_trees_.back ();
  for (int point_i = 0; point_i < static_cast<int> (input_->points.size ()); ++point_i)
  {
    std::vector<int> nn_indices;
    std::vector<float> nn_distances;
    input_tree->radiusSearch (point_i, input_scale_ * neighborhood_constant_, nn_indices, nn_distances);

    bool is_min = true, is_max = true;
    for (std::vector<int>::iterator nn_it = nn_indices.begin (); nn_it != nn_indices.end (); ++nn_it)
      if (*nn_it != point_i)
      {
        if (diffs[input_index_][point_i] < diffs[input_index_][*nn_it])
          is_max = false;
        else if (diffs[input_index_][point_i] > diffs[input_index_][*nn_it])
          is_min = false;
      }

    // If the point is a local minimum/maximum, check if it is the same over all the scales
    if (is_min || is_max)
    {
      bool passed_min = true, passed_max = true;
      for (size_t scale_i = 0; scale_i < scales_.size (); ++scale_i)
      {
        size_t cloud_i = scales_[scale_i].second;
        // skip input cloud
        if (cloud_i == clouds_.size () - 1)
          continue;

        nn_indices.clear ();
        nn_distances.clear ();
        cloud_trees_[cloud_i]->radiusSearch (point_i, scales_[scale_i].first * neighborhood_constant_, nn_indices, nn_distances);

        bool is_min_other_scale = true, is_max_other_scale = true;
        for (std::vector<int>::iterator nn_it = nn_indices.begin (); nn_it != nn_indices.end (); ++nn_it)
          if (*nn_it != point_i)
          {
            if (diffs[input_index_][point_i] < diffs[cloud_i][*nn_it])
              is_max_other_scale = false;
            else if (diffs[input_index_][point_i] > diffs[cloud_i][*nn_it])
              is_min_other_scale = false;
          }

        if (is_min == true && is_min_other_scale == false)
          passed_min = false;
        if (is_max == true && is_max_other_scale == false)
          passed_max = false;

        if (!passed_min && !passed_max)
          break;
      }

      // check if point was minimum/maximum over all the scales
      if (passed_min || passed_max)
        output.points.push_back (input_->points[point_i]);
    }
  }

  output.header = input_->header;
  output.width = static_cast<uint32_t> (output.points.size ());
  output.height = 1;

  // debug stuff
  // for (size_t scale_i = 0; scale_i < scales_.size (); ++scale_i)
  // {
  //   PointCloud<PointXYZI>::Ptr debug_cloud (new PointCloud<PointXYZI> ());
  //   debug_cloud->points.resize (input_->points.size ());
  //   debug_cloud->width = input_->width;
  //   debug_cloud->height = input_->height;
  //   for (size_t point_i = 0; point_i < input_->points.size (); ++point_i)
  //   {
  //     debug_cloud->points[point_i].intensity = diffs[scales_[scale_i].second][point_i];
  //     debug_cloud->points[point_i].x = input_->points[point_i].x;
  //     debug_cloud->points[point_i].y = input_->points[point_i].y;
  //     debug_cloud->points[point_i].z = input_->points[point_i].z;
  //   }
  //   char str[512]; sprintf (str, "diffs_%2d.pcd", scale_i);
  //   io::savePCDFile (str, *debug_cloud);
  // }
}
void gradient_descent_local_planner_t::gradient_steer(const state_t* start, const state_t* goal, plan_t& plan)
{
    //for now only going to do piecewise constant plans
    plan.clear();
    traj.link_space(world_model->get_state_space());
    plan.append_onto_front(max_multiple * simulation::simulation_step);
    const space_t* control_space = world_model->get_control_space();
    sampler->sample(control_space, plan[0].control);
    unsigned count = 0;
    std::vector<double> old_control(control_space->get_dimension());
    std::vector<double> test_control(control_space->get_dimension());
    std::vector<double> control_below(control_space->get_dimension());
    std::vector<double> control_above(control_space->get_dimension());
    std::vector<std::pair<double, double> > diffs(control_space->get_dimension());
    while( count < attempts )
    {
        traj.clear();
        plan[0].duration = max_multiple * simulation::simulation_step;
        propagate(start, plan, traj);
        unsigned num_sim_steps = 0;
        unsigned best_sim_step = 0;
        double best_dist = PRX_INFINITY;
        for( trajectory_t::iterator it = traj.begin(); it != traj.end(); it++ )
        {
            num_sim_steps++;
            if( metric->distance_function(*it, goal) < best_dist )
            {
                best_dist = metric->distance_function(*it, goal);
                best_sim_step = num_sim_steps;
            }
        }
        plan[0].duration = best_sim_step * simulation::simulation_step;
        if( best_dist < accepted_radius )
        {
            return;
        }
        else
        {
            state_t* state = world_model->get_state_space()->alloc_point();
            for( unsigned i = 0; i < control_space->get_dimension(); i++ )
            {
                old_control[i] = plan[0].control->at(i);
                control_below[i] = old_control[i] - .01 * (control_space->get_bounds()[i]->get_upper_bound() - control_space->get_bounds()[i]->get_lower_bound());
                control_above[i] = old_control[i] + .01 * (control_space->get_bounds()[i]->get_upper_bound() - control_space->get_bounds()[i]->get_lower_bound());
                diffs[i].first = diffs[i].second = 0;
            }
            for( unsigned i = 0; i < control_space->get_dimension(); i++ )
            {
                test_control = old_control;
                test_control[i] = control_below[i];
                control_space->set_from_vector(test_control, plan[0].control);
                propagate_step(start, plan, state);
                diffs[i].first = metric->distance_function(state, goal);
                test_control[i] = control_above[i];
                control_space->set_from_vector(test_control, plan[0].control);
                propagate_step(start, plan, state);
                diffs[i].second = metric->distance_function(state, goal);
            }
            world_model->get_state_space()->free_point(state);
            //now that all the differences have been computed, determine the direction to move
            test_control = old_control;
            for( unsigned i = 0; i < control_space->get_dimension(); i++ )
            {
                test_control[i] += (diffs[i].first - diffs[i].second)*(learning_rate);
            }
            control_space->set_from_vector(test_control, plan[0].control);
        }
        count++;
    }
    // PRX_INFO_S(plan.print());
}
int as154_seas(double *inp, int N, int optmethod, int p, int d, int q, int s,
               int P, int D, int Q, double *phi, double *theta, double *PHI,
               double *THETA, double *wmean, double *var, double *loglik, double *hess)
{
	int i, pq, retval, length, offset, ret;
	double *b, *tf, *x, *inp2, *dx, *thess;
	int *ipiv;
	double maxstep;
	alik_seas_object obj;
	custom_function as154_min;

	obj = alik_seas_init(p, d, q, s, P, D, Q, N);
	inp2 = (double*)malloc(sizeof(double)* (N - s*D));
	pq = obj->pq;
	b = (double*)malloc(sizeof(double)* pq);
	tf = (double*)malloc(sizeof(double)* pq);
	thess = (double*)malloc(sizeof(double)* pq*pq);
	dx = (double*)malloc(sizeof(double)* pq);
	ipiv = (int*)malloc(sizeof(int)* pq);
	length = N;
	maxstep = 1.0;

	css_seas(inp, N, optmethod, p, d, q, s, P, D, Q, phi, theta, PHI, THETA,
	         wmean, var, loglik, hess);

	if (D > 0) {
		N = diffs(inp, N, D, s, inp2); // seasonal differencing
	} else {
		for (i = 0; i < N; ++i) {
			inp2[i] = inp[i];
		}
	}

	x = (double*)malloc(sizeof(double)* (N - d));

	if (d > 0) {
		N = diff(inp2, N, d, x); // No need to demean x
	} else {
		for (i = 0; i < N; ++i) {
			x[i] = inp2[i];
		}
	}

	obj->N = N;
	offset = obj->offset;

	for (i = 0; i < p; ++i) {
		b[i] = phi[i];
	}
	for (i = 0; i < q; ++i) {
		b[p + i] = -theta[i];
	}
	for (i = 0; i < P; ++i) {
		b[p + q + i] = PHI[i];
	}
	for (i = 0; i < Q; ++i) {
		b[p + q + P + i] = -THETA[i];
	}
	if (obj->M == 1) {
		b[p + q + P + Q] = *wmean;
	}
	obj->mean = *wmean;
	//mdisplay(b, 1, p + q + P + Q);

	for (i = 0; i < N; ++i) {
		obj->x[offset + i] = obj->x[offset + 2 * N + i] = x[i];
	}
	for (i = N; i < 2 * N; ++i) {
		obj->x[offset + i] = 0.0;
	}
	//printf("\n %d %g ", pq, maxstep);

	// custom_function as154_min = { fas154_seas, obj };
	as154_min.funcpt = fas154_seas;
	as154_min.params = obj;

	retval = fminunc(&as154_min, NULL, pq, b, maxstep, optmethod, tf);

	if (retval == 0) {
		ret = 0;
	} else if (retval == 15) {
		ret = 15;
	} else if (retval == 4) {
		ret = 4;
	} else {
		ret = 1;
	}

	for (i = 0; i < pq; ++i) {
		dx[i] = 1.0;
	}

	hessian_fd(&as154_min, tf, pq, dx, obj->eps, hess);
	mtranspose(hess, pq, pq, thess);

	for (i = 0; i < pq*pq; ++i) {
		thess[i] = (N - d - s*D) * 0.5 * (hess[i] + thess[i]);
	}

	ludecomp(thess, pq, ipiv);
	minverse(thess, pq, ipiv, hess);

	for (i = 0; i < p; ++i) {
		phi[i] = tf[i];
	}
	for (i = 0; i < q; ++i) {
		theta[i] = -tf[p + i];
	}
	for (i = 0; i < P; ++i) {
		PHI[i] = tf[p + q + i];
	}
	for (i = 0; i < Q; ++i) {
		THETA[i] = -tf[p + q + P + i];
	}
	if (obj->M == 1) {
		*wmean = tf[p + q + P + Q];
	} else {
		*wmean = 0.0;
	}

	*var = (obj->ssq) / (double)N;
	*loglik = obj->loglik;
	//printf("MEAN %g \n", mean(obj->x+N, N));
	//mdisplay(obj->x + N, 1, N);

	free(b);
	free(tf);
	free(inp2);
	free(x);
	free(dx);
	free(thess);
	free(ipiv);
	free_alik_seas(obj);
	return ret;
}
template <typename PointT, typename PointNT> float
pcl::SurfelSmoothing<PointT, PointNT>::smoothCloudIteration (PointCloudInPtr &output_positions,
                                                             NormalCloudPtr &output_normals)
{
  // PCL_INFO ("SurfelSmoothing: cloud smoothing iteration starting ...\n");

  output_positions = PointCloudInPtr (new PointCloudIn);
  output_positions->points.resize (interm_cloud_->points.size ());
  output_normals = NormalCloudPtr (new NormalCloud);
  output_normals->points.resize (interm_cloud_->points.size ());

  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  std::vector<float> diffs (interm_cloud_->points.size ());

  float total_residual = 0.0f;
  for (size_t i = 0; i < interm_cloud_->points.size (); ++i)
  {
    Eigen::Vector4f smoothed_point = Eigen::Vector4f::Zero ();
    Eigen::Vector4f smoothed_normal = Eigen::Vector4f::Zero ();

    // get neighbors
    // @todo using 5x the scale for searching instead of all the points to avoid O(N^2)
    tree_->radiusSearch (interm_cloud_->points[i], 5*scale_, nn_indices, nn_distances);

    float theta_normalization_factor = 0.0;
    std::vector<float> theta (nn_indices.size ());
    for (size_t nn_index_i = 0; nn_index_i < nn_indices.size (); ++nn_index_i)
    {
      float dist = pcl::squaredEuclideanDistance (interm_cloud_->points[i], input_->points[nn_indices[nn_index_i]]); //interm_cloud_->points[nn_indices[nn_index_i]]);
      float theta_i = exp ( (-1) * dist / scale_squared_);
      theta_normalization_factor += theta_i;

      smoothed_normal += theta_i * interm_normals_->points[nn_indices[nn_index_i]].getNormalVector4fMap ();
      theta[nn_index_i] = theta_i;
    }
    smoothed_normal /= theta_normalization_factor;
    smoothed_normal(3) = 0.0f;
    smoothed_normal.normalize ();

    // find minimum along the normal
    float e_residual;
    smoothed_point = interm_cloud_->points[i].getVector4fMap ();
    while (1)
    {
      e_residual = 0.0f;
      smoothed_point(3) = 0.0f;
      for (size_t nn_index_i = 0; nn_index_i < nn_indices.size (); ++nn_index_i)
      {
        Eigen::Vector4f neighbor = input_->points[nn_indices[nn_index_i]].getVector4fMap (); //interm_cloud_->points[nn_indices[nn_index_i]].getVector4fMap ();
        neighbor(3) = 0.0f;
        float dot_product = smoothed_normal.dot (neighbor - smoothed_point);
        e_residual += theta[nn_index_i] * dot_product; // * dot_product;
      }
      e_residual /= theta_normalization_factor;
      if (e_residual < 1e-5)
        break;
      smoothed_point = smoothed_point + e_residual * smoothed_normal;
    }
    total_residual += e_residual;

    output_positions->points[i].getVector4fMap () = smoothed_point;
    output_normals->points[i].getNormalVector4fMap () = normals_->points[i].getNormalVector4fMap (); //smoothed_normal;
  }

  // std::cerr << "Total residual after iteration: " << total_residual << std::endl;
  // PCL_INFO ("SurfelSmoothing done iteration\n");
  return total_residual;
}
/*
 * grind - memory-bound task
 *
 * Note that this won't work until you have a VM system.
 */
void
grind(unsigned groupid, unsigned id)
{
	unsigned *p;
	unsigned i, n, s;

	(void)groupid;

	waitstart();

	/* each grind task uses 768K */
	n = (768*1024) / sizeof(*p);
	p = malloc(n * sizeof(*p));
	if (p == NULL) {
		if (errno == ENOSYS) {
			/*
			 * If we don't have sbrk, just bail out with
			 * "success" instead of failing the whole
			 * workload.
			 */
			errx(0, "grind: sbrk/malloc not implemented");
		}
		err(1, "malloc");
	}

	/* First, get some random integers. */
	warnx("grind %u: seeding", id);
	srandom(1753);
	for (i=0; i<n; i++) {
		p[i] = random();
	}

	/* Now sort them. */
	warnx("grind %u: sorting", id);
	qsort(p, n, sizeof(p[0]), uintcmp);

	/* Sort by a different comparison. */
	warnx("grind %u: sorting alternately", id);
	qsort(p, n, sizeof(p[0]), altcmp);

	/* Take the sum. */
	warnx("grind %u: summing", id);
	s = sum(p, n);
	warnx("grind %u: sum is %u (should be %u)", id, s, RIGHT);
	if (s != RIGHT) {
		errx(1, "grind %u FAILED", id);
	}

	/* Take first differences. */
	warnx("grind %u: first differences", id);
	diffs(p, n);

	/* Sort. */
	warnx("grind %u: sorting", id);
	qsort(p, n, sizeof(p[0]), uintcmp);

	warnx("grind %u: summing", id);
	s = sum(p, n);
	warnx("grind %u: sum is %u (should be 0)", id, s);
	if (s != 0) {
		errx(1, "grind %u FAILED", id);
	}
}
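/*
 * Helpers assumed by grind() but not shown here (altcmp and the constant
 * RIGHT also live elsewhere). Sketches only: the "sum should be 0" check
 * after diffs() suggests a *circular* first difference, where the first
 * element is differenced against the last, so the sums telescope to 0
 * modulo 2^32. Hypothetical reconstructions:
 */
static int
uintcmp(const void *av, const void *bv)
{
	unsigned a = *(const unsigned *)av;
	unsigned b = *(const unsigned *)bv;
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}

static unsigned
sum(const unsigned *p, unsigned n)
{
	unsigned i, s;

	s = 0;
	for (i = 0; i < n; i++) {
		s += p[i];	/* unsigned wraparound is intended */
	}
	return s;
}

static void
diffs(unsigned *p, unsigned n)
{
	unsigned i, prev, cur;

	prev = p[n - 1];
	for (i = 0; i < n; i++) {
		cur = p[i];
		p[i] = cur - prev;
		prev = cur;
	}
}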
/* the function called by each thread is "mainLoop" */
void*
mainLoop(void* arg)
{
  loopArg *loopA = (loopArg*)arg;
  istream* testSStream = loopA->inpt;
  ostream* pstatStream = loopA->outpt;
  int id = loopA->id;
  double log600 = log2(600.0);
  PrintStack printStack;
  for( ; ; )
    {
      InputTree correct;
      InputTree* cuse;

      /* first lock to read in the material */
      pthread_mutex_lock(&readlock);
      if( !*testSStream )
	{
	  pthread_mutex_unlock(&readlock);
	  break;
	}
      *testSStream >> correct;
      if( !*testSStream )
	{
	  pthread_mutex_unlock(&readlock);
	  break;
	}
      totWords += correct.length()+1;
      int locCount = sentenceCount++;
      list<ECString> wtList;
      correct.make(wtList);
      SentRep sr( wtList );  // used in precision calc
      ExtPos extPos;
      if(params.extPosIfstream)
	extPos.read(params.extPosIfstream,sr);
      pthread_mutex_unlock(&readlock);

      cuse = &correct;
      int len = correct.length();
      if(len > params.maxSentLen) continue;
      //cerr << "Len = " << len << endl;
      /*
	if( !params.field().in(sentenceCount) )
	  {
	    sentenceCount++;
	    continue;
	  }
	if(sentenceCount < -1)
	  {
	    sentenceCount++;
	    continue;
	  }
	sentenceCount++;
      */
      vector<ECString> poslist;
      correct.makePosList(poslist);
      ScoreTree sc;
      sc.setEquivInts(poslist);
      MeChart* chart = new MeChart( sr,extPos,id );
      chart->parse( );
      Item* topS = chart->topS();
      if(!topS)
	{
	  cerr << "Parse failed" << endl;
	  cerr << correct << endl;
	  error(" could not parse ");
	  delete chart;
	  continue;
	}

      // compute the outside probabilities on the items so that we can
      // skip doing detailed computations on the really bad ones
      chart->set_Alphas();

      Bst& bst = chart->findMapParse();
      if( bst.empty())
	error( "mapProbs did not return answer");
      float bestF = -1;
      int i;
      int numVersions = 0;
      Link diffs(0);
      //cerr << "Need num diff: " << Bchart::Nth << endl;
      printStruct printS;
      printS.sentenceCount = locCount;
      printS.numDiff = 0;
      for(numVersions = 0 ; ; numVersions++)
	{
	  short pos = 0;
	  Val* val = bst.next(numVersions);
	  if(!val)
	    {
	      //cerr << "Breaking" << endl;
	      break;
	    }
	  InputTree* mapparse = inputTreeFromBsts(val,pos,sr);
	  bool isU;
	  int dummy = 0;
	  diffs.is_unique(mapparse, isU, dummy);
	  //cerr << "V " << isU << " " << numVersions << *mapparse << endl;
	  if(isU)
	    {
	      printS.probs.push_back(val->prob());
	      printS.trees.push_back(mapparse);
	      printS.numDiff++;
	    }
	  else
	    {
	      delete mapparse;
	    }
	  if(printS.numDiff >= Bchart::Nth)
	    break;
	  if(numVersions > 20000)
	    break;
	}
      ParseStats* locPst = new ParseStats[Bchart::Nth];
      ParseStats bestPs;
      for(i = 0 ; i < printS.numDiff ; i++)
	{
	  InputTree *mapparse = printS.trees[i];
	  assert(mapparse);
	  sc.trips.clear();
	  ParseStats pSt;
	  sc.recordGold(cuse,pSt);
	  sc.precisionRecall(mapparse,pSt);
	  float newF = pSt.fMeasure();
	  cerr << printS.sentenceCount << "\t" << newF << endl;
	  if(newF > bestF)
	    {
	      bestF = newF;
	      bestPs = pSt;
	    }
	  if(histPoints[i])
	    {
	      locPst[i] += bestPs;
	    }
	}
      if(printS.numDiff < Bchart::Nth)
	{
	  for(i = printS.numDiff ; i < Bchart::Nth ; i++)
	    {
	      if(histPoints[i]) locPst[i] += bestPs;
	    }
	}

      pthread_mutex_lock(&scorelock);
      for(i = 0 ; i < Bchart::Nth ; i++)
	totPst[i] += locPst[i];
      pthread_mutex_unlock(&scorelock);

      int numPrinted;

      /* put the sentence with which we just finished at the end of the printStack */
      printStack.push_back(printS);
      PrintStack::iterator psi = printStack.begin();
      /* now look at each item from the front of the print stack
	 to see if it should be printed now */
      pthread_mutex_lock(&writelock);
      for( numPrinted = 0; psi != printStack.end(); numPrinted++ )
	{
	  printStruct& pstr = (*psi);
	  if(pstr.sentenceCount != printCount)
	    break;
	  *pstatStream << pstr.sentenceCount << "\t" << pstr.numDiff << "\n";
	  printCount++;
	  for(i = 0 ; i < pstr.numDiff ; i++)
	    {
	      InputTree* mapparse = pstr.trees[i];
	      assert(mapparse);
	      double logP = log2(pstr.probs[i]);
	      logP -= (sr.length()*log600);
	      *pstatStream << logP << "\n";
	      if(Bchart::prettyPrint)
		*pstatStream << *mapparse << "\n\n";
	      else
		{
		  mapparse->printproper(*pstatStream);
		  *pstatStream << "\n";
		}
	      delete mapparse;
	    }
	  *pstatStream << endl;
	  psi++;
	}
      pthread_mutex_unlock(&writelock);
      for(i = 0 ; i < numPrinted ; i++)
	printStack.pop_front();

      if(Feature::isLM)
	{
	  double lgram = log2(bst.sum());
	  lgram -= (sr.length()*log600);
	  double pgram = pow(2,lgram);
	  double iptri = chart->triGram();
	  double ltri = (log2(iptri) - sr.length()*log600);
	  double ptri = pow(2.0,ltri);
	  double pcomb1 = (0.667 * pgram) + (0.333 * ptri);
	  double lcom1 = log2(pcomb1);
	  totGram -= lgram;
	  totTri -= ltri;
	  totMix -= lcom1;
	  if(locCount%10 == 9)
	    {
	      cerr << locCount << "\t";
	      cerr << pow(2.0,totGram/(double)totWords);
	      cerr << "\t" << pow(2.0,totTri/(double)totWords);
	      cerr << "\t" << pow(2.0,totMix/(double)(totWords));
	      cerr << endl;
	    }
	}
      if(locCount%50 == 1)
	{
	  cerr << sentenceCount << "\t";
	  for(int i = 0 ; i < Bchart::Nth ; i++)
	    if(histPoints[i])
	      {
		cerr << i << " " << totPst[i].fMeasure() << "\t";
	      }
	  cerr << endl;
	}
      delete chart;
      delete [] locPst;
    }
  return 0;
}
void Do(LocalHeap & lh)
{
  // We proceed in three steps:
  //   1. Compute the difference between Q and q
  //   2. Compute the H(div) Schur complement
  //   3. Apply the Schur complement to the difference

  // grid function with (interpolated) exact flux, grad(u)
  BaseVector& vecQ = Q->GetVector();
  // numerical flux q
  BaseVector& vecq = q->GetVector();
  // p.w. constant gridfunction to store element-wise error
  BaseVector& errvec = err->GetVector();
  errvec.FV<double>() = 0.0;

  double sqer = 0.0;  // this will contain the total error square

  for(int k = 0; k < ma->GetNE(); k++)
  {
    ElementId ei (VOL, k);
    int elndof = ext->GetFE(k,lh).GetNDof();
    Vector<SCAL> diff(elndof);

    // dof nrs: global, global inner, local inner, local Schur
    Array<int> Gn, Ginn, Linn, Lsn;

    // compute the difference between Q and q
    ext->GetDofNrs(k,Gn);    // Global# of all dofs on element k
    diff = SCAL(0.0);
    for(int j = 0; j < elndof; j++)
      diff[j] = vecQ.FV<SCAL>()[Gn[j]] - vecq.FV<SCAL>()[Gn[j]];

    // H(div) Gram matrix (given in two parts in pde file)
    Matrix<double> elmat(elndof), elmat2(elndof);
    elmat = 0.0;
    elmat2 = 0.0;
    hdivip->GetIntegrator(0)->
      CalcElementMatrix(ext->GetFE(ei,lh),ma->GetTrafo(ei,lh),elmat,lh);
    hdivip->GetIntegrator(1)->
      CalcElementMatrix(ext->GetFE(ei,lh),ma->GetTrafo(ei,lh),elmat2,lh);
    elmat += elmat2;

    // compute the H(div) Schur complement
    ext->GetInnerDofNrs(k,Ginn);    // Global# of inner dofs on element k
    for(int j = 0; j < elndof; j++)
      if (Ginn.Contains( Gn[j] ))
        Linn.Append(j);   // Local# of inner dofs on element k
      else
        Lsn.Append(j);    // Local# of Schur dofs on element k

    int ielndof = Linn.Size();
    Matrix<double> elmati(ielndof), elmatiinv(ielndof);
    elmati = elmat.Rows(Linn).Cols(Linn);
    CalcInverse(elmati,elmatiinv);

    // apply Schur complement to the difference
    int selndof = elndof - ielndof;
    Vector<SCAL> diffs(selndof);
    Matrix<double> S(selndof), Asi(selndof,ielndof);
    diffs = diff(Lsn);

    // S = A_ss - A_si * inv(A_ii) * A_is
    Asi = elmat.Rows(Lsn).Cols(Linn);
    S = elmat.Rows(Lsn).Cols(Lsn);
    S -= Asi * elmatiinv * Trans(Asi);

    // error = (S * diffs, diffs)
    errvec.FVDouble()[k] = fabs(InnerProduct(diffs, S * diffs));
    sqer += errvec.FVDouble()[k];
  }

  cout << "Discrete H^(-1/2) norm of error in q = " << sqrt(sqer) << endl;

  // write file (not sure what the last argument of AddVariable does, but 6
  // seems to be the value used everywhere; it appears to initialize an
  // object of class IM (important message))
  GetPDE()->AddVariable (string("fluxerr.")+GetName()+".value", sqrt(sqer), 6);
}
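In matrix form, the element loop above computes, with the element H(div) Gram matrix $A$ partitioned into inner (i) and Schur (s) dofs,

$$
S \;=\; A_{ss}-A_{si}\,A_{ii}^{-1}\,A_{is},
\qquad
\eta_K^2 \;=\; \bigl|\delta_s^{\top} S\,\delta_s\bigr|,
$$

where $\delta = Q-q$ restricted to element $K$ and $\delta_s$ is its Schur-dof part; the printed total is $\sqrt{\sum_K \eta_K^2}$, the discrete $H^{-1/2}$ norm of the flux error. This restates the loop body, nothing more.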
size_t ArithmeticUtilEncoder::encode(byte* start, uint64 size)
{
  std::vector<uint64> counts(256,0);
  long p = out->getPos();
  for(byte* b = start; b != (start+size); b++)
    counts[*b]++;
  // scale the raw counts down to the coder's fixed total
  int sum = 0;
  for(int i = 0; i < 256; i++)
    sum += (counts[i] = counts[i]*SCALE/size);
  long header_pos = out->getPos();
  for(int i = 0; i < 6; i++) out->writeByte(0);
  out->write48bits(size, header_pos);
  long bits_pos = out->getPos();
  for(int i = 0; i < 6; i++) out->writeByte(0);
  bytes_used = 0;
  bytes_used += 6*2;   // the two 48-bit header fields
  bits_used = 0;
  // make sure every byte value has a nonzero scaled count, stealing from
  // other symbols if the total is already at SCALE
  int it = 0;
  int bsum = 0;
  for(int i = 0; i < 256; i++) {
    if(counts[i] == 0) {
      if(sum == SCALE) {
        while(counts[it%256] <= 1) it++;
        counts[(it++)%256]--;
      }
      else sum++;
      counts[i]++;
    }
  }
  // distribute any remaining probability mass
  int add = SCALE - sum;
  for(int i = 0; i < 256; i++) {
    counts[i] += add/256;
    if(i < (add%256)) {
      counts[i]++;
    }
  }
  std::vector<uint64> diffs(256,0);
  long a = bytes_used;
  bytes_used += utils::gammaEncode(counts, out);
  long pp = bytes_used;
  // build the cumulative ranges for each byte value
  std::vector<SYMBOL> symbols(256);
  int cumul = 0;
  for(int i = 0; i < 256; i++) {
    symbols[i].scale = SCALE;
    symbols[i].low_count = cumul;
    symbols[i].high_count = cumul = cumul + counts[i];
  }
  size_t tmp = bytes_used;
  byte* ptr = start;
  while(ptr != start + size) {
    encode_symbol(symbols[*ptr]);
    ptr++;
  }
  flush();
  while(bytes_used - tmp < 4) {
    out->writeByte(0);
    bytes_used++;
    bits_used += 8;
  }
  //std::cout<<"bytes used for counts: "<<1.0*(pp-a)/(bytes_used-a)<<"\n";
  std::cout<<"bytes used for counts: "<<(pp-a)<<"\n";
  out->write48bits(bits_used/8, bits_pos);
  return bytes_used;
}
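// The SYMBOL record filled in above is assumed to describe a cumulative-count
// range out of SCALE (hypothetical layout; the real definition lives with
// encode_symbol()):
struct SYMBOL {
    uint64 scale;       // total of the scaled counts, i.e. SCALE
    uint64 low_count;   // cumulative count of all byte values below this one
    uint64 high_count;  // low_count + scaled count of this byte value
};
// encode_symbol() then narrows the coder's current interval to the
// sub-interval [low_count/scale, high_count/scale) -- the standard
// arithmetic-coding step.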