/* Stereo FM demodulation worker thread.
 * Waits for the capture thread to signal that `both.buf` holds fresh IQ
 * data, then derives left/right channels, demodulates each, resamples them
 * to the output rate, and emits interleaved audio via output().
 * `arg` is unused; the loop runs until the global `do_exit` flag is set,
 * after which the rtlsdr async reader is cancelled so it can exit too. */
static void *demod_thread_fn(void *arg) {
	while (!do_exit) {
		/* Block until the producer signals a full buffer. */
		safe_cond_wait(&ready, &ready_m);
		/* Hold the write lock while reading/reducing the shared buffer. */
		pthread_rwlock_wrlock(&both.rw);
		downsample(&both);
		/* Copy the common downsampled block into both channel paths.
		 * 2*len_out bytes — the buffers appear to hold 16-bit samples
		 * (2 bytes each); confirm against the struct definition. */
		memcpy(left.buf, both.buf, 2*both.len_out);
		memcpy(right.buf, both.buf, 2*both.len_out);
		pthread_rwlock_unlock(&both.rw);
		/* rotate_90 applies a +fs/4 spectral shift — presumably to center
		 * the left channel's band before decimation; confirm. */
		rotate_90(left.buf, left.len_in);
		downsample(&left);
		memcpy(left_demod.buf, left.buf, 2*left.len_out);
		demodulate(&left_demod);
		if (dc_filter) { dc_block_filter(&left_demod);}
		//if (oversample) {
		//	downsample(&left);}
		/* Rate-convert the demodulated audio into the stereo output slot. */
		arbitrary_upsample(left_demod.result, stereo.buf_left, left_demod.result_len, stereo.bl_len);
		/* Mirror of the left path with a -fs/4 shift for the right channel. */
		rotate_m90(right.buf, right.len_in);
		downsample(&right);
		memcpy(right_demod.buf, right.buf, 2*right.len_out);
		demodulate(&right_demod);
		if (dc_filter) { dc_block_filter(&right_demod);}
		//if (oversample) {
		//	downsample(&right);}
		arbitrary_upsample(right_demod.result, stereo.buf_right, right_demod.result_len, stereo.br_len);
		output();
	}
	/* Unblock the dongle reader thread so the process can shut down. */
	rtlsdr_cancel_async(dev);
	return 0;
}
/* Build a Gaussian pyramid over `im`.
 * Level 0 aliases the input image (no copy); every deeper level lvl is the
 * downsampled version of level lvl-1.  Per-level dimensions come from
 * pyr_r/pyr_c, and tmp_halfsize is scratch space for the reduction.
 * The r/c parameters are not read here (level-0 size is pyr_r[0]/pyr_c[0]). */
void gaussian_pyramid(double *im, uint32_t r, uint32_t c, uint32_t nlev, double *tmp_halfsize, double **pyr, uint32_t *pyr_r, uint32_t *pyr_c){
    pyr[0] = im;
    for(int lvl = 1; lvl < nlev; lvl++){
        /* downsample image and store into level lvl */
        downsample(pyr[lvl-1], pyr_r[lvl-1], pyr_c[lvl-1], tmp_halfsize,
                   pyr_r[lvl], pyr_c[lvl], pyr[lvl]);
    }
}
/* Render a bloom post-process: extract the bright parts of `input`, blur
 * them at two successively reduced resolutions, and additively composite
 * the glow with the original scene into `output`.  The call order is a
 * fixed render pipeline — each stage consumes the previous stage's texture. */
void BloomEffect::apply(const sf::RenderTexture& input, sf::RenderTarget& output)
{
    /* (Re)allocate intermediate render textures for this input size. */
    prepareTextures(input.getSize());

    /* Keep only pixels above the brightness threshold. */
    filterBright(input, mBrightnessTexture);

    /* Blur at the first reduced resolution (downsample presumably halves
     * the size — confirm against its implementation). */
    downsample(mBrightnessTexture, mFirstPassTextures[0]);
    blurMultipass(mFirstPassTextures);

    /* Blur again at a further reduced resolution for a wider glow. */
    downsample(mFirstPassTextures[0], mSecondPassTextures[0]);
    blurMultipass(mSecondPassTextures);

    /* Combine the two blur scales, finalize the texture, then composite
     * the result with the unmodified scene into the target. */
    add(mFirstPassTextures[0], mSecondPassTextures[0], mFirstPassTextures[1]);
    mFirstPassTextures[1].display();
    add(input, mFirstPassTextures[1], output);
}
// get data from /dev/dsp, apply a FFT on it and do HPS. // return the freq with max amplitude (hopefully ;-) void get_max_amp(void) { float max; double temp; max = temp = 0.0; int i=0,maxi=0; double prefft[N]; fftw_complex postfft[N]; fftw_plan plan; float amp[N], amp_down2[N], amp_down4[N]; while (1) { get_samples(sample, N); // add function to check for volume level // do more calculations if volume is above a threshold else return for(i=0;i<N;i++) { prefft[i]=((float)sample[i] * mycosine[i]); } // do FFT plan = fftw_plan_dft_r2c_1d(N, prefft, postfft, 0); fftw_execute(plan); // calculate amplitude of the freq's for (i = 0; i < 9000; i++ ) { amp[i] = postfft[i][0] * postfft[i][0] + postfft[i][1] * postfft[i][1]; } //downsample by 2 downsample(amp, amp_down2, 4500); //downsample by 2 more downsample(amp_down2, amp_down4, 2250); // downsample by a factor of three also max_amp = 0; max = 0.0; // multiply the amplitudes for (i = 20; i < 1000; i++) { temp = amp[i] * amp_down2[i] * amp_down4[i]; if (temp > max) { max = temp; maxi = i; } } max_amp = maxi; new_freq = 1; } }
/* Douglas-Peucker-style track simplification.
 * Linearly interpolates a point (by time fraction) between the segment's
 * first and last fixes, measures each interior point's deviation from that
 * interpolation, and recurses on both halves around the furthest point when
 * the deviation exceeds `max_error`; otherwise the segment collapses to its
 * two endpoints and `max_error_kms` records the worst dropped deviation.
 *
 * NOTE(review): the first half `a` spans [0, furthest) — it EXCLUDES the
 * split point, unlike classic Douglas-Peucker which includes the split
 * point in both halves.  Confirm this asymmetry is intentional.
 * `furthest` is only assigned inside the loop; it is always set because
 * size >= 3 guarantees at least one iteration and distance() is presumably
 * non-negative (> initial max_dist of -1) — confirm. */
vector<GPSPoint> DPCompressor::downsample(vector<GPSPoint> points) {
    // Fewer than 3 points cannot be reduced further.
    if(points.size() < 3) return points;

    GPSPoint first = points[0];
    GPSPoint last = points[points.size() - 1];
    double dlat = last.get_latitude() - first.get_latitude();
    double dlon = last.get_longitude() - first.get_longitude();
    double dt = last.get_time() - first.get_time();

    double max_kms = -1;
    int furthest;
    double max_dist = -1;
    for(size_t i = 1; i < points.size() - 1; ++i) {
        // Fraction of the segment's time span elapsed at point i.
        double tr = (points[i].get_time() - first.get_time()) / dt;
        // Time-interpolated position on the straight first->last segment.
        GPSPoint approx(points[i].get_time(), first.get_latitude() + dlat * tr, first.get_longitude() + dlon * tr);
        double dist = points[i].distance(approx);
        max_kms = max(max_kms, points[i].distance_kms(approx));
        if(dist > max_dist) {
            max_dist = dist;
            furthest = i;
        }
    }

    if(max_dist > max_error) {
        // Too much deviation: split at the furthest point and recurse.
        vector<GPSPoint> a(points.begin(), points.begin() + furthest);
        vector<GPSPoint> b(points.begin() + furthest, points.end());
        a = downsample(a);
        b = downsample(b);
        vector<GPSPoint> ab;
        ab.reserve(a.size() + b.size());
        ab.insert(ab.end(), a.begin(), a.end());
        ab.insert(ab.end(), b.begin(), b.end());
        return ab;
    } else {
        // Segment accepted: track the largest deviation we discarded.
        max_error_kms = max(max_error_kms, max_kms);
    }
    // Collapse the accepted segment to its endpoints.
    vector<GPSPoint> ab;
    ab.push_back(first);
    ab.push_back(last);
    return ab;
}
/* Evaluate the Fast Point Feature Histogram (FPFH) descriptor on the
 * active cloud: downsample to `subsample_res_`, compute 33-bin FPFH
 * signatures within `search_radius_`, then hand the raw descriptor data to
 * computeCorrelation().  Timing is reported via `t_`/`time_`. */
void FeatureEval::fast_point_feature_histogram(){
    qDebug() << "params used: " << subsample_res_ << search_radius_;
    qDebug() << "-----------------------------------";

    boost::shared_ptr<PointCloud> _cloud = core_->cl_->active_;
    if(_cloud == nullptr) return;  // nothing selected

    /* Downsample; `sub_idxs` presumably maps original indices to the
     * filtered cloud (it is later passed to computeCorrelation) — confirm
     * against downsample()'s contract. */
    pcl::PointCloud<pcl::PointXYZINormal>::Ptr filt_cloud;
    std::vector<int> sub_idxs;
    filt_cloud = downsample(_cloud, subsample_res_, sub_idxs);

    // Compute
    t_.start();
    qDebug() << "Timer started (FPFH)";
    /* The point type carries its own normals, so the same cloud serves as
     * both the geometry input and the normals input. */
    pcl::FPFHEstimation<pcl::PointXYZINormal, pcl::PointXYZINormal, pcl::FPFHSignature33> fpfh;
    fpfh.setInputCloud(filt_cloud);
    fpfh.setInputNormals(filt_cloud);
    pcl::search::KdTree<pcl::PointXYZINormal>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZINormal>);
    fpfh.setSearchMethod(tree);
    fpfh.setRadiusSearch(search_radius_);
    pcl::PointCloud<pcl::FPFHSignature33>::Ptr fpfhs (new pcl::PointCloud<pcl::FPFHSignature33> ());
    fpfh.compute(*fpfhs);
    qDebug() << "FPFH in " << (time_ = t_.elapsed()) << " ms";

    /* Correlate: reinterpret the packed FPFHSignature33 array as a flat
     * float buffer of stride 33. */
    computeCorrelation(reinterpret_cast<float*>(fpfhs->points.data()), 33, fpfhs->points.size(), sub_idxs);
    if(!visualise_on_) return;
}
void test_segmentation() { // Read in point cloud and downsample pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud = read_pcd(); pcl::PointCloud<pcl::PointXYZRGBA>::Ptr small_cloud(new pcl::PointCloud<pcl::PointXYZRGBA>); downsample(cloud, small_cloud); // Extract Euclidean clusters std::vector<pcl::PointIndices> clusters = euclidean_clusters(small_cloud); // Use region-growing segmentation // std::vector<pcl::PointIndices> clusters = region_growing_segmentation(small_cloud); // Print out cloud and cluster sizes std::cout << "Points before filtering: " << cloud->points.size() << std::endl; std::cout << "Points after filtering: " << small_cloud->points.size() << std::endl; std::cout << "Number of clusters: " << clusters.size() << std::endl; int sum = 0; for (int i = 0; i < clusters.size(); i++) { std::cout << "Points in cluster " << i << ": " << clusters[i].indices.size() << std::endl; sum += clusters[i].indices.size(); } std::cout << "Total points in clusters: " << sum << std::endl; }
/* Process one input sample through the exciter's waveshaping distortion.
 * The sample is oversampled into `samples`, each oversampled value runs
 * through an asymmetric polynomial shaper (separate curve parameters for
 * positive and negative excursions) followed by a first-order filter built
 * from the previous med/out state, and the result is decimated back to a
 * single output sample.
 * NOTE(review): samples[2] holds only two values while the loop runs to
 * p->over — assumes p->over <= 2; confirm against the EXCITER setup. */
static inline double distort(EXCITER *p, double in) {
    double samples[2], ans;
    int i;
    /* Cache shaper coefficients locally for the inner loop. */
    double ap = p->ap, an = p->an, kpa = p->kpa, kna = p->kna,
           kpb = p->kpb, knb = p->knb, pwrq = p->pwrq;
    //printf("in: %f\n", in);
    upsample(p, samples, in);
    for (i = 0; i < p->over; i++) {
        double proc = samples[i];
        double med;
        //printf("%d: %f-> ", i, proc);
        /* Asymmetric shaping: different polynomial for each polarity. */
        if (proc >= 0.0) {
            med = (D(ap + proc * (kpa - proc)) + kpb) * pwrq;
        } else {
            med = - (D(an - proc * (kna + proc)) + knb) * pwrq;
        }
        /* Differentiate/integrate against the previous state (srct is
         * presumably a sample-rate-derived coefficient — confirm). */
        proc = p->srct * (med - p->prev_med + p->prev_out);
        //printf("%f\n", proc);
        p->prev_med = M(med);
        p->prev_out = M(proc);
        samples[i] = proc;
    }
    ans = downsample(p, samples);
    //printf("out: %f\n", ans);
    return ans;
}
/* ROS callback that incrementally registers Kinect clouds into one model.
 * The first accepted cloud (after outlier removal) becomes the reference
 * `src_`.  Each later cloud is outlier-filtered, transformed by a per-step
 * amount (num_clouds_ * 40 — presumably degrees of robot rotation; confirm
 * against transformCloud), merged with the model, downsampled, and the
 * merged result republished.  `receive_point_cloud_` gates processing to
 * one cloud per rotation step; rotateRobotino() triggers the next step. */
void KinectRegistration::kinectCloudCallback( const sensor_msgs::PointCloud2ConstPtr &msg )
{
    if( receive_point_cloud_ )
    {
        if( !first_point_cloud_ )
        {
            ROS_INFO("First PointCloud received");
            PointCloud temp;
            pcl::fromROSMsg( *msg, temp );
            /* Clean the cloud and keep it as the registration reference. */
            removeOutliers( temp, src_ );
            first_point_cloud_ = true;
        }
        else
        {
            ROS_INFO("PointCloud received");
            PointCloud temp, temp1;
            pcl::fromROSMsg( *msg, temp );
            removeOutliers( temp, tgt_ );
            /* Rotate the cleaned cloud into the model frame; result is
             * written back into `temp`. */
            transformCloud(tgt_, temp, num_clouds_ * 40 );
            /* Merge with the accumulated model, downsample for publishing. */
            temp1 = src_;
            temp1 += temp;
            downsample( temp1, temp );
            pcl::toROSMsg( temp, cloud_msg_ );
            registered_cloud_pub_.publish( cloud_msg_ );
            /* Note: the UN-downsampled merge becomes the new model. */
            src_ = temp1;
        }
        receive_point_cloud_ = false;
        rotateRobotino();
        num_clouds_++;
    }
    else
        return;
}
/* Build an image pyramid of maxlevel+1 entries: pyr[0] is a deep copy of
 * `img`, and each subsequent entry is the previous one downsampled. */
void buildPyr(Mat& img, std::vector<Mat>& pyr, int maxlevel)
{
    pyr.resize(maxlevel + 1);
    pyr[0] = img.clone();
    // Fill levels 1..maxlevel, each derived from the level above it.
    for (int lvl = 1; lvl <= maxlevel; ++lvl)
        downsample(pyr[lvl - 1], pyr[lvl]);
}
/* Build a Laplacian pyramid (3 channels) over `im`.
 * Each level v < nlev-1 stores the difference between that level's Gaussian
 * image and the upsampled next-coarser Gaussian; the last level stores the
 * coarsest Gaussian itself.  tmp_quartsize/tmp2_quartsize ping-pong as the
 * current/next Gaussian buffers (swapped each level); tmp_halfsize is
 * scratch for the separable down/upsample.  pyr_r/pyr_c give per-level
 * dimensions.  r and c are the level-0 dimensions.
 *
 * FIX: the first-level block declared a second `double *tmp`, shadowing the
 * function-scope `tmp` used for the same ping-pong swap in the main loop
 * (flagged by -Wshadow); it now reuses the outer variable.  The diff-loop
 * counters are also uint32_t to match the unsigned bound.
 * NOTE(review): `0 < nlev-1` underflows for nlev == 0 — callers presumably
 * guarantee nlev >= 2 (the final memcpy reads tmp_quartsize); confirm. */
void laplacian_pyramid(double *im, uint32_t r, uint32_t c, uint32_t nlev, double *tmp_halfsize, double *tmp_quartsize, double *tmp2_quartsize, double **pyr, uint32_t *pyr_r, uint32_t *pyr_c){
    uint32_t S_r = r;  /* next (coarser) level size */
    uint32_t S_c = c;
    uint32_t T_r = r;  /* current level size */
    uint32_t T_c = c;
    double *tmp = NULL;
    if(0 < nlev-1){
        int v = 0;
        S_r = pyr_r[v+1];
        S_c = pyr_c[v+1];
        /* Gaussian-reduce the input, expand it back, subtract from input. */
        downsample(im,T_r,T_c,tmp_halfsize,S_r,S_c,tmp2_quartsize);
        upsample(tmp2_quartsize,S_r,S_c,pyr_r[v],pyr_c[v],tmp_halfsize,pyr[v]);
        for(uint32_t i = 0; i < T_r*T_c*3; i++){
            pyr[v][i] = im[i] - pyr[v][i];
            COST_INC_ADD(1); COST_INC_LOAD(2); COST_INC_STORE(1);
        }
        T_r = S_r;
        T_c = S_c;
        /* Swap buffers so tmp_quartsize holds the level-1 Gaussian. */
        tmp = tmp_quartsize;
        tmp_quartsize = tmp2_quartsize;
        tmp2_quartsize = tmp;
    }
    for(int v = 1; v < nlev-1; v++){
        S_r = pyr_r[v+1];
        S_c = pyr_c[v+1];
        downsample(tmp_quartsize,T_r,T_c,tmp_halfsize,S_r,S_c,tmp2_quartsize);
        upsample(tmp2_quartsize,S_r,S_c,pyr_r[v],pyr_c[v],tmp_halfsize,pyr[v]);
        for(uint32_t i = 0; i < T_r*T_c*3; i++){
            pyr[v][i] = tmp_quartsize[i] - pyr[v][i];
            COST_INC_ADD(1); COST_INC_LOAD(2); COST_INC_STORE(1);
        }
        T_r = S_r;
        T_c = S_c;
        tmp = tmp_quartsize;
        tmp_quartsize = tmp2_quartsize;
        tmp2_quartsize = tmp;
    }
    /* Coarsest level: store the residual Gaussian directly. */
    memcpy(&(pyr[nlev-1][0]),&(tmp_quartsize[0]),T_r*T_c*3*sizeof(double));
    COST_INC_LOAD(T_r*T_c*3);
    COST_INC_STORE(T_r*T_c*3);
}
/* Coarse-to-fine superquadric fitting.
 * Stores the input parameters, initializes an ellipsoid from the cloud's
 * bounding box, then refits at N_ successively finer voxel resolutions from
 * _smax down in steps of ds.  Returns true once the error change between
 * consecutive levels drops below _thresh (result in par_out_); false if the
 * loop exhausts all levels without converging.
 * NOTE(review): the finest level evaluated is _smax - (N_-1)*ds = smin_ + ds,
 * i.e. smin_ itself is never used — confirm whether that is intended. */
template<typename PointT>
bool SQ_fitter<PointT>::SQFitting( const PointCloudPtr &_cloud,
                                   const double &_smax,
                                   const double &_smin,
                                   const int &_N,
                                   const double &_thresh ) {
    // Store parameters
    smax_ = _smax;
    smin_ = _smin;
    N_ = _N;
    thresh_ = _thresh;
    cloud_in_ = _cloud;

    double ds;          // voxel-size decrement per level
    double error_i;     // fit error at the current level
    double error_i_1;   // fit error at the previous level
    double s_i;         // voxel size at the current level
    bool fitted;
    SQ_params par_i, par_i_1;

    // 1. Initialize a ellipsoid with bounding box dimensions
    ds = (smax_ - smin_) / (double) N_;
    error_i = initialize( cloud_in_, par_in_ );
    par_i = par_in_;
    std::cout << "\t [DEBUG] Iteration ["<<0<<"] Cloud size: "<< cloud_in_->points.size()<< ". Error: "<< error_i << std::endl;
    std::cout << "\t Initialization parameters:"<< std::endl;
    printParamsInfo( par_in_ );

    // 2. Iterate through downsampling levels
    fitted = false;
    for( int i = 1; i <= N_; ++i ) {
        // Carry the previous level's result as the new starting point.
        error_i_1 = error_i;
        par_i_1 = par_i;

        s_i = _smax - (i-1)*ds;
        PointCloudPtr cloud_i( new pcl::PointCloud<PointT>() );
        cloud_i = downsample( cloud_in_, s_i );
        error_i = fitting( cloud_i, par_i_1, par_i );
        std::cout << "\t [DEBUG] Iteration ["<<i<<"] Cloud size: "<< cloud_i->points.size()<< ". Voxel size: "<< s_i <<". Error: "<< error_i << std::endl;

        // 3. If error between levels is below threshold, stop
        if( fabs( error_i - error_i_1 ) < thresh_ ) {
            std::cout << "\t [DEBUG-GOOD] Errors diff below threshold. Stop: "<< thresh_ << std::endl;
            par_out_ = par_i;
            fitted = true;
            break;
        }
    }
    return fitted;
}
/* Build a Laplacian pyramid with a configurable channel count.
 * Identical structure to the 3-channel variant: each level v < nlev-1 is
 * the difference between its Gaussian image and the upsampled next-coarser
 * Gaussian; the final level stores the coarsest Gaussian by element copy.
 * tmp_quartsize/tmp2_quartsize ping-pong as current/next Gaussian buffers;
 * tmp_halfsize is down/upsample scratch; pyr_r/pyr_c give per-level sizes.
 *
 * FIX: the first-level block declared a second `double *tmp`, shadowing the
 * function-scope `tmp` used for the identical buffer swap in the main loop
 * (flagged by -Wshadow); it now reuses the outer variable.  The diff-loop
 * counters are also uint32_t to match the unsigned bound.
 * NOTE(review): `0 < nlev-1` underflows for nlev == 0 — callers presumably
 * guarantee nlev >= 2; confirm. */
void laplacian_pyramid(double *im, uint32_t r, uint32_t c, uint32_t channels, uint32_t nlev, double *tmp_halfsize, double *tmp_quartsize, double *tmp2_quartsize, double **pyr, uint32_t *pyr_r, uint32_t *pyr_c){
    uint32_t S_r = r;  /* next (coarser) level size */
    uint32_t S_c = c;
    uint32_t T_r = r;  /* current level size */
    uint32_t T_c = c;
    double *tmp = NULL;
    if(0 < nlev-1){
        int v = 0;
        S_r = pyr_r[v+1];
        S_c = pyr_c[v+1];
        /* Gaussian-reduce the input, expand it back, subtract from input. */
        downsample(im,T_r,T_c,channels,tmp_halfsize,S_r,S_c,tmp2_quartsize);
        upsample(tmp2_quartsize,S_r,S_c,channels,pyr_r[v],pyr_c[v],tmp_halfsize,pyr[v]);
        for(uint32_t i = 0; i < T_r*T_c*channels; i++){
            pyr[v][i] = im[i] - pyr[v][i];
            COST_INC_ADD(1);
        }
        T_r = S_r;
        T_c = S_c;
        /* Swap buffers so tmp_quartsize holds the level-1 Gaussian. */
        tmp = tmp_quartsize;
        tmp_quartsize = tmp2_quartsize;
        tmp2_quartsize = tmp;
    }
    for(int v = 1; v < nlev-1; v++){
        S_r = pyr_r[v+1];
        S_c = pyr_c[v+1];
        downsample(tmp_quartsize,T_r,T_c,channels,tmp_halfsize,S_r,S_c,tmp2_quartsize);
        upsample(tmp2_quartsize,S_r,S_c,channels,pyr_r[v],pyr_c[v],tmp_halfsize,pyr[v]);
        for(uint32_t i = 0; i < T_r*T_c*channels; i++){
            pyr[v][i] = tmp_quartsize[i] - pyr[v][i];
            COST_INC_ADD(1);
        }
        T_r = S_r;
        T_c = S_c;
        tmp = tmp_quartsize;
        tmp_quartsize = tmp2_quartsize;
        tmp2_quartsize = tmp;
    }
    //memcpy(dst,src,src_len*sizeof(double));
    /* Coarsest level: copy the residual Gaussian element by element. */
    for(uint32_t i = 0; i < T_r*T_c*channels; i++){
        pyr[nlev-1][i] = tmp_quartsize[i];
    }
}
/* Forward pass of the subsampling layer: each input feature map is reduced
 * independently by the layer's scale factor, so the output map count always
 * equals the input map count. */
void SubSamplingLayer::feedForward(vector<mat>& fouts, const vector<mat>& fins) {
    const size_t nMaps = fins.size();
    // Grow/shrink the output container only when the count actually differs.
    if (fouts.size() != nMaps)
        fouts.resize(nMaps);

    for (size_t idx = 0; idx < nMaps; ++idx)
        fouts[idx] = downsample(fins[idx], _scale, _input_img_size);
}
/* Downsample overload that also produces the x/y intensity gradients of the
 * reduced image.  Delegates the image reduction to the two-argument
 * downsample(), differentiates the result, and publishes the gradient
 * images through const pointers.
 * NOTE(review): grad_x/grad_y are allocated with the ORIGINAL input
 * dimensions but filled from the (smaller) downsampled image — confirm
 * that derivatives() resizes its outputs, or whether output->width/height
 * were intended here. */
template <typename PointInT, typename IntensityT> void
pcl::tracking::PyramidalKLTTracker<PointInT, IntensityT>::downsample (const FloatImageConstPtr& input,
                                                                      FloatImageConstPtr& output,
                                                                      FloatImageConstPtr& output_grad_x,
                                                                      FloatImageConstPtr& output_grad_y) const
{
  // Reduce the image itself first.
  downsample (input, output);
  FloatImagePtr grad_x (new FloatImage (input->width, input->height));
  FloatImagePtr grad_y (new FloatImage (input->width, input->height));
  derivatives (*output, *grad_x, *grad_y);
  output_grad_x = grad_x;
  output_grad_y = grad_y;
}
void RawImage::downsampleInPlace(unsigned int downsampleX, unsigned int downsampleY) { RawImage* tmp = downsample(this, downsampleX, downsampleY); deletePixels(); _type = tmp->getType(); _bytesPerPixel = tmp->getBytesPerPixel(); _width = tmp->getWidth(); _height = tmp->getHeight(); _pixels = tmp->takePixels(); delete tmp; }
/* Compute the next-level eta manifolds for one cluster.
 * Masks `teta` by the cluster membership, blurs it at the reduced scale,
 * then for each joint channel blurs the masked product and divides by the
 * blurred mask — a normalized (weighted-average) blur restricted to the
 * cluster's pixels.  Results go to etaDst, one Mat per joint channel. */
void AdaptiveManifoldFilterN::computeEta(Mat& teta, Mat1b& cluster, vector<Mat>& etaDst)
{
    CV_DbgAssert(teta.size() == srcSize && cluster.size() == srcSize);

    // Zero everywhere except the cluster's pixels.
    Mat1f tetaMasked = Mat1f::zeros(srcSize);
    teta.copyTo(tetaMasked, cluster);

    // Spatial sigma rescaled to the downsampled grid.
    float sigma_s = (float)(sigma_s_ / getResizeRatio());

    // Denominator: blurred mask weights.
    Mat1f tetaMaskedBlur;
    downsample(tetaMasked, tetaMaskedBlur);
    h_filter(tetaMaskedBlur, tetaMaskedBlur, sigma_s);

    Mat mul;
    etaDst.resize(jointCnNum);
    for (int i = 0; i < jointCnNum; i++)
    {
        // Numerator: blurred (mask * channel), then normalize by the mask blur.
        multiply(tetaMasked, jointCn[i], mul);
        downsample(mul, etaDst[i]);
        h_filter(etaDst[i], etaDst[i], sigma_s);
        divide(etaDst[i], tetaMaskedBlur, etaDst[i]);
    }
}
void OutlierExtraction::compute(const CloudPFControl p, bool w) { control = p; *cloud_input0 = *cloud_input; *cloud_virtual0 = *cloud_virtual; downsample(cloud_input, cloud_input, DOWNSAMPLE_INPUT); downsample(cloud_virtual, cloud_virtual, DOWNSAMPLE_VIRTUAL); /// Set the point cloud 'cloud_virtual' from the virtual point set 'virtualPoints'. /// Particle filter CloudPFInputData input; input.cloud_static = cloud_virtual; input.cloud_moves = cloud_input; input.kdtree = pcl::KdTreeFLANN<pcl::PointXYZ>::Ptr(new pcl::KdTreeFLANN<pcl::PointXYZ>); input.kdtree->setInputCloud(input.cloud_static); RCParticleFilter<CloudPFInputData, CloudPFControl, CloudParticle, CloudPFConfig> pf(config, input, control); QVec vT = config->varianceT; QVec vR = config->varianceR; for (uint i=0; i<config->iters; i++) { CloudParticle::setVarianceT(QVec::vec3(vT(0), vT(1), vT(2))); CloudParticle::setVarianceR(QVec::vec3(vR(0), vR(1), vR(2))); vT.operator*(config->annealingConstant); vR.operator*(config->annealingConstant); pf.step(input, control, true); } CloudParticle best = pf.getBest(); /// Transform the input cloud to match the target using the information provided by the particle filter pcl::transformPointCloud(*cloud_virtual0, *cloud_virtual_transformed, best.getEigenTransformation()); /// Use such cloud to extract outliers pclGetOutliers(cloud_input0, cloud_virtual_transformed, cloud_outliers, DISTANCE_THRESHOLD); }
/* ROS callback: detect a spherical target in an incoming cloud.
 * Pipeline: filter -> downsample -> remove planar surfaces -> fit a sphere.
 * Every intermediate cloud is republished for debugging, and when the
 * sphere search succeeds (it presumably sets the global `found_` — confirm)
 * the sphere center is broadcast as the TF frame "target". */
void point_cloud_callback(const sensor_msgs::PointCloud2::ConstPtr& msg)
{
    found_ = false;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::fromROSMsg (*msg, *cloud);

    // Detection pipeline; each stage writes into a member cloud.
    filter(cloud, filtered_cloud_);
    downsample(filtered_cloud_, downsampled_cloud_);
    remove_planes(downsampled_cloud_, foreground_cloud_);
    search_sphere(foreground_cloud_, det_cloud_, coeff_sphere_);

    //Publish point clouds
    sensor_msgs::PointCloud2 cloudFilteredOut;
    pcl::toROSMsg(*filtered_cloud_, cloudFilteredOut);
    pc_filtered_pub_.publish(cloudFilteredOut);

    sensor_msgs::PointCloud2 cloudDownsampledOut;
    pcl::toROSMsg(*downsampled_cloud_, cloudDownsampledOut);
    pc_downsampled_pub_.publish(cloudDownsampledOut);

    sensor_msgs::PointCloud2 cloudForegroundOut;
    pcl::toROSMsg(*foreground_cloud_, cloudForegroundOut);
    pc_foreground_pub_.publish(cloudForegroundOut);

    sensor_msgs::PointCloud2 cloudDetOut;
    pcl::toROSMsg(*det_cloud_, cloudDetOut);
    pc_det_pub_.publish(cloudDetOut);

    //Publish target location
    if(found_)
    {
        // Sphere model coefficients 0..2 are the center (x, y, z).
        tf::Transform target_tf;
        target_tf.setOrigin(tf::Vector3(coeff_sphere_->values[0], coeff_sphere_->values[1], coeff_sphere_->values[2]));
        tf::Quaternion q;
        q.setRPY(0,0,0);
        target_tf.setRotation(q);
        tf_.sendTransform(tf::StampedTransform(target_tf, ros::Time::now(), msg->header.frame_id, "target"));
        ROS_INFO_STREAM("Target detected at [" << target_tf.getOrigin().x() << ", " << target_tf.getOrigin().y() << ", " << target_tf.getOrigin().z() << "]");
    }
}
/* Evaluate principal curvature as a feature on the active cloud:
 * downsample, estimate principal curvatures within `search_radius_`,
 * correlate the (pc1, pc2) pair via computeCorrelation, and optionally
 * visualise pc1+pc2 per point on the scan grid. */
void FeatureEval::curvature(){
    boost::shared_ptr<PointCloud> _cloud = core_->cl_->active_;
    if(_cloud == nullptr) return;  // nothing selected

    /* big_to_small presumably maps each original point index to its
     * downsampled counterpart (used for drawing below) — confirm against
     * downsample()'s contract. */
    pcl::PointCloud<pcl::PointXYZINormal>::Ptr filt_cloud;
    std::vector<int> big_to_small;
    filt_cloud = downsample(_cloud, subsample_res_, big_to_small);

    // Compute
    t_.start();
    qDebug() << "Timer started (Curvature)";
    // The point type carries its own normals; same cloud serves both inputs.
    pcl::PrincipalCurvaturesEstimation<pcl::PointXYZINormal, pcl::PointXYZINormal, pcl::PrincipalCurvatures> principal_curvatures_estimation;
    principal_curvatures_estimation.setInputCloud (filt_cloud);
    principal_curvatures_estimation.setInputNormals (filt_cloud);
    pcl::search::KdTree<pcl::PointXYZINormal>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZINormal>);
    principal_curvatures_estimation.setSearchMethod (tree);
    principal_curvatures_estimation.setRadiusSearch (search_radius_);
    pcl::PointCloud<pcl::PrincipalCurvatures>::Ptr principal_curvatures (new pcl::PointCloud<pcl::PrincipalCurvatures> ());
    principal_curvatures_estimation.compute (*principal_curvatures);
    qDebug() << "Curvature in " << (time_ = t_.elapsed()) << " ms";

    // Correlate
    /* Stride over the full struct (floats per PrincipalCurvatures record);
     * only 2 values (pc1, pc2) are correlated, starting at offset 3. */
    size_t csize = sizeof(pcl::PrincipalCurvatures)/sizeof(float);
    computeCorrelation(reinterpret_cast<float*>(principal_curvatures->points.data()), 2, principal_curvatures->points.size(), big_to_small, csize, 3);
    if(!visualise_on_) return;

    // Draw
    int w = _cloud->scan_width();
    int h = _cloud->scan_height();
    boost::shared_ptr<std::vector<float>> grid = boost::make_shared<std::vector<float>>(w*h, 0.0f);
    const std::vector<int> & cloudtogrid = _cloud->cloudToGridMap();

    // Paint each original point's curvature sum into its grid cell.
    for(uint big_idx = 0; big_idx < _cloud->size(); big_idx++) {
        int small_idx = big_to_small[big_idx];
        int grid_idx = cloudtogrid[big_idx];
        pcl::PrincipalCurvatures & pc = (*principal_curvatures)[small_idx];
        (*grid)[grid_idx] = pc.pc1 + pc.pc2;
    }

    drawFloats(grid, _cloud);
}
/* Segment a cloud with region growing and locate door centroids.
 * Steps: voxel-grid downsample, estimate normals (k=20), grow regions with
 * a 6-degree smoothness and 0.15 curvature threshold, then pass the
 * clusters to findDoorCentroids() which fills `centroids`.  Clouds with
 * fewer than 10 points are ignored.  The side effect of reg.getColoredCloud()
 * updates the global/member `colored_cloud` for visualisation. */
void processPointCloud(const cloud_t::Ptr &cloud, std::vector<point_t> &centroids)
{
    // Too sparse to segment meaningfully.
    if (cloud->points.size() < 10)
    {
        return;
    }

    // Downsample using Voxel Grid
    cloud_t::Ptr filtered_cloud(new cloud_t);
    downsample(cloud, filtered_cloud);

    // Calculate normals
    pcl::search::Search<point_t>::Ptr tree = boost::shared_ptr<pcl::search::Search<point_t> > (new pcl::search::KdTree<point_t>);
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<point_t, pcl::Normal> normal_estimator;
    normal_estimator.setSearchMethod (tree);
    normal_estimator.setInputCloud (filtered_cloud);
    normal_estimator.setKSearch (20);
    normal_estimator.compute (*normals);

    // Region growing
    pcl::RegionGrowing<point_t, pcl::Normal> reg;
    //Maximum and minimum number of points to classify as a cluster
    // First check: to see if object is large (or takes up a large portion of the laser's view)
    reg.setMinClusterSize(40);
    reg.setMaxClusterSize(1000000);
    reg.setSearchMethod(tree);
    // Number of neighbors to search
    reg.setNumberOfNeighbours(35);
    reg.setInputCloud(filtered_cloud);
    reg.setInputNormals(normals);
    // 6 degrees in radians; neighbouring normals within this angle merge.
    reg.setSmoothnessThreshold(6.0 / 180.0 * M_PI);
    reg.setCurvatureThreshold(.15);

    std::vector<pcl::PointIndices> clusters;
    reg.extract(clusters);

    // Update colored cloud
    colored_cloud = reg.getColoredCloud();

    // Use clusters to find door(s)
    findDoorCentroids(filtered_cloud, clusters, centroids);
}
/* One-shot initialisation of the texture-picker buttons.
 * On the first call: builds a 64x64 RGB thumbnail for every entry of
 * tex_files, marks button 0 as pressed, and points the four gl_textures
 * slots at it.  Subsequent calls are no-ops.  Returns the (already
 * cleared) guard flag, i.e. FALSE once initialisation has run. */
int init_texture_buttons()
{
    static int load_buttons = TRUE;

    if (load_buttons) {
        load_buttons = FALSE;
        const int count = sizeof(tex_files) / sizeof(struct TEXTURE_FILE);
        for (int n = 0; n < count; n++) {
            buttons[n].tfile = &tex_files[n];
            /* 64*64 RGB thumbnail; skipped silently if allocation fails. */
            buttons[n].thumb = malloc(64 * 64 * 3);
            if (buttons[n].thumb) {
                downsample(tex_files[n].data, tex_files[n].w, tex_files[n].h,
                           tex_files[n].bpp, buttons[n].thumb, 64, 64);
            }
        }
        buttons[0].pressed = TRUE;
        for (int n = 0; n < 4; n++) {
            gl_textures[n].tex_button = &buttons[0];
        }
    }
    return load_buttons;
}
/* Evaluate the Difference-of-Normals (DoN) feature: downsample the active
 * cloud, compute normals at two support radii (res1 < res2) via don(),
 * correlate the resulting 3-vector per point, and optionally paint the DoN
 * vectors back onto the scan grid for display. */
void FeatureEval::difference_of_normals(){
    boost::shared_ptr<PointCloud> _cloud = core_->cl_->active_;
    if(_cloud == nullptr) return;  // nothing selected
    int w = _cloud->scan_width();
    int h = _cloud->scan_height();

    pcl::PointCloud<pcl::Normal>::Ptr normals = ne_->getNormals(_cloud);

    // Small and large support radii for the two normal estimates.
    float res1 = subsample_res_;
    float res2 = subsample_res_2_;

    // Downsample
    pcl::PointCloud<pcl::PointXYZINormal>::Ptr smaller_cloud;
    std::vector<int> sub_idxs;
    smaller_cloud = downsample(_cloud, res1, sub_idxs);

    t_.start();
    pcl::PointCloud<pcl::Normal>::Ptr donormals = don(*smaller_cloud, *smaller_cloud, res1, res2);
    (time_ = t_.elapsed());

    // Correlate
    computeCorrelation(reinterpret_cast<float*>(donormals->points.data()), 3, donormals->points.size(), sub_idxs, 4);
    if(!visualise_on_) return;

    // Draw
    /* Expand the per-downsampled-point normals back to original indexing.
     * NOTE(review): big_donormals is then indexed by normals->size() below —
     * presumably both equal the original cloud size; confirm against
     * map_small_to_big(). */
    pcl::PointCloud<pcl::Normal> big_donormals;
    map_small_to_big((*donormals).points, big_donormals.points, sub_idxs);

    const std::vector<int> & cloudtogrid = _cloud->cloudToGridMap();

    boost::shared_ptr<std::vector<Eigen::Vector3f> > grid = boost::make_shared<std::vector<Eigen::Vector3f> >(w*h, Eigen::Vector3f(0.0f, 0.0f, 0.0f));
    for(uint i = 0; i < normals->size(); i++){
        int grid_idx = cloudtogrid[i];
        (*grid)[grid_idx] = big_donormals[i].getNormalVector3fMap();
    }

    // Draw
    drawVector3f(grid, _cloud);
}
void FeatureEval::intensity(){ boost::shared_ptr<PointCloud> _cloud = core_->cl_->active_; if(_cloud == nullptr) return; // Subsample pcl::PointCloud<pcl::PointXYZINormal>::Ptr smaller_cloud; std::vector<int> sub_idxs; smaller_cloud = downsample(_cloud, subsample_res_, sub_idxs); time_ = 0; // Copy to contigious structure std::vector<float> tmp(smaller_cloud->size()); for(uint i = 0; i < smaller_cloud->size(); i++){ tmp[i] = smaller_cloud->points[i].intensity; } // Correlate computeCorrelation(reinterpret_cast<float*>(tmp.data()), 1, tmp.size(), sub_idxs); }
/* Cloak a normal hostname.
 * Hashes "KEY1:host:KEY2" with MD5, appends KEY3 after the 16 digest bytes
 * and hashes again, then folds the second digest into a 32-bit value
 * (`alpha`) via downsample().  The visible result keeps the host's trailing
 * domain components: the prefix up to the `components`-th dot is replaced
 * with "PREFIX-<alpha in hex>.", truncating from the left if the combined
 * string exceeds HOSTLEN.  Returns a pointer to a static buffer — NOT
 * reentrant/thread-safe. */
char *hidehost_normalhost(char *host, int components)
{
    char *p;
    static char buf[512], res[512], res2[512], result[HOSTLEN+1];
    unsigned int alpha, n;
    int comps = 0;

    /* Double MD5 with secret keys so the cloak can't be reversed
     * without KEY1/KEY2/KEY3. */
    ircd_snprintf(0, buf, 512, "%s:%s:%s", KEY1, host, KEY2);
    DoMD5((unsigned char *)&res, (unsigned char *)&buf, strlen(buf));
    strcpy(res+16, KEY3); /* first 16 bytes are filled, append our key.. */
    n = strlen(res+16) + 16;
    DoMD5((unsigned char *)&res2, (unsigned char *)&res, n);
    /* Fold the 16-byte digest down to a 32-bit checksum. */
    alpha = downsample((unsigned char *)&res2);

    /* Find the dot after which the real domain tail is preserved; the
     * IsAlpha check presumably skips numeric components (e.g. IP-like
     * labels) so they stay hidden — confirm intent. */
    for (p = host; *p; p++)
    {
        if (*p == '.')
        {
            comps++;
            if ((comps >= components) && IsAlpha(*(p + 1)))
                break;
        }
    }

    if (*p)
    {
        unsigned int len;
        p++;
        /* "PREFIX-XXXXXXXX." + preserved tail, left-truncating the tail
         * if the total would exceed HOSTLEN. */
        ircd_snprintf(0, result, HOSTLEN, "%s-%X.", PREFIX, alpha);
        len = strlen(result) + strlen(p);
        if (len <= HOSTLEN)
            strcat(result, p);
        else
            strcat(result, p + (len - HOSTLEN));
    } else
        /* No preservable tail: the whole host becomes the hash form. */
        ircd_snprintf(0, result, HOSTLEN, "%s-%X", PREFIX, alpha);

    return result;
}
/* One recursion level of the adaptive-manifold filter.
 * Given the current manifold `eta` (full-size or downsampled) and the pixel
 * cluster it covers: compute per-pixel weights w_k, blur the weighted source
 * channels over the manifold (splat -> recursive filter -> slice), and
 * accumulate the sliced results into sum_w_ki_Psi_blur_ / _0_.  If the tree
 * depth allows, split the cluster by 1-w_k response and recurse into a
 * minus/plus manifold pair. */
void AdaptiveManifoldFilterN::buildManifoldsAndPerformFiltering(vector<Mat>& eta, Mat1b& cluster, int treeLevel)
{
    CV_DbgAssert((int)eta.size() == jointCnNum);

    //splatting
    Size etaSize = eta[0].size();
    CV_DbgAssert(etaSize == srcSize || etaSize == smallSize);

    if (etaSize == srcSize)
    {
        /* Full-size manifold: weights computed directly, then eta is
         * reduced (in place) for the blur stage. */
        compute_w_k(eta, w_k, sigma_r_over_sqrt_2, treeLevel);
        etaFull = eta;
        downsample(eta, eta);
    }
    else
    {
        /* Reduced manifold: expand to full size first so weights are
         * computed per source pixel. */
        upsample(eta, etaFull);
        compute_w_k(etaFull, w_k, sigma_r_over_sqrt_2, treeLevel);
    }

    //blurring
    Psi_splat_small.resize(srcCnNum);
    for (int si = 0; si < srcCnNum; si++)
    {
        // Weighted source channel, reduced to the blur resolution.
        Mat tmp;
        multiply(srcCn[si], w_k, tmp);
        downsample(tmp, Psi_splat_small[si]);
    }
    // Weight-only channel used later as the normalizer.
    downsample(w_k, Psi_splat_0_small);

    // Blur in place: the "blurred" aliases reuse the splat buffers.
    vector<Mat>& Psi_splat_small_blur = Psi_splat_small;
    Mat& Psi_splat_0_small_blur = Psi_splat_0_small;

    float rf_ss = (float)(sigma_s_ / getResizeRatio());
    float rf_sr = (float)(sigma_r_over_sqrt_2);
    RFFilterPass(eta, Psi_splat_small, Psi_splat_0_small, Psi_splat_small_blur, Psi_splat_0_small_blur, rf_ss, rf_sr);

    //slicing
    {
        // Expand each blurred channel back, re-weight, and accumulate.
        Mat tmp;
        for (int i = 0; i < srcCnNum; i++)
        {
            upsample(Psi_splat_small_blur[i], tmp);
            multiply(tmp, w_k, tmp);
            add(sum_w_ki_Psi_blur_[i], tmp, sum_w_ki_Psi_blur_[i]);
        }
        upsample(Psi_splat_0_small_blur, tmp);
        multiply(tmp, w_k, tmp);
        add(sum_w_ki_Psi_blur_0_, tmp, sum_w_ki_Psi_blur_0_);
    }

    //build new manifolds
    if (treeLevel < curTreeHeight)
    {
        // Split current cluster into negative/positive response halves.
        Mat1b cluster_minus, cluster_plus;
        computeClusters(cluster, cluster_minus, cluster_plus);

        vector<Mat> eta_minus(jointCnNum), eta_plus(jointCnNum);
        {
            // 1 - w_k: pixels poorly explained by the current manifold.
            Mat1f teta = 1.0 - w_k;
            computeEta(teta, cluster_minus, eta_minus);
            computeEta(teta, cluster_plus, eta_plus);
        }

        //free memory to continue deep recursion
        eta.clear();
        cluster.release();

        buildManifoldsAndPerformFiltering(eta_minus, cluster_minus, treeLevel + 1);
        buildManifoldsAndPerformFiltering(eta_plus, cluster_plus, treeLevel + 1);
    }
}
void ogl::texture::load_img(std::string name, std::map<int, vec4>& scale) { std::vector<GLubyte> pixels; // Load and parse the data file. size_t len; const void *buf = ::data->load(name, &len); if (buf) load_png(buf, len, pixels); ::data->free(name); // Initialize the OpenGL texture object. GLenum f = GL_RGBA; switch (c) { case 1: f = GL_LUMINANCE; case 2: f = GL_LUMINANCE_ALPHA; case 3: f = GL_RGB; } ogl::bind_texture(GL_TEXTURE_2D, GL_TEXTURE0, object); GLubyte *p = &pixels.front(); GLsizei ww = w; GLsizei hh = h; // Enumerate the mipmap levels. for (GLint l = 0; ww > 0 && hh > 0; l++) { std::map<int, vec4>::iterator it; // Set the scale for this mipmap. if ((it = scale.find(l)) == scale.end()) { glPixelTransferf(GL_RED_SCALE, 1.f); glPixelTransferf(GL_GREEN_SCALE, 1.f); glPixelTransferf(GL_BLUE_SCALE, 1.f); glPixelTransferf(GL_ALPHA_SCALE, 1.f); } else { glPixelTransferf(GL_RED_SCALE, GLfloat(it->second[0])); glPixelTransferf(GL_GREEN_SCALE, GLfloat(it->second[1])); glPixelTransferf(GL_BLUE_SCALE, GLfloat(it->second[2])); glPixelTransferf(GL_ALPHA_SCALE, GLfloat(it->second[3])); } // Copy the pixels. glTexImage2D(GL_TEXTURE_2D, l, f, ww, hh, 0, f, GL_UNSIGNED_BYTE, p); // Prepare for the next mipmap level. downsample(ww, hh, c, pixels); ww /= 2; hh /= 2; } // Initialize the default texture parameters. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); if (ogl::has_anisotropic) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, ogl::max_anisotropy); }
int main(int argc, char** argv) { SF_INFO info = {}, dst_info = {}; SNDFILE *in = NULL, *out = NULL; int* buffer = NULL; size_t num_low_samples = 0; sf_count_t ndata = 0; if (argc != 3) { printf("usage: %s in.wav out.wav\n", argv[0]); return EXIT_FAILURE; } in = sf_open(argv[1], SFM_READ, &info); if (sf_error(0)) { fprintf(stderr, "%s\n", sf_strerror(0)); goto cleanup; } printf("rate %d, frames %d channels %d\n", info.samplerate, (int)info.frames, info.channels); buffer = (int*)malloc(info.channels * info.frames * sizeof(int)); if (!buffer) { perror(""); goto cleanup; } ndata = sf_readf_int(in, buffer, info.frames * info.channels); if (sf_error(0)) { fprintf(stderr, "failed to read input %s\n", sf_strerror(0)); } printf("read %d items\n", (int)ndata); dst_info = info; dst_info.samplerate = 8000; num_low_samples = downsample(buffer, ndata, info.samplerate, dst_info.samplerate); out = sf_open(argv[2], SFM_WRITE, &dst_info); if (sf_error(0)) { fprintf(stderr, "%s\n", sf_strerror(0)); goto cleanup; } ndata = sf_writef_int(out, buffer, num_low_samples); if (sf_error(0)) { fprintf(stderr, "failed to write output %s\n", sf_strerror(0)); goto cleanup; } printf("written %d items\n", (int)ndata); cleanup: if (buffer) { free(buffer); } if (in) { sf_close(in); } if (out) { sf_close(out); } return EXIT_SUCCESS; }
/* Build the KLT tracking pyramid: for each of nb_levels_ levels, store the
 * (border-padded) intensity image plus its x and y gradients — 3 entries
 * per level, hence `step`.  Level 0 is the input converted to intensity;
 * each deeper level comes from the gradient-producing downsample()
 * overload.  Images are padded by track_width_/track_height_ on each side
 * (`border_type` for the image itself, constant 0 for gradients). */
template <typename PointInT, typename IntensityT> void
pcl::tracking::PyramidalKLTTracker<PointInT, IntensityT>::computePyramids (const PointCloudInConstPtr& input,
                                                                           std::vector<FloatImageConstPtr>& pyramid,
                                                                           pcl::InterpolationType border_type) const
{
  // Three planes per level: image, grad-x, grad-y.
  int step = 3;
  pyramid.resize (step * nb_levels_);

  FloatImageConstPtr previous;
  FloatImagePtr tmp (new FloatImage (input->width, input->height));
  // Convert the input cloud to a float intensity image, in parallel.
#ifdef _OPENMP
#pragma omp parallel for num_threads (threads_)
#endif
  for (int i = 0; i < static_cast<int> (input->size ()); ++i)
    tmp->points[i] = intensity_ (input->points[i]);
  previous = tmp;

  // Pad the level-0 image with the caller-selected border handling.
  FloatImagePtr img (new FloatImage (previous->width + 2*track_width_,
                                     previous->height + 2*track_height_));
  pcl::copyPointCloud (*tmp, *img, track_height_, track_height_, track_width_, track_width_,
                       border_type, 0.f);
  pyramid[0] = img;

  // compute first level gradients
  FloatImagePtr g_x (new FloatImage (input->width, input->height));
  FloatImagePtr g_y (new FloatImage (input->width, input->height));
  derivatives (*img, *g_x, *g_y);
  // copy to bigger clouds
  FloatImagePtr grad_x (new FloatImage (previous->width + 2*track_width_,
                                        previous->height + 2*track_height_));
  pcl::copyPointCloud (*g_x, *grad_x, track_height_, track_height_, track_width_, track_width_,
                       pcl::BORDER_CONSTANT, 0.f);
  pyramid[1] = grad_x;

  FloatImagePtr grad_y (new FloatImage (previous->width + 2*track_width_,
                                        previous->height + 2*track_height_));
  pcl::copyPointCloud (*g_y, *grad_y, track_height_, track_height_, track_width_, track_width_,
                       pcl::BORDER_CONSTANT, 0.f);
  pyramid[2] = grad_y;

  for (int level = 1; level < nb_levels_; ++level)
  {
    // compute current level and current level gradients
    FloatImageConstPtr current;
    FloatImageConstPtr g_x;
    FloatImageConstPtr g_y;
    // Reduce the previous level; also yields its gradients.
    downsample (previous, current, g_x, g_y);
    // copy to bigger clouds
    FloatImagePtr image (new FloatImage (current->width + 2*track_width_,
                                         current->height + 2*track_height_));
    pcl::copyPointCloud (*current, *image, track_height_, track_height_, track_width_,
                         track_width_, border_type, 0.f);
    pyramid[level*step] = image;
    FloatImagePtr gradx (new FloatImage (g_x->width + 2*track_width_, g_x->height + 2*track_height_));
    pcl::copyPointCloud (*g_x, *gradx, track_height_, track_height_, track_width_, track_width_,
                         pcl::BORDER_CONSTANT, 0.f);
    pyramid[level*step + 1] = gradx;
    FloatImagePtr grady (new FloatImage (g_y->width + 2*track_width_, g_y->height + 2*track_height_));
    pcl::copyPointCloud (*g_y, *grady, track_height_, track_height_, track_width_, track_width_,
                         pcl::BORDER_CONSTANT, 0.f);
    pyramid[level*step + 2] = grady;
    // set the new level
    previous = current;
  }
}
/* Command-line point-cloud filtering tool.
 * Loads a PCD file, then applies the optionally requested stages in order:
 * depth thresholding (-t), voxel downsampling (-d), radius outlier removal
 * (-r).  The result is either saved (-s) or shown in a PCLVisualizer. */
int main (int argc, char ** argv)
{
  if (argc < 2)
  {
    // No input file: print usage and exit with an error code.
    pcl::console::print_info ("Syntax is: %s input.pcd <options>\n", argv[0]);
    pcl::console::print_info ("  where options are:\n");
    pcl::console::print_info ("    -t min_depth,max_depth  ............... Threshold depth\n");
    pcl::console::print_info ("    -d leaf_size  ......................... Downsample\n");
    pcl::console::print_info ("    -r radius,min_neighbors ............... Radius outlier removal\n");
    pcl::console::print_info ("    -s output.pcd ......................... Save output\n");
    return (1);
  }

  // Load the input file
  PointCloudPtr cloud (new PointCloud);
  pcl::io::loadPCDFile (argv[1], *cloud);
  pcl::console::print_info ("Loaded %s (%zu points)\n", argv[1], cloud->size ());

  // Threshold depth
  double min_depth, max_depth;
  bool threshold_depth = pcl::console::parse_2x_arguments (argc, argv, "-t", min_depth, max_depth) > 0;
  if (threshold_depth)
  {
    size_t n = cloud->size ();
    cloud = thresholdDepth (cloud, min_depth, max_depth);
    pcl::console::print_info ("Eliminated %zu points outside depth limits\n", n - cloud->size ());
  }

  // Downsample and threshold depth
  double leaf_size;
  bool downsample_cloud = pcl::console::parse_argument (argc, argv, "-d", leaf_size) > 0;
  if (downsample_cloud)
  {
    size_t n = cloud->size ();
    cloud = downsample (cloud, leaf_size);
    pcl::console::print_info ("Downsampled from %zu to %zu points\n", n, cloud->size ());
  }

  // Remove outliers
  double radius, min_neighbors;
  bool remove_outliers = pcl::console::parse_2x_arguments (argc, argv, "-r", radius, min_neighbors) > 0;
  if (remove_outliers)
  {
    size_t n = cloud->size ();
    cloud = removeOutliers (cloud, radius, (int)min_neighbors);
    pcl::console::print_info ("Removed %zu outliers\n", n - cloud->size ());
  }

  // Save output
  std::string output_filename;
  bool save_cloud = pcl::console::parse_argument (argc, argv, "-s", output_filename) > 0;
  if (save_cloud)
  {
    pcl::io::savePCDFile (output_filename, *cloud);
    pcl::console::print_info ("Saved result as %s\n", output_filename.c_str ());
  }
  // Or visualize the result
  else
  {
    // Interactive fallback: block until the viewer window is closed.
    pcl::console::print_info ("Starting visualizer... Close window to exit.\n");
    pcl::visualization::PCLVisualizer vis;
    vis.addPointCloud (cloud);
    vis.resetCamera ();
    vis.spin ();
  }
  return (0);
}