// Deep-copy this method table into a fresh MethodTable of the same size.
// The whole copy runs under the table's spin lock so the source cannot
// change mid-iteration. Returns the new table.
MethodTable* MethodTable::duplicate(STATE) {
  utilities::thread::SpinLock::LockGuard lg(lock_);

  size_t table_size = bins()->to_native();
  MethodTable* dup = MethodTable::create(state, table_size);

  // Allow for subclassing.
  dup->klass(state, class_object(state));

  // Walk every bucket chain and re-store each entry in the duplicate.
  for(size_t i = 0; i < table_size; i++) {
    MethodTableBucket* entry =
        try_as<MethodTableBucket>(values()->at(state, i));
    while(entry) {
      dup->store(state, entry->name(), entry->method_id(),
                 entry->method(), entry->scope(),
                 entry->serial(), entry->visibility());
      entry = try_as<MethodTableBucket>(entry->next());
    }
  }
  return dup;
}
// input is sizeWin void STFT::forward(float * input){ //printf("STFT::forward(float *)\n"); arr::mul(input, mFwdWin, sizeWin()); // apply forward window if(mRotateForward) mem::rotateHalf(input, sizeWin()); // do zero-phase windowing rotation? DFT::forward(input); // do forward transform // compute frequency estimates? if(Bin::MagFreq == mSpctFormat){ // This will effectively subtract the expected phase difference from the computed. // This extra step seems to give more precise frequency estimates. slice(mPhases, numBins()) += float(M_2PI * sizeHop()) / sizeDFT(); // compute relative frequencies //arr::phaseToFreq(phs, mPhases, numBins(), unitsHop()); float factor = 1.f / (M_2PI * unitsHop()); for(uint32_t i=1; i<numBins()-1; ++i){ float dp = scl::wrapPhase(bins()[i][1] - mPhases[i]); // wrap phase into [-pi, pi) mPhases[i] = bins()[i][1]; // prev phase = curr phase bins()[i][1] = dp*factor; } // compute absolute frequencies by adding respective bin center frequency slice(mBuf, numBins(), 2) += gen::RAdd<float>(binFreq()); } }
// Grow/shrink the hash table to `size` bins: every bucket from the old
// Tuple of chains is re-hashed into a freshly allocated Tuple, then the
// table's storage and bin count are swapped over.
void MethodTable::redistribute(STATE, size_t size) {
  size_t old_bins = bins()->to_native();
  Tuple* fresh = Tuple::create(state, size);

  for(size_t idx = 0; idx < old_bins; idx++) {
    MethodTableBucket* bucket =
        try_as<MethodTableBucket>(values()->at(state, idx));
    while(bucket) {
      // Detach the bucket from its old chain before re-inserting it.
      MethodTableBucket* rest = try_as<MethodTableBucket>(bucket->next());
      bucket->next(state, nil<MethodTableBucket>());

      size_t target = find_bin(key_hash(bucket->name()), size);
      MethodTableBucket* head =
          try_as<MethodTableBucket>(fresh->at(state, target));

      if(!head) {
        fresh->put(state, target, bucket);
      } else {
        head->append(state, bucket);
      }
      bucket = rest;
    }
  }

  values(state, fresh);
  bins(state, Fixnum::from(size));
}
// Mode 1: treat each delta value (x, y, z independently) as the edge of a
// cubic pore. Builds the cumulative normalized pore-volume distribution in
// m_points, binned by pore length over [min(), max()].
// FIX: the three gsl vectors were allocated but never freed (memory leak on
// every call); also guard against a negative bin index.
void CumulativeVolume::computeMode1(Geometry *geometry, int timestep)
{
    QVector<float> &x = geometry->deltaXVector();
    QVector<float> &y = geometry->deltaYVector();
    QVector<float> &z = geometry->deltaZVector();

    // Total volume = sum of the cubes of every delta in all three directions.
    double totalVolume = 0;
    for(int i=0; i<x.size(); i++) {
        totalVolume += x[i]*x[i]*x[i];
        totalVolume += y[i]*y[i]*y[i];
        totalVolume += z[i]*z[i]*z[i];
    }
    double oneOverTotalVolume = 1.0 / totalVolume;

    int numberOfPores = 3*x.size();
    gsl_vector * poreVolumes = gsl_vector_alloc (numberOfPores);
    gsl_vector * poreVolumesNormalized = gsl_vector_alloc (numberOfPores);
    gsl_vector * poreLengths = gsl_vector_alloc(numberOfPores);

    int poreIndex = 0;
    for(int i=0; i<x.size(); i++) {
        float pores[3];
        pores[0] = x[i];
        pores[1] = y[i];
        pores[2] = z[i];
        for(int a=0; a<3; a++) {
            float poreLength = pores[a];
            const float dV = poreLength*poreLength*poreLength; // cube volume
            gsl_vector_set(poreVolumes, poreIndex, dV);
            gsl_vector_set(poreLengths, poreIndex, poreLength);
            gsl_vector_set(poreVolumesNormalized, poreIndex, dV * oneOverTotalVolume);
            poreIndex++;
        }
    }

    gsl_sort_vector2(poreVolumes, poreLengths); // Sort both vectors based on the first
    gsl_sort_vector(poreVolumesNormalized);

    // Set the x values and be ready to make plot data
    m_points.clear();
    m_points.reserve(bins());
    float dx = (max() - min()) / (bins() - 1);
    for(int i=0; i<bins(); i++) {
        float x = min() + i*dx;
        m_points.push_back(QPointF(x,0));
    }

    // Accumulate normalized volume into the bin of each pore length.
    // NOTE(review): bin = poreSize / dx ignores min(); correct only when
    // min() == 0 — confirm against the callers that set the range.
    for(int i=0; i<numberOfPores; i++) {
        float dVN = gsl_vector_get(poreVolumesNormalized, i); // dVN deltaVolumeNormalized
        float poreSize = gsl_vector_get(poreLengths, i);
        int bin = poreSize / dx;
        if(bin < 0 || bin >= bins()) continue; // Some pore sizes might fall outside range. Don't seg fault
        m_points[bin].setY(m_points[bin].y() + dVN);
    }

    // Convert the per-bin distribution into a cumulative sum.
    for(int i=1; i<bins(); i++) {
        qreal newValue = m_points[i].y() + m_points[i-1].y();
        m_points[i].setY(newValue);
    }

    // FIX: release the gsl work vectors (previously leaked).
    gsl_vector_free(poreVolumes);
    gsl_vector_free(poreVolumesNormalized);
    gsl_vector_free(poreLengths);
}
void CumulativeVolume::computeMode0(Geometry *geometry, int timestep) { QVector<float> &x = geometry->deltaXVector(); QVector<float> &y = geometry->deltaYVector(); QVector<float> &z = geometry->deltaZVector(); double totalVolume = geometry->totalVolume(); double oneOverTotalVolume = 1.0 / totalVolume; int numberOfPores = x.size()*y.size()*z.size(); gsl_vector * poreVolumes = gsl_vector_alloc (numberOfPores); gsl_vector * poreVolumesNormalized = gsl_vector_alloc (numberOfPores); gsl_vector * poreLengths = gsl_vector_alloc(numberOfPores); int poreIndex = 0; for(int i=0; i<x.size(); i++) { const float dx = x[i]; for(int j=0; j<y.size(); j++) { const float dy = y[j]; for(int k=0; k<z.size(); k++) { const float dz = z[k]; const float dV = dx*dy*dz; float poreLength = std::min(std::min(dx, dy), dz); #ifdef POREISCBRT poreLength = cbrt(dV); #endif gsl_vector_set(poreVolumes, poreIndex, dV); gsl_vector_set(poreLengths, poreIndex, poreLength); gsl_vector_set(poreVolumesNormalized, poreIndex, dV * oneOverTotalVolume); poreIndex++; } } } gsl_sort_vector2(poreVolumes, poreLengths); // Sort both vectors based on the first gsl_sort_vector(poreVolumesNormalized); // Set the x values and be ready to make plot data m_points.clear(); m_points.reserve(bins()); float dx = (max() - min()) / (bins() - 1); for(int i=0; i<bins(); i++) { float x = min() + i*dx; m_points.push_back(QPointF(x,0)); } for(int i=0; i<numberOfPores; i++) { float dVN = gsl_vector_get(poreVolumesNormalized, i); // dVN deltaVolumeNormalized float poreSize = gsl_vector_get(poreLengths, i); int bin = poreSize / dx; if(bin>=bins()) continue; // Some pore sizes might be larger than largest? Don't seg fault m_points[bin].setY(m_points[bin].y() + dVN); } for(int i=1; i<bins(); i++) { qreal newValue = m_points[i].y() + m_points[i-1].y(); m_points[i].setY(newValue); } }
// Compute a robust outlier threshold ("epsilon") from the distribution of
// sample match distances: build a 16-bin histogram, locate its peak, then
// walk forward to the first "valley" bin (<= 60% of the peak count) and
// return the smallest distance in that bin.
// FIX: when all distances are equal, binWidth == 0 and the old code divided
// by zero (NaN -> undefined cast to unsigned). Now returns maxDist directly.
double cisstAlgorithmICP_RobustICP::ComputeEpsilon(vctDynamicVector<double> &sampleDist)
{
  unsigned int numSamps = sampleDist.size();
  double minDist = sampleDist.MinElement();
  double maxDist = sampleDist.MaxElement();

  unsigned int numBins = 16;
  double binWidth = (maxDist - minDist) / (double)numBins;

  // Degenerate histogram: every sample at the same distance.
  if (binWidth <= 0.0) {
    return maxDist;
  }

  // build histogram of match distances
  vctDynamicVector<unsigned int> bins(numBins, (unsigned int)0);
  unsigned int sampleBin;
  for (unsigned int i = 0; i < numSamps; i++) {
    if (sampleDist[i] == maxDist) {
      // handle max case (would otherwise index one past the last bin)
      sampleBin = numBins - 1;
    } else {
      sampleBin = (unsigned int)floor((sampleDist[i] - minDist) / binWidth);
    }
    bins(sampleBin)++;
  }

  // find histogram peak (>= keeps the right-most bin on ties)
  unsigned int peakBin = numBins;  // initialize to invalid bin
  unsigned int peakBinSize = 0;
  for (unsigned int i = 0; i < numBins; i++) {
    if (bins(i) >= peakBinSize) {
      peakBin = i;
      peakBinSize = bins(i);
    }
  }

  // find valley following peak
  // (valley bin must be <= 60% of peak bin size)
  double valleyThresh = 0.6 * (double)peakBinSize;
  unsigned int valleyBin = peakBin + 1;
  for (unsigned int i = peakBin + 1; i < numBins; i++) {
    if ((double)bins(i) <= valleyThresh) {
      break;
    }
    valleyBin = i + 1;
  }

  // set epsilon to the smallest distance in the valley bin
  double epsilon = minDist + valleyBin * binWidth;

  //printHistogram(bins, peakBin, valleyBin, minDist, maxDist, binWidth);
  return epsilon;
}
// Recursive binary search for `t` in the (sorted-by-val) global array
// min_left over the inclusive range [left, right].
// Returns the index of a matching element, or -1 when absent.
// FIX: the empty-range check now comes FIRST; the old code computed
// mid and read min_left[mid].val before testing left > right, which
// dereferenced an out-of-range index (e.g. searching an empty range).
int bins(int t, int left, int right) {
	if (left > right) return -1;
	int mid = (left + right) / 2;
	if (t == min_left[mid].val) return mid;
	if (t < min_left[mid].val) return bins(t, left, mid - 1);
	return bins(t, mid + 1, right);
}
// Inverse short-time Fourier transform with overlap-add.
// dst: optional output buffer of sizeWin() samples; when null the result is
// only accumulated internally in mBufInv.
void STFT::inverse(float * dst){ //printf("STFT::inverse(float *)\n");
	if(Bin::MagFreq == mSpctFormat){
		//mem::copy(bins1(), mPhases, numBins());	// not correct, need to unwrap frequencies
		// Restore stored phases into the bins before the inverse transform.
		// NOTE(review): bins()[i] = mPhases[i] assigns a scalar to a bin
		// element — presumably the bin type accepts this; confirm it writes
		// the intended (phase) component.
		for(uint32_t i=1; i<numBins()-1; ++i) bins()[i] = mPhases[i];
	}

	DFT::inverse(0);	// result goes into mBuf

	// undo zero-phase windowing rotation?
	if(mRotateForward) mem::rotateHalf(mBuf, sizeWin());

	// apply secondary window to smooth ends?
	if(mWindowInverse){
		arr::mulBartlett(mBuf, sizeWin());
	}

	if(overlapping()){	//inverse windows overlap?

		// scale inverse so overlap-add is normalized
		//arr::mul(mBuf, gen::val(mInvWinMul), sizeWin());
		slice(mBuf, sizeWin()) *= mInvWinMul;

		// shift old output left while adding new output
		arr::add(mBufInv, mBuf, mBufInv + sizeHop(), sizeWin() - sizeHop());
	}

	// copy remaining non-overlapped portion of new output
	uint32_t sizeOverlap = sizeWin() - sizeHop();
	mem::deepCopy(mBufInv + sizeOverlap, mBuf + sizeOverlap, sizeHop());

	// copy output if external buffer provided
	if(dst) mem::deepCopy(dst, mBufInv, sizeWin());
}
void read_tile_samples(KVS &store, int uid, std::string full_channel_name, TileIndex requested_index, TileIndex client_tile_index, std::vector<DataSample<T> > &samples, bool &binned) { Channel ch(store, uid, full_channel_name); Tile tile; TileIndex actual_index; bool success = ch.read_tile_or_closest_ancestor(requested_index, actual_index, tile); if (!success) { log_f("gettile: no tile found for %s", requested_index.to_string().c_str()); } else { log_f("gettile: requested %s: found %s", requested_index.to_string().c_str(), actual_index.to_string().c_str()); for (unsigned i = 0; i < tile.get_samples<T>().size(); i++) { DataSample<T> &sample=tile.get_samples<T>()[i]; if (client_tile_index.contains_time(sample.time)) samples.push_back(sample); } } if (samples.size() <= 512) { binned = false; } else { // Bin binned = true; std::vector<DataAccumulator<T> > bins(512); for (unsigned i = 0; i < samples.size(); i++) { DataSample<T> &sample=samples[i]; bins[(int)floor(client_tile_index.position(sample.time)*512)] += sample; } samples.clear(); for (unsigned i = 0; i < bins.size(); i++) { if (bins[i].weight > 0) samples.push_back(bins[i].get_sample()); } } }
// Build the lookup table of "uniform" 8-bit LBP codes: patterns whose
// circular bit string has fewer than three 0/1 transitions. The 58 such
// codes are stored in ascending order in bins[0..57]; the final slot
// (bins[58]) stays 0.
std::vector<int> initHistogram(){
	std::vector<int> bins(59, 0);
	int index = 0;

	for (int code = 0; code < 256; ++code){
		// Count transitions between circularly adjacent bits of `code`.
		int transitions = 0;
		for (int k = 0; k < 8; ++k){
			const int curr = (code >> k) & 1;
			const int next = (code >> ((k + 1) % 8)) & 1;
			if (curr != next) ++transitions;
		}
		// Uniform pattern: at most two transitions around the circle.
		if (transitions < 3){
			bins[index] = code;
			++index;
		}
	}
	return bins;
}
// Resize the table to `size` bins. Each entry is a 3-slot Tuple
// (key, value, next); every chain in the old storage is unlinked and
// re-hashed into a freshly allocated Tuple of chains.
void LookupTable::redistribute(STATE, size_t size) {
  size_t old_size = bins_->to_native();
  Tuple* fresh = Tuple::create(state, size);

  for(size_t idx = 0; idx < old_size; idx++) {
    Tuple* bucket = try_as<Tuple>(values_->at(state, idx));
    while(bucket) {
      // Remember the rest of the chain, then detach this entry.
      Tuple* rest = try_as<Tuple>(bucket->at(state, 2));
      bucket->put(state, 2, Qnil);

      size_t target = find_bin(key_hash(bucket->at(state, 0)), size);
      Tuple* head = try_as<Tuple>(fresh->at(state, target));

      if(head) {
        entry_append(state, head, bucket);
      } else {
        fresh->put(state, target, bucket);
      }
      bucket = rest;
    }
  }

  values(state, fresh);
  bins(state, Fixnum::from(size));
}
// Switch between linear and logarithmic scaling. A no-op when the flag is
// unchanged; otherwise the cached histogram is invalidated and rebuilt
// with the current bin count.
void Histogram::set_use_logscale( bool on ) {
  if( logscale_ == on ) return;

  logscale_ = on;
  recompute_ = true;
  bins( bins_count_ );
}
// Zero out every bin count.
void Histogram::reset() {
  const int n = bins();
  for(int i = 0; i != n; ++i) {
    counts[i] = 0;
  }
}
// Resize the table to `size` bins: unlink every LookupTableBucket from the
// old chains and re-hash it by key into a freshly allocated Tuple, then
// install the new storage and bin count.
void LookupTable::redistribute(STATE, size_t size) {
  size_t old_size = bins_->to_native();
  Tuple* fresh = Tuple::create(state, size);

  for(size_t idx = 0; idx < old_size; idx++) {
    LookupTableBucket* bucket =
        try_as<LookupTableBucket>(values_->at(state, idx));
    while(bucket) {
      // Remember the rest of the chain, then detach this bucket.
      LookupTableBucket* rest = try_as<LookupTableBucket>(bucket->next());
      bucket->next(state, reinterpret_cast<LookupTableBucket *>(Qnil));

      size_t target = find_bin(key_hash(bucket->key()), size);
      LookupTableBucket* head =
          try_as<LookupTableBucket>(fresh->at(state, target));

      if(!head) {
        fresh->put(state, target, bucket);
      } else {
        head->append(state, bucket);
      }
      bucket = rest;
    }
  }

  values(state, fresh);
  bins(state, Fixnum::from(size));
}
// Return the summed value stored in `bin`, or -1.0 when the index is out
// of range.
double ds::BinnedData::sum_in(int bin) const {
    const bool in_range = (bin >= 0) && (bin < bins());
    return in_range ? _data[bin] : -1.0;
}
// Publish a 1 mm-resolution histogram of the particle coordinate selected
// by `index` as a HistogramWithRange message.
// FIX: an empty particle cloud previously left min_value/max_value at
// +/-FLT_MAX, so `num` became a huge negative number and the bins vector
// construction was invalid; we now publish an empty histogram instead.
void PlaneSupportedCuboidEstimator::publishHistogram(
  ParticleCloud::Ptr particles, int index,
  ros::Publisher& pub,
  const std_msgs::Header& header)
{
  const double step = 0.001;  // bin width [m]

  jsk_recognition_msgs::HistogramWithRange histogram;
  histogram.header = header;

  if (particles->points.empty()) {
    pub.publish(histogram);  // nothing to histogram
    return;
  }

  // Lookup min/max of the selected coordinate.
  float max_value = -FLT_MAX;
  float min_value = FLT_MAX;
  for (size_t i = 0; i < particles->points.size(); i++) {
    max_value = std::max(max_value, particles->points[i][index]);
    min_value = std::min(min_value, particles->points[i][index]);
  }

  int num = (max_value - min_value) / step + 1;
  std::vector<unsigned int> bins(num, 0);
  for (size_t i = 0; i < particles->points.size(); i++) {
    float value = particles->points[i][index];
    const int bin_index = (value - min_value) / step;
    // Clamp: value == max_value can land one past the last bin.
    const int min_confirmed_bin_index = std::min(bin_index, num - 1);
    bins[min_confirmed_bin_index] = bins[min_confirmed_bin_index] + 1;
  }

  for (size_t i = 0; i < bins.size(); i++) {
    jsk_recognition_msgs::HistogramWithRangeBin bin;
    bin.min_value = i * step + min_value;
    bin.max_value = (i + 1) * step + min_value;
    bin.count = bins[i];
    histogram.bins.push_back(bin);
  }
  pub.publish(histogram);
}
void ds::BinnedData::write(std::string file, bool average) const { tdx::File writeFile(file, tdx::File::out); //Check for the existence of the file if(writeFile.exists()) { std::cout << "WARNING: File.. " << file << " already exists. Overwriting!\n"; } std::string output = ""; output += "\n"; if(average) output += "#Averaged "; else output += "#Summed "; output += "data in range (" + std::to_string(min_range()) + ", " + std::to_string(max_range()) + ") spaced by " + std::to_string(spacing())+ ":\n\n"; for(int bin=0; bin<bins(); bin++) { double data_point = min_range() + (bin)*spacing(); double data; if(average) data = average_in(bin); else data = sum_in(bin); output += std::to_string(data_point) + "\t" + std::to_string(data) + "\n"; } writeFile << output; writeFile.close(); }
// Fill this histogram from a GDAL raster band.
// Returns 0 on success, 1 for an unsupported raster data type, 2 when the
// band needs more bins than this table can hold or GDAL fails to compute
// the histogram.
int KHistogramBand::histogramGDAL(GDALRasterBand *band)
{
    int numBins;

    // One bin per representable pixel value.
    if (band->GetRasterDataType() == GDT_Byte)
        numBins = 256;
    else if (band->GetRasterDataType() == GDT_UInt16 ||
             band->GetRasterDataType() == GDT_Int16)
        numBins = 65536;
    else
        return 1;

    // set number of bins (must fit in the preallocated table)
    if (numBins > size())
        return 2;
    bins() = numBins;

    // positioning centers bin windows at truncation points
    double minBinValue = -0.5;
    double maxBinValue = numBins - 0.5; //"numBins - 1 + 0.5"

    // Ask GDAL to accumulate counts straight into our bin array.
    // Flags: don't include out-of-range values, don't use approximation.
    if (band->GetHistogram(minBinValue, maxBinValue, numBins,
                           (GUIntBig *)getBinPointer(), // this is the actual
                                                        // int* data array in the
                                                        // table class
                           FALSE, FALSE, GDALDummyProgress, NULL) != CE_None)
        return 2;

    return 0;
}
// Install an alias entry `name` -> (orig_name in orig_mod) with visibility
// `vis`. If the original is itself an Alias, chase it so the new Alias
// points at the ultimate original method. Resizes the table when it gets
// too dense. Returns `name`.
Object* MethodTable::alias(STATE, Symbol* name, Symbol* vis,
                           Symbol* orig_name, Object* orig_method,
                           Module* orig_mod)
{
  check_frozen(state);

  utilities::thread::SpinLock::LockGuard lg(lock_);

  // Resolve the executable behind the aliased method.
  Executable* orig_exec;
  if(Alias* alias = try_as<Alias>(orig_method)) {
    // Aliasing an alias: flatten the chain to the true original.
    orig_exec = alias->original_exec();
    orig_mod = alias->original_module();
    orig_name = alias->original_name();
  } else if(orig_method->nil_p()) {
    orig_exec = nil<Executable>();
  } else {
    orig_exec = as<Executable>(orig_method);
  }

  Alias* method = Alias::create(state, orig_name, orig_mod, orig_exec);

  native_int num_entries = entries()->to_native();
  native_int num_bins = bins()->to_native();

  // Grow the table (doubling bins) before insertion when too dense.
  if(max_density_p(num_entries, num_bins)) {
    redistribute(state, num_bins <<= 1);
  }

  native_int bin = find_bin(key_hash(name), num_bins);

  MethodTableBucket* entry = try_as<MethodTableBucket>(values()->at(state, bin));
  MethodTableBucket* last = NULL;

  // If `name` already exists, overwrite the bucket in place.
  while(entry) {
    if(entry->name() == name) {
      entry->method_id(state, nil<String>());
      entry->method(state, method);
      entry->scope(state, cNil);
      entry->serial(state, Fixnum::from(0));
      entry->visibility(state, vis);
      return name;
    }

    last = entry;
    entry = try_as<MethodTableBucket>(entry->next());
  }

  // Not found: append a new bucket to the chain (or start the chain).
  if(last) {
    last->next(state, MethodTableBucket::create(
          state, name, nil<String>(), method, cNil, Fixnum::from(0), vis));
  } else {
    values()->put(state, bin, MethodTableBucket::create(
          state, name, nil<String>(), method, cNil, Fixnum::from(0), vis));
  }

  entries(state, Fixnum::from(num_entries + 1));
  return name;
}
// Map a data value onto its bin index, or return -1 when the value falls
// outside the binned range.
int ds::BinnedData::get_bin_number(double data_point) const {
    const int bin = (int) floor((data_point - min_range()) / spacing());
    if(bin < 0 || bin >= bins()) return -1;
    return bin;
}
// Return the per-count average of `bin` (0.0 for an empty bin), or -1.0
// when the index is out of range.
double ds::BinnedData::average_in(int bin) const {
    if(bin < 0 || bin >= bins()) return -1.0;
    if(_counts[bin] == 0) return 0.0;
    return _data[bin] / _counts[bin];
}
// Insert or overwrite the entry for `name`. Storing an Alias delegates to
// alias() so the alias chain is flattened. Resizes the table when it gets
// too dense. Returns `name`.
Object* MethodTable::store(STATE, Symbol* name, Object* method_id,
                           Object* method, Object* scope, Fixnum* serial,
                           Symbol* visibility)
{
  check_frozen(state);

  utilities::thread::SpinLock::LockGuard lg(lock_);

  if(!method->nil_p()) {
    if(Alias* stored_alias = try_as<Alias>(method)) {
      // alias() takes the same spin lock, so drop it for the call and
      // re-acquire afterwards (LockGuard will unlock an already-locked
      // lock on scope exit).
      // NOTE(review): nothing protects the table between unlock and
      // lock — confirm callers tolerate this window.
      lock_.unlock();
      Object* res = alias(state, name, visibility,
                          stored_alias->original_name(),
                          stored_alias->original_exec(),
                          stored_alias->original_module());
      lock_.lock();
      return res;
    }
  }

  native_int num_entries = entries()->to_native();
  native_int num_bins = bins()->to_native();

  // Grow the table (doubling bins) before insertion when too dense.
  if(max_density_p(num_entries, num_bins)) {
    redistribute(state, num_bins <<= 1);
  }

  native_int bin = find_bin(key_hash(name), num_bins);

  MethodTableBucket* entry = try_as<MethodTableBucket>(values()->at(state, bin));
  MethodTableBucket* last = NULL;

  // If `name` already exists, overwrite the bucket in place.
  while(entry) {
    if(entry->name() == name) {
      entry->method_id(state, method_id);
      entry->method(state, method);
      entry->scope(state, scope);
      entry->serial(state, serial);
      entry->visibility(state, visibility);
      return name;
    }

    last = entry;
    entry = try_as<MethodTableBucket>(entry->next());
  }

  // Not found: append a new bucket to the chain (or start the chain).
  if(last) {
    last->next(state, MethodTableBucket::create(
          state, name, method_id, method, scope, serial, visibility));
  } else {
    values()->put(state, bin, MethodTableBucket::create(
          state, name, method_id, method, scope, serial, visibility));
  }

  entries(state, Fixnum::from(num_entries + 1));
  return name;
}
// Mark the min_left entry whose value equals t (if any) via binary search.
void search(int t) {
	const int found = bins(t, 0, min_left_length - 1);
	if (found >= 0) {
		min_left[found].state = 1;
	}
}
// Set the upper clipping value and recompute the clipped range end.
// A value below the current low clip collapses the range to empty;
// otherwise end_ points past the last datum <= high_clip_ (data_ is
// sorted). Stats and bins are refreshed afterwards.
void Histogram::high_clip( float val ) {
  high_clip_ = val;
  recompute_ = true;

  end_ = ( val < low_clip_ )
             ? start_
             : std::upper_bound( data_.begin(), data_.end(), high_clip_ );

  compute_stats();
  bins( bins_count_ );
}
// Set the lower clipping value and recompute the clipped range start.
// A value above the current high clip collapses the range to empty;
// otherwise start_ points at the first datum >= low_clip_ (data_ is
// sorted). Stats and bins are refreshed afterwards.
void Histogram::low_clip( float val ) {
  low_clip_ = val;
  recompute_ = true;

  start_ = ( val > high_clip_ )
               ? end_
               : std::lower_bound( data_.begin(), data_.end(), low_clip_ );

  compute_stats();
  bins( bins_count_ );
}
// Return the largest summed bin value (0.0 when all bins are empty or
// there are no bins).
// Improvement: sum_in(bin) was evaluated twice per iteration; cache it.
double ds::BinnedData::max_summed_value() const {
    double max = 0.0;
    for(int bin = 0; bin < bins(); bin++) {
        const double value = sum_in(bin);
        if(value > max) max = value;
    }
    return max;
}
// Return the largest averaged bin value (0.0 when all bins are empty or
// there are no bins).
// Improvement: average_in(bin) was evaluated twice per iteration (each
// call does a division); cache it.
double ds::BinnedData::max_averaged_value() const {
    double max = 0.0;
    for(int bin = 0; bin < bins(); bin++) {
        const double value = average_in(bin);
        if(value > max) max = value;
    }
    return max;
}
// Return (minimum bin count, mean bin count) over all bins.
// NOTE: the minimum is truncated to int by the pair's first element type,
// matching the declared return type.
pair<int,double> Histogram::Minave() const {
	double total = 0;
	double smallest = count(0);

	for (int i = 0; i < bins(); ++i) {
		total += count(i);
		if (count(i) < smallest) {
			smallest = count(i);
		}
	}

	return make_pair(smallest, total / bins());
}
// Set both clipping values at once. When the pair is inverted, the high
// clip is raised to the low clip. start_/end_ are rebound to the clipped
// interval of the sorted data_, then stats and bins are refreshed.
void Histogram::clipping_values( std::pair<float,float> vals ) {
  recompute_ = true;

  if( vals.second < vals.first ) {
    vals.second = vals.first;   // enforce low <= high
  }
  low_clip_  = vals.first;
  high_clip_ = vals.second;

  start_ = std::lower_bound( data_.begin(), data_.end(), low_clip_ );
  end_   = std::upper_bound( data_.begin(), data_.end(), high_clip_ );

  compute_stats();
  bins( bins_count_ );
}
// Return the largest bin count.
// NOTE: the count is accumulated as double and truncated to int on return,
// matching the declared return type.
int Histogram::Getmax() const {
	double best = count(0);
	for (int i = 0; i < bins(); ++i) {
		if (count(i) > best) {
			best = count(i);
		}
	}
	return best;
}