void object::test<3>()
{
    Keyed one("one"), two("two"), three("three");
    // We don't want to rely on the underlying container delivering keys
    // in any particular order. That allows us the flexibility to
    // reimplement LLInstanceTracker using, say, a hash map instead of a
    // std::map. We DO insist that every key appear exactly once.
    typedef std::vector<std::string> StringVector;
    StringVector keys(Keyed::beginKeys(), Keyed::endKeys());
    std::sort(keys.begin(), keys.end());
    StringVector::const_iterator ki(keys.begin());
    ensure_equals(*ki++, "one");
    ensure_equals(*ki++, "three");
    ensure_equals(*ki++, "two");
    // Use ensure() here because ensure_equals would want to display
    // mismatched values, and frankly that wouldn't help much.
    ensure("didn't reach end", ki == keys.end());
    // Use a somewhat different approach to order independence with
    // beginInstances(): explicitly capture the instances we know in a
    // set, and delete them as we iterate through.
    typedef std::set<Keyed*> InstanceSet;
    InstanceSet instances;
    instances.insert(&one);
    instances.insert(&two);
    instances.insert(&three);
    for (Keyed::instance_iter ii(Keyed::beginInstances()), iend(Keyed::endInstances());
         ii != iend; ++ii)
    {
        Keyed& ref = *ii;
        ensure_equals("spurious instance", instances.erase(&ref), 1);
    }
    ensure_equals("unreported instance", instances.size(), 0);
}
bool Model::
computeJacobian(taoDNode const * node,
                double gx, double gy, double gz,
                Matrix & jacobian) const
{
    if ( ! node) {
        return false;
    }
    ancestry_table_t::const_iterator iae(ancestry_table_.find(const_cast<taoDNode*>(node)));
    if (iae == ancestry_table_.end()) {
        return false;
    }
    ancestry_list_t const & alist(iae->second);

#ifdef DEBUG
    fprintf(stderr, "computeJacobian()\ng: [% 4.2f % 4.2f % 4.2f]\n", gx, gy, gz);
#endif // DEBUG

    // \todo Implement support for more than one joint per node, and
    // more than one DOF per joint.
    jacobian = Matrix::Zero(6, ndof_);
    ancestry_list_t::const_iterator ia(alist.begin());
    ancestry_list_t::const_iterator iend(alist.end());
    for (/**/; ia != iend; ++ia) {
        deVector6 Jg_col;
        ia->joint->getJgColumns(&Jg_col);
        int const icol(ia->id);

#ifdef DEBUG
        fprintf(stderr, "iJg[%d]: [ % 4.2f % 4.2f % 4.2f % 4.2f % 4.2f % 4.2f]\n",
                icol,
                Jg_col.elementAt(0), Jg_col.elementAt(1), Jg_col.elementAt(2),
                Jg_col.elementAt(3), Jg_col.elementAt(4), Jg_col.elementAt(5));
#endif // DEBUG

        for (size_t irow(0); irow < 6; ++irow) {
            jacobian.coeffRef(irow, icol) = Jg_col.elementAt(irow);
        }

        // Add the effect of the joint rotation on the translational
        // velocity at the global point (column-wise cross product with
        // [gx;gy;gz]). Note that Jg_col.elementAt(3) is the
        // contribution to omega_x etc, because the upper 3 elements of
        // Jg_col are v_x etc. (And don't ask me why we have to
        // subtract the cross product, it probably got inverted
        // somewhere.)
        jacobian.coeffRef(0, icol) -= -gz * Jg_col.elementAt(4) + gy * Jg_col.elementAt(5);
        jacobian.coeffRef(1, icol) -=  gz * Jg_col.elementAt(3) - gx * Jg_col.elementAt(5);
        jacobian.coeffRef(2, icol) -= -gy * Jg_col.elementAt(3) + gx * Jg_col.elementAt(4);

#ifdef DEBUG
        fprintf(stderr, "0Jg[%d]: [ % 4.2f % 4.2f % 4.2f % 4.2f % 4.2f % 4.2f]\n",
                icol,
                jacobian.coeff(0, icol), jacobian.coeff(1, icol), jacobian.coeff(2, icol),
                jacobian.coeff(3, icol), jacobian.coeff(4, icol), jacobian.coeff(5, icol));
#endif // DEBUG
    }
    return true;
}
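// Reading aid for the cross-product lines in computeJacobian() above: with
// omega = (Jg_col[3], Jg_col[4], Jg_col[5]) and g = (gx, gy, gz), the three
// "-=" updates subtract the components of g x omega, which is algebraically
// the same as adding omega x g to the translational rows (the usual
// rigid-body term v_point = v_origin + omega x r). This is a minimal
// standalone sketch of that identity only -- plain C++, no tao/Eigen types,
// and purely illustrative values.
#include <array>
#include <cassert>

static std::array<double,3> cross(const std::array<double,3>& a,
                                  const std::array<double,3>& b)
{
    return { a[1]*b[2] - a[2]*b[1],
             a[2]*b[0] - a[0]*b[2],
             a[0]*b[1] - a[1]*b[0] };
}

int main()
{
    const std::array<double,3> g     = {0.1, 0.2, 0.3};  // gx, gy, gz
    const std::array<double,3> omega = {0.4, 0.5, 0.6};  // Jg_col elements 3..5
    const std::array<double,3> g_x_w = cross(g, omega);
    const std::array<double,3> w_x_g = cross(omega, g);
    for (int i = 0; i < 3; ++i) {
        // subtracting g x omega is the same as adding omega x g
        assert(-g_x_w[i] == w_x_g[i]);
    }
    return 0;
}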
bool Model::
getGravity(Vector & gravity) const
{
    if (0 == g_torque_.size()) {
        return false;
    }
    gravity = g_torque_;
    // knock away gravity torque from links that are already otherwise compensated
    dof_set_t::const_iterator iend(gravity_disabled_.end());
    for (dof_set_t::const_iterator ii(gravity_disabled_.begin()); ii != iend; ++ii) {
        gravity[*ii] = 0;
    }
    return true;
}
void object::test<4>()
{
    Unkeyed one, two, three;
    typedef std::set<Unkeyed*> KeySet;
    KeySet instances;
    instances.insert(&one);
    instances.insert(&two);
    instances.insert(&three);
    for (Unkeyed::instance_iter ii(Unkeyed::beginInstances()), iend(Unkeyed::endInstances());
         ii != iend; ++ii)
    {
        Unkeyed& ref = *ii;
        ensure_equals("spurious instance", instances.erase(&ref), 1);
    }
    ensure_equals("unreported instance", instances.size(), 0);
}
void Elastic::FAC::pack_v_v_rhs(double* buffer,
                                const SAMRAI::hier::Patch& patch,
                                const SAMRAI::hier::Box& region,
                                const std::string& variable_name,
                                const int &depth) const
{
  boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr;
  if (variable_name == "Displacement")
    {
      v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
        (patch.getPatchData(v_id));
    }
  else if ("Fault Correction + RHS" == variable_name)
    {
      v_ptr = boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
        (patch.getPatchData(v_rhs_id));
    }
  else
    {
      TBOX_ERROR("Unregistered variable name '" << variable_name << "' in\n"
                 << "Elastic::FAC::packDerivedDataIntoDoubleBuffer");
    }
  SAMRAI::pdat::SideData<double>& v = *v_ptr;

  if(d_dim.getValue()==2)
    {
      const SAMRAI::hier::Index ip(1,0), jp(0,1);
      SAMRAI::pdat::CellIterator iend(SAMRAI::pdat::CellGeometry::end(region));
      for (SAMRAI::pdat::CellIterator
             icell(SAMRAI::pdat::CellGeometry::begin(region));
           icell!=iend; ++icell)
        {
          const SAMRAI::pdat::CellIndex &center(*icell);
          const SAMRAI::pdat::SideIndex
            x(center,0,SAMRAI::pdat::SideIndex::Lower),
            y(center,1,SAMRAI::pdat::SideIndex::Lower);
          double vx=v(x);
          double vy=v(y);
          if(!offset_vector_on_output)
            {
              vx=(v(x+ip) + vx)/2;
              vy=(v(y+jp) + vy)/2;
            }
          if (0==depth)
            { *buffer = vx; }
          else
            { *buffer = vy; }
          ++buffer;
        }
    }
  else
    {
      const SAMRAI::hier::Index ip(1,0,0), jp(0,1,0), kp(0,0,1);
      SAMRAI::pdat::CellIterator iend(SAMRAI::pdat::CellGeometry::end(region));
      for (SAMRAI::pdat::CellIterator
             icell(SAMRAI::pdat::CellGeometry::begin(region));
           icell!=iend; ++icell)
        {
          const SAMRAI::pdat::CellIndex &center(*icell);
          const SAMRAI::pdat::SideIndex
            x(center,0,SAMRAI::pdat::SideIndex::Lower),
            y(center,1,SAMRAI::pdat::SideIndex::Lower),
            z(center,2,SAMRAI::pdat::SideIndex::Lower);
          double vx=v(x);
          double vy=v(y);
          double vz=v(z);
          if(!offset_vector_on_output)
            {
              vx=(v(x+ip) + vx)/2;
              vy=(v(y+jp) + vy)/2;
              vz=(v(z+kp) + vz)/2;
            }
          if (0==depth)
            { *buffer = vx; }
          else if (1==depth)
            { *buffer = vy; }
          else
            { *buffer = vz; }
          ++buffer;
        }
    }
}
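// For readers unfamiliar with SAMRAI's staggered SideData layout: when
// offset_vector_on_output is false, pack_v_v_rhs() above averages the two
// face values that bracket each cell along an axis. A minimal standalone
// sketch of that face-to-cell averaging in 1-D, using plain vectors instead
// of SAMRAI types (names and data are illustrative only):
#include <cstdio>
#include <vector>

int main()
{
    const int N = 4;                       // N cells -> N+1 x-faces
    std::vector<double> v_face(N + 1);
    for (int i = 0; i <= N; ++i)
        v_face[i] = 10.0 * i;              // arbitrary face-centered data

    std::vector<double> v_cell(N);
    for (int i = 0; i < N; ++i)
        v_cell[i] = (v_face[i] + v_face[i + 1]) / 2;   // (v(x) + v(x+ip)) / 2

    for (int i = 0; i < N; ++i)
        std::printf("cell %d: %g\n", i, v_cell[i]);
    return 0;
}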
void DenseDescriptorDictionaryTrainer::parseImage()
{
    //online dictionary builder
    OnlineDictionary online_dictionary(m_dictionary_capacity);

    std::map<std::string,std::string> samples_and_annotation;
    std::map<std::string,Array<int,4>> samples_regions;
    std::map<std::string,int> samples_labels;
    int labels_num;

    //load samples from file
    loadObjectClassifyTrainingSamples(samples_and_annotation,samples_regions,samples_labels,labels_num);

    ImageReadNode<ImageSignal<float>> img_read;
    ImageReadNode<ImageSignal<unsigned char>> lable_read;

    std::map<std::string,std::string>::iterator iter,iend(samples_and_annotation.end());
    for (iter = samples_and_annotation.begin(); iter != iend; ++iter)
    {
        std::string sample_file_name = iter->first;
        if (!sample_file_name.empty())
        {
            EAGLEEYE_INFO("process %s \n",sample_file_name.c_str());

            img_read.setFilePath((m_trainer_folder + sample_file_name).c_str());
            img_read.start();
            Matrix<float> img_sample = img_read.getImage();

            Array<int,4> object_region = samples_regions[sample_file_name];
            Matrix<float> local_img_sample =
                img_sample(Range(object_region[2],object_region[3]),
                           Range(object_region[0],object_region[1]));
            local_img_sample.clone();
//          putToMatlab(local_img_sample,"local_img");

            //resize img
            if (m_is_resize_flag)
            {
                local_img_sample = resize(local_img_sample,m_scales[0]);
            }
//          putToMatlab(local_img_sample,"resize_local_img");

            //get rid of noise
            if (m_is_gaussian_flag)
            {
                local_img_sample = gaussFilter(local_img_sample,m_kernel_size[0],m_kernel_size[1]);
            }
//          putToMatlab(local_img_sample,"gaussian_local_img");

            //get dense points
            DenseFeatureDetector dense_feature_det;
            if (m_is_needed_main_dir)
            {
                dense_feature_det.enableCalcMainDir();
            }
            else
            {
                dense_feature_det.disableCalcMainDir();
            }
            dense_feature_det.setDetectorParams(m_init_scale,m_scale_levels,m_scale_mul,
                                                m_init_xy_step,m_init_img_bound,
                                                m_search_radius,m_vary_xy_step_with_scale,
                                                m_vary_img_bound_with_scale);
            std::vector<KeyPoint> keypoints;
            dense_feature_det.detect(local_img_sample,keypoints);

            //compute keypoint descriptors
            Matrix<float> keypoints_descripors;
            m_descriptor->compute(local_img_sample,keypoints,keypoints_descripors);

            online_dictionary.trainOnline(keypoints_descripors,m_dictionary,m_dictionary_counts);
        }
    }

    Dictionary::saveDictionary(m_dictionary,(m_trainer_folder + m_trainer_name + m_ext_name).c_str());
}
void Elastic::FAC::pack_strain(double* buffer,
                               const SAMRAI::hier::Patch& patch,
                               const SAMRAI::hier::Box& region,
                               const int &depth) const
{
  boost::shared_ptr<SAMRAI::pdat::SideData<double> > v_ptr=
    boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
    (patch.getPatchData(v_id));
  SAMRAI::pdat::SideData<double>& v = *v_ptr;

  boost::shared_ptr<SAMRAI::pdat::CellData<double> > dv_diagonal;
  boost::shared_ptr<SAMRAI::pdat::SideData<double> > dv_mixed;
  if(!faults.empty())
    {
      dv_diagonal=boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> >
        (patch.getPatchData(dv_diagonal_id));
      dv_mixed=boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
        (patch.getPatchData(dv_mixed_id));
    }
  const Gamra::Dir dim=d_dim.getValue();

  if(have_embedded_boundary())
    {
      boost::shared_ptr<SAMRAI::pdat::SideData<double> > level_set_ptr;
      if(have_embedded_boundary())
        level_set_ptr=boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
          (patch.getPatchData(level_set_id));
    }

  Gamra::Dir ix(Gamra::Dir::from_int(depth/dim)),
    iy(Gamra::Dir::from_int(depth%dim));
  const SAMRAI::hier::Index zero(SAMRAI::hier::Index::getZeroIndex(d_dim));
  SAMRAI::hier::Index ip(zero), jp(zero);
  ip[ix]=1;
  jp[iy]=1;

  const double *dx(boost::dynamic_pointer_cast
                   <SAMRAI::geom::CartesianPatchGeometry>
                   (patch.getPatchGeometry())->getDx());

  SAMRAI::pdat::CellIterator iend(SAMRAI::pdat::CellGeometry::end(region));
  for(SAMRAI::pdat::CellIterator
        icell(SAMRAI::pdat::CellGeometry::begin(region));
      icell!=iend; ++icell)
    {
      const SAMRAI::pdat::SideIndex s(*icell,ix,SAMRAI::pdat::SideIndex::Lower);
      if(ix==iy)
        {
          double diff(v(s+ip)-v(s));
          if(!faults.empty())
            diff-=(*dv_diagonal)(*icell,ix);
          *buffer=diff/dx[ix];
        }
      else
        {
          const int ix_iy(index_map(ix,iy,dim));
          double diff(- v(s-jp) - v(s+ip-jp) + v(s+jp) + v(s+ip+jp));
          if(!faults.empty())
            diff+=(*dv_mixed)(s,ix_iy+1) - (*dv_mixed)(s-jp,ix_iy)
              + (*dv_mixed)(s+ip,ix_iy+1) - (*dv_mixed)(s+ip-jp,ix_iy)
              + (*dv_mixed)(s+jp,ix_iy+1) - (*dv_mixed)(s,ix_iy)
              + (*dv_mixed)(s+ip+jp,ix_iy+1) - (*dv_mixed)(s+ip,ix_iy);
          *buffer=diff/(4*dx[iy]);
        }
      ++buffer;
    }
}
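// The off-diagonal branch of pack_strain() above packs diff/(4*dx[iy]); that
// is just the average of two centered differences of v in the iy direction,
// one taken at side s and one at side s+ip. A minimal standalone check of
// that algebra with illustrative numbers (chosen so the floating-point
// comparison is exact); no SAMRAI types involved:
#include <cassert>

int main()
{
    // Stand-ins for v(s+jp), v(s-jp), v(s+ip+jp), v(s+ip-jp) and dx[iy].
    const double v_s_pj = 3.0, v_s_mj = 1.0, v_sp_pj = 5.0, v_sp_mj = 2.0;
    const double dy = 0.5;

    // Average of the two centered differences...
    const double at_s  = (v_s_pj  - v_s_mj)  / (2 * dy);
    const double at_sp = (v_sp_pj - v_sp_mj) / (2 * dy);
    const double averaged = (at_s + at_sp) / 2;

    // ...equals the single expression used in pack_strain().
    const double diff = -v_s_mj - v_sp_mj + v_s_pj + v_sp_pj;
    assert(averaged == diff / (4 * dy));
    return 0;
}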
BOOL LLFloaterAbout::postBuild()
{
    center();
    LLViewerTextEditor *support_widget =
        getChild<LLViewerTextEditor>("support_editor", true);

    LLViewerTextEditor *credits_widget =
        getChild<LLViewerTextEditor>("credits_editor", true);

    getChild<LLUICtrl>("copy_btn")->setCommitCallback(
        boost::bind(&LLFloaterAbout::onClickCopyToClipboard, this));

#if LL_WINDOWS
    getWindow()->incBusyCount();
    getWindow()->setCursor(UI_CURSOR_ARROW);
#endif
    LLSD info(getInfo());
#if LL_WINDOWS
    getWindow()->decBusyCount();
    getWindow()->setCursor(UI_CURSOR_ARROW);
#endif

    std::ostringstream support;

    // Render the LLSD from getInfo() as a format_map_t
    LLStringUtil::format_map_t args;

    // allow the "Release Notes" URL label to be localized
    args["ReleaseNotes"] = LLTrans::getString("ReleaseNotes");

    for (LLSD::map_const_iterator ii(info.beginMap()), iend(info.endMap());
         ii != iend; ++ii)
    {
        if (! ii->second.isArray())
        {
            // Scalar value
            if (ii->second.isUndefined())
            {
                args[ii->first] = getString("none");
            }
            else
            {
                // don't forget to render value asString()
                args[ii->first] = ii->second.asString();
            }
        }
        else
        {
            // array value: build KEY_0, KEY_1 etc. entries
            for (LLSD::Integer n(0), size(ii->second.size()); n < size; ++n)
            {
                args[STRINGIZE(ii->first << '_' << n)] = ii->second[n].asString();
            }
        }
    }

    // Now build the various pieces
    support << getString("AboutHeader", args);
    if (info.has("REGION"))
    {
        support << "\n\n" << getString("AboutPosition", args);
    }
    support << "\n\n" << getString("AboutSystem", args);
    support << "\n";
    if (info.has("GRAPHICS_DRIVER_VERSION"))
    {
        support << "\n" << getString("AboutDriver", args);
    }
    support << "\n" << getString("AboutLibs", args);
    if (info.has("COMPILER"))
    {
        support << "\n" << getString("AboutCompiler", args);
    }
    if (info.has("PACKETS_IN"))
    {
        support << '\n' << getString("AboutTraffic", args);
    }

    support_widget->appendText(support.str(),
                               FALSE,
                               LLStyle::Params()
                                   .color(LLUIColorTable::instance().getColor("TextFgReadOnlyColor")));
    support_widget->blockUndo();

    // Fix views
    support_widget->setEnabled(FALSE);
    support_widget->startOfDoc();

    credits_widget->setEnabled(FALSE);
    credits_widget->startOfDoc();

    return TRUE;
}
int Model::
init(tao_tree_info_s * kgm_tree,
     tao_tree_info_s * cc_tree,
     std::ostream * msg)
{
    int const status(tao_consistency_check(kgm_tree->root, msg));
    if (0 != status) {
        return status;
    }

    if (kgm_tree_) {
        if (msg) {
            *msg << "jspace::Model::init(): already initialized\n";
        }
        return -1;
    }

    if ( ! kgm_tree->sort()) {
        if (msg) {
            *msg << "jspace::Model::init(): could not sort KGM nodes according to IDs\n";
        }
        return -2;
    }

    if ((0 != cc_tree) && ( ! cc_tree->sort())) {
        if (msg) {
            *msg << "jspace::Model::init(): could not sort CC nodes according to IDs\n";
        }
        return -3;
    }

    // Create ancestry table of all nodes in the KGM tree, for correct
    // (and slightly more efficient) computation of the Jacobian.
    ancestry_table_.clear();    // just paranoid...
    typedef tao_tree_info_s::node_info_t::const_iterator cit_t;
    cit_t in(kgm_tree->info.begin());
    cit_t iend(kgm_tree->info.end());
    for (/**/; in != iend; ++in) {
        // ...paranoid checks...
        if ( ! in->node) {
            if (msg) {
                *msg << "jspace::Model::init(): NULL node at entry #" << in->id << "\n";
            }
            return -4;
        }
        if (in->id != in->node->getID()) {
            if (msg) {
                *msg << "jspace::Model::init(): node ID of entry #" << in->id
                     << " is " << in->node->getID() << "\n";
            }
            return -5;
        }
        ancestry_list_t & alist(ancestry_table_[in->node]); // first reference creates the instance
        // walk up the ancestry, append each parent to the list of
        // ancestors of this node
        for (taoDNode * node(in->node); 0 != node; node = node->getDParent()) {
            ancestry_entry_s entry;
            entry.id = node->getID();
            entry.joint = node->getJointList();
            if (0 != entry.joint) {
                alist.push_back(entry);
            }
        }
    }

    kgm_tree_ = kgm_tree;
    cc_tree_ = cc_tree;
    ndof_ = kgm_tree->info.size();

    return 0;
}
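// The ancestry table built in Model::init() above is, per node, the list of
// ancestors (the node itself included) that carry a joint, found by walking
// parent pointers up to the root. A minimal standalone sketch of that walk
// with a toy node type; ToyNode and joint_ancestry() are illustrative names,
// not the tao API:
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToyNode {              // stand-in for taoDNode
    int id;
    bool has_joint;
    ToyNode* parent;
};

// Collect ids of every jointed ancestor of 'node', node first, root last,
// mirroring the inner for-loop of Model::init().
std::vector<int> joint_ancestry(ToyNode* node)
{
    std::vector<int> ids;
    for (ToyNode* n = node; 0 != n; n = n->parent) {
        if (n->has_joint) {
            ids.push_back(n->id);
        }
    }
    return ids;
}

int main()
{
    ToyNode root  = {0, true,  0};
    ToyNode elbow = {1, true,  &root};
    ToyNode wrist = {2, false, &elbow};   // e.g. a fixed link without a joint

    const std::vector<int> ids = joint_ancestry(&wrist);
    for (std::size_t i = 0; i < ids.size(); ++i) {
        std::printf("%d ", ids[i]);        // prints "1 0 "
    }
    std::printf("\n");
    return 0;
}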
ErrorCode DenseTag::find_entities_with_value( const SequenceManager* seqman,
                                              Error* error,
                                              Range& output_entities,
                                              const void* value,
                                              int value_bytes,
                                              EntityType type,
                                              const Range* intersect_entities ) const
{
  if (value_bytes && value_bytes != get_size()) {
    error->set_last_error( "Cannot compare data of size %d with tag of size %d",
                           value_bytes, get_size() );
    return MB_INVALID_SIZE;
  }

  if (!intersect_entities) {
    std::pair<EntityType,EntityType> range = type_range(type);
    TypeSequenceManager::const_iterator i;
    for (EntityType t = range.first; t != range.second; ++t) { // advance the type, not the sequence iterator
      const TypeSequenceManager& map = seqman->entity_map(t);
      for (i = map.begin(); i != map.end(); ++i) {
        const void* data = (*i)->data()->get_tag_data( mySequenceArray );
        if (data) {
          ByteArrayIterator start( (*i)->data()->start_handle(), data, *this );
          ByteArrayIterator end( (*i)->end_handle() + 1, 0, 0 );
          start += (*i)->start_handle() - (*i)->data()->start_handle();
          find_tag_values_equal( *this, value, get_size(), start, end, output_entities );
        }
      }
    }
  }
  else {
    const unsigned char* array = NULL; // initialize to get rid of warning
    size_t count;
    ErrorCode rval;

    Range::const_pair_iterator p = intersect_entities->begin();
    if (type != MBMAXTYPE) {
      p = intersect_entities->lower_bound(type);
      assert(TYPE_FROM_HANDLE(p->first) == type);
    }

    for (; p != intersect_entities->const_pair_end() &&
           (MBMAXTYPE == type || TYPE_FROM_HANDLE(p->first) == type);
         ++p) {

      EntityHandle start = p->first;
      while (start <= p->second) {
        rval = get_array( seqman, error, start, array, count );
        if (MB_SUCCESS != rval)
          return rval;

        if (p->second - start < count-1)
          count = p->second - start + 1;

        if (array) {
          ByteArrayIterator istart( start, array, *this );
          ByteArrayIterator iend( start+count, 0, 0 );
          find_tag_values_equal( *this, value, get_size(), istart, iend, output_entities );
        }
        start += count;
      }
    }
  }

  return MB_SUCCESS;
}
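// The intersect_entities branch of find_entities_with_value() above walks each
// handle pair [p->first, p->second] in contiguous chunks, clamping the last
// chunk so it never runs past the pair. A minimal standalone sketch of that
// clamping arithmetic with plain integers (the handle values and chunk size
// are illustrative only):
#include <cstdio>

int main()
{
    const unsigned long first = 100, last = 112;
    const unsigned long chunk = 5;     // pretend each get_array() call covers 5 handles

    for (unsigned long start = first; start <= last; ) {
        unsigned long count = chunk;
        if (last - start < count - 1)            // same clamping test as in DenseTag
            count = last - start + 1;
        std::printf("process handles [%lu, %lu]\n", start, start + count - 1);
        start += count;
    }
    return 0;
}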
/// ensures get_index() works properly after changing tokens vector or drop_get_index
void generate_get_index()
{
    for (index_type i = ibegin(); i < iend(); ++i)
        dict_hash.insert(i);
}
/*
*************************************************************************
* Set up the initial guess and problem parameters                      *
* and solve the Stokes problem.  We explicitly initialize and          *
* deallocate the solver state in this example.                         *
*************************************************************************
*/
bool Stokes::FAC::solve()
{
  if (!d_hierarchy) {
    TBOX_ERROR(d_object_name << "Cannot solve using an uninitialized object.\n");
  }

  int ln;
  /*
   * Fill in the initial guess.
   */
  for (ln = 0; ln <= d_hierarchy->getFinestLevelNumber(); ++ln) {
    boost::shared_ptr<SAMRAI::hier::PatchLevel> level = d_hierarchy->getPatchLevel(ln);
    SAMRAI::hier::PatchLevel::Iterator ip(level->begin());
    SAMRAI::hier::PatchLevel::Iterator iend(level->end());
    for ( ; ip!=iend; ++ip) {
      boost::shared_ptr<SAMRAI::hier::Patch> patch = *ip;
      boost::shared_ptr<SAMRAI::pdat::CellData<double> > p =
        boost::dynamic_pointer_cast<SAMRAI::pdat::CellData<double> >
        (patch->getPatchData(p_id));
      boost::shared_ptr<SAMRAI::geom::CartesianPatchGeometry> geom =
        boost::dynamic_pointer_cast<SAMRAI::geom::CartesianPatchGeometry>
        (patch->getPatchGeometry());

      if(p_initial.empty()) {
        p->fill(0.0);
      }
      else {
        const int dim=d_dim.getValue();
        const double *dx=geom->getDx();
        std::vector<double> xyz(dim);
        std::vector<double> dx_p(dim);
        for(int d=0;d<dim;++d)
          dx_p[d]=(p_initial_xyz_max[d] - p_initial_xyz_min[d])/(p_initial_ijk[d]-1);
        std::vector<int> di(dim);
        di[0]=1;
        for(int d=1;d<dim;++d)
          di[d]=di[d-1]*p_initial_ijk[d-1];

        SAMRAI::hier::Box pbox = p->getBox();
        SAMRAI::pdat::CellIterator
          cend(SAMRAI::pdat::CellGeometry::end(p->getGhostBox()));
        for(SAMRAI::pdat::CellIterator
              ci(SAMRAI::pdat::CellGeometry::begin(p->getGhostBox()));
            ci!=cend; ++ci) {
          const SAMRAI::pdat::CellIndex &c(*ci);
          std::vector<double> xyz(dim);
          /* VLA's not allowed by clang */
          double weight[3][2];
          for(int d=0;d<dim;++d)
            xyz[d]=geom->getXLower()[d]
              + dx[d]*(c[d]-pbox.lower()[d] + 0.5);
          int ijk(0);
          std::vector<int> ddi(dim);
          for(int d=0;d<dim;++d) {
            int i=static_cast<int>(xyz[d]*(p_initial_ijk[d]-1)
                                   /(p_initial_xyz_max[d] - p_initial_xyz_min[d]));
            i=std::max(0,std::min(p_initial_ijk[d]-1,i));
            ijk+=i*di[d];
            if(i==p_initial_ijk[d]-1) {
              weight[d][0]=1;
              weight[d][1]=0;
              ddi[d]=0;
            }
            else {
              weight[d][1]=
                (xyz[d]-(i*dx_p[d] + p_initial_xyz_min[d]))/dx_p[d];
              weight[d][0]=1-weight[d][1];
              ddi[d]=di[d];
            }
          }

          if(dim==2) {
            (*p)(c)=p_initial[ijk]*weight[0][0]*weight[1][0]
              + p_initial[ijk+ddi[0]]*weight[0][1]*weight[1][0]
              + p_initial[ijk+ddi[1]]*weight[0][0]*weight[1][1]
              + p_initial[ijk+ddi[0]+ddi[1]]*weight[0][1]*weight[1][1];
          }
          else {
            (*p)(c)=p_initial[ijk]*weight[0][0]*weight[1][0]*weight[2][0]
              + p_initial[ijk+ddi[0]]*weight[0][1]*weight[1][0]*weight[2][0]
              + p_initial[ijk+ddi[1]]*weight[0][0]*weight[1][1]*weight[2][0]
              + p_initial[ijk+ddi[0]+ddi[1]]*weight[0][1]*weight[1][1]*weight[2][0]
              + p_initial[ijk+ddi[2]]*weight[0][0]*weight[1][0]*weight[2][1]
              + p_initial[ijk+ddi[0]+ddi[2]]*weight[0][1]*weight[1][0]*weight[2][1]
              + p_initial[ijk+ddi[1]+ddi[2]]*weight[0][0]*weight[1][1]*weight[2][1]
              + p_initial[ijk+ddi[0]+ddi[1]+ddi[2]]*weight[0][1]*weight[1][1]*weight[2][1];
          }
        }
      }

      boost::shared_ptr<SAMRAI::pdat::SideData<double> > v =
        boost::dynamic_pointer_cast<SAMRAI::pdat::SideData<double> >
        (patch->getPatchData(v_id));
      v->fill(0.0);
    }
    d_stokes_fac_solver.set_boundaries(p_id,v_id,level,false);
  }

  fix_viscosity();

  d_stokes_fac_solver.initializeSolverState
    (p_id,cell_viscosity_id,edge_viscosity_id,dp_id,p_rhs_id,v_id,v_rhs_id,
     d_hierarchy,0,d_hierarchy->getFinestLevelNumber());

  SAMRAI::tbox::plog << "solving..." << std::endl;
  int solver_ret;
  solver_ret = d_stokes_fac_solver.solveSystem(p_id,p_rhs_id,v_id,v_rhs_id);

  double avg_factor, final_factor;
  d_stokes_fac_solver.getConvergenceFactors(avg_factor, final_factor);
  SAMRAI::tbox::plog << "\t" << (solver_ret ? "" : "NOT ") << "converged " << "\n"
                     << " iterations: " << d_stokes_fac_solver.getNumberOfIterations() << "\n"
                     << " residual: " << d_stokes_fac_solver.getResidualNorm() << "\n"
                     << " average convergence: " << avg_factor << "\n"
                     << " final convergence: " << final_factor << "\n"
                     << std::flush;

  d_stokes_fac_solver.deallocateSolverState();

  return solver_ret;
}