void cloth::SwFactory::extractFabricData(const Fabric& fabric, Range<uint32_t> phases, Range<uint32_t> sets,
    Range<float> restvalues, Range<uint32_t> indices, Range<uint32_t> anchors, Range<float> tetherLengths) const
{
    const SwFabric& swFabric = static_cast<const SwFabric&>(fabric);

    PX_ASSERT(phases.empty() || phases.size() == swFabric.getNumPhases());
    PX_ASSERT(restvalues.empty() || restvalues.size() == swFabric.getNumRestvalues());
    PX_ASSERT(sets.empty() || sets.size() == swFabric.getNumSets());
    PX_ASSERT(indices.empty() || indices.size() == swFabric.getNumIndices());
    PX_ASSERT(anchors.empty() || anchors.size() == swFabric.getNumTethers());
    PX_ASSERT(tetherLengths.empty() || tetherLengths.size() == swFabric.getNumTethers());

    for(uint32_t i = 0; !phases.empty(); ++i, phases.popFront())
        phases.front() = swFabric.mPhases[i];

    const uint32_t* sEnd = swFabric.mSets.end(), *sIt;
    const float* rBegin = swFabric.mRestvalues.begin(), *rIt = rBegin;
    const uint16_t* iIt = swFabric.mIndices.begin();

    uint32_t* sDst = sets.begin();
    float* rDst = restvalues.begin();
    uint32_t* iDst = indices.begin();

    uint32_t numConstraints = 0;
    for(sIt = swFabric.mSets.begin(); ++sIt != sEnd; )
    {
        const float* rEnd = rBegin + *sIt;
        for(; rIt != rEnd; ++rIt)
        {
            uint16_t i0 = *iIt++;
            uint16_t i1 = *iIt++;

            if(PxMax(i0, i1) >= swFabric.mNumParticles)
                continue;

            if(!restvalues.empty())
                *rDst++ = *rIt;

            if(!indices.empty())
            {
                *iDst++ = i0;
                *iDst++ = i1;
            }

            ++numConstraints;
        }

        if(!sets.empty())
            *sDst++ = numConstraints;
    }

    for(uint32_t i = 0; !anchors.empty(); ++i, anchors.popFront())
        anchors.front() = swFabric.mTethers[i].mAnchor;

    for(uint32_t i = 0; !tetherLengths.empty(); ++i, tetherLengths.popFront())
        tetherLengths.front() = swFabric.mTethers[i].mLength * swFabric.mTetherLengthScale;
}
void ClothImpl<SwCloth>::setPhaseConfig(Range<const PhaseConfig> configs)
{
    mCloth.mPhaseConfigs.resize(0);

    // transform phase config to use in solver
    for(; !configs.empty(); configs.popFront())
        if(configs.front().mStiffness > 0.0f)
            mCloth.mPhaseConfigs.pushBack(transform(configs.front()));

    mCloth.wakeUp();
}
void test_tree_readwrite()
{
  ErrorCode err;
  Tag tag;
  Core mb;
  AdaptiveKDTree tool(&mb);
  EntityHandle root = create_tree( tool, DEPTH, INTERVALS, &tag );

  // write to file
  err = mb.write_file( "tree.h5m" );
  CHECK_ERR(err);

  // clear everything
  mb.delete_mesh();

  // read tree from file
  err = mb.load_file( "tree.h5m" );
  remove( "tree.h5m" );
  CHECK_ERR(err);

  // get tag handle by name, because the handle may have changed
  err = mb.tag_get_handle( TAG_NAME, 1, MB_TYPE_INTEGER, tag );
  CHECK_ERR(err);

  // get root handle for tree
  Range range;
  err = tool.find_all_trees( range );
  assert(!err);
  assert(range.size() == 1);
  root = range.front(); // first (only) handle

  validate_tree( tool, root, DEPTH, INTERVALS, tag );
}
bool ObjectValueMap::findZoneEdges()
{
    /*
     * For unmarked weakmap keys with delegates in a different zone, add a zone
     * edge to ensure that the delegate zone finishes marking before the key
     * zone.
     */
    JS::AutoAssertNoGC nogc;
    Zone *mapZone = compartment->zone();
    for (Range r = all(); !r.empty(); r.popFront()) {
        JSObject *key = r.front().key();
        if (key->isMarked(BLACK) && !key->isMarked(GRAY))
            continue;
        JSWeakmapKeyDelegateOp op = key->getClass()->ext.weakmapKeyDelegateOp;
        if (!op)
            continue;
        JSObject *delegate = op(key);
        if (!delegate)
            continue;
        Zone *delegateZone = delegate->zone();
        if (delegateZone == mapZone)
            continue;
        if (!delegateZone->gcZoneGroupEdges.put(key->zone()))
            return false;
    }
    return true;
}
bool ObjectValueMap::findZoneEdges()
{
    /*
     * For unmarked weakmap keys with delegates in a different zone, add a zone
     * edge to ensure that the delegate zone finishes marking before the key
     * zone.
     */
    JS::AutoSuppressGCAnalysis nogc;
    for (Range r = all(); !r.empty(); r.popFront()) {
        JSObject* key = r.front().key();
        if (key->asTenured().isMarked(BLACK) && !key->asTenured().isMarked(GRAY))
            continue;
        JSWeakmapKeyDelegateOp op = key->getClass()->extWeakmapKeyDelegateOp();
        if (!op)
            continue;
        JSObject* delegate = op(key);
        if (!delegate)
            continue;
        Zone* delegateZone = delegate->zone();
        if (delegateZone == zone)
            continue;
        if (!delegateZone->gcZoneGroupEdges.put(key->zone()))
            return false;
    }
    return true;
}
bool ObjectValueMap::findZoneEdges()
{
    /*
     * For unmarked weakmap keys with delegates in a different zone, add a zone
     * edge to ensure that the delegate zone finishes marking before the key
     * zone.
     */
    JS::AutoSuppressGCAnalysis nogc;
    for (Range r = all(); !r.empty(); r.popFront()) {
        JSObject* key = r.front().key();
        if (key->asTenured().isMarkedBlack()) {
            continue;
        }
        JSObject* delegate = getDelegate(key);
        if (!delegate) {
            continue;
        }
        Zone* delegateZone = delegate->zone();
        if (delegateZone == zone() || !delegateZone->isGCMarking()) {
            continue;
        }
        if (!delegateZone->gcSweepGroupEdges().put(key->zone())) {
            return false;
        }
    }
    return true;
}
bool add_data(T v)
{
    MSS_BEGIN(bool);
    Range range;
    MSS(add(range, 1));
    range.front() = v;
    MSS_END();
}
bool contains(Range range, Predicate predicate)
{
    while (!range.empty())
    {
        if (predicate(range.front()))
            return true;
        else
            range.step_front();
    }
    return false;
}
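// Usage sketch for the contains() helper above, assuming it is a template over
// Range and Predicate and that Range is any type exposing empty(), front() and
// step_front(). The MinimalRange wrapper below is hypothetical, written only
// to illustrate the interface the helper relies on.
#include <cstddef>

struct MinimalRange
{
    const int* ptr;
    std::size_t count;

    bool empty() const { return count == 0; }
    int front() const { return *ptr; }
    void step_front() { ++ptr; --count; }
};

bool contains_example()
{
    static const int values[] = { 2, 4, 7, 8 };
    MinimalRange r = { values, 4 };

    // true: 7 satisfies the "is odd" predicate
    return contains(r, [](int v) { return v % 2 != 0; });
}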
void test(Range r)
{
    std::pair<Range, Range> p = std::minmax_element(r);
    if (!r.empty())
    {
        for (Range j = r; !j.empty(); j.pop_front())
        {
            assert(!(j.front() < p.first.front()));
            assert(!(p.second.front() < j.front()));
        }
    }
    else
    {
        assert(p.first.empty());
        assert(p.second.empty());
    }
}
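// The test above exercises a range-returning minmax_element. For comparison,
// a minimal sketch using the standard iterator-based std::minmax_element; the
// checks mirror the range-based assertions (no element compares less than the
// minimum or greater than the maximum).
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

void test_minmax_iterators(const std::vector<int>& v)
{
    if (v.empty())
        return;

    std::pair<std::vector<int>::const_iterator,
              std::vector<int>::const_iterator> p =
        std::minmax_element(v.begin(), v.end());

    for (std::vector<int>::const_iterator it = v.begin(); it != v.end(); ++it)
    {
        assert(!(*it < *p.first));
        assert(!(*p.second < *it));
    }
}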
ErrorCode MeshTag::remove_data(SequenceManager*, Error* /* error */, const Range& range)
{
  if (range.empty())
    return MB_SUCCESS;
  else
    return not_root_set(get_name(), range.front());
}
void AttributeStorage::trace(JSTracer* tracer)
{
    Range r = mStore.all();
    while (!r.empty()) {
        r.front().value.trace(tracer);
        r.popFront();
    }
}
void AccessorStorage::trace(JSTracer* tracer)
{
    Range r = mStore.all();
    while (!r.empty()) {
        r.front().value.data.trace(tracer);
        r.popFront();
    }
}
void test_list_set_with_stale_handles()
{
  Core moab;
  Interface& mb = moab;
  ErrorCode rval;

  Range verts;
  const int num_vtx = 40;
  std::vector<double> coords( 3*num_vtx, 0.0 );
  rval = mb.create_vertices( &coords[0], num_vtx, verts );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_vtx, (int)verts.size() );

  EntityHandle set;
  rval = mb.create_meshset( MESHSET_ORDERED, set );
  CHECK_ERR(rval);
  rval = mb.add_entities( set, verts );
  CHECK_ERR(rval);

  std::vector<EntityHandle> dead_verts;
  for (int i = num_vtx/4; i < num_vtx; i += num_vtx/4 ) {
    Range::iterator j = verts.begin();
    j += i;
    dead_verts.push_back( *j );
  }
  rval = mb.delete_entities( &dead_verts[0], dead_verts.size() );
  CHECK_ERR(rval);

  Core moab2;
  Interface& mb2 = moab2;
  EntityHandle file_set;
  read_write_file( mb, mb2, &file_set );

  Range sets;
  rval = mb2.get_entities_by_type( 0, MBENTITYSET, sets );
  CHECK_ERR(rval);
  CHECK_EQUAL( 2, (int)sets.size() );
  EntityHandle other_set = sets.front() == file_set ? sets.back() : sets.front();

  std::vector<EntityHandle> list;
  rval = mb2.get_entities_by_handle( other_set, list );
  CHECK_ERR(rval);
  CHECK_EQUAL( verts.size() - dead_verts.size(), list.size() );
}
ErrorCode MeshTag::get_data(const SequenceManager*, Error* /* error */, const Range& range, const void**, int*) const
{
  if (range.empty())
    return MB_SUCCESS;
  else
    return not_root_set(get_name(), range.front());
}
std::vector<Pipeline> createGraphicsPipelines(
    const Range<std::reference_wrapper<GraphicsPipelineBuilder>>& builder,
    vk::PipelineCache cache)
{
    if(builder.empty())
        return {};

    std::vector<vk::GraphicsPipelineCreateInfo> infos;
    infos.reserve(builder.size());

    for(auto& b : builder)
        infos.push_back(b.get().parse());

    return createGraphicsPipelines(builder.front().get().shader.device(), infos, cache);
}
ErrorCode MeshTag::get_data(const SequenceManager*, Error* /* error */, const Range& r, void*) const
{
  if (variable_length()) {
    MB_SET_ERR(MB_VARIABLE_DATA_LENGTH, "No length specified for variable-length tag " << get_name() << " value");
  }
  else if (r.empty())
    return MB_SUCCESS;
  else
    return not_root_set(get_name(), r.front());
}
bool read(Reader &r, const Fields &fields_info)
{
    MSS_BEGIN(bool);

    for (auto fix = 0u; fix < fields_info.size(); ++fix)
    {
        L(C(fix));

        auto read_field = [&](auto &r1){
            MSS_BEGIN(bool);

            //Get the range where this data should be stored
            Range rng;
            MSS(add(rng, fields_info[fix].dim));

            Strange values;
            r1.text(values);
            L(C(values));

            bool all_values_could_be_added = true;
            auto add_value_to_range = [&](auto &part)
            {
                if (rng.empty())
                {
                    all_values_could_be_added = false;
                    return;
                }
                details::read(rng.front(), part);
                L(C(rng.front())C(part));
                rng.pop_front();
            };
            values.each_split(' ', add_value_to_range);

            MSS(all_values_could_be_added, std::cout << "Error: Too many values for record " << r.tag() << ", field " << fix << std::endl);
            MSS(rng.empty(), std::cout << "Error: Not enough values for record " << r.tag() << ", field " << fix << std::endl);

            MSS_END();
        };
        MSS(r(fix, read_field), std::cout << "Error: Could not find field " << fix << std::endl);
    }

    MSS(r.empty(), std::cout << "Error: Too many fields for record " << r.tag() << std::endl);

    MSS_END();
}
void test_tree( int max_depth )
{
  ErrorCode rval;
  Core moab;
  Interface& mb = moab;
  EntityHandle root;

  // create tag in which to store number for each tree node,
  // in depth-first in-order search order.
  Tag tag;
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag );
  CHECK_ERR(rval);

  // create a binary tree to a depth of 20 (about 1 million nodes)
  rval = mb.create_meshset( MESHSET_SET, root );
  CHECK_ERR(rval);
  int idx = 1;
  recursive_build_tree( max_depth, mb, tag, root, 1, idx );
  const int last_idx = idx;
  std::cerr << "Created binary tree containing " << last_idx << " nodes." << std::endl;

  std::ostringstream str;
  str << "tree-" << max_depth << ".h5m";

  // write file and read back in
  rval = mb.write_file( str.str().c_str(), 0, "BUFFER_SIZE=1024;DEBUG_BINIO" );
  CHECK_ERR(rval);
  mb.delete_mesh();
  rval = mb.load_file( str.str().c_str() );
  if (!keep_file)
    remove( str.str().c_str() );
  CHECK_ERR(rval);

  // get tree root
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag );
  CHECK_ERR(rval);
  Range roots;
  idx = 1;
  const void* vals[] = {&idx};
  rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, vals, 1, roots );
  CHECK_EQUAL( (size_t)1, roots.size() );
  root = roots.front();

  // check that tree is as we expect it
  idx = 1;
  recursive_check_tree( max_depth, mb, tag, root, 1, idx );
  CHECK_EQUAL( last_idx, idx );
}
void ClothImpl<SwCloth>::setVirtualParticles(Range<const uint32_t[4]> indices, Range<const PxVec3> weights)
{
    mCloth.mNumVirtualParticles = 0;

    // shuffle indices to form independent SIMD sets
    uint16_t numParticles = uint16_t(mCloth.mCurParticles.size());
    TripletScheduler scheduler(indices);
    scheduler.simd(numParticles, 4);

    // convert indices to byte offset
    Vec4us dummy(numParticles, uint16_t(numParticles+1), uint16_t(numParticles+2), 0);
    Vector<uint32_t>::Type::ConstIterator sIt = scheduler.mSetSizes.begin();
    Vector<uint32_t>::Type::ConstIterator sEnd = scheduler.mSetSizes.end();
    TripletScheduler::ConstTripletIter tIt = scheduler.mTriplets.begin(), tLast;
    mCloth.mVirtualParticleIndices.resize(0);
    mCloth.mVirtualParticleIndices.reserve(indices.size() + 3 * uint32_t(sEnd - sIt));
    for(; sIt != sEnd; ++sIt)
    {
        uint32_t setSize = *sIt;
        for(tLast = tIt + setSize; tIt != tLast; ++tIt, ++mCloth.mNumVirtualParticles)
            mCloth.mVirtualParticleIndices.pushBack(Vec4us(*tIt));
        mCloth.mVirtualParticleIndices.resize(
            (mCloth.mVirtualParticleIndices.size() + 3) & ~3, dummy);
    }
    Vector<Vec4us>::Type(mCloth.mVirtualParticleIndices.begin(),
        mCloth.mVirtualParticleIndices.end()).swap(mCloth.mVirtualParticleIndices);

    // precompute 1/dot(w,w)
    Vec4fAlignedVector().swap(mCloth.mVirtualParticleWeights);
    mCloth.mVirtualParticleWeights.reserve(weights.size());
    for(; !weights.empty(); weights.popFront())
    {
        PxVec3 w = reinterpret_cast<const PxVec3&>(weights.front());
        PxReal scale = 1 / w.magnitudeSquared();
        mCloth.mVirtualParticleWeights.pushBack(PxVec4(w.x, w.y, w.z, scale));
    }

    mCloth.notifyChanged();
}
inline bool range_ok(Range const& range, Point& centroid)
{
    std::size_t const n = boost::size(range);
    if (n > 1)
    {
        return true;
    }
    else if (n <= 0)
    {
#if defined(CENTROID_WITH_CATCH)
        throw centroid_exception();
#endif
        return false;
    }
    else // if (n == 1)
    {
        // Take over the first point in a "coordinate neutral way"
        copy_coordinates(range.front(), centroid);
        return false;
    }
    return true;
}
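// Behavioral sketch of the three cases handled above, using hypothetical
// PointXY / copy_coordinates_stub stand-ins (the real code works on generic
// Point types via boost::size and copy_coordinates): an empty range yields
// false (optionally throwing), a single point is copied straight into the
// centroid and also yields false, and only two or more points return true so
// the caller proceeds with the full centroid accumulation.
#include <cassert>
#include <cstddef>
#include <vector>

struct PointXY { double x, y; };

static void copy_coordinates_stub(const PointXY& from, PointXY& to) { to = from; }

template <typename RangeT>
bool range_ok_sketch(RangeT const& range, PointXY& centroid)
{
    std::size_t const n = range.size();
    if (n > 1)
        return true;    // enough points for a real centroid computation
    if (n == 0)
        return false;   // nothing to do (the real code may throw here)
    copy_coordinates_stub(range.front(), centroid); // a single point is its own centroid
    return false;
}

void range_ok_example()
{
    PointXY c = { 0.0, 0.0 };
    std::vector<PointXY> none;
    std::vector<PointXY> one(1, PointXY{ 1.0, 2.0 });
    std::vector<PointXY> two(2);

    assert(!range_ok_sketch(none, c));
    assert(!range_ok_sketch(one, c) && c.x == 1.0 && c.y == 2.0);
    assert(range_ok_sketch(two, c));
}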
void test_var_length_big_data()
{
  ErrorCode rval;
  Core moab1, moab2;
  Interface &mb1 = moab1, &mb2 = moab2;
  Tag tag;

  create_mesh( mb1 );
  rval = mb1.tag_get_handle( "test_tag", 0, MB_TYPE_DOUBLE, tag, MB_TAG_SPARSE|MB_TAG_VARLEN|MB_TAG_EXCL );
  CHECK_ERR( rval );

  // choose 3 vertices upon which to set data
  Range range;
  rval = mb1.get_entities_by_type( 0, MBVERTEX, range );
  CHECK_ERR(rval);
  EntityHandle verts[3] = { range.front(),
                            *(range.begin() += range.size()/3),
                            *(range.begin() += 2*range.size()/3) };

  // set 1-million value tag data on three vertices
  std::vector<double> data(1000000);
  for (int i = 0; i < 3; ++i) {
    calculate_big_value( mb1, verts[i], data.size(), &data[0] );
    const void* ptr = &data[0];
    const int size = data.size();
    rval = mb1.tag_set_by_ptr( tag, verts + i, 1, &ptr, &size );
    CHECK_ERR(rval);
  }

  read_write( "test_var_length_big_data.h5m", mb1, mb2 );
  compare_tags( "test_tag", mb1, mb2 );

  // check 3 tagged vertices
  rval = mb2.tag_get_handle( "test_tag", 0, MB_TYPE_DOUBLE, tag );
  CHECK_ERR(rval);
  range.clear();
  rval = mb2.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, range, Interface::UNION );
  CHECK_ERR(rval);
  CHECK_EQUAL( (size_t)3, range.size() );

  // check tag values
  for (Range::const_iterator i = range.begin(); i != range.end(); ++i) {
    // calculate expected value
    const EntityHandle h = *i;
    calculate_big_value( mb2, h, data.size(), &data[0] );

    // get actual value
    const void* ptr;
    int size;
    rval = mb2.tag_get_by_ptr( tag, &h, 1, &ptr, &size );
    CHECK_ERR(rval);
    CHECK_EQUAL( data.size(), (size_t)size );

    // compare values
    const double* act_data = reinterpret_cast<const double*>(ptr);
    int wrong_count = 0;
    for (size_t j = 0; j < data.size(); ++j)
      if (act_data[j] != data[j])
        ++wrong_count;
    CHECK_EQUAL( 0, wrong_count );
  }
}
/*
ErrorCode ReadHDF5VarLen::read_offsets( ReadHDF5Dataset& data_set,
                                        const Range& file_ids,
                                        EntityHandle start_file_id,
                                        unsigned num_columns,
                                        const unsigned indices[],
                                        EntityHandle nudge,
                                        Range offsets_out[],
                                        std::vector<unsigned> counts_out[],
                                        Range* ranged_file_ids = 0 )
{
  const int local_index = 1;

    // sanity check
  const unsigned max_cols = ranged_file_ids ? data_set.columns() - 1 : data_set.columns();
  for (unsigned i = 0; i < num_columns; ++i) {
    assert(indices[i] >= max_cols);
    if (indices[i] >= max_cols)
      return MB_FAILURE;
  }

    // Use hints to make sure insertion into ranges is O(1)
  std::vector<Range::iterator> hints;
  if (ranged_file_ids) {
    hints.resize( num_columns + 1 );
    hints.back() = ranged_file_ids->begin();
  }
  else {
    hints.resize( num_columns );
  }
  for (unsigned i = 0; i < num_columns; ++i) {
    offsets_out[i].clear();
    counts_out[i].clear();
    counts_out[i].reserve( file_ids.size() );
    hints[i] = offsets_out[i].begin();
  }

    // If we only need one column from a multi-column data set,
    // then read only that column.
  if (num_columns == 1 && data_set.columns() > 1 && !ranged_file_ids) {
    data_set.set_column( indices[0] );
    indices = &local_index;
  }
  else if (ranged_file_ids && data_set.columns() > 1 && 0 == num_columns) {
    data_set.set_column( data_set.columns() - 1 );
  }
    // NOTE: do not move this above the previous block.
    //       The previous block changes the results of data_set.columns()!
  const size_t table_columns = data_set.columns();

    // Calculate which rows we need to read from the offsets table
  Range rows;
  Range::iterator hint = rows.begin();
  Range::const_pair_iterator pair = file_ids.const_pair_begin();
    // special case if reading first entity in dataset, because
    // there is no previous end value.
  if (pair != file_ids.const_pair_end() && pair->first == start_file_id)
    hint = rows.insert( nudge, pair->second - start_file_id + nudge );
  while (pair != file_ids.const_pair_end()) {
    hint = rows.insert( hint,
                        pair->first + nudge - 1 - start_file_id,
                        pair->second + nudge - start_file_id );
    ++pair;
  }

    // set up read of offsets dataset
  hsize_t buffer_size = bufferSize / (sizeof(hssize_t) * data_set.columns());
  hssize_t* buffer = reinterpret_cast<hssize_t*>(dataBuffer);
  data_set.set_file_ids( rows, nudge, buffer_size, H5T_NATIVE_HSSIZE );
  std::vector<hssize_t> prev_end;
    // If we're reading the first row of the table, then the
    // previous end is implicitly -1.
  if (!file_ids.empty() && file_ids.front() == start_file_id)
    prev_end.resize(num_columns,-1);

    // read offset table
  size_t count, offset;
  Range::const_iterator fiter = file_ids.begin();
  while (!data_set.done()) {
    try {
      data_set.read( buffer, count );
    }
    catch (ReadHDF5Dataset::Exception e) {
      return MB_FAILURE;
    }
    if (!count) // might have been NULL read for collective IO
      continue;

      // If the previous end values were read in the previous iteration,
      // then they're stored in prev_end.
    size_t offset = 0;
    if (!prev_end.empty()) {
      for (unsigned i = 0; i < num_columns; ++i) {
        counts_out[i].push_back( buffer[indices[i]] - prev_end[i] );
        hints[i] = offsets_out[i].insert( hints[i],
                                          prev_end[i] + 1 + nudge,
                                          buffer[indices[i]] + nudge );
      }
      if (ranged_file_ids && (buffer[table_columns-1] & mhdf_SET_RANGE_BIT))
        hints.back() = ranged_file_ids->insert( hints.back(), *fiter );
      ++fiter;
      offset = 1;
      prev_end.clear();
    }

    while (offset < count) {
      assert(fiter != file_ids.end());
        // whenever we get to a gap between blocks we need to
        // advance one step because we read an extra end id
        // preceding each block
      if (fiter == fiter.start_of_block()) {
        if (offset == count-1)
          break;
        ++offset;
      }

      for (unsigned i = 0; i < num_columns; ++i) {
        size_t s = buffer[(offset-1)*table_columns+indices[i]] + 1;
        size_t e = buffer[ offset   *table_columns+indices[i]];
        counts_out.push_back( e - s + 1 );
        hints[i] = offsets_out.insert( hints[i], s, e );
      }
      if (ranged_file_ids && (buffer[offset*table_columns+table_columns-1] & mhdf_SET_RANGE_BIT))
        hints.back() = ranged_file_ids->insert( hints.back(), *fiter );

      ++fiter;
      ++offset;
    }

      // If we did not end on the boundary between two blocks,
      // then we need to save the end indices for the final entry
      // for use in the next iteration.  Similarly, if we ended
      // with extra values that were read with the express intention
      // of getting the previous end values for a block, we need to
      // save them.  This case only arises if we hit the break in
      // the above loop.
    if (fiter != fiter.start_of_block() || offset < count) {
      assert(prev_end.empty());
      if (offset == count) {
        --offset;
        assert(fiter != fiter.start_of_block());
      }
      else {
        assert(offset+1 == count);
        assert(fiter == fiter.start_of_block());
      }
      for (unsigned i = 0; i < num_columns; ++i)
        prev_end.push_back(buffer[offset*table_columns+indices[i]]);
    }
  }

  assert(prev_end.empty());
  assert(fiter == file_ids.end());
  return MB_SUCCESS;
}
*/

ErrorCode ReadHDF5VarLen::read_offsets( ReadHDF5Dataset& data_set,
                                        const Range& file_ids,
                                        EntityHandle start_file_id,
                                        EntityHandle nudge,
                                        Range& offsets_out,
                                        std::vector<unsigned>& counts_out )
{
    // Use hints to make sure insertion into ranges is O(1)
  offsets_out.clear();
  counts_out.clear();
  counts_out.reserve( file_ids.size() );
  Range::iterator hint;

    // Calculate which rows we need to read from the offsets table
  Range rows;
  hint = rows.begin();
  Range::const_pair_iterator pair = file_ids.const_pair_begin();
    // special case if reading first entity in dataset, because
    // there is no previous end value.
  if (pair != file_ids.const_pair_end() && pair->first == start_file_id) {
    hint = rows.insert( nudge, pair->second - start_file_id + nudge );
    ++pair;
  }
  while (pair != file_ids.const_pair_end()) {
    hint = rows.insert( hint,
                        pair->first  - start_file_id + nudge - 1,
                        pair->second - start_file_id + nudge );
    ++pair;
  }

    // set up read of offsets dataset
  hsize_t buffer_size = bufferSize / sizeof(hssize_t);
  hssize_t* buffer = reinterpret_cast<hssize_t*>(dataBuffer);
  data_set.set_file_ids( rows, nudge, buffer_size, H5T_NATIVE_HSSIZE );
  hssize_t prev_end;
  bool have_prev_end = false;
    // If we're reading the first row of the table, then the
    // previous end is implicitly -1.
  if (!file_ids.empty() && file_ids.front() == start_file_id) {
    prev_end = -1;
    have_prev_end = true;
  }

  dbgOut.printf( 3, "Reading %s in %lu chunks\n", data_set.get_debug_desc(), data_set.get_read_count() );

    // read offset table
  size_t count, offset;
  Range::const_iterator fiter = file_ids.begin();
  hint = offsets_out.begin();
  int nn = 0;
  while (!data_set.done()) {
    dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data_set.get_debug_desc() );
    try {
      data_set.read( buffer, count );
    }
    catch (ReadHDF5Dataset::Exception& ) {
      return MB_FAILURE;
    }
    if (!count) // might have been NULL read for collective IO
      continue;

      // If the previous end values were read in the previous iteration,
      // then they're stored in prev_end.
    offset = 0;
    if (have_prev_end) {
      counts_out.push_back( buffer[0] - prev_end );
      hint = offsets_out.insert( hint,
                                 prev_end + 1 + nudge,
                                 buffer[0] + nudge );
      ++fiter;
      offset = 1;
      have_prev_end = false;
    }

    while (offset < count) {
      assert(fiter != file_ids.end());
        // whenever we get to a gap between blocks we need to
        // advance one step because we read an extra end id
        // preceding each block
      if (fiter == fiter.start_of_block()) {
        if (offset == count-1)
          break;
        ++offset;
      }

      size_t s = buffer[offset-1] + 1;
      size_t e = buffer[offset];
      counts_out.push_back( e - s + 1 );
      hint = offsets_out.insert( hint, s + nudge, e + nudge );

      ++fiter;
      ++offset;
    }

      // If we did not end on the boundary between two blocks,
      // then we need to save the end indices for the final entry
      // for use in the next iteration.  Similarly, if we ended
      // with extra values that were read with the express intention
      // of getting the previous end values for a block, we need to
      // save them.  This case only arises if we hit the break in
      // the above loop.
    if (fiter != fiter.start_of_block() || offset < count) {
      assert(!have_prev_end);
      if (offset == count) {
        --offset;
        assert(fiter != fiter.start_of_block());
      }
      else {
        assert(offset+1 == count);
        assert(fiter == fiter.start_of_block());
      }
      have_prev_end = true;
      prev_end = buffer[offset];
    }
  }

  assert(!have_prev_end);
  assert(fiter == file_ids.end());
  return MB_SUCCESS;
}
byte_t extract_one_byte(Range &r)
{
    assert_bytes_remaining(r, 1);
    byte_t const b = r.front();
    r.advance_begin(1);
    return b;
}
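// Usage sketch, assuming Range behaves like boost::iterator_range over a byte
// buffer and byte_t is an unsigned char; assert_bytes_remaining is the
// snippet's own helper and is replaced here with a plain size check.
#include <boost/range/iterator_range.hpp>
#include <cassert>

typedef unsigned char byte_t;
typedef boost::iterator_range<const byte_t*> ByteRange;

byte_t extract_one_byte_sketch(ByteRange& r)
{
    assert(r.size() >= 1);      // stand-in for assert_bytes_remaining(r, 1)
    byte_t const b = r.front(); // peek the first unread byte
    r.advance_begin(1);         // consume it
    return b;
}

void extract_example()
{
    static const byte_t buffer[] = { 0x01, 0x02, 0x03 };
    ByteRange r(buffer, buffer + sizeof(buffer));

    byte_t first = extract_one_byte_sketch(r);   // 0x01
    byte_t second = extract_one_byte_sketch(r);  // 0x02
    assert(first == 0x01 && second == 0x02 && r.size() == 1);
}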
// Test to reproduce bug reported by brandom smith on 2011-3-7
// and test other possible issues with the somewhat inconsistent
// meshset creation flags.  Bug was fixed in SVN revision 4548.
void test_set_flags()
{
  const char filename2[] = "test_set_flags.h5m";
  ErrorCode rval;
  Core core;
  Interface& mb = core;

  // create a bunch of vertices so we have something to put in sets
  const int nverts = 20;
  double coords[3*nverts] = {0.0};
  Range verts;
  rval = mb.create_vertices( coords, nverts, verts );
  CHECK_ERR(rval);

  // Assign IDs to things so that we can identify them in the
  // data we read back in.
  Tag tag;
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag );
  CHECK_ERR(rval);
  int ids[nverts];
  for (int i = 0; i < nverts; ++i)
    ids[i] = i+1;
  rval = mb.tag_set_data( tag, verts, ids );
  CHECK_ERR(rval);

  // define two lists of vertex ids corresponding to the
  // vertices that we are going to put into different sets
  const int set_verts1[] = { 1, 2, 3, 4, 8, 13, 14, 15 };
  const int set_verts2[] = { 3, 9, 10, 11, 12, 13, 14, 15, 16, 17 };
  const int num_verts1 = sizeof(set_verts1)/sizeof(set_verts1[0]);
  const int num_verts2 = sizeof(set_verts2)/sizeof(set_verts2[0]);

  // convert to handle lists
  EntityHandle set_handles1[num_verts1], set_handles2[num_verts2];
  for (int i = 0; i < num_verts1; ++i)
    set_handles1[i] = *(verts.begin() + set_verts1[i] - 1);
  for (int i = 0; i < num_verts2; ++i)
    set_handles2[i] = *(verts.begin() + set_verts2[i] - 1);

  // now create some sets with different flag combinations
  EntityHandle sets[6];
  rval = mb.create_meshset( 0, sets[0] );
  rval = mb.create_meshset( MESHSET_TRACK_OWNER, sets[1] );
  rval = mb.create_meshset( MESHSET_SET, sets[2] );
  rval = mb.create_meshset( MESHSET_SET|MESHSET_TRACK_OWNER, sets[3] );
  rval = mb.create_meshset( MESHSET_ORDERED, sets[4] );
  rval = mb.create_meshset( MESHSET_ORDERED|MESHSET_TRACK_OWNER, sets[5] );

  // assign IDs to sets so that we can identify them later
  rval = mb.tag_set_data( tag, sets, 6, ids );
  CHECK_ERR(rval);

  // add entities to sets
  rval = mb.add_entities( sets[0], set_handles1, num_verts1 );
  CHECK_ERR(rval);
  rval = mb.add_entities( sets[1], set_handles2, num_verts2 );
  CHECK_ERR(rval);
  rval = mb.add_entities( sets[2], set_handles1, num_verts1 );
  CHECK_ERR(rval);
  rval = mb.add_entities( sets[3], set_handles2, num_verts2 );
  CHECK_ERR(rval);
  rval = mb.add_entities( sets[4], set_handles1, num_verts1 );
  CHECK_ERR(rval);
  rval = mb.add_entities( sets[5], set_handles2, num_verts2 );
  CHECK_ERR(rval);

  // now write the file and read it back in
  rval = mb.write_file( filename2, 0, "BUFFER_SIZE=1024;DEBUG_BINIO" );
  CHECK_ERR(rval);
  mb.delete_mesh();
  rval = mb.load_file( filename2 );
  if (!keep_file)
    remove( filename2 );
  CHECK_ERR(rval);
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag );
  CHECK_ERR(rval);

  // find our sets
  Range tmp;
  for (int i = 0; i < 6; ++i) {
    int id = i+1;
    tmp.clear();
    const void* vals[] = {&id};
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, vals, 1, tmp );
    CHECK_ERR(rval);
    CHECK_EQUAL( 1u, (unsigned)tmp.size() );
    sets[i] = tmp.front();
  }

  // check that sets have correct flags
  unsigned opts;
  rval = mb.get_meshset_options( sets[0], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( 0u, opts );
  rval = mb.get_meshset_options( sets[1], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_TRACK_OWNER, opts );
  rval = mb.get_meshset_options( sets[2], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_SET, opts );
  rval = mb.get_meshset_options( sets[3], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)(MESHSET_SET|MESHSET_TRACK_OWNER), opts );
  rval = mb.get_meshset_options( sets[4], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_ORDERED, opts );
  rval = mb.get_meshset_options( sets[5], opts );
  CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)(MESHSET_ORDERED|MESHSET_TRACK_OWNER), opts );

  // check that sets have correct contents
  int set_ids1[num_verts1], set_ids2[num_verts2];

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[0], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 );
  CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[1], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 );
  CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[2], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 );
  CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[3], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 );
  CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[4], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 );
  CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );

  tmp.clear();
  rval = mb.get_entities_by_handle( sets[5], tmp );
  CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 );
  CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );
}
typename RangeTypes<Range>::ElementType front() const
{
    return first.empty() ? second.front() : first.front();
}
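// Minimal sketch of the idea above: a "chained" view over two sub-ranges whose
// front() comes from the first sub-range unless it is exhausted. The IntView
// and ChainedView types are hypothetical, written only to illustrate the
// first.empty() ? second.front() : first.front() rule.
#include <cassert>

struct IntView
{
    const int* begin_;
    const int* end_;

    bool empty() const { return begin_ == end_; }
    int front() const { return *begin_; }
};

struct ChainedView
{
    IntView first;
    IntView second;

    int front() const
    {
        return first.empty() ? second.front() : first.front();
    }
};

void chained_front_example()
{
    static const int a[] = { 10, 20 };
    static const int b[] = { 30, 40 };

    ChainedView both = { { a, a + 2 }, { b, b + 2 } };
    ChainedView tail = { { a, a },     { b, b + 2 } }; // first part exhausted

    assert(both.front() == 10); // taken from the first sub-range
    assert(tail.front() == 30); // falls through to the second sub-range
}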
cloth::SwFabric::SwFabric( SwFactory& factory, uint32_t numParticles, Range<const uint32_t> phases,
    Range<const uint32_t> sets, Range<const float> restvalues, Range<const uint32_t> indices,
    Range<const uint32_t> anchors, Range<const float> tetherLengths, uint32_t id)
    : mFactory(factory),
      mNumParticles(numParticles),
      mTetherLengthScale(1.0f),
      mId(id)
{
    // should no longer be prefixed with 0
    PX_ASSERT(sets.front() != 0);

#if defined(PX_WINDOWS)
    const uint32_t kSimdWidth = 8; // avx
#elif defined(PX_X360) || defined(PX_PS3)
    const uint32_t kSimdWidth = 8; // unrolled loop
#else
    const uint32_t kSimdWidth = 4;
#endif

    // consistency check
    PX_ASSERT(sets.back() == restvalues.size());
    PX_ASSERT(restvalues.size()*2 == indices.size());
    PX_ASSERT(mNumParticles > *maxElement(indices.begin(), indices.end()));
    PX_ASSERT(mNumParticles + kSimdWidth-1 <= USHRT_MAX);

    mPhases.assign(phases.begin(), phases.end());
    mSets.reserve(sets.size() + 1);
    mSets.pushBack(0); // prefix with 0

    mOriginalNumRestvalues = uint32_t(restvalues.size());

    // pad indices for SIMD
    const uint32_t* iBegin = indices.begin(), *iIt = iBegin;
    const float* rBegin = restvalues.begin(), *rIt = rBegin;
    const uint32_t* sIt, *sEnd = sets.end();
    for(sIt = sets.begin(); sIt != sEnd; ++sIt)
    {
        const float* rEnd = rBegin + *sIt;
        const uint32_t* iEnd = iBegin + *sIt * 2;
        uint32_t numConstraints = uint32_t(rEnd - rIt);

        for(; rIt != rEnd; ++rIt)
            mRestvalues.pushBack(*rIt);

        for(; iIt != iEnd; ++iIt)
            mIndices.pushBack(uint16_t(*iIt));

        // add dummy indices to make multiple of 4
        for(; numConstraints &= kSimdWidth-1; ++numConstraints)
        {
            mRestvalues.pushBack(-FLT_MAX);
            uint32_t index = mNumParticles + numConstraints - 1;
            mIndices.pushBack(uint16_t(index));
            mIndices.pushBack(uint16_t(index));
        }

        mSets.pushBack(uint32_t(mRestvalues.size()));
    }

    // trim overallocations
    RestvalueContainer(mRestvalues.begin(), mRestvalues.end()).swap(mRestvalues);
    Vector<uint16_t>::Type(mIndices.begin(), mIndices.end()).swap(mIndices);

    // tethers
    PX_ASSERT(anchors.size() == tetherLengths.size());

    // pad to allow for direct 16 byte (unaligned) loads
    mTethers.reserve(anchors.size() + 2);

    for(; !anchors.empty(); anchors.popFront(), tetherLengths.popFront())
        mTethers.pushBack(SwTether(uint16_t(anchors.front()), tetherLengths.front()));

    mFactory.mFabrics.pushBack(this);
}
byte_t extract_one_byte(Range &r)
{
    assert(r);
    byte_t const b = r.front();
    r.advance_begin(1);
    return b;
}
bool operator()(Range<it> a, Range<it> b)
{
    return a.front() > b.front();
}
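// Illustration of why the comparator above uses operator> : with
// std::priority_queue (a max-heap), a greater-than comparison puts the range
// with the *smallest* front element on top, which is the usual arrangement for
// a k-way merge of sorted sequences. The Span type below is hypothetical and
// only mimics the front()/pop interface such a comparator needs.
#include <cassert>
#include <cstddef>
#include <queue>
#include <vector>

struct Span
{
    const int* begin_;
    const int* end_;

    bool empty() const { return begin_ == end_; }
    int front() const { return *begin_; }
    void pop_front() { ++begin_; }
};

struct FrontGreater
{
    bool operator()(const Span& a, const Span& b) const
    {
        return a.front() > b.front();
    }
};

std::vector<int> merge_sorted(std::vector<Span> inputs)
{
    std::priority_queue<Span, std::vector<Span>, FrontGreater> heap;
    for (std::size_t i = 0; i < inputs.size(); ++i)
        if (!inputs[i].empty())
            heap.push(inputs[i]);

    std::vector<int> merged;
    while (!heap.empty())
    {
        Span s = heap.top();      // span with the smallest front element
        heap.pop();
        merged.push_back(s.front());
        s.pop_front();
        if (!s.empty())
            heap.push(s);         // re-insert with its new front
    }
    return merged;
}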