// Recursively walks a D3DX frame hierarchy and wraps every mesh container in
// an Atomic (geometry + frame pair) that is added to 'clump'.
// NOTE(review): the while-loop advances by overwriting f->pMeshContainer, so
// the frame's mesh-container list is consumed by this pass — confirm the
// D3DXFRAME hierarchy is not reused afterwards.
void XAsset::createD3HierarchyClasses(Clump* clump, D3DXFRAME* frame)
{
    Frame* f = static_cast<Frame*>( frame );
    // Wrap each mesh container attached to this frame.
    while( f->pMeshContainer )
    {
        Mesh* mesh = static_cast<Mesh*>( f->pMeshContainer );
        const char* geometryName = mesh->Name;
        // D3DX allows unnamed mesh containers; substitute a placeholder.
        if( !geometryName ) geometryName = "NamelessGeometry";
        Geometry* geometry = new Geometry( mesh, geometryName );
        Atomic* atomic = new Atomic;
        atomic->setGeometry( geometry );
        atomic->setFrame( f );
        clump->add( atomic );
        f->pMeshContainer = f->pMeshContainer->pNextMeshContainer;
    }
    // Depth-first recursion: a child inherits this frame as its parent...
    if( f->pFrameFirstChild )
    {
        static_cast<Frame*>( f->pFrameFirstChild )->pParentFrame = f;
        createD3HierarchyClasses( clump, f->pFrameFirstChild );
    }
    // ...while a sibling shares this frame's parent.
    if( f->pFrameSibling )
    {
        static_cast<Frame*>( f->pFrameSibling )->pParentFrame = f->pParentFrame;
        createD3HierarchyClasses( clump, f->pFrameSibling );
    }
}
// Per-atomic callback: attaches the BSP world (passed opaquely via 'data') to
// the concrete Atomic and triggers its update. Returns the atomic unchanged so
// iteration can continue.
// Fix: the dynamic_cast result was dereferenced without a null check — an
// IAtomic implementation other than Atomic would have crashed here. Guard the
// cast and skip the update for foreign implementations.
engine::IAtomic* BSP::setAtomicWorldCB(engine::IAtomic* atomic, void* data)
{
    Atomic* a = dynamic_cast<Atomic*>( atomic );
    if( a )
    {
        a->_bsp = data;
        a->onUpdate();
    }
    return atomic;
}
// Swap() must store the new value and hand back the one it replaced.
BOOST_AUTO_TEST_CASE_TEMPLATE(SwapTest, T, AtomicSwapTypes)
{
    Atomic<T> value = 102;
    const T previous = value.Swap(T(123));
    BOOST_CHECK_EQUAL(previous, T(102));
    BOOST_CHECK_EQUAL(value, T(123));
}
// Splits this triangular region into N sub-regions and pushes each, wrapped as
// an Atomic<Triangle> with a fresh descendant processor, onto 'Offspring'.
// Supported: N == 2 (bisect across the direction with the largest fourth-order
// difference) and N == 4 (uniform subdivision). Any other N reports a fatal
// Error — NOTE(review): if Error() returns, the loop below would pop from an
// empty 'Parts' stack; presumably Error aborts — confirm.
void Triangle_Processor::Divide(unsigned int N, Stack<AtomicRegion>& Offspring)
{
  Stack<Triangle> Parts;
  Vector<unsigned int> DiffOrder(Diffs.Size());
  real NewVolume;
  switch(N)
  {
  case 2:
    {
    // Each half keeps half the parent volume.
    NewVolume = Geometry().Volume()/2;
    TheRule->ComputeDiffs(LocalIntegrand(),Geometry(),Diffs);
    // Sort the differences in descending order.
    // (selection sort over the three direction indices)
    for (unsigned int ik=0 ; ik<=2 ; ik++)
    {
      DiffOrder[ik] = ik;
    }
    for (unsigned int i=0 ; i<=1 ; i++)
    {
      for (unsigned int k=i+1 ; k<=2 ; k++)
        if (Diffs[DiffOrder[k]]>Diffs[DiffOrder[i]])
        {
          unsigned int h = DiffOrder[i];
          DiffOrder[i] = DiffOrder[k];
          DiffOrder[k] = h;
        }
    }
    TheDivisor2->Apply (Geometry(),Parts,DiffOrder);
    break;
    }
  case 4:
    {
    NewVolume = Geometry().Volume()/4;
    // Uniform subdivision ignores the difference ordering.
    TheDivisor4->Apply (Geometry(),Parts,DiffOrder);
    break;
    }
  default:
    {
    NewVolume = Geometry().Volume()/N;
    Error(True,"This kind of subdivision is not implemented");
    }
  }
  // Wrap each part in an Atomic region sharing this region's integrand.
  for (unsigned int i =0;i<N;i++)
  {
    Triangle* g = Parts.Pop();
    g->Volume(NewVolume);
    Processor<Triangle>* p = Descendant();
    Atomic<Triangle>* a = new Atomic<Triangle>(g,p);
    a->LocalIntegrand(&LocalIntegrand());
    Offspring.Push(a);
  };
  return;
}
// Xor() returns the pre-XOR value; 0x48 ^ 0x28 == 0x60.
BOOST_AUTO_TEST_CASE_TEMPLATE(XorTest, T, AtomicXorTypes)
{
    Atomic<T> value = 0x48;
    const T previous = value.Xor(T(0x28));
    BOOST_CHECK_EQUAL(previous, T(0x48));
    BOOST_CHECK_EQUAL(value, T(0x60));
}
// Or() returns the pre-OR value; 0x40 | 0x08 == 0x48.
BOOST_AUTO_TEST_CASE_TEMPLATE(OrTest, T, AtomicOrTypes)
{
    Atomic<T> value = 0x40;
    const T previous = value.Or(T(0x08));
    BOOST_CHECK_EQUAL(previous, T(0x40));
    BOOST_CHECK_EQUAL(value, T(0x48));
}
// And() returns the pre-AND value; 0x48 & 0x42 == 0x40.
BOOST_AUTO_TEST_CASE_TEMPLATE(AndTest, T, AtomicAndTypes)
{
    Atomic<T> value = 0x48;
    const T previous = value.And(T(0x42));
    BOOST_CHECK_EQUAL(previous, T(0x48));
    BOOST_CHECK_EQUAL(value, T(0x40));
}
// Decrement() has post-decrement semantics: it yields the old value.
BOOST_AUTO_TEST_CASE_TEMPLATE(DecrementTest, T, AtomicIncrementTypes)
{
    Atomic<T> value = 1;
    const T previous = value.Decrement();
    BOOST_CHECK_EQUAL(previous, T(1));
    BOOST_CHECK_EQUAL(value, T(0));
}
// Add() returns the value prior to the addition.
BOOST_AUTO_TEST_CASE_TEMPLATE(AddTest, T, AtomicAddTypes)
{
    Atomic<T> value = 0;
    const T previous = value.Add(T(123));
    BOOST_CHECK_EQUAL(previous, T(0));
    BOOST_CHECK_EQUAL(value, T(123));
}
// Decides whether 'instance' should be drawn this frame (visibility flag,
// TOBJ time-of-day window, draw distance, big-building LOD rules), swaps in
// the geometry for the selected LOD, and appends the draw to 'outList'.
void ObjectRenderer::renderInstance(InstanceObject* instance, RenderList& outList)
{
    const auto& atomic = instance->getAtomic();
    if (!atomic)
    {
        return;
    }
    // Only draw visible objects
    if (!instance->getVisible())
    {
        return;
    }
    auto modelinfo = instance->getModelInfo<SimpleModelInfo>();
    // Handles times provided by TOBJ data
    const auto currentHour = m_world->getHour();
    if (modelinfo->timeOff < modelinfo->timeOn)
    {
        // Off-window does not wrap midnight.
        if (currentHour >= modelinfo->timeOff && currentHour < modelinfo->timeOn)
            return;
    }
    else
    {
        // Off-window wraps midnight.
        if (currentHour >= modelinfo->timeOff || currentHour < modelinfo->timeOn)
            return;
    }
    float mindist = glm::length(instance->getPosition() - m_camera.position) / kDrawDistanceFactor;
    if (mindist > modelinfo->getLargestLodDistance())
    {
        // Beyond the largest LOD distance: nothing to draw.
        culled++;
        return;
    }
    if (modelinfo->isBigBuilding() && mindist < modelinfo->getNearLodDistance() && mindist < kMagicLODDistance)
    {
        // NOTE(review): presumably skips the big-building LOD mesh when the
        // related full-detail model is loaded (or absent) so the full model
        // is drawn instead — confirm against SimpleModelInfo::related().
        auto related = modelinfo->related();
        if (!related || related->isLoaded())
        {
            culled++;
            return;
        }
    }
    Atomic* distanceatomic = modelinfo->getDistanceAtomic(mindist / kDrawDistanceFactor);
    if (!distanceatomic)
    {
        return;
    }
    // Swap in the geometry matching the selected LOD, if it changed.
    if (atomic->getGeometry() != distanceatomic->getGeometry())
    {
        atomic->setGeometry(distanceatomic->getGeometry());
    }
    // Render the atomic the instance thinks it should be
    renderAtomic(atomic.get(), glm::mat4(), instance, outList);
}
// A CAS with a non-matching comparand must fail, leave the value untouched,
// and write the observed value back into the comparand.
BOOST_AUTO_TEST_CASE_TEMPLATE(CompareAndSwapFailTest, T, AtomicTypes)
{
    Atomic<T> value = 0;
    T expected = 123;
    const bool exchanged = value.CompareAndSwap(expected, 0);
    BOOST_CHECK(!exchanged);
    BOOST_CHECK_EQUAL(expected, T(0));
    BOOST_CHECK_EQUAL(value, T(0));
}
// A CAS with a matching comparand must succeed and install the new value,
// leaving the comparand equal to what was observed.
BOOST_AUTO_TEST_CASE_TEMPLATE(CompareAndSwapSucceedTest, T, AtomicTypes)
{
    Atomic<T> value = 0;
    T expected = 0;
    const bool exchanged = value.CompareAndSwap(expected, 123);
    BOOST_CHECK(exchanged);
    BOOST_CHECK_EQUAL(expected, T(0));
    BOOST_CHECK_EQUAL(value, T(123));
}
SharedMemory::SharedMemory()
  : mAllocSize(0)
  , mMappedSize(0)
{
  // The first construction wins the 0 -> 1 exchange and registers the two
  // shmem memory reporters; every later construction sees 1 and skips this.
  static Atomic<uint32_t> sReportersRegistered;
  if (sReportersRegistered.compareExchange(0, 1)) {
    RegisterStrongMemoryReporter(new ShmemAllocatedReporter());
    RegisterStrongMemoryReporter(new ShmemMappedReporter());
  }
}
SharedMemory::SharedMemory()
  : mAllocSize(0)
  , mMappedSize(0)
{
  MOZ_COUNT_CTOR(SharedMemory);

  // Register the shmem memory reporter exactly once, on whichever
  // construction wins the false -> true exchange.
  static Atomic<bool> sReporterRegistered;
  if (sReporterRegistered.compareExchange(false, true)) {
    RegisterStrongMemoryReporter(new ShmemReporter());
  }
}
// Strong compare-and-swap: atomically replaces _value with newval._value when
// it currently equals oldval._value. Returns true on success.
// Fixes (the old FIXME): the GCC-builtin path (a) passed a T* where
// __atomic_compare_exchange_n expects the desired *value*, and (b) let the
// builtin write the observed value back through a const_cast pointer into the
// caller's const 'oldval' on failure. Using a local copy of the expected value
// and passing the desired value directly resolves both without changing the
// function's observable contract.
inline bool Atomic<T>::cswap ( const Atomic<T> &oldval, const Atomic<T> &newval )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   // Local copy: on failure the builtin stores the observed value here,
   // leaving the caller's (const) argument untouched.
   T expected = oldval._value;
   return __atomic_compare_exchange_n( &_value, &expected, newval._value,
         /* weak */ false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE );
#else
   return __sync_bool_compare_and_swap ( &_value, oldval.value(), newval.value() );
#endif
}
// Thread body: rendezvous at the barrier, then hammer the shared atomic under
// the mutex for imax iterations, checking both counter transitions each time.
virtual int run()
{
    DBGPRINTF("%s", "blocked thread !!!");
    barrier.wait();
    DBGPRINTF("%s", "unblocked thread !!!");

    for (int iteration = 1; iteration <= imax; ++iteration)
    {
        ThreadMutexLock lock(mutex);
        MustBeTrue(atomic.increment_and_test(2));
        MustBeTrue(atomic.decrement_and_test(1));
    }

    DBGTRACE();
    return 0;
}
// Records everything reachable from 'instruction': the raw instruction
// pointer, the memory spaces touched by reads/writes, and (via the overloaded
// expression visit) all operand expressions. ATOMIC blocks are flattened by
// recursing into their member instructions.
void Census::visit(const std::shared_ptr<Instruction> &instruction)
{
    instructions_.push_back(instruction.get());
    // NOTE(review): presumably marks the census as no longer "unique" once an
    // instruction has been added — confirm the flag's exact meaning.
    unique_ = false;
    switch (instruction->mnemonic()) {
        case Instruction::READ: {
            Read *read = instruction->as<Read>();
            visit(read->reg());
            visit(read->address());
            spaces_.push_back(read->space());
            break;
        }
        case Instruction::WRITE: {
            Write *write = instruction->as<Write>();
            visit(write->value());
            visit(write->address());
            spaces_.push_back(write->space());
            break;
        }
        case Instruction::MFENCE: {
            // Fences carry no operands or spaces.
            break;
        }
        case Instruction::LOCAL: {
            Local *local = instruction->as<Local>();
            visit(local->reg());
            visit(local->value());
            break;
        }
        case Instruction::CONDITION: {
            Condition *condition = instruction->as<Condition>();
            visit(condition->expression());
            break;
        }
        case Instruction::ATOMIC: {
            Atomic *atomic = instruction->as<Atomic>();
            for (const auto &instr : atomic->instructions()) {
                visit(instr);
            }
            break;
        }
        case Instruction::NOOP: /* FALLTHROUGH */
        case Instruction::LOCK: /* FALLTHROUGH */
        case Instruction::UNLOCK:
            break;
        default: {
            assert(!"NEVER REACHED");
        }
    }
}
Error operator()(AsyncLoaderTaskContext& ctx) { if(m_count) { auto x = m_count->fetchAdd(1); if(m_id >= 0) { if(m_id != static_cast<I32>(x)) { ANKI_LOGE("Wrong excecution order"); return ErrorCode::FUNCTION_FAILED; } } } if(m_sleepTime != 0.0) { HighRezTimer::sleep(m_sleepTime); } if(m_barrier) { m_barrier->wait(); } ctx.m_pause = m_pause; ctx.m_resubmitTask = m_resubmit; m_resubmit = false; return ErrorCode::NONE; }
double testPerfAddAndFetch() { const std::size_t iters(30000); StopWatch stopWatch; double total(0); for(std::size_t iter = 0; iter != iters; ++iter) { Atomic<T> atomic; stopWatch.start(); for(std::size_t count = 0; count != testing::STANDARD_SPEED_COUNT; ++count) { atomic.addAndFetch(1); } stopWatch.stop(); total += getTimevalAsMicros(stopWatch.elapsed()); stopWatch.reset(); } return testing::calcNanosPer(total, iters); }
double testPerfFetch() { const std::size_t iters(30000); StopWatch stopWatch; double total(0); int foo(0); for(std::size_t iter = 0; iter != iters; ++iter) { Atomic<T> atomic; stopWatch.start(); for(std::size_t count = 0; count != testing::STANDARD_SPEED_COUNT; ++count) { foo += atomic.fetch(); // To ensure fetch is actually called } stopWatch.stop(); total += getTimevalAsMicros(stopWatch.elapsed()); stopWatch.reset(); } return testing::calcNanosPer(total, iters); }
double testPerfCompareAndSwap(bool success) { const std::size_t iters(30000); StopWatch stopWatch; double total(0); const T compare = success ? 0 : 1; for(std::size_t iter = 0; iter != iters; ++iter) { Atomic<T> atomic; stopWatch.start(); for(std::size_t count = 0; count != testing::STANDARD_SPEED_COUNT; ++count) { atomic.compareAndSwap(compare, compare); } stopWatch.stop(); total += getTimevalAsMicros(stopWatch.elapsed()); stopWatch.reset(); } return testing::calcNanosPer(total, iters); }
// Regression test: stop the scheduler half-way through task submission and
// restart it five iterations later; all NUM_ITERS tasks must still execute
// (each one increments the global atomic A).
int main ( int argc, char **argv )
{
   int i;
   bool check = true;
   main__loop_1_data_t _loop_data;

   A = 0;

   WD *wg = getMyThreadSafe()->getCurrentWD();
   for ( i = 0; i < NUM_ITERS; i++ ) {
      // If we're done processing half of the dataset
      if ( i == NUM_ITERS/2 ) {
         // Stop scheduler
         sys.stopScheduler();
      }
      // Work descriptor creation
      WD * wd = new WD( new SMPDD( main__loop_1 ), sizeof( _loop_data ), __alignof__(nanos_loop_info_t), ( void * ) &_loop_data );
      wd->setPriority( 100 );
      // Work Group affiliation
      wg->addWork( *wd );
      // Work submission
      sys.submit( *wd );
      if ( i == ( NUM_ITERS/2 + 5 ) ){
         // Keep going
         sys.startScheduler();
      }
   }
   // barrier (kind of)
   wg->waitCompletion();

   /*
    * How can we be sure the test passed? Each task increments A. If we run N
    * tasks, A should be equal to N.
    * If it's less than N, that'd mean the scheduler lost something.
    */
   if ( A.value() != NUM_ITERS ) check = false;

   if ( check ) {
      fprintf(stderr, "%s : %s\n", argv[0], "successful");
      return 0;
   }
   else {
      fprintf(stderr, "%s: %s\n", argv[0], "unsuccessful");
      return -1;
   }
}
// Regression test, repeated NUM_RUNS times: submit every task while the
// scheduler is stopped, then restart it; every task must eventually run
// (each one increments the global atomic A back up to NUM_ITERS).
int main ( int argc, char **argv )
{
   int i;
   bool check = true;
   main__loop_1_data_t _loop_data;

   // Repeat the test NUM_RUNS times
   for ( int testNumber = 0; testNumber < NUM_RUNS; ++testNumber ) {
      A = 0;
      WG *wg = getMyThreadSafe()->getCurrentWD();
      // Stop scheduler
      sys.stopScheduler();
      // increment variable
      for ( i = 0; i < NUM_ITERS; i++ ) {
         // Work descriptor creation
         WD * wd = new WD( new SMPDD( main__loop_1 ), sizeof( _loop_data ), __alignof__(nanos_loop_info_t), ( void * ) &_loop_data );
         wd->setPriority( 100 );
         // Work Group affiliation
         wg->addWork( *wd );
         // Work submission
         sys.submit( *wd );
      }
      // Re-enable the scheduler
      sys.startScheduler();
      // barrier (kind of)
      wg->waitCompletion();

      /*
       * The verification criteria is that A is equal to the number of tasks
       * run. Should A be lower, that would indicate that not all tasks
       * successfuly finished.
       */
      if ( A.value() != NUM_ITERS ) check = false;
   }

   if ( check ) {
      fprintf(stderr, "%s : %s\n", argv[0], "successful");
      return 0;
   }
   else {
      fprintf(stderr, "%s: %s\n", argv[0], "unsuccessful");
      return -1;
   }
}
void SignalPipeWatcher::StopWatching() { MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current()); // Close sDumpPipeWriteFd /after/ setting the fd to -1. // Otherwise we have the (admittedly far-fetched) race where we // // 1) close sDumpPipeWriteFd // 2) open a new fd with the same number as sDumpPipeWriteFd // had. // 3) receive a signal, then write to the fd. int pipeWriteFd = sDumpPipeWriteFd.exchange(-1); close(pipeWriteFd); FdWatcher::StopWatching(); }
void NS_DispatchEventualMemoryPressure(MemoryPressureState state)
{
  /*
   * A new memory pressure event erases an ongoing memory pressure, but an
   * existing "new" memory pressure event takes precedence over a new "ongoing"
   * memory pressure event.
   */
  if (state == MemPressure_Ongoing) {
    // Only record "ongoing" when nothing at all is pending; a pending "new"
    // event must not be downgraded.
    sMemoryPressurePending.compareExchange(MemPressure_None, MemPressure_Ongoing);
  } else {
    // MemPressure_None and MemPressure_New overwrite unconditionally.
    sMemoryPressurePending = state;
  }
}
// ImportAsset constructor: drives an import::IImport stream over a RenderWare
// resource and instantiates engine-side objects — textures, shaders/materials,
// frames, geometries, atomics, clumps, BSP worlds/sectors and lights — wiring
// them together through the id->object maps (_textures, _shaders, _frames,
// _geometries, _atomics, _lights, _bspM, _bspSectorM, ...).
// NOTE(review): written for a pre-standard compiler — several inner 'for'
// loops reuse the index 'i' declared by an earlier loop in the same scope
// (legacy VC++ for-scoping); do not modernize without fixing those.
// NOTE(review): the default case only asserts; in a release build an unknown
// chunk type would spin this while-loop forever — confirm intended.
ImportAsset::ImportAsset(const char* resourcePath)
{
    import::IImport* iImport = NULL;
    queryInterface( "Import", &iImport );
    import::IImportStream* iImportStream = iImport->importRws( resourcePath );
    // Consume stream chunks until the NULL terminator chunk.
    while( iImportStream->getType() != import::itNULL )
    switch( iImportStream->getType() )
    {
    case import::itTexture:
        {
        import::ImportTexture* importData = iImportStream->importTexture();
        // search for texture in current dictionary
        if( Texture::textures.find( importData->name ) == Texture::textures.end() )
        {
            // leftover debugging hook for a specific asset
            if( strcmp( importData->name, "House01" ) == 0 )
            {
                int iiii=0;
            }
            // create compatible texture
            Texture* texture = Texture::createDynamicTexture( importData->width, importData->height, importData->depth, importData->name );
            // lock texture top-level surface
            D3DLOCKED_RECT lockedRect;
            texture->iDirect3DTexture()->LockRect( 0, &lockedRect, NULL, 0 );
            // copy pixels (swapping the red/blue channels)
            int offset = 0;
            unsigned char* destPixels = (unsigned char*)(lockedRect.pBits);
            for( int i=0; i<importData->width*importData->height; i++ )
            {
                destPixels[offset + 0] = importData->pixels[offset + 2],
                destPixels[offset + 1] = importData->pixels[offset + 1],
                destPixels[offset + 2] = importData->pixels[offset + 0],
                destPixels[offset + 3] = importData->pixels[offset + 3];
                offset += 4;
            }
            texture->iDirect3DTexture()->UnlockRect( 0 );
            // setup filetering, addressing, etc
            switch( importData->addressMode )
            {
            case import::iamWrap: texture->setAddressTypeU( engine::atWrap ); texture->setAddressTypeV( engine::atWrap ); break;
            case import::iamMirror: texture->setAddressTypeU( engine::atMirror ); texture->setAddressTypeV( engine::atMirror ); break;
            case import::iamClamp: texture->setAddressTypeU( engine::atClamp ); texture->setAddressTypeV( engine::atClamp ); break;
            case import::iamBorder: texture->setAddressTypeU( engine::atBorder ); texture->setAddressTypeV( engine::atBorder ); break;
            }
            switch( importData->filterMode )
            {
            case import::ifmNearest: texture->setMagFilter( engine::ftPoint ); texture->setMinFilter( engine::ftPoint ); texture->setMipFilter( engine::ftNone ); break;
            case import::ifmLinear: texture->setMagFilter( engine::ftLinear ); texture->setMinFilter( engine::ftLinear ); texture->setMipFilter( engine::ftNone ); break;
            case import::ifmMipNearest: texture->setMagFilter( engine::ftPoint ); texture->setMinFilter( engine::ftPoint ); texture->setMipFilter( engine::ftPoint ); break;
            case import::ifmMipLinear: texture->setMagFilter( engine::ftPoint ); texture->setMinFilter( engine::ftPoint ); texture->setMipFilter( engine::ftLinear ); break;
            case import::ifmLinearMipNearest: texture->setMagFilter( engine::ftLinear ); texture->setMinFilter( engine::ftLinear ); texture->setMipFilter( engine::ftPoint ); break;
            case import::ifmLinearMipLinear: texture->setMagFilter( engine::ftLinear ); texture->setMinFilter( engine::ftLinear ); texture->setMipFilter( engine::ftLinear ); break;
            }
            // override filtering
            texture->setMagFilter( engine::ftLinear );
            texture->setMinFilter( engine::ftLinear );
            texture->setMipFilter( engine::ftLinear );
            texture->setMaxAnisotropy( 8 );
            /* // create normal map iImport->createNormalMap( importData->width, importData->height, importData->depth, importData->pixels, importData->stride, true, 5.0f ); // create compatible texture Texture* normalMap = Texture::createDynamicTexture( importData->width, importData->height, importData->depth, ( std::string( importData->name ) + "_nmap" ).c_str() ); normalMap->setMagFilter( engine::ftLinear ); normalMap->setMinFilter( engine::ftLinear ); normalMap->setMipFilter( engine::ftLinear ); // lock texture top-level surface lockedRect; normalMap->iDirect3DTexture()->LockRect( 0, &lockedRect, NULL, 0 ); // copy pixels offset = 0; destPixels = (unsigned char*)(lockedRect.pBits); for( i=0; i<importData->width*importData->height; i++ ) { destPixels[offset + 0] = importData->pixels[offset + 2], destPixels[offset + 1] = importData->pixels[offset + 1], destPixels[offset + 2] = importData->pixels[offset + 0], destPixels[offset + 3] = importData->pixels[offset + 3]; offset += 4; } normalMap->iDirect3DTexture()->UnlockRect( 0 ); */
            // put texture into importer storage
            _textures.insert( TextureT( importData->id, texture ) );
        }
        // release import data
        iImport->release( importData );
        }
        break;
    case import::itMaterial:
        {
        import::ImportMaterial* importData = iImportStream->importMaterial();
        // Shader layer count depends on which texture ids are present.
        Shader* shader = NULL;
        if( importData->textureId == 0 && importData->dualPassTextureId == 0 )
        {
            shader = new Shader( 0, importData->name );
        }
        else if( importData->textureId != 0 && importData->dualPassTextureId == 0 )
        {
            shader = new Shader( 1, importData->name );
            TextureI textureI = _textures.find( importData->textureId );
            if( textureI != _textures.end() ) { shader->setLayerTexture( 0, textureI->second ); }
            shader->setLayerUV( 0,0 );
            /* ::TextureI normalMapI = Texture::textures.find( ( std::string( textureI->second->getName() ) + "_nmap" ).c_str() ); if( normalMapI != Texture::textures.end() ) { shader->setNormalMap( normalMapI->second ); shader->setNormalMapUV( 0 ); } */
        }
        else
        {
            // Dual-pass material: two layers plus optional normal map.
            shader = new Shader( 2, importData->name );
            TextureI textureI = _textures.find( importData->textureId );
            if( textureI != _textures.end() ) { shader->setLayerTexture( 0, textureI->second ); }
            textureI = _textures.find( importData->dualPassTextureId );
            if( textureI != _textures.end() ) { shader->setLayerTexture( 1, textureI->second ); }
            shader->setLayerUV( 1,1 );
            switch( importData->dualPassBlendType )
            {
            case import::ImportMaterial::btAdd: shader->setLayerBlending( 1, engine::btAdd ); break;
            case import::ImportMaterial::btModulate: shader->setLayerBlending( 1, engine::btModulate ); break;
            case import::ImportMaterial::btBlendTextureAlpha: shader->setLayerBlending( 1, engine::btBlendTextureAlpha ); break;
            default: shader->setLayerBlending( 1, engine::btOver );
            }
            textureI = _textures.find( importData->textureId );
            assert( textureI != _textures.end() );
            ::TextureI normalMapI = Texture::textures.find( ( std::string( textureI->second->getName() ) + "_nmap" ).c_str() );
            if( normalMapI != Texture::textures.end() ) { shader->setNormalMap( normalMapI->second ); shader->setNormalMapUV( 0 ); }
        }
        shader->setDiffuseColor( importData->color );
        // put texture into importer storage
        _shaders.insert( ShaderT( importData->id, shader ) );
        // release import data
        iImport->release( importData );
        }
        break;
    case import::itFrame:
        {
        import::ImportFrame* importData = iImportStream->importFrame();
        Frame* frame = new Frame( importData->name );
        frame->TransformationMatrix = wrap( importData->modeling );
        // Re-link the parent frame if it was imported earlier.
        FrameI parentI = _frames.find( importData->parentId );
        if( parentI != _frames.end() ) { frame->setParent( parentI->second ); }
        _frames.insert( FrameT( importData->id, frame ) );
        iImport->release( importData );
        }
        break;
    case import::itGeometry:
        {
        import::ImportGeometry* importData = iImportStream->importGeometry();
        Geometry* geometry = new Geometry( importData->numVertices, importData->numTriangles, importData->numUVs, importData->numMaterials, ( importData->prelights != NULL ) ? 1 : 0, false, importData->name );
        for( int i=0; i<importData->numVertices; i++ )
        {
            geometry->getVertices()[i] = wrap( importData->vertices[i] );
            geometry->getNormals()[i] = wrap( importData->normals[i] );
            // rh2lh conversion
            /* geometry->getVertices()[i][2] *= -1; geometry->getNormals()[i][2] *= -1; */
            geometry->getUVSet(0)[i] = wrap( importData->uvs[0][i] );
            if( importData->numUVs > 1 ) { geometry->getUVSet(1)[i] = wrap( importData->uvs[1][i] ); }
            if( importData->prelights ) { geometry->getPrelights(0)[i] = wrap( importData->prelights[i] ); }
        }
        // 'i' reused from the previous loop (legacy for-scoping).
        for( i=0; i<importData->numTriangles; i++ )
        {
            /* geometry->getTriangles()[i].set( importData->triangles[i].vertexId[0], importData->triangles[i].vertexId[2], importData->triangles[i].vertexId[1], importData->triangles[i].materialId ); */
            geometry->getTriangles()[i].set( importData->triangles[i].vertexId[0], importData->triangles[i].vertexId[1], importData->triangles[i].vertexId[2], importData->triangles[i].materialId );
        }
        for( i=0; i<importData->numMaterials; i++ )
        {
            ShaderI shaderI = _shaders.find( importData->materials[i] );
            assert( shaderI != _shaders.end() );
            geometry->setShader( i, shaderI->second );
        }
        geometry->instance();
        _geometries.insert( GeometryT( importData->id, geometry ) );
        iImport->release( importData );
        }
        break;
    case import::itAtomic:
        {
        import::ImportAtomic* importData = iImportStream->importAtomic();
        Atomic* atomic = new Atomic;
        FrameI frameI = _frames.find( importData->frameId );
        assert( frameI != _frames.end() );
        atomic->setFrame( frameI->second );
        GeometryI geometryI = _geometries.find( importData->geometryId );
        assert( geometryI != _geometries.end() );
        atomic->setGeometry( geometryI->second );
        // Lightmap is optional.
        TextureI textureI = _textures.find( importData->lightmapId );
        if( textureI != _textures.end() ) { atomic->setLightMap( textureI->second ); }
        _atomics.insert( AtomicT( importData->id, atomic ) );
        iImport->release( importData );
        }
        break;
    case import::itClump:
        {
        import::ImportClump* importData = iImportStream->importClump();
        Clump* clump = new Clump( importData->name );
        FrameI frameI = _frames.find( importData->frameId );
        assert( frameI != _frames.end() );
        clump->setFrame( frameI->second );
        frameI->second->dirty();
        // Attach previously imported atomics and lights by id.
        for( int i=0; i<importData->numAtomics; i++ )
        {
            AtomicI atomicI = _atomics.find( importData->atomics[i] );
            assert( atomicI != _atomics.end() );
            clump->add( atomicI->second );
        }
        for( i=0; i<importData->numLights; i++ )
        {
            LightI lightI = _lights.find( importData->lights[i] );
            assert( lightI != _lights.end() );
            clump->add( lightI->second );
        }
        iImport->release( importData );
        /* rh2lh( clump->frame() ); */
        _clumps.push_back( clump );
        }
        break;
    case import::itWorldSector:
        {
        import::ImportWorldSector* importData = iImportStream->importWorldSector();
        BSPI bspI = _bspM.find( importData->worldId );
        assert( bspI != _bspM.end() );
        BSPSectorI parentSectorI = _bspSectorM.find( importData->parentId );
        BSPSector* parentSector = NULL;
        if( parentSectorI != _bspSectorM.end() ) parentSector = parentSectorI->second;
        AABB boundingBox;
        boundingBox.inf = wrap( importData->aabbInf );
        boundingBox.sup = wrap( importData->aabbSup );
        // rh2lh conversion
        /* float temp = boundingBox.inf[2] * -1; boundingBox.inf[2] = boundingBox.sup[2] * -1; boundingBox.sup[2] = temp; */
        int numPrelights = 0;
        if( importData->prelights ) numPrelights++;
        // Leaf sectors carry geometry; empty sectors are pure hierarchy nodes.
        Geometry* geometry = NULL;
        if( importData->numVertices && importData->numTriangles )
        {
            geometry = new Geometry( importData->numVertices, importData->numTriangles, importData->numUVs, bspI->second->getNumShaders(), numPrelights, true, strformat( "BSPSector_%x_Shape", importData->id ).c_str() );
            geometry->setShaders( bspI->second->getShaders() );
            for( int i=0; i<importData->numVertices; i++ )
            {
                geometry->getVertices()[i] = wrap( importData->vertices[i] );
                geometry->getNormals()[i] = wrap( importData->normals[i] );
                // rh2lh conversion
                /* geometry->getVertices()[i][2] *= -1; geometry->getNormals()[i][2] *= -1; */
                for( int j=0; j<importData->numUVs; j++ ) geometry->getUVSet(j)[i] = wrap( importData->uvs[j][i] );
                if( numPrelights ) geometry->getPrelights(0)[i] = wrap( importData->prelights[i] );
            }
            for( i=0; i<importData->numTriangles; i++ )
            {
                /* geometry->getTriangles()[i].set( importData->triangles[i].vertexId[0], importData->triangles[i].vertexId[2], importData->triangles[i].vertexId[1], importData->triangles[i].materialId ); */
                geometry->getTriangles()[i].set( importData->triangles[i].vertexId[0], importData->triangles[i].vertexId[1], importData->triangles[i].vertexId[2], importData->triangles[i].materialId );
            }
            geometry->instance();
        }
        BSPSector* sector = new BSPSector( bspI->second, parentSector, boundingBox, geometry );
        TextureI textureI = _textures.find( importData->lightmapId );
        if( textureI != _textures.end() ) { sector->setLightMap( textureI->second ); }
        _bspSectorM.insert( BSPSectorT( importData->id, sector ) );
        iImport->release( importData );
        };
        break;
    case import::itWorld:
        {
        import::ImportWorld* importData = iImportStream->importWorld();
        AABB boundingBox;
        boundingBox.inf = wrap( importData->aabbInf );
        boundingBox.sup = wrap( importData->aabbSup );
        // rh2lh conversion
        /* float temp = boundingBox.inf[2] * -1; boundingBox.inf[2] = boundingBox.sup[2] * -1; boundingBox.sup[2] = temp; */
        BSP* bsp = new BSP( importData->name, boundingBox, importData->numMaterials );
        for( int i=0; i<importData->numMaterials; i++ )
        {
            ShaderI shaderI = _shaders.find( importData->materials[i] );
            assert( shaderI != _shaders.end() );
            bsp->setShader( i, shaderI->second );
        }
        _bspM.insert( BSPT( importData->id, bsp ) );
        _bsps.push_back( bsp );
        iImport->release( importData );
        };
        break;
    case import::itLight:
        {
        import::ImportLight* importData = iImportStream->importLight();
        // Map the importer's light kind onto the engine enumeration.
        engine::LightType lightType;
        switch( importData->type )
        {
        case import::ImportLight::ltAmbient: lightType = engine::ltAmbient; break;
        case import::ImportLight::ltDirectional: lightType = engine::ltDirectional; break;
        case import::ImportLight::ltPoint: lightType = engine::ltPoint; break;
        case import::ImportLight::ltSpot: lightType = engine::ltSpot; break;
        default: assert( !"shouldn't be here!" );
        }
        Light* light = new Light( lightType );
        light->setRange( importData->radius );
        light->setDiffuseColor( importData->color );
        light->setSpecularColor( importData->color );
        light->setPhi( importData->coneAngle );
        light->setTheta( importData->coneAngle );
        light->setAttenuation( Vector3f( 0,0.0001f,0 ) );
        FrameI frameI = _frames.find( importData->frameId );
        assert( frameI != _frames.end() );
        light->setFrame( frameI->second );
        _lights.insert( LightT( importData->id, light ) );
        iImport->release( importData );
        }
        break;
    default:
        assert( !"shouldn't be here!" );
    }
    iImport->release( iImportStream );
}
// Adaptive processing of a triangular region, valid for at most two calls:
// call 1 applies the cubature rule and returns no offspring; call 2 subdivides
// the triangle (bisection when one direction clearly dominates, otherwise a
// uniform 4-way split) and emits the parts as Atomic<Triangle> offspring.
// A third call is a usage error.
void Triangle_Processor::Process( Stack<AtomicRegion>& Offspring)
{
  TimesCalled ++;
  if (TimesCalled == 1)
  {
    // First pass: integrate only, no subdivision yet.
    TheRule->Apply(LocalIntegrand(),Geometry(),Integral(),AbsoluteError());
    Offspring.MakeEmpty();
    return;
  };
  if(TimesCalled == 2)
  {
    real NewVolume = Geometry().Volume()/2;
    Stack<Triangle> Parts;
    Vector<unsigned int> DiffOrder(Diffs.Size());
    // Heuristic thresholds for choosing 2-way vs 4-way subdivision.
    const real difffac = real(1)/real(0.45);
    const real difftreshold = 1e-3;
    TheRule->ComputeDiffs(LocalIntegrand(),Geometry(),Diffs);
    // Sort the differences in descending order.
    // (selection sort over the three direction indices)
    for (unsigned int ik=0 ; ik<=2 ; ik++)
    {
      DiffOrder[ik] = ik;
    }
    for (unsigned int i=0 ; i<=1 ; i++)
    {
      for (unsigned int k=i+1 ; k<=2 ; k++)
        if (Diffs[DiffOrder[k]]>Diffs[DiffOrder[i]])
        {
          unsigned int h = DiffOrder[i];
          DiffOrder[i] = DiffOrder[k];
          DiffOrder[k] = h;
        }
    }
    if (Diffs[DiffOrder[0]] < difftreshold)
    {
      // All differences negligible: uniform 4-way split (quarter volume).
      TheDivisor4->Apply(Geometry(),Parts,DiffOrder);
      NewVolume /=2;
    }
    else
    {
      if (Diffs[DiffOrder[0]]>difffac*Diffs[DiffOrder[2]])
      {
        // One direction dominates: bisect across it (half volume).
        TheDivisor2->Apply (Geometry(),Parts,DiffOrder);
      }
      else
      {
        TheDivisor4->Apply(Geometry(),Parts,DiffOrder);
        NewVolume /=2;
      }
    };
    // Wrap each part in an Atomic region sharing this region's integrand.
    unsigned int N = Parts.Size();
    for (unsigned int ii =0;ii<N;ii++)
    {
      Triangle* g = Parts.Pop();
      g->Volume(NewVolume);
      Processor<Triangle>* p = Descendant();
      Atomic<Triangle>* a = new Atomic<Triangle>(g,p);
      a->LocalIntegrand(&LocalIntegrand());
      Offspring.Push(a);
    };
    return;
  };
  Error(TimesCalled > 2, "Triangle_Processor : more than two calls of Process()");
}
//==============================================================================
// Exercises AsyncLoader: bare create/destroy, task completion with and without
// barriers, tasks abandoned at destruction, allocating tasks, pause/resume
// semantics, task resubmission, and a randomized fuzz pass.
// NOTE(review): several sub-tests rely on HighRezTimer::sleep() for ordering,
// so they are timing-sensitive and may flake on heavily loaded machines.
ANKI_TEST(Resource, AsyncLoader)
{
	HeapAllocator<U8> alloc(allocAligned, nullptr);

	// Simple create destroy
	{
		AsyncLoader a;
		a.init(alloc);
	}

	// Simple task that will finish
	{
		AsyncLoader a;
		a.init(alloc);
		Barrier barrier(2);

		a.submitNewTask<Task>(0.0, &barrier, nullptr);
		barrier.wait();
	}

	// Many tasks that will finish
	{
		AsyncLoader a;
		a.init(alloc);
		Barrier barrier(2);
		Atomic<U32> counter = {0};
		const U COUNT = 100;

		for(U i = 0; i < COUNT; i++)
		{
			Barrier* pbarrier = nullptr;
			if(i == COUNT - 1)
			{
				pbarrier = &barrier;
			}
			a.submitNewTask<Task>(0.01, pbarrier, &counter);
		}

		barrier.wait();
		ANKI_TEST_EXPECT_EQ(counter.load(), COUNT);
	}

	// Many tasks that will _not_ finish
	{
		AsyncLoader a;
		a.init(alloc);

		for(U i = 0; i < 100; i++)
		{
			a.submitNewTask<Task>(0.0, nullptr, nullptr);
		}
	}

	// Tasks that allocate
	{
		AsyncLoader a;
		a.init(alloc);
		Barrier barrier(2);

		for(U i = 0; i < 10; i++)
		{
			Barrier* pbarrier = nullptr;
			if(i == 9)
			{
				pbarrier = &barrier;
			}
			a.submitNewTask<MemTask>(alloc, pbarrier);
		}

		barrier.wait();
	}

	// Tasks that allocate and never finished
	{
		AsyncLoader a;
		a.init(alloc);

		for(U i = 0; i < 10; i++)
		{
			a.submitNewTask<MemTask>(alloc, nullptr);
		}
	}

	// Pause/resume
	{
		AsyncLoader a;
		a.init(alloc);
		Atomic<U32> counter(0);
		Barrier barrier(2);

		// Check if the pause will sync
		a.submitNewTask<Task>(0.5, nullptr, &counter, 0);
		HighRezTimer::sleep(0.25); // Wait for the thread to pick the task...
		a.pause(); /// ...and then sync
		ANKI_TEST_EXPECT_EQ(counter.load(), 1);

		// Test resume
		a.submitNewTask<Task>(0.1, nullptr, &counter, 1);
		HighRezTimer::sleep(1.0);
		ANKI_TEST_EXPECT_EQ(counter.load(), 1);
		a.resume();

		// Sync
		a.submitNewTask<Task>(0.1, &barrier, &counter, 2);
		barrier.wait();
		ANKI_TEST_EXPECT_EQ(counter.load(), 3);
	}

	// Pause/resume
	{
		AsyncLoader a;
		a.init(alloc);
		Atomic<U32> counter(0);
		Barrier barrier(2);

		// Check task resubmit
		a.submitNewTask<Task>(0.0, &barrier, &counter, -1, false, true);
		barrier.wait();
		barrier.wait();
		ANKI_TEST_EXPECT_EQ(counter.load(), 2);

		// Check task pause
		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, true, false);
		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, false, false);
		HighRezTimer::sleep(1.0);
		ANKI_TEST_EXPECT_EQ(counter.load(), 3);
		a.resume();
		HighRezTimer::sleep(1.0);
		ANKI_TEST_EXPECT_EQ(counter.load(), 4);

		// Check both
		counter.set(0);
		a.submitNewTask<Task>(0.0, nullptr, &counter, 0, false, false);
		a.submitNewTask<Task>(0.0, nullptr, &counter, -1, true, true);
		a.submitNewTask<Task>(0.0, nullptr, &counter, 2, false, false);
		HighRezTimer::sleep(1.0);
		ANKI_TEST_EXPECT_EQ(counter.load(), 2);
		a.resume();
		HighRezTimer::sleep(1.0);
		ANKI_TEST_EXPECT_EQ(counter.load(), 4);
	}

	// Fuzzy test
	{
		AsyncLoader a;
		a.init(alloc);
		Barrier barrier(2);
		Atomic<U32> counter = {0};

		for(U i = 0; i < 10; i++)
		{
			Barrier* pbarrier = nullptr;
			if(i == 9)
			{
				pbarrier = &barrier;
			}
			a.submitNewTask<Task>(randRange(0.0, 0.5), pbarrier, &counter, i);
		}

		barrier.wait();
		ANKI_TEST_EXPECT_EQ(counter.load(), 10);
	}
}
// Writer-thread body for shutdown statistics: loops consuming snapshots handed
// over through the global gWriteData slot, writing each to a temporary file
// and renaming it over the destination path ('arg', a heap char* owned by the
// caller). Exits the loop — and the thread — on the first I/O failure.
void RunWriter(void* arg)
{
  PR_SetCurrentThreadName("Shutdown Statistics Writer");

  MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(arg);
  // Shutdown will generally complete before we have a chance to
  // deallocate. This is not a leak.

  // Setup destinationPath and tmpFilePath
  nsCString destinationPath(static_cast<char*>(arg));
  nsAutoCString tmpFilePath;
  tmpFilePath.Append(destinationPath);
  tmpFilePath.AppendLiteral(".tmp");

  // Cleanup any file leftover from a previous run
  Unused << PR_Delete(tmpFilePath.get());
  Unused << PR_Delete(destinationPath.get());

  while (true) {
    //
    // Check whether we have received data from the main thread.
    //
    // We perform the check before waiting on `gWriteReady` as we may
    // have received data while we were busy writing.
    //
    // Also note that gWriteData may have been modified several times
    // since we last checked. That's ok, we are not losing any important
    // data (since we keep adding data), and we are not leaking memory
    // (since the main thread deallocates any data that hasn't been
    // consumed by the writer thread).
    //
    UniquePtr<nsCString> data(gWriteData.exchange(nullptr));
    if (!data) {
      // Data is not available yet.
      // Wait until the main thread provides it.
      PR_EnterMonitor(gWriteReady);
      PR_Wait(gWriteReady, PR_INTERVAL_NO_TIMEOUT);
      PR_ExitMonitor(gWriteReady);
      continue;
    }

    MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(data.get());
    // Shutdown may complete before we have a chance to deallocate.
    // This is not a leak.

    //
    // Write to a temporary file
    //
    // In case of any error, we simply give up. Since the data is
    // hardly critical, we don't want to spend too much effort
    // salvaging it.
    //
    UniquePtr<PRFileDesc, PR_CloseDelete> tmpFileDesc(PR_Open(tmpFilePath.get(), PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE, 00600));

    // Shutdown may complete before we have a chance to close the file.
    // This is not a leak.
    MOZ_LSAN_INTENTIONALLY_LEAK_OBJECT(tmpFileDesc.get());

    if (tmpFileDesc == nullptr) {
      break;
    }
    if (PR_Write(tmpFileDesc.get(), data->get(), data->Length()) == -1) {
      break;
    }
    tmpFileDesc.reset();

    //
    // Rename on top of destination file.
    //
    // This is not sufficient to guarantee that the destination file
    // will be written correctly, but, again, we don't care enough
    // about the data to make more efforts.
    //
    if (PR_Rename(tmpFilePath.get(), destinationPath.get()) != PR_SUCCESS) {
      break;
    }
  }
}
// Atomically consumes the pending memory-pressure state, resetting it to
// "none" so each event is observed at most once.
MemoryPressureState NS_GetPendingMemoryPressure()
{
  const int32_t pending = sMemoryPressurePending.exchange(MemPressure_None);
  return MemoryPressureState(pending);
}