// Precomputes, for every small and medium size class, per-line metadata:
// the offset of the first object that begins in the line and how many objects
// begin there. Objects may straddle line boundaries, so the spill from one
// line is carried into the next via startOffset.
void Heap::initializeLineMetadata()
{
    for (unsigned short size = alignment; size <= smallMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            // Count objects beginning in [startOffset, lineSize), rounding up so an
            // object that spills into the next line is still attributed to this one.
            divideRoundingUp(static_cast<unsigned short>(SmallPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_smallLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            // If the last object spilled over, the next line starts partway into it.
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((SmallPage::lineSize - startOffset) / size);
        m_smallLineMetadata[sizeClass(size)][SmallPage::lineCount - 1] = { startOffset, objectCount };
    }

    // Same computation for medium size classes.
    for (unsigned short size = smallMax + alignment; size <= mediumMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < MediumPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(MediumPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_mediumLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((MediumPage::lineSize - startOffset) / size);
        m_mediumLineMetadata[sizeClass(size)][MediumPage::lineCount - 1] = { startOffset, objectCount };
    }
}
// Allocates a large object with the requested alignment. unalignedSize is the
// worst-case size that guarantees an aligned sub-object of |size| bytes can be
// carved out of a misaligned candidate.
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    // Prefer the free list; fall back to carving fresh VM.
    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
    }

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        // Misaligned: split off a prefix of at least largeMin bytes (so it stays
        // a valid free object) such that the remainder starts aligned.
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        m_largeObjects.insert(pair.first);
        largeObject = pair.second;
    }

    return allocateLarge(lock, largeObject, size);
}
// Attempts to allocate an extra-large object straight from the VM. On success
// the new range is recorded in m_xLargeObjects so it can be found again at
// deallocation time; returns nullptr when the VM allocation fails.
void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= superChunkSize);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    if (void* memory = tryVMAllocate(alignment, size)) {
        m_xLargeObjects.push(Range(memory, size));
        return memory;
    }
    return nullptr;
}
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size) { BASSERT(size <= largeMax); BASSERT(size >= largeMin); BASSERT(size == roundUpToMultipleOf<largeAlignment>(size)); LargeObject largeObject = m_largeObjects.take(size); if (!largeObject) { m_isAllocatingPages = true; largeObject = m_vmHeap.allocateLargeObject(size); } return allocateLarge(lock, largeObject, size); }
// Searches the tail of the free list (at most freeListSearchDepth entries) for
// an object that can satisfy an aligned allocation: either it is already
// aligned and >= size, or it is >= unalignedSize so an aligned piece can be
// split out of it. Stale entries are pruned along the way; among candidates
// the lowest-addressed one is kept.
LargeObject FreeList::take(Owner owner, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(isPowerOfTwo(alignment));
    size_t alignmentMask = alignment - 1;

    LargeObject candidate;
    size_t candidateIndex; // only read below when candidate is non-null
    size_t begin = m_vector.size() > freeListSearchDepth ? m_vector.size() - freeListSearchDepth : 0;
    for (size_t i = begin; i < m_vector.size(); ++i) {
        LargeObject largeObject(LargeObject::DoNotValidate, m_vector[i].begin());
        if (!largeObject.isValidAndFree(owner, m_vector[i].size())) {
            // Entry went stale (object was merged or allocated elsewhere); drop it
            // and revisit slot i, which now holds the previous last element.
            m_vector.pop(i--);
            continue;
        }

        if (largeObject.size() < size)
            continue;

        // A misaligned object is only usable if big enough to split an aligned
        // |size|-byte piece out of.
        if (test(largeObject.begin(), alignmentMask) && largeObject.size() < unalignedSize)
            continue;

        // Keep the candidate with the lowest address.
        if (!!candidate && candidate.begin() < largeObject.begin())
            continue;

        candidate = largeObject;
        candidateIndex = i;
    }

    if (!!candidate)
        m_vector.pop(candidateIndex);

    return candidate;
}
// Resizes |object| to |newSize|. Large objects shrinking in place avoid the
// allocate/copy/free cycle; everything else allocates anew, copies the
// overlapping bytes and releases the old object.
void* Allocator::reallocate(void* object, size_t newSize)
{
    if (!m_isBmallocEnabled)
        return realloc(object, newSize);

    size_t oldSize = 0;
    switch (objectType(object)) {
    case ObjectType::Small: {
        // nullptr classifies as Small, so a null input falls through with oldSize == 0.
        BASSERT(objectType(nullptr) == ObjectType::Small);
        if (!object)
            break;
        size_t sizeClass = Object(object).page()->sizeClass();
        oldSize = objectSize(sizeClass);
        break;
    }
    case ObjectType::Large: {
        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
        oldSize = PerProcess<Heap>::getFastCase()->largeSize(lock, object);

        // Shrink in place, but only while the result stays large (> smallMax);
        // otherwise it should migrate to a small size class below.
        if (newSize < oldSize && newSize > smallMax) {
            PerProcess<Heap>::getFastCase()->shrinkLarge(lock, Range(object, oldSize), newSize);
            return object;
        }
        break;
    }
    }

    // Fallback path: new allocation, copy min(old, new) bytes, free the original.
    void* result = allocate(newSize);
    size_t copySize = std::min(oldSize, newSize);
    memcpy(result, object, copySize);

    m_deallocator.deallocate(object);
    return result;
}
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject) { BASSERT(!largeObject.isFree()); largeObject.setFree(true); LargeObject merged = largeObject.merge(); m_largeObjects.insert(merged); m_scavenger.run(); }
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size) { BASSERT(largeObject.isFree()); if (largeObject.size() - size > largeMin) { std::pair<LargeObject, LargeObject> split = largeObject.split(size); largeObject = split.first; m_largeObjects.insert(split.second); } largeObject.setFree(false); return largeObject.begin(); }
// Builds one tile manager per lod level. The voxel size doubles with each lod
// level, and every manager's callbacks are bound with its lod index so range
// checks and send requests can be attributed to the right level.
sender(async::dispatcher &worker, const t_syncRadiusList& syncRadien, t_terrainAccessor* tiles)
    : m_syncRadien(syncRadien)
{
    BASSERT(tiles != nullptr);
    // There must be at least one sync radius per lod level.
    BASSERT(tiles->getNumLod() <= (int32)syncRadien.size());

    real voxelSize(1.);
    for (int32 indLod = 0; indLod < tiles->getNumLod(); ++indLod)
    {
        const auto callback(boost::bind(&sender::isInRange, this, _1, _2, indLod));
        t_simpleAccessor* toWork(tiles->getLod(indLod));
        t_multipleTilesPtr lod(new t_multipleTiles(worker, voxelSize, callback, toWork));
        voxelSize*=2.; // next lod level is half resolution -> double voxel size
        m_multipleTiles.push_back(lod);
        lod->signalSendTileData()->connect(boost::bind(&sender::lodWantsToSendTileData, this, _1, _2, indLod));
    }
}
/// Component-wise division of the colour by a scalar. Computes the reciprocal
/// once and multiplies each channel by it, matching the cost of four multiplies
/// instead of four divides. Division by zero is a caller error (asserted).
inline colour operator / (const real scalar) const
{
    BASSERT(scalar != 0.0);

    const real reciprocal = 1.0f / scalar;
    colour result;
    result.r = r * reciprocal;
    result.g = g * reciprocal;
    result.b = b * reciprocal;
    result.a = a * reciprocal;
    return result;
}
// Legacy large-object allocation based on boundary tags over raw ranges.
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);

    m_isAllocatingPages = true;

    // Take a free range, or map a new one from the VM heap.
    Range range = m_largeRanges.take(size);
    if (!range)
        range = m_vmHeap.allocateLargeRange(size);

    Range leftover;
    bool hasPhysicalPages;
    BoundaryTag::allocate(size, range, leftover, hasPhysicalPages);

    // Return any unused tail of the range to the free list.
    if (!!leftover)
        m_largeRanges.insert(leftover);

    // Commit physical memory if the range was reserved but decommitted.
    if (!hasPhysicalPages)
        vmAllocatePhysicalPagesSloppy(range.begin(), range.size());

    return range.begin();
}
// Aligned allocation entry point (variant with Android fallbacks). Falls
// through progressively larger size classes until one can satisfy both the
// size and the alignment.
void* Allocator::allocate(size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));

    if (!m_isBmallocEnabled) {
        void* result = nullptr;
#if !defined(ANDROID) || (ANDROID_NATIVE_API_LEVEL > 15)
#pragma message ("ANDROID_NATIVE_API_LEVEL= " STRINGIFY(ANDROID_NATIVE_API_LEVEL))
        if (posix_memalign(&result, alignment, size))
            return nullptr;
#else
        // Old Android NDKs lack posix_memalign; fall back to memalign.
        return memalign(alignment, size);
#endif
        return result;
    }

    // Small allocations are line-aligned, so for alignments up to the line size
    // retry until an allocation happens to satisfy the alignment.
    if (size <= smallMax && alignment <= smallLineSize) {
        size_t alignmentMask = alignment - 1;
        while (void* p = allocate(size)) {
            if (!test(p, alignmentMask))
                return p;
            m_deallocator.deallocate(p);
        }
    }

    if (size <= mediumMax && alignment <= mediumLineSize) {
        // Bump the size so the request takes the medium path rather than small.
        size = std::max(size, smallMax + Sizes::alignment);
        size_t alignmentMask = alignment - 1;
        while (void* p = allocate(size)) {
            if (!test(p, alignmentMask))
                return p;
            m_deallocator.deallocate(p);
        }
    }

    size = std::max(largeMin, roundUpToMultipleOf<largeAlignment>(size));
    alignment = roundUpToMultipleOf<largeAlignment>(alignment);
    // Worst-case size that still guarantees an aligned |size|-byte sub-range.
    size_t unalignedSize = largeMin + alignment + size;
    if (unalignedSize <= largeMax && alignment <= largeChunkSize / 2) {
        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
        return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size, unalignedSize);
    }

    size = roundUpToMultipleOf<xLargeAlignment>(size);
    alignment = std::max(superChunkSize, alignment);
    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
    return PerProcess<Heap>::getFastCase()->allocateXLarge(lock, alignment, size);
}
void Deallocator::deallocateSlowCase(void* object) { BASSERT(!deallocateFastCase(object)); if (!m_isBmallocEnabled) { free(object); return; } BASSERT(objectType(nullptr) == XLarge); if (!object) return; if (isSmallOrMedium(object)) { processObjectLog(); m_objectLog.push(object); return; } if (!isXLarge(object)) return deallocateLarge(object); return deallocateXLarge(object); }
/**
 * @brief getVoxel returns a read-only reference to the voxel at pos.
 * @param pos each component must satisfy -1 <= c < voxelLengthWithNormalCorrection-1;
 *            the +1 shift maps that range onto non-negative array indices.
 * @return const reference into m_voxels.
 */
const voxelType& getVoxel(const vector3int32& pos) const
{
    BASSERT(pos.x >= -1);
    BASSERT(pos.y >= -1);
    BASSERT(pos.z >= -1);
    BASSERT(pos.x < voxelLengthWithNormalCorrection-1);
    BASSERT(pos.y < voxelLengthWithNormalCorrection-1);
    BASSERT(pos.z < voxelLengthWithNormalCorrection-1);

    // Shift into [0, len) and linearise in x-major order.
    const int32 stride(voxelLengthWithNormalCorrection);
    const int32 linearIndex(((pos.x+1)*stride + (pos.y+1))*stride + (pos.z+1));
    return m_voxels[linearIndex];
}
// Refills a bump allocator with free ranges from a medium page of the given
// size class. Runs of contiguous free lines are merged into single bump
// ranges; the first range refills the allocator directly and later ones go
// into rangeCache.
void Heap::allocateMediumBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size());
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            // Remember the page so the remaining free lines can be used later.
            m_mediumPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            // Note: deliberately shadows the outer lineMetadata for the merged line.
            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        // First range refills the allocator; subsequent ranges are cached.
        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }
}
void Deallocator::processObjectLog() { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); Heap* heap = PerProcess<Heap>::getFastCase(); for (auto object : m_objectLog) { if (isSmall(object)) { SmallLine* line = SmallLine::get(object); heap->derefSmallLine(lock, line); } else { BASSERT(isMedium(object)); MediumLine* line = MediumLine::get(object); heap->derefMediumLine(lock, line); } } m_objectLog.clear(); }
// Slow-path deallocation (boundary-tag variant): handles null, queues
// small/medium objects after draining the full log, and consults the begin
// tag to distinguish large from extra-large objects.
void Deallocator::deallocateSlowCase(void* object)
{
    BASSERT(!deallocateFastCase(object));

    if (!object)
        return;

    if (isSmallOrMedium(object)) {
        // Fast case failed because the log was full: drain it first.
        processObjectLog();
        m_objectLog.push(object);
        return;
    }

    if (LargeChunk::beginTag(object)->isXLarge())
        deallocateXLarge(object);
    else
        deallocateLarge(object);
}
/** * @brief setCalculateLod enables or disables lod calculation and voxel buffering for it. * @param lod */ void setCalculateLod(const bool& lod) { if (m_calculateLod == lod) { // do nothing // BWARNING("m_calculateLod == lod"); return; } m_calculateLod = lod; if (m_calculateLod) { BASSERT(m_voxelsLod == nullptr); m_voxelsLod.reset(new t_voxelArrayLod(6*voxelCountLod)); // [6*voxelCountLod] } else { m_voxelsLod.reset(); } }
// Called when a medium line's refcount reaches zero: dereferences the owning
// page and, based on the page's refcount before the deref, tracks it as
// newly-fragmented or fully free.
void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
    BASSERT(!line->refCount(lock));
    MediumPage* page = MediumPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case MediumPage::lineCount: {
        // First free line in the page.
        m_mediumPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_mediumPages.push(page);
        // The page is entirely free now; let the scavenger return memory to the OS.
        m_scavenger.run();
        break;
    }
    }
}
void Deallocator::processObjectLog() { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); for (auto object : m_objectLog) { if (isSmall(object)) { SmallLine* line = SmallLine::get(object); if (!line->deref(lock)) continue; deallocateSmallLine(lock, line); } else { BASSERT(isSmallOrMedium(object)); MediumLine* line = MediumLine::get(object); if (!line->deref(lock)) continue; deallocateMediumLine(lock, line); } } m_objectLog.clear(); }
void XLargeMap::addVirtual(const XLargeRange& range) { auto canMerge = [&range](const Allocation& other) { return other.object.end() == range.begin(); }; if (range.size() < xLargeAlignment) { // This is an unused fragment, so it might belong in the allocated list. auto it = std::find_if(m_allocated.begin(), m_allocated.end(), canMerge); if (it != m_allocated.end()) { BASSERT(!it->unused); it->unused = range; return; } // If we didn't find a neighbor in the allocated list, our neighbor must // have been freed. We'll merge with it below. } addFree(range); }
// Aligned allocation entry point. Tries the small, large and extra-large size
// classes in turn; crashes if no tier can satisfy the request.
void* Allocator::allocate(size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));

    if (!m_isBmallocEnabled) {
        void* result = nullptr;
        if (posix_memalign(&result, alignment, size))
            return nullptr;
        return result;
    }

    // Small lines are alignment-friendly: retry until an allocation happens to
    // satisfy the requested alignment.
    if (size <= smallMax && alignment <= smallLineSize) {
        size_t alignmentMask = alignment - 1;
        while (void* p = allocate(size)) {
            if (!test(p, alignmentMask))
                return p;
            m_deallocator.deallocate(p);
        }
    }

    if (size <= largeMax && alignment <= largeMax) {
        size = std::max(largeMin, roundUpToMultipleOf<largeAlignment>(size));
        alignment = roundUpToMultipleOf<largeAlignment>(alignment);
        // Worst-case size that still guarantees an aligned |size|-byte piece.
        size_t unalignedSize = largeMin + alignment - largeAlignment + size;
        if (unalignedSize <= largeMax && alignment <= chunkSize / 2) {
            std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
            // Flush pending frees so they can satisfy this allocation.
            m_deallocator.processObjectLog(lock);
            return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size, unalignedSize);
        }
    }

    if (size <= xLargeMax && alignment <= xLargeMax) {
        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
        return PerProcess<Heap>::getFastCase()->allocateXLarge(lock, alignment, size);
    }

    // Request too big for any tier.
    BCRASH();
    return nullptr;
}
void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure) { BASSERT(isPowerOfTwo(alignment)); if (!m_isBmallocEnabled) { void* result = nullptr; if (posix_memalign(&result, alignment, size)) return nullptr; return result; } if (!size) size = alignment; if (size <= smallMax && alignment <= smallMax) return allocate(roundUpToMultipleOf(alignment, size)); std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); Heap* heap = PerProcess<Heap>::getFastCase(); if (crashOnFailure) return heap->allocateLarge(lock, alignment, size); return heap->tryAllocateLarge(lock, alignment, size); }
// Slow path when the per-size-class small line list is empty: obtains a page
// (cached or freshly mapped), returns its first line to the caller and donates
// the remaining lines to the size class's free list.
SmallLine* Heap::allocateSmallLineSlowCase(std::lock_guard<StaticMutex>& lock, size_t smallSizeClass)
{
    m_isAllocatingPages = true;

    // Reuse a cached free page if one exists; otherwise map a new page and
    // commit its physical memory.
    SmallPage* page;
    if (m_smallPages.size())
        page = m_smallPages.pop();
    else {
        page = m_vmHeap.allocateSmallPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
    }

    SmallLine* line = page->begin();

    // Every line except the first goes to the free list for this size class.
    Vector<SmallLine*>& smallLines = m_smallLines[smallSizeClass];
    for (auto it = line + 1; it != page->end(); ++it)
        smallLines.push(it);

    BASSERT(!line->refCount(lock));
    page->setSmallSizeClass(smallSizeClass);
    page->ref(lock);
    return line;
}
//---------------------------------------------------------------------------------------------------------------- void CClient::Activated() { CPlayer::Activated(); BASSERT(m_pPlayerInfo, exit(1)); m_sSteamId = m_pPlayerInfo->GetNetworkIDString(); if ( m_sSteamId.size() ) { TCommandAccessFlags iAccess = CConfiguration::ClientAccessLevel(m_sSteamId); if ( iAccess ) // Founded. iCommandAccessFlags = iAccess; } else iCommandAccessFlags = 0; BLOG_I( "User connected %s (steam id %s), access: %s.", GetName(), m_sSteamId.c_str(), CTypeToString::AccessFlagsToString(iCommandAccessFlags).c_str() ); iWaypointDrawFlags = FWaypointDrawNone; iPathDrawFlags = FPathDrawNone; bAutoCreatePaths = FLAG_ALL_SET_OR_0(FCommandAccessWaypoint, iCommandAccessFlags); bAutoCreateWaypoints = false; iItemDrawFlags = FItemDrawAll; iItemTypeFlags = 0; iDestinationWaypoint = EWaypointIdInvalid; #if defined(DEBUG) || defined(_DEBUG) bDebuggingEvents = FLAG_ALL_SET_OR_0(FCommandAccessConfig, iCommandAccessFlags); #else bDebuggingEvents = false; #endif }
/// Array accessor operator inline real& operator [] ( const size_t i ) { BASSERT( i < 4 ); return *(&w+i); }
// Advances the simulation one frame: moves every ball, detects ball/band and
// ball/ball collisions, rolls colliding balls back to their pre-move
// positions, then computes all reflected velocities before committing any of
// them (committing early would change later reflection results).
void bBoard::process(bFpsTimer * fps)
{
    guard(bBoard::process);

    BASSERT( fps != NULL );

    // Integrate every ball's position for this frame.
    for( int i=0; i<ball_size; ++i ) {
        ball[i]->process( fps->factor() );
    }

    // find and mark balls with collisions
    for( int i=0; i<ball_size; ++i ) {
        // Ball vs. band: record which piece was hit (1 = side, 2/3 = edges).
        for( int k=0; k<band_size; ++k ) {
            bBand::band_piece bp;
            if( (bp = band[k]->is_within( ball[i]->pos, ball[i]->radius )) != bBand::bNone ) {
                if( bp == bBand::bSide ) {
                    luband.set( i, k, 1);
                } else {
                    luband.set( i, k, bp==bBand::bEdge1?2:3);
                }
            }
        }
        // Ball vs. ball: only j < i pairs, so each pair is tested once.
        for( int j=0; j<i; ++j ) {
            if( ball[i]->collides( ball[j] ) ) {
                luball.set( i, j, 1 );
            }
        }
    }

    // move backwards balls with collision(s)
    for( int i=0; i<ball_size; ++i ) {
        if( is_any(i) ) {
            ball[i]->unprocess( fps->factor() );
        }
    }

    Profiler.begin( "ball_mgr::reflections" );
    // calculate new velocity vectors
    // (but no commit - one change changes next calculation result!)
    commit_reflections();
    Profiler.end( "ball_mgr::reflections" );

    luball.clear();
    luband.clear();

    for( int i=0; i<ball_size; ++i ) {
        // apply velocity vector changes
        ball[i]->commit_v();

        /* ball[i]->process( fps->factor() );
        for( int k=0; k<band_size; ++k ) {
            if( band[k]->is_within( ball[i]->pos, ball[i]->radius ) != bBand::bNone ) {
                ball[i]->unprocess( fps->factor() );
            }
        }
        for( int j=0; j<i; ++j ) {
            if( ball[i]->collides( ball[j] ) ) {
                ball[i]->unprocess( fps->factor() );
            }
        }*/
    }

    unguard;
}
//---------------------------------------------------------------------------------------------------------------- void CConfiguration::LoadWeapons( good::ini_file::const_iterator it ) { good::string_buffer sbBuffer(szMainBuffer, iMainBufferSize, false); CWeapons::Clear(); // Iterate throught key-values of weapons section. BLOG_D("Weapons:"); for ( good::ini_section::const_iterator itemIt = it->begin(); itemIt != it->end(); ++itemIt ) { sbBuffer = itemIt->value; good::escape(sbBuffer); StringVector aParams; good::split((good::string)sbBuffer, aParams, ',', true); good::vector<good::string> aAmmos[2]; good::vector<int> aAmmosCount[2]; CWeapon* pWeapon = new CWeapon(); StringVector aCurrent; aCurrent.reserve(4); bool bError = false; int iSecondary = 0; for ( StringVector::iterator paramsIt = aParams.begin(); paramsIt != aParams.end(); ++paramsIt ) { int iValue = -1; bool bProcessed = true; aCurrent.clear(); good::split(*paramsIt, aCurrent); BASSERT( aCurrent.size() > 0, exit(1) ); if ( aCurrent[0] == "class" ) { if ( aCurrent.size() > 1 ) { for ( int i=1; i<aCurrent.size(); ++i ) { TClass iClass = CTypeToString::ClassFromString(aCurrent[i]); if ( iClass == -1 ) { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, invalid class: %s.", itemIt->key.c_str(), aCurrent[1].c_str() ); bError = true; break; } FLAG_SET(1 << iClass, pWeapon->iClass); } if ( bError ) break; } else { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, class not specified.", itemIt->key.c_str() ); bError = true; break; } } else if ( aCurrent.size() == 1 ) { if ( aCurrent[0] == "secondary" ) iSecondary = CWeapon::SECONDARY; else { TWeaponFlags iFlag = CTypeToString::WeaponFlagsFromString(aCurrent[0]); if ( iFlag == -1 ) bProcessed = false; else FLAG_SET(iFlag, pWeapon->iFlags[iSecondary]); } } else if ( aCurrent.size() == 2 ) { if ( aCurrent[0] == "type" ) { int iType = 
CTypeToString::WeaponTypeFromString(aCurrent[1]); if ( iType == -1 ) { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, invalid type: %s.", itemIt->key.c_str(), aCurrent[1].c_str() ); bError = true; break; } pWeapon->iType = iType; // Set weapon default parameters. switch (iType) { case EWeaponMelee: case EWeaponPhysics: pWeapon->iAttackBullets[0] = pWeapon->iAttackBullets[1] = 0; break; case EWeaponRocket: case EWeaponGrenade: case EWeaponRemoteDetonation: pWeapon->iClipSize[0] = 1; break; } } else if ( aCurrent[0] == "preference" ) { iValue = CTypeToString::PreferenceFromString(aCurrent[1]); if ( iValue == -1 ) bProcessed = false; else pWeapon->iBotPreference = iValue; } else if ( aCurrent[0] == "team" ) { iValue = CMod::GetTeamIndex(aCurrent[1]); if ( iValue == -1 ) { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, invalid team: %s.", itemIt->key.c_str(), aCurrent[1].c_str() ); bError = true; break; } pWeapon->iTeam = 1 << iValue; } else if ( aCurrent[0] == "aim" ) { TWeaponAim iAim = CTypeToString::WeaponAimFromString(aCurrent[1]); if ( iAim == -1 ) bProcessed = false; else pWeapon->iAim[iSecondary] = iAim; } else { sscanf(aCurrent[1].c_str(), "%d", &iValue); if ( iValue < 0 ) { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, invalid number: %s for parameter %s.", itemIt->key.c_str(), aCurrent[1].c_str(), aCurrent[0].c_str() ); bError = true; break; } if ( aCurrent[0] == "clip" ) pWeapon->iClipSize[iSecondary] = iValue; else if ( aCurrent[0] == "damage" ) pWeapon->fDamage[iSecondary] = iValue; else if ( aCurrent[0] == "delay" ) pWeapon->fShotTime[iSecondary] = iValue / 1000.0f; else if ( aCurrent[0] == "hold" ) pWeapon->fHoldTime[iSecondary] = iValue / 1000.0f; else if ( aCurrent[0] == "reload_by" ) pWeapon->iReloadBy[iSecondary] = iValue; else if ( aCurrent[0] == "reload" ) 
pWeapon->fReloadTime[iSecondary] = iValue / 1000.0f; else if ( aCurrent[0] == "reload_start" ) pWeapon->fReloadStartTime[iSecondary] = iValue / 1000.0f; else if ( aCurrent[0] == "holster" ) pWeapon->fHolsterTime = iValue / 1000.0f; else if ( aCurrent[0] == "default_ammo" ) pWeapon->iDefaultAmmo[iSecondary] = iValue; else if ( aCurrent[0] == "max_ammo" ) pWeapon->iMaxAmmo[iSecondary] = iValue; else if ( aCurrent[0] == "bullets" ) pWeapon->iAttackBullets[iSecondary] = iValue; else if ( aCurrent[0] == "default_ammo" ) pWeapon->iDefaultAmmo[iSecondary] = iValue; else if ( aCurrent[0] == "zoom_distance" ) pWeapon->fMinDistanceSqr[1] = SQR(iValue); else if ( aCurrent[0] == "zoom_time" ) pWeapon->fShotTime[1] = iValue / 1000.0f; else bProcessed = false; } } else if ( aCurrent.size() == 3 ) { if ( aCurrent[0] == "ammo" ) { aAmmos[iSecondary].reserve(4); int iValue = -1; sscanf(aCurrent[2].c_str(), "%d", &iValue); if ( iValue <= 0 ) // Ammo count can't be 0. { BLOG_E( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_E( " Weapon %s, invalid parameter for '%s' ammo's count: %s.", itemIt->key.c_str(), aCurrent[1].c_str(), aCurrent[2].c_str()); bError = true; break; } good::string sAmmo(aCurrent[1], true); aAmmos[iSecondary].push_back(sAmmo); aAmmosCount[iSecondary].push_back(iValue); } else { int iValue1 = -1, iValue2 = -1; sscanf(aCurrent[1].c_str(), "%d", &iValue1); sscanf(aCurrent[2].c_str(), "%d", &iValue2); if ( (iValue1 < 0) || (iValue2 < 0) || (iValue1 >= CUtil::iMaxMapSize) || (iValue1 >= CUtil::iMaxMapSize) ) bProcessed = false; else { if ( aCurrent[0] == "parabolic" ) { pWeapon->iParabolicDistance0[iSecondary] = iValue1; pWeapon->iParabolicDistance45[iSecondary] = iValue2; } else if ( aCurrent[0] == "range" ) { pWeapon->fMinDistanceSqr[iSecondary] = SQR(iValue1); if ( iValue2 == 0 ) pWeapon->fMaxDistanceSqr[iSecondary] = CUtil::iMaxMapSizeSqr; else pWeapon->fMaxDistanceSqr[iSecondary] = SQR(iValue2); } else bProcessed = false; } } } else 
bProcessed = false; if ( !bProcessed ) { BLOG_W( "File \"%s\", section [%s]:", m_iniFile.name.c_str(), it->name.c_str() ); BLOG_W( " Weapon %s, unknown keyword %s or invalid parameters, skipping.", itemIt->key.c_str(), aCurrent[0].c_str() ); } } if ( bError ) delete pWeapon; else { BLOG_D( " %s", itemIt->key.c_str() ); pWeapon->iId = CWeapons::Size(); BLOG_D( " id %d", pWeapon->iId ); if ( pWeapon->iTeam ) { BLOG_D( " team %s", CTypeToString::TeamFlagsToString(pWeapon->iTeam).c_str() ); //if ( FLAGS_SOME_SET(FDeathmatchTeamAllWeapons, CMod::iDeathmatchFlags) ) pWeapon->iTeam |= 1 << CMod::iUnassignedTeam; } else pWeapon->iTeam = -1; // Mark to use by any flag. if ( CMod::aClassNames.size() ) BLOG_D( " class %s", CTypeToString::ClassFlagsToString(pWeapon->iClass).c_str() ); else pWeapon->iClass = -1; // Mark to use by any flag. // If reload_by is not specified, assume reload refill the clip. for ( int i=0; i < 2; ++i ) if ( pWeapon->iReloadBy[i] == 0 ) pWeapon->iReloadBy[i] = pWeapon->iClipSize[i]; // Add weapon class. CItemClass cEntityClass; cEntityClass.sClassName.assign(itemIt->key, true); pWeapon->pWeaponClass = CItems::AddItemClassFor( EItemTypeWeapon, cEntityClass ); // Add ammo classes. pWeapon->aAmmos[0].reserve(aAmmos[0].size()); pWeapon->aAmmos[1].reserve(aAmmos[1].size()); for ( int iSec=0; iSec < 2; ++iSec ) for ( int i=0; i < aAmmos[iSec].size(); ++i ) { const good::string& sAmmo = aAmmos[iSec][i]; const CItemClass* pAmmoClass = CItems::GetItemClass( EItemTypeAmmo, sAmmo ); if ( !pAmmoClass ) { CItemClass cAmmoClass; cAmmoClass.sClassName = sAmmo; pAmmoClass = CItems::AddItemClassFor( EItemTypeAmmo, cAmmoClass ); } pWeapon->aAmmos[iSec].push_back( pAmmoClass ); pWeapon->aAmmosCount[iSec].push_back( aAmmosCount[iSec][i] ); BLOG_D( " ammo %s (%u bullets)", pWeapon->aAmmos[iSec][i]->sClassName.c_str(), pWeapon->aAmmosCount[iSec][i] ); } CWeaponWithAmmo cWeapon(pWeapon); CWeapons::Add(cWeapon); } } }