nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have one chunk, then we can compact if it has excess capacity.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  Maybe<Chunk> newChunk = CreateChunk(length, /* aRoundUp = */ false);
  if (MOZ_UNLIKELY(!newChunk || newChunk->AllocationFailed())) {
    NS_WARNING("Failed to allocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the new chunk.
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t offset = newChunk->Length();
    MOZ_ASSERT(offset < newChunk->Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= newChunk->Capacity());

    memcpy(newChunk->Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    newChunk->AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(newChunk->Length() == newChunk->Capacity(),
             "Compacted chunk has slack space");

  // Replace the old chunks with the new, compact chunk.
  mChunks.Clear();
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(newChunk))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }
  mChunks.Compact();

  return NS_OK;
}
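// A side note on the pattern above: Compact() coalesces N chunks into one
// exact-size allocation so the finished buffer carries no slack. A minimal
// self-contained sketch of the same coalescing idea, using std::vector in
// place of SourceBuffer's Chunk (hypothetical, for illustration only):

#include <cstddef>
#include <vector>

std::vector<char> Coalesce(const std::vector<std::vector<char>>& chunks)
{
  std::size_t total = 0;
  for (const auto& c : chunks) {
    total += c.size();              // determine the exact final length first
  }
  std::vector<char> out;
  out.reserve(total);               // one allocation, no excess capacity
  for (const auto& c : chunks) {
    out.insert(out.end(), c.begin(), c.end());
  }
  return out;
}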
TInt E32Main()
    {
    test.Title();
    test.Start(_L("Writable Data Paging Soak Test"));

    ParseCommandLine();

    if (DPTest::Attributes() & DPTest::ERomPaging)
        test.Printf(_L("Rom paging supported\n"));
    if (DPTest::Attributes() & DPTest::ECodePaging)
        test.Printf(_L("Code paging supported\n"));
    if (DPTest::Attributes() & DPTest::EDataPaging)
        test.Printf(_L("Data paging supported\n"));

    TInt totalRamSize;
    HAL::Get(HAL::EMemoryRAM, totalRamSize);
    HAL::Get(HAL::EMemoryPageSize, gPageSize);
    test.Printf(_L("Total RAM size 0x%08X bytes"), totalRamSize);
    test.Printf(_L(" Swap size 0x%08X bytes"), SwapSize());
    test.Printf(_L(" Page size 0x%08X bytes\n"), gPageSize);

    CacheSize(gMin, gMax);

    if ((DPTest::Attributes() & DPTest::EDataPaging) == 0)
        {
        test.Printf(_L("Writable Demand Paging not supported\n"));
        test.End();
        return 0;
        }

    ShowMemoryUse();

    //User::SetDebugMask(0x00000008);       //KMMU
    //User::SetDebugMask(0x00000080);       //KEXEC
    //User::SetDebugMask(0x90000000);       //KPANIC KMMU2
    //User::SetDebugMask(0x40000000, 1);    //KPAGING

    if (gChunkSize)
        {
        CreateChunk(&gChunk[gNextChunk], gChunkSize);
        ReadChunk(&gChunk[gNextChunk]);
        ShowMemoryUse();
        gNextChunk++;
        }

    CActiveScheduler* myScheduler = new (ELeave) CActiveScheduler();
    CActiveScheduler::Install(myScheduler);

    CActiveConsole* myActiveConsole = new CActiveConsole();
    myActiveConsole->GetCharacter();

    CActiveScheduler::Start();

    test.End();
    return 0;
    }
void SaveCloakStruct(CloakStruct *cloakStruct)
{
    SaveChunk *chunk;
    CloakStruct *savecontents;

    chunk = CreateChunk(BASIC_STRUCTURE, sizeof(CloakStruct), cloakStruct);
    savecontents = (CloakStruct *)chunkContents(chunk);

    savecontents->spaceobj = (SpaceObj *)SpaceObjRegistryGetID(savecontents->spaceobj);

    SaveThisChunk(chunk);
    memFree(chunk);
}
void SaveDefenseStruct(DefenseStruct *defenseStruct)
{
    SaveChunk *chunk;
    DefenseStruct *savecontents;

    chunk = CreateChunk(BASIC_STRUCTURE, sizeof(DefenseStruct), defenseStruct);
    savecontents = (DefenseStruct *)chunkContents(chunk);

    savecontents->bullet = (Bullet *)SpaceObjRegistryGetID((SpaceObj *)savecontents->bullet);
    savecontents->laser = (Bullet *)SpaceObjRegistryGetID((SpaceObj *)savecontents->laser);

    SaveThisChunk(chunk);
    memFree(chunk);
}
void OnNewChunk(Packet& packet) {
    auto chmgr = ChunkManager::Get();

    u16 chunkID;
    u16 neighborhoodID;
    u8 w, h, d;
    vec3 position;
    quat rotation;
    ivec3 poi;

    packet.Read(chunkID);
    packet.Read(neighborhoodID);
    packet.Read(w);
    packet.Read(h);
    packet.Read(d);

    auto ch = chmgr->CreateChunk(w, h, d);
    ch->chunkID = chunkID;

    if(!neighborhoodID) {
        packet.Read(position);
        packet.Read(rotation);
        ch->position = position;
        ch->rotation = rotation;
    } else {
        packet.Read(poi);

        auto neigh = chmgr->GetNeighborhood(neighborhoodID);
        if(!neigh) {
            neigh = chmgr->CreateNeighborhood();
            neigh->neighborhoodID = neighborhoodID;
            neigh->chunkSize = ivec3{w, h, d};
        }

        ch->SetNeighborhood(neigh);
        ch->positionInNeighborhood = poi;
        neigh->UpdateChunkTransform(ch);
    }

    // logger << "New chunk " << chunkID << " at " << position;
}
void SaveAnomalyPing(ping *tping)
{
    SaveChunk *chunk;
    sdword size = sizeofping(tping);
    ping *savecontents;

    chunk = CreateChunk(VARIABLE_STRUCTURE|SAVE_PING, size, tping);
    savecontents = (ping *)chunkContents(chunk);

    savecontents->owner = (SpaceObj *)SpaceObjRegistryGetID(tping->owner);
    savecontents->userID = SpaceObjRegistryGetID((SpaceObj *)tping->userID);

    SaveThisChunk(chunk);
    memFree(chunk);

    if (tping->userDataSize > 0)
    {
        SaveSelection((SpaceObjSelection *)(tping + 1));
    }
}
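// The Save* routines above share one save-game pattern: snapshot the struct
// into a chunk, then overwrite ("swizzle") its raw pointers with stable
// registry IDs so they can be resolved again on load. A minimal
// self-contained sketch of that swizzling step; Registry and Saved are
// hypothetical stand-ins, not the types used in the snippets above:

#include <cstdint>
#include <unordered_map>

struct Registry {
    std::unordered_map<const void*, std::intptr_t> ids;

    // Look up the save-file ID recorded for a live object pointer,
    // mirroring what SpaceObjRegistryGetID does above.
    std::intptr_t IdOf(const void* p) const {
        auto it = ids.find(p);
        return it == ids.end() ? -1 : it->second;   // -1: unregistered object
    }
};

struct Saved {
    void* target;   // a pointer field that must survive serialization
};

// Swizzle in place: store the ID where the pointer used to be, then write
// the raw bytes to disk. The loader performs the inverse lookup.
inline void Swizzle(Saved& s, const Registry& r)
{
    s.target = reinterpret_cast<void*>(r.IdOf(s.target));
}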
void CActiveConsole::ProcessValue()
    {
    switch (iCmdGetValue)
        {
        case 'C':
            if (iValue > 0 && gNextChunk < MAX_CHUNKS)
                {
                CreateChunk(&gChunk[gNextChunk], iValue);
                ReadChunk(&gChunk[gNextChunk]);
                ShowMemoryUse();
                gNextChunk++;
                }
            break;

        case 'H':
            CacheSize(0, iValue);
            break;

        case 'L':
            CacheSize(iValue, 0);
            break;

        case 'P':
            iPeriod = iValue;
            iActions = (TUint16)(iValue < KFlushQuietLimit ? EFlushQuiet : EFlush);
            iTimer->Cancel();
            if (iValue > 0)
                {
                iTimer->Start(0, iValue, TCallBack(Callback, this));
                }
            break;

        default:
            break;
        }
    iCmdGetValue = 0;
    iPrompt = ETrue;
    }
nsresult
SourceBuffer::ExpectLength(size_t aExpectedLength)
{
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aExpectedLength))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}
nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");

    nextChunk = CreateChunk(nextChunkCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
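// Append() defers to FibonacciCapacityWithMinimum() (not shown here) to pick
// the next chunk's capacity. A minimal sketch of such a growth policy, under
// the assumption that capacities follow a Fibonacci-like sequence with a
// floor; the real function's starting terms and rounding may well differ:

#include <cstddef>

std::size_t FibonacciCapacityWithMinimumSketch(std::size_t aMinimum)
{
  std::size_t prev = 4096;          // assumed initial chunk sizes
  std::size_t curr = 8192;
  while (curr < aMinimum) {
    std::size_t next = prev + curr; // Fibonacci step: ~1.6x growth per chunk
    prev = curr;
    curr = next;
  }
  return curr;
}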
MapChunk::MapChunk(int X, int Y, int Data[10][10], Renderer* Render) {
    // resize() (not reserve()) the outer vector so the rows exist before
    // operator[] touches them; indexing a vector that has only been
    // reserved is undefined behavior.
    TextureData.resize(10);
    for (int i = 0; i < 10; i++)
        TextureData[i].reserve(10);
    CreateChunk(X, Y, Data, Render);
}
#if defined DEBUG || defined MMGC_MEMORY_PROFILER
void* GCAlloc::Alloc(size_t size, int flags)
#else
void* GCAlloc::Alloc(int flags)
#endif
{
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    // Allocation must be signalled before we allocate because no GC work must be allowed to
    // come between an allocation and an initialization - if it does, we may crash, as
    // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
    // having it. In principle we could signal allocation late but only set the object
    // flags after signaling, but we might still cause trouble for the profiler, which also
    // depends on non-interruptibility.
    m_gc->SignalAllocWork(m_itemSize);

    GCBlock* b = m_firstFree;
start:
    if (b == NULL) {
        if (m_needsSweeping && !m_gc->collecting) {
            Sweep(m_needsSweeping);
            b = m_firstFree;
            goto start;
        }

        bool canFail = (flags & GC::kCanFail) != 0;
        CreateChunk(canFail);
        b = m_firstFree;
        if (b == NULL) {
            GCAssert(canFail);
            return NULL;
        }
    }

    GCAssert(!b->needsSweeping);
    GCAssert(b == m_firstFree);
    GCAssert(b && !b->IsFull());

    void *item;
    if(b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // clear free list pointer, the rest was zero'd in free
        *(intptr_t*) item = 0;
#ifdef MMGC_MEMORY_INFO
        //check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    } else {
        item = b->nextItem;
        if(((uintptr_t)((char*)item + b->size) & 0xfff) != 0) {
            b->nextItem = (char*)item + b->size;
        } else {
            b->nextItem = NULL;
        }
    }

    // set up bits, items start out white and whether they need finalization
    // is determined by the caller

    // make sure we ended up in the right place
    GCAssert(((flags&GC::kContainsPointers) != 0) == ContainsPointers());

    // this assumes what we assert
    GCAssert((unsigned long)GC::kFinalize == (unsigned long)GCAlloc::kFinalize);

    int index = GetIndex(b, item);
    GCAssert(index >= 0);
    Clear4BitsAndSet(b, index, flags & kFinalize);

    b->numItems++;
#ifdef MMGC_MEMORY_INFO
    m_numAlloc++;
#endif

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items. TODO Minor optimization: when we
    // carve an item off the end of the block, we don't need to check here
    // unless we just set b->nextItem to NULL.
    if (b->IsFull()) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);

        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    // prevent mid-collection (ie destructor) allocations on un-swept pages from
    // getting swept. If the page is finalized and doesn't need sweeping we don't want
    // to set the mark otherwise it will be marked when we start the next marking phase
    // and write barriers won't fire (since its black)
    if(m_gc->collecting) {
        if((b->finalizeState != m_gc->finalizedValue) || b->needsSweeping)
            SetBit(b, index, kMark);
    }

    GCAssert((uintptr_t(item) & ~0xfff) == (uintptr_t) b);
    GCAssert((uintptr_t(item) & 7) == 0);

#ifdef MMGC_HOOKS
    GCHeap* heap = GCHeap::GetGCHeap();
    if(heap->HooksEnabled()) {
        size_t userSize = m_itemSize - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
        heap->AllocHook(GetUserPointer(item), size, userSize);
#else
        heap->AllocHook(GetUserPointer(item), 0, userSize);
#endif
    }
#endif

    return item;
}
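// The 0xfff masks above encode the assumption that every GCBlock occupies a
// single 4 KiB-aligned page: masking an item pointer with ~0xfff recovers
// its block header, and a bump pointer that lands exactly on a page boundary
// means the block is exhausted. A small illustrative sketch of that trick
// (names here are hypothetical, not MMgc's):

#include <cstddef>
#include <cstdint>

constexpr std::uintptr_t kPageMask = 0xfff;   // blocks assumed 4 KiB-aligned

// Recover the owning block header from any interior item pointer.
inline void* BlockOf(void* item)
{
    return reinterpret_cast<void*>(
        reinterpret_cast<std::uintptr_t>(item) & ~kPageMask);
}

// True when advancing the bump pointer by one item reaches the block's end.
inline bool AtBlockEnd(void* item, std::size_t itemSize)
{
    return ((reinterpret_cast<std::uintptr_t>(item) + itemSize) & kPageMask) == 0;
}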
// Member thread function
TInt CSMPSoakThread::DoSMPStressMemoryThread()
    {
    RTest test(_L("SMPStressMemoryThread"));
    test.Start(_L("SMPStressMemoryThread"));

    TMemory *memoryTablePtr;
    TChunkInfo chunkTable[KNumChunks];
    TInt ctIndex = 0;

    test_KErrNone(UserHal::PageSizeInBytes(gPageSize));

    FOREVER
        {
        SetThreadPriority();
        if (gAbort)
            break;

        memoryTablePtr = (TMemory *)(iThreadData.listPtr);
        ctIndex = 0;

        // Create different types of chunks and write/read/verify them
        while (memoryTablePtr->chunkType != EChunkNone)
            {
            PRINT((_L("Create Chunk")));
            CreateChunk(&chunkTable[ctIndex], memoryTablePtr);

            PRINT(_L("Write and Read Chunk"));
            WriteReadChunk(&chunkTable[ctIndex], memoryTablePtr);

            ctIndex++;
            memoryTablePtr++;
            }

        // Commit different types of chunks
        TBool anyCommit;
        do
            {
            anyCommit = EFalse;
            memoryTablePtr = (TMemory *)(iThreadData.listPtr);
            ctIndex = 0;
            while (memoryTablePtr->chunkType != EChunkNone)
                {
                // Commit chunks
                PRINT((_L("Commit Chunk Memory")));
                PRINT((_L("CommitChunk %d bottom %d top %d\n"), ctIndex, memoryTablePtr->initialBottom, memoryTablePtr->initialTop));
                CommitChunk(&chunkTable[ctIndex], memoryTablePtr);
                anyCommit = ETrue;

                // Write into chunks
                WriteReadChunk(&chunkTable[ctIndex], memoryTablePtr);
                PRINT((_L("Write Read Chunk Size %d\n"), (memoryTablePtr->initialTop) - (memoryTablePtr->initialBottom)));

                ctIndex++;
                memoryTablePtr++;
                }
            }
        while (anyCommit);

        // Close the chunks
        memoryTablePtr = (TMemory *)(iThreadData.listPtr);
        ctIndex = 0;
        while (memoryTablePtr->chunkType != EChunkNone)
            {
            chunkTable[ctIndex].chunk.Close();
            ctIndex++;
            memoryTablePtr++;
            }

        User::After(gPeriod);
        }

    test.End();
    test.Close();
    return 0x00;
    }
void* FixedAlloc::Alloc(size_t size, FixedMallocOpts opts)
{
    (void)size;
    GCAssertMsg(m_heap->StackEnteredCheck() || (opts&kCanFail) != 0, "MMGC_ENTER must be on the stack");
    GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

    if(!m_firstFree) {
        bool canFail = (opts & kCanFail) != 0;
        CreateChunk(canFail);
        if(!m_firstFree) {
            if (!canFail) {
                GCAssertMsg(0, "Memory allocation failed to abort properly");
                GCHeap::SignalInconsistentHeapState("Failed to abort");
                /*NOTREACHED*/
            }
            return NULL;
        }
    }

    FixedBlock* b = m_firstFree;
    GCAssert(b && !IsFull(b));

    b->numAlloc++;

    // Consume the free list if available
    void *item = NULL;
    if (b->firstFree) {
        item = b->firstFree;
        b->firstFree = *((void**)item);
        // assert that the freelist hasn't been tampered with (by writing to the first 4 bytes)
        GCAssert(b->firstFree == NULL ||
                (b->firstFree >= b->items &&
                (((uintptr_t)b->firstFree - (uintptr_t)b->items) % b->size) == 0 &&
                (uintptr_t)b->firstFree < ((uintptr_t)b & ~0xfff) + GCHeap::kBlockSize));
#ifdef MMGC_MEMORY_INFO
        //check for writes on deleted memory
        VerifyFreeBlockIntegrity(item, b->size);
#endif
    } else {
        // Take next item from end of block
        item = b->nextItem;
        GCAssert(item != 0);
        if(!IsFull(b)) {
            // There are more items at the end of the block
            b->nextItem = (void *)((uintptr_t)item + m_itemSize);
        } else {
            b->nextItem = 0;
        }
    }

    // If we're out of free items, be sure to remove ourselves from the
    // list of blocks with free items.
    if (IsFull(b)) {
        m_firstFree = b->nextFree;
        b->nextFree = NULL;
        GCAssert(b->prevFree == NULL);

        if (m_firstFree)
            m_firstFree->prevFree = 0;
    }

    item = GetUserPointer(item);
#ifdef MMGC_HOOKS
    if(m_heap->HooksEnabled()) {
#ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize += size;
#endif
        m_heap->AllocHook(item, size, b->size - DebugSize());
    }
#endif

#ifdef _DEBUG
    // fresh memory poisoning
    if((opts & kZero) == 0)
        memset(item, 0xfa, b->size - DebugSize());
#endif

    if((opts & kZero) != 0)
        memset(item, 0, b->size - DebugSize());

    return item;
}
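// Both allocators above keep their free lists inside the free memory itself:
// a freed slot's first word stores the pointer to the next free slot, so no
// side table is needed (see "b->firstFree = *((void**)item)"). A minimal
// self-contained sketch of that intrusive free list; MiniPool is a
// hypothetical stand-in, not an MMgc type:

struct MiniPool {
    void* firstFree = nullptr;

    // Push: link the slot into the list through its own first word.
    void Free(void* item) {
        *reinterpret_cast<void**>(item) = firstFree;
        firstFree = item;
    }

    // Pop: the next-free link is read back out of the slot being returned.
    void* Alloc() {
        if (!firstFree) {
            return nullptr;           // caller would grow via CreateChunk()
        }
        void* item = firstFree;
        firstFree = *reinterpret_cast<void**>(item);
        return item;
    }
};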
int CWaveFile::WriteHeader( void )
/////////////////////////////////////////////////////////////////////////////
{
    m_mmckinfoParent.fccType = mmioFOURCC('W','A','V','E');
    m_mmckinfoParent.dwFlags = MMIO_DIRTY;
    if( CreateChunk( (LPMMCKINFO)&m_mmckinfoParent, MMIO_CREATERIFF ) )
    {
        DPF(( "Could not create WAVE chunk.\n" ));
        Close();
        return( FALSE );
    }

    m_mmckinfoSubchunk.ckid = mmioFOURCC('f','m','t',' ');
    m_mmckinfoSubchunk.cksize = sizeof( PCMWAVEFORMAT );
    if( CreateChunk( (LPMMCKINFO)&m_mmckinfoSubchunk, (UINT)NULL ) )
    {
        DPF(( "Could not create fmt subchunk.\n" ));
        Close();
        return( FALSE );
    }

    //DPF(("FormatSize %ld\n", m_lFormatSize ));
    if( Write( (HPSTR)&m_FormatEx, m_lFormatSize ) != m_lFormatSize )
    {
        DPF(("Write format Failed!\n"));
        Close();
        return( FALSE );
    }

    // Ascend out of the format subchunk.
    if( Ascend( &m_mmckinfoSubchunk ) )
    {
        DPF(("WriteHeader: Ascend format subchunk Failed\n"));
    }

    if( m_FormatEx.wFormatTag != WAVE_FORMAT_PCM )
    {
        DWORD dwSampleLength = 0;
        MMCKINFO mmckinfoFact;

        // create the fact chunk
        mmckinfoFact.ckid = mmioFOURCC('f','a','c','t');
        mmckinfoFact.cksize = sizeof( DWORD );
        if( CreateChunk( (LPMMCKINFO)&mmckinfoFact, (UINT)NULL ) )
        {
            DPF(("CreateChunk Fact subchunk Failed!\n"));
            Close();
            return( FALSE );
        }

        // write a dummy value for the fact chunk
        if( Write( (HPSTR)&dwSampleLength, sizeof( DWORD ) ) != (LONG)sizeof( DWORD ) )
        {
            DPF(("Write Fact subchunk Failed!\n"));
            Close();
            return( FALSE );
        }

        // Ascend out of the fact subchunk.
        if( Ascend( &mmckinfoFact ) )
        {
            DPF(("Ascend Fact subchunk Failed!\n"));
            Close();
            return( FALSE );
        }
    }

    // create the 'data' sub-chunk
    m_mmckinfoSubchunk.ckid = mmioFOURCC('d','a','t','a');
    m_mmckinfoSubchunk.dwFlags = MMIO_DIRTY;
    if( CreateChunk( (LPMMCKINFO)&m_mmckinfoSubchunk, (UINT)NULL ) )
    {
        DPF(("CreateChunk data Failed!\n"));
        Close();
        return( FALSE );
    }

    //DPF(("fccType [%08lx] %lu\n", m_mmckinfoSubchunk.fccType, m_mmckinfoSubchunk.cksize ));

    return( TRUE );
}
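// For reference, the nested RIFF layout WriteHeader() leaves behind. Chunk
// sizes are corrected when each chunk is ascended; the RIFF parent and the
// 'data' chunk are flagged MMIO_DIRTY so their sizes get fixed up when they
// are finally ascended at close time:
//
//   'RIFF' <fileSize>  'WAVE'
//     'fmt ' <16>           PCMWAVEFORMAT fields
//     ['fact' <4>           dwSampleLength -- non-PCM formats only]
//     'data' <dataSize>     sample bytes follow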