status_t
LargeMemoryPhysicalPageMapper::GetSlot(bool canWait, PhysicalPageSlot*& slot)
{
	MutexLocker locker(fLock);

	PhysicalPageSlotPool* pool = fNonEmptyPools.Head();
	if (pool == NULL) {
		if (!canWait)
			return B_WOULD_BLOCK;

		// allocate new pool
		locker.Unlock();
		status_t error = fInitialPool->AllocatePool(pool);
		if (error != B_OK)
			return error;
		locker.Lock();

		fNonEmptyPools.Add(pool);
		pool = fNonEmptyPools.Head();
	}

	slot = pool->GetSlot();

	if (pool->IsEmpty()) {
		fNonEmptyPools.Remove(pool);
		fEmptyPools.Add(pool);
	}

	return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
	PhysicalPageSlotPool* initialPool,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
	fInitialPool = initialPool;
	fNonEmptyPools.Add(fInitialPool);

	// get the debug slot
	GetSlot(true, fDebugSlot);

	// init the kernel translation map physical page mapper
	status_t error = fKernelMapper.Init();
	if (error != B_OK) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to init "
			"kernel translation map physical page mapper!");
		return error;
	}
	_kernelPageMapper = &fKernelMapper;

	// init the per-CPU data
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		fPerCPUData[i].Init();

	return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
	PhysicalPageSlotPool* initialPools, int32 initialPoolCount,
	size_t poolSize,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
	ASSERT(initialPoolCount >= 1);

	fInitialPool = initialPools;
	for (int32 i = 0; i < initialPoolCount; i++) {
		uint8* pointer = (uint8*)initialPools + i * poolSize;
		fNonEmptyPools.Add((PhysicalPageSlotPool*)pointer);
	}

	// get the debug slot
	GetSlot(true, fDebugSlot);

	// init the kernel translation map physical page mapper
	status_t error = fKernelMapper.Init();
	if (error != B_OK) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to init "
			"kernel translation map physical page mapper!");
		return error;
	}
	_kernelPageMapper = &fKernelMapper;

	// init the per-CPU data
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		fPerCPUData[i].Init();

	return B_OK;
}
void
LargeMemoryPhysicalPageMapper::PutSlot(PhysicalPageSlot* slot)
{
	MutexLocker locker(fLock);

	PhysicalPageSlotPool* pool = slot->pool;
	if (pool->IsEmpty()) {
		fEmptyPools.Remove(pool);
		fNonEmptyPools.Add(pool);
	}

	pool->PutSlot(slot);
}
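// A minimal usage sketch, not part of the mapper itself: it shows how a
// caller would typically pair GetSlot() and PutSlot() to peek at physical
// memory through a slot's virtual window. It assumes PhysicalPageSlot
// exposes Map(phys_addr_t) and an `address` member as in the mapper's
// header, and it ignores the TLB invalidation the real mapper performs when
// a slot is remapped. The helper name read_physical_byte() is made up for
// this example.
static status_t
read_physical_byte(LargeMemoryPhysicalPageMapper& mapper,
	phys_addr_t physicalAddress, uint8& _value)
{
	PhysicalPageSlot* slot;
	status_t error = mapper.GetSlot(true, slot);
		// may block while a new pool is allocated
	if (error != B_OK)
		return error;

	// map the page containing the address into the slot's virtual window
	slot->Map(physicalAddress & ~((phys_addr_t)B_PAGE_SIZE - 1));
	_value = *(volatile uint8*)(slot->address
		+ physicalAddress % B_PAGE_SIZE);

	// hand the slot back so its pool is available to other callers again
	mapper.PutSlot(slot);
	return B_OK;
}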
status_t
rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name)
{
	rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
	if (pool == NULL)
		return B_NO_MEMORY;

	if (name == NULL)
		name = "realtime pool";

	status_t status = mutex_init(&pool->lock, name);
	if (status != B_OK) {
		free(pool);
		return status;
	}

	// Allocate enough space for at least one allocation over \a totalSize
	pool->max_size = (totalSize + sizeof(FreeChunk) - 1 + B_PAGE_SIZE)
		& ~(B_PAGE_SIZE - 1);

	area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
		pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		mutex_destroy(&pool->lock);
		free(pool);
		return area;
	}

	pool->area = area;
	pool->available = pool->max_size - FreeChunk::NextOffset();

	// declare the whole heap as one chunk, and add it
	// to the free list
	FreeChunk* chunk = (FreeChunk*)pool->heap_base;
	chunk->SetTo(pool->max_size, NULL);

	pool->free_anchor.SetTo(0, chunk);

	*_pool = pool;

	static pthread_once_t sOnce = PTHREAD_ONCE_INIT;
	pthread_once(&sOnce, &pool_init);

	MutexLocker _(&sPoolsLock);
	sPools.Add(pool);
	return B_OK;
}
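// A hedged usage sketch of the realtime pool API that rtm_create_pool()
// belongs to. It assumes the RealtimeAlloc.h header that declares the rtm_*
// functions is available and that the companion calls rtm_alloc(),
// rtm_free() and rtm_delete_pool() have their usual signatures; error
// handling is kept minimal.
#include <stdio.h>
#include <string.h>

#include <RealtimeAlloc.h>

int
main()
{
	rtm_pool* pool;
	status_t status = rtm_create_pool(&pool, 64 * 1024, "example pool");
	if (status != B_OK) {
		fprintf(stderr, "rtm_create_pool() failed: %s\n", strerror(status));
		return 1;
	}

	// allocations come out of the pool's B_LAZY_LOCK area, so pages stay
	// resident once they have been touched
	void* buffer = rtm_alloc(pool, 4096);
	if (buffer != NULL) {
		memset(buffer, 0, 4096);
		rtm_free(buffer);
	}

	rtm_delete_pool(pool);
	return 0;
}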