/*!	Acquires a free physical page slot, allocating a new slot pool on demand.

	\param canWait If \c false and no pool currently has a free slot, the
		call fails with \c B_WOULD_BLOCK instead of allocating a new pool
		(pool allocation may block).
	\param slot Set to the acquired slot on success.
	\return \c B_OK on success, \c B_WOULD_BLOCK when \a canWait is \c false
		and all pools are exhausted, or the error returned by
		AllocatePool().
*/
status_t
LargeMemoryPhysicalPageMapper::GetSlot(bool canWait, PhysicalPageSlot*& slot)
{
	MutexLocker locker(fLock);

	// pick any pool that still has free slots
	PhysicalPageSlotPool* pool = fNonEmptyPools.Head();
	if (pool == NULL) {
		if (!canWait)
			return B_WOULD_BLOCK;

		// allocate new pool
		// Drop the lock while allocating -- AllocatePool() may block.
		locker.Unlock();
		status_t error = fInitialPool->AllocatePool(pool);
		if (error != B_OK)
			return error;
		locker.Lock();

		fNonEmptyPools.Add(pool);
		// Re-read the head rather than using the new pool directly:
		// other threads may have returned slots (or added pools) while
		// the lock was dropped.
		pool = fNonEmptyPools.Head();
	}

	slot = pool->GetSlot();

	// If we just took the pool's last free slot, move it to the empty
	// list so future lookups skip it until a slot is returned.
	if (pool->IsEmpty()) {
		fNonEmptyPools.Remove(pool);
		fEmptyPools.Add(pool);
	}

	return B_OK;
}
/*!	One-time early-boot initialization of the 32 bit x86 paging method.

	Adopts the page hole and page directory set up by the boot loader
	(stage2), clears all userland page directory entries, creates the
	initial physical page slot pool, and installs the physical page
	mappers.

	\param args Boot-loader kernel arguments (provides page hole and
		page directory addresses).
	\param _physicalPageMapper Set to the physical page mapper to be used
		by the VM on success.
	\return \c B_OK on success, or the error from the initial pool
		initialization.
*/
status_t
X86PagingMethod32Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("X86PagingMethod32Bit::Init(): entry\n");

	// page hole set up in stage2
	fPageHole = (page_table_entry*)(addr_t)args->arch_args.page_hole;

	// calculate where the pgdir would be
	// The page directory is mapped as the last page of the 4 MB page hole
	// region (1024 entries * B_PAGE_SIZE).
	fPageHolePageDir = (page_directory_entry*)
		(((addr_t)args->arch_args.page_hole)
			+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));

	// clear out the bottom 2 GB, unmap everything
	memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
		sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);

	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
		args->arch_args.vir_pgdir;

#ifdef TRACE_X86_PAGING_METHOD_32_BIT
	TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif

	X86PagingStructures32Bit::StaticInit();

	// create the initial pool for the physical page mapper
	// Placement-new into static storage -- the heap is not available yet
	// at this point of the boot process.
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("X86PagingMethod32Bit::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}

	TRACE("X86PagingMethod32Bit::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}
/*!	Returns a previously acquired slot to its owning pool.

	If the owning pool had no free slots before this call, it is moved
	from the empty list back onto the non-empty list, so GetSlot() can
	find it again.
*/
void
LargeMemoryPhysicalPageMapper::PutSlot(PhysicalPageSlot* slot)
{
	MutexLocker locker(fLock);

	PhysicalPageSlotPool* owner = slot->pool;

	// Determine list membership before returning the slot: a pool with
	// no free slots lives on the empty list.
	const bool poolWasExhausted = owner->IsEmpty();
	if (poolWasExhausted) {
		// the pool is about to regain a free slot -- list it as usable
		fEmptyPools.Remove(owner);
		fNonEmptyPools.Add(owner);
	}

	owner->PutSlot(slot);
}
/*!	One-time early-boot initialization of the 32 bit ARM paging method.

	Adopts the kernel page directory set up by the boot loader, creates
	the initial physical page slot pool, and installs the physical page
	mappers. Mirrors X86PagingMethod32Bit::Init(), minus the x86-specific
	page hole and CR4 handling.

	\param args Boot-loader kernel arguments (provides page directory
		addresses).
	\param _physicalPageMapper Set to the physical page mapper to be used
		by the VM on success.
	\return \c B_OK on success, or the error from the initial pool
		initialization.
*/
status_t
ARMPagingMethod32Bit::Init(kernel_args* args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

	fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
	fKernelVirtualPageDirectory = (page_directory_entry*)
		args->arch_args.vir_pgdir;

	TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
		fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);

	ARMPagingStructures32Bit::StaticInit();

	// create the initial pool for the physical page mapper
	// Placement-new into static storage -- the heap is not available yet
	// at this point of the boot process.
	PhysicalPageSlotPool* pool
		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
			PhysicalPageSlotPool;
	status_t error = pool->InitInitial(args);
	if (error != B_OK) {
		panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
			"for physical page mapper!");
		return error;
	}

	// create physical page mapper
	large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
		fKernelPhysicalPageMapper);
		// TODO: Select the best page mapper!

	// enable global page feature if available
	// Disabled x86 leftover kept for reference; an ARM equivalent (e.g.
	// global bit in the TLB entries on ARMv6+) still needs to be hooked
	// up here.
#if 0 //IRA: check for ARMv6!!
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
		// this prevents kernel pages from being flushed from TLB on
		// context-switch
		x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
	}
#endif

	TRACE("ARMPagingMethod32Bit::Init(): done\n");

	*_physicalPageMapper = fPhysicalPageMapper;
	return B_OK;
}
/*!	Allocates a new pool of 1024 physical page mapping slots.

	Creates two areas: a locked data area holding one page table page plus
	the PhysicalPageSlot array, and a 4 MB null area reserving the virtual
	address range the slots will map into. The page table is then wired
	into the kernel page directory (and propagated to all existing page
	directories).

	\param _pool Set to the newly created pool on success; ownership is
		transferred to the caller.
	\return \c B_OK on success, \c B_NO_MEMORY or an area-creation error
		otherwise. On failure all intermediate resources are released.
*/
status_t
X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
	X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
	// create the pool structure
	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
	if (pool == NULL)
		return B_NO_MEMORY;
	// frees the pool automatically on any early return below
	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

	// create an area that can contain the page table and the slot
	// structures
	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
	void* data;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
		PAGE_ALIGN(areaSize), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
		&virtualRestrictions, &physicalRestrictions, &data);
	if (dataArea < 0)
		return dataArea;

	// create the null area for the virtual address space
	// Reserves the 4 MB (1024 pages) window the pool's slots will map
	// physical pages into; no memory is backing it.
	void* virtualBase;
	area_id virtualArea = vm_create_null_area(
		VMAddressSpace::KernelID(), "physical page pool space",
		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
		CREATE_AREA_PRIORITY_VIP);
	if (virtualArea < 0) {
		delete_area(dataArea);
		return virtualArea;
	}

	// prepare the page table
	// The first page of the data area serves as the page table; start
	// with all entries unmapped.
	memset(data, 0, B_PAGE_SIZE);

	// get the page table's physical address
	phys_addr_t physicalTable;
	X86VMTranslationMap32Bit* map = static_cast<X86VMTranslationMap32Bit*>(
		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	// QueryInterrupt() is the interrupt-safe variant; interrupts are
	// disabled around it here -- presumably to satisfy its calling
	// contract (TODO: confirm against its declaration).
	cpu_status state = disable_interrupts();
	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	// Each page directory entry covers 4 MB (1024 pages).
	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
	page_directory_entry* entry
		= &map->PagingStructures32Bit()->pgdir_virt[index];
	PutPageTableInPageDir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	// mirror the new entry into every existing page directory
	X86PagingStructures32Bit::UpdateAllPageDirs(index, *entry);

	// init the pool structure
	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
	// success -- hand ownership of the pool to the caller
	poolDeleter.Detach();
	_pool = pool;
	return B_OK;
}