/* A non-faulting page walk: locate the PTE for proc_vaddr without allocating
 * anything. i.e. this succeeds if the page table corresponding to the address
 * exists.
 *
 * @param pcb         Process whose page directory is consulted (must be non-NULL
 *                    and have a page directory).
 * @param proc_vaddr  Process virtual address to resolve.
 * @param pte         Out parameter: on success, set to the address of the PTE.
 * @return SOS_PAGE_TABLE_SUCCESS on success,
 *         SOS_PAGE_TABLE_INVAL on bad arguments,
 *         SOS_PAGE_TABLE_FAULT if the page table does not exist yet
 *         (i.e. a faulting walk would be required).
 */
int page_walk_nofault(sos_pcb *pcb, seL4_Word proc_vaddr, page_table_entry **pte)
{
    int err = SOS_PAGE_TABLE_SUCCESS;
    seL4_Word pd_idx;
    seL4_Word pt_idx;
    sos_page_table *pt;

    if (!pcb || !pte || !(pcb->sos_pd)) return SOS_PAGE_TABLE_INVAL;

    // Grab PD and PT indices from the given vaddr
    pd_idx = PAGE_TABLE(proc_vaddr);
    pt_idx = PAGE_TABLE_ENTRY(proc_vaddr);

    // Index into PD and PT, which must both exist
    pt = (sos_page_table *) ((pcb->sos_pd) + pd_idx);
    // %p requires a void* argument — cast explicitly
    dprintf(7, "page_walk_nofault: page directory exists at %p\n", (void *) pcb->sos_pd);

    // No page table = fault required
    if (!(*pt)) return SOS_PAGE_TABLE_FAULT;
    dprintf(7, "page_walk_nofault: page table exists\n");

    (*pte) = (page_table_entry *) ((*pt) + pt_idx);

    // Couple of debug prints
    dprintf(6, "I think my PTE is at %p\n", (void *) *pte);
    // sizeof yields size_t: %zu, not %d (mismatched specifier is UB and
    // prints garbage on LP64 targets)
    dprintf(8, "sizeof(struct page_table_entry) = %zu\n", sizeof(struct page_table_entry));
    dprintf(6, "FN: %d\n", (*pte)->framenumber);
    dprintf(8, "page_walk_nofault: returning\n");
    return err;
}
/* A faulting page walk. May need to alloc various frames for SOS and seL4
 * page tables.
 *
 * Allocates a page_walk_data to carry state across the asynchronous steps,
 * then either kicks off a frame_alloc (when the page table is missing) or
 * schedules the finaliser directly.
 *
 * @param pcb         Process whose address space is being walked. Its page
 *                    directory and cspace must exist (panics otherwise).
 * @param proc_vaddr  Process virtual address being mapped.
 * @param attrs       Page attributes to apply, stashed for the finaliser.
 * @param cb          Callback invoked when the walk completes.
 * @param cb_data     Opaque data passed through to cb.
 * @return SOS_PAGE_TABLE_SUCCESS, SOS_PAGE_TABLE_OOM, or an error from
 *         frame_alloc.
 */
int page_walk(sos_pcb *pcb, seL4_Word proc_vaddr, page_attrs attrs, page_table_cb cb, void *cb_data)
{
    int err;
    seL4_Word pd_idx;

    // %p requires a void* argument — cast the integral seL4_Word explicitly
    dprintf(6, "page_walk: seeking %p\n", (void *) proc_vaddr);

    // malloc a structure to store all intermediate data
    page_walk_data *data = malloc(sizeof(struct page_walk_data));
    if (data == NULL) {
        dprintf(1, "page_walk: Could not allocate callback data (OOM)\n");
        return SOS_PAGE_TABLE_OOM;
    }

    // Stash the attrs and callbacks for later
    data->attrs = attrs;
    data->cb = cb;
    data->cb_data = cb_data;
    data->pcb = pcb;

    // Sanity checks - if these fail, process has been corrupted
    // These are ok - no need to check for abort since synchronous call
    conditional_panic(((void*) pcb->sos_pd == NULL), "No page directory");
    conditional_panic((pcb->cspace == NULL), "Process cspace does not exist");

    // Grab PD and PT indices from the given vaddr
    pd_idx = PAGE_TABLE(proc_vaddr);
    data->pt_idx = PAGE_TABLE_ENTRY(proc_vaddr);

    // Index into page directory, which *must* exist (else process is corrupt)
    // pt stores the address of the pointer to the PT, i.e. (**)
    data->pt = (sos_page_table *) (pcb->sos_pd + pd_idx);

    dprintf(6, "page_walk: setting up alloc or finaliser\n");
    if ((*(data->pt)) == NULL) {
        // PT we want doesn't exist, so we alloc it
        dprintf(6, "page_walk: time for frame_alloc\n");
        err = frame_alloc(&_page_table_frame_alloc_cb, (void*) data);
        if (err) {
            // Synchronous failure: the callback will never run, so the
            // walk data would otherwise leak.
            free(data);
        }
        // frame_alloc is asynchronous - on success it will finalise for us
        return err;
    } else {
        // Return to syscall loop, then finalise
        dprintf(6, "page_walk: ready to finalise\n");
        err = sos_task_add_ready(&_sos_page_alloc_finalise, (void *) data,
                                 SOS_TASK_PRIORITY_HIGH);
        if (err) {
            dprintf(1, "page_walk: Could not finalise (%d)\n", err);
            // XXX Could tear down the process here
            free(data);
        }
        return SOS_PAGE_TABLE_SUCCESS;
    }
}
/* Ensure the page table covering addr exists in pagedir, allocating and
 * installing a zeroed one if necessary.
 * Returns KERNEL_SUCCESS_VALUE on success; RETURN_ON_PTR_FAILURE propagates
 * the allocation failure otherwise. */
int x86_prepare_page_table (DWORD *pagedir, DWORD addr)
{
  DWORD *new_table;

  /* Already mapped: nothing to do. */
  if (x86_page_table_present (pagedir, addr))
    return KERNEL_SUCCESS_VALUE;

  /* Grab one fresh page for the table and clear every entry. */
  RETURN_ON_PTR_FAILURE (new_table = page_alloc (1));
  memset (new_table, 0, PAGE_SIZE);

  /* Hook the new table into the directory with the default flags. */
  pagedir[PAGE_TABLE (addr)] = (DWORD) new_table | PAGE_TABLE_DFL_FLAGS;

  return KERNEL_SUCCESS_VALUE;
}
/* Return the base address of the page table covering addr, by masking the
 * flag bits out of the matching page-directory entry. */
INLINE DWORD *
x86_get_page_table (DWORD *pagedir, DWORD addr)
{
  DWORD pde = pagedir[PAGE_TABLE (addr)];
  return (DWORD *) (pde & PAGE_BITS);
}
/* Nonzero iff the page-directory entry covering addr has its present bit
 * set, i.e. a page table exists for that address. */
INLINE int
x86_page_table_present (DWORD *pagedir, DWORD addr)
{
  DWORD pde = pagedir[PAGE_TABLE (addr)];
  return (pde & PAGE_FLAG_PRESENT) != 0;
}
/* Map the physical range [pa, pa+size) at virtual range [va, va+size) in the
 * page tables rooted at pgd_h, one page at a time.
 *
 * @param pgd_h  Page-global-directory handle to modify.
 * @param pa     Physical base address (page-truncated before use).
 * @param va     Virtual base address (page-truncated before use).
 * @param size   Length of the mapping (page-aligned up before use).
 * @param type   BT_PAGE_* type, translated into ARM MMU PTE flags below.
 * @return 0 on success, -1 on an unknown type or on L2 table allocation
 *         failure (NOTE(review): pages already mapped are not rolled back
 *         on a mid-loop failure — confirm callers tolerate this).
 */
int bt_mmu_map(bt_pgd_t pgd_h, bt_paddr_t pa, bt_vaddr_t va, BT_u32 size, int type) {
	BT_u32 flag = 0;
	bt_pte_t pte;
	bt_paddr_t pg;
	BT_u32 ng = 0;

	bt_pgd_t pgd = GET_PGD(pgd_h);

	// Ranges entirely below 0xC0000000 (presumably the user/kernel split —
	// confirm) are marked not-global so they are flushed per-process.
	if((va + size) < 0xC0000000) {
		ng = MMU_PTE_NG;
	}

	pa = BT_PAGE_TRUNC(pa);		// Ensure correct alignments.
	va = BT_PAGE_TRUNC(va);
	size = BT_PAGE_ALIGN(size);

	switch(type) {		// Build up the ARM MMU flags from BT page types.
	case BT_PAGE_UNMAP:
		flag = 0;
		break;

	case BT_PAGE_READ:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RO);
		break;

	case BT_PAGE_WRITE:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RW);
		break;

	case BT_PAGE_SYSTEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_SYSTEM);
		break;

	case BT_PAGE_IOMEM:
		// I/O memory: present but uncached/unbuffered.
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_SYSTEM);
		break;

	default:
		// Unknown page type. (Dead `break` after this return removed.)
		return -1;
	}

	bt_mmu_flush_tlb();

	while(size > 0) {
		if(pte_present(pgd, va)) {
			pte = virt_to_pte(pgd, va);		// Get the page table from PGD.
		} else {
			// If its a section or super-section then return an error! - (Kernel coherent pool?).
			// Allocate and zero a fresh L2 table, then install it in the PGD.
			pg = (bt_paddr_t) BT_CacheAlloc(&g_ptCache);
			if(!pg) {
				return -1;
			}

			memset((void *)pg, 0, MMU_L2TBL_SIZE);
			pte = (bt_pte_t) pg;
			pgd[PAGE_DIR(va)] = (BT_u32) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
		}

		// Write the PTE for this page, then advance one page.
		pte[PAGE_TABLE(va)] = (BT_u32) pa | flag | ng;

		pa   += BT_PAGE_SIZE;
		va   += BT_PAGE_SIZE;
		size -= BT_PAGE_SIZE;
	}

	bt_mmu_flush_tlb();

	return 0;
}
void vtMapNewPages() { queue<uint32_t> newPages, zero; #if !ENABLE_MT vtLoadNeededPages(); #endif { // lock LOCK(vt.newPagesMutex) vt.missingPageCount = vt.neededPages.size(); // just stats keeping if (USE_PBO_PHYSTEX) { uint8_t i = 0; vt.newPageCount = 0; while (i < PBO_PHYSTEX_PAGES && !vt.newPages.empty()) { newPages.push(vt.newPages.front()); vt.newPages.pop(); i++; vt.newPageCount++; // just stats keeping } } else { newPages = vt.newPages; vt.newPageCount = newPages.size(); // just stats keeping vt.newPages = zero; } } // unlock // we do this here instead of in vtLoadNeededPages() when new pages are actually mapped so it runs on the mainThread and the cachedPagesAccessTimes structure doesn't need to be locked vtcReduceCacheIfNecessaryLOCK(vt.thisFrameClock); if (!newPages.empty()) { bool foundSlot = true; const void *image_data; for (uint8_t i = 0; i < c.mipChainLength; i++) { #ifdef DEBUG_ERASE_CACHED_PAGES_EVERY_FRAME vt.mipLevelTouched[i] = true; vt.mipLevelMinrow[i] = 0; vt.mipLevelMaxrow[i] = (c.virtTexDimensionPages >> i) - 1; #else vt.mipLevelTouched[i] = false; vt.mipLevelMinrow[i] = (uint16_t) c.virtTexDimensionPages >> i; vt.mipLevelMaxrow[i] = 0; #endif } #if USE_PBO_PHYSTEX uint8_t xCoordinatesForPageMapping[PBO_PHYSTEX_PAGES]; uint8_t yCoordinatesForPageMapping[PBO_PHYSTEX_PAGES]; uint8_t newPageCount = 0; glBindBuffer(GL_PIXEL_UNPACK_BUFFER, vt.pboPhystex); glBufferData(GL_PIXEL_UNPACK_BUFFER, c.pageMemsize * PBO_PHYSTEX_PAGES, 0, GL_STREAM_DRAW); uint8_t *phys_buffer = (uint8_t *)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY); assert(phys_buffer); #endif glActiveTexture(GL_TEXTURE0 + TEXUNIT_FOR_PHYSTEX); while(!newPages.empty() && foundSlot) { const uint32_t pageInfo = newPages.front(); newPages.pop(); const uint16_t y_coord = EXTRACT_Y(pageInfo), x_coord = EXTRACT_X(pageInfo); const uint8_t mip = EXTRACT_MIP(pageInfo); image_data = vtcRetrieveCachedPageLOCK(pageInfo); if(image_data == NULL) { continue; } // find slot bool foundFree = 
false; uint8_t x, y, storedX = 0, storedY = 0; clock_t lowestClock = vt.thisFrameClock; foundSlot = false; // find least recently used or free page for (x = 0; x < c.physTexDimensionPages; x++) { for (y = 0; y < c.physTexDimensionPages; y++) { if ((vt.textureStorageInfo[x][y].clockUsed < lowestClock) && (vt.textureStorageInfo[x][y].mip < c.mipChainLength - HIGHEST_MIP_LEVELS_TO_KEEP)) { lowestClock = vt.textureStorageInfo[x][y].clockUsed; storedX = x; storedY = y; foundSlot = true; if (lowestClock == 0) { foundFree = true; break; } } } if (foundFree) break; } if (foundSlot) { x = storedX; y = storedY; if (!foundFree) { // unmap page #if DEBUG_LOG > 0 printf("Unloading page from VRAM: Mip:%u %u/%u from %u/%u lastUsed: %llu\n", vt.textureStorageInfo[x][y].mip, vt.textureStorageInfo[x][y].x, vt.textureStorageInfo[x][y].y, x, y, (long long unsigned int)lowestClock); #endif vtUnmapPage(vt.textureStorageInfo[x][y].mip, vt.textureStorageInfo[x][y].x, vt.textureStorageInfo[x][y].y, x, y); // dont need complete version cause we map a new page at the same location } assert((x < c.physTexDimensionPages) && (y < c.physTexDimensionPages)); // map page //vt.textureStorageInfo[x][y].active = true; vt.textureStorageInfo[x][y].x = x_coord; vt.textureStorageInfo[x][y].y = y_coord; vt.textureStorageInfo[x][y].mip = mip; vt.textureStorageInfo[x][y].clockUsed = vt.thisFrameClock; PAGE_TABLE(mip, x_coord, y_coord) = (MIP_INFO(mip) << 24) + (x << 16) + (y << 8) + kTableMapped; touchMipRow(mip, y_coord) if (FALLBACK_ENTRIES) { if (mip >= 1) { _mapPageFallbackEntries(mip - 1, x_coord * 2, y_coord * 2, mip, x, y); _mapPageFallbackEntries(mip - 1, x_coord * 2, y_coord * 2 + 1, mip, x, y); _mapPageFallbackEntries(mip - 1, x_coord * 2 + 1, y_coord * 2, mip, x, y); _mapPageFallbackEntries(mip - 1, x_coord * 2 + 1, y_coord * 2 + 1, mip, x, y); } } #if USE_PBO_PHYSTEX memcpy(phys_buffer + c.pageMemsize * newPageCount, image_data, c.pageMemsize); xCoordinatesForPageMapping[newPageCount] = x; 
yCoordinatesForPageMapping[newPageCount] = y; newPageCount ++; #else if (c.pageDXTCompression) glCompressedTexSubImage2D(GL_TEXTURE_2D, 0, x * c.pageDimension, y * c.pageDimension, c.pageDimension, c.pageDimension, c.pageDXTCompression, c.pageMemsize, image_data); else glTexSubImage2D(GL_TEXTURE_2D, 0, x * c.pageDimension, y * c.pageDimension, c.pageDimension, c.pageDimension, c.pageDataFormat, c.pageDataType, image_data); #if MIPPED_PHYSTEX uint32_t *mippedData; if (IMAGE_DECOMPRESSION_LIBRARY == DecompressionMac) // TODO: assert away other option mippedData = vtuDownsampleImageRGBA((const uint32_t *)image_data); else mippedData = vtuDownsampleImageRGB((const uint32_t *)image_data); glTexSubImage2D(GL_TEXTURE_2D, 1, x * (c.pageDimension / 2), y * (c.pageDimension / 2), (c.pageDimension / 2), (c.pageDimension / 2), c.pageDataFormat, c.pageDataType, mippedData); free(mippedData); #endif #if DEBUG_LOG > 0 printf("Loading page to VRAM: Mip:%u %u/%u to %u/%u\n", mip, x_coord, y_coord, x, y); #endif #endif } else { // lock LOCK(vt.newPagesMutex) printf("WARNING: skipping page loading because there are no free slots %i %i \n", vt.necessaryPageCount, c.physTexDimensionPages * c.physTexDimensionPages); vt.newPages.push(pageInfo); while (!newPages.empty()) { vt.newPages.push(newPages.front()); newPages.pop(); } } // unlock }