/*
 * Add an inverse entry for the physical page associated with the mapping from
 * vaddr to paddr into the coremap.  "Inverse mapping" means the page is indexed
 * by the page number calculated from paddr.
 */
int
add_ppage(u_int32_t vaddr, u_int32_t paddr, pid_t pid, u_int32_t status)
{
	int result = 0;
	int spl = splhigh();

	paddr = paddr & PAGE_FRAME;

	/* Get the index of the page in the coremap page table */
	int page_index = (paddr - coremap_base) / PAGE_SIZE;

	/* Make sure that the physical address is valid */
	assert((coremap[page_index].paddr & PAGE_FRAME) == paddr);

	/*
	 * Add the mapping and set the attribute bits.
	 */
	coremap[page_index].vaddr = vaddr;

	/* If it is a kernel address (allocated by the kernel), also set the kernel attribute flag */
	if (vaddr > USERTOP)
		coremap[page_index].paddr = SET_VALID(paddr) | SET_DIRTY(paddr) | SET_KERNEL(paddr);
	else
		coremap[page_index].paddr = SET_VALID(paddr) | SET_DIRTY(paddr);

	/* Initialize the remaining _PTE fields for this entry */
	coremap[page_index].last_access_time_sec = 0;
	coremap[page_index].last_access_time_nsec = 0;
	coremap[page_index].pid = pid;
	coremap[page_index].status = status;

	/*
	 * Mark the page entry of the coremap as unavailable.
	 */
	if (!bitmap_isset(core_memmap, page_index))
		bitmap_mark(core_memmap, page_index);

	splx(spl);
	return result;
}
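/*
 * The SET_*/IS_* attribute helpers and the coremap entry layout are not shown
 * in this listing.  The following is a minimal sketch of what they might look
 * like, assuming the attribute bits live in the low-order bits of paddr that
 * PAGE_FRAME masks off (PAGE_SIZE is 4096, so the low 12 bits are unused by
 * the frame number).  The bit positions and struct fields here are
 * hypothetical, not the project's actual definitions.
 */
#define PTE_VALID    0x00000001
#define PTE_DIRTY    0x00000002
#define PTE_KERNEL   0x00000004
#define PTE_SWAPPED  0x00000008

#define SET_VALID(paddr)    ((paddr) | PTE_VALID)
#define SET_DIRTY(paddr)    ((paddr) | PTE_DIRTY)
#define SET_KERNEL(paddr)   ((paddr) | PTE_KERNEL)
#define SET_SWAPPED(paddr)  ((paddr) | PTE_SWAPPED)
#define IS_VALID(paddr)     ((paddr) & PTE_VALID)
#define IS_KERNEL(paddr)    ((paddr) & PTE_KERNEL)

/* Hypothetical shape of one inverse page table (coremap) entry */
struct _PTE {
	u_int32_t vaddr;                 /* virtual page mapped to this frame */
	u_int32_t paddr;                 /* frame address | attribute bits */
	pid_t pid;                       /* owning process */
	u_int32_t status;                /* PAGE_CLEAN / PAGE_DIRTY */
	u_int32_t last_access_time_sec;  /* for the replacement policy */
	u_int32_t last_access_time_nsec;
};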
static FAR uint8_t *lpc43_cacheread(struct lpc43_dev_s *priv, off_t sector)
{
  FAR const uint8_t *src;
  off_t blkno;
  int index;

  /* Convert from the 512 byte sector to the erase sector size of the device.  For
   * example, if the actual erase sector size is 4KiB (1 << 12), then we first
   * shift to the right by 3 to get the sector number in 4096-byte increments.
   */

  blkno = sector >> (SPIFI_BLKSHIFT - SPIFI_512SHIFT);
  fvdbg("sector: %ld blkno: %d\n", sector, blkno);

  /* Check if the requested erase block is already in the cache */

  if (!IS_VALID(priv) || blkno != (off_t)priv->blkno)
    {
      /* No.. Flush any dirty erase block currently in the cache */

      lpc43_cacheflush(priv);

      /* Read the new erase block into the cache */
      /* Get the SPIFI address corresponding to the new erase block */

      src = SPIFI_BASE + (blkno << SPIFI_BLKSHIFT);

      /* Read the entire erase block from FLASH */

      lpc43_pageread(priv, priv->cache, src, SPIFI_BLKSIZE);

      /* Mark the sector as cached */

      priv->blkno = (uint16_t)blkno;

      SET_VALID(priv);          /* The data in the cache is valid */
      CLR_DIRTY(priv);          /* It should match the FLASH contents */
      CLR_ERASED(priv);         /* The underlying FLASH has not been erased */
    }

  /* Get the index to the 512 byte sector in the erase block that holds the argument */

  index = sector & ((1 << (SPIFI_BLKSHIFT - SPIFI_512SHIFT)) - 1);

  /* Return the address in the cache that holds this sector */

  return &priv->cache[index << SPIFI_512SHIFT];
}
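/* A minimal sketch of how a block-read method might consume lpc43_cacheread():
 * each 512-byte logical sector is looked up in the cached erase block and
 * copied out with memcpy().  The function name lpc43_bread_cached is an
 * assumption for illustration, not necessarily the driver's real symbol, and
 * the fragment relies on the driver file's existing includes (string.h,
 * sys/types.h).
 */

static ssize_t lpc43_bread_cached(struct lpc43_dev_s *priv, off_t startsector,
                                  size_t nsectors, FAR uint8_t *buffer)
{
  FAR uint8_t *src;
  size_t i;

  for (i = 0; i < nsectors; i++)
    {
      /* Get a pointer to the 512-byte sector inside the cached erase block,
       * reading the erase block from FLASH if it is not already cached.
       */

      src = lpc43_cacheread(priv, startsector + i);

      /* Copy the sector into the caller's buffer */

      memcpy(buffer, src, 1 << SPIFI_512SHIFT);
      buffer += 1 << SPIFI_512SHIFT;
    }

  return (ssize_t)nsectors;
}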
/*
 * alloc_page(): allocate a single page:
 * -------------------------------------
 * 1. Snatch a page from the paging module by calling snatch_a_page().
 * 2. Insert the page into the coremap and mark the bitmap properly.
 * 3. Return the physical address of the page.
 */
u_int32_t
alloc_page(u_int32_t vaddr, int pid)
{
	u_int32_t paddr;

	/*
	 * Snatch a page from the paging module.  The paging module is responsible
	 * for all the paging/swapping mechanics needed to allocate the page.
	 */
	paddr = snatch_a_page();

	/* Set the valid attribute; snatch_a_page() is guaranteed to return a valid paddr */
	paddr = SET_VALID(paddr);

	/*
	 * Check whether it is a kernel page; if so, mark it as kernel so that we
	 * never touch (evict) it later.
	 */
	if (vaddr >= MIPS_KSEG0)
		paddr = SET_KERNEL(paddr);

	/* Add the inverse entry for this page mapped to vaddr into the coremap page table */
	add_ppage(vaddr, paddr, pid, PAGE_DIRTY);

	return paddr;
}
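/*
 * snatch_a_page() belongs to the paging module and is not shown in these
 * listings.  Below is a simplified, hypothetical sketch of what it might do
 * with the data structures used above: scan the coremap bitmap for a free
 * frame and, if none is free, fall back to evicting a victim.  The
 * choose_victim() helper and the exact eviction sequence are assumptions for
 * illustration only.
 */
static u_int32_t snatch_a_page_sketch(void)
{
	u_int32_t i, paddr;
	int spl = splhigh();

	/* First look for a frame that is still free in the coremap bitmap */
	for (i = 0; i < coremap_size; i++) {
		if (!bitmap_isset(core_memmap, i)) {
			splx(spl);
			return coremap_base + i * PAGE_SIZE;
		}
	}

	/* No free frame: pick a victim (hypothetical helper), swap it out, reuse its frame */
	i = choose_victim();
	paddr = coremap[i].paddr & PAGE_FRAME;
	splx(spl);

	swapout(get_empty_chunk(), paddr);
	remove_ppage(paddr);
	return paddr;
}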
/*
 * Bring the page back from disk into memory, swapping out a victim page if
 * necessary.
 */
u_int32_t
load_page_into_memory(u_int32_t vaddr, pid_t pid)
{
	/* Get the chunk containing the demanded page (not in memory) from disk */
	u_int32_t chunk = get_spage(vaddr, pid);

	/*
	 * Snatch an entry in the page table for this page, swapping out a victim
	 * page if necessary.
	 */
	u_int32_t paddr = snatch_a_page();
	assert(paddr != 0x0);

	/*
	 * Now we have a free entry in the page table for this page, so bring the
	 * page back from disk by swapping the chunk into paddr.
	 */
	swapin(paddr, chunk);

	/*
	 * Set the attributes.
	 */
	paddr = SET_VALID(paddr);
	paddr = SET_SWAPPED(paddr);

	/*
	 * We have now swapped the page into memory.  Add the page table entry for
	 * this page.
	 */
	int spl = splhigh();
	assert((chunk & PAGE_FRAME) / PAGE_SIZE < swaparea_size);
	/* add_ppage(vaddr, paddr, swaparea[(chunk & PAGE_FRAME)/PAGE_SIZE].pid, PAGE_CLEAN); */
	add_ppage(vaddr, paddr, pid, PAGE_CLEAN);	/* Changed by tocurtis */
	splx(spl);

	return paddr;
}
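/*
 * A sketch of how a TLB-miss handler might use load_page_into_memory(): on a
 * fault for a page that is known to be swapped out, bring it back in and load
 * a TLB entry for it.  This fragment assumes OS/161's TLB_Random() interface
 * and glosses over the rest of vm_fault() (fault type checks, address space
 * lookup, permissions), so treat it as illustrative only.
 */
static void tlb_load_swapped_page(vaddr_t faultaddress, pid_t pid)
{
	u_int32_t ehi, elo;
	vaddr_t vpage = faultaddress & PAGE_FRAME;

	/* Swap the page back in and get its (attribute-tagged) physical address */
	u_int32_t paddr = load_page_into_memory(vpage, pid);

	/* Build and load a TLB entry; mark it dirty so writes are allowed */
	int spl = splhigh();
	ehi = vpage;
	elo = (paddr & PAGE_FRAME) | TLBLO_DIRTY | TLBLO_VALID;
	TLB_Random(ehi, elo);
	splx(spl);
}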
/* Allocate n contiguous kernel pages */
u_int32_t
kpage_nalloc(int n, int pid)
{
	u_int32_t paddr;
	int spl = splhigh();
	int i;
	unsigned index = 0, count = 0;
	unsigned swap_index = 0, swap_count = 0;
	unsigned best_count = 0, best_index = 0;

	/* Just one page?  Then snatch a page and update the coremap page table. */
	if (n == 1) {
		paddr = snatch_a_page();
		paddr = SET_VALID(paddr);
		paddr = SET_KERNEL(paddr);
		add_ppage(PADDR_TO_KVADDR(paddr), paddr, curthread->pid, PAGE_DIRTY);
		splx(spl);
		return (paddr & PAGE_FRAME);
	}

	/*
	 * Otherwise, find the index of a hole that contains at least n contiguous
	 * pages.
	 */
	for (i = 0; i < (int)coremap_size; i++) {
		/*
		 * We must not replace kernel pages, so only runs of non-kernel pages
		 * can form a hole large enough to reclaim.
		 */
		if (IS_KERNEL(coremap[i].paddr)) {
			/* A kernel page breaks the current run of replaceable pages */
			swap_count = 0;
		}
		else {
			/* A non-kernel page: if the previous page was kernel, restart the hole here */
			if (swap_count == 0)
				swap_index = i;

			/* Keep track of how many contiguous pages could be replaced */
			swap_count++;

			/* We now have n contiguous non-kernel pages: a potential hole */
			if ((int)swap_count >= n) {
				/* Update the index and size of the best hole found so far */
				if (swap_count > best_count) {
					best_count = swap_count;
					best_index = swap_index;
				}
				/* Restart the search from the next page to find a better hole (if one exists) */
				swap_count = 0;
			}
		}

		/* If this page is free, start or extend the current run of free pages */
		if (!bitmap_isset(core_memmap, i)) {
			if (count == 0)
				index = i;
			count++;

			/* n contiguous free pages is the perfect hole, so stop the search */
			if ((int)count == n)
				break;
		}
		else {
			/* An allocated page breaks the run of free pages */
			count = 0;
		}
	}

	/*
	 * If the number of contiguous free pages found in the search is not
	 * enough, we need to replace some non-kernel pages, starting from the best
	 * non-kernel hole found in the search.
	 */
	if ((int)count < n) {
		if ((int)best_count < n) {
			/* Not enough non-kernel pages to replace either; give up */
			splx(spl);
			return 0;
		}
		else {
			/* We have enough non-kernel pages to replace */
			for (i = best_index; i < (int)(best_index + best_count); i++) {
				u_int32_t ppaddr = coremap[i].paddr;

				/* Only valid pages need to be swapped out */
				if (IS_VALID(ppaddr)) {
					/* Find an empty chunk on disk to swap the existing page out to;
					 * this also marks the entry in the swap table. */
					u_int32_t chunk = get_empty_chunk();
					splx(spl);

					/* Now swap the page out into that chunk of the disk */
					swapout(chunk, ppaddr);
					total_asyncpage_write++;

					/*
					 * Update the coremap to make the page frame free for
					 * allocation.  We allocate it below, so there is a
					 * possibility of a race condition here.
					 */
					remove_ppage(ppaddr);
				}
			}

			/*
			 * We have now replaced the pages starting from best_index to make
			 * space for the demanded n pages; claim them below.
			 */
			index = best_index;
		}
	}

	/* Claim the hole we set up above */
	for (i = index; i < (int)(index + n); i++) {
		add_ppage(PADDR_TO_KVADDR(coremap[i].paddr), coremap[i].paddr, pid, PAGE_DIRTY);
	}

	splx(spl);
	paddr = coremap[index].paddr;
	return (paddr & PAGE_FRAME);
}
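/*
 * kpage_nalloc() returns a physical address, but OS/161's kmalloc() expects
 * alloc_kpages() to return a kernel virtual address.  A thin wrapper along
 * these lines could bridge the two; using curthread->pid here mirrors the
 * n == 1 path above and is an assumption about how the pid argument is meant
 * to be filled in.
 */
vaddr_t alloc_kpages(int npages)
{
	u_int32_t paddr = kpage_nalloc(npages, curthread->pid);
	if (paddr == 0)
		return 0;	/* out of memory: no hole of npages contiguous frames */
	return PADDR_TO_KVADDR(paddr);
}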