static page * get_page(unsigned page_num) { page * current_page; unsigned frame_num; if (pthread_mutex_lock(&globals.metadata[page_num].lock) != SUCCESS) return NULL; if (globals.metadata[page_num].frame_number == -1) { if ((current_page = select_page_wrapper()) == NULL) return NULL; /* already locked mutex to selected page */ frame_num = current_page->meta->frame_number; update_strategy_metadata(page_num); page_meta * swapped_meta = current_page->meta; current_page->meta = &globals.metadata[page_num]; if (pthread_mutex_unlock (&globals.select_mutex) == ERROR) return NULL; if (write_to_swap(swapped_meta) == ERROR) return NULL; if (read_from_swap(current_page, page_num, frame_num) == ERROR) return NULL; current_page->meta->modified = false; current_page->meta->reference_counter = 0; current_page->meta->frame_number = frame_num; } else current_page = &globals.pages[globals.metadata[page_num].frame_number]; current_page->meta->reference_counter++; return current_page; }
paddr_t swap_out(int flag){ struct coremap *local_coremap = coremap_list; struct coremap *swap_coremap = NULL ;//coremap_list; uint32_t min = 4294967295; //struct tlbshootdown *tlbsd = (struct tlbshootdown*)kmalloc(sizeof(struct tlbshootdown)) ; struct tlbshootdown tlbsd ; while(local_coremap!=NULL){ if(local_coremap->status == 0 && local_coremap->timestamp <= min /*&& local_coremap->swap == 0*/){ min = local_coremap->timestamp; swap_coremap = local_coremap; } local_coremap = local_coremap->next; } if(swap_coremap == NULL){ panic("Could not find page in other processes\n"); } struct addrspace *temp = swap_coremap->as; struct page_table_entry *pt = temp->page_table; while(pt!=NULL){ if(pt->pa == swap_coremap->pa && pt->state!=1) break; pt=pt->next; } off_t off = 0; if(pt == NULL) panic("swap_out : test panic \n");//off = write_to_swap(PADDR_TO_KVADDR(swap_coremap->pa),-1) ; else{ //swap_coremap->swap = 1; //if(swap_coremap->as == curthread->t_addrspace){ tlbsd.ts_vaddr = swap_coremap->va ; vm_tlbshootdown(&tlbsd) ; //} off = write_to_swap(PADDR_TO_KVADDR(swap_coremap->pa),pt->offset) ; pt->state = PAGE_ON_DISK; pt->offset = off; } bzero((void *)PADDR_TO_KVADDR(swap_coremap->pa),PAGE_SIZE); swap_coremap->timestamp = localtime; localtime++; swap_coremap->pages=1; if(flag){ //We are swapping to allocate a kernel page. swap_coremap->status = 1; swap_coremap->as=NULL; } else{ //We are swapping to allocate user page swap_coremap->status = 0 ; swap_coremap->as=curthread->t_addrspace; } //swap_coremap->swap = 0; return swap_coremap->pa; }
paddr_t swap_kpages(int npages){ paddr_t pa = 0; int count = 0; struct coremap *local_coremap = coremap_list; struct coremap *start = coremap_list; //First find sequence of user allocated pages. Kernel pages cannot be swapped out. lock_acquire(coremap_lock); while(local_coremap != NULL && count!=npages){ if((local_coremap->status & 0x3) == 0){ //Last two bits - free and fixed should be 0. if(count == 0) start = local_coremap; count++; } else{ count = 0; } local_coremap = local_coremap->next; } if(count == npages){ local_coremap = start; count = 0; while(count!=npages){ local_coremap->timestamp = localtime; localtime++; local_coremap->pages = 0; local_coremap->status = 1; if((local_coremap->status & 0x4) == 0){ struct addrspace *temp = local_coremap->as; struct page_table_entry *pt = temp->page_table; while(pt!=NULL){ if(pt->pa == local_coremap->pa) break; pt=pt->next; } off_t off = write_to_swap(PADDR_TO_KVADDR(local_coremap->pa),pt->offset) ; if(off == -10) panic("swap Kpages : \n"); update_pagetable_entry(local_coremap,off); } //bzero((void *)PADDR_TO_KVADDR(local_coremap->pa),PAGE_SIZE); local_coremap->as = NULL; local_coremap = local_coremap->next; count++; } start->pages = count; lock_release(coremap_lock); return start->pa; } lock_release(coremap_lock); panic("swap_kpages : Could not swap out %d pages\n",npages); return pa; }
/*
 * coremap_getFrames - allocate `n` physically contiguous frames and
 * return the physical address of the first one (0 on failure).
 *
 * First tries to find a free run of n frames in the coremap; failing
 * that, evicts a victim page from the current address space and reuses
 * its frame.  NOTE(review): the eviction path only frees a single
 * frame, so multi-frame requests that find no free run still fail.
 *
 * BUGFIXES vs. original:
 *  - over-large requests now fail fast instead of falling through to
 *    evict one frame (the original only zeroed the already-zero paddr);
 *  - off-by-one: a free run ending exactly at the last frame
 *    (i + n == max_pages) was rejected by `i + n < max_pages`;
 *  - a NULL current address space no longer reaches Pvictim();
 *  - removed the no-effect statements `global_coremap[i];` and
 *    `(void) seg_type;` (seg_type is in fact used).
 */
paddr_t
coremap_getFrames(unsigned long n, bool swappable, int seg_type)
{
	paddr_t paddr = 0;

	/* Requesting more memory than the managed physical range holds. */
	if (n * PAGE_SIZE + lo_paddr > hi_paddr) {
		return 0;
	}

	struct coremap_entry **coremap = get_global_coremap();

	/* Grab the lock for synchronization. */
	lock_acquire(coremap_lk);
	for (unsigned int i = 0; i < max_pages; i++) {
		if (coremap[i]->cm_occupied) {
			continue;
		}
		/* Check whether frames i .. i+n-1 are all free. */
		bool lengthFree = true;
		for (unsigned int j = i + 1; (j < i + n) && (j < max_pages); j++) {
			if (coremap[j]->cm_occupied) {
				lengthFree = false;
				break;
			}
		}
		/* A run that ends exactly at max_pages is valid (<=, not <). */
		if (lengthFree && (i + n <= max_pages)) {
			paddr = coremap[i]->cm_paddr;
			for (unsigned int j = i; j < i + n; j++) {
				coremap[j]->cm_occupied = true;
				coremap[j]->cm_length = n - (j - i);
				coremap[j]->cm_proc = curproc;
				coremap[j]->cm_swappable = swappable;
				coremap[j]->seg_type = seg_type;
			}
			lock_release(coremap_lk);
			return paddr;
		}
	}

	/* No free run: evict a victim from the current address space. */
	struct addrspace *as = curproc_getas();
	if (as == NULL) {
		/* Cannot choose a victim without an address space; fail
		 * rather than hand Pvictim() a NULL pointer. */
		lock_release(coremap_lk);
		return 0;
	}

	struct pt_entry *pte = Pvictim(as, seg_type);
	coremap[pte->cm_index]->cm_proc = curproc;
	coremap[pte->cm_index]->cm_swappable = swappable;
	paddr = coremap[pte->cm_index]->cm_paddr;
	lock_release(coremap_lk);
	/* Swap I/O is performed after dropping the coremap lock. */
	write_to_swap(pte);

	return paddr;
}