/*------------------------------------------------------------------------- * xmunmap - xmunmap *------------------------------------------------------------------------- */ SYSCALL xmunmap(int virtpage ) { STATWORD ps; int getStoreValue, getPageNo; bs_map_t* main_bs; int getReturnValue, bs_no, page_no; unsigned int temp; /* sanity check ! */ if ( (virtpage < 4096) ) { // kprintf("xmummap call error: virtpage (%d) invalid! \n", virtpage); return SYSERR; } disable(ps); //kprintf("\nInside xmunmap().. %d ", virtpage); //getStoreValue = 20; getReturnValue = bsm_lookup(currpid, (virtpage*4096), &getStoreValue, &getPageNo); if (getReturnValue == SYSERR) { // kprintf("xmunmap(): could not find mapping!\n"); restore(ps); return SYSERR; } // kprintf("\nxmunmap(): bs returned %d!\n", getStoreValue); bs_no = getStoreValue; page_no = getPageNo; // For all frames that are mapped decrease their refcnts bstore_dec_fr_refcnt(bs_no, virtpage); getReturnValue = bsm_unmap(currpid, virtpage, bs_no); if( getReturnValue == SYSERR ) { //kprintf("xmummap: bsm_unmap error"); restore(ps); return(SYSERR); } //kprintf("\nxmummap: all OK"); temp = ((proctab[currpid].pdbr)<<12); write_cr3(temp); restore(ps); return(OK); }
/*-------------------------------------------------------------------------
 * xmunmap - unmap the given virtual page from the current process:
 *           write dirty frames back to the backing store, then remove
 *           the backing-store mapping.
 *
 * virtpage - virtual page number to unmap; pages below 4096 are
 *            reserved and rejected.
 *
 * Returns OK on success, SYSERR on an invalid page number (and, via
 * ERROR_CHECK2, on bsm_unmap failure).
 *-------------------------------------------------------------------------
 */
SYSCALL xmunmap(int virtpage )
{
    STATWORD ps;
    int bsid, page;   /* out-params of bsm_lookup: bs id and page offset */

    /* sanity check ! */
    if ( (virtpage < 4096) ){
        kprintf("xmummap call error: virtpage (%d) invalid! \n", virtpage);
        return SYSERR;
    }

    /* If a mapping exists, flush this process's frames for that backing
     * store to disk before the mapping is destroyed. A failed lookup is
     * deliberately ignored here: bsm_unmap below is still attempted. */
    if(OK == bsm_lookup(currpid, VPN2VAD(virtpage), &bsid, &page)){
        write_back_frames(currpid, bsid);
    }

    /* NOTE(review): ERROR_CHECK2 presumably restores 'ps' and returns
     * SYSERR when bsm_unmap fails, but 'ps' is never set by a disable()
     * in this function — confirm the macro's contract. */
    ERROR_CHECK2( bsm_unmap(currpid, virtpage), ps);
    return OK;
}
/*-------------------------------------------------------------------------
 * pfint - paging fault ISR (high-level handler)
 *
 * NOTE(review): this fragment is truncated — it ends at the dangling
 * 'else' below; the branch body and the rest of the function are not
 * visible in this chunk.
 *-------------------------------------------------------------------------
 */
SYSCALL pfint()
{
    STATWORD ps;
    disable(ps);
    int vp, p, q;
    int i;
    unsigned long fault_addr;
    int store, pageth;              /* bs id / page offset from bsm_lookup */
    int new_frame, m_frame = -1;
    pt_t *ptr_pt;

    /* CR2 holds the 32-bit address that caused the fault. */
    fault_addr = read_cr2();
    /* Virtual page number = faulted address with the 12 offset bits dropped. */
    vp = (fault_addr & ~(NBPG - 1)) >> 12;

    /* If the process accesses a backing store it never obtained via
     * getbs, the address is illegal: kill the process.
     * NOTE(review): kill() is not followed by a return here, so
     * execution may continue with 'store'/'pageth' unset — verify. */
    if (bsm_lookup(currpid, fault_addr, &store, &pageth) == SYSERR) {
        kill(currpid);
    }

    /* pdbr is used directly as the page-directory pointer here. */
    pd_t *ptr_pd = (pd_t *) (proctab[currpid].pdbr);
    /* p = top 10 bits (page-directory index), q = next 10 bits
     * (page-table index) of the faulted address. */
    p = (fault_addr & 0xFFC00000) >> 22;
    q = (fault_addr & 0x003FF000) >> 12;
    ptr_pd = ptr_pd + p;

    /* If no page table exists for this directory slot, allocate a frame
     * for one and install it in the directory. */
    if (ptr_pd->pd_pres == 0) {
        get_frm(&new_frame);
        frm_tab[new_frame].fr_pid = currpid;
        frm_tab[new_frame].fr_type = FR_TBL;
        new_frame = new_frame + FRAME0;   /* table index -> physical frame no. */
        add_page_dir(ptr_pd, new_frame, ((proctab[currpid].pdbr & ~(NBPG - 1)) >> 12) - FRAME0);
    } else
/*******************************************************************************
 * Name:    bsm_unmap
 *
 * Desc:    Unmaps the BS associated with the given pid and vpage. We have to
 *          search thru all BS's maps to find the actual mapping, then unlink
 *          the matching node from that BS's singly-linked mapping list.
 *
 * Params:
 *  pid   - pid whose mapping is to be removed
 *  vpage - vpage of the pid whose mapping is to be removed
 *  flag  - not used for now
 *
 * Returns: SYSCALL
 *  OK     - on success
 *  SYSERR - on failure (bad pid, bad vpage, no mapping found, or a
 *           corrupt victim position)
 ******************************************************************************/
SYSCALL bsm_unmap(int pid, int vpage, int flag)
{
    int bs_id = EMPTY;              /* bs id the vpage maps to (from lookup) */
    int offset = EMPTY;             /* page offset within the bs             */
    uint8_t victim_pos = 0;         /* head/mid/tail position of the victim  */
    bs_map_t *prev = NULL;          /* node preceding the victim in the list */
    bs_map_t *victim = NULL;        /* the mapping node being removed        */
    bs_map_t *bsptr = NULL;
    STATWORD ps;

    disable(ps);
    DTRACE_START;

    if (isbadpid(pid)) {
        DTRACE("DBG$ %d %s> bad pid %d\n", currpid, __func__, pid);
        goto RESTORE_AND_RETURN_ERROR;
    }

    if (FALSE == VM_IS_VPAGE_VALID(vpage)) {
        DTRACE("DBG$ %d %s> bad vpno %d\n", currpid, __func__, vpage);
        goto RESTORE_AND_RETURN_ERROR;
    }

    /* Locate the mapping; the lookup also reports the predecessor node
     * and whether the victim sits at the head, middle or tail. */
    if (OK != bsm_lookup(pid, VPAGE_TO_VADDR(vpage), &bs_id, &offset, &prev, &victim_pos)) {
        DTRACE("DBG$ %d %s> bsm_lookup() failed\n", currpid, __func__);
        goto RESTORE_AND_RETURN_ERROR;
    }
    DTRACE("DBG$ %d %s> pid %d, vpage %d is mapped to bs id %d\n", \
            currpid, __func__, pid, vpage, bs_id);

#ifdef DBG_ON
    print_bs_details(bs_id, TRUE);
#endif

    /* There could be three cases:
     * 1. Only one map in BS.
     * 2. Multiple maps and the victim is at head, middle or tail.
     */
    if (1 == (BS_GET_COUNT(bs_id))) {
        /* Sole mapping: the head node itself is the victim.
         * NOTE(review): in this branch the node is cleared below but is
         * only freed when 'prev' is non-NULL — confirm whether
         * bsm_lookup leaves 'prev' NULL for a single-node list. */
        DTRACE("DBG$ %d %s> only one node\n", currpid, __func__);
        victim = bsm_tab[bs_id];
    } else {
        switch (victim_pos) {
            case BS_POS_HEAD:
                /* Victim is the list head: advance the head pointer. */
                DTRACE("DBG$ %d %s> multiple, victim at head\n", \
                        currpid, __func__);
                victim = bsm_tab[bs_id];
                prev = victim;      /* non-NULL prev => victim gets freed */
                bsm_tab[bs_id] = victim->bsm_next;
                break;

            case BS_POS_MID:
                /* Victim is interior: splice it out after 'prev'. */
                DTRACE("DBG$ %d %s> multiple, victim at middle\n", \
                        currpid, __func__);
                victim = prev->bsm_next;
                prev->bsm_next = victim->bsm_next;
                break;

            case BS_POS_TAIL:
                /* Victim is the tail: splice out and move the tail
                 * pointer back to 'prev'. */
                DTRACE("DBG$ %d %s> multiple, victim at tail\n", \
                        currpid, __func__);
                victim = prev->bsm_next;
                prev->bsm_next = victim->bsm_next;
                bsm_data.bsm_tail[bs_id] = prev;
                break;

            default:
                DTRACE("DBG$ %d %s> bad victim position %d\n", \
                        currpid, __func__, victim_pos);
                goto RESTORE_AND_RETURN_ERROR;
        }
    }

    /* Scrub the victim node before (possibly) freeing it. */
    victim->bsm_status = BS_UNMAPPED;
    victim->bsm_pid = EMPTY;
    victim->bsm_isvheap = FALSE;
    victim->bsm_vpno = 0;
    victim->bsm_next = NULL;
    if (prev) {
        /* Free the victim if and only if there are more than one mappings
         * in the BS, on which, prev will be non-null. */
        DTRACE("prev present.. freeing mem\n", NULL);
        freemem((void *) victim, sizeof(bs_map_t));
    }
    BS_DEC_COUNT(bs_id);
    DTRACE("DBG$ %d %s> bs unmapping of pid %d, bs id %d, vpage %d, " \
            "is successful\n", \
            currpid, __func__, pid, bs_id, vpage);

    /* Change the state of the BS to unmapped if there are no assoc. maps. */
    if (0 == BS_GET_COUNT(bs_id)) {
        bsptr = BS_GET_PTR(bs_id);
        bsptr->bsm_status = BS_UNMAPPED;
        DTRACE("DBG$ %d %s> chaning bs id state to unmapped as " \
                "there are no associated processes\n", \
                currpid, __func__, pid, bs_id, vpage);
    }

#ifdef DBG_ON
    print_bs_details(bs_id, TRUE);
#endif

    DTRACE_END;
    restore(ps);
    return OK;

RESTORE_AND_RETURN_ERROR:
    DTRACE("DBG$ %d %s> returning SYSERR\n", currpid, __func__);
    DTRACE_END;
    restore(ps);
    return SYSERR;
}
/*******************************************************************************
 * Name:    pf_handler
 *
 * Desc:    High level page fault handling code. The low level page fault
 *          interrupt handler is written in assembly. It'll setup the required
 *          registers (faulted address in CR2) and the stack frame with the
 *          error code. This routine is responsible for the following:
 *          1. Read the faulted address and lookup BS to find if this address
 *             is mapped. If so, get the BS id and the offset within the BS.
 *             If not, this is an illegal access - kill the process and move
 *             on.
 *          2. Actual paging starts here. Lookup the proctab to find the pid's
 *             pdir base. From the faulted address, we can get the pdir
 *             offset. Using these two, check if a page table exists for the
 *             faulted address. If not, create one.
 *          3. If the page table is present, get the ptbl offset from the
 *             faulted vaddr. This points to the location of the page table
 *             entry. Now, check if the frame associated with this page is
 *             already present in memory (shared pages). If so, update the
 *             page table's 'pres' bit to reflect this and increment the
 *             frame's refcount. If not, allocate a new frame and update the
 *             page table entry to reflect that the frame is present.
 *          4. The processor caches paging entries (TLB) and uses them
 *             whenever possible. When a pgt entry's 'pres' bit is cleared,
 *             we need to flush the entry from the processor cache so that
 *             the processor uses the updated software data (see section 4.8
 *             of the Intel IA32 software developer manual, vol 3). One way
 *             to force the processor to re-read the software tables is to
 *             reload the CR0 register; so if any 'pres' bits are modified,
 *             we reload CR0 (via enable_paging()).
 *
 * Params:  None.
 *
 * Returns: SYSCALL
 *  OK     - on success
 *  SYSERR - on error
 ******************************************************************************/
SYSCALL pf_handler(void)
{
    int bs_id = EMPTY;              /* backing store id of the faulted page  */
    int bs_offset = EMPTY;          /* page offset within that bs            */
    int fr_id = EMPTY;              /* frame id that will back the page      */
    uint32_t pdir_offset = 0;       /* top 10 bits of the faulted vaddr      */
    uint32_t ptbl_offset = 0;       /* middle 10 bits                        */
    uint32_t page_offset = 0;       /* low 12 bits                           */
    uint32_t fault_addr = 0;
    pd_t *pdir = NULL;
    pt_t *ptbl = NULL;
    frm_map_t *frptr = NULL;
    frm_map_t *pt_frptr = NULL;
    virt_addr_t *fault_vaddr = NULL;
    STATWORD ps;

    disable(ps);
    DTRACE_START;
    DTRACE("DBG$ %d %s> inside high-level page fault handler\n", \
            currpid, __func__);

    /* vaddr   : pdir_offset : ptbl_offset : page_offset
     * 32 bits : 10 bits     : 10 bits     : 12 bits */
    fault_addr = read_cr2();

    /* The virtual address is 32-bits. So, we directly read the required set
     * of bits by aliasing it through a structure with matching bit-fields. */
    fault_vaddr = (virt_addr_t *) (&fault_addr);
    pdir_offset = (uint32_t) fault_vaddr->pd_offset;
    ptbl_offset = (uint32_t) fault_vaddr->pt_offset;
    page_offset = (uint32_t) fault_vaddr->pg_offset;
    DTRACE("DBG$ %d %s> faulted vaddr 0x%08x, vpage %d\n", \
            currpid, __func__, fault_addr, VADDR_TO_VPAGE(fault_addr));
    DTRACE("DBG$ %d %s> pd %d, pt %d, pg %d\n", \
            currpid, __func__, pdir_offset, ptbl_offset, page_offset);

    /* Check the BS for a mapping for the faulted vaddr and the pid. If
     * present, record the BS id and the offset within the BS. If not
     * present, it's an illegal memory access: kill the process. */
    if (SYSERR == bsm_lookup(currpid, fault_addr, &bs_id, &bs_offset, NULL, NULL)) {
        DTRACE("DBG$ %d %s> bsm_lookup() failed\n", currpid, __func__);
        DTRACE("DBG$ %d %s> pid %d will be killed\n", \
                currpid, __func__, currpid);
        kprintf("\n\n");
        kprintf("FATAL ERROR: Process '%s' with pid '%d' is trying to access " \
                "virtual memory out of its range! \nThe process will be " \
                "terminated.\n", P_GET_PNAME(currpid), currpid);
        kprintf("\n\n");
        sleep(9);
        /* NOTE(review): restore(ps) is called here AND again at the goto
         * target below; if kill(currpid) returns, ps is restored twice.
         * Confirm kill()'s behavior for the current process. */
        DTRACE_END;
        restore(ps);
        kill(currpid);
        goto RESTORE_AND_RETURN_ERROR;
    }

    /* Get the currpid's page directory and index to the appropriate pgt.
     * If the pgt isn't present, create one. */
    pdir = P_GET_PDIR(currpid);
    if (FALSE == PD_GET_PRES(pdir, pdir_offset)) {
        DTRACE("DBG$ %d %s> pgt not present for pid %d, pd offset %d, " \
                "pt offset %d, pg offset %d, vaddr 0x%08x\n", currpid, \
                __func__, currpid, pdir_offset, ptbl_offset, page_offset, \
                fault_addr);
        ptbl = new_pgt();
        if (NULL == ptbl) {
            DTRACE("DBG$ %d %s> new_pgt() failed\n", currpid, __func__);
            goto RESTORE_AND_RETURN_ERROR;
        }

        /* Fill-in few meta-data for the pgt frame just created. */
        pt_frptr = FR_GET_FPTR(FR_PA_TO_ID(ptbl));
        pt_frptr->fr_pid = currpid;

        /* Set the 'pres' and 'write' bits alone. Rest would've been zeroed
         * out by new_pgt(). Also, set the base of the new page table. */
        pdir[pdir_offset].pd_pres = 1;
        pdir[pdir_offset].pd_write = 1;
        pdir[pdir_offset].pd_base = VADDR_TO_VPAGE((unsigned) ptbl);
    } else {
        DTRACE("DBG$ %d %s> ptbl already present at 0x%08x, fr id %d\n", \
                currpid, __func__, VPAGE_TO_VADDR(PD_GET_BASE(pdir, pdir_offset)),\
                FR_PA_TO_ID(VPAGE_TO_VADDR(PD_GET_BASE(pdir, pdir_offset))));
    }
    ptbl = (pt_t *) VPAGE_TO_VADDR(PD_GET_BASE(pdir, pdir_offset));
    DTRACE("DBG$ %d %s> ptbl present at 0x%08x, fr id %d\n", \
            currpid, __func__, ptbl, FR_PA_TO_ID(ptbl));

    /* Find if a frame representing the same BS id and offset is present in
     * memory (shared pages). If so, just update the page table entry and
     * increment the refcount; otherwise allocate and fill a new frame. */
    if (EMPTY == (fr_id = is_frm_present(bs_id, bs_offset))) {
        DTRACE("DBG$ %d %s> frame not present.. creating a new frame\n", \
                currpid, __func__);
        frptr = get_frm(FR_PAGE);
        if (NULL == frptr) {
            DTRACE("DBG$ %d %s> get_frm() failed\n", currpid, __func__);
            goto RESTORE_AND_RETURN_ERROR;
        }
        fr_id = frptr->fr_id;
        frm_pidmap_oper(fr_id, getpid(), FR_OP_PMAP_SET);
        frm_record_details(fr_id, getpid(), VADDR_TO_VPAGE(fault_addr));

        /* Read the appropriate page from BS onto the new frame. */
        if (SYSERR == read_bs((char *) FR_ID_TO_PA(fr_id), bs_id, bs_offset)) {
            DTRACE("DBG$ %d %s> read_bs() failed for fr id %d, bs %d, " \
                    "offset %d\n", currpid, __func__, fr_id, bs_id, bs_offset);
            goto RESTORE_AND_RETURN_ERROR;
        } else {
            DTRACE("DBG$ %d %s> reading for fr id %d, bs %d, offset %d\n", \
                    currpid, __func__, fr_id, bs_id, bs_offset);
        }

        /* Fill-in the new BS details in the frame. */
        frptr->fr_type = FR_PAGE;
        frptr->fr_bs = bs_id;
        frptr->fr_bsoffset = bs_offset;
        inc_frm_refcount(fr_id);
#ifdef DBG_ON
        print_frm_id(fr_id);
#endif
    } else {
        /* A frame representing the same BS and offset is already present in
         * memory. So, just increment the refcount of the frame. */
        frm_pidmap_oper(fr_id, getpid(), FR_OP_PMAP_SET);
        frm_record_details(fr_id, getpid(), VADDR_TO_VPAGE(fault_addr));
        inc_frm_refcount(fr_id);
    }

    /* In both cases (frame present and frame not present), we need to update
     * the page table entry as at this point, the frame is loaded onto the
     * memory. Do the following w.r.t. the pgt frame:
     * 1. Set the 'pres' bit in the page table entry corresponding to the
     *    newly loaded page to reflect that the page is present in memory.
     * 2. Set the 'write' bit (as given in PA3 description).
     * 3. Update the 'base' of the page entry corresponding to the newly
     *    created page to point to the frame.
     * 4. Increment the refcount of the pgt frame. Unlike data frames, where
     *    refcount denotes the # of processes that map to the actual physical
     *    frame, a pgt frame's refcount reflects the # of pages (that are
     *    part of this pgt) that are present in memory. This will be
     *    decremented when a frame is paged out and the pgt frame will be
     *    freed when the refcount reaches zero. */
    ptbl[ptbl_offset].pt_pres = 1;
    ptbl[ptbl_offset].pt_write = 1;
    ptbl[ptbl_offset].pt_base = FR_ID_TO_VPAGE(fr_id);
    inc_frm_refcount(FR_PA_TO_ID(ptbl));

    /* Reloading the CR0 register forces the processor to flush its cached
     * paging entries and use the updated software tables. */
    enable_paging();

    DTRACE("DBG$ %d %s> returning OK\n", currpid, __func__);
    DTRACE_END;
    restore(ps);
    return OK;

RESTORE_AND_RETURN_ERROR:
    DTRACE("DBG$ %d %s> returning SYSERR\n", currpid, __func__);
    DTRACE_END;
    restore(ps);
    return SYSERR;
}
/*-------------------------------------------------------------------------
 * pfint - paging fault ISR: bring the faulted page in from its backing
 *         store, wiring up the page directory / page table on the way,
 *         then reload CR3 to flush the TLB.
 *-------------------------------------------------------------------------
 */
SYSCALL pfint()
{
    STATWORD ps;
    disable(ps);
    int store, pageth;                  /* bs id / page offset of the fault */
    /* CR2 holds the faulting linear address. */
    unsigned long vaddr = read_cr2();
    /* Top 10 bits: page-directory index; next 10 bits: page-table index. */
    unsigned int p_offset = (vaddr & 0xFFC00000) >> 22;
    unsigned int q_offset = (vaddr & 0x3FF000) >> 12;

    /* NOTE(review): the return value of bsm_lookup is ignored; an
     * unmapped address leaves store/pageth unset and should presumably
     * kill the process — confirm against the other handlers in this file. */
    bsm_lookup(currpid, vaddr, &store, &pageth);

    /* Grab a free frame and load the faulted page into it from the bs. */
    frame_t *frame_backing_store = obtain_frame();
    bs_tab[store].pg_to_frm_map[pageth] = frame_backing_store->frm_num;
    frame_backing_store->bs = store;
    frame_backing_store->bs_page = pageth;
    frame_backing_store->status = FRM_BS;
    frame_backing_store->fr_type = FR_PAGE;
    /* Frame number * 4096 = physical address of the frame. */
    read_bs((char *)(frame_backing_store->frm_num * 4096), store, pageth );
    frame_t * obtained_free_frame = frame_backing_store;
    // unsigned long pdbr = add_pg_dir_entry_for_pg_fault(currpid, p_offset, q_offset, obtained_free_frame);
    unsigned long page_to_frame;
    int avail = 0;
    struct pentry *pptr = &proctab[currpid];
    frame_t *directory = pptr->pd;
    /* Address of the p_offset'th directory entry. */
    pd_t *table_descriptor = (pd_t *) ((4096 * directory->frm_num) + p_offset * sizeof(pd_t));

    if (table_descriptor->pd_pres == 1) {
        /* Page table already exists: find the frame that holds it.
         * NOTE(review): if the scan below finds no match, page_frame is
         * used uninitialized — verify frm_tab always contains pd_base. */
        frame_t *page_frame;
        int i = 0;
        for (i = 0; i < NFRAMES; i++) {
            if (frm_tab[i].frm_num == table_descriptor->pd_base) {
                page_frame= &frm_tab[i];
            }
        }
        /* Point at the q_offset'th entry of that page table and install a
         * present+writable entry referring to the newly loaded frame. */
        pt_t *offset_value = (pt_t *) (4096 * page_frame->frm_num );
        offset_value =offset_value+q_offset;
        pt_t ptr;
        ptr.pt_pres = 1;
        ptr.pt_write = 1;
        ptr.pt_user = 0;
        ptr.pt_pwt = 0;
        ptr.pt_pcd = 0;
        ptr.pt_acc = 0;
        ptr.pt_dirty = 0;
        ptr.pt_mbz = 0;
        ptr.pt_global = 0;
        ptr.pt_avail = 0;
        ptr.pt_base = obtained_free_frame->frm_num;
        *offset_value = ptr;
    } else {
        /* No page table yet: allocate a frame for one, install it in the
         * directory, then install the page entry inside it. */
        frame_t *page_frame = obtain_frame();
        page_frame->status = FRM_PGT;
        page_frame->fr_type = FRM_PGT;
        page_frame->fr_pid = currpid;
        pd_t *table_descriptor = (pd_t *) (4096 * directory->frm_num);
        table_descriptor =table_descriptor+p_offset;
        pd_t ptr1;
        ptr1.pd_pres = 1;
        ptr1.pd_write = 1;
        ptr1.pd_user = 0;
        ptr1.pd_pwt = 0;
        ptr1.pd_pcd = 0;
        ptr1.pd_acc = 0;
        ptr1.pd_mbz = 0;
        ptr1.pd_fmb = 0;
        ptr1.pd_global = 0;
        ptr1.pd_avail = 0;
        ptr1.pd_base = page_frame->frm_num;
        *table_descriptor = ptr1;
        pt_t *offset_value = (pt_t *) (4096 * page_frame->frm_num );
        offset_value =offset_value+q_offset;
        pt_t ptr;
        ptr.pt_pres = 1;
        ptr.pt_write = 1;
        ptr.pt_user = 0;
        ptr.pt_pwt = 0;
        ptr.pt_pcd = 0;
        ptr.pt_acc = 0;
        ptr.pt_dirty = 0;
        ptr.pt_mbz = 0;
        ptr.pt_global = 0;
        ptr.pt_avail = 0;
        ptr.pt_base = obtained_free_frame->frm_num;
        *offset_value = ptr;
        page_to_frame = page_frame->frm_num ;
    }

    unsigned long pdbr= pptr->pdbr;
    struct pentry *pptr1 = &proctab[currpid];
    /* Append the new frame to the tail of this bs mapping's frame list. */
    bs_map_t *map = &(pptr1->map[store]);
    if(map->frm == NULL)
        map->frm = obtained_free_frame;
    else{
        frame_t *tmp = map->frm;
        while(tmp->bs_next != NULL)
            tmp = tmp->bs_next;
        tmp->bs_next = obtained_free_frame;
    }
    /* vpno of the new frame = base vpno of the mapping + page offset. */
    obtained_free_frame->fr_vpno = map->vpno + obtained_free_frame->bs_page;
    obtained_free_frame->fr_pid = currpid;

    /* Reload CR3 (pdbr is a frame number) to flush stale TLB entries. */
    write_cr3(pdbr * NBPG);
    restore(ps);
    return OK;
}
/* pfint - paging fault ISR: validate the faulted address, create the page
 * table if needed, and bring the page in from its backing store, evicting
 * a frame (FIFO over proc_frames) when no free frame is available.
 *
 * NOTE(review): this fragment is truncated — the tail of the function
 * (page-table entry update for the loaded page, restore(ps), return, and
 * the closing brace) is not visible in this chunk.
 *
 * NOTE(review): 'store' and 'pageth' are allocated with
 * getmem(sizeof(int *)) on every fault and never freed — each page fault
 * leaks two small allocations; plain local ints would suffice. Verify.
 */
SYSCALL pfint()
{
    unsigned long cr2,physical_addr;
    virt_addr_t * vaddr;
    int vp,s,o,avail,*store,*pageth;
    unsigned int p,q,pt;
    pd_t *pd;
    pt_t *new_pt;
    STATWORD ps;

    // Disable interrupts
    disable(ps);
    if(GDB) kprintf("\n*************pfint is running!************\n");

    /* 1. Get the faulted address. The processor loads the CR2 register
     * with the 32-bit address that generated the exception. */
    cr2 = read_cr2();
    vaddr = (virt_addr_t *)(&cr2);
    if(GDB) kprintf("&cr2=%x, cr2=%x, &vaddr=%x, vaddr=%x\n",&cr2,cr2,&vaddr,vaddr);

    /* 2. Let 'vp' be the virtual page number of the page containing the
     * faulted address. */
    vp = a2pno(cr2);
    if(GDB) kprintf("vp=%d,\n",vp);

    /* 3. Let pd point to the current page directory. */
    pd = proctab[currpid].pdbr;
    if(GDB) kprintf("pd=%x,\n",pd);

    /* 4. Check that the address is legal (i.e., it has been mapped). If it
     * is not, print an error message and kill the process.
     * NOTE(review): kill() is not followed by a return, so execution may
     * fall through with *store/*pageth unset — verify. */
    pageth = getmem( sizeof(int *) );
    store = getmem( sizeof(int *) );
    if( SYSERR == bsm_lookup(currpid, vp, store, pageth)){
        kprintf("ERROR: This virtual address hasn't been mapped!\n");
        kill(currpid);
    }

    /* 5. p = upper ten bits of the address (page-directory offset).
     * 6. q = bits [21:12] (page-table offset).
     * 7.1 pt = page offset; pd[p] locates the pth page table. */
    p = vaddr->pd_offset;
    q = vaddr->pt_offset;
    pt = vaddr->pg_offset;
    if(GDB) kprintf("p=%d,q=%d,pt=%d\n",p,q,pt);

    /* 7.2 If the pth page table does not exist, obtain a frame for it and
     * initialize it. */
    if(pd[p].pd_pres != 1){
        if(GDB) kprintf("**obtain a frame for the new page table. \n");
        avail = get_frm(); //get the id of a new frame from frm_tab[];
        if (avail == -1) {
            if(GDB) kprintf("Could not create page table!\n");
            restore(ps);
            return SYSERR;
        }
        //initialize frame[avail], update the process_id and frame_type of this frame.
        init_frm(avail, currpid, FR_TBL);
        /* Remember which frame holds the page directory above this pgt. */
        frm_tab[avail].fr_upper_t = pa2frid((unsigned long) pd);
        if(GDB) kprintf("upper page table @frame[%d] pd=%x, a2pno(pd)=%d\n",frm_tab[avail].fr_upper_t, pd, a2pno((unsigned long) pd));
        new_pt = frid2pa(avail);
        init_pt(new_pt);
        //update this page_table_entry in the page_directory.
        pd[p].pd_pres = 1;
        pd[p].pd_write = 1;
        pd[p].pd_user = 0; // not sure about the usage;
        pd[p].pd_pwt = 0;
        pd[p].pd_pcd = 0;
        pd[p].pd_acc = 0;
        pd[p].pd_mbz = 0;
        pd[p].pd_fmb = 0;
        pd[p].pd_global = 0;
        pd[p].pd_avail = 0; // not in use right now.
        pd[p].pd_base = a2pno((unsigned long) new_pt); /* location of page table */
        if(GDB) kprintf("New page_table(%x)@frame[%d] updated in page_directory[%d]@(frame[%d])\n", new_pt, avail, p, frm_tab[avail].fr_upper_t);
        if(GDB) kprintf("q=%d, new_pt[q]=%x, new_pt=%x, pd[p].pd_base=%d\n", q, new_pt[q], new_pt, pd[p].pd_base);
    }
    //if the page table has already existed, just need to refcnt++;
    else {
        /* NOTE(review): 1024 here presumably equals FRAME0 (pd_base is a
         * physical page number; subtracting the frame-pool base yields the
         * frm_tab index) — confirm against the constants header. */
        int avail = pd[p].pd_base -1024;
        frm_tab[avail].fr_refcnt++;
        if(GDB) kprintf("frm_tab[%d].fr_refcnt = %d, frame_type: %d\n",avail, frm_tab[avail].fr_refcnt, frm_tab[avail].fr_type);
    }

    /* 8.1 Using the backing store map, find the store s and page offset o
     * which correspond to vp (already saved in 'store' and 'pageth'). */
    s = *store;
    o = *pageth;

    /* 8.2 If vp has no frame yet, allocate one; if the frame pool is
     * exhausted, evict the oldest frame of this process (FIFO). */
    avail = find_frm(currpid,vp);
    if (avail == -1) {
        if(GDB) kprintf("allocating a page for the page fault\n");
        avail = get_frm();
        if(avail == -1) {
            if(GDB) kprintf("ATTENTION! Frames full. ###Replacement NEEDED!###\n");
            /* Evict proc_frames[currpid][0]: shift the FIFO queue left. */
            int frame_number = proctab[currpid].nframes-1;
            int frame_id = proc_frames[currpid][0];
            //update_proc_frames(pid,frame_number);
            int i;
            for (i = 0; i+1 < frame_number; ++i) {
                proc_frames[currpid][i] = proc_frames[currpid][i+1];
            }
            proctab[currpid].nframes = frame_number;
            int pid = frm_tab[frame_id].fr_pid;
            int upper_id = frm_tab[frame_id].fr_upper_t;
            vp = frm_tab[frame_id].fr_vpno;
            if(GDB) kprintf("currpid=%d, frame[%d].pid=%d .vpno=%d, upper_frame[%d].ref=%d\n",currpid,frame_id,pid,vp,upper_id,frm_tab[upper_id].fr_refcnt);
            /* Clear the 'present' bit of the victim's page-table entry. */
            p = vp>>10;
            q = vp &0x003ff;
            new_pt = vp2pa(pd[p].pd_base);
            new_pt[q].pt_pres = 0;
            new_pt[q].pt_write = 1;
            new_pt[q].pt_base = 0;
            if(GDB) kprintf("pd_offset=%d, pt_offset=%d, pt_dirty=%d\n",p,q,new_pt[q].pt_dirty);
            if(new_pt[q].pt_dirty == 1) {
                /* Dirty victim: write it back to its backing store.
                 * NOTE(review): write_bs is handed new_pt (the page
                 * table), not the victim page's physical address — this
                 * looks wrong; verify against write_bs's contract. */
                pageth = getmem( sizeof(int *) );
                store = getmem( sizeof(int *) );
                if( SYSERR == bsm_lookup(currpid, vp, store, pageth)){
                    kprintf("ERROR: This virtual address hasn't been mapped!\n");
                    kill(currpid);
                }
                if(GDB) kprintf("maping found: {pid: %d, vpno: %d, store: %d, pageth: %d}\n",currpid,vp,*store,*pageth);
                write_bs((char *)new_pt, *store, *pageth);
            }
            init_pt(new_pt);
            reset_frm(frame_id);
            frm_tab[upper_id].fr_refcnt -= 2; //it is 2, not 1.
            if(frm_tab[upper_id].fr_refcnt <= 0){
                //mark the appropriate entry in pd as being not present, and free pt.
            }
            //invalidate the TLB entry for the page vp using the invlpg instruction
            if(pid == currpid) {
                set_PDBR(currpid);
            }
        }
        else {
            /* A free frame exists: record its ownership/metadata and pull
             * the page's contents in from the backing store. */
            init_frm(avail, currpid, FR_PAGE);
            frm_tab[avail].fr_upper_t = pd[p].pd_base-FRAME0;
            if(GDB) kprintf("upper page table @frame[%d]\n",frm_tab[avail].fr_upper_t);
            frm_tab[avail].fr_vpno = vp;
            /* Enqueue the frame on this process's FIFO of owned frames. */
            int counter = proctab[currpid].nframes;
            proc_frames[currpid][counter] = frm_tab[avail].fr_id;
            proctab[currpid].nframes++;
            if(GDB) kprintf("proc_frames[%d][%d] = frame[%d]\n",currpid,counter,avail);
            // Add this frame to head of the frame list within the bs of this process
            //(frm_tab[avail].bs_next)->fr_vpno
            //, proctab[currpid].bsmap[s].frames->bs_next
            if(GDB) kprintf("&frm_tab[avail].bs_next = %x\n",frm_tab[avail].bs_next, &frm_tab[avail].bs_next);
            if(GDB) kprintf("proctab[%d].bsmap[%d].frames = %x, ->vpno=%d, ->bs_next=%x\n",currpid, s, proctab[currpid].bsmap[s].frames, proctab[currpid].bsmap[s].frames->fr_vpno, proctab[currpid].bsmap[s].frames->bs_next);
            /* NOTE(review): this getmem result is immediately overwritten
             * by the next assignment — it is leaked on every fault. */
            frm_tab[avail].bs_next = getmem(sizeof(fr_map_t *));
            frm_tab[avail].bs_next = proctab[currpid].bsmap[s].frames;
            proctab[currpid].bsmap[s].frames = &frm_tab[avail];
            fr_map_t *frame = proctab[currpid].bsmap[s].frames;
            int i = frame->fr_vpno;
            if(GDB) kprintf("i = %d\n",i);
            if(GDB) kprintf("~~~frame[%d] linked to frame[%d]\n", avail, frame->bs_next==NULL?-1:frame->bs_next->fr_id);
            if(GDB) kprintf("frame[%d].bs_next = %x, &**=%x\n",avail,frm_tab[avail].bs_next, &frm_tab[avail].bs_next);
            if(GDB) kprintf("proctab[%d].bsmap[%d].frames = %x, ->vpno=%d, ->bs_next=%x\n",currpid, s, proctab[currpid].bsmap[s].frames, proctab[currpid].bsmap[s].frames->fr_vpno, proctab[currpid].bsmap[s].frames->bs_next);
            if(GDB) kprintf("Mapping frame[%d](ppno[%d]) to {pid[%d], vpno[%d]} -> {bs[%d],offset:%d}\n", avail,frid2vpno(avail),currpid,vp,s,o);
            /* Load the page: copy bs[s] page o into the frame's memory. */
            physical_addr = frid2pa(avail);
            read_bs(physical_addr,s,o);
            if(GDB) kprintf("copied from bs[%d]:offset[%d] to vp[%d]@(%x)\n",s,o,vp,vp2pa(vp));
        }
    }