/*
 * pt_page_in - handle a TLB fault on vaddr within segment s by making the
 * page resident and installing a TLB entry for it.
 *
 * Three cases, checked in order:
 *   1. Page already resident (valid, has a frame): pure TLB reload.
 *   2. Page is on swap (sfn != -1): allocate a frame, swap it in.
 *   3. Page never loaded: allocate a frame, demand-load from the ELF file.
 *
 * Interrupts stay disabled while the page table and TLB are touched, but
 * are re-enabled across the blocking I/O (swap_read / load_segment_page).
 */
void
pt_page_in(vaddr_t vaddr, struct segment *s)
{
	int spl = splhigh();

	_vmstats_inc(VMSTAT_TLB_FAULT);

	/* Locate this page's entry in the segment's page table. */
	int pt_offset_id = (vaddr - s->vbase) / PAGE_SIZE;
	struct page_detail *pd = &(s->pt->page_details[pt_offset_id]);

	if (pd->valid && pd->pfn != -1) {
		/* Case 1: already resident - just reload the TLB. */
		pd->use = 1;
		_vmstats_inc(VMSTAT_TLB_RELOAD);
		tlb_add_entry(vaddr, pd->pfn * PAGE_SIZE, s->writeable, 1);
		splx(spl);
		return;
	}
	else if (pd->sfn != -1) {
		/*
		 * Case 2: page is on swap - bring it back in.
		 * Drop spl across the potentially-blocking frame
		 * allocation and swap I/O.
		 */
		splx(spl);
		pd->use = 1;
		pd->pfn = cm_getppage();
		swap_read(pd->pfn, pd->sfn);
		pd->sfn = -1;

		/*
		 * BUGFIX: the TLB insert and coremap update used to run
		 * with interrupts enabled; re-disable around them so a
		 * concurrent fault/eviction cannot race the TLB write
		 * (matches the resident-page path above).
		 */
		spl = splhigh();
		tlb_add_entry(vaddr, pd->pfn * PAGE_SIZE, s->writeable, 1);
		cm_finish_paging(pd->pfn, pd);
		splx(spl);
	}
	else {
		/*
		 * Case 3: first touch - demand-load from the executable.
		 * Drop spl across the blocking allocation and file read.
		 */
		splx(spl);
		pd->use = 1;
		pd->pfn = cm_getppage();
		load_segment_page(curthread->t_vmspace->file, vaddr, s,
				  pd->pfn * PAGE_SIZE);

		/* BUGFIX: same interrupt window as the swap-in path. */
		spl = splhigh();
		tlb_add_entry(vaddr, pd->pfn * PAGE_SIZE, s->writeable, 1);
		cm_finish_paging(pd->pfn, pd);
		splx(spl);
	}
}
/* Assumes vmstat_init has already been called */
void
vmstats_inc(unsigned int index)
{
	/*
	 * If interrupts are already off we might be inside an interrupt
	 * handler, where taking a lock is neither needed nor permitted -
	 * bump the counter directly.
	 */
	if (curspl == SPL_HIGH) {
		_vmstats_inc(index);
		return;
	}

	/* Simple check that vmstat_init has been called. */
	assert(stats_lock);

	lock_acquire(stats_lock);
	_vmstats_inc(index);
	lock_release(stats_lock);
}
/* Assumes vmstat_init has already been called */ void vmstats_inc(unsigned int index) { /* simple check that vmstat_init has been called */ KASSERT(stats_lock); lock_acquire(stats_lock); _vmstats_inc(index); lock_release(stats_lock); }
/*
 * vm_fault - handle a TLB miss (or write-to-readonly fault) at faultaddress.
 *
 * Works out which segment (code/data/stack) the address falls in, pages the
 * backing frame in from the ELF file when the segment is file-backed
 * (zero-filling stack and anonymous pages), and installs a TLB entry -
 * read-only for code pages once the first code read has been observed,
 * dirty/writable otherwise.
 *
 * Returns 0 on success; EFAULT, EINVAL, ENOMEM, or a load_each_segment
 * error otherwise. Interrupts are disabled for the page-table work and
 * dropped around the blocking ELF read; the TLB itself is protected by
 * tlb.tlb_lock.
 */
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	u_int32_t ehi, elo;
	struct addrspace *as = curthread->t_vmspace;
	int spl;
	int result;
	int probe;

	/* Track whether this fault repeats the previous fault address. */
	if (first_v != faultaddress) {
		first_v = faultaddress;
		first_read = 0;
	}
	else {
		first_read = 1;
	}

	int p_i;

	spl = splhigh();

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		splx(spl);	/* BUGFIX: was returning with spl raised */
		return EFAULT;
	}

	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	p_i = faultaddress / PAGE_SIZE;

	int segment;	/* -1 invalid, 0 code, 1 data, 2 stack */
	size_t f_size;	/* bytes of this segment backed by the ELF file */
	off_t offset;	/* file offset of the segment's image */
	int flags;	/* segment permission flags */

	/*
	 * Determine in which segment the page lies and pick up its
	 * file-backing properties.
	 */
	struct page *pg;
	if (p_i < 0) {
		panic("addrspace: invalid page table index\n"); // need exception handling
	}
	else if (faultaddress >= vbase1 && faultaddress < vtop1) {
		/* code segment */
		pg = (struct page *)array_getguy(as->useg1,
			(faultaddress - vbase1) / PAGE_SIZE);
		segment = 0;
		f_size = as->as_filesz1;
		offset = as->as_off1;
		flags = as->flag1;
		p_i = (faultaddress - vbase1) / PAGE_SIZE;
	}
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		/* data segment */
		pg = (struct page *)array_getguy(as->useg2,
			(faultaddress - vbase2) / PAGE_SIZE);
		segment = 1;
		f_size = as->as_filesz2;
		offset = as->as_off2;
		flags = as->flag2;
		p_i = (faultaddress - vbase2) / PAGE_SIZE;
	}
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		/* stack segment: anonymous, zero-filled (f_size == 0) */
		pg = (struct page *)array_getguy(as->usegs,
			((faultaddress - stackbase) / PAGE_SIZE));
		segment = 2;
		f_size = 0;
		offset = 0;	/* BUGFIX: was left uninitialized */
		flags = RWE;
		/* BUGFIX: index was computed from vbase2, not stackbase */
		p_i = (faultaddress - stackbase) / PAGE_SIZE;
	}
	else {
		segment = -1;
		splx(spl);	/* BUGFIX: was returning with spl raised */
		return EFAULT;
	}

	int wr_to = 0;			/* install a writable TLB entry? */
	int err;
	int f_amount = PAGE_SIZE;	/* bytes to read from the file */

	/* Handling TLB miss fault type */
	switch (faulttype) {
	    case VM_FAULT_READONLY:
		/* Write to a read-only page: kill the current process. */
		err = EFAULT;
		_exit(0, &err);
		/* not reached */

	    case VM_FAULT_READ:
		if (!pg->valid) {
			/* Page is not in memory - allocate and load it. */
			pg->valid = 1;
			pg->vaddr = faultaddress;
			if (f_size != 0) {
				paddr = getppages(1);
				/* paddr_t is integral: 0 signals failure
				 * (BUGFIX: was compared against NULL). */
				if (paddr == 0) {
					splx(spl);	/* BUGFIX */
					return ENOMEM;
				}
				code_write_nread = 1;	/* reading from code */
				pg->paddr = paddr;
				if (segment != 2) {
					if (f_size < (size_t)f_amount) {
						f_amount = f_size;
					}
					/* Drop spl across the blocking read. */
					splx(spl);
					result = load_each_segment(as->v,
						offset + (p_i * PAGE_SIZE),
						faultaddress, paddr, PAGE_SIZE,
						f_amount, flags & E_ONLY, 0);
					spl = splhigh();
					if (result) {
						splx(spl);	/* BUGFIX */
						return result;
					}
					_vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
					_vmstats_inc(VMSTAT_ELF_FILE_READ);
				}
				else {
					_vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
				}
				if (segment == 0 && first_code_read == 0) {
					first_code_read = 1;
				}
			}
			else {
				/* Anonymous / zero-filled page. */
				paddr = getppages(1);
				if (paddr == 0) {
					splx(spl);	/* BUGFIX */
					return ENOMEM;
				}
				if (segment == 0 && first_code_read == 0) {
					first_code_read = 1;
				}
				_vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
			}
			pg->paddr = paddr;
		}
		else {
			/* Page is in memory - just a TLB reload. */
			if (segment == 0 && first_code_read == 0) {
				first_code_read = 1;
			}
			wr_to = 1;
			paddr = pg->paddr;
			_vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
		}
		break;

	    case VM_FAULT_WRITE:
		wr_to = 1;
		if (!pg->valid) {
			pg->valid = 1;
			pg->vaddr = faultaddress;
			if (f_size != 0) {
				/*
				 * First write into the file-backed data
				 * segment is loaded read-mostly; subsequent
				 * ones are treated as plain writes.
				 */
				if (second_write == 0 && segment == 1) {
					first_read = 0;
					paddr = getppages(1);
					second_write = 1;
					wr_to = 0;
				}
				else {
					first_read = 1;
					paddr = getppages(1);
				}
				if (paddr == 0) {
					splx(spl);	/* BUGFIX */
					return ENOMEM;
				}
				pg->paddr = paddr;
				if (segment != 2) {
					if (f_size < (size_t)f_amount) {
						f_amount = f_size;
					}
					splx(spl);
					result = load_each_segment(as->v,
						offset + (p_i * PAGE_SIZE),
						faultaddress, paddr, PAGE_SIZE,
						f_amount, flags & E_ONLY,
						first_read);
					spl = splhigh();
					if (result) {
						/*
						 * BUGFIX: tlb.tlb_lock is NOT
						 * held here; the old code
						 * released it anyway, and
						 * also leaked spl.
						 */
						splx(spl);
						return result;
					}
					_vmstats_inc(VMSTAT_PAGE_FAULT_DISK);
					_vmstats_inc(VMSTAT_ELF_FILE_READ);
				}
				else {
					_vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
				}
			}
			else {
				paddr = getppages(1);
				if (paddr == 0) {
					splx(spl);	/* BUGFIX */
					return ENOMEM;
				}
				_vmstats_inc(VMSTAT_PAGE_FAULT_ZERO); /* STATS */
			}
			pg->paddr = paddr;
		}
		else {
			paddr = pg->paddr;
			_vmstats_inc(VMSTAT_TLB_RELOAD); /* STATS */
		}
		break;

	    default:
		splx(spl);	/* BUGFIX: was returning with spl raised */
		return EINVAL;
	}

	/*
	 * BUGFIX: count the fault while interrupts are still off - the
	 * unlocked _vmstats_inc variant used to be called after splx().
	 */
	_vmstats_inc(VMSTAT_TLB_FAULT);
	splx(spl);

	if (wr_to == 1 || (segment == 2) || (first_code_read == 1)) {
		lock_acquire(tlb.tlb_lock);

		if (first_code_read && faultaddress >= vbase1
		    && faultaddress < vtop1) {
			/*
			 * After the first read of the code segment, strip
			 * DIRTY from every resident code mapping so later
			 * writes to code fault as read-only.
			 */
			for (i = 0; i < NUM_TLB; i++) {
				TLB_Read(&ehi, &elo, i);
				if (!(elo & TLBLO_VALID)) {
					continue;
				}
				if (ehi >= vbase1 && ehi < vtop1) {
					elo &= ~TLBLO_DIRTY;
				}
				TLB_Write(ehi, elo, i);
			}
			probe = TLB_Probe(faultaddress, 0);
			if (probe >= 0) {
				/* Already mapped - nothing to insert. */
				first_code_read = 0;
				code_write_nread = 0;
				lock_release(tlb.tlb_lock);
				vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */
				return 0;
			}
		}

		/* Try to find a free TLB slot first. */
		for (i = 0; i < NUM_TLB; i++) {
			TLB_Read(&ehi, &elo, i);
			if (elo & TLBLO_VALID) {
				continue;
			}
			ehi = faultaddress;
			if ((first_code_read && faultaddress >= vbase1
			     && faultaddress < vtop1)
			    || ((code_write_nread == 0)
				&& faultaddress >= vbase1
				&& faultaddress < vtop1)) {
				/* Code pages map read-only (no DIRTY). */
				elo = paddr | TLBLO_VALID;
				first_code_read = 0;
				code_write_nread = 0;
			}
			else {
				elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
			}
			TLB_Write(ehi, elo, i);
			DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n",
			      faultaddress, paddr);
			lock_release(tlb.tlb_lock);
			vmstats_inc(VMSTAT_TLB_FAULT_FREE); /* STATS */
			return 0;
		}

		/* No free slot: evict a round-robin victim. */
		int victim = tlb_get_rr_victim();
		//kprintf("vm fault: got our victim, %d \n",victim);
		if (victim < 0 || victim >= NUM_TLB) {
			lock_release(tlb.tlb_lock);
			return EFAULT;
		}
		ehi = faultaddress;
		if ((first_code_read && faultaddress >= vbase1
		     && faultaddress < vtop1)
		    || ((code_write_nread == 0) && faultaddress >= vbase1
			&& faultaddress < vtop1)) {
			elo = paddr | TLBLO_VALID;
			first_code_read = 0;
			code_write_nread = 0;
		}
		else {
			elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		}
		TLB_Write(ehi, elo, victim);
		lock_release(tlb.tlb_lock);
		vmstats_inc(VMSTAT_TLB_FAULT_REPLACE); /* STATS */
	}
	else {
		/*
		 * NOTE(review): no TLB entry is written on this path, yet a
		 * REPLACE is counted - looks suspicious; confirm intent.
		 */
		vmstats_inc(VMSTAT_TLB_FAULT_REPLACE);
	}

	return 0;
}