/*
 * Handle processor traps/faults.  Most of these are reflected
 * to the current partition except for page fault events; these
 * we handle ourselves.
 */
void
exception(struct cpu_thread *thread, uval32 trapno, uval32 error_code)
{
    switch (trapno) {
    case PF_VECTOR:
        page_fault(thread, error_code, get_cr2());
        break;

    case GP_VECTOR:
        gen_prot_fault(thread, trapno, error_code);
        break;

    case DF_VECTOR:
    case TS_VECTOR:
    case NP_VECTOR:
    case SS_VECTOR:
    case AC_VECTOR:
#ifdef DEBUG
        hprintf("Exception: trapno 0x%x\n", trapno);
        dump_cpu_state(thread);
#endif
        raise_fault(thread, trapno, error_code);
        break;

    default:
#ifdef DEBUG
        hprintf("Exception: trapno 0x%x\n", trapno);
        dump_cpu_state(thread);
#endif
        raise_exception(thread, trapno);
        break;
    }
}
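/*
 * A minimal sketch of the vector constants the dispatcher above
 * switches on, using the architectural x86 exception numbers.  The
 * header that actually defines these is not shown, so the
 * name-to-value mapping here is an assumption based on the standard
 * x86 vector assignments.
 */
#define DF_VECTOR  8    /* double fault */
#define TS_VECTOR 10    /* invalid TSS */
#define NP_VECTOR 11    /* segment not present */
#define SS_VECTOR 12    /* stack-segment fault */
#define GP_VECTOR 13    /* general protection fault */
#define PF_VECTOR 14    /* page fault */
#define AC_VECTOR 17    /* alignment check */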
void
signal_proccessing_loop(void)
{
    DEBUG(fprintf(stderr, "Memory Manager: pid(%d)\n", getpid()));
    DEBUG(fprintf(stderr, "Memory Manager running...\n"));

    while (1) {
        signal_number = 0;
        pause();

        if (signal_number == SIGUSR1) {
            /* Page fault */
            char *msg = "Processed SIGUSR1\n";
            noticed(msg);
            page_fault();
        } else if (signal_number == SIGUSR2) {
            /* PT dump */
            char *msg = "Processed SIGUSR2\n";
            noticed(msg);
            dump_vmem_structure();
        } else if (signal_number == SIGINT) {
            char *msg = "Processed SIGINT\n";
            noticed(msg);
            cleanup();
            break;
        } else {
            DEBUG(fprintf(stderr, "Unknown Signal: %d\n", signal_number));
            save_sig_no(0);
        }
    }
}
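/*
 * The loop above blocks in pause() and then inspects a global
 * signal_number, so a signal handler must record which signal
 * arrived.  A minimal sketch of that plumbing, assuming
 * signal_number is the global the loop reads; record_signal and
 * install_handlers are hypothetical names, and the real
 * registration code is not shown.
 */
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t signal_number;

static void record_signal(int sig)      /* hypothetical handler */
{
    signal_number = sig;
}

static void install_handlers(void)      /* hypothetical helper */
{
    struct sigaction sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = record_signal;
    sigaction(SIGUSR1, &sa, NULL);
    sigaction(SIGUSR2, &sa, NULL);
    sigaction(SIGINT, &sa, NULL);
}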
static void
iret(struct cpu_thread *thread)
{
    uval mask;
    uval error;
    uval fault;
    uval new_cs;
    uval eflags;
    int room;

    /* handle any callbacks before returning */
    if (thread->cb)
        thread->cb(thread);

    /*
     * Ensure the stack is there and readable.
     * If there is no stack and we are transitioning to the
     * guest kernel then we have a problem ...
     */
    room = 3 + 2;   /* over estimating (for now) */
    mask = ((thread->tss.srs.regs.ss & 0x3) == 0x3) ? PTE_US : 0;
    if (unlikely(!isstackpresent(thread, room, mask, &error, &fault))) {
#ifdef INTR_DEBUG
        hprintf("iret: esp: 0x%lx not mapped (eip 0x%x:0x%lx)\n",
                fault, thread->tss.srs.regs.cs, thread->tss.eip);
#endif
        if (page_fault(thread, error, fault)) {
            assert((mask & PTE_US) == 0, "no guest kernel stack");
            return;
        }
    }

    thread->tss.eip = pop(thread);
    new_cs = pop(thread);
    eflags = pop(thread) & ~EFLAGS_IOPL_MASK;
    /* FIXME! what eflags are controlled by the partition? */

    if (RPL(new_cs) != RPL(thread->tss.srs.regs.cs)) {
        /* if iret to a diff pl, then pop esp/ss */
        uval tmp_esp = pop(thread);

        thread->tss.srs.regs.ss = pop(thread);
        thread->tss.gprs.regs.esp = tmp_esp;
    }
    thread->tss.srs.regs.cs = new_cs;
    thread->tss.eflags = eflags | thread->cpu->os->iopl | EFLAGS_IF;

    if (eflags & EFLAGS_IF)
        enable_intr(thread);
}
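/*
 * iret() above pops eip/cs/eflags (and, on a privilege-level change,
 * esp/ss) off the guest stack via pop().  A minimal sketch of what
 * such a helper might look like, assuming a 32-bit guest stack and a
 * hypothetical read_guest_32() accessor; the real helper is defined
 * elsewhere.
 */
static uval
pop(struct cpu_thread *thread)
{
    uval val = read_guest_32(thread, thread->tss.gprs.regs.esp);

    thread->tss.gprs.regs.esp += 4;     /* a 32-bit pop raises esp by 4 */
    return val;
}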
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
    if (faultaddress == 0)
        return EFAULT;

    /* Scratch page-table entry handed to page_fault() below */
    struct page_table_entry *pt_entry_temp2 =
        (struct page_table_entry *)kmalloc(sizeof(struct page_table_entry));
    if (pt_entry_temp2 == NULL)
        return ENOMEM;

    lock_acquire(coremap_lock);

    struct addrspace *curaddrspace = curthread->t_addrspace;
    faultaddress &= PAGE_FRAME;

    /* Check whether the fault address falls inside a defined region */
    int i = 0;
    for (i = 0; i < N_REGIONS; i++) {
        if (curaddrspace->regions[i] != NULL) {
            vaddr_t region_end = curaddrspace->regions[i]->region_start +
                (PAGE_SIZE * curaddrspace->regions[i]->npages);
            if (faultaddress >= curaddrspace->regions[i]->region_start &&
                faultaddress < region_end) {
                break;
            }
        }
    }
    if (i == N_REGIONS) {
        lock_release(coremap_lock);
        kfree(pt_entry_temp2);
        return EINVAL;
    }

    /* Reject a write to a read-only (r-- or r-x) region */
    if ((faulttype == VM_FAULT_WRITE) &&
        (curaddrspace->regions[i]->permissions == 0x4 ||
         curaddrspace->regions[i]->permissions == 0x5)) {
        lock_release(coremap_lock);
        kfree(pt_entry_temp2);
        return EINVAL;
    }

    uint32_t ehi, elo;
    ehi = faultaddress;
    paddr_t pa = page_fault(faultaddress, pt_entry_temp2);
    elo = pa | TLBLO_DIRTY | TLBLO_VALID;

    int spl = splhigh();
    tlb_random(ehi, elo);
    splx(spl);

    lock_release(coremap_lock);

    if (pt_entry_temp2->state == 1000)
        kfree(pt_entry_temp2);
    return 0;
}
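/*
 * A minimal sketch of the per-region bookkeeping vm_fault() walks,
 * inferred from the fields referenced above (region_start, npages,
 * permissions).  The struct name and field types are assumptions;
 * the permission encoding follows the ELF r/w/x bit convention
 * (r = 0x4, w = 0x2, x = 0x1, so 0x5 == r-x), which matches the
 * read-only check in the code.
 */
struct region {                 /* hypothetical definition */
    vaddr_t region_start;       /* first virtual address of the region */
    size_t npages;              /* length of the region in pages */
    int permissions;            /* r/w/x bits, e.g. 0x4 == r--, 0x5 == r-x */
};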
int
check_page_table(pid_t pid, char mode, addr_t address, frame_t *frame)
{
    bool found = false;
    int i, index;
    page_table_leaf_t *pt_entry = NULL;

    /*
     * Simulate time needed to access System Page Table
     */
    nanosleep(&ACCESS_TIME_RAM, NULL);

    /*
     * Check the System Page Table to find the entry for this pid
     */
    for (i = 0; i < num_pids; ++i) {
        if (sys_proc_table[i].pid == pid) {
            found = true;
            index = i;
            break;
        }
    }

    /*
     * If not found, then create a spot for it.
     */
    if (!found) {
        printf("%s: New Process (%*d), Creating Page Table\n",
               current_ref, MAX_PID_LEN, pid);
        index = num_pids;
        if (0 != create_new_page_table(pid)) {
            return -1;
        }
    }

    /*
     * Find the page table entry
     */
    find_page_table_entry(address, index, &pt_entry);

    /*
     * If the page is not valid, then it is not resident in RAM, so we
     * need to swap it in from disk.
     */
    if (!pt_entry->valid) {
        /*
         * Page Fault the page into RAM
         */
        if (0 > page_fault(pt_entry)) {
            return -1;
        }
    } else {
        stats.pt_hit++;
    }

    /*
     * Update Cache
     * - Write-through cache will update RAM for us
     */
    add_to_cache(pt_entry->pid, pt_entry->page, pt_entry->frame, mode);

    /*
     * Update the TLB
     */
    add_to_tlb(pt_entry->page, pt_entry->frame, pt_entry->pid);

    *frame = pt_entry->frame;
    return 1;
}
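/*
 * A minimal usage sketch for check_page_table(): resolve one memory
 * reference through the simulated paging hierarchy and report the
 * resulting frame.  translate_one is a hypothetical caller, and the
 * casts for printing assume addr_t and frame_t are integer types.
 */
#include <stdio.h>

void
translate_one(pid_t pid, addr_t address, char mode)     /* hypothetical */
{
    frame_t frame;

    if (check_page_table(pid, mode, address, &frame) < 0) {
        fprintf(stderr, "translation failed for pid %d\n", (int)pid);
        return;
    }
    printf("address 0x%lx -> frame %lu\n",
           (unsigned long)address, (unsigned long)frame);
}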