Esempio n. 1
0
ac_bool test_crs(void) {
  ac_bool error = AC_FALSE;
  union cr0_u cr0 = { .raw = get_cr0() };
  // cr1 is reserved
  ac_uint cr2 = get_cr2();
  union cr3_u cr3 = { .raw = get_cr3() };
  union cr4_u cr4 = { .raw = get_cr4() };
  ac_uint cr8 = get_cr8();

  print_cr0("cr0", cr0.raw);
  ac_printf("cr2: 0x%p\n", cr2);
  print_cr3("cr3", cr3.raw);
  print_cr4("cr4", cr4.raw);
  ac_printf("cr8: 0x%p\n", cr8);

  set_cr0(cr0.raw);
  // cr2 is read only
  set_cr3(cr3.raw);
  set_cr4(cr4.raw);
  set_cr8(cr8);

  ac_uint cr0_1 = get_cr0();
  ac_uint cr3_1 = get_cr3();
  ac_uint cr4_1 = get_cr4();
  ac_uint cr8_1 = get_cr8();

  error |= AC_TEST(cr0.raw == cr0_1);
  error |= AC_TEST(cr3.raw == cr3_1);
  error |= AC_TEST(cr4.raw == cr4_1);
  error |= AC_TEST(cr8 == cr8_1);

  return error;
}
Esempio n. 2
0
/*
** VMX insn operates on 64 bits in long mode
** so we ensure allocation
*/
/*
** Read a VMCS field into *val.
**
** Real fields (enc.fake == 0): vmread the full 64-bit word, panic on
** failure, then store it according to the encoded field width.  Note
** the 16/32-bit cases assign into val->raw, which (assuming wlow/low
** are narrow union members) zero-extends and clobbers val's upper
** bits -- NOTE(review): confirm that is intended rather than a
** partial val->wlow / val->low update.
**
** Fake fields shadow live hardware state: the destination address is
** compared against vm_state slots to decide which register (cr2 or
** dr6) to refresh from the CPU.
*/
void __regparm__(2) __vmcs_force_read(raw64_t *val, vmcs_field_enc_t enc)
{
   raw64_t tmp;

   if(!enc.fake)
   {
      vmx_insn_err_t vmx_err;

      /* vmread transfers a full 64-bit word in long mode */
      if(!vmx_vmread(&vmx_err, &tmp.raw, enc.raw))
	 panic("vmread(0x%x) err %d\n", enc.raw, vmx_err.raw);

      /* Narrow to the declared field width. */
      switch(enc.fwidth)
      {
      case VMCS_FIELD_ENC_FIELD_WIDTH_16: val->raw = tmp.wlow; break;
      case VMCS_FIELD_ENC_FIELD_WIDTH_32: val->raw = tmp.low;  break;
      default:                            val->raw = tmp.raw;  break;
      }

      return;
   }

   /* Fake field: refresh the shadowed hardware register instead. */
   if(val == (raw64_t*)&vm_state.cr2)
      vm_state.cr2.raw = get_cr2();
   else if(val == (raw64_t*)&vm_state.dr6)
      vm_state.dr6.raw = get_dr6();
}
Esempio n. 3
0
/*
 * Handle processor traps/faults.  Page faults are handled by the
 * hypervisor itself; everything else is reflected back into the
 * current partition, with a debug dump on the way for the fatal and
 * unexpected vectors.
 */
void
exception(struct cpu_thread *thread, uval32 trapno, uval32 error_code)
{
	/* Page faults: handled locally, no debug dump. */
	if (trapno == PF_VECTOR) {
		page_fault(thread, error_code, get_cr2());
		return;
	}

	/* General protection faults get their own reflection path. */
	if (trapno == GP_VECTOR) {
		gen_prot_fault(thread, trapno, error_code);
		return;
	}

#ifdef DEBUG
	hprintf("Exception: trapno 0x%x\n", trapno);
	dump_cpu_state(thread);
#endif

	switch (trapno) {
	case DF_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case AC_VECTOR:
		/* Error-code-bearing faults are reflected with the code. */
		raise_fault(thread, trapno, error_code);
		break;
	default:
		/* Anything else is raised without an error code. */
		raise_exception(thread, trapno);
		break;
	}
}
Esempio n. 4
0
/*
 * Task entrypoint. A double fault switches into this code. All double
 * faults are fatal, so just print out something useful and panic. It
 * would be nice at some point to hook into the pthreads code so that
 * the current thread information could be dumped too.
 */
void
double_fault_handler(void)
{
	oskit_addr_t addr;
	struct x86_desc *sd;
	extern void tss_dump(struct x86_tss *);

	/* This handler never returns to the faulting context. */
	while (1) {
		/* cr2 holds the linear address of the last page fault;
		   a stack overflow is the usual cause of a double fault. */
		addr = get_cr2();

		/* This seems to keep netboot from going nuts */
		clear_ts();

		printf("\nDOUBLE FAULT: possible stack overflow at %#x\n",
		       addr);

		/*
		 * Read the back_link field of the current TSS and use that
		 * to index into the GDT and obtain the previous TSS segment
		 * descriptor.  Use the base fields of that descriptor to get
		 * the linear address of the x86_tss struct.  Finally, pass
		 * lintokv of that address to tss_dump to get useful info!
		 */
		sd = &base_gdt[task_tss.back_link / 8];	/* 8 bytes/descriptor */
		addr = ((sd->base_high << 24) | (sd->base_med << 16) |
			sd->base_low);
		tss_dump((struct x86_tss *)lintokv(addr));
		tss_dump_stack_trace((struct x86_tss *)lintokv(addr), 64);

		panic("DOUBLE FAULT panic");
	}
}
Esempio n. 5
0
/*
 * Trap handler. All we (currently) care about is page faults. Everything
 * else is passed through.
 */
int
svm_page_fault_handler(struct trap_state *ts)
{
	if (ts->trapno == T_PAGE_FAULT) {
		int	rcode, enabled;

		/* Run the fault resolution with interrupts disabled,
		   restoring the previous interrupt state afterwards. */
		enabled = osenv_intr_save_disable();
		     
		/* Record the faulting linear address in the trap state
		   and hand it to the SVM fault resolver. */
		ts->cr2 = get_cr2();
		rcode   = svm_fault(ts->cr2, ts->err);

		if (enabled)
			osenv_intr_enable();

		/* Unresolved faults become signals: SIGSEGV for a plain
		   page fault, SIGBUS for anything else. */
		if (rcode != SVM_FAULT_OKAY)
			rcode = oskit_sendsig(rcode == SVM_FAULT_PAGEFLT
					      ? SIGSEGV : SIGBUS, ts);
		
		return rcode;
	}

	/*
	 * Not a page fault. Pass it through to the application as
	 * a signal. If signal handling is not enabled, a trap dump
	 * will be generated.
	 */
	return sendsig_trap_handler(ts);
}
Esempio n. 6
0
/** @brief Prepare the swexn stack in order to run user handler
 *
 *   This function pushes registers to user handler stack and changes
 *   the value of esp and eip in the kernel stack of current thread.
 *
 *   Offsets used below: 5 words of iret frame, 8 saved general
 *   registers, 1 error-code slot.
 *
 *   @param type: Has error code or not
 *   @param cause: The cause of this exception or fault
 *   @return void.
 */
void swexn_handler(int type, int cause)
{
	/* cr2 is read unconditionally; it is only meaningful when the
	   cause is a page fault. */
	uint32_t fault_addr = (uint32_t)get_cr2();
	unsigned int *esp0 = (unsigned int*)cur_thread->esp0;
	unsigned int *swexn_esp = (unsigned int*)cur_thread->swexn_esp;
	/* Mirror the saved frame onto the user's swexn stack.
	   NOTE(review): the slot layout (and the gap left in the
	   NO_ERROR_CODE case) must match what the asm entry stubs
	   pushed -- confirm against those stubs before changing. */
	switch(type){
		case HAS_ERROR_CODE:
			memcpy(swexn_esp - (8 + 5 + 1), esp0 - (8 + 5 + 1), 
            sizeof(int) * (8 + 5 + 1));
			break;
		case NO_ERROR_CODE:
			memcpy(swexn_esp - 5, esp0 - 5, sizeof(int) * 5);
			memcpy(swexn_esp - (8 + 5 + 1), esp0 - (8 + 5), sizeof(int) * 8);
			break;
		default:
			panic("WTF in swexn_handler!");
			break;
	}

	/** Modify the esp value in the kernel stack */
	*(esp0 - 2) = swexn_stack_setup(cur_thread, 
		fault_addr, cause);
	
	/** Modify the eip value in the kernel stack */
	*(esp0 - 5) = (unsigned int)cur_thread->swexn_eip;

	/** Deregister the swexn handler */
	cur_thread->swexn_esp = 0;
	cur_thread->swexn_eip = 0;
	cur_thread->swexn_handler_args = NULL;

	return;
}
Esempio n. 7
0
/*
** VMX insn operates on 64 bits in long mode
** so we ensure allocation
*/
/*
** Read a VMCS field into *val.
**
** Real fields (enc.fake == 0): vmread the full 64-bit word, panic on
** failure, then store it according to the encoded field width -- the
** 16/32-bit cases update only the low word/dword of *val, leaving its
** upper bits untouched.
**
** Fake fields shadow live hardware state: the destination address is
** compared against vm_state slots to decide which register (cr2 or
** dr6) to refresh from the CPU.
*/
void __regparm__(2) __vmcs_force_read(raw64_t *val, vmcs_field_enc_t enc)
{
   raw64_t tmp;

   if(!enc.fake)
   {
      vmx_insn_err_t vmx_err;

      if(!vmx_vmread(&vmx_err, &tmp.raw, enc.raw))
         panic("vmread(0x%x) err %d\n", enc.raw, vmx_err.raw);

      /* Narrow to the declared field width. */
      switch(enc.fwidth)
      {
      case VMCS_FIELD_ENC_FIELD_WIDTH_16: val->wlow = tmp.wlow; break;
      case VMCS_FIELD_ENC_FIELD_WIDTH_32: val->low  = tmp.low;  break;
      default:                            val->raw  = tmp.raw;  break;
      }

      /*
      ** don't use debug, since some fields might have not been read
      */
#ifdef CONFIG_VMX_ACC_DBG
      printf("vmread(0x%x) = 0x%X\n", enc.raw, tmp.raw);
#endif
      return;
   }

   /* Fake field: refresh the shadowed hardware register instead. */
   if(val == (raw64_t*)&vm_state.cr2)
      vm_state.cr2.raw = get_cr2();
   else if(val == (raw64_t*)&vm_state.dr6)
      vm_state.dr6.raw = get_dr6();
}
Esempio n. 8
0
/*
 * Copy the current values of the x86 control registers into the
 * caller-supplied slots.  cr1 does not exist; cr3 is fetched via
 * get_cr3_raw().
 */
void
pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, 
			   pal_cr_t *cr3, pal_cr_t *cr4 )
{
	*cr0 = get_cr0();
	*cr2 = get_cr2();	/* linear address of the last page fault */
	*cr3 = get_cr3_raw();
	*cr4 = get_cr4();
}
Esempio n. 9
0
/** @brief Print out the registers
 *
 *  This function is called to dump the registers when a thread is killed 
 *  by the kernel because of a fault or exception.
 *
 *  The saved frame is read by walking backwards from the top of the
 *  thread's kernel stack (esp0): first the iret frame (ss, esp,
 *  eflags, cs, eip), then the eight saved general registers.  Segment
 *  registers and cr2 are read live from the CPU.
 *
 *  @param type: Has error code or not
 *  @param cause: The cause of this exception or fault
 *  @param msg: The error message 
 * 
 *  @return void
 */
void dump_regs(int type, int cause, char* msg)
{
	unsigned int *ureg = (unsigned int*)cur_thread->esp0;

	/* iret frame, highest address first */
	unsigned int ss = *(--ureg);
	unsigned int esp = *(--ureg);
	unsigned int eflags = *(--ureg);
	unsigned int cs = *(--ureg);
	unsigned int eip = *(--ureg);
	/* NOTE(review): skipping one slot for NO_ERROR_CODE presumably
	 * matches a placeholder pushed by the entry stub in that case --
	 * confirm against the asm wrappers. */
	if(type == NO_ERROR_CODE)
		--ureg;
	/* saved general registers */
	unsigned int eax = *(--ureg);
	unsigned int ecx = *(--ureg);
	unsigned int edx = *(--ureg);
	unsigned int ebx = *(--ureg);
	unsigned int ebp = *(--ureg);
	unsigned int esi = *(--ureg);
	unsigned int edi = *(--ureg);
	/* live CPU state, not part of the saved frame */
	unsigned int gs = get_gs();
	unsigned int fs = get_fs();
	unsigned int es = get_es();
	unsigned int ds = get_ds();
	unsigned int cr2 = get_cr2();

	/* BUGFIX: the fixed-format body alone is ~460 bytes and sprintf
	 * is unbounded, so the old 512-byte buffer could overflow once
	 * msg exceeded roughly 50 characters.  1024 leaves headroom. */
	char buf[1024];
	int len = 0;

	len += sprintf(buf+len, "%s\n", msg);
	len += sprintf(buf+len, "cause: %d         ", cause);
	len += sprintf(buf+len, "cr2: 0x%8x        ", cr2);
	len += sprintf(buf+len, "ds: 0x%8x         \n", ds);
	len += sprintf(buf+len, "es: 0x%8x         ", es);
	len += sprintf(buf+len, "fs: 0x%8x         ", fs);
	len += sprintf(buf+len, "gs: 0x%8x         \n", gs);

	len += sprintf(buf+len, "eip: 0x%8x        ", eip);
	len += sprintf(buf+len, "cs: 0x%8x         ", cs);
	len += sprintf(buf+len, "eflags: 0x%8x     \n", eflags);
	len += sprintf(buf+len, "esp: 0x%8x        ", esp);
	len += sprintf(buf+len, "ss: 0x%8x         ", ss);
	
	len += sprintf(buf+len, "eax: 0x%8x        \n", eax);
	len += sprintf(buf+len, "ecx: 0x%8x        ", ecx);
	len += sprintf(buf+len, "edx: 0x%8x        ", edx);
	len += sprintf(buf+len, "ebx: 0x%8x        \n", ebx);
	len += sprintf(buf+len, "ebp: 0x%8x        ", ebp);
	len += sprintf(buf+len, "esi: 0x%8x        ", esi);
	len += sprintf(buf+len, "edi: 0x%8x        \n\n", edi);

	printf("%s",buf);
	return;
}
Esempio n. 10
0
/*
 * 32-bit panic path: report the register state saved in the kernel
 * TSS together with the live control registers, optionally preceded
 * by a machine-check dump and a backtrace.  Ends in panic() and does
 * not return.
 */
static void
panic_32(__unused int code, __unused int pc, __unused const char *msg, boolean_t do_mca_dump, boolean_t do_bt)
{
	struct i386_tss *my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(pc);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	if (do_mca_dump) {
#if CONFIG_MCA
		/*
		 * Dump the contents of the machine check MSRs (if any).
		 */
		mca_dump();
#endif
	}

#if MACH_KDP
	/*
	 * Print backtrace leading to first fault:
	 */
	if (do_bt)
		panic_i386_backtrace((void *) my_ktss->ebp, 10, NULL, FALSE, NULL);
#endif

	/* Control registers are read live; everything else comes from
	   the TSS snapshot. */
	panic("%s at 0x%08x, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x%s\n",
		  msg,
	      my_ktss->eip, code,
	      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip, virtualized ? " VMM" : "");
}
Esempio n. 11
0
void
page_fault(u32 errcode, const struct registers *regs)
{
    u32 faultaddr = get_cr2();

    if (faultaddr < 0x1000) {
        dump_regs(regs);
        kprintf("NULL page fault at 0x%x\r\n", faultaddr);
        halt();
    }

    if (errcode & 0x2) { // caused by page write    
        if (PAGE_DIR[faultaddr >> 22] & 0x1) {
            if ((PAGE_TABLES[faultaddr >> 12] & 0x3) == 0x1) {
                // present but not writable
                kprintf("invalid write to 0x%08X\r\n", faultaddr);
                halt();
            }
        }
    }

    DPRINT(3, "page fault at 0x%x", faultaddr);
    int disknum = -1;
    int pagenum = -1;

    if ((errcode & 0x1) == 0x0) { // not present 
        // check for valid entry in PAGE_DIR
        if ((PAGE_DIR[faultaddr >> 22] & 0x1) == 0) // not present
        {
            PAGE_DIR[faultaddr >> 22] = get_phys_page() | 0x3;
        }
        else
        {
            u32 entry = PAGE_TABLES[faultaddr >> 12];

            if ((entry & 0x2) == 0x2) // on disk
            {
                disknum = (entry >> 2) & 0x3;
                pagenum = entry >> 4;
            }

            // fill in page table entry with free page
            PAGE_TABLES[faultaddr >> 12] = get_phys_page() | 0x3; // RW + PRESENT
        }
Esempio n. 12
0
/**
 * Prints memory info
 *
 * Dumps physical-memory totals, kernel-heap usage, and the paging
 * control registers (cr3 obtained via get_pdbr()).  The "* 4 / 1024"
 * converts a free-block count to MB, which presumes 4 KB allocation
 * blocks -- confirm against the block allocator.
 */
void print_meminfo() {
    printk("Total mem: %d MB\nFree mem: %d MB\n", get_mem_size() / 1024, (get_max_blocks() - get_used_blocks()) * 4 / 1024);
    printk("Heap size: %d KB Free heap: %d KB\n", get_heap_size() / 1024, (get_heap_size() - get_used_heap()) / 1024);
    printk("cr0: %x cr2: %x cr3: %x\n", get_cr0(), get_cr2(), get_pdbr());
}
Esempio n. 13
0
/*
 * First-level CPU exception dispatcher.  Page faults (vector 14) are
 * forwarded to do_page_fault() with the faulting address taken from
 * cr2; every other recognized vector is treated as fatal.
 *
 * NOTE(review): the panic cases carry no break, which is safe only if
 * panic() never returns -- confirm it is noreturn.  Vector 15 and any
 * vector above 16 other than BAD_INT fall out of the switch silently.
 */
void master_exception_handler (long exception, long error)
{
  switch (exception)
  {
  case 0:
    panic ("Exception: divide error!");
  case 1:
    panic ("Exception: debug!");
  case 2:
    panic ("Exception: NMI!");
  case 3:
    panic ("Exception: breakpoint!");
  case 4:
    panic ("Exception: overflow!");
  case 5:
    panic ("Exception: bounds check!");
  case 6:
    panic ("Exception: invalid opcode!");
  case 7:
    panic ("Exception: coprocessor not availiable!");
  case 8:
    panic ("Exception: double fault!");
  case 9:
    panic ("Exception: 9 (reserved)!");
  case 10:
    panic ("Exception: invalid TSS!");
  case 11:
    panic ("Exception: segment not present!");
  case 12:
    panic ("Exception: stack!");
  case 13:
    panic ("Exception: general protection!");
  case 14:
    /* Page fault: the only recoverable exception here.  The disabled
       block below decodes the error-code bits for debugging. */

    //  
    //printf ("PF(pid=%u,", (int) current_proc->pid);
    //
    //    if (error & PAGE_FAULT_P)
    //  printf ("present,");
    //else
    //  printf ("not present,");
    //
    //if (error & PAGE_FAULT_RW)
    //  printf ("write,");
    //else
    //  printf ("read,");
    //  
    //if (error & PAGE_FAULT_US)
    //  printf ("user,");
    //else
    //  printf ("supervisor,");
    //
    //printf ("0x%x) ", (int) get_cr2());
    //

    do_page_fault (current_proc, get_cr2(), error);

    break;

  case 16:
    panic ("Exception: coprocessor error!");
  case BAD_INT:
    warning ("Unallowed int# used!");

  }
}
Esempio n. 14
0
/*
 * 64-bit panic path.  After the usual preamble (postcode, optional
 * logic-analyzer I/O trigger, kprintf lock break, optional machine
 * check dump), reports the saved register state from the passed
 * x86_saved_state_t together with the live control registers.  Ends
 * in panic() and does not return.
 */
void
panic_64(x86_saved_state_t *sp, __unused int pc, __unused const char *msg, boolean_t do_mca_dump)
{
	/* Set postcode (DEBUG only) */
	postcode(pc);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	
	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	if (do_mca_dump) {
#if CONFIG_MCA
		/*
		 * Dump the contents of the machine check MSRs (if any).
		 */
		mca_dump();
#endif
	}

#ifdef __i386__
	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 * A 32-bit kernel may carry either a 64- or 32-bit saved state,
	 * so pick the matching format.
	 */
	if (is_saved_state64(sp)) {
		x86_saved_state64_t	*ss64p = saved_state64(sp);
		panic("%s trapno:0x%x, err:0x%qx, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx%s\n",
			  msg,
		      ss64p->isf.trapno, ss64p->isf.err,
		      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2,
			  virtualized ? " VMM" : "");
	} else {
		x86_saved_state32_t	*ss32p = saved_state32(sp);
		panic("%s at 0x%08x, trapno:0x%x, err:0x%x,"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x%s\n",
		      msg,
			  ss32p->eip, ss32p->trapno, ss32p->err,
		      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip, virtualized ? " VMM" : "");
	}
#else
	/* Pure 64-bit kernel: the saved state is always 64-bit. */
	x86_saved_state64_t *regs = saved_state64(sp);
	panic("%s at 0x%016llx, registers:\n"
	      "CR0: 0x%016lx, CR2: 0x%016lx, CR3: 0x%016lx, CR4: 0x%016lx\n"
	      "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
	      "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
	      "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
	      "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
	      "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
	      "Error code: 0x%016llx%s\n",
	      msg,
		  regs->isf.rip,
	      get_cr0(), get_cr2(), get_cr3_raw(), get_cr4(),
	      regs->rax, regs->rbx, regs->rcx, regs->rdx,
	      regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
	      regs->r8,  regs->r9,  regs->r10, regs->r11,
	      regs->r12, regs->r13, regs->r14, regs->r15,
	      regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,  regs->isf.ss & 0xFFFF,
	      regs->isf.err, virtualized ? " VMM" : "");
#endif
}
Esempio n. 15
0
/** @brief Function deals with the page fault
 *
 *  Resolves copy-on-write (COW) and zero-fill-on-demand (ZFOD) faults
 *  in place.  Any other fault is handed to the thread's installed
 *  swexn handler if there is one; otherwise the task is killed via
 *  sys_vanish().
 *
 *  @param  void
 *  @return void
 */
void page_fault_handler(void){

	int fault_addr = get_cr2();	/* faulting linear address */
	
	mutex_lock(&cur_task->pcb_mutex);

	uint32_t align_addr = fault_addr & PGALIGN_MASK;
	uint32_t *ptep = NULL;
	Page *phy_page = page_lookup(cur_task->task_pgdir, align_addr, &ptep);
	/* BUGFIX: the old code unconditionally did "uint32_t pte = *ptep;"
	 * here, dereferencing a NULL ptep whenever page_lookup found no
	 * page table entry.  Only dereference when a PTE exists. */
	uint32_t pte = (ptep != NULL) ? *ptep : 0;
	
	/** Catch COW page fault */
	if((ptep != NULL) && (pte & PTE_P) && (pte & PTE_COW))
	{	
		mutex_lock(&mtx_m.frame_mutex);
		if(phy_page->pp_ref == 1)
		{
			/* Sole referent: just make the frame writable again. */
			*ptep = (pte | PTE_W) & (~PTE_COW);
			mutex_unlock(&mtx_m.frame_mutex);
		}
		else{
			mutex_unlock(&mtx_m.frame_mutex);
			if(pte & PTE_RWMARK){
				lprintf("ERROR: Cannot access TXT or ro_data area!");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}
			Page *new_page = page_alloc();
			if(new_page == NULL){
				lprintf("ERROR: No pages for COW in page_fault_handler");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}

			/* Stage the shared frame's contents in kernel memory,
			 * since page_insert below remaps align_addr.
			 * NOTE(review): new_page is leaked on the two error
			 * paths below -- a page free may be warranted. */
			uint32_t *temp_page = smemalign(PAGE_SIZE, PAGE_SIZE);
			if (temp_page == NULL){
				lprintf("ERROR: No memory for temp_page in page_fault_handler!");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}

			/* Copy the physical page to a temporary page in kernel space */
			memcpy((void*)temp_page, (void*)align_addr, PAGE_SIZE);
			
			if(page_insert(cur_task->task_pgdir, new_page, align_addr, 
				PTE_P | PTE_U | PTE_W) < 0)
			{
				lprintf("ERROR: No memory for COW in page_fault_handler");
				mutex_unlock(&cur_task->pcb_mutex);
				sys_vanish();
				return;
			}
			/* Copy the content to the new mapped physical page */
			memcpy((void*)align_addr, (void*)temp_page, PAGE_SIZE);

			/* Free the temp physical page */
			sfree(temp_page, PAGE_SIZE);
			
			mutex_lock(&mtx_m.frame_mutex);
			phy_page->pp_ref--;
			mutex_unlock(&mtx_m.frame_mutex);
		}
	}
	/** Catch the ZFOD */
	else if ((ptep != NULL) && (pte & PTE_P) && (pte & PTE_ZFOD))
	{
		Page *pg = page_alloc();
		if(pg == NULL){
			lprintf("ERROR: No pages for ZFOD in page_fault_handler");
			mutex_unlock(&cur_task->pcb_mutex);
			sys_vanish();
			return;
		}
		uint32_t perm = PTE_P | PTE_U | PTE_W;
		if(page_insert(cur_task->task_pgdir, pg, align_addr, perm) < 0)
		{
			lprintf("ERROR: No memory for ZFOD in page_fault_handler");	
			mutex_unlock(&cur_task->pcb_mutex);
			sys_vanish();
			return;
		}
		/* Fresh frame must be zeroed before user code sees it. */
		bzero((void*)align_addr, PAGE_SIZE);
	}
	/* Check if installed swexn handler can fix this up */
	else if(cur_thread->swexn_eip != NULL)
	{
		mutex_unlock(&cur_task->pcb_mutex);
		swexn_handler(HAS_ERROR_CODE, SWEXN_CAUSE_PAGEFAULT);
		return;
	}
	else{
		mutex_unlock(&cur_task->pcb_mutex);
		sys_vanish();
		return;
	}
	mutex_unlock(&cur_task->pcb_mutex);
	return;
}
Esempio n. 16
0
/*
 * Native (host-side) exception handler for the VMM itself.  Dumps the
 * interrupted context, adds per-vector detail for #PF and #GP, then
 * panics.  Only #NMI is survivable (silently ignored); #MC runs
 * vmm_excp_mce() before the final panic.
 */
void __regparm__(1) vmm_excp_hdlr(int64_r0_ctx_t *ctx)
{
   /* Full context dump: vector, error code, cs:rip, ss:rsp, rflags,
      then all 16 general registers. */
   debug(EXCP,
         "\nvmm native exception -= %s =-\n"
         " . excp #%d error 0x%X\n"
         " . cs:rip 0x%X:0x%X\n"
         " . ss:rsp 0x%X:0x%X\n"
         " . rflags 0x%X\n"
         "\n- general registers\n"
         "rax     : 0x%X\n"
         "rcx     : 0x%X\n"
         "rdx     : 0x%X\n"
         "rbx     : 0x%X\n"
         "rsp     : 0x%X\n"
         "rbp     : 0x%X\n"
         "rsi     : 0x%X\n"
         "rdi     : 0x%X\n"
         "r08     : 0x%X\n"
         "r09     : 0x%X\n"
         "r10     : 0x%X\n"
         "r11     : 0x%X\n"
         "r12     : 0x%X\n"
         "r13     : 0x%X\n"
         "r14     : 0x%X\n"
         "r15     : 0x%X\n"
         ,exception_names[ctx->nr.blow]
         ,ctx->nr.blow, ctx->err.raw
         ,ctx->cs.raw,  ctx->rip.raw
         ,ctx->ss.raw,  ctx->rsp.raw
         ,ctx->rflags.raw
         ,ctx->gpr.rax.raw
         ,ctx->gpr.rcx.raw
         ,ctx->gpr.rdx.raw
         ,ctx->gpr.rbx.raw
         ,ctx->gpr.rsp.raw
         ,ctx->gpr.rbp.raw
         ,ctx->gpr.rsi.raw
         ,ctx->gpr.rdi.raw
         ,ctx->gpr.r8.raw
         ,ctx->gpr.r9.raw
         ,ctx->gpr.r10.raw
         ,ctx->gpr.r11.raw
         ,ctx->gpr.r12.raw
         ,ctx->gpr.r13.raw
         ,ctx->gpr.r14.raw
         ,ctx->gpr.r15.raw);

   /* Vector-specific extra detail; everything that reaches the end of
      the switch falls through to the panic below. */
   switch(ctx->nr.blow)
   {
   case NMI_EXCP:
      /* The only exception we survive: return to the interrupted code. */
      debug(EXCP, "#NMI (ignored)\n");
      return;

   case MC_EXCP:
      vmm_excp_mce();
      break;

   case PF_EXCP:
      /* cr2 holds the faulting linear address. */
      debug(EXCP,
            "#PF details: p:%d wr:%d us:%d id:%d addr 0x%X\n"
            ,ctx->err.pf.p
            ,ctx->err.pf.wr
            ,ctx->err.pf.us
            ,ctx->err.pf.id
            ,get_cr2());
      break;

   case GP_EXCP:
      /* Decode the selector-style error code. */
      debug(EXCP,
            "#GP details: ext:%d idt:%d ti:%d index:%d\n"
            ,ctx->err.sl.ext
            ,ctx->err.sl.idt
            ,ctx->err.sl.ti
            ,ctx->err.sl.idx);
      break;
   }

   panic("vmm exception !\n");
}