/*
 * Arm one of the four x86 hardware debug registers (DR0-DR3) as a
 * write watchpoint covering [watch->loaddr, watch->hiaddr).
 *
 * watch: watchpoint descriptor; supplies the watched range and the
 *        owning task (NULL task means a kernel address).
 * num:   debug register slot to use, 0-3.
 *
 * Returns TRUE on success; FALSE if the slot is out of range, the span
 * is not a hardware-supported size (1, 2 or 4 bytes), the address is
 * not size-aligned, or a user address cannot be translated.
 */
boolean_t db_set_hw_watchpoint(
	const db_watchpoint_t watch,
	unsigned num)
{
	vm_size_t size = watch->hiaddr - watch->loaddr;
	db_addr_t addr = watch->loaddr;
	vm_offset_t kern_addr;

	/* Only DR0-DR3 exist. */
	if (num >= 4)
		return FALSE;
	/* The CPU can only watch 1-, 2- or 4-byte spans. */
	if (size != 1 && size != 2 && size != 4)
		return FALSE;
	/* The span must be naturally aligned to its size. */
	if (addr & (size-1))	/* Unaligned */
		return FALSE;

	/*
	 * For a user-task watchpoint, translate the user address to its
	 * kernel mapping first; only then convert to a linear address.
	 */
	if (watch->task) {
		if (db_user_to_kernel_address(watch->task, addr, &kern_addr, 1) < 0)
			return FALSE;
		addr = kern_addr;
	}
	/* Debug registers hold linear addresses, not kernel virtual ones. */
	addr = kvtolin(addr);

	/*
	 * size-1 maps the span onto the debug-register length encoding
	 * (1 -> 0, 2 -> 1, 4 -> 3); type W traps on writes only.
	 */
	db_dr (num, addr, I386_DB_TYPE_W, size-1, I386_DB_LOCAL|I386_DB_GLOBAL);

	db_printf("Hardware watchpoint %d set for %x\n", num, addr);
	return TRUE;
}
void gdt_init(void) { fill_descriptor(&gdt[QUARQ_TSS / 8], kvtolin((unsigned int)&tss), sizeof(tss) - 1, ACC_PL_K | ACC_TSS | ACC_P, 0); fill_descriptor(&gdt[KERNEL_CS / 8], kvtolin(0), 0xffffffff, ACC_PL_K | ACC_CODE_R, SZ_32); fill_descriptor(&gdt[KERNEL_DS/ 8], kvtolin(0), 0xffffffff, ACC_PL_K | ACC_DATA_W, SZ_32); fill_descriptor(&gdt[LINEAR_CS / 8], 0x00000000, 0xffffffff, ACC_PL_K | ACC_CODE_R, SZ_32); fill_descriptor(&gdt[LINEAR_DS / 8], 0x00000000, 0xffffffff, ACC_PL_K | ACC_DATA_W, SZ_32); }
/*
 * Load the base IDT into the CPU's IDTR.
 */
void base_idt_load(void)
{
	struct pseudo_descriptor pdesc;

	/*
	 * Create a pseudo-descriptor describing the IDT.
	 * (The hardware limit field is the table size minus one, and
	 * IDTR takes a linear address, hence kvtolin.)
	 */
	pdesc.limit = sizeof(base_idt) - 1;
	pdesc.linear_base = kvtolin(&base_idt);

	/* Load the IDT. */
	set_idt(&pdesc);
}
/* * Init the VM code. */ void oskit_uvm_redzone_init(void) { oskit_addr_t addr; /* * We use a task gate to catch page faults, since a stack overflow * will try and dump more stuff on the stack. This is the easiest * way to deal with it. */ if ((addr = (oskit_addr_t) lmm_alloc_aligned(&malloc_lmm, STACKSIZE, 0, 12, 0)) == 0) panic(__FUNCTION__": Could not allocate stack\n"); task_tss.ss0 = KERNEL_DS; task_tss.esp0 = addr + STACKSIZE - sizeof(double); task_tss.esp = task_tss.esp0; task_tss.ss = KERNEL_DS; task_tss.ds = KERNEL_DS; task_tss.es = KERNEL_DS; task_tss.fs = KERNEL_DS; task_tss.gs = KERNEL_DS; task_tss.cs = KERNEL_CS; task_tss.io_bit_map_offset = sizeof(task_tss); task_tss.eip = (int) double_fault_handler; /* Make sure the task is started with interrupts disabled */ osenv_intr_disable(); task_tss.eflags = (int) get_eflags(); osenv_intr_enable(); /* Both TSSs has to know about the page tables */ task_tss.cr3 = get_cr3(); base_tss.cr3 = get_cr3(); /* Initialize the base TSS descriptor. */ fill_descriptor(&base_gdt[KERNEL_TRAP_TSS / 8], kvtolin(&task_tss), sizeof(task_tss) - 1, ACC_PL_K|ACC_TSS|ACC_P, 0); /* * NOTE: The task switch will include an extra word on the stack, * pushed by the CPU. The handler will need to be in assembly code * if we care about that value. As it is, the handler routine * stack is going to be slightly messed up, but since the handler * calls panic, it is not a problem right now. */ fill_gate(&base_idt[T_DOUBLE_FAULT], 0, KERNEL_TRAP_TSS, ACC_TASK_GATE|ACC_P|ACC_PL_K, 0); base_idt_load(); base_gdt_load(); }
/*
 * Emit one character to the Xen console.
 *
 * If `console` is NULL (presumably before the shared console ring is
 * set up — confirm against init code), fall back to the CONSOLEIO_write
 * hypercall with a one-byte buffer.  Otherwise append the character to
 * the shared output ring under splhigh plus a simple lock, make it
 * visible with a write barrier before advancing the producer index,
 * and kick the console event channel.
 *
 * Always returns 0.
 */
int hypputc(int c)
{
	if (!console) {
		/* No ring yet: one-byte hypercall; kvtolin because the
		 * hypervisor takes a linear address. */
		char d = c;
		hyp_console_io(CONSOLEIO_write, 1, kvtolin(&d));
	} else {
		spl_t spl = splhigh();
		simple_lock(&outlock);
		/* Busy-wait while the ring is full (producer would
		 * overrun the consumer). */
		while (hyp_ring_smash(console->out, console->out_prod,
				      console->out_cons)) {
			hyp_console_put("ring smash\n");
			/* TODO: are we allowed to sleep in putc? */
			hyp_yield();
		}
		hyp_ring_cell(console->out, console->out_prod) = c;
		/* Publish the character before moving the producer index,
		 * so the backend never reads an unwritten cell. */
		wmb();
		console->out_prod++;
		hyp_event_channel_send(boot_info.console_evtchn);
		simple_unlock(&outlock);
		splx(spl);
	}
	return 0;
}
/*
 * Write `len` bytes from `str` straight to the Xen console via the
 * console-I/O hypercall.  The buffer pointer is converted with
 * kvtolin(), since the hypervisor is handed a linear address.
 */
void hyp_console_write(const char *str, int len)
{
	hyp_console_io(CONSOLEIO_write, len, kvtolin(str));
}