/*
 * Acknowledge the e1000 receive-timer interrupt cause and signal
 * end-of-interrupt to the interrupt controller(s).
 *
 * NOTE(review): on e1000 hardware the ICR is normally read-to-clear /
 * write-1-to-clear, so the read-modify-write `|=` below may acknowledge
 * more pending causes than just RXT0 -- confirm against the register
 * semantics this driver relies on.
 */
static void clear_nic_irqs(void)
{
    e1000_mmio_beg[E1000_ICR] |= E1000_IMS_RXT0;
    // watch out for calling irq_eoi() before clearing rx irq in reg
    lapic_eoi();
    irq_eoi();
}
/*
 * Disable an MSI interrupt source.  MSIs need no masking at the source,
 * so the only work is the optional end-of-interrupt on the local APIC.
 */
static void
msi_disable_source(struct intsrc *isrc, int eoi)
{
	if (eoi != PIC_EOI)
		return;
	lapic_eoi();
}
/*
 * PS/2 keyboard IRQ handler: consumes one scan code, dispatches escaped
 * (extended-prefix) arrow-key codes to the UI, and acknowledges the
 * interrupt at the local APIC.
 */
void keyboard_handler(isr_state_t *state)
{
    uint8_t scancode = inb(0x60);

    /* Re-write the controller status port to acknowledge the byte. */
    outb(0x61, inb(0x61));

    if (scancode == KEYBOARD_CODE_ESCAPED) {
        /* Next scan code belongs to an escaped (extended) sequence. */
        keyboard_escaped = true;
    } else if (keyboard_escaped) {
        if (scancode == KEYBOARD_CODE_LEFT)
            ui_switch_left();
        else if (scancode == KEYBOARD_CODE_RIGHT)
            ui_switch_right();
        else if (scancode == KEYBOARD_CODE_UP)
            ui_scroll_up();
        else if (scancode == KEYBOARD_CODE_DOWN)
            ui_scroll_down();
        keyboard_escaped = false;
    }

    lapic_eoi();
}
static int ioapic_handler(struct int_context *context, struct kernel_dispatch_info *kdi) { // kprintf("IO APIC Interrupt Vector %d, IRQ %d\n", context->vector, vector_map[context->vector].irq); u32 result = 1; int irq = vector_map[context->vector].irq; // Disable IRQ ioapic_disable_irq(irq); // Dispatch kdi->dispatch_type = kdisp_interrupt; kdi->interrupt.irq = irq; kdi->interrupt.vector = context->vector; switch (irq) { case 0: //result = hal_interrupt_handler_global_timer(); break; case 1: result = keyboard_interrupt_handler(context, kdi); break; default: break; } // Enable IRQ lapic_eoi(); ioapic_enable_irq(irq); return result; }
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. uint32_t fault_va; if(tf->tf_trapno >= 0 && tf->tf_trapno < 255) { switch(tf->tf_trapno) { case T_PGFLT://page fault page_fault_handler(tf); return; case T_BRKPT: monitor(tf); return; case T_SYSCALL: //fault_va = rcr2(); //cprintf("[%08x] user fault va %08x ip %08x\n",curenv->env_id, fault_va, tf->tf_eip); //cprintf("T_SYSCALL\n"); // print_trapframe(tf); tf->tf_regs.reg_eax = syscall(tf->tf_regs.reg_eax, tf->tf_regs.reg_edx, tf->tf_regs.reg_ecx,tf->tf_regs.reg_ebx, tf->tf_regs.reg_edi, tf->tf_regs.reg_esi); return; } } // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // LAB 4: Your code here. if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) { lapic_eoi(); sched_yield(); return; } // Handle keyboard and serial interrupts. // LAB 5: Your code here. // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
/*
 * RTC periodic-interrupt handler.
 *
 * Register C (0xc) must be read on every interrupt, otherwise the RTC
 * stops generating further interrupts:
 *   bit 7 : interrupts enabled in reg B
 *   bit 6 : periodic interrupt has occurred
 *   bit 5 : alarm interrupt
 *   bit 4 : update-ended alarm
 *   bits 3-0 : always 0
 */
__isr__ do_rtc (struct intr_frame r)
{
  intr_enter ();
  struct tm tim;

  rtc_read (0xc);

  hznum++;
  if (hznum == RTCHZ)
    {
      hznum = 0;
      unixtime++;

      /* Once every hour, recalibrate the unix time against the RTC.
       * BUG FIX: the original tested `unixtime % 3600` which is true on
       * every second EXCEPT the hour boundary -- the opposite of the
       * stated intent.  Test the boundary explicitly instead. */
      if (unixtime % 3600 == 0)
        {
          rtc_get_tm (&tim);
          unixtime = mktime (&tim);
        }
    }

  lapic_eoi ();
  intr_exit ();
}
/*
 * Local-APIC timer tick: forwards to the registered master callback
 * (if one is installed), acknowledges the interrupt, and reports the
 * interrupt as handled.
 */
static bool lapic_tmr_handler(interrupt_t* state)
{
    if (_master != NULL)
        _master();

    lapic_eoi();
    return true;
}
/*
 * Inter-processor-interrupt entry point.
 * Sanity-checks the saved segment selectors (x86_64 only), keeps
 * interrupts masked for the duration of the handler, bumps the
 * per-vector counter, and delegates to the specific IPI handler in ipi.c.
 */
void ipi_handler(volatile registers_t regs)
{
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	/* Low 3 selector bits are RPL/TI; mask them off before comparing
	 * against the known kernel/user segment bases. */
	assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18));
#endif
	/* set_int(0) disables interrupts and returns the previous flag so
	 * it can be restored on the way out. */
	int previous_interrupt_flag = set_int(0);
	add_atomic(&int_count[regs.int_no], 1);
#if CONFIG_SMP
	/* delegate to the proper handler, in ipi.c */
	switch(regs.int_no) {
		case IPI_DEBUG:
		case IPI_SHUTDOWN:
		case IPI_PANIC:
			handle_ipi_cpu_halt(regs);
			break;
		case IPI_SCHED:
			handle_ipi_reschedule(regs);
			break;
		case IPI_TLB:
			handle_ipi_tlb(regs);
			break;
		case IPI_TLB_ACK:
			handle_ipi_tlb_ack(regs);
			break;
		default:
			panic(PANIC_NOSYNC, "invalid interprocessor interrupt number: %d", regs.int_no);
	}
#endif
	/* Interrupts must still be disabled at this point. */
	assert(!set_int(0));
	set_cpu_interrupt_flag(previous_interrupt_flag);
	/* assembly code will issue sti */
#if CONFIG_SMP
	lapic_eoi();
#endif
}
/* Dispatch a trap or interrupt to the appropriate handler. */
static void trap_dispatch(struct Trapframe *tf)
{
	// Handle processor exceptions.
	// LAB 3: Your code here.

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	// LAB 4: Your code here.
	if (tf ->tf_trapno == IRQ_OFFSET + IRQ_TIMER) {
		// EOI must precede sched_yield(), which does not return.
		lapic_eoi();
		sched_yield();
		return;
	}

	int r;
	switch (tf->tf_trapno) {
		case T_PGFLT :
			page_fault_handler(tf);
			break;
		case T_BRKPT :
			monitor(tf);
			break;
		case T_DEBUG :
			// Single-step also drops into the kernel monitor.
			monitor(tf);
			break;
		case T_SYSCALL :
			// Syscall number in EAX, arguments in
			// EDX/ECX/EBX/EDI/ESI; result handed back in EAX.
			r = syscall(tf->tf_regs.reg_eax, tf->tf_regs.reg_edx, tf->tf_regs.reg_ecx, tf->tf_regs.reg_ebx, tf->tf_regs.reg_edi, tf->tf_regs.reg_esi);
			tf->tf_regs.reg_eax = r;
			break;
		default :
			// Unexpected trap: The user process or the kernel has a bug.
			print_trapframe(tf);
			if (tf->tf_cs == GD_KT)
				panic("unhandled trap in kernel");
			else {
				env_destroy(curenv);
				return;
			}
	}
}
/* Local-APIC timer ISR: currently only acknowledges the tick. */
__isr__ do_lapictim (struct intr_frame r)
{
  intr_enter ();
  lapic_eoi ();
  intr_exit ();
}
/* PIT (IRQ 0) ISR: advances the global jiffies counter and sends the EOI. */
__isr__ do_pit (struct intr_frame r)
{
  intr_enter ();
  ++jiffies;
  lapic_eoi ();
  intr_exit ();
}
static void trap_dispatch(struct frame *tf) { switch(tf->tf_trapno) { case T_PGFLT: { //print_frame(tf); do_page_fault(tf); break; } case T_GPFLT: { panic("GPFLT!\n"); do_exit(curtask); break; } case T_BRKPT : { print_frame(tf); panic("break point handler not implemented!\n"); break; } case T_DIVIDE: { printk("CPU:%d USER T_DIVIDE\n",get_cpuid()); do_exit(curtask); } case T_SYSCALL: { tf->tf_regs.reg_eax = syscall_handler(tf); break; } case IRQ_SPURIOUS: { printk("CPU:%d Spurious interrupt on irq 7\n",get_cpuid()); print_frame(tf); return; } case IRQ_TIMER : { lapic_eoi(); schedule_tick(); break; } case IRQ_KBD : { irq_eoi(); printk("CPU:%d IRQ_KBD \n",get_cpuid()); inb(0x60); break; } case IRQ_SERIAL : { panic("SERIAL handler not implemented!\n"); break; } case IRQ_IDE0 : case IRQ_IDE1 : { irq_eoi(); do_hd_interrupt(tf); break; } case IRQ_ERROR : { print_frame(tf); panic("ERROR handler not implemented!\n"); break; } default: { if (tf->tf_cs == _KERNEL_CS_) panic("unhandled trap in kernel"); else { print_frame(tf); return; } break; } } }
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. //---------------------------------------- Lab3 ------------------------------------------------------------ if (tf->tf_trapno == T_PGFLT) { //cprintf("pagefault!\n"); page_fault_handler(tf); return; } if (tf->tf_trapno == T_BRKPT) { //cprintf("brkpt!\n"); monitor(tf); return; } if (tf->tf_trapno == T_DEBUG) { my_monitor(tf); return; } if (tf->tf_trapno == T_SYSCALL) { //cprintf("Syscall!\n"); tf->tf_regs.reg_eax = syscall(tf->tf_regs.reg_eax, tf->tf_regs.reg_edx, tf->tf_regs.reg_ecx, tf->tf_regs.reg_ebx, tf->tf_regs.reg_edi, tf->tf_regs.reg_esi); if (tf->tf_regs.reg_eax < 0) panic("syscall failed: %e\n", tf->tf_regs.reg_eax); return; } //---------------------------------------- Lab3 ------------------------------------------------------------ // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // LAB 4: Your code here. //------------ Lab4 ---------------------------------------------------------------------------------------- if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) { // cprintf("clock interrupt!\n"); lapic_eoi(); sched_yield(); return; } //------------ Lab4 ---------------------------------------------------------------------------------------- // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
/* Dispatch a trap to its handler (labs 3-6, including e1000 NIC IRQ). */
static void trap_dispatch(struct Trapframe *tf)
{
	// Handle processor exceptions.
	// LAB 3: Your code here.
	int32_t ret;
	switch (tf->tf_trapno){
	case T_PGFLT:{	//14
		page_fault_handler(tf);
		return;
	}
	case T_BRKPT:{	//3
		breakpoint_handler(tf);
		return;
	}
	case T_DEBUG:{
		// Single-step traps share the breakpoint handler.
		breakpoint_handler(tf);
		return;
	}
	case T_SYSCALL:{
		// Result of the syscall is handed back to the user in EAX.
		ret = system_call_handler(tf);
		tf->tf_regs.reg_eax = ret;
		return;
	}
	case IRQ_OFFSET+IRQ_TIMER:{
		// Acknowledge the LAPIC before time accounting and the
		// scheduler; sched_yield() does not return.
		lapic_eoi();
		time_tick();
		sched_yield();
		return;
	}
	case IRQ_OFFSET+IRQ_KBD:{
		kbd_intr();
		return;
	}
	case IRQ_OFFSET+IRQ_SERIAL:{
		serial_intr();
		return;
	}
	case IRQ_OFFSET+IRQ_E1000:{
		e1000_trap_handler();
		return;
	}
	}

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	// LAB 4: Your code here.
	// Add time tick increment to clock interrupts.
	// Be careful! In multiprocessors, clock interrupts are
	// triggered on every CPU.
	// LAB 6: Your code here.

	// Handle keyboard and serial interrupts.
	// LAB 5: Your code here.

	// Unexpected trap: The user process or the kernel has a bug.
	print_trapframe(tf);
	if (tf->tf_cs == GD_KT)
		panic("unhandled trap in kernel");
	else {
		env_destroy(curenv);
		return;
	}
}
/*
 * Trap dispatcher with a page-aging pass hooked onto the clock interrupt.
 * On each timer tick a bounded number of physical pages have their age
 * counters updated based on the PTE_A (accessed) bits of every PTE that
 * maps them, to support age-based page eviction.
 */
static void trap_dispatch(struct Trapframe *tf)
{
	// page_to_age persists across calls so the aging scan resumes where
	// the previous clock tick left off; page_to_age_first marks the
	// wrap-around point for this tick.
	static int page_to_age = 0;
	int page_to_age_first = page_to_age;
	// NOTE(review): the base budget uses the LOW threshold, while the
	// low-water bump below adds a HIGH-threshold-scaled amount and vice
	// versa -- confirm the factors aren't accidentally swapped.
	int num_page_updates = NPAGEUPDATES_FACTOR*NPAGESFREE_LOW_THRESHOLD;
	struct PteChain *pp_refs_chain;
	char page_accessed;

	// Handle processor exceptions.
	// LAB 3: Your code here.
	if (tf->tf_trapno == T_PGFLT) {
		page_fault_handler(tf);
		return;
	} else if (tf->tf_trapno == T_BRKPT) {
		monitor(tf);	// breakpoint exceptions invoke the kernel monitor
		return;
	} else if (tf->tf_trapno == T_SYSCALL) {
		// Syscall number in EAX, arguments in EDX/ECX/EBX/EDI/ESI;
		// result goes back to the user in EAX.
		tf->tf_regs.reg_eax = syscall(tf->tf_regs.reg_eax,
					      tf->tf_regs.reg_edx,
					      tf->tf_regs.reg_ecx,
					      tf->tf_regs.reg_ebx,
					      tf->tf_regs.reg_edi,
					      tf->tf_regs.reg_esi);
		return;
	}

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	// LAB 4: Your code here.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) {
		lapic_eoi();
		// Update the age of some physical pages.
		// If we fall below the free-page thresholds, update more pages
		// than usual.  These numbers are somewhat arbitrary and may
		// need tuning/profiling; there is also no inherent reason for
		// num_page_updates to be derived from the threshold values.
		if (num_free_pages <= NPAGESFREE_LOW_THRESHOLD) {
			num_page_updates += NPAGEUPDATES_FACTOR*NPAGESFREE_HIGH_THRESHOLD;
		}
		if (num_free_pages <= NPAGESFREE_HIGH_THRESHOLD) {
			num_page_updates += NPAGEUPDATES_FACTOR*NPAGESFREE_LOW_THRESHOLD;
		}
		for ( ; num_page_updates >= 0; --num_page_updates) {
			// Find the next page currently mapped in user space,
			// i.e. one with a nonzero pp_ref.
			while (!pages[page_to_age].pp_ref) {
				// If we wrap around to where we started, stop updating page ages.
				if ((page_to_age=(page_to_age+1)%npages) == page_to_age_first) {
					goto end_of_page_age_updates;
				}
			}
			// Iterate through all user-space PTEs mapping this page.
			// If any has been accessed, bump the age and clear the
			// PTE_A bit in all remaining PTEs of the chain.
			page_accessed = 0;
			for (pp_refs_chain = pages[page_to_age].pp_refs_chain; pp_refs_chain; pp_refs_chain = pp_refs_chain->pc_link) {
				if (*pp_refs_chain->pc_pte & PTE_A) {
					page_accessed = 1;
					pages[page_to_age].age += PAGE_AGE_INCREMENT_ON_ACCESS;
					for ( ; pp_refs_chain; pp_refs_chain = pp_refs_chain->pc_link) {
						*(pp_refs_chain->pc_pte) &= ~PTE_A;
					}
					break;
				}
			}
			// Clamp the age to its maximum.
			pages[page_to_age].age = (pages[page_to_age].age > MAX_PAGE_AGE ? MAX_PAGE_AGE : pages[page_to_age].age);
			// Untouched pages decay toward zero (saturating).
			if (!page_accessed) {
				if (pages[page_to_age].age >= (uint8_t)PAGE_AGE_DECREMENT_ON_CLOCK) {
					pages[page_to_age].age -= (uint8_t)PAGE_AGE_DECREMENT_ON_CLOCK;
				} else {
					pages[page_to_age].age = 0;
				}
			}
			// If we wrap around to where we started, stop updating page ages.
			if ((page_to_age=(page_to_age+1)%npages) == page_to_age_first) {
				goto end_of_page_age_updates;
			}
		}
end_of_page_age_updates:
		// sched_yield() does not return.
		sched_yield();
	}

	// Handle keyboard and serial interrupts.
	// LAB 5: Your code here.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_KBD) {
		lapic_eoi();
		kbd_intr();
		return;
	}
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SERIAL) {
		lapic_eoi();
		serial_intr();
		return;
	}

	// Unexpected trap: The user process or the kernel has a bug.
	print_trapframe(tf);
	if (tf->tf_cs == GD_KT)
		panic("unhandled trap in kernel");
	else {
		env_destroy(curenv);
		return;
	}
}
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. // TODO: chky int r; switch (tf->tf_trapno) { case T_PGFLT: page_fault_handler(tf); break; case T_BRKPT: // TODO: lab3 ex6 challenge monitor(tf); break; case T_SYSCALL: r = syscall(tf->tf_regs.reg_eax, // syscallno tf->tf_regs.reg_edx, // a1 tf->tf_regs.reg_ecx, // a2 tf->tf_regs.reg_ebx, // a3 tf->tf_regs.reg_edi, // a4 tf->tf_regs.reg_esi // a5 ); tf->tf_regs.reg_eax = r; return; default: break; } // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // LAB 4: Your code here. // TODO: chky if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) { lapic_eoi(); sched_yield(); return; } // chky end // Handle keyboard and serial interrupts. // LAB 5: Your code here. // TODO: chky if (tf->tf_trapno == IRQ_OFFSET + IRQ_KBD) { kbd_intr(); return; } if (tf->tf_trapno == IRQ_OFFSET + IRQ_SERIAL) { serial_intr(); return; } // chky end // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. //print_trapframe(tf); if(tf->tf_trapno == T_PGFLT) { page_fault_handler(tf); } else if(tf->tf_trapno == T_BRKPT) { monitor(tf); } else if(tf->tf_trapno == T_SYSCALL) { tf->tf_regs.reg_rax = syscall(tf->tf_regs.reg_rax, tf->tf_regs.reg_rdx, tf->tf_regs.reg_rcx, tf->tf_regs.reg_rbx, tf->tf_regs.reg_rdi, tf->tf_regs.reg_rsi); return; } // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // LAB 4: Your code here. // Handle keyboard and serial interrupts. // LAB 5: Your code here. else if(tf->tf_trapno == (IRQ_OFFSET + IRQ_TIMER)) { lapic_eoi(); sched_yield(); } else if(tf->tf_trapno == (IRQ_OFFSET + IRQ_KBD)) { kbd_intr(); sched_yield(); } else if(tf->tf_trapno == (IRQ_OFFSET + IRQ_SERIAL)) { serial_intr(); sched_yield(); } // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
/* Dispatch a trap to its handler. */
static void trap_dispatch(struct Trapframe *tf)
{
	uint32_t temp;

	// Switch on the trap number
	switch(tf->tf_trapno) {
	case T_DEBUG:
		// Unset the step flag in EFLAGS before entering the monitor
		tf->tf_eflags &= ~FL_TF;
		/* fallthrough */
	case T_BRKPT:
		// Use breakpoints and debug interrupts as a shortcut to
		// start the kernel monitor
		monitor(tf);
		return;
	case T_PGFLT:
		// Handle page faults with their own handler.
		page_fault_handler(tf);
		return;
	case T_SYSCALL:
		// For system calls, pass arguments to the syscall handler
		// and return the result in register EAX.
		//
		// The number must be stored away, and tf->tf_regs.reg_eax
		// set to 0 before the call. This is because some system
		// calls such as sys_ipc_recv don't return (due to a call to
		// sched_yield()), but expect to return 0 for the return value.
		// If reg_eax is not cleared, then whatever is in it might be
		// mistaken for the return value. -_-
		temp = tf->tf_regs.reg_eax;
		tf->tf_regs.reg_eax = 0;
		tf->tf_regs.reg_eax = syscall(temp, tf->tf_regs.reg_edx, tf->tf_regs.reg_ecx, tf->tf_regs.reg_ebx, tf->tf_regs.reg_edi, tf->tf_regs.reg_esi);
		return;
	}

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	if(tf->tf_trapno == IRQ_OFFSET+IRQ_TIMER) {
		// Timer interrupts should cause the scheduler to be run;
		// sched_yield() does not return.
		lapic_eoi();
		sched_yield();
	}

	// Handle keyboard and serial interrupts.
	if(tf->tf_trapno == IRQ_OFFSET+IRQ_KBD) {
		// kern/console.c function that handles the keyboard
		kbd_intr();
		return;
	}
	if(tf->tf_trapno == IRQ_OFFSET+IRQ_SERIAL) {
		// kern/console.c function that handles serial input
		serial_intr();
		return;
	}

	// Unexpected trap: The user process or the kernel has a bug.
	print_trapframe(tf);
	if (tf->tf_cs == GD_KT)
		panic("unhandled trap in kernel");
	else {
		env_destroy(curenv);
		return;
	}
}
/*
 * End-of-interrupt hook for MSI interrupt sources.  MSIs are acknowledged
 * solely at the local APIC, so a plain EOI is all that is required.
 */
static void msi_eoi_source(struct intsrc *isrc)
{
	lapic_eoi();
}
/*
 * Trap dispatcher (merged across labs 3-6).  The merge-conflict marker
 * debris that had been left behind as comments has been removed; the
 * code itself is unchanged.
 */
static void trap_dispatch(struct Trapframe *tf)
{
	// Handle processor exceptions.
	// LAB 3: Your code here.

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle keyboard and serial interrupts.
	// LAB 5: Your code here.
	if(tf->tf_trapno==IRQ_OFFSET+IRQ_KBD)
	{
		kbd_intr();
		return;
	}
	if(tf->tf_trapno==IRQ_OFFSET+IRQ_SERIAL)
	{
		serial_intr();
		return;
	}

	if(tf->tf_trapno==T_PGFLT)
	{
		page_fault_handler(tf);
		return;
	}
	else if((tf->tf_trapno==T_GPFLT))
	{
		// General-protection faults are only reported, not handled.
		print_trapframe(tf);
		return;
	}
	else if(tf->tf_trapno==T_BRKPT)
	{
		monitor(tf);
		return;
	}
	else if(tf->tf_trapno==T_SYSCALL)
	{
		// x86_64 convention: syscall number in RAX, arguments in
		// RDX/RCX/RBX/RDI/RSI; result handed back in RAX.
		tf->tf_regs.reg_rax =syscall(tf->tf_regs.reg_rax,tf->tf_regs.reg_rdx,tf->tf_regs.reg_rcx,tf->tf_regs.reg_rbx,tf->tf_regs.reg_rdi,tf->tf_regs.reg_rsi);
		return;
	}
	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	// LAB 4 / LAB 6: time accounting + scheduling; sched_yield()
	// does not return.
	else if(tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER)
	{
		lapic_eoi();
		time_tick();
		sched_yield();
	}

	// Unexpected trap: The user process or the kernel has a bug.
	print_trapframe(tf);
	if (tf->tf_cs == GD_KT)
		panic("unhandled trap in kernel");
	else {
		cprintf("destroy env\n");
		env_destroy(curenv);
		return;
	}
}
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. if (tf->tf_trapno == T_BRKPT) { print_trapframe(tf); // cprintf("Breakpoint!\n"); while (1) monitor(NULL); } else if (tf->tf_trapno == T_PGFLT) { page_fault_handler(tf); return; } else if (tf->tf_trapno == T_SYSCALL) { uint32_t syscallno; uint32_t a1, a2, a3, a4, a5; syscallno = tf->tf_regs.reg_eax; a1 = tf->tf_regs.reg_edx; a2 = tf->tf_regs.reg_ecx; a3 = tf->tf_regs.reg_ebx; a4 = tf->tf_regs.reg_edi; a5 = tf->tf_regs.reg_esi; int32_t ret = syscall(syscallno, a1, a2, a3, a4, a5); tf->tf_regs.reg_eax = ret; return; } // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // LAB 4: Your code here. if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) { time_tick(); lapic_eoi(); /* what's that? */ sched_yield(); } // Handle keyboard and serial interrupts. // LAB 5: Your code here. if (tf->tf_trapno == IRQ_OFFSET + IRQ_KBD) { kbd_intr(); return; } if (tf->tf_trapno == IRQ_OFFSET + IRQ_SERIAL) { serial_intr(); return; } // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
/* this should NEVER enter from an interrupt handler, * and only from kernel code in the one case of calling * sys_setup() */ void entry_syscall_handler(volatile registers_t regs) { /* don't need to save the flag here, since it will always be true */ #if CONFIG_ARCH == TYPE_ARCH_X86_64 assert(regs.int_no == 0x80 && ((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18)); #endif set_int(0); add_atomic(&int_count[0x80], 1); if(current_task->flags & TF_IN_INT) panic(0, "attempted to enter syscall while handling an interrupt"); /* set the interrupt handling flag... */ raise_flag(TF_IN_INT); #if CONFIG_ARCH == TYPE_ARCH_X86_64 if(regs.rax == 128) { #elif CONFIG_ARCH == TYPE_ARCH_X86 if(regs.eax == 128) { #endif /* the injection code at the end of the signal handler calls * a syscall with eax = 128. So here we handle returning from * a signal handler. First, copy back the old registers, and * reset flags and signal stuff */ memcpy((void *)®s, (void *)¤t_task->reg_b, sizeof(registers_t)); current_task->sig_mask = current_task->old_mask; current_task->cursig=0; lower_flag(TF_INSIG); lower_flag(TF_JUMPIN); } else { assert(!current_task->sysregs && !current_task->regs); /* otherwise, this is a normal system call. 
Save the regs for modification * for signals and exec */ current_task->regs = ®s; current_task->sysregs = ®s; syscall_handler(®s); assert(!get_cpu_interrupt_flag()); /* handle stage2's here...*/ if(maybe_handle_stage_2 || !current_task->syscall_count) { mutex_acquire(&s2_lock); for(int i=0;i<MAX_INTERRUPTS;i++) { if(stage2_count[i]) { sub_atomic(&stage2_count[i], 1); for(int j=0;j<MAX_HANDLERS;j++) { if(interrupt_handlers[i][j][1]) { (interrupt_handlers[i][j][1])(®s); } } } } mutex_release(&s2_lock); } assert(!get_cpu_interrupt_flag()); } assert(!set_int(0)); current_task->sysregs=0; current_task->regs=0; /* we don't need worry about this being wrong, since we'll always be returning to * user-space code */ set_cpu_interrupt_flag(1); /* we're never returning to an interrupt, so we can * safely reset this flag */ lower_flag(TF_IN_INT); #if CONFIG_SMP lapic_eoi(); #endif } /* This gets called from our ASM interrupt handler stub. */ void isr_handler(volatile registers_t regs) { #if CONFIG_ARCH == TYPE_ARCH_X86_64 assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18)); #endif /* this is explained in the IRQ handler */ int previous_interrupt_flag = set_int(0); add_atomic(&int_count[regs.int_no], 1); /* check if we're interrupting kernel code, and set the interrupt * handling flag */ char already_in_interrupt = 0; if(current_task->flags & TF_IN_INT) already_in_interrupt = 1; raise_flag(TF_IN_INT); /* run the stage1 handlers, and see if we need any stage2s. And if we * don't handle it at all, we need to actually fault to handle the error * and kill the process or kernel panic */ char called=0; char need_second_stage = 0; for(int i=0;i<MAX_HANDLERS;i++) { if(interrupt_handlers[regs.int_no][i][0] || interrupt_handlers[regs.int_no][i][1]) { /* we're able to handle the error! 
*/ called = 1; if(interrupt_handlers[regs.int_no][i][0]) (interrupt_handlers[regs.int_no][i][0])(®s); if(interrupt_handlers[regs.int_no][i][1]) need_second_stage = 1; } } if(need_second_stage) { /* we need to run a second stage handler. Indicate that here... */ add_atomic(&stage2_count[regs.int_no], 1); maybe_handle_stage_2 = 1; } /* clean up... Also, we don't handle stage 2 in ISR handling, since this can occur from within a stage2 handler */ assert(!set_int(0)); /* if it went unhandled, kill the process or panic */ if(!called) faulted(regs.int_no, !already_in_interrupt, regs.eip); /* restore previous interrupt state */ set_cpu_interrupt_flag(previous_interrupt_flag); if(!already_in_interrupt) lower_flag(TF_IN_INT); /* send out the EOI... */ #if CONFIG_SMP lapic_eoi(); #endif }
void irq_handler(volatile registers_t regs) { #if CONFIG_ARCH == TYPE_ARCH_X86_64 assert(((regs.ds&(~0x7)) == 0x10 || (regs.ds&(~0x7)) == 0x20) && ((regs.cs&(~0x7)) == 0x8 || (regs.cs&(~0x7)) == 0x18)); #endif /* ok, so the assembly entry function clears interrupts in the cpu, * but the kernel doesn't know that yet. So we clear the interrupt * flag in the cpu structure as part of the normal set_int call, but * it returns the interrupts-enabled flag from BEFORE the interrupt * was recieved! F****n' brilliant! Back up that flag, so we can * properly restore the flag later. */ int previous_interrupt_flag = set_int(0); add_atomic(&int_count[regs.int_no], 1); /* save the registers so we can screw with iret later if we need to */ char clear_regs=0; if(current_task && !current_task->regs) { /* of course, if we are already inside an interrupt, we shouldn't * overwrite those. Also, we remember if we've saved this set of registers * for later use */ clear_regs=1; current_task->regs = ®s; } /* check if we're interrupting kernel code */ char already_in_interrupt = 0; if(current_task->flags & TF_IN_INT) already_in_interrupt = 1; /* ...and set the flag so we know we're in an interrupt */ raise_flag(TF_IN_INT); /* now, run through the stage1 handlers, and see if we need any * stage2 handlers to run later */ char need_second_stage = 0; for(int i=0;i<MAX_HANDLERS;i++) { if(interrupt_handlers[regs.int_no][i][0]) (interrupt_handlers[regs.int_no][i][0])(®s); if(interrupt_handlers[regs.int_no][i][1]) need_second_stage = 1; } /* if we need a second stage handler, increment the count for this * interrupt number, and indicate that handlers should check for * second stage handlers. */ if(need_second_stage) { add_atomic(&stage2_count[regs.int_no], 1); maybe_handle_stage_2 = 1; } assert(!get_cpu_interrupt_flag()); /* ok, now are we allowed to handle stage2's right here? 
*/ if(!already_in_interrupt && (maybe_handle_stage_2||need_second_stage)) { maybe_handle_stage_2 = 0; /* handle the stage2 handlers. NOTE: this may change to only * handling one interrupt, and/or one function. For now, this works. */ mutex_acquire(&s2_lock); for(int i=0;i<MAX_INTERRUPTS;i++) { if(stage2_count[i]) { /* decrease the count for this interrupt number, and loop through * all the second stage handlers and run them */ sub_atomic(&stage2_count[i], 1); for(int j=0;j<MAX_HANDLERS;j++) { if(interrupt_handlers[i][j][1]) { (interrupt_handlers[i][j][1])(®s); } } } } mutex_release(&s2_lock); assert(!get_cpu_interrupt_flag()); } /* ok, now lets clean up */ assert(!set_int(0)); /* clear the registers if we saved the ones from this interrupt */ if(current_task && clear_regs) current_task->regs=0; /* restore the flag in the cpu struct. The assembly routine will * call iret, which will also restore the EFLAG state to what * it was before, including the interrupts-enabled bit in eflags */ set_cpu_interrupt_flag(previous_interrupt_flag); /* and clear the state flag if this is going to return to user-space code */ if(!already_in_interrupt) lower_flag(TF_IN_INT); /* and send out the EOIs */ if(interrupt_controller == IOINT_PIC) ack_pic(regs.int_no); #if CONFIG_SMP lapic_eoi(); #endif }
/* Handle a trap or interrupt (early SMP xv6 kernel). */
void trap(struct trapframe *tf)
{
	int v = tf->trapno;
	struct proc *cp = curproc[cpu()];

	if(v == T_SYSCALL){
		// Check for a pending kill both before and after the syscall
		// so a killed process exits promptly.
		if(cp->killed)
			proc_exit();
		cp->tf = tf;
		syscall();
		if(cp->killed)
			proc_exit();
		return;
	}

	// Increment nlock to make sure interrupts stay off
	// during interrupt handler. Decrement before returning.
	cpus[cpu()].nlock++;

	switch(v){
	case IRQ_OFFSET + IRQ_TIMER:
		lapic_timerintr();
		cpus[cpu()].nlock--;
		if(cp){
			// Force process exit if it has been killed and is in user space.
			// (If it is still executing in the kernel, let it keep running
			// until it gets to the regular system call return.)
			if((tf->cs&3) == 3 && cp->killed)
				proc_exit();
			// Force process to give up CPU and let others run.
			if(cp->state == RUNNING)
				yield();
		}
		return;
	case IRQ_OFFSET + IRQ_IDE:
		ide_intr();
		lapic_eoi();
		break;
	case IRQ_OFFSET + IRQ_KBD:
		kbd_intr();
		lapic_eoi();
		break;
	case IRQ_OFFSET + IRQ_SPURIOUS:
		// Spurious vectors are deliberately not acknowledged with an EOI.
		cprintf("spurious interrupt from cpu %d eip %x\n", cpu(), tf->eip);
		break;
	default:
		if(curproc[cpu()]) {
			// Assume process divided by zero or dereferenced null, etc.
			cprintf("pid %d: unhandled trap %d on cpu %d eip %x -- kill proc\n", curproc[cpu()]->pid, v, cpu(), tf->eip);
			proc_exit();
		}
		// Otherwise it's our mistake.
		cprintf("unexpected trap %d from cpu %d eip %x\n", v, cpu(), tf->eip);
		panic("trap");
	}
	cpus[cpu()].nlock--;
}
static void trap_dispatch(struct Trapframe *tf) { // Handle processor exceptions. // LAB 3: Your code here. switch(tf->tf_trapno) { case T_PGFLT: page_fault_handler(tf); return; case T_BRKPT: case T_DEBUG: monitor(tf); return; case T_SYSCALL: tf->tf_regs.reg_eax = syscall(tf->tf_regs.reg_eax, // syscall # tf->tf_regs.reg_edx, // arg1 tf->tf_regs.reg_ecx, // arg2 tf->tf_regs.reg_ebx, // arg3 tf->tf_regs.reg_edi, // arg4 tf->tf_regs.reg_esi);// arg5 return; } // Handle spurious interrupts // The hardware sometimes raises these because of noise on the // IRQ line or other reasons. We don't care. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) { cprintf("Spurious interrupt on irq 7\n"); print_trapframe(tf); return; } // Handle clock interrupts. Don't forget to acknowledge the // interrupt using lapic_eoi() before calling the scheduler! // Add time tick increment to clock interrupts. // Be careful! In multiprocessors, clock interrupts are // triggered on every CPU. // LAB 4: Your code here. // LAB 6: Your code here. if (tf->tf_trapno == IRQ_OFFSET + IRQ_TIMER) { time_tick(); lapic_eoi(); sched_yield(); return; } // Handle keyboard and serial interrupts. // LAB 7: Your code here. if (tf->tf_trapno == IRQ_OFFSET + IRQ_SERIAL) { serial_intr(); return; } if (tf->tf_trapno == IRQ_OFFSET + IRQ_KBD) { kbd_intr(); return; } // Unexpected trap: The user process or the kernel has a bug. print_trapframe(tf); if (tf->tf_cs == GD_KT) panic("unhandled trap in kernel"); else { env_destroy(curenv); return; } }
// Dispatch a trap to the appropriate handler.  Unhandled traps fall
// through to `err`, which panics for kernel-mode traps and destroys
// the current environment for user-mode traps.
static void trap_dispatch(struct Trapframe *tf)
{
	int32_t res;   // syscall return value, copied back into eax

	// Handle processor exceptions.
	// LAB 3: Your code here.

	// Handle spurious interrupts
	// The hardware sometimes raises these because of noise on the
	// IRQ line or other reasons. We don't care.
	if (tf->tf_trapno == IRQ_OFFSET + IRQ_SPURIOUS) {
		cprintf("Spurious interrupt on irq 7\n");
		print_trapframe(tf);
		return;
	}

	// Handle clock interrupts. Don't forget to acknowledge the
	// interrupt using lapic_eoi() before calling the scheduler!
	// LAB 4: Your code here.

	// Add time tick increment to clock interrupts.
	// Be careful! In multiprocessors, clock interrupts are
	// triggered on every CPU.
	// LAB 6: Your code here.

	// Handle keyboard and serial interrupts.
	// LAB 5: Your code here.

	//if(tf->tf_trapno == 48 && tf->tf_regs.reg_eax==7)
	//{
	//cprintf("trap no = %d at cpu %d env %x\n",tf->tf_trapno,cpunum(),curenv->env_id);
	//print_trapframe(tf);
	//}

	switch(tf->tf_trapno)
	{
	case IRQ_OFFSET + IRQ_TIMER:
		//cprintf("clock interrupt on irq 7 on cpu %d\n",cpunum());
		//print_trapframe(tf);
		//cprintf("  eip  0x%08x\n", tf->tf_eip);
		//cprintf("  esp  0x%08x\n", tf->tf_esp);
		// EOI is sent before sched_yield(), which does not return here.
		lapic_eoi();
		time_tick();
		sched_yield();
		break;

	case IRQ_OFFSET + IRQ_SERIAL:
		serial_intr();
		break;

	case IRQ_OFFSET + IRQ_KBD:
		kbd_intr();
		break;

	case T_DIVIDE:
		// NOTE(review): divide-error is "handled" by forcing ecx to 1
		// and resuming -- looks like a project-specific workaround for
		// a known divide-by-ecx site; confirm the intent.
		tf->tf_regs.reg_ecx = 1;
		break;

	case T_PGFLT:
		// NOTE(review): if page_fault_handler() ever returns here, the
		// goto sends the environment to the unhandled-trap path below
		// (print + destroy/panic) -- verify that is the desired policy.
		page_fault_handler(tf);
		goto err;

	case T_SYSCALL:
		// Syscall number in eax, five args in edx/ecx/ebx/edi/esi;
		// result is returned to the user in eax.
		res = syscall(tf->tf_regs.reg_eax,tf->tf_regs.reg_edx,tf->tf_regs.reg_ecx,tf->tf_regs.reg_ebx,tf->tf_regs.reg_edi,tf->tf_regs.reg_esi);
		tf->tf_regs.reg_eax = res;
		break;

	case T_BRKPT:
		// Breakpoint: show state and drop into the kernel monitor.
		print_trapframe(tf);
		monitor(NULL);
		break;

	default:
		goto err;
	}
	return;

err:
	// Unexpected trap: The user process or the kernel has a bug.
	print_trapframe(tf);
	if (tf->tf_cs == GD_KT)
		panic("unhandled trap in kernel");
	else {
		env_destroy(curenv);
		return;
	}
}
void trap(struct trapframe *tf) { if(tf->trapno == T_SYSCALL){ if(cp->killed) exit(); cp->tf = tf; syscall(); if(cp->killed) exit(); return; } switch(tf->trapno){ case IRQ_OFFSET + IRQ_TIMER: if(cpu() == 0){ acquire(&tickslock); ticks++; wakeup(&ticks); release(&tickslock); } lapic_eoi(); break; case IRQ_OFFSET + IRQ_IDE: ide_intr(); lapic_eoi(); break; case IRQ_OFFSET + IRQ_KBD: kbd_intr(); lapic_eoi(); break; case IRQ_OFFSET + IRQ_SPURIOUS: cprintf("cpu%d: spurious interrupt at %x:%x\n", cpu(), tf->cs, tf->eip); lapic_eoi(); break; default: if(cp == 0 || (tf->cs&3) == 0){ // In kernel, it must be our mistake. cprintf("unexpected trap %d from cpu %d eip %x\n", tf->trapno, cpu(), tf->eip); panic("trap"); } // In user space, assume process misbehaved. cprintf("pid %d %s: trap %d err %d on cpu %d eip %x -- kill proc\n", cp->pid, cp->name, tf->trapno, tf->err, cpu(), tf->eip); cp->killed = 1; } // Force process exit if it has been killed and is in user space. // (If it is still executing in the kernel, let it keep running // until it gets to the regular system call return.) if(cp && cp->killed && (tf->cs&3) == DPL_USER) exit(); // Force process to give up CPU on clock tick. // If interrupts were on while locks held, would need to check nlock. if(cp && cp->state == RUNNING && tf->trapno == IRQ_OFFSET+IRQ_TIMER) yield(); }
void trap(struct trapframe *tf) { uint cr2; //print_trapframe(tf); procdump(); //chy for debug if (cp!=NULL) dbmsg("trap frame from %x %x, cp name %s\n",tf->eip, tf->trapno,cp->name); if(tf->trapno == T_SYSCALL){ if(cp->killed) exit(); cp->tf = tf; syscall(); if(cp->killed) exit(); return; } switch(tf->trapno){ cprintf("interrupt %x, trap frame : %x\n",tf->trapno, (uint)tf); case IRQ_OFFSET + IRQ_TIMER: // if(cpu() == 0){ acquire(&tickslock); ticks++; wakeup(&ticks); release(&tickslock); // } lapic_eoi(); break; case IRQ_OFFSET + IRQ_IDE: ide_intr(); lapic_eoi(); break; case IRQ_OFFSET + IRQ_KBD: kbd_intr(); lapic_eoi(); break; case IRQ_OFFSET + IRQ_SPURIOUS: cprintf("cpu%d: spurious interrupt at %x:%x\n", cpu(), tf->cs, tf->eip); lapic_eoi(); break; case T_PGFLT: cprintf("page fault!\n"); //cprintf("current process uses %d pages.\n", cp->sz/PAGE); cr2=rcr2(); if(handle_pgfault(cr2)<0){ cprintf("cannot handle page fault! Virtual addr: %x, eip: %x\n", cr2, tf->eip); }else{ cprintf("page fault handled successfully! Virtual addr: %x, eip: %x\n", cr2, tf->eip); } //cprintf("current process uses %d pages.\n", cp->sz/PAGE); break; default: if(cp == 0 || (tf->cs&3) == 0){ // In kernel, it must be our mistake. cprintf("unexpected trap %d from cpu %d eip %x esp %x cr2 %x, pid %d %s \n", tf->trapno, cpu(), tf->eip, tf->esp, rcr2(),cp->pid, cp->name); panic("trap"); } // In user space, assume process misbehaved. cprintf("pid %d %s: trap %d err %d on cpu %d eip %x cr2 %x -- kill proc\n", cp->pid, cp->name, tf->trapno, tf->err, cpu(), tf->eip, rcr2()); cp->killed = 1; } // Force process exit if it has been killed and is in user space. // (If it is still executing in the kernel, let it keep running // until it gets to the regular system call return.) if(cp && cp->killed && (tf->cs&3) == DPL_USER) exit(); // Force process to give up CPU on clock tick. // If interrupts were on while locks held, would need to check nlock. 
if(cp && cp->state == RUNNING && tf->trapno == IRQ_OFFSET+IRQ_TIMER){ #ifdef LOAD_BALANCE_ON if(cp == idleproc[cpu()]){ struct rq* rq = theCpu.rq; rq->sched_class->load_balance(rq); } #endif proc_tick(theCpu.rq, cp); } }