/* Voluntary thread termination: tell the scheduler this thread is on
 * its way out, then yield the CPU permanently. The scheduler never
 * returns control to an exiting thread, hence NOTREACHED(). */
void thread_exit(void)
{
	scheduler_thread_exiting();
	scheduler_schedule(NULL, NULL);
	NOTREACHED();
}
void fault(struct tcb *tcb, struct ccb *ccb) { if (tcb->ivect == 32) { ccb->lapic->eoi = 0; scheduler_schedule(); return; } if (tcb->ivect == 0x100) { if (tcb->rdi == 0 && tcb->state != TCB_STATE_RUNNING) { ccb_unload_tcb(); } scheduler_schedule(); return; } if (tcb->ivect == 14) { extern uint64_t read_cr2(void); log(ERROR, "page fault at addr %p (ip %p, sp %p, id %d)", read_cr2(), tcb->rip, tcb->rsp, tcb->id); log(ERROR, "translation: %p %p", pcx_get_trans(NULL, read_cr2()), pcx_get_flags(NULL, read_cr2())); queue_pagefault(ccb_unload_tcb()); interrupt_vector_fire(__PINION_INTERRUPT_PAGEFAULT); scheduler_schedule(); return; } if (tcb->ivect == 8) { log(ERROR, "PANIC: double fault"); for(;;); } if (tcb->ivect < 32) { queue_miscfault(ccb_unload_tcb()); interrupt_vector_fire(__PINION_INTERRUPT_MISCFAULT); scheduler_schedule(); return; } }
/** Handles an interrupt (exception code 0). All interrupt handlers * that are registered for any of the occured interrupts (hardware * 0-5, software 0-1) are called. The scheduler is called if a timer * interrupt (hardware 5) or a context switch request (software * interrupt 0) occured, or if the currently running thread for the * processor is the idle thread. * * @param cause The Cause register from CP0 */ void interrupt_handle(uint32_t cause) { int this_cpu, i; if(cause & INTERRUPT_CAUSE_SOFTWARE_0) { _interrupt_clear_sw0(); } this_cpu = _interrupt_getcpu(); /* Exceptions should be handled elsewhere: */ if((cause & 0x0000007c) != 0) { kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu); KERNEL_PANIC("Exception in interrupt_handle"); } /* Call appropiate interrupt handlers. Handlers cannot be * unregistered, so after the first empty * entry all others are * also empty. */ for (i=0; i<CONFIG_MAX_DEVICES; i++) { if (interrupt_handlers[i].device == NULL) break; /* If this handler is registered for any of the interrupts * that occured, call it. */ if ((cause & interrupt_handlers[i].irq) != 0) interrupt_handlers[i].handler(interrupt_handlers[i].device); } /* Timer interrupt (HW5) or requested context switch (SW0) * Also call scheduler if we're running the idle thread. */ if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 | INTERRUPT_CAUSE_HARDWARE_5)) || scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) { scheduler_schedule(); /* Until we have proper VM we must manually fill the TLB with pagetable entries before running code using given pagetable. Note that this method limits pagetable rows (possible mapping pairs) to 16 and can't be used with proper pagetables and VM. Note that if you remove this call (which you probably do when you implement proper VM), you must manually call _tlb_set_asid here. See the implementation of tlb_fill on details how to do that. */ _tlb_set_asid(thread_get_current_thread()); } }
/* Entry into the scheduler from an interrupt: saves the interrupted
 * process's register state (user or kernel save area, chosen by the
 * privilege level encoded in the saved CS selector), acknowledges the
 * PIC, and invokes the scheduler. Interrupts are disabled first so the
 * save/switch sequence is not reentered.
 * (Name spelling "intterupt" kept — static callers elsewhere use it.) */
static void scheduler_schedule_on_intterupt(cpu_state_t const *cpu,
                                            stack_state_t const *stack)
{
	ps_t *current;

	disable_interrupts();
	current = scheduler_get_current_process();

	/* CS with RPL 3 means we interrupted user mode. */
	if (stack->cs == (SEGSEL_USER_SPACE_CS | 0x03)) {
		scheduler_update_user_registers(current, cpu, stack);
	} else {
		scheduler_update_kernel_registers(current, cpu, stack);
	}

	pic_acknowledge();
	scheduler_schedule();
}
/* Test hook: dump the generic-timer physical counter (CNTPCT) and the
 * hypervisor timer value (CNTHP_TVAL) over the UART, then trigger a
 * guest context switch — unless the saved CPSR mode bits (0x1F mask)
 * show we trapped from Hyp mode (0x1A), in which case
 * context_switchto()/context_perform_switch() already cover it. */
void scheduler_test_switch_to_next_guest(void *pdata)
{
	struct arch_regs *regs = pdata;
	uint64_t counter = read_cntpct();
	uint32_t timer_val = read_cnthp_tval();

	uart_print("cntpct:");
	uart_print_hex64(counter);
	uart_print("\n\r");

	uart_print("cnth_tval:");
	uart_print_hex32(timer_val);
	uart_print("\n\r");

	if ((regs->cpsr & 0x1F) != 0x1A) {
		scheduler_schedule();
	}
}
uint64_t *task_switch(uint64_t *stack) { /* OK, We want to save current stack */ thread_table_t *task = thread_get_current_thread_entry(); /* Is it a usertask? */ if(task->attribs & THREAD_FLAG_USERMODE) task->user_context->stack = stack; else task->context->stack = stack; /* Schedule */ scheduler_schedule(); /* Get new task */ task = thread_get_current_thread_entry(); /* Switch page directory */ vmm_setcr3(task->context->pml4); /* Update TSS */ tss_setstack(0, (uint64_t)task->context->stack); /* Test if this new task is set to * enter usermode */ if(task->attribs & THREAD_FLAG_ENTERUSER) { task->attribs &= ~THREAD_FLAG_ENTERUSER; task->attribs |= THREAD_FLAG_USERMODE; } /* return new stack */ if(task->attribs & THREAD_FLAG_USERMODE) return task->user_context->stack; else return task->context->stack; }
/** Handles an interrupt (exception code 0). All interrupt handlers * that are registered for any of the occured interrupts (hardware * 0-5, software 0-1) are called. The scheduler is called if a timer * interrupt (hardware 5) or a context switch request (software * interrupt 0) occured, or if the currently running thread for the * processor is the idle thread. * * @param cause The Cause register from CP0 */ void interrupt_handle(uint32_t cause) { int this_cpu, i; if(cause & INTERRUPT_CAUSE_SOFTWARE_0) { _interrupt_clear_sw0(); } this_cpu = _interrupt_getcpu(); /* Exceptions should be handled elsewhere: */ if((cause & 0x0000007c) != 0) { kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu); KERNEL_PANIC("Exception in interrupt_handle"); } /* Call appropiate interrupt handlers. Handlers cannot be * unregistered, so after the first empty * entry all others are * also empty. */ for (i=0; i<CONFIG_MAX_DEVICES; i++) { if (interrupt_handlers[i].device == NULL) break; /* If this handler is registered for any of the interrupts * that occured, call it. */ if ((cause & interrupt_handlers[i].irq) != 0) interrupt_handlers[i].handler(interrupt_handlers[i].device); } /* Timer interrupt (HW5) or requested context switch (SW0) * Also call scheduler if we're running the idle thread. */ if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 | INTERRUPT_CAUSE_HARDWARE_5)) || scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) { scheduler_schedule(); tlb_fill(thread_get_current_thread_entry()->pagetable); } /* thread_table_t *thread = thread_get_current_thread_entry(); process_id_t pid = thread->process_id; if(pid == -1) { * Not a process thread. Use thread id with most significant bit flipped as ASID. Note: this limits both PROCESS_MAX_PROCESSES and the number of kernel work threads to 128 since ASID is one byte and the ASID address space is divided into two. * uint8_t asid = thread_get_current_thread() | 0x8; _tlb_set_asid(asid); return; } else { * Use PID as ASID. 
This ensures that threads within a process shares the same ASID _tlb_set_asid(pid); / } }*/ }
/** Handles an interrupt (exception code 0). Every interrupt handler
 * registered for any of the occurred interrupts (hardware 0-5,
 * software 0-1) is called. The scheduler runs if a timer interrupt
 * (hardware 5) or a context switch request (software interrupt 0)
 * occurred, or if this processor is currently running the idle thread.
 *
 * @param cause The Cause register from CP0
 */
void interrupt_handle(virtaddr_t cause)
{
	int cpu;
	int slot;

	/* Acknowledge a pending software interrupt 0 (switch request). */
	if (cause & INTERRUPT_CAUSE_SOFTWARE_0) {
		_interrupt_clear_sw0();
	}

	cpu = _interrupt_getcpu();

	/* Exceptions (nonzero ExcCode bits) belong to another path. */
	if ((cause & 0x0000007c) != 0) {
		kprintf("Caught exception, cause %.8x, CPU %i\n", cause, cpu);
		KERNEL_PANIC("Exception in interrupt_handle");
	}

	/* Call each registered handler whose IRQ mask overlaps the cause
	 * bits. Handlers are never unregistered, so the first empty slot
	 * terminates the table. */
	for (slot = 0; slot < CONFIG_MAX_DEVICES; slot++) {
		if (interrupt_handlers[slot].device == NULL) {
			break;
		}
		if (cause & interrupt_handlers[slot].irq) {
			interrupt_handlers[slot].handler(interrupt_handlers[slot].device);
		}
	}

	/* Reschedule on a timer tick (HW5), an explicit switch request
	 * (SW0), or whenever the idle thread holds this CPU. */
	if ((cause & (INTERRUPT_CAUSE_SOFTWARE_0 | INTERRUPT_CAUSE_HARDWARE_5))
	    || scheduler_current_thread[cpu] == IDLE_THREAD_TID) {
		scheduler_schedule();

		/* Manual TLB refill, needed until a proper VM subsystem
		 * exists: the whole pagetable is written into the TLB
		 * before user code runs on it. This caps mappings at the
		 * TLB size and must be replaced (with _tlb_set_asid only)
		 * once real demand paging is in place. */
		pagetable_t *pagetable =
		    thread_get_current_thread_entry()->pagetable;
		if (pagetable == NULL) {
			return;
		}

		/* The entire pagetable must fit in the TLB for this
		 * scheme to be sound. */
		KERNEL_ASSERT(pagetable->valid_count
		              <= (_tlb_get_maxindex() + 1));
		_tlb_write(pagetable->entries, 0, pagetable->valid_count);

		/* Tag CP0 with this thread's ASID so only its entries
		 * match in the TLB hardware. */
		_tlb_set_asid(pagetable->ASID);
	}
}
/* Kernel entry after the bootloader: parses the boot configuration,
 * brings up memory management, interrupts, the per-CPU control block
 * and LAPIC timer, registers interrupt vector pages, probes ACPI,
 * seeds the scheduler with the initial thread, and finally loads the
 * kernel image. Statement order is load-bearing throughout: the PMM
 * must precede paging, the CCB must exist before the LAPIC is touched,
 * and the timer LVT is kept masked until the count is programmed.
 *
 * @param loader      bootloader handoff value (unused here)
 * @param object_list boot objects (config file, kernel image)
 * @param memory_map  physical memory map from the loader */
void init(uint64_t loader, struct unfold64_objl *object_list, struct unfold64_mmap *memory_map) {

	// parse configuration from the /boot/pconf object, if present
	for (size_t i = 0; i < object_list->count; i++) {
		if (!strcmp(object_list->entry[i].name, "/boot/pconf")) {
			config_parse((char*) object_list->entry[i].base);
			break;
		}
	}

	// initialize the physical memory manager
	pmm_init(memory_map);

	// initialize paging
	pcx_init();

	// initialize interrupt handling
	idt_init();

	// allocate the CCB for processor 0 (must precede any LAPIC access)
	ccb_new();

	// initialize LAPIC timer
	struct ccb *ccb = ccb_get_self();
	// flat logical destination model, this CPU in logical group 1
	ccb->lapic->destination_format = 0xFFFFFFFF;
	ccb->lapic->logical_destination = (ccb->lapic->logical_destination & 0xFFFFFF) | 1;
	// mask (bit 16) timer, LINT0, LINT1 while configuring; route
	// performance-counter interrupts to vector 0x400's encoded setting
	// NOTE(review): 0x400 here sets an LVT delivery-mode bit, not a
	// vector — confirm intended PMC delivery mode against the SDM
	ccb->lapic->lvt_timer = 0x10000;
	ccb->lapic->lvt_performance_monitoring_counters = 0x400;
	ccb->lapic->lvt_lint0 = 0x10000;
	ccb->lapic->lvt_lint1 = 0x10000;
	// accept all interrupt priorities
	ccb->lapic->task_priority = 0;
	// software-enable the APIC (bit 8) with spurious vector 33
	ccb->lapic->spurious_interrupt_vector = 33 | 0x100;
	ccb->lapic->timer_initial_count = 100000; // roughly 1 KHz
	// unmask the timer: vector 32, periodic mode (bit 17)
	ccb->lapic->lvt_timer = 32 | 0x20000;
	ccb->lapic->timer_divide_configuration = 3; // 16

	// initialize interrupt routes

	// pinion (pagefault, zombie, etc.) interrupt vector page
	pinion_vector_page_vtable.on_reset = pinion_on_reset;
	interrupt_add_vector_page(0x0080, &pinion_vector_page_vtable);

	// IRQ interrupt vector page
	irq_vector_page_vtable.on_fire = irq_on_fire;
	irq_vector_page_vtable.on_reset = irq_on_reset;
	interrupt_add_vector_page(0x0100, &irq_vector_page_vtable);

	// initialize ACPI (for IRQ routing info)
	init_acpi();

	// allocate initial thread TCB and add to scheduler
	scheduler_add_tcb(tcb_new());

	// schedule first thread
	scheduler_schedule();

	// load kernel image
	load_kernel(object_list);
}