/* Kernel entry point for hardware interrupts: service the active IRQ if
 * one is pending, otherwise handle the spurious-interrupt case; then
 * reschedule and resume a thread.  Always returns EXCEPTION_NONE to the
 * trap stub. */
exception_t handleInterruptEntry(void)
{
    irq_t irq;

    irq = getActiveIRQ();
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record why the kernel was entered, for tracing/benchmarking. */
    ksKernelEntry.path = Entry_Interrupt;
    ksKernelEntry.word = irq;
#endif /* DEBUG || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    if (irq != irqInvalid) {
        handleInterrupt(irq);
    } else {
#ifdef CONFIG_IRQ_REPORTING
        printf("Spurious interrupt\n");
#endif
        /* No valid IRQ was pending: treat as spurious. */
        handleSpuriousIRQ();
    }

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    return EXCEPTION_NONE;
}
/* Kernel entry point for virtual-memory faults taken by the current
 * thread.  If the arch-specific handler cannot resolve the fault, the
 * fault is delivered to the thread's fault handler.  Always returns
 * EXCEPTION_NONE to the trap stub. */
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    exception_t status;

#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record why the kernel was entered, for tracing/benchmarking. */
    ksKernelEntry.path = Entry_VMFault;
    ksKernelEntry.word = vm_faultType;
#endif /* DEBUG || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    status = handleVMFault(ksCurThread, vm_faultType);
    if (status != EXCEPTION_NONE) {
        /* The fault could not be resolved in-kernel: deliver it to the
         * thread's fault handler. */
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    return EXCEPTION_NONE;
}
/* Kernel entry point for user-level debug exceptions.  Distinguishes a
 * software break request (by vector number), a hardware breakpoint
 * trigger, and a single-step trap, builds the corresponding
 * seL4_Fault_DebugException, and delivers it to the current thread's
 * fault handler.  Returns EXCEPTION_SYSCALL_ERROR when no debug event
 * was actually pending; EXCEPTION_NONE otherwise. */
exception_t handleUserLevelDebugException(int int_vector)
{
    tcb_t *ct;
    getAndResetActiveBreakpoint_t active_bp;
    testAndResetSingleStepException_t single_step_info;

#if defined(CONFIG_DEBUG_BUILD) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record why the kernel was entered, for tracing/benchmarking. */
    ksKernelEntry.path = Entry_UserLevelFault;
    ksKernelEntry.word = int_vector;
#else
    /* int_vector is only consumed by the tracking code above on these
     * configurations; silence the unused-parameter warning otherwise. */
    (void)int_vector;
#endif /* CONFIG_DEBUG_BUILD || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    ct = NODE_STATE(ksCurThread);

    /* Software break request (INT3) is detected by the vector number */
    if (int_vector == int_software_break_request) {
        current_fault = seL4_Fault_DebugException_new(getRestartPC(NODE_STATE(ksCurThread)),
                                                      0, seL4_SoftwareBreakRequest);
    } else {
        /* Hardware breakpoint trigger is detected using DR6 */
        active_bp = getAndResetActiveBreakpoint(ct);
        if (active_bp.bp_num >= 0) {
            /* A hardware breakpoint/watchpoint fired. */
            current_fault = seL4_Fault_DebugException_new(active_bp.vaddr,
                                                          active_bp.bp_num,
                                                          active_bp.reason);
        } else {
            single_step_info = testAndResetSingleStepException(ct);
            if (single_step_info.ret == true) {
                /* If the caller asked us to skip over N instructions before
                 * generating the next single-step breakpoint, we shouldn't
                 * bother to construct a fault message until we've skipped N
                 * instructions. */
                if (singleStepFaultCounterReady(ct) == false) {
                    return EXCEPTION_NONE;
                }
                current_fault = seL4_Fault_DebugException_new(single_step_info.instr_vaddr,
                                                              0, seL4_SingleStep);
            } else {
                /* Neither a breakpoint nor a single-step event was
                 * pending: report an error to the caller. */
                return EXCEPTION_SYSCALL_ERROR;
            }
        }
    }

    handleFault(NODE_STATE(ksCurThread));

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Deliver a user-level exception, described by the two fault words, to
 * the current thread's fault handler, then reschedule.  Always returns
 * EXCEPTION_NONE. */
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    /* Construct the fault record before handing it to the handler path. */
    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    /* Pick the next runnable thread and switch to it. */
    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Handle a VM fault raised by the current thread.  Faults the arch layer
 * cannot resolve are forwarded to the thread's fault handler.  Always
 * returns EXCEPTION_NONE. */
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    exception_t result = handleVMFault(ksCurThread, vm_faultType);

    if (result != EXCEPTION_NONE) {
        /* Unresolved fault: let the fault handler deal with it. */
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Record entry details (debug builds only), then deliver a user-level
 * exception to the current thread's fault handler and reschedule.
 * Always returns EXCEPTION_NONE. */
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
#ifdef DEBUG
    /* Trace the kernel entry for debug builds. */
    ksKernelEntry.path = Debug_UserLevelFault;
    ksKernelEntry.number = w_a;
    ksKernelEntry.code = w_b;
#endif /* DEBUG */

    /* Build the fault record, then hand it off for delivery. */
    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* C entry point for system boot: runs full system initialisation,
 * panicking if it fails, then resets the recorded per-node interrupt
 * state to the "invalid" sentinel and starts scheduling. */
BOOT_CODE VISIBLE void boot_sys(
    unsigned long multiboot_magic,
    multiboot_info_t* mbi)
{
    if (!try_boot_sys(multiboot_magic, mbi)) {
        fail("boot_sys failed for some reason :(\n");
    }

    /* Initialise the per-node interrupt bookkeeping to int_invalid. */
    ARCH_NODE_STATE(x86KScurInterrupt) = int_invalid;
    ARCH_NODE_STATE(x86KSPendingInterrupt) = int_invalid;

    schedule();
    activateThread();
}
/* Interrupt entry: dispatch the active IRQ when one exists; otherwise
 * report and handle a spurious interrupt.  Always returns
 * EXCEPTION_NONE. */
exception_t handleInterruptEntry(void)
{
    irq_t irq = getActiveIRQ();

    if (irq == irqInvalid) {
        printf("Spurious interrupt\n");
        handleSpuriousIRQ();
    } else {
        handleInterrupt(irq);
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Kernel initialisation entry point.  On SMP builds, CPU 0 performs the
 * full bootstrap while the other cores take the secondary-core path.
 * Panics on failure; on success, starts the scheduler. */
BOOT_CODE VISIBLE void init_kernel(
    paddr_t ui_p_reg_start,
    paddr_t ui_p_reg_end,
    sword_t pv_offset,
    vptr_t v_entry,
    paddr_t dtb_addr_p,
    uint32_t dtb_size
)
{
    /* A zero DTB address means no device tree was supplied, in which
     * case the end address stays zero as well. */
    paddr_t dtb_end_p = dtb_addr_p ? dtb_addr_p + dtb_size : 0;
    bool_t ok;

#ifdef ENABLE_SMP_SUPPORT
    /* we assume there exists a cpu with id 0 and will use it for bootstrapping */
    if (getCurrentCPUIndex() == 0) {
        ok = try_init_kernel(ui_p_reg_start, ui_p_reg_end, pv_offset,
                             v_entry, dtb_addr_p, dtb_end_p);
    } else {
        ok = try_init_kernel_secondary_core();
    }
#else
    ok = try_init_kernel(ui_p_reg_start, ui_p_reg_end, pv_offset,
                         v_entry, dtb_addr_p, dtb_end_p);
#endif /* ENABLE_SMP_SUPPORT */

    if (!ok) {
        fail("Kernel init failed for some reason :(");
    }

    schedule();
    activateThread();
}
/* Handle a VM fault (debug-traced variant).  Unresolved faults are
 * forwarded to the current thread's fault handler.  Always returns
 * EXCEPTION_NONE. */
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    exception_t outcome;

#ifdef DEBUG
    /* Trace the kernel entry for debug builds. */
    ksKernelEntry.path = Debug_VMFault;
    ksKernelEntry.fault_type = vm_faultType;
#endif /* DEBUG */

    outcome = handleVMFault(ksCurThread, vm_faultType);
    if (outcome != EXCEPTION_NONE) {
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
exception_t handleInterruptEntry(void) { irq_t irq; //printf("==in handleInterruptEntry function===\n"); irq = getActiveIRQ(); if (irq != irqInvalid) { //printf("will call hanleInterrupt function\n"); handleInterrupt(irq); } else { //printf("Spurious interrupt\n"); handleSpuriousIRQ(); } //printf("will schedule\n"); schedule(); activateThread(); return EXCEPTION_NONE; }
/* Deliver a user-level exception, described by the two fault words, to
 * the current thread's fault handler, then reschedule.  Always returns
 * EXCEPTION_NONE to the trap stub. */
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record why the kernel was entered, for tracing/benchmarking. */
    ksKernelEntry.path = Entry_UserLevelFault;
    ksKernelEntry.word = w_a;
#endif /* DEBUG || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    return EXCEPTION_NONE;
}
exception_t handleInterruptEntry(void) { irq_t irq; irq = getActiveIRQ(); #ifdef DEBUG ksKernelEntry.path = Debug_Interrupt; ksKernelEntry.irq = irq; #endif /* DEBUG */ if (irq != irqInvalid) { handleInterrupt(irq); } else { #ifdef CONFIG_IRQ_REPORTING printf("Spurious interrupt\n"); #endif handleSpuriousIRQ(); } schedule(); activateThread(); return EXCEPTION_NONE; }
/* Handle a syscall number the kernel does not recognise.  Debug builds
 * intercept a family of SysDebug* numbers here (putchar, halt, snapshot,
 * cap identify, thread naming) and benchmark builds intercept the
 * SysBenchmark* log operations; any other value results in an
 * unknown-syscall fault being delivered to the current thread's fault
 * handler.
 *
 * Fixes relative to the previous version:
 *  - the IPC buffer lookup result is NULL-checked BEFORE offsetting it
 *    (previously `lookup(...) + 1` was computed first, so a NULL buffer
 *    could never be detected);
 *  - the TCB pointer is printed via (unsigned long)/%lx instead of
 *    (unsigned int)/%x, which truncated it on 64-bit targets;
 *  - the log-copy loop index is word_t, avoiding a signed/unsigned
 *    comparison against the word_t size. */
exception_t handleUnknownSyscall(word_t w)
{
#ifdef DEBUG
    if (w == SysDebugPutChar) {
        kernel_putchar(getRegister(ksCurThread, capRegister));
        return EXCEPTION_NONE;
    }
    if (w == SysDebugHalt) {
        /* Print the full pointer: an unsigned int cast truncates on LP64. */
        printf("Debug halt syscall from user thread 0x%lx\n", (unsigned long)ksCurThread);
        halt();
    }
    if (w == SysDebugSnapshot) {
        printf("Debug snapshot syscall from user thread 0x%lx\n", (unsigned long)ksCurThread);
        capDL();
        return EXCEPTION_NONE;
    }
    if (w == SysDebugCapIdentify) {
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        setRegister(ksCurThread, capRegister, cap_type);
        return EXCEPTION_NONE;
    }
    if (w == SysDebugNameThread) {
        /* This is a syscall meant to aid debugging, so if anything goes wrong
         * then assume the system is completely misconfigured and halt */
        const char *name;
        word_t *ipcBuffer;
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);

        /* ensure we got a TCB cap */
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysDebugNameThread: cap is not a TCB, halting");
            halt();
        }

        /* Check the lookup result before offsetting it; adding 1 first
         * would make a NULL result undetectable. */
        ipcBuffer = lookupIPCBuffer(true, ksCurThread);
        if (!ipcBuffer) {
            userError("SysDebugNameThread: Failed to lookup IPC buffer, halting");
            halt();
        }
        /* Add 1 to the IPC buffer to skip the message info word */
        name = (const char*)(ipcBuffer + 1);

        /* ensure the name isn't too long */
        if (name[strnlen(name, seL4_MsgMaxLength * sizeof(word_t))] != '\0') {
            userError("SysDebugNameThread: Name too long, halting");
            halt();
        }

        setThreadName(TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)), name);
        return EXCEPTION_NONE;
    }
#endif

#ifdef DANGEROUS_CODE_INJECTION
    if (w == SysDebugRun) {
        /* Jump to an arbitrary user-supplied address with a user-supplied
         * argument — intentionally dangerous, gated by config. */
        ((void (*)(void *))getRegister(ksCurThread, capRegister))(
            (void*)getRegister(ksCurThread, msgInfoRegister));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_BENCHMARK
    if (w == SysBenchmarkResetLog) {
        ksLogIndex = 0;
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkDumpLog) {
        word_t i;
        word_t *buffer = lookupIPCBuffer(true, ksCurThread);
        word_t start = getRegister(ksCurThread, capRegister);
        word_t size = getRegister(ksCurThread, msgInfoRegister);
        word_t logSize = ksLogIndex > MAX_LOG_SIZE ? MAX_LOG_SIZE : ksLogIndex;

        if (buffer == NULL) {
            userError("Cannot dump benchmarking log to a thread without an ipc buffer\n");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (start > logSize) {
            userError("Start > logsize\n");
            current_syscall_error.type = seL4_InvalidArgument;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Assume we have access to an ipc buffer 1024 words big.
         * Do no write to the first 4 bytes as these are overwritten */
        if (size > MAX_IPC_BUFFER_STORAGE) {
            size = MAX_IPC_BUFFER_STORAGE;
        }

        /* trim to size */
        if ((start + size) > logSize) {
            size = logSize - start;
        }

        /* write to ipc buffer; word_t index avoids a signed/unsigned
         * comparison against size */
        for (i = 0; i < size; i++) {
            buffer[i + 1] = ksLog[i + start];
        }

        /* Return the amount written */
        setRegister(ksCurThread, capRegister, size);
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkLogSize) {
        /* Return the amount of log items we tried to log (may exceed max size) */
        setRegister(ksCurThread, capRegister, ksLogIndex);
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_BENCHMARK */

    current_fault = fault_unknown_syscall_new(w);
    handleFault(ksCurThread);

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Top-level syscall dispatcher.  When an invocation syscall does not
 * complete with EXCEPTION_NONE, any IRQ that is now pending is serviced
 * before rescheduling.  Always returns EXCEPTION_NONE. */
exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;

    switch (syscall) {
    /* The three invocation syscalls share a tail; encode the per-syscall
     * flags from the syscall number itself:
     *   SysCall   -> (true,  true)
     *   SysSend   -> (false, true)
     *   SysNBSend -> (false, false) */
    case SysSend:
    case SysNBSend:
    case SysCall:
        ret = handleInvocation(syscall == SysCall, syscall != SysNBSend);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysWait:
        handleWait();
        break;

    case SysReply:
        handleReply();
        break;

    case SysReplyWait:
        handleReply();
        handleWait();
        break;

    case SysYield:
        handleYield();
        break;

    default:
        fail("Invalid syscall");
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Top-level syscall dispatcher (variant with SysPoll).  When an
 * invocation syscall does not complete with EXCEPTION_NONE, any IRQ that
 * is now pending is serviced before rescheduling.  Always returns
 * EXCEPTION_NONE.
 *
 * (Removed several commented-out debug printf lines; they changed
 * nothing at runtime.) */
exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;

    switch (syscall) {
    case SysSend:
        ret = handleInvocation(false, true);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysNBSend:
        ret = handleInvocation(false, false);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysCall:
        ret = handleInvocation(true, true);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysWait:
        handleWait(true);
        break;

    case SysReply:
        handleReply();
        break;

    case SysReplyWait:
        handleReply();
        handleWait(true);
        break;

    case SysPoll:
        /* Same wait path as SysWait but with the flag cleared. */
        handleWait(false);
        break;

    case SysYield:
        handleYield();
        break;

    default:
        fail("Invalid syscall");
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Top-level syscall dispatcher (debug-traced Recv variant).  When an
 * invocation syscall does not complete with EXCEPTION_NONE, any IRQ that
 * is now pending is serviced before rescheduling.  Always returns
 * EXCEPTION_NONE. */
exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;

#ifdef DEBUG
    /* Trace the kernel entry for debug builds. */
    ksKernelEntry.path = Debug_Syscall;
    ksKernelEntry.syscall_no = syscall;
#endif /* DEBUG */

    switch (syscall) {
    /* The three invocation syscalls share a tail; encode the per-syscall
     * flags from the syscall number itself:
     *   SysCall   -> (true,  true)
     *   SysSend   -> (false, true)
     *   SysNBSend -> (false, false) */
    case SysSend:
    case SysNBSend:
    case SysCall:
        ret = handleInvocation(syscall == SysCall, syscall != SysNBSend);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysRecv:
        handleRecv(true);
        break;

    case SysReply:
        handleReply();
        break;

    case SysReplyRecv:
        handleReply();
        handleRecv(true);
        break;

    case SysNBRecv:
        handleRecv(false);
        break;

    case SysYield:
        handleYield();
        break;

    default:
        fail("Invalid syscall");
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Top-level syscall dispatcher (benchmark-tracked Recv variant).  When an
 * invocation syscall does not complete with EXCEPTION_NONE, any IRQ that
 * is now pending is serviced before rescheduling.  Always returns
 * EXCEPTION_NONE to the trap stub. */
exception_t handleSyscall(syscall_t syscall)
{
    exception_t ret;
    irq_t irq;

#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record why the kernel was entered, for tracing/benchmarking. */
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = syscall;
#endif /* DEBUG || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif /* CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */

    switch (syscall) {
    case SysSend:
        ret = handleInvocation(false, true);
        if (unlikely(ret != EXCEPTION_NONE)) {
            /* The invocation did not complete cleanly; service any IRQ
             * that became pending before rescheduling. */
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysNBSend:
        ret = handleInvocation(false, false);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysCall:
        ret = handleInvocation(true, true);
        if (unlikely(ret != EXCEPTION_NONE)) {
            irq = getActiveIRQ();
            if (irq != irqInvalid) {
                handleInterrupt(irq);
            }
        }
        break;

    case SysRecv:
        handleRecv(true);
        break;

    case SysReply:
        handleReply();
        break;

    case SysReplyRecv:
        handleReply();
        handleRecv(true);
        break;

    case SysNBRecv:
        handleRecv(false);
        break;

    case SysYield:
        handleYield();
        break;

    default:
        fail("Invalid syscall");
    }

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif /* CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */

    return EXCEPTION_NONE;
}