static void handleRecv(bool_t isBlocking)
{
    /* Receive phase of a system call: wait for a message on the endpoint or
     * notification named by the current thread's capability register.
     * isBlocking distinguishes seL4_Recv from the non-blocking variant. */
    word_t epCPtr = getRegister(ksCurThread, capRegister);
    lookupCap_ret_t lu_ret = lookupCap(ksCurThread, epCPtr);
    word_t cTag;

#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.cap_type = cap_get_capType(lu_ret.cap);
#endif

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    cTag = cap_get_capType(lu_ret.cap);

    if (cTag == cap_endpoint_cap) {
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            goto fault;
        }
        /* A fresh receive invalidates any outstanding reply cap. */
        deleteCallerCap(ksCurThread);
        receiveIPC(ksCurThread, lu_ret.cap, isBlocking);
        return;
    }

    if (cTag == cap_notification_cap) {
        notification_t *ntfnPtr =
            NTFN_PTR(cap_notification_cap_get_capNtfnPtr(lu_ret.cap));
        tcb_t *boundTCB =
            (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);

        /* Reject if the cap lacks receive rights, or if the notification is
         * bound to a different thread than the caller. */
        if (unlikely(!cap_notification_cap_get_capNtfnCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != ksCurThread))) {
            goto fault;
        }
        receiveSignal(ksCurThread, lu_ret.cap, isBlocking);
        return;
    }

fault:
    /* Not a receivable capability: raise a cap fault on the invoked cptr. */
    current_lookup_fault = lookup_fault_missing_capability_new(0);
    current_fault = fault_cap_fault_new(epCPtr, true);
    handleFault(ksCurThread);
}
static void handleWait(bool_t isBlocking)
{
    /* Wait phase of a system call: receive on the endpoint or asynchronous
     * endpoint named by the current thread's capability register. Note that
     * synchronous endpoint waits must be blocking in this revision. */
    word_t epCPtr = getRegister(ksCurThread, capRegister);
    lookupCap_ret_t lu_ret = lookupCap(ksCurThread, epCPtr);
    word_t cTag;

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    cTag = cap_get_capType(lu_ret.cap);

    if (cTag == cap_endpoint_cap) {
        /* Endpoint receives require receive rights and a blocking wait. */
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap)
                     || !isBlocking)) {
            goto fault;
        }
        /* A fresh receive invalidates any outstanding reply cap. */
        deleteCallerCap(ksCurThread);
        receiveIPC(ksCurThread, lu_ret.cap);
        return;
    }

    if (cTag == cap_async_endpoint_cap) {
        async_endpoint_t *aepptr =
            AEP_PTR(cap_async_endpoint_cap_get_capAEPPtr(lu_ret.cap));
        tcb_t *boundTCB =
            (tcb_t *)async_endpoint_ptr_get_aepBoundTCB(aepptr);

        /* Reject if the cap lacks receive rights, or if the async endpoint
         * is bound to a different thread than the caller. */
        if (unlikely(!cap_async_endpoint_cap_get_capAEPCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != ksCurThread))) {
            goto fault;
        }
        receiveAsyncIPC(ksCurThread, lu_ret.cap, isBlocking);
        return;
    }

fault:
    /* Not a waitable capability: raise a cap fault on the invoked cptr. */
    current_lookup_fault = lookup_fault_missing_capability_new(0);
    current_fault = fault_cap_fault_new(epCPtr, true);
    handleFault(ksCurThread);
}
static void handleWait(void) { word_t epCPtr; lookupCap_ret_t lu_ret; deleteCallerCap(ksCurThread); epCPtr = getRegister(ksCurThread, capRegister); lu_ret = lookupCap(ksCurThread, epCPtr); if (unlikely(lu_ret.status != EXCEPTION_NONE)) { /* current_lookup_fault has been set by lookupCap */ current_fault = fault_cap_fault_new(epCPtr, true); handleFault(ksCurThread); return; } switch (cap_get_capType(lu_ret.cap)) { case cap_endpoint_cap: if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) { current_lookup_fault = lookup_fault_missing_capability_new(0); current_fault = fault_cap_fault_new(epCPtr, true); handleFault(ksCurThread); break; } receiveIPC(ksCurThread, lu_ret.cap); break; case cap_async_endpoint_cap: if (unlikely(!cap_async_endpoint_cap_get_capAEPCanReceive(lu_ret.cap))) { current_lookup_fault = lookup_fault_missing_capability_new(0); current_fault = fault_cap_fault_new(epCPtr, true); handleFault(ksCurThread); break; } receiveAsyncIPC(ksCurThread, lu_ret.cap); break; default: current_lookup_fault = lookup_fault_missing_capability_new(0); current_fault = fault_cap_fault_new(epCPtr, true); handleFault(ksCurThread); break; } }
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    /* Kernel entry point for a VM fault taken by the current thread.
     * Attempts to resolve the fault; if that fails, delivers a fault
     * message to the thread's fault handler. Always reschedules. */
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.path = Entry_VMFault;
    ksKernelEntry.word = vm_faultType;
#endif /* DEBUG */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    exception_t fault_status = handleVMFault(ksCurThread, vm_faultType);
    if (fault_status != EXCEPTION_NONE) {
        /* Unresolvable here: hand the fault to the thread's fault handler. */
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif
    return EXCEPTION_NONE;
}
exception_t handleUserLevelDebugException(int int_vector) { tcb_t *ct; getAndResetActiveBreakpoint_t active_bp; testAndResetSingleStepException_t single_step_info; #if defined(CONFIG_DEBUG_BUILD) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES) ksKernelEntry.path = Entry_UserLevelFault; ksKernelEntry.word = int_vector; #else (void)int_vector; #endif /* DEBUG */ #ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES benchmark_track_start(); #endif ct = NODE_STATE(ksCurThread); /* Software break request (INT3) is detected by the vector number */ if (int_vector == int_software_break_request) { current_fault = seL4_Fault_DebugException_new(getRestartPC(NODE_STATE(ksCurThread)), 0, seL4_SoftwareBreakRequest); } else { /* Hardware breakpoint trigger is detected using DR6 */ active_bp = getAndResetActiveBreakpoint(ct); if (active_bp.bp_num >= 0) { current_fault = seL4_Fault_DebugException_new(active_bp.vaddr, active_bp.bp_num, active_bp.reason); } else { single_step_info = testAndResetSingleStepException(ct); if (single_step_info.ret == true) { /* If the caller asked us to skip over N instructions before * generating the next single-step breakpoint, we shouldn't * bother to construct a fault message until we've skipped N * instructions. */ if (singleStepFaultCounterReady(ct) == false) { return EXCEPTION_NONE; } current_fault = seL4_Fault_DebugException_new(single_step_info.instr_vaddr, 0, seL4_SingleStep); } else { return EXCEPTION_SYSCALL_ERROR; } } } handleFault(NODE_STATE(ksCurThread)); schedule(); activateThread(); return EXCEPTION_NONE; }
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    /* Package an architecture-specific user exception (number w_a, code w_b)
     * as a fault and deliver it to the current thread's fault handler,
     * then reschedule. */
    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
bool ARMDebug::dpRead(unsigned addr, bool APnDP, uint32_t &data) { unsigned retries = 10; unsigned ack; do { wireWrite(packHeader(addr, APnDP, true), 8); wireReadTurnaround(); ack = wireRead(3); switch (ack) { case 1: // Success data = wireRead(32); if (wireRead(1) != evenParity(data)) { wireWriteTurnaround(); wireWriteIdle(); log(LOG_ERROR, "PARITY ERROR during read (addr=%x APnDP=%d data=%08x)", addr, APnDP, data); return false; } wireWriteTurnaround(); wireWriteIdle(); log(LOG_TRACE_DP, "DP Read [%x:%x] %08x", addr, APnDP, data); return true; case 2: // WAIT wireWriteTurnaround(); wireWriteIdle(); log(LOG_TRACE_DP, "DP WAIT response, %d retries left", retries); retries--; break; case 4: // FAULT wireWriteTurnaround(); wireWriteIdle(); log(LOG_ERROR, "FAULT response during read (addr=%x APnDP=%d)", addr, APnDP); if (!handleFault()) log(LOG_ERROR, "Error during fault recovery!"); return false; default: wireWriteTurnaround(); wireWriteIdle(); log(LOG_ERROR, "PROTOCOL ERROR response during read (ack=%x addr=%x APnDP=%d)", ack, addr, APnDP); return false; } } while (retries--); log(LOG_ERROR, "WAIT timeout during read (addr=%x APnDP=%d)", addr, APnDP); return false; }
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    /* Kernel entry point for a VM fault taken by the current thread.
     * If the fault cannot be resolved, it is forwarded to the thread's
     * fault handler. Always reschedules before returning to user. */
    exception_t fault_status = handleVMFault(ksCurThread, vm_faultType);

    if (fault_status != EXCEPTION_NONE) {
        /* Unresolvable here: hand the fault to the thread's fault handler. */
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
/* Periodic scan of all inbound FDUs (file delivery units) across every
 * known CFDP entity: advance check timers on FDUs whose EOF has arrived,
 * and raise a check-limit-reached fault once an FDU exceeds the configured
 * timeout limit. Runs inside a single SDR transaction; returns -1 on
 * unrecoverable error (transaction cancelled).
 *
 * NOTE(review): this function appears truncated in the visible source —
 * the closing brace of the function, the end-of-transaction call, and the
 * success return are not shown. Confirm against the full file. */
static int scanInFdus(Sdr sdr, time_t currentTime)
{
	CfdpDB		*cfdpConstants;
	Object		entityElt;
			OBJ_POINTER(Entity, entity);
	Object		elt;
	Object		nextElt;
	Object		fduObj;
			OBJ_POINTER(InFdu, fdu);
	CfdpHandler	handler;

	cfdpConstants = getCfdpConstants();
	sdr_begin_xn(sdr);

	/* Walk every entity, and within it every inbound FDU. nextElt is
	 * captured up front because fault handling may disturb the list. */
	for (entityElt = sdr_list_first(sdr, cfdpConstants->entities);
			entityElt; entityElt = sdr_list_next(sdr, entityElt))
	{
		GET_OBJ_POINTER(sdr, Entity, entity,
				sdr_list_data(sdr, entityElt));
		for (elt = sdr_list_first(sdr, entity->inboundFdus); elt;
				elt = nextElt)
		{
			nextElt = sdr_list_next(sdr, elt);
			fduObj = sdr_list_data(sdr, elt);
			GET_OBJ_POINTER(sdr, InFdu, fdu, fduObj);
			if (fdu->eofReceived && fdu->checkTime < currentTime)
			{
				/* EOF seen but FDU still incomplete: bump the
				 * timeout count and push the check time out by
				 * one timer period, then persist the update. */
				sdr_stage(sdr, NULL, fduObj, 0);
				fdu->checkTimeouts++;
				fdu->checkTime
					+= cfdpConstants->checkTimerPeriod;
				sdr_write(sdr, fduObj, (char *) fdu,
						sizeof(InFdu));
			}

			if (fdu->checkTimeouts
					> cfdpConstants->checkTimeoutLimit)
			{
				/* Too many check timeouts: invoke the CFDP
				 * fault handler for CheckLimitReached. */
				if (handleFault(&(fdu->transactionId),
					CfdpCheckLimitReached, &handler) < 0)
				{
					sdr_cancel_xn(sdr);
					putErrmsg("Can't handle check limit \
reached.", NULL);
					return -1;
				}
			}
		}
	}
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    /* Package an architecture-specific user exception (number w_a, code w_b)
     * as a fault, record it for kernel debugging, and deliver it to the
     * current thread's fault handler before rescheduling. */
#ifdef DEBUG
    ksKernelEntry.path = Debug_UserLevelFault;
    ksKernelEntry.number = w_a;
    ksKernelEntry.code = w_b;
#endif /* DEBUG */

    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
exception_t handleVMFaultEvent(vm_fault_type_t vm_faultType)
{
    /* Kernel entry point for a VM fault taken by the current thread.
     * Records the entry for kernel debugging, attempts to resolve the
     * fault, and forwards it to the fault handler on failure. */
#ifdef DEBUG
    ksKernelEntry.path = Debug_VMFault;
    ksKernelEntry.fault_type = vm_faultType;
#endif /* DEBUG */

    exception_t fault_status = handleVMFault(ksCurThread, vm_faultType);
    if (fault_status != EXCEPTION_NONE) {
        /* Unresolvable here: hand the fault to the thread's fault handler. */
        handleFault(ksCurThread);
    }

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
exception_t handleUserLevelFault(word_t w_a, word_t w_b)
{
    /* Package an architecture-specific user exception (number w_a, code w_b)
     * as a fault and deliver it to the current thread's fault handler,
     * bracketed by kernel-entry tracking when enabled. */
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.path = Entry_UserLevelFault;
    ksKernelEntry.word = w_a;
#endif /* DEBUG */
#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    current_fault = fault_user_exception_new(w_a, w_b);
    handleFault(ksCurThread);

    schedule();
    activateThread();

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif
    return EXCEPTION_NONE;
}
/*virtual*/ SysStatus
ProcessShared<ALLOC>::userHandleFault(uval vaddr, VPNum vp)
{
    // LOCKING: none needed, see handleFault
    // Forward the user fault as a read-mode fault on vaddr for virtual
    // processor vp, with no additional context (NULL).
    return handleFault(AccessMode::readFault, vaddr, NULL, vp);
}
/* Dispatch a syscall number that is not one of the standard seL4 syscalls.
 * Debug builds service a set of kernel-debugging syscalls here; benchmark
 * builds service log-management syscalls. Anything unrecognised becomes an
 * unknown-syscall fault delivered to the current thread's fault handler. */
exception_t handleUnknownSyscall(word_t w)
{
#ifdef DEBUG
    /* Emit one character to the kernel console. */
    if (w == SysDebugPutChar) {
        kernel_putchar(getRegister(ksCurThread, capRegister));
        return EXCEPTION_NONE;
    }
    /* Halt the entire system at the user's request; does not return. */
    if (w == SysDebugHalt) {
        printf("Debug halt syscall from user thread 0x%x\n", (unsigned int)ksCurThread);
        halt();
    }
    /* Dump a capDL snapshot of the whole system state to the console. */
    if (w == SysDebugSnapshot) {
        printf("Debug snapshot syscall from user thread 0x%x\n", (unsigned int)ksCurThread);
        capDL();
        return EXCEPTION_NONE;
    }
    /* Return the type of the cap named in the cap register, in place. */
    if (w == SysDebugCapIdentify) {
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        setRegister(ksCurThread, capRegister, cap_type);
        return EXCEPTION_NONE;
    }
    if (w == SysDebugNameThread) {
        /* This is a syscall meant to aid debugging, so if anything goes wrong
         * then assume the system is completely misconfigured and halt */
        const char *name;
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);
        /* ensure we got a TCB cap */
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysDebugNameThread: cap is not a TCB, halting");
            halt();
        }
        /* Add 1 to the IPC buffer to skip the message info word */
        name = (const char*)(lookupIPCBuffer(true, ksCurThread) + 1);
        if (!name) {
            userError("SysDebugNameThread: Failed to lookup IPC buffer, halting");
            halt();
        }
        /* ensure the name isn't too long: the byte one past the maximum
         * permitted length must be the terminator. */
        if (name[strnlen(name, seL4_MsgMaxLength * sizeof(word_t))] != '\0') {
            userError("SysDebugNameThread: Name too long, halting");
            halt();
        }
        setThreadName(TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)), name);
        return EXCEPTION_NONE;
    }
#endif

#ifdef DANGEROUS_CODE_INJECTION
    /* Treat the cap register as a function pointer and call it with the
     * msgInfo register as its argument. Deliberately unsafe; only present
     * when code injection is explicitly configured. */
    if (w == SysDebugRun) {
        ((void (*) (void *))getRegister(ksCurThread, capRegister))((void*)getRegister(ksCurThread, msgInfoRegister));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_BENCHMARK
    if (w == SysBenchmarkResetLog) {
        /* Restart benchmark logging from the beginning of the log. */
        ksLogIndex = 0;
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkDumpLog) {
        /* Copy a window [start, start+size) of the benchmark log into the
         * caller's IPC buffer; the count actually written is returned in
         * the cap register. */
        int i;
        word_t *buffer = lookupIPCBuffer(true, ksCurThread);
        word_t start = getRegister(ksCurThread, capRegister);
        word_t size = getRegister(ksCurThread, msgInfoRegister);
        word_t logSize = ksLogIndex > MAX_LOG_SIZE ? MAX_LOG_SIZE : ksLogIndex;
        if (buffer == NULL) {
            userError("Cannot dump benchmarking log to a thread without an ipc buffer\n");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (start > logSize) {
            userError("Start > logsize\n");
            current_syscall_error.type = seL4_InvalidArgument;
            return EXCEPTION_SYSCALL_ERROR;
        }
        /* Assume we have access to an ipc buffer 1024 words big.
         * Do no write to the first 4 bytes as these are overwritten */
        if (size > MAX_IPC_BUFFER_STORAGE) {
            size = MAX_IPC_BUFFER_STORAGE;
        }
        /* trim to size */
        if ((start + size) > logSize) {
            size = logSize - start;
        }
        /* write to ipc buffer */
        for (i = 0; i < size; i++) {
            buffer[i + 1] = ksLog[i + start];
        }
        /* Return the amount written */
        setRegister(ksCurThread, capRegister, size);
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkLogSize) {
        /* Return the amount of log items we tried to log (may exceed max size) */
        setRegister(ksCurThread, capRegister, ksLogIndex);
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_BENCHMARK */

    /* Not a recognised debug/benchmark syscall: fault the thread. */
    current_fault = fault_unknown_syscall_new(w);
    handleFault(ksCurThread);
    schedule();
    activateThread();
    return EXCEPTION_NONE;
}
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking)
{
    /* Decode and perform a capability invocation by the current thread.
     * isCall distinguishes seL4_Call (kernel replies on error/success);
     * isBlocking controls whether lookup failures fault the thread.
     * May return EXCEPTION_PREEMPTED if the invocation was interrupted. */
    tcb_t *thread = ksCurThread;
    message_info_t info = messageInfoFromWord(getRegister(thread, msgInfoRegister));
    cptr_t cptr = getRegister(thread, capRegister);

    /* Faulting section: failures here raise cap faults (if blocking). */
    lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(thread, cptr);
    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invocation of invalid cap #%d.", (int)cptr);
        current_fault = fault_cap_fault_new(cptr, false);
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    word_t *buffer = lookupIPCBuffer(false, thread);
    exception_t status = lookupExtraCaps(thread, buffer, info);
    if (unlikely(status != EXCEPTION_NONE)) {
        userError("Lookup of extra caps failed.");
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Syscall error/Preemptible section */
    word_t length = message_info_get_msgLength(info);
    if (unlikely(length > n_msgRegisters && !buffer)) {
        /* Without an IPC buffer, only the message registers are visible. */
        length = n_msgRegisters;
    }

    status = decodeInvocation(message_info_get_msgLabel(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              current_extra_caps, isBlocking, isCall,
                              buffer);

    if (unlikely(status == EXCEPTION_PREEMPTED)) {
        return status;
    }

    if (unlikely(status == EXCEPTION_SYSCALL_ERROR)) {
        if (isCall) {
            replyFromKernel_error(thread);
        }
        return EXCEPTION_NONE;
    }

    if (unlikely(thread_state_get_tsType(thread->tcbState) ==
                 ThreadState_Restart)) {
        if (isCall) {
            replyFromKernel_success_empty(thread);
        }
        setThreadState(thread, ThreadState_Running);
    }

    return EXCEPTION_NONE;
}
static exception_t handleInvocation(bool_t isCall, bool_t isBlocking)
{
    /* Decode and perform a capability invocation by the current thread.
     * isCall distinguishes seL4_Call (kernel replies on error/success);
     * isBlocking controls whether lookup failures fault the thread.
     * May return EXCEPTION_PREEMPTED if the invocation was interrupted. */
    tcb_t *thread = ksCurThread;
    seL4_MessageInfo_t info = messageInfoFromWord(getRegister(thread, msgInfoRegister));
    cptr_t cptr = getRegister(thread, capRegister);

    /* Faulting section: failures here raise cap faults (if blocking). */
    lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(thread, cptr);

#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.cap_type = cap_get_capType(lu_ret.cap);
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = false;
#endif

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invocation of invalid cap #%lu.", cptr);
        current_fault = fault_cap_fault_new(cptr, false);
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    word_t *buffer = lookupIPCBuffer(false, thread);
    exception_t status = lookupExtraCaps(thread, buffer, info);
    if (unlikely(status != EXCEPTION_NONE)) {
        userError("Lookup of extra caps failed.");
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Syscall error/Preemptible section */
    word_t length = seL4_MessageInfo_get_length(info);
    if (unlikely(length > n_msgRegisters && !buffer)) {
        /* Without an IPC buffer, only the message registers are visible. */
        length = n_msgRegisters;
    }

    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              current_extra_caps, isBlocking, isCall,
                              buffer);

    if (unlikely(status == EXCEPTION_PREEMPTED)) {
        return status;
    }

    if (unlikely(status == EXCEPTION_SYSCALL_ERROR)) {
        if (isCall) {
            replyFromKernel_error(thread);
        }
        return EXCEPTION_NONE;
    }

    if (unlikely(thread_state_get_tsType(thread->tcbState) ==
                 ThreadState_Restart)) {
        if (isCall) {
            replyFromKernel_success_empty(thread);
        }
        setThreadState(thread, ThreadState_Running);
    }

    return EXCEPTION_NONE;
}