/* The following functions sit in the syscall error monad, but include the
 * exception cases for the preemptible bottom end, as they call the invoke
 * functions directly. This is a significant deviation from the Haskell
 * spec. */
exception_t
decodeTCBInvocation(word_t label, unsigned int length, cap_t cap,
                    cte_t *slot, extra_caps_t extraCaps, bool_t call,
                    word_t *buffer)
{
    switch (label) {
    case TCBReadRegisters:
        /* Second level of decoding */
        return decodeReadRegisters(cap, length, call, buffer);

    case TCBWriteRegisters:
        return decodeWriteRegisters(cap, length, buffer);

    case TCBCopyRegisters:
        return decodeCopyRegisters(cap, length, extraCaps, buffer);

    case TCBSuspend:
        /* Jump straight to the invoke */
        setThreadState(ksCurThread, ThreadState_Restart);
        return invokeTCB_Suspend(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBResume:
        setThreadState(ksCurThread, ThreadState_Restart);
        return invokeTCB_Resume(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBConfigure:
        return decodeTCBConfigure(cap, length, slot, extraCaps, buffer);

    case TCBSetPriority:
        return decodeSetPriority(cap, length, buffer);

    case TCBSetIPCBuffer:
        return decodeSetIPCBuffer(cap, length, slot, extraCaps, buffer);

    case TCBSetSpace:
        return decodeSetSpace(cap, length, slot, extraCaps, buffer);

    case TCBBindAEP:
        return decodeBindAEP(cap, extraCaps);

    case TCBUnbindAEP:
        return decodeUnbindAEP(cap);

        /* This is temporary until arch specific TCB operations are implemented */
#ifdef CONFIG_VTX
    case TCBSetEPTRoot:
        return decodeSetEPTRoot(cap, extraCaps);
#endif

    default:
        /* Haskell: "throw IllegalOperation" */
        userError("TCB: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
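/* Usage sketch (user level, not kernel code): invocation labels such as
 * TCBSuspend arrive in the message tag and are dispatched by
 * decodeTCBInvocation above. The libsel4 wrapper name and signature below
 * are assumed from this kernel era and may differ in other versions. */
#include <sel4/sel4.h>

static void suspend_worker(seL4_TCB worker)
{
    /* Hits the TCBSuspend case: there are no arguments to decode, so the
     * kernel restarts the invoker and calls invokeTCB_Suspend directly. */
    int err = seL4_TCB_Suspend(worker);
    if (err != seL4_NoError) {
        /* e.g. seL4_IllegalOperation from the default case above */
    }
}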
exception_t
decodeSetEPTRoot(cap_t cap, extra_caps_t extraCaps)
{
    tcb_t *tcb;
    cte_t *rootSlot;
    exception_t e;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("TCB SetEPTRoot: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(extraCaps.excaprefs[0]->cap) !=
            cap_ept_page_directory_pointer_table_cap) {
        userError("TCB SetEPTRoot: EPT PDPT is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    rootSlot = TCB_PTR_CTE_PTR(tcb, tcbArchEPTRoot);

    e = cteDelete(rootSlot, true);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    cteInsert(extraCaps.excaprefs[0]->cap, extraCaps.excaprefs[0], rootSlot);

    setThreadState(ksCurThread, ThreadState_Restart);
    return EXCEPTION_NONE;
}
exception_t
decodeReadRegisters(cap_t cap, unsigned int length, bool_t call,
                    word_t *buffer)
{
    word_t transferArch, flags, n;

    if (length < 2) {
        userError("TCB ReadRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);
    n = getSyscallArg(1, buffer);

    if (n < 1 || n > n_frameRegisters + n_gpRegisters) {
        userError("TCB ReadRegisters: Attempted to read an invalid number of registers (%d).",
                  (int)n);
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 1;
        current_syscall_error.rangeErrorMax = n_frameRegisters +
                                              n_gpRegisters;
        return EXCEPTION_SYSCALL_ERROR;
    }

    transferArch = Arch_decodeTransfer(flags >> 8);

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ReadRegisters(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)),
               flags & BIT(ReadRegisters_suspend),
               n, transferArch, call);
}
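/* Sketch of the flags word decoded above: bit 0 (ReadRegisters_suspend)
 * requests suspension of the target, and bits 8 and up are handed to
 * Arch_decodeTransfer. A hypothetical caller, assuming this era's libsel4
 * wrapper signature: */
#include <sel4/sel4.h>

static int dump_registers(seL4_TCB target)
{
    seL4_UserContext regs;
    seL4_Word count = sizeof(regs) / sizeof(seL4_Word);

    /* suspend_source = 1 sets bit 0; arch_flags lands in bits 8+. count
     * must lie in [1, n_frameRegisters + n_gpRegisters] or the kernel
     * returns seL4_RangeError. */
    return seL4_TCB_ReadRegisters(target, 1, 0, count, &regs);
}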
void
epCancelAll(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
            tcbSchedEnqueue(thread);
        }

        rescheduleRequired();
        break;
    }
    }
}
exception_t
decodeWriteRegisters(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t flags, w;
    word_t transferArch;
    tcb_t *thread;

    if (length < 2) {
        userError("TCB WriteRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);
    w = getSyscallArg(1, buffer);

    if (length - 2 < w) {
        userError("TCB WriteRegisters: Message too short for requested write size (%d/%d).",
                  (int)(length - 2), (int)w);
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    transferArch = Arch_decodeTransfer(flags >> 8);

    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_WriteRegisters(thread,
                                    flags & BIT(WriteRegisters_resume),
                                    w, transferArch, buffer);
}
void benchmark_track_utilisation_dump(void)
{
    uint64_t *buffer = (uint64_t *) & (((seL4_IPCBuffer *)lookupIPCBuffer(
                                            true, NODE_STATE(ksCurThread)))->msg[0]);
    tcb_t *tcb = NULL;
    word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
    lookupCap_ret_t lu_ret;
    word_t cap_type;

    lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
    /* ensure we got a TCB cap */
    cap_type = cap_get_capType(lu_ret.cap);
    if (cap_type != cap_thread_cap) {
        userError("SysBenchmarkFinalizeLog: cap is not a TCB, halting");
        return;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));

    /* Requested thread utilisation */
    buffer[BENCHMARK_TCB_UTILISATION] = tcb->benchmark.utilisation;
    /* Idle thread utilisation of current CPU */
    buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION] =
        NODE_STATE(ksIdleThread)->benchmark.utilisation;

#ifdef ENABLE_SMP_SUPPORT
    /* Idle thread utilisation of the CPU the TCB is running on */
    buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] =
        NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity)->benchmark.utilisation;
#else
    buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] =
        buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION];
#endif

#ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT
    buffer[BENCHMARK_TOTAL_UTILISATION] =
        (ccnt_num_overflows * 0xFFFFFFFFU) + benchmark_end_time - benchmark_start_time;
#else
    /* Overall time */
    buffer[BENCHMARK_TOTAL_UTILISATION] =
        benchmark_end_time - benchmark_start_time;
#endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */
}
exception_t
decodeSetPriority(cap_t cap, unsigned int length, word_t *buffer)
{
    prio_t newPrio;

    if (length < 1) {
        userError("TCB SetPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    newPrio = getSyscallArg(0, buffer);

    /* assuming here seL4_MaxPrio is of the form 2^n - 1 */
    newPrio = newPrio & MASK(8);

    if (newPrio > ksCurThread->tcbPriority) {
        userError("TCB SetPriority: Requested priority %d too high (max %d).",
                  (int)newPrio, (int)ksCurThread->tcbPriority);
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, newPrio,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(), NULL,
               thread_control_update_priority);
}
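/* The decoder above masks the requested priority to 8 bits and rejects
 * anything above the invoker's own priority, so a thread can only hand out
 * priorities at or below its own. A minimal sketch, assuming the
 * single-priority-argument wrapper of this libsel4 era: */
#include <sel4/sel4.h>

static int demote_thread(seL4_TCB worker, seL4_Uint8 prio)
{
    /* Fails with seL4_IllegalOperation if prio > the caller's priority. */
    return seL4_TCB_SetPriority(worker, prio);
}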
exception_t
decodeBindAEP(cap_t cap, extra_caps_t extraCaps)
{
    async_endpoint_t *aepptr;
    tcb_t *tcb;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("TCB BindAEP: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(extraCaps.excaprefs[0]->cap) != cap_async_endpoint_cap) {
        userError("TCB BindAEP: Async endpoint is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    if (tcb->boundAsyncEndpoint) {
        userError("TCB BindAEP: TCB already has AEP.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    aepptr = AEP_PTR(cap_async_endpoint_cap_get_capAEPPtr(extraCaps.excaprefs[0]->cap));
    if ((tcb_t *)async_endpoint_ptr_get_aepQueue_head(aepptr)) {
        userError("TCB BindAEP: AEP cannot be bound.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_AEPControl(tcb, aepptr);
}
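/* Hedged sketch of the bind path: it fails if the TCB already has a bound
 * AEP or if the AEP already has waiters queued. seL4_TCB_BindAEP is the
 * wrapper name assumed from the async-endpoint era (later kernels renamed
 * AEPs to notifications and the wrapper accordingly). */
#include <sel4/sel4.h>

static int bind_interrupt_aep(seL4_TCB handler, seL4_CPtr aep)
{
    return seL4_TCB_BindAEP(handler, aep);
}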
exception_t
decodeSetTCB(cap_t cap, unsigned int length, word_t *buffer,
             extra_caps_t extraCaps)
{
    cap_t tcbCap;
    cte_t *tcbSlot;
    deriveCap_ret_t dc_ret;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcbSlot = extraCaps.excaprefs[0];
    tcbCap = extraCaps.excaprefs[0]->cap;

    dc_ret = deriveCap(tcbSlot, tcbCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    tcbCap = dc_ret.cap;

    if (cap_get_capType(tcbCap) != cap_thread_cap) {
        userError("VCPU SetTCB: Cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)),
                        TCB_PTR(cap_thread_cap_get_capTCBPtr(tcbCap)));
}
exception_t
decodeCopyRegisters(cap_t cap, unsigned int length,
                    extra_caps_t extraCaps, word_t *buffer)
{
    word_t transferArch;
    tcb_t *srcTCB;
    cap_t source_cap;
    word_t flags;

    if (length < 1 || extraCaps.excaprefs[0] == NULL) {
        userError("TCB CopyRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    flags = getSyscallArg(0, buffer);

    transferArch = Arch_decodeTransfer(flags >> 8);

    source_cap = extraCaps.excaprefs[0]->cap;

    if (cap_get_capType(source_cap) == cap_thread_cap) {
        srcTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(source_cap));
    } else {
        userError("TCB CopyRegisters: Invalid source TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_CopyRegisters(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), srcTCB,
               flags & BIT(CopyRegisters_suspendSource),
               flags & BIT(CopyRegisters_resumeTarget),
               flags & BIT(CopyRegisters_transferFrame),
               flags & BIT(CopyRegisters_transferInteger),
               transferArch);
}
BOOT_CODE bool_t
create_idle_thread(void)
{
    pptr_t pptr;

    pptr = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: Unable to allocate tcb for idle thread\n");
        return false;
    }
    memzero((void *)pptr, 1 << TCB_BLOCK_SIZE_BITS);
    ksIdleThread = TCB_PTR(pptr + TCB_OFFSET);
    configureIdleThread(ksIdleThread);

    return true;
}
exception_t
decodeUnbindAEP(cap_t cap)
{
    tcb_t *tcb;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    if (!tcb->boundAsyncEndpoint) {
        userError("TCB UnbindAEP: TCB already has no bound AEP.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_AEPControl(tcb, NULL);
}
void cancelAllSignals(notification_t *ntfnPtr)
{
    if (notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting) {
        tcb_t *thread = TCB_PTR(notification_ptr_get_ntfnQueue_head(ntfnPtr));

        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        notification_ptr_set_ntfnQueue_head(ntfnPtr, 0);
        notification_ptr_set_ntfnQueue_tail(ntfnPtr, 0);

        /* Set all waiting threads to Restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
            SCHED_ENQUEUE(thread);
        }

        rescheduleRequired();
    }
}
exception_t
decodeSetIPCBuffer(cap_t cap, unsigned int length, cte_t *slot,
                   extra_caps_t extraCaps, word_t *buffer)
{
    cptr_t cptr_bufferPtr;
    cap_t bufferCap;
    cte_t *bufferSlot;

    if (length < 1 || extraCaps.excaprefs[0] == NULL) {
        userError("TCB SetIPCBuffer: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cptr_bufferPtr = getSyscallArg(0, buffer);
    bufferSlot = extraCaps.excaprefs[0];
    bufferCap = extraCaps.excaprefs[0]->cap;

    if (cptr_bufferPtr == 0) {
        bufferSlot = NULL;
    } else {
        exception_t e;
        deriveCap_ret_t dc_ret;

        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;
        e = checkValidIPCBuffer(cptr_bufferPtr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               0, 0, /* used to be prioInvalid, but it doesn't matter */
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_update_ipc_buffer);
}
exception_t
decodeDomainInvocation(word_t label, unsigned int length,
                       extra_caps_t extraCaps, word_t *buffer)
{
    word_t domain;
    cap_t tcap;

    if (unlikely(label != DomainSetSet)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length == 0)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    } else {
        domain = getSyscallArg(0, buffer);
        if (domain >= CONFIG_NUM_DOMAINS) {
            userError("Domain Configure: invalid domain (%u >= %u).",
                      domain, CONFIG_NUM_DOMAINS);
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }
    }

    if (unlikely(extraCaps.excaprefs[0] == NULL)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcap = extraCaps.excaprefs[0]->cap;
    if (unlikely(cap_get_capType(tcap) != cap_thread_cap)) {
        userError("Domain Configure: thread cap required.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    setDomain(TCB_PTR(cap_thread_cap_get_capTCBPtr(tcap)), domain);
    return EXCEPTION_NONE;
}
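/* Sketch of the calling convention checked above: one message word (the
 * domain index, which must be below CONFIG_NUM_DOMAINS) plus one extra cap
 * (a thread cap). Wrapper name and signature assumed from this era's
 * libsel4: */
#include <sel4/sel4.h>

static int move_to_domain(seL4_DomainSet domain_cap, seL4_Uint8 domain,
                          seL4_TCB tcb)
{
    /* seL4_InvalidArgument (argument 0) for a bad domain index,
     * (argument 1) when the extra cap is not a thread cap. */
    return seL4_DomainSet_Set(domain_cap, domain, tcb);
}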
void benchmark_track_reset_utilisation(void)
{
    tcb_t *tcb = NULL;
    word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
    lookupCap_ret_t lu_ret;
    word_t cap_type;

    lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
    /* ensure we got a TCB cap */
    cap_type = cap_get_capType(lu_ret.cap);
    if (cap_type != cap_thread_cap) {
        userError("SysBenchmarkResetThreadUtilisation: cap is not a TCB, halting");
        return;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));

    tcb->benchmark.utilisation = 0;
    tcb->benchmark.schedule_start_time = 0;
}
void
setupCallerCap(tcb_t *sender, tcb_t *receiver)
{
    cte_t *replySlot, *callerSlot;
    cap_t masterCap UNUSED, callerCap UNUSED;

    setThreadState(sender, ThreadState_BlockedOnReply);
    replySlot = TCB_PTR_CTE_PTR(sender, tcbReply);
    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);

    masterCap = replySlot->cap;
    /* Haskell error: "Sender must have a valid master reply cap" */
    assert(cap_get_capType(masterCap) == cap_reply_cap);
    assert(cap_reply_cap_get_capReplyMaster(masterCap));
    assert(TCB_PTR(cap_reply_cap_get_capTCBPtr(masterCap)) == NULL);
    cap_reply_cap_ptr_set_capCallerSlot(&replySlot->cap, CTE_REF(callerSlot));

    callerCap = callerSlot->cap;
    /* Haskell error: "Caller cap must not already exist" */
    assert(cap_get_capType(callerCap) == cap_null_cap);
    callerSlot->cap = cap_reply_cap_new(CTE_REF(NULL), false, TCB_REF(sender));
}
static void
handleReply(void)
{
    cte_t *callerSlot;
    cap_t callerCap;

    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;

#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.cap_type = cap_get_capType(callerCap);
#endif

    switch (cap_get_capType(callerCap)) {
    case cap_reply_cap: {
        tcb_t *caller;

        if (cap_reply_cap_get_capReplyMaster(callerCap)) {
            break;
        }
        caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));
        /* Haskell error:
         * "handleReply: caller must not be the current thread" */
        assert(caller != ksCurThread);
        doReplyTransfer(ksCurThread, caller, callerSlot);
        return;
    }

    case cap_null_cap:
        userError("Attempted reply operation when no reply cap present.");
        return;

    default:
        break;
    }

    fail("handleReply: invalid caller cap");
}
exception_t
decodeSetSpace(cap_t cap, unsigned int length, cte_t *slot,
               extra_caps_t extraCaps, word_t *buffer)
{
    cptr_t faultEP;
    word_t cRootData, vRootData;
    cte_t *cRootSlot, *vRootSlot;
    cap_t cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;

    if (length < 3 || extraCaps.excaprefs[0] == NULL ||
        extraCaps.excaprefs[1] == NULL) {
        userError("TCB SetSpace: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    faultEP = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);

    cRootSlot = extraCaps.excaprefs[0];
    cRootCap = extraCaps.excaprefs[0]->cap;
    vRootSlot = extraCaps.excaprefs[1];
    vRootCap = extraCaps.excaprefs[1]->cap;

    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB SetSpace: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap &&
        (!config_set(CONFIG_ALLOW_NULL_CSPACE) ||
         cap_get_capType(cRootCap) != cap_null_cap)) {
        userError("TCB SetSpace: Invalid CNode cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB SetSpace: Invalid VSpace cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP,
               0, /* used to be prioInvalid, but it doesn't matter */
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_update_space);
}
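/* Sketch of the SetSpace convention decoded above: three message words
 * (fault EP, CSpace data, VSpace data) and two extra caps (the new CSpace
 * and VSpace roots). The seL4_CapData_t-based wrapper signature is assumed
 * from this era's libsel4: */
#include <sel4/sel4.h>

static int give_spaces(seL4_TCB tcb, seL4_CPtr fault_ep,
                       seL4_CNode croot, seL4_CapData_t croot_data,
                       seL4_CPtr vroot, seL4_CapData_t vroot_data)
{
    /* Fails with seL4_IllegalOperation while either existing root is in
     * the middle of a long-running delete. */
    return seL4_TCB_SetSpace(tcb, fault_ep, croot, croot_data,
                             vroot, vroot_data);
}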
void
fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;
    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    dom_t dom;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (NODE_STATE(ksCurThread)->tcbBoundNotification &&
        notification_ptr_get_state(NODE_STATE(ksCurThread)->tcbBoundNotification) ==
        NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* ensure we are not single stepping the caller in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (caller->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Check that the caller has not faulted, in which case a fault
       reply is generated instead. */
    fault_type = seL4_Fault_get_seL4_FaultType(caller->tcbFault);
    if (unlikely(fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

#ifdef CONFIG_ARCH_X86_64
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_RISCV
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* Ensure the original caller can be scheduled directly. */
    dom = maxDom ? ksCurDomain : 0;
    if (unlikely(!isHighestPrio(dom, caller->tcbPriority))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != caller->tcbAffinity)) {
        slowpath(SysReplyRecv);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &NODE_STATE(ksCurThread)->tcbState, (word_t)ep_ptr,
        ThreadState_BlockedOnReceive);
    thread_state_ptr_set_blockingIPCCanGrant(&NODE_STATE(ksCurThread)->tcbState,
                                             cap_endpoint_cap_get_capCanGrant(ep_cap));

    /* Place the thread in the endpoint queue */
    endpointTail = endpoint_ptr_get_epQueue_tail_fp(ep_ptr);
    if (likely(!endpointTail)) {
        NODE_STATE(ksCurThread)->tcbEPPrev = NULL;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr,
                                             TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = NODE_STATE(ksCurThread);
        NODE_STATE(ksCurThread)->tcbEPPrev = endpointTail;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr,
                                             TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
void
fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysReplyRecv;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (ksCurThread->tcbBoundNotification &&
        notification_ptr_get_state(ksCurThread->tcbBoundNotification) ==
        NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* Check that the caller has not faulted, in which case a fault
       reply is generated instead. */
    fault_type = fault_get_faultType(caller->tcbFault);
    if (unlikely(fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the original caller can be scheduled directly. */
    if (unlikely(caller->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &ksCurThread->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);

    /* Place the thread in the endpoint queue */
    endpointTail = TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr));
    if (likely(!endpointTail)) {
        ksCurThread->tcbEPPrev = NULL;
        ksCurThread->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(ksCurThread));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = ksCurThread;
        ksCurThread->tcbEPPrev = endpointTail;
        ksCurThread->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, ksCurThread, caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
BOOT_CODE bool_t
create_initial_thread(
    cap_t  root_cnode_cap,
    cap_t  it_pd_cap,
    vptr_t ui_v_entry,
    vptr_t bi_frame_vptr,
    vptr_t ipcbuf_vptr,
    cap_t  ipcbuf_cap
)
{
    pptr_t pptr;
    cap_t cap;
    tcb_t *tcb;
    deriveCap_ret_t dc_ret;

    /* allocate TCB */
    pptr = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: Unable to allocate tcb for initial thread\n");
        return false;
    }
    memzero((void *)pptr, 1 << TCB_BLOCK_SIZE_BITS);
    tcb = TCB_PTR(pptr + TCB_OFFSET);
    tcb->tcbTimeSlice = CONFIG_TIME_SLICE;
    Arch_initContext(&tcb->tcbArch.tcbContext);

    /* derive a copy of the IPC buffer cap for inserting */
    dc_ret = deriveCap(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF),
                       ipcbuf_cap);
    if (dc_ret.status != EXCEPTION_NONE) {
        printf("Failed to derive copy of IPC Buffer\n");
        return false;
    }

    /* initialise TCB (corresponds directly to abstract specification) */
    cteInsert(
        root_cnode_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_CNODE),
        SLOT_PTR(pptr, tcbCTable)
    );
    cteInsert(
        it_pd_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE),
        SLOT_PTR(pptr, tcbVTable)
    );
    cteInsert(
        dc_ret.cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF),
        SLOT_PTR(pptr, tcbBuffer)
    );
    tcb->tcbIPCBuffer = ipcbuf_vptr;
    setRegister(tcb, capRegister, bi_frame_vptr);
    setNextPC(tcb, ui_v_entry);

    /* initialise TCB */
    tcb->tcbPriority = seL4_MaxPrio;
    setupReplyMaster(tcb);
    setThreadState(tcb, ThreadState_Running);
    ksSchedulerAction = SchedulerAction_ResumeCurrentThread;
    ksCurThread = ksIdleThread;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
    assert(ksCurDomain < CONFIG_NUM_DOMAINS && ksDomainTime > 0);

    /* initialise current thread pointer */
    switchToThread(tcb); /* initialises ksCurThread */

    /* create initial thread's TCB cap */
    cap = cap_thread_cap_new(TCB_REF(tcb));
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_TCB), cap);

#ifdef DEBUG
    setThreadName(tcb, "rootserver");
#endif

    return true;
}
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;
    dom_t dom;
    word_t replyCanGrant;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* ensure we are not single stepping the destination in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (dest->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysCall);
    }
#endif

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

#ifdef CONFIG_ARCH_X86_64
    /* borrow the stored_hw_asid for PCID */
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID_fp(newVTable);
#endif

#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_RISCV
    /* Get HW ASID */
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* let gcc optimise this out for 1 domain */
    dom = maxDom ? ksCurDomain : 0;
    /* ensure only the idle thread or lower prio threads are present in the scheduler */
    if (likely(dest->tcbPriority < NODE_STATE(ksCurThread->tcbPriority)) &&
        !isHighestPrio(dom, dest->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant or grant-reply rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap) &&
                 !cap_endpoint_cap_get_capCanGrantReply(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != dest->tcbAffinity)) {
        slowpath(SysCall);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&NODE_STATE(ksCurThread)->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    replyCanGrant = thread_state_ptr_get_blockingIPCCanGrant(&dest->tcbState);
    cap_reply_cap_ptr_new_np(&callerSlot->cap, replyCanGrant, 0,
                             TCB_REF(NODE_STATE(ksCurThread)));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
exception_t
decodeTCBConfigure(cap_t cap, unsigned int length, cte_t *slot,
                   extra_caps_t rootCaps, word_t *buffer)
{
    cte_t *bufferSlot, *cRootSlot, *vRootSlot;
    cap_t bufferCap, cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;
    cptr_t faultEP;
    unsigned int prio;
    word_t cRootData, vRootData, bufferAddr;

    if (length < 5 || rootCaps.excaprefs[0] == NULL ||
        rootCaps.excaprefs[1] == NULL ||
        rootCaps.excaprefs[2] == NULL) {
        userError("TCB Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    faultEP = getSyscallArg(0, buffer);
    prio = getSyscallArg(1, buffer);
    cRootData = getSyscallArg(2, buffer);
    vRootData = getSyscallArg(3, buffer);
    bufferAddr = getSyscallArg(4, buffer);

    cRootSlot = rootCaps.excaprefs[0];
    cRootCap = rootCaps.excaprefs[0]->cap;
    vRootSlot = rootCaps.excaprefs[1];
    vRootCap = rootCaps.excaprefs[1]->cap;
    bufferSlot = rootCaps.excaprefs[2];
    bufferCap = rootCaps.excaprefs[2]->cap;

    prio = prio & MASK(8);

    if (prio > ksCurThread->tcbPriority) {
        userError("TCB Configure: Requested priority %d too high (max %d).",
                  (int)prio, (int)ksCurThread->tcbPriority);
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (bufferAddr == 0) {
        bufferSlot = NULL;
    } else {
        exception_t e;

        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;
        e = checkValidIPCBuffer(bufferAddr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB Configure: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap &&
        (!config_set(CONFIG_ALLOW_NULL_CSPACE) ||
         cap_get_capType(cRootCap) != cap_null_cap)) {
        userError("TCB Configure: CSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB Configure: VSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP, prio,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_update_all);
}
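/* Sketch: Configure bundles fault EP, priority, both space roots and the IPC
 * buffer into one invocation, i.e. five message words plus three extra caps,
 * matching the length/excaprefs check above. Old-style libsel4 wrapper
 * signature assumed: */
#include <sel4/sel4.h>

static int configure_thread(seL4_TCB tcb, seL4_Word fault_ep, seL4_Uint8 prio,
                            seL4_CNode croot, seL4_CapData_t croot_data,
                            seL4_CNode vroot, seL4_CapData_t vroot_data,
                            seL4_Word buf_vaddr, seL4_CPtr buf_frame)
{
    /* A buf_vaddr of 0 skips IPC-buffer validation, mirroring the
     * bufferAddr == 0 branch in the decoder. */
    return seL4_TCB_Configure(tcb, fault_ep, prio, croot, croot_data,
                              vroot, vroot_data, buf_vaddr, buf_frame);
}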
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysCall;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the destination has a higher/equal priority to us. */
    if (unlikely(dest->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&ksCurThread->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(ksCurThread, tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    cap_reply_cap_ptr_new_np(&callerSlot->cap, 0, TCB_REF(ksCurThread));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, ksCurThread, dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
exception_t
handleUnknownSyscall(word_t w)
{
#ifdef DEBUG
    if (w == SysDebugPutChar) {
        kernel_putchar(getRegister(ksCurThread, capRegister));
        return EXCEPTION_NONE;
    }
    if (w == SysDebugHalt) {
        printf("Debug halt syscall from user thread 0x%x\n",
               (unsigned int)ksCurThread);
        halt();
    }
    if (w == SysDebugSnapshot) {
        printf("Debug snapshot syscall from user thread 0x%x\n",
               (unsigned int)ksCurThread);
        capDL();
        return EXCEPTION_NONE;
    }
    if (w == SysDebugCapIdentify) {
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        setRegister(ksCurThread, capRegister, cap_type);
        return EXCEPTION_NONE;
    }
    if (w == SysDebugNameThread) {
        /* This is a syscall meant to aid debugging, so if anything goes wrong
         * then assume the system is completely misconfigured and halt */
        const char *name;
        word_t cptr = getRegister(ksCurThread, capRegister);
        lookupCapAndSlot_ret_t lu_ret = lookupCapAndSlot(ksCurThread, cptr);
        /* ensure we got a TCB cap */
        uint32_t cap_type = cap_get_capType(lu_ret.cap);
        if (cap_type != cap_thread_cap) {
            userError("SysDebugNameThread: cap is not a TCB, halting");
            halt();
        }
        /* Add 1 to the IPC buffer to skip the message info word */
        name = (const char *)(lookupIPCBuffer(true, ksCurThread) + 1);
        if (!name) {
            userError("SysDebugNameThread: Failed to lookup IPC buffer, halting");
            halt();
        }
        /* ensure the name isn't too long */
        if (name[strnlen(name, seL4_MsgMaxLength * sizeof(word_t))] != '\0') {
            userError("SysDebugNameThread: Name too long, halting");
            halt();
        }
        setThreadName(TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)), name);
        return EXCEPTION_NONE;
    }
#endif

#ifdef DANGEROUS_CODE_INJECTION
    if (w == SysDebugRun) {
        ((void (*)(void *))getRegister(ksCurThread, capRegister))(
            (void *)getRegister(ksCurThread, msgInfoRegister));
        return EXCEPTION_NONE;
    }
#endif

#ifdef CONFIG_BENCHMARK
    if (w == SysBenchmarkResetLog) {
        ksLogIndex = 0;
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkDumpLog) {
        int i;
        word_t *buffer = lookupIPCBuffer(true, ksCurThread);
        word_t start = getRegister(ksCurThread, capRegister);
        word_t size = getRegister(ksCurThread, msgInfoRegister);
        word_t logSize = ksLogIndex > MAX_LOG_SIZE ? MAX_LOG_SIZE : ksLogIndex;

        if (buffer == NULL) {
            userError("Cannot dump benchmarking log to a thread without an ipc buffer\n");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (start > logSize) {
            userError("Start > logsize\n");
            current_syscall_error.type = seL4_InvalidArgument;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Assume we have access to an ipc buffer 1024 words big.
         * Do not write to the first 4 bytes as these are overwritten */
        if (size > MAX_IPC_BUFFER_STORAGE) {
            size = MAX_IPC_BUFFER_STORAGE;
        }

        /* trim to size */
        if ((start + size) > logSize) {
            size = logSize - start;
        }

        /* write to ipc buffer */
        for (i = 0; i < size; i++) {
            buffer[i + 1] = ksLog[i + start];
        }

        /* Return the amount written */
        setRegister(ksCurThread, capRegister, size);
        return EXCEPTION_NONE;
    } else if (w == SysBenchmarkLogSize) {
        /* Return the number of entries we attempted to log (may exceed max size) */
        setRegister(ksCurThread, capRegister, ksLogIndex);
        return EXCEPTION_NONE;
    }
#endif /* CONFIG_BENCHMARK */

    current_fault = fault_unknown_syscall_new(w);
    handleFault(ksCurThread);

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
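/* Sketch of the SysDebugNameThread convention handled above: the TCB cap
 * goes in capRegister and the NUL-terminated name sits in the IPC buffer
 * just past the message-info word. Debug builds expose this through a
 * libsel4 wrapper along these lines (wrapper name and guard macro assumed): */
#include <sel4/sel4.h>

#ifdef SEL4_DEBUG_KERNEL
static void label_thread(seL4_CPtr tcb_cap)
{
    /* The kernel halts on a bad cap or an over-long name. */
    seL4_DebugNameThread(tcb_cap, "worker-0");
}
#endif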