/* Like getReceiveSlots, this is specialised for single-cap transfer. */
/*
 * Transfer the extra caps of an IPC message to the receiver.
 *
 * A cap to the very endpoint the message was sent over is "unwrapped":
 * only its badge is written into the receive buffer and the matching bit
 * is set in msgCapsUnwrapped.  Every other cap is derived (diminished to
 * remove write rights when 'diminish' is set) and inserted into the single
 * receive slot; destSlot is cleared after one insertion so at most one
 * real cap is transferred.
 *
 * Returns the message info with msgExtraCaps set to the number of caps
 * processed before stopping.
 */
static message_info_t
transferCaps(message_info_t info, extra_caps_t caps,
             endpoint_t *endpoint, tcb_t *receiver,
             word_t *receiveBuffer, bool_t diminish)
{
    unsigned int i;
    cte_t* destSlot;

    info = message_info_set_msgExtraCaps(info, 0);
    info = message_info_set_msgCapsUnwrapped(info, 0);

    /* Fast path: no caps supplied, or receiver has no IPC buffer mapped. */
    if (likely(!caps.excaprefs[0] || !receiveBuffer)) {
        return info;
    }

    destSlot = getReceiveSlots(receiver, receiveBuffer);

    for (i = 0; i < seL4_MsgMaxExtraCaps && caps.excaprefs[i] != NULL; i++) {
        cte_t *slot = caps.excaprefs[i];
        cap_t cap = slot->cap;

        if (cap_get_capType(cap) == cap_endpoint_cap &&
                EP_PTR(cap_endpoint_cap_get_capEPPtr(cap)) == endpoint) {
            /* If this is a cap to the endpoint on which the message was sent,
             * only transfer the badge, not the cap. */
            setExtraBadge(receiveBuffer,
                          cap_endpoint_cap_get_capEPBadge(cap), i);
            info = message_info_set_msgCapsUnwrapped(info,
                                                     message_info_get_msgCapsUnwrapped(info) | (1 << i));
        } else {
            deriveCap_ret_t dc_ret;

            /* Only one receive slot is available; stop once consumed. */
            if (!destSlot) {
                break;
            }

            if (diminish) {
                /* Strip write rights before deriving. */
                dc_ret = deriveCap(slot, maskCapRights(noWrite, cap));
            } else {
                dc_ret = deriveCap(slot, cap);
            }

            /* Stop transferring on the first underivable cap. */
            if (dc_ret.status != EXCEPTION_NONE) {
                break;
            }
            if (cap_get_capType(dc_ret.cap) == cap_null_cap) {
                break;
            }

            cteInsert(dc_ret.cap, slot, destSlot);
            destSlot = NULL;
        }
    }

    return message_info_set_msgExtraCaps(info, i);
}
/*
 * Handle a Recv-style system call by the current thread.
 *
 * Looks up the cap named by capRegister and dispatches to endpoint or
 * notification receive.  Any lookup failure, missing receive right, or a
 * notification bound to a different TCB raises a cap fault on the caller.
 */
static void handleRecv(bool_t isBlocking)
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    epCPtr = getRegister(ksCurThread, capRegister);
    lu_ret = lookupCap(ksCurThread, epCPtr);
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record the invoked cap type for kernel-entry tracing. */
    ksKernelEntry.cap_type = cap_get_capType(lu_ret.cap);
#endif

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        /* Receiving requires the receive right on the endpoint cap. */
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        /* Any outstanding caller cap is invalidated by a fresh receive. */
        deleteCallerCap(ksCurThread);
        receiveIPC(ksCurThread, lu_ret.cap, isBlocking);
        break;

    case cap_notification_cap: {
        notification_t *ntfnPtr;
        tcb_t *boundTCB;
        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(lu_ret.cap));
        boundTCB = (tcb_t*)notification_ptr_get_ntfnBoundTCB(ntfnPtr);
        /* Reject if no receive right, or the notification is bound to a
         * different thread. */
        if (unlikely(!cap_notification_cap_get_capNtfnCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != ksCurThread))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        receiveSignal(ksCurThread, lu_ret.cap, isBlocking);
        break;
    }

    default:
        /* Cap cannot be received on; report as a missing capability. */
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        break;
    }
}
/*
 * Decode and perform an invocation on an IRQ handler cap.
 *
 * IRQAckIRQ acknowledges the interrupt; IRQSetIRQHandler installs the
 * supplied notification cap (which must carry send rights) as the handler;
 * IRQClearIRQHandler removes any installed handler.
 *
 * Returns EXCEPTION_NONE on success, EXCEPTION_SYSCALL_ERROR with
 * current_syscall_error filled in otherwise.
 */
exception_t decodeIRQHandlerInvocation(word_t invLabel, irq_t irq, extra_caps_t excaps)
{
    switch (invLabel) {
    case IRQAckIRQ:
        setThreadState(ksCurThread, ThreadState_Restart);
        invokeIRQHandler_AckIRQ(irq);
        return EXCEPTION_NONE;

    case IRQSetIRQHandler: {
        cap_t ntfnCap;
        cte_t *slot;

        if (excaps.excaprefs[0] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }
        ntfnCap = excaps.excaprefs[0]->cap;
        slot = excaps.excaprefs[0];

        if (cap_get_capType(ntfnCap) != cap_notification_cap ||
                !cap_notification_cap_get_capNtfnCanSend(ntfnCap)) {
            if (cap_get_capType(ntfnCap) != cap_notification_cap) {
                /* Fixed grammar: was "an notification capability". */
                userError("IRQSetHandler: provided cap is not a notification capability.");
            } else {
                /* Fixed message: the rejected cap is a notification, not an
                 * endpoint, so say so. */
                userError("IRQSetHandler: caller does not have send rights on the notification.");
            }
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(ksCurThread, ThreadState_Restart);
        invokeIRQHandler_SetIRQHandler(irq, ntfnCap, slot);
        return EXCEPTION_NONE;
    }

    case IRQClearIRQHandler:
        setThreadState(ksCurThread, ThreadState_Restart);
        invokeIRQHandler_ClearIRQHandler(irq);
        return EXCEPTION_NONE;

    default:
        userError("IRQHandler: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
/*
 * Derive a cap for insertion into a new slot.
 *
 * Architecture-specific caps are delegated to Arch_deriveCap.  Zombie,
 * IRQ-control and reply caps cannot be derived and yield a null cap; all
 * other generic caps derive to themselves.  The status is always
 * EXCEPTION_NONE for generic caps.
 */
deriveCap_ret_t deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t result;

    if (isArchCap(cap)) {
        return Arch_deriveCap(slot, cap);
    }

    result.status = EXCEPTION_NONE;
    switch (cap_get_capType(cap)) {
    case cap_zombie_cap:
    case cap_irq_control_cap:
    case cap_reply_cap:
        /* These cap types are never derivable. */
        result.cap = cap_null_cap_new();
        break;
    default:
        result.cap = cap;
        break;
    }

    return result;
}
/*
 * Locate a thread's IPC buffer in kernel-addressable memory.
 *
 * The buffer is valid only if the thread's buffer slot holds a frame cap
 * with suitable rights: read/write always works; read-only suffices only
 * when the buffer is used for sending (isReceiver false).
 *
 * Returns a pointer to the buffer, or NULL if there is no usable buffer.
 */
word_t* PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    cap_t frameCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;
    word_t bufferOffset = thread->tcbIPCBuffer;

    if (cap_get_capType(frameCap) != cap_frame_cap) {
        return NULL;
    }

    vm_rights_t rights = cap_frame_cap_get_capFVMRights(frameCap);
    if (rights != VMReadWrite && !(rights == VMReadOnly && !isReceiver)) {
        return NULL;
    }

    word_t frameBase = cap_frame_cap_get_capFBasePtr(frameCap);
    unsigned int frameBits = pageBitsForSize(cap_frame_cap_get_capFSize(frameCap));
    /* Offset the stored buffer pointer into the frame. */
    return (word_t *)(frameBase + (bufferOffset & MASK(frameBits)));
}
/*
 * Decode a VCPU SetTCB invocation: bind the TCB named by the first extra
 * cap to the VCPU named by 'cap'.
 *
 * Returns EXCEPTION_NONE after scheduling the invocation, or a syscall
 * error if the message is truncated or the supplied cap does not derive to
 * a thread cap.
 */
exception_t decodeSetTCB(cap_t cap, unsigned int length, word_t* buffer, extra_caps_t extraCaps)
{
    cte_t *suppliedSlot;
    deriveCap_ret_t derived;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    suppliedSlot = extraCaps.excaprefs[0];

    /* Derive so the VCPU binding does not alias an underivable cap. */
    derived = deriveCap(suppliedSlot, suppliedSlot->cap);
    if (derived.status != EXCEPTION_NONE) {
        return derived.status;
    }
    if (cap_get_capType(derived.cap) != cap_thread_cap) {
        userError("TCB cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)),
                        TCB_PTR(cap_thread_cap_get_capTCBPtr(derived.cap)));
}
/*
 * Decode a TCB BindAEP invocation: bind the async endpoint named by the
 * first extra cap to the TCB named by 'cap'.
 *
 * Fails if the message carries no extra cap, the cap is not an async
 * endpoint, the TCB is already bound, or the AEP is in use (has queued
 * waiters — the queue head doubling as the bound-TCB field must be empty).
 */
exception_t decodeBindAEP(cap_t cap, extra_caps_t extraCaps)
{
    async_endpoint_t *aepptr;
    tcb_t *tcb;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("TCB BindAEP: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(extraCaps.excaprefs[0]->cap) != cap_async_endpoint_cap) {
        userError("TCB BindAEP: Async endpoint is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));

    /* A TCB can have at most one bound AEP. */
    if (tcb->boundAsyncEndpoint) {
        userError("TCB BindAEP: TCB already has AEP.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    aepptr = AEP_PTR(cap_async_endpoint_cap_get_capAEPPtr(extraCaps.excaprefs[0]->cap));
    /* An AEP with a non-empty queue cannot be bound. */
    if ((tcb_t*)async_endpoint_ptr_get_aepQueue_head(aepptr)) {
        userError("TCB BindAEP: AEP cannot be bound.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_AEPControl(tcb, aepptr);
}
void benchmark_track_utilisation_dump(void) { uint64_t *buffer = ((uint64_t *) & (((seL4_IPCBuffer *)lookupIPCBuffer(true, NODE_STATE(ksCurThread)))->msg[0])); tcb_t *tcb = NULL; word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister); lookupCap_ret_t lu_ret; word_t cap_type; lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr); /* ensure we got a TCB cap */ cap_type = cap_get_capType(lu_ret.cap); if (cap_type != cap_thread_cap) { userError("SysBenchmarkFinalizeLog: cap is not a TCB, halting"); return; } tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)); buffer[BENCHMARK_TCB_UTILISATION] = tcb->benchmark.utilisation; /* Requested thread utilisation */ buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION] = NODE_STATE( ksIdleThread)->benchmark.utilisation; /* Idle thread utilisation of current CPU */ #ifdef ENABLE_SMP_SUPPORT buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] = NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity)->benchmark.utilisation; /* Idle thread utilisation of CPU the TCB is running on */ #else buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] = buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION]; #endif #ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT buffer[BENCHMARK_TOTAL_UTILISATION] = (ccnt_num_overflows * 0xFFFFFFFFU) + benchmark_end_time - benchmark_start_time; #else buffer[BENCHMARK_TOTAL_UTILISATION] = benchmark_end_time - benchmark_start_time; /* Overall time */ #endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */ }
/*
 * Transfer a reply from 'sender' to a 'receiver' blocked on a reply cap in
 * 'slot', then consume the reply cap.
 *
 * Non-faulting receivers get a normal message transfer and are resumed.
 * Faulting receivers have the fault reply applied instead; they restart
 * only if handleFaultReply reports the fault resolved, otherwise they are
 * left Inactive.
 */
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot)
{
    assert(thread_state_get_tsType(receiver->tcbState) ==
           ThreadState_BlockedOnReply);

    if (likely(fault_get_faultType(receiver->tcbFault) == fault_null_fault)) {
        /* Ordinary reply: no endpoint, badge 0, grant allowed, no diminish. */
        doIPCTransfer(sender, NULL, 0, true, receiver, false);
        setThreadState(receiver, ThreadState_Running);
        attemptSwitchTo(receiver);
    } else {
        bool_t restart;

        restart = handleFaultReply(receiver, sender);
        /* Clear the stored fault regardless of the outcome. */
        fault_null_fault_ptr_new(&receiver->tcbFault);
        if (restart) {
            setThreadState(receiver, ThreadState_Restart);
            attemptSwitchTo(receiver);
        } else {
            setThreadState(receiver, ThreadState_Inactive);
        }
    }

    if (cap_reply_cap_get_capInCDT(slot->cap)) {
        /* The reply cap lives in the capability derivation tree: remove
         * both the invoked cap and the receiver's master reply cap from the
         * CDT, then reinstate a fresh master reply cap in the receiver's
         * reply slot. */
        cte_t *replySlot = TCB_PTR_CTE_PTR(receiver, tcbReply);
        assert(cap_get_capType(replySlot->cap) == cap_reply_cap);
        assert(cap_reply_cap_get_capInCDT(replySlot->cap));
        cdtRemove(replySlot);
        cdtRemove(slot);
        slot->cap = cap_null_cap_new();
        replySlot->cap = cap_reply_cap_new(false, true, TCB_REF(NULL));
    } else {
        /* Otherwise the reply cap sits in a caller slot; delete it there. */
        deleteCallerCap(sender);
    }
}
/*
 * Apply new cap data to an architecture cap (IOMMU build).
 *
 * Only IO-space caps are affected: the data word encodes a PCI device and
 * an IO domain ID, and the update succeeds only for a fresh (device 0,
 * non-preserving) cap with a domain ID in the valid range — otherwise a
 * null cap is returned.  All other cap types pass through unchanged.
 */
cap_t CONST Arch_updateCapData(bool_t preserve, word_t data, cap_t cap)
{
    /* Avoid a switch statement with just a 'default' case as the C parser does not like this */
#ifdef CONFIG_IOMMU
    switch (cap_get_capType(cap)) {
    case cap_io_space_cap: {
        io_space_capdata_t w = { { data } };
        uint16_t PCIDevice = io_space_capdata_get_PCIDevice(w);
        uint16_t domainID = io_space_capdata_get_domainID(w);
        /* The update is only legal for an unassigned cap with a domain ID
         * inside the platform's valid IO-domain window. */
        if (!preserve && cap_io_space_cap_get_capPCIDevice(cap) == 0 &&
                domainID >= x86KSFirstValidIODomain &&
                domainID != 0 &&
                domainID <= MASK(x86KSnumIODomainIDBits)) {
            return cap_io_space_cap_new(domainID, PCIDevice);
        } else {
            return cap_null_cap_new();
        }
    }
    default:
        return cap;
    }
#endif
    /* Reached only when CONFIG_IOMMU is off: nothing to update. */
    return cap;
}
/*
 * Handle a Reply system call: send a reply through the caller cap stored
 * in the current thread's caller slot.
 *
 * A master reply cap or a null cap makes the reply a silent no-op (the
 * master case breaks out and is caught by the trailing fail on any other
 * unexpected cap type); a real reply cap triggers the reply transfer.
 */
static void handleReply(void)
{
    cte_t *callerSlot;
    cap_t callerCap;

    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    switch (cap_get_capType(callerCap)) {
    case cap_reply_cap: {
        tcb_t *caller;

        /* Master reply caps cannot be replied to; fall through to fail. */
        if (cap_reply_cap_get_capReplyMaster(callerCap)) {
            break;
        }
        caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));
        /* Haskell error:
         * "handleReply: caller must not be the current thread" */
        assert(caller != ksCurThread);
        doReplyTransfer(ksCurThread, caller, callerSlot);
        return;
    }

    case cap_null_cap:
        /* No one to reply to: nothing happens. */
        userError("Attempted reply operation when no reply cap present.");
        return;

    default:
        break;
    }

    fail("handleReply: invalid caller cap");
}
/*
 * Decode a TCB SetEPTRoot invocation: install the EPT PDPT named by the
 * first extra cap as the TCB's EPT root.
 *
 * The existing root cap is deleted first; if that fails its error is
 * propagated.  The new cap is inserted as a child of the supplied slot.
 */
exception_t decodeSetEPTRoot(cap_t cap, extra_caps_t extraCaps)
{
    tcb_t *tcb;
    cte_t *rootSlot;
    exception_t e;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("TCB SetEPTRoot: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(extraCaps.excaprefs[0]->cap) != cap_ept_page_directory_pointer_table_cap) {
        userError("TCB SetEPTRoot: EPT PDPT is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    rootSlot = TCB_PTR_CTE_PTR(tcb, tcbArchEPTRoot);
    /* Clear out the old EPT root before installing the new one. */
    e = cteDelete(rootSlot, true);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    cteInsert(extraCaps.excaprefs[0]->cap, extraCaps.excaprefs[0], rootSlot);

    setThreadState(ksCurThread, ThreadState_Restart);
    return EXCEPTION_NONE;
}
/*
 * Install a one-shot reply (caller) cap for Call-style IPC.
 *
 * Blocks the sender on reply, records the receiver's caller slot in the
 * sender's master reply cap (so the caller cap can be found and revoked
 * later), and places a normal reply cap to the sender in the receiver's
 * caller slot.
 */
void setupCallerCap(tcb_t *sender, tcb_t *receiver)
{
    cte_t *replySlot, *callerSlot;
    cap_t masterCap UNUSED, callerCap UNUSED;

    setThreadState(sender, ThreadState_BlockedOnReply);
    replySlot = TCB_PTR_CTE_PTR(sender, tcbReply);
    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    masterCap = replySlot->cap;
    /* Haskell error: "Sender must have a valid master reply cap" */
    assert(cap_get_capType(masterCap) == cap_reply_cap);
    assert(cap_reply_cap_get_capReplyMaster(masterCap));
    assert(TCB_PTR(cap_reply_cap_get_capTCBPtr(masterCap)) == NULL);
    /* Point the master reply cap at the slot the caller cap goes into. */
    cap_reply_cap_ptr_set_capCallerSlot(&replySlot->cap, CTE_REF(callerSlot));
    callerCap = callerSlot->cap;
    /* Haskell error: "Caller cap must not already exist" */
    assert(cap_get_capType(callerCap) == cap_null_cap);
    /* Non-master reply cap naming the sender. */
    callerSlot->cap = cap_reply_cap_new(CTE_REF(NULL), false, TCB_REF(sender));
}
/*
 * Derive an architecture-defined cap for insertion into a new slot.
 *
 * Page tables and page directories may only be derived while mapped;
 * deriving an unmapped one is a syscall error.  Frame caps derive to an
 * unmapped copy (ASID cleared).  ASID control/pool caps derive to
 * themselves.  Any other type is a kernel bug.
 */
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t result;

    switch (cap_get_capType(cap)) {
    case cap_page_table_cap:
        if (!cap_page_table_cap_get_capPTIsMapped(cap)) {
            userError("Deriving an unmapped PT cap");
            current_syscall_error.type = seL4_IllegalOperation;
            result.cap = cap_null_cap_new();
            result.status = EXCEPTION_SYSCALL_ERROR;
        } else {
            result.cap = cap;
            result.status = EXCEPTION_NONE;
        }
        break;

    case cap_page_directory_cap:
        if (!cap_page_directory_cap_get_capPDIsMapped(cap)) {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            result.cap = cap_null_cap_new();
            result.status = EXCEPTION_SYSCALL_ERROR;
        } else {
            result.cap = cap;
            result.status = EXCEPTION_NONE;
        }
        break;

    /* This is a deviation from haskell, which has only
     * one frame cap type on ARM */
    case cap_small_frame_cap:
        result.cap = cap_small_frame_cap_set_capFMappedASID(cap, asidInvalid);
        result.status = EXCEPTION_NONE;
        break;

    case cap_frame_cap:
        result.cap = cap_frame_cap_set_capFMappedASID(cap, asidInvalid);
        result.status = EXCEPTION_NONE;
        break;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        result.cap = cap;
        result.status = EXCEPTION_NONE;
        break;

    default:
        /* This assert has no equivalent in haskell,
         * as the options are restricted by type */
        fail("Invalid arch cap");
    }

    return result;
}
/*
 * Remove the reply cap (if any) from a thread's caller slot.
 *
 * The cap is finalised first so the blocked caller is unblocked, then the
 * slot is cleared.  A slot not holding a reply cap is left untouched.
 */
void deleteCallerCap(tcb_t *receiver)
{
    cte_t *slot = TCB_PTR_CTE_PTR(receiver, tcbCaller);

    if (cap_get_capType(slot->cap) != cap_reply_cap) {
        return;
    }
    finaliseCap(slot->cap, true, true);
    slot->cap = cap_null_cap_new();
}
/*
 * Apply a rights mask to an architecture cap.
 *
 * Only frame caps carry maskable rights; the mask is intersected with the
 * cap's VM rights.  Every other arch cap is returned unchanged.
 */
cap_t CONST Arch_maskCapRights(seL4_CapRights_t cap_rights_mask, cap_t cap)
{
    vm_rights_t masked;

    if (cap_get_capType(cap) != cap_frame_cap) {
        return cap;
    }

    masked = maskVMRights(vmRightsFromWord(cap_frame_cap_get_capFVMRights(cap)),
                          cap_rights_mask);
    return cap_frame_cap_set_capFVMRights(cap, wordFromVMRights(masked));
}
/*
 * Handle a Wait-style system call by the current thread.
 *
 * Looks up the cap named by capRegister and dispatches to endpoint or
 * async-endpoint receive.  Lookup failures, missing receive rights, a
 * non-blocking endpoint wait, or an AEP bound to a different thread all
 * raise a cap fault on the caller.
 */
static void handleWait(bool_t isBlocking)
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    epCPtr = getRegister(ksCurThread, capRegister);
    lu_ret = lookupCap(ksCurThread, epCPtr);
    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        /* Endpoint waits must be blocking and carry the receive right. */
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap) || !isBlocking)) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        /* A fresh receive invalidates any outstanding caller cap. */
        deleteCallerCap(ksCurThread);
        receiveIPC(ksCurThread, lu_ret.cap);
        break;

    case cap_async_endpoint_cap: {
        async_endpoint_t *aepptr;
        tcb_t *boundTCB;
        aepptr = AEP_PTR(cap_async_endpoint_cap_get_capAEPPtr(lu_ret.cap));
        boundTCB = (tcb_t*)async_endpoint_ptr_get_aepBoundTCB(aepptr);
        /* Reject if no receive right, or the AEP is bound elsewhere. */
        if (unlikely(!cap_async_endpoint_cap_get_capAEPCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != ksCurThread))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        receiveAsyncIPC(ksCurThread, lu_ret.cap, isBlocking);
        break;
    }

    default:
        /* Cap cannot be waited on; report as a missing capability. */
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        break;
    }
}
/*
 * Handle a Reply system call: send a reply through the caller cap stored
 * in the current thread's caller slot.
 *
 * A null cap makes the reply a silent no-op; a master reply cap (or any
 * other unexpected cap type) hits the trailing fail; a real reply cap
 * triggers the reply transfer to the blocked caller.
 */
static void handleReply(void)
{
    cte_t *callerSlot;
    cap_t callerCap;

    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    /* Record the invoked cap type for kernel-entry tracing. */
    ksKernelEntry.cap_type = cap_get_capType(callerCap);
#endif

    switch (cap_get_capType(callerCap)) {
    case cap_reply_cap: {
        tcb_t *caller;

        /* Master reply caps cannot be replied to; fall through to fail. */
        if (cap_reply_cap_get_capReplyMaster(callerCap)) {
            break;
        }
        caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));
        /* Haskell error:
         * "handleReply: caller must not be the current thread" */
        assert(caller != ksCurThread);
        doReplyTransfer(ksCurThread, caller, callerSlot);
        return;
    }

    case cap_null_cap:
        /* No one to reply to: nothing happens. */
        userError("Attempted reply operation when no reply cap present.");
        return;

    default:
        break;
    }

    fail("handleReply: invalid caller cap");
}
/*
 * Debug-dump a CNode: for every non-null slot in the 2^sizebits slot
 * array at 'address', send the slot index, the raw cap words, and the
 * slot's MDB parent pointer over the debug channel.
 *
 * Fix: the slot count was computed as "1 << sizebits", a signed-int
 * shift that overflows (undefined behavior) for sizebits >= 31 on
 * platforms with 32-bit int.  Use an unsigned long shift to match the
 * loop counter's type.
 */
static void sendCNode(unsigned long address, unsigned int sizebits)
{
    unsigned long i;
    unsigned long nslots;
    cte_t *start = (cte_t *)address;

    nslots = 1UL << sizebits;

    for (i = 0; i < nslots; i++) {
        cap_t cap = start[i].cap;
        if (cap_get_capType(cap) != cap_null_cap) {
            cte_t *parent = getMDBParent(&start[i]);
            sendWord(i);
            sendWord(cap.words[0]);
            sendWord(cap.words[1]);
            sendWord((unsigned long)parent);
        }
    }
}
/*
 * Create the initial thread's page-directory cap for the PD at 'pptr',
 * mapped at virtual address 'vptr' under IT_ASID.
 *
 * If a real ASID and a non-null vspace cap are supplied, the PD is also
 * mapped into the initial address space.  Returns the new cap.
 */
static BOOT_CODE cap_t create_it_page_directory_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t pd_cap = cap_page_directory_cap_new(
                       true,    /* capPDIsMapped      */
                       IT_ASID, /* capPDMappedASID    */
                       vptr,    /* capPDMappedAddress */
                       pptr     /* capPDBasePtr       */
                   );

    if (asid != asidInvalid && cap_get_capType(vspace_cap) != cap_null_cap) {
        map_it_pd_cap(vspace_cap, pd_cap);
    }

    return pd_cap;
}
/*
 * Handle a Wait system call (always-blocking variant).
 *
 * Deletes any outstanding caller cap up front, then looks up the cap named
 * by capRegister and dispatches to endpoint or async-endpoint receive.
 * Lookup failures and missing receive rights raise a cap fault.
 */
static void handleWait(void)
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    /* A fresh wait invalidates any outstanding caller cap. */
    deleteCallerCap(ksCurThread);

    epCPtr = getRegister(ksCurThread, capRegister);
    lu_ret = lookupCap(ksCurThread, epCPtr);
    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        receiveIPC(ksCurThread, lu_ret.cap);
        break;

    case cap_async_endpoint_cap:
        if (unlikely(!cap_async_endpoint_cap_get_capAEPCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }
        receiveAsyncIPC(ksCurThread, lu_ret.cap);
        break;

    default:
        /* Cap cannot be waited on; report as a missing capability. */
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        break;
    }
}
/*
 * Apply new cap data to an x86 architecture cap.
 *
 * IO-space caps (IOMMU builds): the data word encodes a PCI device and an
 * IO domain ID; the update succeeds only when the cap is fresh, the VT-d
 * root entry for the bus is present, the context entry is free (or not
 * pass-through), and the domain ID is in range — else a null cap results.
 * IO-port caps: the data word encodes a new port range, which must be
 * well-ordered and contained within the existing range — else a null cap.
 * All other cap types pass through unchanged.
 */
cap_t CONST Arch_updateCapData(bool_t preserve, word_t data, cap_t cap)
{
    switch (cap_get_capType(cap)) {
#ifdef CONFIG_IOMMU
    case cap_io_space_cap: {
        io_space_capdata_t w = { { data } };
        uint16_t PCIDevice = io_space_capdata_get_PCIDevice(w);
        uint16_t domainID = io_space_capdata_get_domainID(w);
        /* Context table for the PCI bus encoded in the data word. */
        vtd_cte_t *vtd_context_table =
            (vtd_cte_t*)vtd_rte_get_ctp(ia32KSvtdRootTable[get_pci_bus(data)]);

        if (!preserve && cap_io_space_cap_get_capPCIDevice(cap) == 0 &&
                vtd_rte_get_present(ia32KSvtdRootTable[get_pci_bus(data)]) &&
                (!vtd_cte_get_present(vtd_context_table[PCIDevice & 0xff]) ||
                 vtd_cte_get_translation_type(vtd_context_table[PCIDevice & 0xff]) != 2) &&
                domainID <= MASK(ia32KSnumIODomainIDBits)) {
            return cap_io_space_cap_new(domainID, PCIDevice);
        } else {
            return cap_null_cap_new();
        }
    }
#endif
    case cap_io_port_cap: {
        io_port_capdata_t w = { .words = { data } };
        uint16_t firstPort = io_port_capdata_get_firstPort(w);
        uint16_t lastPort = io_port_capdata_get_lastPort(w);
        uint16_t capFirstPort = cap_io_port_cap_get_capIOPortFirstPort(cap);
        uint16_t capLastPort = cap_io_port_cap_get_capIOPortLastPort(cap);
        assert(capFirstPort <= capLastPort);

        /* Ensure input data is ordered correctly. */
        if (firstPort > lastPort) {
            return cap_null_cap_new();
        }

        /* Allow the update if the new cap has range no larger than the old
         * cap. */
        if ((firstPort >= capFirstPort) && (lastPort <= capLastPort)) {
            return cap_io_port_cap_new(firstPort, lastPort);
        } else {
            return cap_null_cap_new();
        }
    }
    default:
        return cap;
    }
}
/*
 * Decode an invocation on the domain cap: assign a TCB (first extra cap)
 * to a scheduling domain (first message word).
 *
 * Fails for any label other than DomainSetSet, a truncated message, a
 * domain index out of range, or a non-thread extra cap.
 */
exception_t decodeDomainInvocation(word_t label, unsigned int length, extra_caps_t extraCaps, word_t *buffer)
{
    word_t domain;
    cap_t tcap;

    if (unlikely(label != DomainSetSet)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length == 0)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    domain = getSyscallArg(0, buffer);
    if (domain >= CONFIG_NUM_DOMAINS) {
        userError("Domain Configure: invalid domain (%u >= %u).",
                  domain, CONFIG_NUM_DOMAINS);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(extraCaps.excaprefs[0] == NULL)) {
        userError("Domain Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    tcap = extraCaps.excaprefs[0]->cap;
    if (unlikely(cap_get_capType(tcap) != cap_thread_cap)) {
        userError("Domain Configure: thread cap required.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    setDomain(TCB_PTR(cap_thread_cap_get_capTCBPtr(tcap)), domain);
    return EXCEPTION_NONE;
}
void benchmark_track_reset_utilisation(void) { tcb_t *tcb = NULL; word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister); lookupCap_ret_t lu_ret; word_t cap_type; lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr); /* ensure we got a TCB cap */ cap_type = cap_get_capType(lu_ret.cap); if (cap_type != cap_thread_cap) { userError("SysBenchmarkResetThreadUtilisation: cap is not a TCB, halting"); return; } tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap)); tcb->benchmark.utilisation = 0; tcb->benchmark.schedule_start_time = 0; }
/*
 * Decode a TCB CopyRegisters invocation.
 *
 * The first message word carries flag bits selecting suspend/resume and
 * which register groups to copy; its high byte is passed to the
 * architecture decoder.  The first extra cap names the source TCB.
 */
exception_t decodeCopyRegisters(cap_t cap, unsigned int length, extra_caps_t extraCaps, word_t *buffer)
{
    word_t ctrlFlags;
    word_t archFlags;
    cap_t srcCap;
    tcb_t *sourceThread;

    if (length < 1 || extraCaps.excaprefs[0] == NULL) {
        userError("TCB CopyRegisters: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    ctrlFlags = getSyscallArg(0, buffer);
    /* Architecture-specific transfer bits live in the high byte. */
    archFlags = Arch_decodeTransfer(ctrlFlags >> 8);

    srcCap = extraCaps.excaprefs[0]->cap;
    if (cap_get_capType(srcCap) != cap_thread_cap) {
        userError("TCB CopyRegisters: Invalid source TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    sourceThread = TCB_PTR(cap_thread_cap_get_capTCBPtr(srcCap));

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_CopyRegisters(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), sourceThread,
               ctrlFlags & BIT(CopyRegisters_suspendSource),
               ctrlFlags & BIT(CopyRegisters_resumeTarget),
               ctrlFlags & BIT(CopyRegisters_transferFrame),
               ctrlFlags & BIT(CopyRegisters_transferInteger),
               archFlags);
}
/*
 * Receive a message on an endpoint (always-blocking, AEP-era variant).
 *
 * If the thread's bound async endpoint is active, that signal is delivered
 * instead.  Otherwise: on an idle/receiving endpoint the thread blocks and
 * is queued; on a sending endpoint the head sender is dequeued and its
 * message transferred immediately.  Call-style senders (and faulting
 * senders) then either get a caller cap (if grant is possible and the
 * receive cap does not diminish rights) or become Inactive.
 */
void receiveIPC(tcb_t *thread, cap_t cap)
{
    endpoint_t *epptr;
    bool_t diminish;
    async_endpoint_t *aepptr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));
    /* A receive cap without send rights "diminishes": transferred caps
     * have write rights masked off. */
    diminish = !cap_endpoint_cap_get_capCanSend(cap);

    /* Check for anything waiting in the async endpoint */
    aepptr = thread->boundAsyncEndpoint;
    if (aepptr && async_endpoint_ptr_get_state(aepptr) == AEPState_Active) {
        completeAsyncIPC(aepptr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnReceive */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnReceive);
            thread_state_ptr_set_blockingIPCEndpoint(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCDiminishCaps(
                &thread->tcbState, diminish);
            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Recv);
            ep_ptr_set_queue(epptr, queue);
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);
            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant = thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread, diminish);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);
            if (do_call ||
                    fault_get_faultType(sender->tcbFault) != fault_null_fault) {
                /* Callers and faulting senders wait for a reply — but a
                 * caller cap can only be set up when grant is possible and
                 * the receive cap does not diminish rights. */
                if (canGrant && !diminish) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }
            break;
        }
        }
    }
}
/*
 * Construct the initial kernel and userland system state at boot on x86:
 * root CNode, IO port cap, domain cap, IRQ state, initial address space,
 * bootinfo and IPC-buffer frames, userland image frames, ASID pool, FPU
 * null state, idle and initial threads, optional IOMMU state, untyped
 * objects and device frames.
 *
 * Returns false as soon as any allocation or sub-initialisation fails.
 */
BOOT_CODE bool_t init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list
)
{
    cap_t         root_cnode_cap;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
#ifdef CONFIG_ENABLE_BENCHMARKS
    vm_attributes_t buffer_attr = {{ 0 }};
    word_t paddr;
    pde_t pde;
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    /* IPC buffer and bootinfo frames sit directly above the user image. */
    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state()) {
        return false;
    }

#ifdef CONFIG_ENABLE_BENCHMARKS
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;
    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(X86_LargePage)));
    /* allocate a large frame for logging */
    pde = x86_make_pde_mapping(paddr, buffer_attr);
    ia32KSGlobalPD[IA32_KSLOG_IDX] = pde;

    /* flush the tlb */
    invalidateTranslationAll();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
#if CONFIG_MAX_NUM_TRACE_POINTS > 0
    printf("Testing log\n");
    ksLog[0].data = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0].data);
    assert(ksLog[0].data == 0xdeadbeef);
#endif /* CONFIG_MAX_NUM_TRACE_POINTS */
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap covering the full port range */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,               /* first port */
            NUM_IO_PORTS - 1 /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, 1, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&x86KSnullFpuState);
    x86KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
/* Receive a message on an endpoint.
 *
 * thread     - the receiving TCB (the current thread in practice).
 * cap        - must be an endpoint cap (asserted below).
 * isBlocking - if true, block on an idle/receiving endpoint; if false,
 *              fail the transfer immediately instead of blocking.
 *
 * A pending signal on the thread's bound notification takes priority over
 * the endpoint: if the bound notification is Active, the signal is
 * delivered and the endpoint is not touched.
 */
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking)
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        /* Bound notification has a pending signal: deliver it instead of
         * receiving from the endpoint. */
        completeSignal(ntfnPtr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            /* No sender available: either block on the endpoint or fail. */
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));
                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                /* Non-blocking receive with no sender: deliver a failed
                 * (empty) transfer result to the caller. */
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            /* At least one sender is queued: complete the transfer now. */
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                /* Last queued sender removed: endpoint goes back to Idle. */
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            if (do_call ||
                    seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                /* Sender performed a Call (or is faulting): it must wait for
                 * a reply. With grant rights a caller (reply) cap is set up
                 * in the receiver; otherwise the sender is left Inactive. */
                if (canGrant) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                /* Plain Send: the sender can continue running. */
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }
            break;
        }
        }
    }
}
exception_t decodeSetSpace(cap_t cap, unsigned int length, cte_t* slot, extra_caps_t extraCaps, word_t *buffer) { cptr_t faultEP; word_t cRootData, vRootData; cte_t *cRootSlot, *vRootSlot; cap_t cRootCap, vRootCap; deriveCap_ret_t dc_ret; if (length < 3 || extraCaps.excaprefs[0] == NULL || extraCaps.excaprefs[1] == NULL) { userError("TCB SetSpace: Truncated message."); current_syscall_error.type = seL4_TruncatedMessage; return EXCEPTION_SYSCALL_ERROR; } faultEP = getSyscallArg(0, buffer); cRootData = getSyscallArg(1, buffer); vRootData = getSyscallArg(2, buffer); cRootSlot = extraCaps.excaprefs[0]; cRootCap = extraCaps.excaprefs[0]->cap; vRootSlot = extraCaps.excaprefs[1]; vRootCap = extraCaps.excaprefs[1]->cap; if (slotCapLongRunningDelete( TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) || slotCapLongRunningDelete( TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) { userError("TCB SetSpace: CSpace or VSpace currently being deleted."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } if (cRootData != 0) { cRootCap = updateCapData(false, cRootData, cRootCap); } dc_ret = deriveCap(cRootSlot, cRootCap); if (dc_ret.status != EXCEPTION_NONE) { return dc_ret.status; } cRootCap = dc_ret.cap; if (cap_get_capType(cRootCap) != cap_cnode_cap && (!config_set(CONFIG_ALLOW_NULL_CSPACE) || cap_get_capType(cRootCap) != cap_null_cap)) { userError("TCB SetSpace: Invalid CNode cap."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } if (vRootData != 0) { vRootCap = updateCapData(false, vRootData, vRootCap); } dc_ret = deriveCap(vRootSlot, vRootCap); if (dc_ret.status != EXCEPTION_NONE) { return dc_ret.status; } vRootCap = dc_ret.cap; if (!isValidVTableRoot(vRootCap)) { userError("TCB SetSpace: Invalid VSpace cap."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } setThreadState(ksCurThread, ThreadState_Restart); return invokeTCB_ThreadControl( 
TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot, faultEP, 0, /* used to be prioInvalid, but it doesn't matter */ cRootCap, cRootSlot, vRootCap, vRootSlot, 0, cap_null_cap_new(), NULL, thread_control_update_space); }
exception_t decodeTCBConfigure(cap_t cap, unsigned int length, cte_t* slot, extra_caps_t rootCaps, word_t *buffer) { cte_t *bufferSlot, *cRootSlot, *vRootSlot; cap_t bufferCap, cRootCap, vRootCap; deriveCap_ret_t dc_ret; cptr_t faultEP; unsigned int prio; word_t cRootData, vRootData, bufferAddr; if (length < 5 || rootCaps.excaprefs[0] == NULL || rootCaps.excaprefs[1] == NULL || rootCaps.excaprefs[2] == NULL) { userError("TCB Configure: Truncated message."); current_syscall_error.type = seL4_TruncatedMessage; return EXCEPTION_SYSCALL_ERROR; } faultEP = getSyscallArg(0, buffer); prio = getSyscallArg(1, buffer); cRootData = getSyscallArg(2, buffer); vRootData = getSyscallArg(3, buffer); bufferAddr = getSyscallArg(4, buffer); cRootSlot = rootCaps.excaprefs[0]; cRootCap = rootCaps.excaprefs[0]->cap; vRootSlot = rootCaps.excaprefs[1]; vRootCap = rootCaps.excaprefs[1]->cap; bufferSlot = rootCaps.excaprefs[2]; bufferCap = rootCaps.excaprefs[2]->cap; prio = prio & MASK(8); if (prio > ksCurThread->tcbPriority) { userError("TCB Configure: Requested priority %d too high (max %d).", (int)prio, (int)(ksCurThread->tcbPriority)); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } if (bufferAddr == 0) { bufferSlot = NULL; } else { exception_t e; dc_ret = deriveCap(bufferSlot, bufferCap); if (dc_ret.status != EXCEPTION_NONE) { return dc_ret.status; } bufferCap = dc_ret.cap; e = checkValidIPCBuffer(bufferAddr, bufferCap); if (e != EXCEPTION_NONE) { return e; } } if (slotCapLongRunningDelete( TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) || slotCapLongRunningDelete( TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) { userError("TCB Configure: CSpace or VSpace currently being deleted."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } if (cRootData != 0) { cRootCap = updateCapData(false, cRootData, cRootCap); } dc_ret = deriveCap(cRootSlot, cRootCap); if (dc_ret.status != 
EXCEPTION_NONE) { return dc_ret.status; } cRootCap = dc_ret.cap; if (cap_get_capType(cRootCap) != cap_cnode_cap && (!config_set(CONFIG_ALLOW_NULL_CSPACE) || cap_get_capType(cRootCap) != cap_null_cap)) { userError("TCB Configure: CSpace cap is invalid."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } if (vRootData != 0) { vRootCap = updateCapData(false, vRootData, vRootCap); } dc_ret = deriveCap(vRootSlot, vRootCap); if (dc_ret.status != EXCEPTION_NONE) { return dc_ret.status; } vRootCap = dc_ret.cap; if (!isValidVTableRoot(vRootCap)) { userError("TCB Configure: VSpace cap is invalid."); current_syscall_error.type = seL4_IllegalOperation; return EXCEPTION_SYSCALL_ERROR; } setThreadState(ksCurThread, ThreadState_Restart); return invokeTCB_ThreadControl( TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot, faultEP, prio, cRootCap, cRootSlot, vRootCap, vRootSlot, bufferAddr, bufferCap, bufferSlot, thread_control_update_all); }