deriveCap_ret_t deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    if (isArchCap(cap)) {
        return Arch_deriveCap(slot, cap);
    }

    switch (cap_get_capType(cap)) {
    /* Zombie, IRQ-control and reply caps cannot be copied;
     * they all derive to a null cap. */
    case cap_zombie_cap:
    case cap_irq_control_cap:
    case cap_reply_cap:
        ret.status = EXCEPTION_NONE;
        ret.cap = cap_null_cap_new();
        break;

    default:
        ret.status = EXCEPTION_NONE;
        ret.cap = cap;
    }

    return ret;
}
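/*
 * Usage sketch (illustrative, not kernel source): deriveCap() is the
 * checkpoint run before a copy of a cap is inserted into a destination slot,
 * e.g. on the CNode Copy/Mint path. The helper below and its error handling
 * are assumptions following the surrounding kernel conventions.
 */
static exception_t copy_cap_sketch(cte_t *srcSlot, cte_t *destSlot)
{
    deriveCap_ret_t dc_ret = deriveCap(srcSlot, srcSlot->cap);
    if (dc_ret.status != EXCEPTION_NONE) {
        /* e.g. deriving an unmapped page table cap fails */
        return dc_ret.status;
    }
    if (cap_get_capType(dc_ret.cap) == cap_null_cap) {
        /* zombie, IRQ-control and reply caps derive to null: nothing to copy */
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
    cteInsert(dc_ret.cap, srcSlot, destSlot);
    return EXCEPTION_NONE;
}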
exception_t decodeSetPriority(cap_t cap, unsigned int length, word_t *buffer)
{
    prio_t newPrio;

    if (length < 1) {
        userError("TCB SetPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    newPrio = getSyscallArg(0, buffer);

    /* assuming here seL4_MaxPrio is of form 2^n - 1 */
    newPrio = newPrio & MASK(8);

    if (newPrio > ksCurThread->tcbPriority) {
        userError("TCB SetPriority: Requested priority %d too high (max %d).",
                  (int)newPrio, (int)ksCurThread->tcbPriority);
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, newPrio,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_priority);
}
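/*
 * Sketch of the masking arithmetic above (illustrative; assumes
 * seL4_MaxPrio == 255 and the kernel's compile_assert macro are visible
 * here). MASK(8) == 0xff, so an out-of-range argument such as 0x1fe wraps to
 * 0xfe rather than being rejected; only the explicit ceiling check against
 * the current thread's priority can then fail. If seL4_MaxPrio ever stopped
 * being of the form 2^n - 1, the mask would silently corrupt priorities, so
 * the assumption is cheap to pin down at compile time:
 */
compile_assert(max_prio_is_mask_8, seL4_MaxPrio == MASK(8))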
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    switch (cap_get_capType(cap)) {
    case cap_page_table_cap:
        if (cap_page_table_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped PT cap");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_directory_cap:
        if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    /* This is a deviation from haskell, which has only
     * one frame cap type on ARM */
    case cap_small_frame_cap:
        ret.cap = cap_small_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_frame_cap:
        ret.cap = cap_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        /* This assert has no equivalent in haskell,
         * as the options are restricted by type */
        fail("Invalid arch cap");
    }
}
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot)
{
    assert(thread_state_get_tsType(receiver->tcbState) ==
           ThreadState_BlockedOnReply);

    if (likely(fault_get_faultType(receiver->tcbFault) == fault_null_fault)) {
        doIPCTransfer(sender, NULL, 0, true, receiver, false);
        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        setThreadState(receiver, ThreadState_Running);
        attemptSwitchTo(receiver);
    } else {
        bool_t restart;

        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        restart = handleFaultReply(receiver, sender);
        fault_null_fault_ptr_new(&receiver->tcbFault);
        if (restart) {
            setThreadState(receiver, ThreadState_Restart);
            attemptSwitchTo(receiver);
        } else {
            setThreadState(receiver, ThreadState_Inactive);
        }
    }

    finaliseCap(slot->cap, true, true);
    slot->cap = cap_null_cap_new();
}
cap_t CONST Arch_updateCapData(bool_t preserve, word_t data, cap_t cap)
{
    /* Avoid a switch statement with just a 'default' case as the C parser
     * does not like this */
#ifdef CONFIG_IOMMU
    switch (cap_get_capType(cap)) {
    case cap_io_space_cap: {
        io_space_capdata_t w = { { data } };
        uint16_t PCIDevice = io_space_capdata_get_PCIDevice(w);
        uint16_t domainID = io_space_capdata_get_domainID(w);
        if (!preserve &&
                cap_io_space_cap_get_capPCIDevice(cap) == 0 &&
                domainID >= x86KSFirstValidIODomain &&
                domainID != 0 &&
                domainID <= MASK(x86KSnumIODomainIDBits)) {
            return cap_io_space_cap_new(domainID, PCIDevice);
        } else {
            return cap_null_cap_new();
        }
    }

    default:
        return cap;
    }
#endif
    return cap;
}
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot)
{
    assert(thread_state_get_tsType(receiver->tcbState) ==
           ThreadState_BlockedOnReply);

    if (likely(fault_get_faultType(receiver->tcbFault) == fault_null_fault)) {
        doIPCTransfer(sender, NULL, 0, true, receiver, false);
        setThreadState(receiver, ThreadState_Running);
        attemptSwitchTo(receiver);
    } else {
        bool_t restart;

        restart = handleFaultReply(receiver, sender);
        fault_null_fault_ptr_new(&receiver->tcbFault);
        if (restart) {
            setThreadState(receiver, ThreadState_Restart);
            attemptSwitchTo(receiver);
        } else {
            setThreadState(receiver, ThreadState_Inactive);
        }
    }

    if (cap_reply_cap_get_capInCDT(slot->cap)) {
        cte_t *replySlot = TCB_PTR_CTE_PTR(receiver, tcbReply);
        assert(cap_get_capType(replySlot->cap) == cap_reply_cap);
        assert(cap_reply_cap_get_capInCDT(replySlot->cap));
        cdtRemove(replySlot);
        cdtRemove(slot);
        slot->cap = cap_null_cap_new();
        replySlot->cap = cap_reply_cap_new(false, true, TCB_REF(NULL));
    } else {
        deleteCallerCap(sender);
    }
}
BOOT_CODE cap_t create_root_cnode(void)
{
    pptr_t pptr;
    cap_t cap;

    /* write the number of root CNode slots to global state */
    ndks_boot.slot_pos_max = BIT(CONFIG_ROOT_CNODE_SIZE_BITS);

    /* create an empty root CNode */
    pptr = alloc_region(CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failing: could not create root cnode\n");
        return cap_null_cap_new();
    }
    memzero(CTE_PTR(pptr), 1U << (CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS));
    cap = cap_cnode_cap_new(
              CONFIG_ROOT_CNODE_SIZE_BITS,            /* radix */
              wordBits - CONFIG_ROOT_CNODE_SIZE_BITS, /* guard size */
              0,                                      /* guard */
              pptr                                    /* pptr */
          );

    /* write the root CNode cap into the root CNode */
    write_slot(SLOT_PTR(pptr, BI_CAP_IT_CNODE), cap);

    return cap;
}
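/*
 * Note on the guard arithmetic above (illustrative): a CNode cap resolves
 * guard-size + radix bits of a CPtr per lookup level, so choosing a guard of
 * wordBits - CONFIG_ROOT_CNODE_SIZE_BITS makes the root CNode consume an
 * entire word-sized CPtr in a single level. For example, with wordBits == 32
 * and a hypothetical CONFIG_ROOT_CNODE_SIZE_BITS == 12, the 4096-slot root
 * CNode carries a 20-bit zero guard, and a valid root CPtr is any word whose
 * top 20 bits are zero.
 */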
exception_t decodeSetIPCBuffer(cap_t cap, unsigned int length, cte_t *slot,
                               extra_caps_t extraCaps, word_t *buffer)
{
    cptr_t cptr_bufferPtr;
    cap_t bufferCap;
    cte_t *bufferSlot;

    if (length < 1 || extraCaps.excaprefs[0] == NULL) {
        userError("TCB SetIPCBuffer: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cptr_bufferPtr = getSyscallArg(0, buffer);
    bufferSlot = extraCaps.excaprefs[0];
    bufferCap = extraCaps.excaprefs[0]->cap;

    if (cptr_bufferPtr == 0) {
        bufferSlot = NULL;
    } else {
        exception_t e;
        deriveCap_ret_t dc_ret;

        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;
        e = checkValidIPCBuffer(cptr_bufferPtr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               0, 0, /* used to be prioInvalid, but it doesn't matter */
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_update_ipc_buffer);
}
cap_t CONST Arch_updateCapData(bool_t preserve, word_t data, cap_t cap)
{
    switch (cap_get_capType(cap)) {
#ifdef CONFIG_IOMMU
    case cap_io_space_cap: {
        io_space_capdata_t w = { { data } };
        uint16_t PCIDevice = io_space_capdata_get_PCIDevice(w);
        uint16_t domainID = io_space_capdata_get_domainID(w);
        vtd_cte_t *vtd_context_table =
            (vtd_cte_t *)vtd_rte_get_ctp(ia32KSvtdRootTable[get_pci_bus(data)]);
        if (!preserve &&
                cap_io_space_cap_get_capPCIDevice(cap) == 0 &&
                vtd_rte_get_present(ia32KSvtdRootTable[get_pci_bus(data)]) &&
                (!vtd_cte_get_present(vtd_context_table[PCIDevice & 0xff]) ||
                 vtd_cte_get_translation_type(vtd_context_table[PCIDevice & 0xff]) != 2) &&
                domainID <= MASK(ia32KSnumIODomainIDBits)) {
            return cap_io_space_cap_new(domainID, PCIDevice);
        } else {
            return cap_null_cap_new();
        }
    }
#endif

    case cap_io_port_cap: {
        io_port_capdata_t w = { .words = { data } };
        uint16_t firstPort = io_port_capdata_get_firstPort(w);
        uint16_t lastPort = io_port_capdata_get_lastPort(w);
        uint16_t capFirstPort = cap_io_port_cap_get_capIOPortFirstPort(cap);
        uint16_t capLastPort = cap_io_port_cap_get_capIOPortLastPort(cap);
        assert(capFirstPort <= capLastPort);

        /* Ensure input data is ordered correctly. */
        if (firstPort > lastPort) {
            return cap_null_cap_new();
        }

        /* Allow the update if the new cap has range no larger than the old
         * cap. */
        if ((firstPort >= capFirstPort) && (lastPort <= capLastPort)) {
            return cap_io_port_cap_new(firstPort, lastPort);
        } else {
            return cap_null_cap_new();
        }
    }

    default:
        return cap;
    }
}
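/*
 * Worked example of the IO-port narrowing rule above (hypothetical values):
 * for an existing cap covering ports [0x60, 0x64], capdata encoding
 * [0x60, 0x61] yields a valid narrowed cap; [0x50, 0x64] is rejected for
 * extending below capFirstPort, and [0x64, 0x60] for being unordered. A
 * rejected update produces a null cap rather than an error code.
 */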
void ipcCancel(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnAsyncEvent:
        asyncIPCCancel(tptr,
                       AEP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        fault_null_fault_ptr_new(&tptr->tcbFault);

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(cap_reply_cap_get_capCallerSlot(slot->cap));
        if (callerCap) {
            finaliseCap(callerCap->cap, true, true);
            callerCap->cap = cap_null_cap_new();
        }
        cap_reply_cap_ptr_set_capCallerSlot(&slot->cap, CTE_REF(NULL));
        break;
    }
    }
}
void deleteCallerCap(tcb_t *receiver)
{
    cte_t *callerSlot;

    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    if (cap_get_capType(callerSlot->cap) == cap_reply_cap) {
        finaliseCap(callerSlot->cap, true, true);
        callerSlot->cap = cap_null_cap_new();
    }
}
BOOT_CODE cap_t create_ipcbuf_frame(cap_t root_cnode_cap, cap_t pd_cap, vptr_t vptr)
{
    cap_t cap;
    pptr_t pptr;

    /* allocate the IPC buffer frame */
    pptr = alloc_region(PAGE_BITS);
    if (!pptr) {
        printf("Kernel init failing: could not create ipc buffer frame\n");
        return cap_null_cap_new();
    }
    clearMemory((void *)pptr, PAGE_BITS);

    /* create a cap of it and write it into the root CNode */
    cap = create_mapped_it_frame_cap(pd_cap, pptr, vptr, false, false);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), cap);

    return cap;
}
BOOT_CODE cap_t create_it_asid_pool(cap_t root_cnode_cap)
{
    pptr_t ap_pptr;
    cap_t ap_cap;

    /* create ASID pool */
    ap_pptr = alloc_region(ASID_POOL_SIZE_BITS);
    if (!ap_pptr) {
        printf("Kernel init failed: failed to create initial thread asid pool\n");
        return cap_null_cap_new();
    }
    memzero(ASID_POOL_PTR(ap_pptr), 1 << ASID_POOL_SIZE_BITS);
    ap_cap = cap_asid_pool_cap_new(IT_ASID >> asidLowBits, ap_pptr);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_ASID_POOL), ap_cap);

    /* create ASID control cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_ASID_CTRL),
        cap_asid_control_cap_new()
    );

    return ap_cap;
}
/* Create an address space for the initial thread.
 * This includes page directory and page tables */
BOOT_CODE static cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t vspace_cap;
    vptr_t vptr;
    pptr_t pptr;
    slot_pos_t slot_pos_before;
    slot_pos_t slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    if (PDPT_BITS == 0) {
        cap_t pd_cap;
        pptr_t pd_pptr;

        /* just create single PD obj and cap */
        pd_pptr = alloc_region(PD_SIZE_BITS);
        if (!pd_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDE_PTR(pd_pptr), 1 << PD_SIZE_BITS);
        copyGlobalMappings(PDE_PTR(pd_pptr));
        pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
        if (!provide_cap(root_cnode_cap, pd_cap)) {
            return cap_null_cap_new();
        }
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pd_cap);
        vspace_cap = pd_cap;
    } else {
        cap_t pdpt_cap;
        pptr_t pdpt_pptr;
        unsigned int i;

        /* create a PDPT obj and cap */
        pdpt_pptr = alloc_region(PDPT_SIZE_BITS);
        if (!pdpt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDPTE_PTR(pdpt_pptr), 1 << PDPT_SIZE_BITS);
        pdpt_cap = cap_pdpt_cap_new(
                       true,      /* capPDPTISMapped */
                       IT_ASID,   /* capPDPTMappedASID */
                       pdpt_pptr  /* capPDPTBasePtr */
                   );
        /* create all PD objs and caps necessary to cover userland image. For simplicity
         * to ensure we also cover the kernel window we create all PDs */
        for (i = 0; i < BIT(PDPT_BITS); i++) {
            /* The compiler is under the mistaken belief here that this shift could be
             * undefined. However, in the case that it would be undefined this code path
             * is not reachable because PDPT_BITS == 0 (see if statement at the top of
             * this function), so to work around it we must both put in a redundant
             * if statement AND place the shift in a variable. While the variable
             * will get compiled away it prevents the compiler from evaluating
             * the 1 << 32 as a constant when it shouldn't
             * tl;dr gcc evaluates constants even if code is unreachable */
            int shift = (PD_BITS + PT_BITS + PAGE_BITS);
            if (shift != 32) {
                vptr = i << shift;
            } else {
                return cap_null_cap_new();
            }
            pptr = alloc_region(PD_SIZE_BITS);
            if (!pptr) {
                return cap_null_cap_new();
            }
            memzero(PDE_PTR(pptr), 1 << PD_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_page_directory_cap(pdpt_cap, pptr, vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }
        /* now that PDs exist we can copy the global mappings */
        copyGlobalMappings(PDPTE_PTR(pdpt_pptr));
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pdpt_cap);
        vspace_cap = pdpt_cap;
    }
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pd_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    /* create all PT objs and caps necessary to cover userland image */
    slot_pos_before = ndks_boot.slot_pos_cur;

    for (vptr = ROUND_DOWN(it_v_reg.start, PT_BITS + PAGE_BITS);
            vptr < it_v_reg.end;
            vptr += BIT(PT_BITS + PAGE_BITS)) {
        pptr = alloc_region(PT_SIZE_BITS);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << PT_SIZE_BITS);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}
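/*
 * Standalone illustration of the shift workaround in the loop above (a
 * sketch, not kernel code; assumes a 32-bit word). Shifting a 32-bit value
 * by 32 or more is undefined behaviour in C, and, as the comment above notes,
 * gcc evaluates constant shifts even in unreachable code, so the guard has to
 * be a runtime comparison against a variable rather than a compile-time
 * constant expression:
 */
static inline uint32_t safe_shift_u32(uint32_t value, unsigned int shift)
{
    if (shift >= 32) {
        /* the would-be-undefined case, handled explicitly */
        return 0;
    }
    return value << shift;
}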
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    switch (cap_get_capType(cap)) {
    case cap_page_table_cap:
        if (cap_page_table_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped PT cap");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_directory_cap:
        if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_frame_cap:
#ifdef CONFIG_IOMMU
        cap = cap_frame_cap_set_capFIsIOSpace(cap, 0);
#endif
        ret.cap = cap_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_io_port_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

#ifdef CONFIG_IOMMU
    case cap_io_space_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_io_page_table_cap:
        if (cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;
#endif

    default:
        /* This assert has no equivalent in haskell,
         * as the options are restricted by type */
        fail("Invalid arch cap type");
    }
}
void fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    dom_t dom;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there are no extra caps, the length is OK and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (NODE_STATE(ksCurThread)->tcbBoundNotification &&
            notification_ptr_get_state(NODE_STATE(ksCurThread)->tcbBoundNotification) ==
            NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* ensure we are not single stepping the caller in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (caller->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Check that the caller has not faulted, in which case a fault
     * reply is generated instead. */
    fault_type = seL4_Fault_get_seL4_FaultType(caller->tcbFault);
    if (unlikely(fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

#ifdef CONFIG_ARCH_X86_64
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_RISCV
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* Ensure the original caller can be scheduled directly. */
    dom = maxDom ? ksCurDomain : 0;
    if (unlikely(!isHighestPrio(dom, caller->tcbPriority))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
     * scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != caller->tcbAffinity)) {
        slowpath(SysReplyRecv);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &NODE_STATE(ksCurThread)->tcbState, (word_t)ep_ptr,
        ThreadState_BlockedOnReceive);
    thread_state_ptr_set_blockingIPCCanGrant(&NODE_STATE(ksCurThread)->tcbState,
                                             cap_endpoint_cap_get_capCanGrant(ep_cap));

    /* Place the thread in the endpoint queue */
    endpointTail = endpoint_ptr_get_epQueue_tail_fp(ep_ptr);
    if (likely(!endpointTail)) {
        NODE_STATE(ksCurThread)->tcbEPPrev = NULL;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = NODE_STATE(ksCurThread);
        NODE_STATE(ksCurThread)->tcbEPPrev = endpointTail;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
exception_t decodeSetSpace(cap_t cap, unsigned int length, cte_t *slot,
                           extra_caps_t extraCaps, word_t *buffer)
{
    cptr_t faultEP;
    word_t cRootData, vRootData;
    cte_t *cRootSlot, *vRootSlot;
    cap_t cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;

    if (length < 3 || extraCaps.excaprefs[0] == NULL ||
            extraCaps.excaprefs[1] == NULL) {
        userError("TCB SetSpace: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    faultEP   = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);

    cRootSlot = extraCaps.excaprefs[0];
    cRootCap  = extraCaps.excaprefs[0]->cap;
    vRootSlot = extraCaps.excaprefs[1];
    vRootCap  = extraCaps.excaprefs[1]->cap;

    if (slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
            slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB SetSpace: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap &&
            (!config_set(CONFIG_ALLOW_NULL_CSPACE) ||
             cap_get_capType(cRootCap) != cap_null_cap)) {
        userError("TCB SetSpace: Invalid CNode cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB SetSpace: Invalid VSpace cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(ksCurThread, ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP,
               0, /* used to be prioInvalid, but it doesn't matter */
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_update_space);
}
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    switch (cap_get_capType(cap)) {
    case cap_page_global_directory_cap:
        if (cap_page_global_directory_cap_get_capPGDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PGD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_upper_directory_cap:
        if (cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PUD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_directory_cap:
        if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_table_cap:
        if (cap_page_table_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PT cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_frame_cap:
        ret.cap = cap_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case cap_vcpu_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;
#endif

    default:
        /* This assert has no equivalent in haskell,
         * as the options are restricted by type */
        fail("Invalid arch cap");
    }
}
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    switch (cap_get_capType(cap)) {
    case cap_page_table_cap:
        if (cap_page_table_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped PT cap");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_directory_cap:
        if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_io_port_control_cap:
        ret.status = EXCEPTION_NONE;
        ret.cap = cap_null_cap_new();
        return ret;

    case cap_io_port_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

#ifdef CONFIG_IOMMU
    case cap_io_space_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_io_page_table_cap:
        if (cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;
#endif

#ifdef CONFIG_VTX
    case cap_vcpu_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_ept_pml4_cap:
        if (cap_ept_pml4_cap_get_capPML4IsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an EPT PML4 cap without an assigned ASID.");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_ept_pdpt_cap:
        if (cap_ept_pdpt_cap_get_capPDPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped EPT PDPT cap.");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_ept_pd_cap:
        if (cap_ept_pd_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped EPT PD cap.");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_ept_pt_cap:
        if (cap_ept_pt_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped EPT PT cap.");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;
#endif

    default:
        return Mode_deriveCap(slot, cap);
    }
}
void fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysReplyRecv;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there are no extra caps, the length is OK and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (ksCurThread->tcbBoundNotification &&
            notification_ptr_get_state(ksCurThread->tcbBoundNotification) ==
            NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* Check that the caller has not faulted, in which case a fault
     * reply is generated instead. */
    fault_type = fault_get_faultType(caller->tcbFault);
    if (unlikely(fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the original caller can be scheduled directly. */
    if (unlikely(caller->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
     * scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &ksCurThread->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);

    /* Place the thread in the endpoint queue */
    endpointTail = TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr));
    if (likely(!endpointTail)) {
        ksCurThread->tcbEPPrev = NULL;
        ksCurThread->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(ksCurThread));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = ksCurThread;
        ksCurThread->tcbEPPrev = endpointTail;
        ksCurThread->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, ksCurThread, caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}