void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysCall;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there are no extra caps, the length is OK, and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysCall);
    }

    /* Look up the cap. */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint. */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address. */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive. */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* Get the destination's VTable cap. */
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    /* Get HW ASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the destination has a higher/equal priority to us. */
    if (unlikely(dest->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant rights so that we can
     * create the reply cap. */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the destination is in the current domain and can be
     * scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread. */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender. */
    thread_state_ptr_set_tsType_np(&ksCurThread->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot. */
    replySlot = TCB_PTR_CTE_PTR(ksCurThread, tcbReply);

    /* Get dest caller slot. */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap. */
    cap_reply_cap_ptr_new_np(&callerSlot->cap, 0, TCB_REF(ksCurThread));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, ksCurThread, dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
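/*
 * Illustrative sketches of two helpers the fastpath relies on above. These
 * are assumptions about their behaviour, not the verified kernel
 * definitions: fastpath_mi_check_sketch assumes the message length and
 * extra-caps count occupy the low bits of the message info word, and
 * n_msgRegisters/msgRegisters name the registers used for short IPC.
 */
static inline int
fastpath_mi_check_sketch(word_t msgInfo)
{
    /* One comparison rejects both cases the fastpath cannot handle: a
     * non-zero extra-caps field, or a length that does not fit in the
     * message registers. */
    return (msgInfo & MASK(seL4_MsgLengthBits + seL4_MsgExtraCapBits))
           > n_msgRegisters;
}

static inline void
fastpath_copy_mrs_sketch(word_t length, tcb_t *src, tcb_t *dest)
{
    word_t i;

    /* length has already been bounded by the check above, so every
     * message register is backed by a real CPU register and no IPC
     * buffer access is needed on this path. */
    for (i = 0; i < length; i++) {
        setRegister(dest, msgRegisters[i], getRegister(src, msgRegisters[i]));
    }
}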
void
fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;

    /* Get message info and length. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysReplyRecv;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there are no extra caps, the length is OK, and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Look up the cap. */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint. */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the bound notification. */
    if (ksCurThread->tcbBoundNotification &&
        notification_ptr_get_state(ksCurThread->tcbBoundNotification) == NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address. */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send. */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* Check that the caller has not faulted, in which case a fault
     * reply must be generated instead. */
    fault_type = fault_get_faultType(caller->tcbFault);
    if (unlikely(fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Get the caller's VTable cap. */
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the caller has a valid VTable. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Get HW ASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the original caller can be scheduled directly. */
    if (unlikely(caller->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Ensure the HW ASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
     * scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread. */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Set thread state to BlockedOnReceive. */
    thread_state_ptr_mset_blockingObject_tsType(
        &ksCurThread->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);

    /* Place the thread in the endpoint queue. */
    endpointTail = TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr));
    if (likely(!endpointTail)) {
        ksCurThread->tcbEPPrev = NULL;
        ksCurThread->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(ksCurThread));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = ksCurThread;
        ksCurThread->tcbEPPrev = endpointTail;
        ksCurThread->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* We know there's no fault, so go straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, ksCurThread, caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
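/*
 * For context, the reply-cap test used above amounts to little more than a
 * cap-type check. The exact test is an assumption here, as is the
 * user-level pair after it; neither is the verified source.
 */
static inline bool_t
fastpath_reply_cap_check_sketch(cap_t cap)
{
    return cap_capType_equals(cap, cap_reply_cap);
}

/*
 * A minimal client/server pair that would exercise both fastpaths,
 * assuming libsel4 (#include <sel4/sel4.h>), a shared endpoint cap `ep`,
 * and the seL4_Recv/seL4_ReplyRecv naming (older trees spell these
 * seL4_Wait/seL4_ReplyWait). A short seL4_Call with no extra caps and no
 * pending fault is a fastpath_call candidate; the server's seL4_ReplyRecv,
 * with a valid reply cap and a non-faulting caller, is a
 * fastpath_reply_recv candidate.
 */
static seL4_Word
client(seL4_CPtr ep)
{
    /* One-word message: label 0, no caps, length 1. */
    seL4_MessageInfo_t info = seL4_MessageInfo_new(0, 0, 0, 1);
    seL4_SetMR(0, 42);
    info = seL4_Call(ep, info);                   /* taken by fastpath_call */
    return seL4_GetMR(0);
}

static void
server(seL4_CPtr ep)
{
    seL4_Word badge;
    seL4_MessageInfo_t info = seL4_Recv(ep, &badge);

    for (;;) {
        /* Echo the request back, incremented, and wait for the next one. */
        seL4_SetMR(0, seL4_GetMR(0) + 1);
        info = seL4_MessageInfo_new(0, 0, 0, 1);
        info = seL4_ReplyRecv(ep, info, &badge);  /* taken by fastpath_reply_recv */
    }
}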
/* Create an address space for the initial thread.
 * This includes page directory and page tables. */
BOOT_CODE static cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    slot_pos_t slot_pos_before;
    slot_pos_t slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;

    if (PDPT_BITS == 0) {
        cap_t pd_cap;
        pptr_t pd_pptr;

        /* just create single PD obj and cap */
        pd_pptr = alloc_region(PD_SIZE_BITS);
        if (!pd_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDE_PTR(pd_pptr), 1 << PD_SIZE_BITS);
        copyGlobalMappings(PDE_PTR(pd_pptr));
        pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
        if (!provide_cap(root_cnode_cap, pd_cap)) {
            return cap_null_cap_new();
        }
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pd_cap);
        vspace_cap = pd_cap;
    } else {
        cap_t pdpt_cap;
        pptr_t pdpt_pptr;
        unsigned int i;

        /* create a PDPT obj and cap */
        pdpt_pptr = alloc_region(PDPT_SIZE_BITS);
        if (!pdpt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDPTE_PTR(pdpt_pptr), 1 << PDPT_SIZE_BITS);
        pdpt_cap = cap_pdpt_cap_new(
                       true,      /* capPDPTISMapped   */
                       IT_ASID,   /* capPDPTMappedASID */
                       pdpt_pptr  /* capPDPTBasePtr    */
                   );

        /* create all PD objs and caps necessary to cover the userland image.
         * For simplicity, and to ensure we also cover the kernel window, we
         * create all PDs. */
        for (i = 0; i < BIT(PDPT_BITS); i++) {
            /* The compiler is under the mistaken belief here that this shift could be
             * undefined. However, in the case that it would be undefined this code path
             * is not reachable because PDPT_BITS == 0 (see if statement at the top of
             * this function), so to work around it we must both put in a redundant
             * if statement AND place the shift in a variable. While the variable
             * will get compiled away, it prevents the compiler from evaluating
             * the 1 << 32 as a constant when it shouldn't.
             * tl;dr: gcc evaluates constants even if code is unreachable. */
            int shift = (PD_BITS + PT_BITS + PAGE_BITS);
            if (shift != 32) {
                vptr = i << shift;
            } else {
                return cap_null_cap_new();
            }

            pptr = alloc_region(PD_SIZE_BITS);
            if (!pptr) {
                return cap_null_cap_new();
            }
            memzero(PDE_PTR(pptr), 1 << PD_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_page_directory_cap(pdpt_cap, pptr, vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }

        /* now that PDs exist we can copy the global mappings */
        copyGlobalMappings(PDPTE_PTR(pdpt_pptr));

        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pdpt_cap);
        vspace_cap = pdpt_cap;
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pd_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    /* create all PT objs and caps necessary to cover the userland image */
    slot_pos_before = ndks_boot.slot_pos_cur;

    for (vptr = ROUND_DOWN(it_v_reg.start, PT_BITS + PAGE_BITS);
         vptr < it_v_reg.end;
         vptr += BIT(PT_BITS + PAGE_BITS)) {
        pptr = alloc_region(PT_SIZE_BITS);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << PT_SIZE_BITS);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}
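/*
 * Worked example of the geometry above, using the usual IA-32 constants
 * (assumed here, not defined in this file). With PAE paging: PDPT_BITS = 2,
 * PD_BITS = 9, PT_BITS = 9, PAGE_BITS = 12, so the PD loop's shift is
 * 9 + 9 + 12 = 30 and vptr steps through 0x00000000, 0x40000000,
 * 0x80000000, 0xC0000000: each of the four PDs covers 1 GiB, and each page
 * table spans BIT(9 + 12) = 2 MiB, the stride of the PT-creation loop.
 * Without PAE, PD_BITS = PT_BITS = 10, so the shift would be exactly 32
 * (undefined for a 32-bit shift); that is the unreachable case the
 * `shift != 32` guard exists to hide from gcc. The checks below merely
 * restate this arithmetic; they are illustrative, assuming the kernel's
 * compile_assert helper and the constants named above.
 */
#ifdef CONFIG_PAE_PAGING
compile_assert(it_pd_shift_is_30, PD_BITS + PT_BITS + PAGE_BITS == 30)
#else
compile_assert(it_pd_shift_is_32, PD_BITS + PT_BITS + PAGE_BITS == 32)
#endif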