void epCancelAll(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
            tcbSchedEnqueue(thread);
        }

        rescheduleRequired();
        break;
    }
    }
}
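/*
 * For reference: a sketch of the endpoint wait-queue view the functions in
 * this listing operate on, inferred from the accessors they call
 * (endpoint_ptr_get_epQueue_head/_tail, ep_ptr_get_queue). The real
 * definitions live elsewhere in the kernel tree; treat this as an
 * illustrative assumption, not the authoritative source.
 */
typedef struct tcb_queue {
    tcb_t *head; /* first blocked TCB, or NULL if the queue is empty */
    tcb_t *end;  /* last blocked TCB, or NULL if the queue is empty */
} tcb_queue_t;

static inline tcb_queue_t ep_ptr_get_queue(endpoint_t *epptr)
{
    tcb_queue_t queue;

    /* The endpoint object caches the head and tail of its wait queue. */
    queue.head = (tcb_t *)endpoint_ptr_get_epQueue_head(epptr);
    queue.end = (tcb_t *)endpoint_ptr_get_epQueue_tail(epptr);

    return queue;
}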
void cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        cancelSignal(tptr,
                     NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        tptr->tcbFault = seL4_Fault_NullFault_new();

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(mdb_node_get_mdbNext(slot->cteMDBNode));
        if (callerCap) {
            /** GHOSTUPD: "(True,
                gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
            cteDeleteOne(callerCap);
        }

        break;
    }
    }
}
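/*
 * Sketch of the notification counterpart cancelIPC dispatches to for the
 * BlockedOnNotification case. Reconstructed here for context, assuming the
 * notification-queue accessors (ntfn_ptr_get_queue/ntfn_ptr_set_queue)
 * defined elsewhere in the tree; not taken verbatim from this listing.
 */
void cancelSignal(tcb_t *threadPtr, notification_t *ntfnPtr)
{
    tcb_queue_t ntfn_queue;

    /* The notification must have waiters, otherwise the thread could not
     * have been blocked on it. */
    assert(notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting);

    /* Dequeue TCB from the notification's wait queue */
    ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
    ntfn_queue = tcbEPDequeue(threadPtr, ntfn_queue);
    ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);

    /* Make the notification object idle if the queue drained */
    if (!ntfn_queue.head) {
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
    }

    /* Make thread inactive */
    setThreadState(threadPtr, ThreadState_Inactive);
}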
void ipcCancel(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnAsyncEvent:
        asyncIPCCancel(tptr,
                       AEP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        fault_null_fault_ptr_new(&tptr->tcbFault);

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(cap_reply_cap_get_capCallerSlot(slot->cap));
        if (callerCap) {
            finaliseCap(callerCap->cap, true, true);
            callerCap->cap = cap_null_cap_new();
        }
        cap_reply_cap_ptr_set_capCallerSlot(&slot->cap, CTE_REF(NULL));

        break;
    }
    }
}
void epCancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        break;

    case EPState_Send: {
        tcb_t *thread, *next;
        tcb_queue_t queue = ep_ptr_get_queue(epptr);

        /* this is a de-optimisation for verification
         * reasons. it allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (thread = queue.head; thread; thread = next) {
            word_t b = thread_state_ptr_get_blockingIPCBadge(
                           &thread->tcbState);
            next = thread->tcbEPNext;
            if (b == badge) {
                setThreadState(thread, ThreadState_Restart);
                tcbSchedEnqueue(thread);
                queue = tcbEPDequeue(thread, queue);
            }
        }
        ep_ptr_set_queue(epptr, queue);

        if (queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }

        rescheduleRequired();

        break;
    }

    default:
        fail("invalid EP state");
    }
}
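/*
 * Illustrative sketch of the doubly-linked dequeue helper the cancellation
 * paths above rely on (tcbEPDequeue); tcbEPAppend is its enqueue
 * counterpart used by sendIPC/receiveIPC below. Shown as an assumption for
 * readability; the canonical version lives with the TCB code.
 */
tcb_queue_t tcbEPDequeue(tcb_t *tcb, tcb_queue_t queue)
{
    /* Unlink from the predecessor, or advance the head. */
    if (tcb->tcbEPPrev) {
        tcb->tcbEPPrev->tcbEPNext = tcb->tcbEPNext;
    } else {
        queue.head = tcb->tcbEPNext;
    }

    /* Unlink from the successor, or pull the tail back. */
    if (tcb->tcbEPNext) {
        tcb->tcbEPNext->tcbEPPrev = tcb->tcbEPPrev;
    } else {
        queue.end = tcb->tcbEPPrev;
    }

    return queue;
}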
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, tcb_t *thread, endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Send:
        if (blocking) {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnSend */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnSend);
            thread_state_ptr_set_blockingIPCEndpoint(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCBadge(
                &thread->tcbState, badge);
            thread_state_ptr_set_blockingIPCCanGrant(
                &thread->tcbState, canGrant);
            thread_state_ptr_set_blockingIPCIsCall(
                &thread->tcbState, do_call);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Send);
            ep_ptr_set_queue(epptr, queue);
        }
        break;

    case EPState_Recv: {
        tcb_queue_t queue;
        tcb_t *dest;
        bool_t diminish;

        /* Get the head of the endpoint queue. */
        queue = ep_ptr_get_queue(epptr);
        dest = queue.head;

        /* Haskell error "Receive endpoint queue must not be empty" */
        assert(dest);

        /* Dequeue the first TCB */
        queue = tcbEPDequeue(dest, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        /* Do the transfer */
        diminish = thread_state_get_blockingIPCDiminishCaps(dest->tcbState);
        doIPCTransfer(thread, epptr, badge, canGrant, dest, diminish);

        setThreadState(dest, ThreadState_Running);
        attemptSwitchTo(dest);

        if (do_call ||
                fault_ptr_get_faultType(&thread->tcbFault) != fault_null_fault) {
            if (canGrant && !diminish) {
                setupCallerCap(thread, dest);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        }

        break;
    }
    }
}
void receiveIPC(tcb_t *thread, cap_t cap)
{
    endpoint_t *epptr;
    bool_t diminish;
    async_endpoint_t *aepptr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));
    diminish = !cap_endpoint_cap_get_capCanSend(cap);

    /* Check for anything waiting in the async endpoint */
    aepptr = thread->boundAsyncEndpoint;
    if (aepptr && async_endpoint_ptr_get_state(aepptr) == AEPState_Active) {
        completeAsyncIPC(aepptr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnReceive */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnReceive);
            thread_state_ptr_set_blockingIPCEndpoint(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCDiminishCaps(
                &thread->tcbState, diminish);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Recv);
            ep_ptr_set_queue(epptr, queue);
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread, diminish);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            if (do_call ||
                    fault_get_faultType(sender->tcbFault) != fault_null_fault) {
                if (canGrant && !diminish) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }

            break;
        }
        }
    }
}
void fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    dom_t dom;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (NODE_STATE(ksCurThread)->tcbBoundNotification &&
        notification_ptr_get_state(NODE_STATE(ksCurThread)->tcbBoundNotification) == NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* ensure we are not single stepping the caller in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (caller->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Check that the caller has not faulted, in which case a fault
       reply is generated instead. */
    fault_type = seL4_Fault_get_seL4_FaultType(caller->tcbFault);
    if (unlikely(fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(! isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

#ifdef CONFIG_ARCH_X86_64
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_RISCV
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* Ensure the original caller can be scheduled directly. */
    dom = maxDom ? ksCurDomain : 0;
    if (unlikely(!isHighestPrio(dom, caller->tcbPriority))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != caller->tcbAffinity)) {
        slowpath(SysReplyRecv);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &NODE_STATE(ksCurThread)->tcbState, (word_t)ep_ptr,
        ThreadState_BlockedOnReceive);
    thread_state_ptr_set_blockingIPCCanGrant(&NODE_STATE(ksCurThread)->tcbState,
                                             cap_endpoint_cap_get_capCanGrant(ep_cap));

    /* Place the thread in the endpoint queue */
    endpointTail = endpoint_ptr_get_epQueue_tail_fp(ep_ptr);
    if (likely(!endpointTail)) {
        NODE_STATE(ksCurThread)->tcbEPPrev = NULL;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = NODE_STATE(ksCurThread);
        NODE_STATE(ksCurThread)->tcbEPPrev = endpointTail;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
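/*
 * Sketch of the small predicates the fastpath entry checks rely on,
 * reconstructed here for context rather than taken from this listing.
 * fastpath_mi_check rejects any message that carries extra caps or more than
 * four message registers; fastpath_reply_cap_check only tests the cap type.
 */
static inline int fastpath_mi_check(word_t msgInfo)
{
    /* length and extraCaps occupy the low bits of msgInfo, so a single
     * comparison covers both "extra caps present" and "length > 4". */
    return (msgInfo & MASK(seL4_MsgLengthBits + seL4_MsgExtraCapBits)) > 4;
}

static inline int fastpath_reply_cap_check(cap_t cap)
{
    /* The caller slot either holds a reply cap or a null cap. */
    return cap_capType_equals(cap, cap_reply_cap);
}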
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;
    dom_t dom;
    word_t replyCanGrant;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* ensure we are not single stepping the destination in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (dest->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysCall);
    }
#endif

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(! isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

#ifdef CONFIG_ARCH_X86_64
    /* borrow the stored_hw_asid for PCID */
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID_fp(newVTable);
#endif

#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif

#ifdef CONFIG_ARCH_RISCV
    /* Get HW ASID */
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* let gcc optimise this out for 1 domain */
    dom = maxDom ? ksCurDomain : 0;
    /* ensure only the idle thread or lower prio threads are present in the scheduler */
    if (unlikely(dest->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority &&
                 !isHighestPrio(dom, dest->tcbPriority))) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant or grant-reply rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap) &&
                 !cap_endpoint_cap_get_capCanGrantReply(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != dest->tcbAffinity)) {
        slowpath(SysCall);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&NODE_STATE(ksCurThread)->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    replyCanGrant = thread_state_ptr_get_blockingIPCCanGrant(&dest->tcbState);
    cap_reply_cap_ptr_new_np(&callerSlot->cap, replyCanGrant, 0,
                             TCB_REF(NODE_STATE(ksCurThread)));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
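/*
 * Sketch of the register-only message copy both fastpaths perform. This is a
 * reconstruction for illustration, assuming the msgRegisters[] table and the
 * register accessors available in this tree; it is not the verified
 * definition.
 */
static inline void fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest)
{
    word_t i;
    register_t reg;

    /* The fastpath only handles messages that fit in registers, so a plain
     * register-to-register copy suffices; no IPC buffer is touched. */
    for (i = 0; i < length; i++) {
        reg = msgRegisters[i];
        setRegister(dest, reg, getRegister(src, reg));
    }
}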
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, tcb_t *thread, endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Send:
        if (blocking) {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnSend */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnSend);
            thread_state_ptr_set_blockingObject(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCBadge(
                &thread->tcbState, badge);
            thread_state_ptr_set_blockingIPCCanGrant(
                &thread->tcbState, canGrant);
            thread_state_ptr_set_blockingIPCIsCall(
                &thread->tcbState, do_call);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Send);
            ep_ptr_set_queue(epptr, queue);
        }
        break;

    case EPState_Recv: {
        tcb_queue_t queue;
        tcb_t *dest;

        /* Get the head of the endpoint queue. */
        queue = ep_ptr_get_queue(epptr);
        dest = queue.head;

        /* Haskell error "Receive endpoint queue must not be empty" */
        assert(dest);

        /* Dequeue the first TCB */
        queue = tcbEPDequeue(dest, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        /* Do the transfer */
        doIPCTransfer(thread, epptr, badge, canGrant, dest);

        setThreadState(dest, ThreadState_Running);
        attemptSwitchTo(dest);

        if (do_call ||
                seL4_Fault_ptr_get_seL4_FaultType(&thread->tcbFault) != seL4_Fault_NullFault) {
            if (canGrant) {
                setupCallerCap(thread, dest);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        }

        break;
    }
    }
}
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking)
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        completeSignal(ntfnPtr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));

                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            if (do_call ||
                    seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                if (canGrant) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }

            break;
        }
        }
    }
}
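/*
 * Sketch of the non-blocking receive fallback referenced above: when an
 * seL4_NBRecv finds nothing queued, the receiver simply gets a zero badge.
 * Reconstructed here as an assumption for context, using the badgeRegister
 * alias defined per architecture.
 */
static inline void doNBRecvFailedTransfer(tcb_t *thread)
{
    /* Set the badge register to 0 to indicate there was no message. */
    setRegister(thread, badgeRegister, 0);
}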
void fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysReplyRecv;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (ksCurThread->tcbBoundNotification &&
        notification_ptr_get_state(ksCurThread->tcbBoundNotification) == NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* Check that the caller has not faulted, in which case a fault
       reply is generated instead. */
    fault_type = fault_get_faultType(caller->tcbFault);
    if (unlikely(fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(! isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the original caller can be scheduled directly. */
    if (unlikely(caller->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &ksCurThread->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);

    /* Place the thread in the endpoint queue */
    endpointTail = TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr));
    if (likely(!endpointTail)) {
        ksCurThread->tcbEPPrev = NULL;
        ksCurThread->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(ksCurThread));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = ksCurThread;
        ksCurThread->tcbEPPrev = endpointTail;
        ksCurThread->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, ksCurThread, caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysCall;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(! isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the destination has a higher/equal priority to us. */
    if (unlikely(dest->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the original caller is in the current domain and can be
       scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&ksCurThread->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(ksCurThread, tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    cap_reply_cap_ptr_new_np(&callerSlot->cap, 0, TCB_REF(ksCurThread));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, ksCurThread, dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
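/*
 * User-level view of the path above, for illustration only: an seL4_Call on
 * an endpoint cap with a short, cap-free message is exactly the case the
 * fastpath_call variants accept. The cap name (ep) and the wrapper function
 * are assumptions, not part of the kernel source shown here.
 */
#include <sel4/sel4.h>

seL4_MessageInfo_t call_short(seL4_CPtr ep, seL4_Word arg)
{
    /* label 0, no caps unwrapped, no extra caps, one message register */
    seL4_MessageInfo_t info = seL4_MessageInfo_new(0, 0, 0, 1);
    seL4_SetMR(0, arg);

    /* Taken entirely on the fastpath when a receiver is already waiting and
     * the checks above pass; otherwise handled by sendIPC/receiveIPC on the
     * slowpath. */
    return seL4_Call(ep, info);
}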