/* Signal a notification object, waking a blocked receiver when possible.
 * Idle + bound thread blocked on receive: deliver directly. Waiting:
 * wake the head of the wait queue. Active: merge the badge. */
void sendSignal(notification_t *ntfnPtr, word_t badge)
{
    switch (notification_ptr_get_state(ntfnPtr)) {
    case NtfnState_Idle: {
        tcb_t *boundTCB = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);

        /* Deliver directly only when a bound thread exists and is blocked
         * waiting for a message; otherwise latch the badge. */
        if (boundTCB && thread_state_ptr_get_tsType(&boundTCB->tcbState)
                            == ThreadState_BlockedOnReceive) {
            /* Pull the thread off its endpoint and hand it the badge. */
            cancelIPC(boundTCB);
            setThreadState(boundTCB, ThreadState_Running);
            setRegister(boundTCB, badgeRegister, badge);
            switchIfRequiredTo(boundTCB);
        } else {
            ntfn_set_active(ntfnPtr, badge);
        }
        break;
    }

    case NtfnState_Waiting: {
        tcb_queue_t waiters = ntfn_ptr_get_queue(ntfnPtr);
        tcb_t *receiver = waiters.head;

        /* Haskell error "WaitingNtfn Notification must have non-empty queue" */
        assert(receiver);

        /* Remove the first waiter from the queue. */
        waiters = tcbEPDequeue(receiver, waiters);
        ntfn_ptr_set_queue(ntfnPtr, waiters);

        /* An emptied queue returns the notification to Idle. */
        if (!waiters.head) {
            notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        }

        setThreadState(receiver, ThreadState_Running);
        setRegister(receiver, badgeRegister, badge);
        switchIfRequiredTo(receiver);
        break;
    }

    case NtfnState_Active: {
        /* Already pending: accumulate this badge into the stored one. */
        word_t merged = notification_ptr_get_ntfnMsgIdentifier(ntfnPtr) | badge;
        notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, merged);
        break;
    }
    }
}
/* Cancel any IPC operation that tptr is currently blocked on, leaving the
 * thread Inactive (blocked-send/receive/reply) or delegating to the async
 * cancellation path. Threads in other states are untouched. */
void ipcCancel(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        /* The endpoint the thread is queued on is recorded in its state. */
        epptr = EP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        /* Last waiter removed: the endpoint becomes idle. */
        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnAsyncEvent:
        /* Delegate to the async-endpoint cancellation path. */
        asyncIPCCancel(tptr, AEP_PTR(thread_state_ptr_get_blockingIPCEndpoint(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        /* Discard any recorded fault; the reply being waited for is gone. */
        fault_null_fault_ptr_new(&tptr->tcbFault);

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        /* NOTE(review): the caller slot appears to be stored inside the
         * reply cap itself in this version — confirm against the cap
         * layout definitions. */
        callerCap = CTE_PTR(cap_reply_cap_get_capCallerSlot(slot->cap));
        if (callerCap) {
            /* Finalise the caller cap before nulling it so teardown runs
             * while the cap is still reachable from its slot. */
            finaliseCap(callerCap->cap, true, true);
            callerCap->cap = cap_null_cap_new();
        }
        cap_reply_cap_ptr_set_capCallerSlot(&slot->cap, CTE_REF(NULL));
        break;
    }
    }
}
/* Cancel any IPC operation that tptr is currently blocked on, leaving the
 * thread Inactive (blocked-send/receive), cancelling a pending signal wait,
 * or deleting the outstanding reply cap. Other states are untouched. */
void cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        /* The endpoint the thread is queued on is recorded in its state. */
        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        /* Last waiter removed: the endpoint becomes idle. */
        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        /* Waiting on a notification: remove from its wait queue. */
        cancelSignal(tptr, NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        /* Discard any recorded fault; the reply being waited for is gone. */
        tptr->tcbFault = seL4_Fault_NullFault_new();

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        /* NOTE(review): the caller cap is located via the MDB next pointer
         * of the reply slot — presumably the caller cap is the reply cap's
         * CDT child; confirm against the capDL/CDT conventions. */
        callerCap = CTE_PTR(mdb_node_get_mdbNext(slot->cteMDBNode));
        if (callerCap) {
            /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
            cteDeleteOne(callerCap);
        }
        break;
    }
    }
}
/* Restart every sender blocked on epptr whose blocking badge equals
 * `badge`, removing each one from the endpoint's queue; senders with
 * other badges remain queued. */
void epCancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        /* No blocked senders to consider. */
        break;

    case EPState_Send: {
        tcb_queue_t senders = ep_ptr_get_queue(epptr);
        tcb_t *cur;
        tcb_t *nextTCB;

        /* this is a de-optimisation for verification
         * reasons. it allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (cur = senders.head; cur; cur = nextTCB) {
            word_t senderBadge =
                thread_state_ptr_get_blockingIPCBadge(&cur->tcbState);
            /* Capture the link before any dequeue disturbs it. */
            nextTCB = cur->tcbEPNext;
            if (senderBadge == badge) {
                /* Matching badge: restart the sender and drop it from
                 * the endpoint queue. */
                setThreadState(cur, ThreadState_Restart);
                tcbSchedEnqueue(cur);
                senders = tcbEPDequeue(cur, senders);
            }
        }

        /* Reinstate whatever remains of the queue. */
        ep_ptr_set_queue(epptr, senders);
        if (senders.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }
        rescheduleRequired();
        break;
    }

    default:
        fail("invalid EP state");
    }
}
/* Remove threadPtr from the wait queue of ntfnPtr and make it Inactive;
 * the notification returns to Idle if the queue drains. */
void cancelSignal(tcb_t *threadPtr, notification_t *ntfnPtr)
{
    tcb_queue_t waitQueue;

    /* Haskell error "cancelSignal: notification object must be in a waiting" state */
    assert(notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting);

    /* Dequeue TCB */
    waitQueue = ntfn_ptr_get_queue(ntfnPtr);
    waitQueue = tcbEPDequeue(threadPtr, waitQueue);
    ntfn_ptr_set_queue(ntfnPtr, waitQueue);

    /* Make notification object idle */
    if (waitQueue.head == NULL) {
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
    }

    /* Make thread inactive */
    setThreadState(threadPtr, ThreadState_Inactive);
}
void sendIPC(bool_t blocking, bool_t do_call, word_t badge, bool_t canGrant, tcb_t *thread, endpoint_t *epptr) { //printf("\n===in sendIPC funtion==\n"); switch (endpoint_ptr_get_state(epptr)) { case EPState_Idle: //printf("in case idle\n"); case EPState_Send: //printf("in case send\n"); if (blocking) { tcb_queue_t queue; /* Set thread state to BlockedOnSend */ thread_state_ptr_set_tsType(&thread->tcbState, ThreadState_BlockedOnSend); thread_state_ptr_set_blockingIPCEndpoint( &thread->tcbState, EP_REF(epptr)); thread_state_ptr_set_blockingIPCBadge( &thread->tcbState, badge); thread_state_ptr_set_blockingIPCCanGrant( &thread->tcbState, canGrant); thread_state_ptr_set_blockingIPCIsCall( &thread->tcbState, do_call); scheduleTCB(thread); /* Place calling thread in endpoint queue */ queue = ep_ptr_get_queue(epptr); queue = tcbEPAppend(thread, queue); endpoint_ptr_set_state(epptr, EPState_Send); ep_ptr_set_queue(epptr, queue); } break; case EPState_Recv: { tcb_queue_t queue; tcb_t *dest; bool_t diminish; //printf("in case recv\n"); /* Get the head of the endpoint queue. */ queue = ep_ptr_get_queue(epptr); dest = queue.head; /* Haskell error "Receive endpoint queue must not be empty" */ assert(dest); /* Dequeue the first TCB */ queue = tcbEPDequeue(dest, queue); ep_ptr_set_queue(epptr, queue); if (!queue.head) { endpoint_ptr_set_state(epptr, EPState_Idle); } /* Do the transfer */ diminish = thread_state_get_blockingIPCDiminishCaps(dest->tcbState); doIPCTransfer(thread, epptr, badge, canGrant, dest, diminish); setThreadState(dest, ThreadState_Running); attemptSwitchTo(dest); //printf("the dest thread's prio is %d\n", dest->tcbPriority); if (do_call || fault_ptr_get_faultType(&thread->tcbFault) != fault_null_fault) { if (canGrant && !diminish) { setupCallerCap(thread, dest); } else { setThreadState(thread, ThreadState_Inactive); } } break; } } }
/*
 * Receive an IPC message for `thread` through endpoint capability `cap`.
 *
 * If the thread's bound async endpoint is Active, that pending event is
 * completed instead of touching the endpoint. Otherwise: with no sender
 * queued (Idle/Recv) the thread blocks on the endpoint; with a sender
 * queued (Send) the message is transferred immediately and the sender is
 * either given a caller cap (call/fault with grant and no diminish),
 * left Inactive, or resumed.
 *
 * Fix: removed the commented-out printf() debug lines left in the body.
 */
void receiveIPC(tcb_t *thread, cap_t cap)
{
    endpoint_t *epptr;
    bool_t diminish;
    async_endpoint_t *aepptr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));
    /* Receiving via a cap without send rights diminishes transferred caps. */
    diminish = !cap_endpoint_cap_get_capCanSend(cap);

    /* Check for anything waiting in the async endpoint */
    aepptr = thread->boundAsyncEndpoint;
    if (aepptr && async_endpoint_ptr_get_state(aepptr) == AEPState_Active) {
        completeAsyncIPC(aepptr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnReceive */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnReceive);
            thread_state_ptr_set_blockingIPCEndpoint(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCDiminishCaps(
                &thread->tcbState, diminish);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Recv);
            ep_ptr_set_queue(epptr, queue);
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);
            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread, diminish);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            /* A calling or faulting sender waits for a reply (caller cap)
             * when grant is possible; otherwise it is left Inactive. A
             * plain send simply resumes. */
            if (do_call ||
                fault_get_faultType(sender->tcbFault) != fault_null_fault) {
                if (canGrant && !diminish) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }
            break;
        }
        }
    }
}
void sendIPC(bool_t blocking, bool_t do_call, word_t badge, bool_t canGrant, tcb_t *thread, endpoint_t *epptr) { switch (endpoint_ptr_get_state(epptr)) { case EPState_Idle: case EPState_Send: if (blocking) { tcb_queue_t queue; /* Set thread state to BlockedOnSend */ thread_state_ptr_set_tsType(&thread->tcbState, ThreadState_BlockedOnSend); thread_state_ptr_set_blockingObject( &thread->tcbState, EP_REF(epptr)); thread_state_ptr_set_blockingIPCBadge( &thread->tcbState, badge); thread_state_ptr_set_blockingIPCCanGrant( &thread->tcbState, canGrant); thread_state_ptr_set_blockingIPCIsCall( &thread->tcbState, do_call); scheduleTCB(thread); /* Place calling thread in endpoint queue */ queue = ep_ptr_get_queue(epptr); queue = tcbEPAppend(thread, queue); endpoint_ptr_set_state(epptr, EPState_Send); ep_ptr_set_queue(epptr, queue); } break; case EPState_Recv: { tcb_queue_t queue; tcb_t *dest; /* Get the head of the endpoint queue. */ queue = ep_ptr_get_queue(epptr); dest = queue.head; /* Haskell error "Receive endpoint queue must not be empty" */ assert(dest); /* Dequeue the first TCB */ queue = tcbEPDequeue(dest, queue); ep_ptr_set_queue(epptr, queue); if (!queue.head) { endpoint_ptr_set_state(epptr, EPState_Idle); } /* Do the transfer */ doIPCTransfer(thread, epptr, badge, canGrant, dest); setThreadState(dest, ThreadState_Running); attemptSwitchTo(dest); if (do_call || seL4_Fault_ptr_get_seL4_FaultType(&thread->tcbFault) != seL4_Fault_NullFault) { if (canGrant) { setupCallerCap(thread, dest); } else { setThreadState(thread, ThreadState_Inactive); } } break; } } }
/* Receive an IPC message for `thread` through endpoint capability `cap`.
 * An Active bound notification is completed in preference to the endpoint.
 * Otherwise: with no sender queued, block (or fail the transfer when
 * non-blocking); with a sender queued, transfer its message immediately. */
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking)
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        /* A pending signal on the bound notification takes priority. */
        completeSignal(ntfnPtr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));

                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                /* Non-blocking receive with nothing to receive. */
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);
            if (!queue.head) {
                /* Queue drained: endpoint returns to Idle. */
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge, canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            /* A calling or faulting sender either gets a caller cap
             * (grant permitted) or is left Inactive; a plain send
             * resumes immediately. */
            if (do_call ||
                seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                if (canGrant) {
                    setupCallerCap(sender, thread);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                switchIfRequiredTo(sender);
            }
            break;
        }
        }
    }
}
/* Signal a notification object, waking a blocked receiver when possible.
 * Idle with a bound thread blocked on receive (or, under CONFIG_VTX,
 * running in a VM): deliver the badge directly. Waiting: wake the head of
 * the wait queue. Active: merge the badge into the pending one. */
void sendSignal(notification_t *ntfnPtr, word_t badge)
{
    switch (notification_ptr_get_state(ntfnPtr)) {
    case NtfnState_Idle: {
        tcb_t *tcb = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);

        /* Check if we are bound and that thread is waiting for a message */
        if (tcb) {
            if (thread_state_ptr_get_tsType(&tcb->tcbState) == ThreadState_BlockedOnReceive) {
                /* Send and start thread running */
                cancelIPC(tcb);
                setThreadState(tcb, ThreadState_Running);
                setRegister(tcb, badgeRegister, badge);
                possibleSwitchTo(tcb);
#ifdef CONFIG_VTX
            } else if (thread_state_ptr_get_tsType(&tcb->tcbState) == ThreadState_RunningVM) {
#ifdef ENABLE_SMP_SUPPORT
                /* The VM thread runs on another core: latch the badge and
                 * ask that core to re-check its bound notification. */
                if (tcb->tcbAffinity != getCurrentCPUIndex()) {
                    ntfn_set_active(ntfnPtr, badge);
                    doRemoteVMCheckBoundNotification(tcb->tcbAffinity, tcb);
                } else
#endif /* ENABLE_SMP_SUPPORT */
                {
                    /* Pull the thread out of VM execution and deliver. */
                    setThreadState(tcb, ThreadState_Running);
                    setRegister(tcb, badgeRegister, badge);
                    Arch_leaveVMAsyncTransfer(tcb);
                    possibleSwitchTo(tcb);
                }
#endif /* CONFIG_VTX */
            } else {
                /* In particular, this path is taken when a thread
                 * is waiting on a reply cap since BlockedOnReply
                 * would also trigger this path. I.e, a thread
                 * with a bound notification will not be awakened
                 * by signals on that bound notification if it is
                 * in the middle of an seL4_Call. */
                ntfn_set_active(ntfnPtr, badge);
            }
        } else {
            /* No bound thread: just make the notification active. */
            ntfn_set_active(ntfnPtr, badge);
        }
        break;
    }

    case NtfnState_Waiting: {
        tcb_queue_t ntfn_queue;
        tcb_t *dest;

        ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
        dest = ntfn_queue.head;

        /* Haskell error "WaitingNtfn Notification must have non-empty queue" */
        assert(dest);

        /* Dequeue TCB */
        ntfn_queue = tcbEPDequeue(dest, ntfn_queue);
        ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);

        /* set the thread state to idle if the queue is empty */
        if (!ntfn_queue.head) {
            notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        }

        setThreadState(dest, ThreadState_Running);
        setRegister(dest, badgeRegister, badge);
        possibleSwitchTo(dest);
        break;
    }

    case NtfnState_Active: {
        word_t badge2;

        /* Already pending: accumulate this badge into the stored one. */
        badge2 = notification_ptr_get_ntfnMsgIdentifier(ntfnPtr);
        badge2 |= badge;

        notification_ptr_set_ntfnMsgIdentifier(ntfnPtr, badge2);
        break;
    }
    }
}