/* Handle a receive-phase system call (e.g. seL4_Recv / seL4_NBRecv): look up
 * the capability named by the caller's capRegister and receive either IPC
 * from an endpoint or a signal from a notification object. */
static void handleRecv(bool_t isBlocking)
{
    word_t epCPtr;
    lookupCap_ret_t lu_ret;

    epCPtr = getRegister(ksCurThread, capRegister);

    lu_ret = lookupCap(ksCurThread, epCPtr);
#if defined(DEBUG) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.cap_type = cap_get_capType(lu_ret.cap);
#endif

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        /* current_lookup_fault has been set by lookupCap */
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        return;
    }

    switch (cap_get_capType(lu_ret.cap)) {
    case cap_endpoint_cap:
        if (unlikely(!cap_endpoint_cap_get_capCanReceive(lu_ret.cap))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }

        deleteCallerCap(ksCurThread);
        receiveIPC(ksCurThread, lu_ret.cap, isBlocking);
        break;

    case cap_notification_cap: {
        notification_t *ntfnPtr;
        tcb_t *boundTCB;
        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(lu_ret.cap));
        boundTCB = (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr);

        /* A notification may only be received on if it is receivable and
         * either unbound or bound to the current thread. */
        if (unlikely(!cap_notification_cap_get_capNtfnCanReceive(lu_ret.cap)
                     || (boundTCB && boundTCB != ksCurThread))) {
            current_lookup_fault = lookup_fault_missing_capability_new(0);
            current_fault = fault_cap_fault_new(epCPtr, true);
            handleFault(ksCurThread);
            break;
        }

        receiveSignal(ksCurThread, lu_ret.cap, isBlocking);
        break;
    }

    default:
        current_lookup_fault = lookup_fault_missing_capability_new(0);
        current_fault = fault_cap_fault_new(epCPtr, true);
        handleFault(ksCurThread);
        break;
    }
}
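/*
 * Context sketch: how the isBlocking argument is chosen. This is an
 * illustrative assumption about the call site, modelled on seL4's
 * handleSyscall() dispatch; it is not code from this file. SysRecv and
 * SysNBRecv are the syscall numbers behind seL4_Recv and seL4_NBRecv.
 */
static inline void dispatchRecvSketch(syscall_t syscall)
{
    switch (syscall) {
    case SysRecv:
        handleRecv(true);   /* blocking: the caller may be enqueued on the EP */
        break;
    case SysNBRecv:
        handleRecv(false);  /* polling: fails the transfer instead of blocking */
        break;
    default:
        /* other syscalls are handled elsewhere */
        break;
    }
}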
void cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        cancelSignal(tptr,
                     NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
        cte_t *slot, *callerCap;

        tptr->tcbFault = seL4_Fault_NullFault_new();

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(mdb_node_get_mdbNext(slot->cteMDBNode));
        if (callerCap) {
            /** GHOSTUPD: "(True,
                gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
            cteDeleteOne(callerCap);
        }
        break;
    }
    }
}
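/*
 * Context sketch: a typical caller of cancelIPC(). This mirrors the shape of
 * seL4's suspend() (the exact body varies between kernel versions, so treat
 * it as an approximation): a thread must be pulled off any endpoint or
 * notification queue before it is deactivated.
 */
void suspendSketch(tcb_t *target)
{
    cancelIPC(target);                            /* unblock from IPC, if blocked */
    setThreadState(target, ThreadState_Inactive); /* thread no longer runnable */
    tcbSchedDequeue(target);                      /* remove from scheduler queue */
}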
void receiveSignal(tcb_t *thread, cap_t cap, bool_t isBlocking)
{
    notification_t *ntfnPtr;

    ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap));

    switch (notification_ptr_get_state(ntfnPtr)) {
    case NtfnState_Idle:
    case NtfnState_Waiting: {
        tcb_queue_t ntfn_queue;

        if (isBlocking) {
            /* Block thread on notification object */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnNotification);
            thread_state_ptr_set_blockingObject(&thread->tcbState,
                                                NTFN_REF(ntfnPtr));
            scheduleTCB(thread);

            /* Enqueue TCB */
            ntfn_queue = ntfn_ptr_get_queue(ntfnPtr);
            ntfn_queue = tcbEPAppend(thread, ntfn_queue);

            notification_ptr_set_state(ntfnPtr, NtfnState_Waiting);
            ntfn_ptr_set_queue(ntfnPtr, ntfn_queue);
        } else {
            doNBRecvFailedTransfer(thread);
        }
        break;
    }

    case NtfnState_Active:
        setRegister(
            thread, badgeRegister,
            notification_ptr_get_ntfnMsgIdentifier(ntfnPtr));
        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        break;
    }
}
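/*
 * For reference: the non-blocking failure path. This sketch matches the
 * behaviour of seL4's doNBRecvFailedTransfer() helper (writing a zero badge
 * to tell the caller that no signal was pending); consult the kernel source
 * for the authoritative definition.
 */
static inline void doNBRecvFailedTransferSketch(tcb_t *thread)
{
    /* Set the badge register to 0 to indicate there was no pending signal */
    setRegister(thread, badgeRegister, 0);
}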
void handleInterrupt(irq_t irq)
{
    if (unlikely(irq > maxIRQ)) {
        /* mask, ack and pretend it didn't happen. We assume that because
         * the interrupt controller for the platform returned this IRQ that
         * it is safe to use in mask and ack operations, even though it is
         * above the claimed maxIRQ. i.e. we're assuming maxIRQ is wrong */
        printf("Received IRQ %d, which is above the platform's maxIRQ of %d\n",
               (int)irq, (int)maxIRQ);
        maskInterrupt(true, irq);
        ackInterrupt(irq);
        return;
    }

    switch (intStateIRQTable[irq]) {
    case IRQSignal: {
        cap_t cap;

        cap = intStateIRQNode[irq].cap;

        if (cap_get_capType(cap) == cap_notification_cap &&
            cap_notification_cap_get_capNtfnCanSend(cap)) {
            sendSignal(NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap)),
                       cap_notification_cap_get_capNtfnBadge(cap));
        } else {
#ifdef CONFIG_IRQ_REPORTING
            printf("Undelivered IRQ: %d\n", (int)irq);
#endif
        }
        maskInterrupt(true, irq);
        break;
    }

    case IRQTimer:
        timerTick();
        resetTimer();
        break;

    case IRQReserved:
#ifdef CONFIG_IRQ_REPORTING
        printf("Received reserved IRQ: %d\n", (int)irq);
#endif
        handleReservedIRQ(irq);
        break;

    case IRQInactive:
        /*
         * This case shouldn't happen anyway unless the hardware or
         * platform code is broken. Hopefully masking it again should make
         * the interrupt go away.
         */
        maskInterrupt(true, irq);
#ifdef CONFIG_IRQ_REPORTING
        printf("Received disabled IRQ: %d\n", (int)irq);
#endif
        break;

    default:
        /* No corresponding haskell error */
        fail("Invalid IRQ state");
    }

    ackInterrupt(irq);
}
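/*
 * Context sketch: how intStateIRQTable entries are established. This follows
 * the shape of seL4's setIRQState(); treat it as illustrative rather than
 * authoritative. Note that IRQInactive entries are left masked, which is why
 * handleInterrupt() re-masks the line in its IRQInactive case above.
 */
void setIRQStateSketch(irq_state_t irqState, irq_t irq)
{
    intStateIRQTable[irq] = irqState;
    /* keep the line masked unless the IRQ is in an active state */
    maskInterrupt(irqState == IRQInactive, irq);
}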