void benchmark_track_utilisation_dump(void)
{
    uint64_t *buffer = (uint64_t *)&(((seL4_IPCBuffer *)lookupIPCBuffer(true,
                                                                        NODE_STATE(ksCurThread)))->msg[0]);
    tcb_t *tcb = NULL;
    word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
    lookupCap_ret_t lu_ret;
    word_t cap_type;

    lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
    /* ensure we got a TCB cap */
    cap_type = cap_get_capType(lu_ret.cap);
    if (cap_type != cap_thread_cap) {
        userError("SysBenchmarkGetThreadUtilisation: cap is not a TCB, ignoring request");
        return;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));
    buffer[BENCHMARK_TCB_UTILISATION] = tcb->benchmark.utilisation; /* Requested thread utilisation */
    buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION] =
        NODE_STATE(ksIdleThread)->benchmark.utilisation; /* Idle thread utilisation of current CPU */
#ifdef ENABLE_SMP_SUPPORT
    buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] =
        NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity)->benchmark.utilisation; /* Idle thread utilisation of CPU the TCB is running on */
#else
    buffer[BENCHMARK_IDLE_TCBCPU_UTILISATION] = buffer[BENCHMARK_IDLE_LOCALCPU_UTILISATION];
#endif

#ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT
    buffer[BENCHMARK_TOTAL_UTILISATION] =
        (ccnt_num_overflows * 0xFFFFFFFFU) + benchmark_end_time - benchmark_start_time;
#else
    buffer[BENCHMARK_TOTAL_UTILISATION] = benchmark_end_time - benchmark_start_time; /* Overall time */
#endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */
}
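/*
 * Illustrative user-side counterpart (a hedged sketch, not kernel source):
 * with CONFIG_BENCHMARK_TRACK_UTILISATION enabled, a thread invokes the
 * seL4_BenchmarkGetThreadUtilisation() libsel4 stub and then reads the
 * 64-bit words that benchmark_track_utilisation_dump() above wrote into its
 * IPC buffer. The BENCHMARK_* indices come from libsel4; exact availability
 * depends on kernel configuration.
 */
#include <stdint.h>
#include <stdio.h>
#include <sel4/sel4.h>
#include <sel4/benchmark_utilisation_types.h>

static void print_utilisation(seL4_CPtr tcb_cap)
{
    /* The kernel fills the caller's IPC buffer, as shown above. */
    seL4_BenchmarkGetThreadUtilisation(tcb_cap);

    uint64_t *values = (uint64_t *)&seL4_GetIPCBuffer()->msg[0];
    printf("thread cycles: %llu of %llu total (local idle: %llu)\n",
           (unsigned long long)values[BENCHMARK_TCB_UTILISATION],
           (unsigned long long)values[BENCHMARK_TOTAL_UTILISATION],
           (unsigned long long)values[BENCHMARK_IDLE_LOCALCPU_UTILISATION]);
}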
exception_t handleUserLevelDebugException(int int_vector)
{
    tcb_t *ct;
    getAndResetActiveBreakpoint_t active_bp;
    testAndResetSingleStepException_t single_step_info;

#if defined(CONFIG_DEBUG_BUILD) || defined(CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES)
    ksKernelEntry.path = Entry_UserLevelFault;
    ksKernelEntry.word = int_vector;
#else
    (void)int_vector;
#endif /* CONFIG_DEBUG_BUILD || CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_start();
#endif

    ct = NODE_STATE(ksCurThread);

    /* Software break request (INT3) is detected by the vector number */
    if (int_vector == int_software_break_request) {
        current_fault = seL4_Fault_DebugException_new(getRestartPC(NODE_STATE(ksCurThread)),
                                                      0, seL4_SoftwareBreakRequest);
    } else {
        /* Hardware breakpoint trigger is detected using DR6 */
        active_bp = getAndResetActiveBreakpoint(ct);
        if (active_bp.bp_num >= 0) {
            current_fault = seL4_Fault_DebugException_new(active_bp.vaddr,
                                                          active_bp.bp_num, active_bp.reason);
        } else {
            single_step_info = testAndResetSingleStepException(ct);
            if (single_step_info.ret == true) {
                /* If the caller asked us to skip over N instructions before
                 * generating the next single-step breakpoint, we shouldn't
                 * bother to construct a fault message until we've skipped N
                 * instructions.
                 */
                if (singleStepFaultCounterReady(ct) == false) {
                    return EXCEPTION_NONE;
                }
                current_fault = seL4_Fault_DebugException_new(single_step_info.instr_vaddr,
                                                              0, seL4_SingleStep);
            } else {
                return EXCEPTION_SYSCALL_ERROR;
            }
        }
    }

    handleFault(NODE_STATE(ksCurThread));

    schedule();
    activateThread();

    return EXCEPTION_NONE;
}
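/*
 * Hedged sketch of the user-level side: a fault-handler thread waits on a
 * fault endpoint and decodes the debug-exception message that the fault
 * constructed above eventually produces. The seL4_DebugException_* message
 * indices and the seL4_Fault_DebugException label are taken from libsel4
 * (CONFIG_HARDWARE_DEBUG_API builds); handle_breakpoint() is a hypothetical
 * application hook, stubbed here so the sketch is self-contained.
 */
#include <sel4/sel4.h>

static void handle_breakpoint(seL4_Word badge, seL4_Word ip,
                              seL4_Word reason, seL4_Word bp_num)
{
    /* hypothetical application logic */
    (void)badge; (void)ip; (void)reason; (void)bp_num;
}

static void debug_fault_loop(seL4_CPtr fault_ep)
{
    for (;;) {
        seL4_Word badge;
        seL4_MessageInfo_t info = seL4_Recv(fault_ep, &badge);

        if (seL4_MessageInfo_get_label(info) == seL4_Fault_DebugException) {
            seL4_Word ip     = seL4_GetMR(seL4_DebugException_FaultIP);
            seL4_Word reason = seL4_GetMR(seL4_DebugException_ExceptionReason);
            seL4_Word bp_num = seL4_GetMR(seL4_DebugException_BreakpointNumber);
            handle_breakpoint(badge, ip, reason, bp_num);
        }

        /* An empty reply resumes the faulting thread. */
        seL4_Reply(seL4_MessageInfo_new(0, 0, 0, 0));
    }
}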
exception_t decodeIRQHandlerInvocation(word_t invLabel, irq_t irq,
                                       extra_caps_t excaps)
{
    switch (invLabel) {
    case IRQAckIRQ:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        invokeIRQHandler_AckIRQ(irq);
        return EXCEPTION_NONE;

    case IRQSetIRQHandler: {
        cap_t ntfnCap;
        cte_t *slot;

        if (excaps.excaprefs[0] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }
        ntfnCap = excaps.excaprefs[0]->cap;
        slot = excaps.excaprefs[0];

        if (cap_get_capType(ntfnCap) != cap_notification_cap ||
            !cap_notification_cap_get_capNtfnCanSend(ntfnCap)) {
            if (cap_get_capType(ntfnCap) != cap_notification_cap) {
                userError("IRQSetHandler: provided cap is not a notification capability.");
            } else {
                userError("IRQSetHandler: caller does not have send rights on the notification.");
            }
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        invokeIRQHandler_SetIRQHandler(irq, ntfnCap, slot);
        return EXCEPTION_NONE;
    }

    case IRQClearIRQHandler:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        invokeIRQHandler_ClearIRQHandler(irq);
        return EXCEPTION_NONE;

    default:
        userError("IRQHandler: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
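/*
 * Hedged user-side sketch of the invocations decoded above: bind a
 * notification to an IRQHandler cap, then wait for and acknowledge
 * interrupts. seL4_IRQHandler_SetNotification() and seL4_IRQHandler_Ack()
 * are the libsel4 stubs for IRQSetIRQHandler and IRQAckIRQ; the cap
 * parameter names are illustrative.
 */
#include <assert.h>
#include <sel4/sel4.h>

static void irq_wait_loop(seL4_IRQHandler handler, seL4_CPtr ntfn)
{
    /* Route the IRQ to our notification; the notification cap must carry
     * send rights, as checked in decodeIRQHandlerInvocation() above. */
    seL4_Error err = seL4_IRQHandler_SetNotification(handler, ntfn);
    assert(err == seL4_NoError);

    for (;;) {
        seL4_Word badge;
        seL4_Wait(ntfn, &badge);      /* blocks until the IRQ fires */
        /* ... service the device ... */
        seL4_IRQHandler_Ack(handler); /* re-enable delivery of this IRQ */
    }
}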
static void sendRunqueues(void)
{
    word_t i;
    sendWord((unsigned long)NODE_STATE(ksCurThread));
    for (i = 0; i < NUM_READY_QUEUES; i++) {
        tcb_t *current = NODE_STATE(ksReadyQueues[i]).head;
        if (current != 0) {
            while (current != NODE_STATE(ksReadyQueues[i]).end) {
                sendWord((unsigned long)current);
                current = current->tcbSchedNext;
            }
            sendWord((unsigned long)current);
        }
    }
}
exception_t decodeIRQControlInvocation(word_t invLabel, word_t length,
                                       cte_t *srcSlot, extra_caps_t excaps,
                                       word_t *buffer)
{
    if (invLabel == IRQIssueIRQHandler) {
        word_t index, depth, irq_w;
        irq_t irq;
        cte_t *destSlot;
        cap_t cnodeCap;
        lookupSlot_ret_t lu_ret;
        exception_t status;

        if (length < 3 || excaps.excaprefs[0] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }
        irq_w = getSyscallArg(0, buffer);
        irq = (irq_t) irq_w;
        index = getSyscallArg(1, buffer);
        depth = getSyscallArg(2, buffer);

        cnodeCap = excaps.excaprefs[0]->cap;

        status = Arch_checkIRQ(irq);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        if (isIRQActive(irq)) {
            current_syscall_error.type = seL4_RevokeFirst;
            userError("Rejecting request for IRQ %u. Already active.", (int)irq);
            return EXCEPTION_SYSCALL_ERROR;
        }

        lu_ret = lookupTargetSlot(cnodeCap, index, depth);
        if (lu_ret.status != EXCEPTION_NONE) {
            userError("Target slot for new IRQ Handler cap invalid: cap %lu, IRQ %u.",
                      getExtraCPtr(buffer, 0), (int)irq);
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (status != EXCEPTION_NONE) {
            userError("Target slot for new IRQ Handler cap not empty: cap %lu, IRQ %u.",
                      getExtraCPtr(buffer, 0), (int)irq);
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeIRQControl(irq, destSlot, srcSlot);
    } else {
        return Arch_decodeIRQControlInvocation(invLabel, length, srcSlot, excaps, buffer);
    }
}
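/*
 * Hedged sketch of the matching user-side call: seL4_IRQControl_Get() is the
 * libsel4 stub for IRQIssueIRQHandler. It asks the kernel to mint a new
 * IRQHandler cap for `irq` into the slot addressed by (root, dest_index,
 * dest_depth); the parameter names below are illustrative.
 */
#include <sel4/sel4.h>

static seL4_Error get_irq_handler(seL4_CPtr irq_control, seL4_Word irq,
                                  seL4_CNode root, seL4_Word dest_index,
                                  seL4_Uint8 dest_depth)
{
    /* Fails with seL4_RevokeFirst if a handler for this IRQ already exists,
     * mirroring the isIRQActive() check above. */
    return seL4_IRQControl_Get(irq_control, irq, root, dest_index, dest_depth);
}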
void benchmark_track_reset_utilisation(void)
{
    tcb_t *tcb = NULL;
    word_t tcb_cptr = getRegister(NODE_STATE(ksCurThread), capRegister);
    lookupCap_ret_t lu_ret;
    word_t cap_type;

    lu_ret = lookupCap(NODE_STATE(ksCurThread), tcb_cptr);
    /* ensure we got a TCB cap */
    cap_type = cap_get_capType(lu_ret.cap);
    if (cap_type != cap_thread_cap) {
        userError("SysBenchmarkResetThreadUtilisation: cap is not a TCB, ignoring request");
        return;
    }

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(lu_ret.cap));
    tcb->benchmark.utilisation = 0;
    tcb->benchmark.schedule_start_time = 0;
}
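/*
 * Hedged user-side sketch: seL4_BenchmarkResetThreadUtilisation() is the
 * libsel4 stub that reaches benchmark_track_reset_utilisation() above. A
 * typical measurement window resets the counters, runs the workload, stops
 * accounting, and dumps the result. workload_run() is a hypothetical
 * workload under test, declared here only so the sketch is self-contained.
 */
#include <sel4/sel4.h>

static void workload_run(void); /* hypothetical workload */

static void measure_window(seL4_CPtr tcb_cap)
{
    seL4_BenchmarkResetThreadUtilisation(tcb_cap);
    seL4_BenchmarkResetLog();                    /* restart cycle accounting */
    workload_run();
    seL4_BenchmarkFinalizeLog();                 /* stop accounting */
    seL4_BenchmarkGetThreadUtilisation(tcb_cap); /* results land in the IPC buffer */
}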
void fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    dom_t dom;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok, and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (NODE_STATE(ksCurThread)->tcbBoundNotification &&
        notification_ptr_get_state(NODE_STATE(ksCurThread)->tcbBoundNotification) == NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* ensure we are not single stepping the caller in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (caller->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Check that the caller has not faulted, in which case a fault
     * reply is generated instead. */
    fault_type = seL4_Fault_get_seL4_FaultType(caller->tcbFault);
    if (unlikely(fault_type != seL4_Fault_NullFault)) {
        slowpath(SysReplyRecv);
    }

    /* Get the caller's vtable root cap. */
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif
#ifdef CONFIG_ARCH_X86_64
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID(newVTable);
#endif
#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif
#ifdef CONFIG_ARCH_RISCV
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* Ensure the original caller can be scheduled directly. */
    dom = maxDom ? ksCurDomain : 0;
    if (unlikely(!isHighestPrio(dom, caller->tcbPriority))) {
        slowpath(SysReplyRecv);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != caller->tcbAffinity)) {
        slowpath(SysReplyRecv);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &NODE_STATE(ksCurThread)->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);
    thread_state_ptr_set_blockingIPCCanGrant(&NODE_STATE(ksCurThread)->tcbState,
                                             cap_endpoint_cap_get_capCanGrant(ep_cap));

    /* Place the thread in the endpoint queue */
    endpointTail = endpoint_ptr_get_epQueue_tail_fp(ep_ptr);
    if (likely(!endpointTail)) {
        NODE_STATE(ksCurThread)->tcbEPPrev = NULL;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = NODE_STATE(ksCurThread);
        NODE_STATE(ksCurThread)->tcbEPPrev = endpointTail;
        NODE_STATE(ksCurThread)->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(NODE_STATE(ksCurThread)),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* We know there's no fault, so go straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
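/*
 * Hedged sketch of the user-side pattern this fastpath serves: a server that
 * answers a request and atomically waits for the next one via
 * seL4_ReplyRecv(). When every check above passes, this round trip never
 * leaves the fastpath. process() is a hypothetical request handler.
 */
#include <sel4/sel4.h>

static seL4_MessageInfo_t process(seL4_Word badge, seL4_MessageInfo_t req)
{
    /* hypothetical application logic: echo the request back */
    (void)badge;
    return req;
}

static void server_loop(seL4_CPtr ep)
{
    seL4_Word badge;
    seL4_MessageInfo_t req = seL4_Recv(ep, &badge);

    for (;;) {
        seL4_MessageInfo_t reply = process(badge, req);
        /* Reply to the caller and block for the next request in one syscall. */
        req = seL4_ReplyRecv(ep, reply, &badge);
    }
}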
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    vspace_root_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;
    dom_t dom;
    word_t replyCanGrant;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = seL4_Fault_get_seL4_FaultType(NODE_STATE(ksCurThread)->tcbFault);

    /* Check there's no extra caps, the length is ok, and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != seL4_Fault_NullFault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* ensure we are not single stepping the destination in ia32 */
#if defined(CONFIG_HARDWARE_DEBUG_API) && defined(CONFIG_ARCH_IA32)
    if (dest->tcbArch.tcbContext.breakpointState.single_step_enabled) {
        slowpath(SysCall);
    }
#endif

    /* Get the destination's vtable root cap. */
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
    cap_pd = cap_vtable_cap_get_vspace_root_fp(newVTable);

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(!isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif
#ifdef CONFIG_ARCH_X86_64
    /* borrow the stored_hw_asid for PCID */
    stored_hw_asid.words[0] = cap_pml4_cap_get_capPML4MappedASID_fp(newVTable);
#endif
#ifdef CONFIG_ARCH_AARCH64
    stored_hw_asid.words[0] = cap_page_global_directory_cap_get_capPGDMappedASID(newVTable);
#endif
#ifdef CONFIG_ARCH_RISCV
    /* Get HW ASID */
    stored_hw_asid.words[0] = cap_page_table_cap_get_capPTMappedASID(newVTable);
#endif

    /* let gcc optimise this out for 1 domain */
    dom = maxDom ? ksCurDomain : 0;
    /* ensure only the idle thread or lower prio threads are present in the scheduler */
    if (likely(dest->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority) &&
        !isHighestPrio(dom, dest->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has grant or grant-reply rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap) &&
                 !cap_endpoint_cap_get_capCanGrantReply(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef CONFIG_ARCH_AARCH32
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the destination is in the current domain and can be scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

#ifdef ENABLE_SMP_SUPPORT
    /* Ensure both threads have the same affinity */
    if (unlikely(NODE_STATE(ksCurThread)->tcbAffinity != dest->tcbAffinity)) {
        slowpath(SysCall);
    }
#endif /* ENABLE_SMP_SUPPORT */

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.is_fastpath = true;
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&NODE_STATE(ksCurThread)->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    replyCanGrant = thread_state_ptr_get_blockingIPCCanGrant(&dest->tcbState);
    cap_reply_cap_ptr_new_np(&callerSlot->cap, replyCanGrant, 0,
                             TCB_REF(NODE_STATE(ksCurThread)));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs(length, NODE_STATE(ksCurThread), dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

    fastpath_restore(badge, msgInfo, NODE_STATE(ksCurThread));
}
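/*
 * Hedged client-side sketch of the call this fastpath accelerates:
 * seL4_Call() sends on the endpoint and blocks for the reply in a single
 * syscall, which is what puts the caller into ThreadState_BlockedOnReply
 * above. The one-word message layout is illustrative.
 */
#include <sel4/sel4.h>

static seL4_Word rpc_one_word(seL4_CPtr ep, seL4_Word request)
{
    /* No cap transfer, one message register: keeps the IPC on the fastpath. */
    seL4_MessageInfo_t info = seL4_MessageInfo_new(0, 0, 0, 1);
    seL4_SetMR(0, request);

    seL4_MessageInfo_t reply = seL4_Call(ep, info);
    (void)reply;
    return seL4_GetMR(0);
}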
exception_t Arch_decodeIRQControlInvocation(word_t invLabel, word_t length,
                                            cte_t *srcSlot, extra_caps_t excaps,
                                            word_t *buffer)
{
    word_t index, depth;
    cte_t *destSlot;
    cap_t cnodeCap;
    lookupSlot_ret_t lu_ret;
    exception_t status;
    irq_t irq;
    word_t vector;

    if (!config_set(CONFIG_IRQ_IOAPIC)) {
        userError("IRQControl: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* check the common parameters */
    if (length < 7 || excaps.excaprefs[0] == NULL) {
        userError("IRQControl: Truncated message");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    index = getSyscallArg(0, buffer);
    depth = getSyscallArg(1, buffer);
    cnodeCap = excaps.excaprefs[0]->cap;
    irq = getSyscallArg(6, buffer);
    if (irq > irq_user_max - irq_user_min) {
        userError("IRQControl: Invalid irq %ld should be between 0-%ld",
                  (long)irq, (long)(irq_user_max - irq_user_min));
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = irq_user_max - irq_user_min;
        return EXCEPTION_SYSCALL_ERROR;
    }
    irq += irq_user_min;

    vector = (word_t)irq + IRQ_INT_OFFSET;

    lu_ret = lookupTargetSlot(cnodeCap, index, depth);
    if (lu_ret.status != EXCEPTION_NONE) {
        return lu_ret.status;
    }
    destSlot = lu_ret.slot;

    status = ensureEmptySlot(destSlot);
    if (status != EXCEPTION_NONE) {
        return status;
    }

    switch (invLabel) {
    case X86IRQIssueIRQHandlerIOAPIC: {
        word_t ioapic = getSyscallArg(2, buffer);
        word_t pin = getSyscallArg(3, buffer);
        word_t level = getSyscallArg(4, buffer);
        word_t polarity = getSyscallArg(5, buffer);

        if (isIRQActive(irq)) {
            userError("IOAPICGet: IRQ %d is already active.", (int)irq);
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ioapic_decode_map_pin_to_vector(ioapic, pin, level, polarity, vector);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeIssueIRQHandlerIOAPIC(irq, ioapic, pin, level, polarity,
                                           vector, destSlot, srcSlot);
    }
    break;
    case X86IRQIssueIRQHandlerMSI: {
        word_t pci_bus = getSyscallArg(2, buffer);
        word_t pci_dev = getSyscallArg(3, buffer);
        word_t pci_func = getSyscallArg(4, buffer);
        word_t handle = getSyscallArg(5, buffer);
        x86_irq_state_t irqState;
        /* until we support msi interrupt remapping through vt-d we ignore the
         * vector and trust the user */
        (void)vector;

        if (isIRQActive(irq)) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (pci_bus > PCI_BUS_MAX) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = 0;
            current_syscall_error.rangeErrorMax = PCI_BUS_MAX;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (pci_dev > PCI_DEV_MAX) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = 0;
            current_syscall_error.rangeErrorMax = PCI_DEV_MAX;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (pci_func > PCI_FUNC_MAX) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = 0;
            current_syscall_error.rangeErrorMax = PCI_FUNC_MAX;
            return EXCEPTION_SYSCALL_ERROR;
        }

        irqState = x86_irq_state_irq_msi_new(pci_bus, pci_dev, pci_func, handle);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return Arch_invokeIRQControl(irq, destSlot, srcSlot, irqState);
    }
    break;
    default:
        userError("IRQControl: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
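/*
 * Hedged user-side sketch of the IOAPIC path decoded above, assuming the
 * libsel4 seL4_IRQControl_GetIOAPIC() stub marshals its arguments in the
 * same order as the getSyscallArg() indices in the kernel code (index,
 * depth, ioapic, pin, level, polarity, then the user-level IRQ number as
 * argument 6). All parameter names below are illustrative.
 */
#include <sel4/sel4.h>

static seL4_Error get_ioapic_handler(seL4_CPtr irq_control, seL4_CNode root,
                                     seL4_Word dest_index, seL4_Uint8 dest_depth,
                                     seL4_Word ioapic, seL4_Word pin,
                                     seL4_Word level, seL4_Word polarity,
                                     seL4_Word user_irq)
{
    /* Fails with seL4_RangeError or seL4_RevokeFirst under the same
     * conditions the kernel checks above. */
    return seL4_IRQControl_GetIOAPIC(irq_control, root, dest_index, dest_depth,
                                     ioapic, pin, level, polarity, user_irq);
}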