/*
 * Request a transmit-completion timestamp for an mbuf.
 *
 * Registers the given completion callback on the packet header: records the
 * callback's slot in the pkt_compl_callbacks bitmask, and on the first
 * request for this mbuf assigns a fresh completion context ID (monotonically
 * drawn from mbuf_tx_compl_index) and sets PKTF_TX_COMPL_TS_REQ.
 *
 * @param m        mbuf with a packet header (M_PKTHDR must be set).
 * @param pktid    out: receives the packet's completion context ID.
 * @param callback completion function previously registered with the
 *                 tx-completion table (looked up by index here).
 *
 * @return 0 on success; EINVAL on bad arguments; ENOENT if the callback
 *         is not registered.
 */
errno_t
mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
    mbuf_tx_compl_func callback)
{
	size_t i;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
	    pktid == NULL) {
		return (EINVAL);
	}

	i = get_tx_compl_callback_index(callback);
	if (i == UINT32_MAX) {
		return (ENOENT);
	}

#if (DEBUG || DEVELOPMENT)
	/*
	 * pkt_compl_callbacks is used as a BITMASK below (1U << i), so the
	 * valid range of i is the number of bits in the field, not the
	 * number of bytes that bare sizeof() yields.  The previous check
	 * (i < sizeof(...)) would fire for perfectly valid indices >= 4.
	 */
	VERIFY(i < (sizeof(m->m_pkthdr.pkt_compl_callbacks) * 8));
#endif /* (DEBUG || DEVELOPMENT) */

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		/* First timestamp request for this packet: reset the mask
		 * and hand out a new completion context ID. */
		m->m_pkthdr.pkt_compl_callbacks = 0;
		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
		m->m_pkthdr.pkt_compl_context =
		    atomic_add_32_ov(&mbuf_tx_compl_index, 1);

#if (DEBUG || DEVELOPMENT)
		if (mbuf_tx_compl_debug != 0) {
			OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
		}
#endif /* (DEBUG || DEVELOPMENT) */
	}
	/*
	 * Use an unsigned constant: (1 << 31) on a signed int shifts into
	 * the sign bit, which is undefined behavior in C.  The callback
	 * index is assumed bounded by the bit width of the mask — enforced
	 * above on DEBUG/DEVELOPMENT builds only.
	 */
	m->m_pkthdr.pkt_compl_callbacks |= (1U << i);
	*pktid = m->m_pkthdr.pkt_compl_context;

	return (0);
}
/*
 * Assign a new, unique work-interval ID to a thread.
 *
 * Under the thread mutex: if the thread already carries a non-zero
 * work_interval_id the call fails; otherwise a fresh ID is drawn from the
 * global unique_work_interval_id counter and stored both on the thread and
 * in *work_interval_id.
 *
 * NOTE(review): OSIncrementAtomic64 returns the counter's value BEFORE the
 * increment, so the first ID handed out equals the counter's initial value —
 * presumably that initializer is non-zero, since a zero ID would read as
 * "unassigned" here; the initializer is not visible in this file — confirm.
 *
 * @param thread            thread to tag; its mutex is taken and released.
 * @param work_interval_id  out: receives the newly assigned ID on success.
 *
 * @return KERN_SUCCESS, or KERN_INVALID_VALUE if the thread already has
 *         a work-interval ID.
 */
kern_return_t
thread_policy_create_work_interval(
	thread_t	thread,
	uint64_t	*work_interval_id)
{
	kern_return_t kr = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->work_interval_id != 0) {
		/* thread already owns a work-interval ID */
		kr = KERN_INVALID_VALUE;
	} else {
		thread->work_interval_id = OSIncrementAtomic64(
		    (volatile int64_t *)&unique_work_interval_id);
		*work_interval_id = thread->work_interval_id;
	}

	thread_mtx_unlock(thread);
	return kr;
}