/*
 * Calculate the absolute or boot-relative time from the
 * machine-specific fast timecounter and the published timehands
 * structure read from the shared page.
 *
 * The lockless reading scheme is similar to the one used to read the
 * in-kernel timehands, see sys/kern/kern_tc.c:binuptime().  This code
 * is based on the kernel implementation.
 *
 * Returns 0 on success and fills *bt; returns ENOSYS when the shared
 * page is disabled or published with an algorithm other than
 * VDSO_TH_ALGO_1.  When abs is non-zero the boot time is added, making
 * the result absolute rather than boot-relative.
 */
static int
binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
{
	struct vdso_timehands *th;
	uint32_t curr, gen;

	do {
		if (!tk->tk_enabled)
			return (ENOSYS);

		/* Snapshot the currently published timehands slot. */
		curr = atomic_load_acq_32(&tk->tk_current);
		th = &tk->tk_th[curr];
		if (th->th_algo != VDSO_TH_ALGO_1)
			return (ENOSYS);
		gen = atomic_load_acq_32(&th->th_gen);
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
		if (abs)
			bintime_add(bt, &th->th_boottime);

		/*
		 * Ensure that the load of th_offset is completed
		 * before the load of th_gen.
		 */
		atomic_thread_fence_acq();
		/*
		 * Retry when the kernel switched timehands slots or
		 * bumped the generation while we were reading, or when
		 * gen == 0 (slot is being updated).
		 */
	} while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
	return (0);
}
/* * Calculate the absolute or boot-relative time from the * machine-specific fast timecounter and the published timehands * structure read from the shared page. * * The lockless reading scheme is similar to the one used to read the * in-kernel timehands, see sys/kern/kern_tc.c:binuptime(). This code * is based on the kernel implementation. */ static int binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs) { struct vdso_timehands *th; uint32_t curr, gen; u_int delta; int error; do { if (!tk->tk_enabled) return (ENOSYS); curr = atomic_load_acq_32(&tk->tk_current); th = &tk->tk_th[curr]; gen = atomic_load_acq_32(&th->th_gen); *bt = th->th_offset; error = tc_delta(th, &delta); if (error == EAGAIN) continue; if (error != 0) return (error); bintime_addx(bt, th->th_scale * delta); if (abs) bintime_add(bt, &th->th_boottime); /* * Ensure that the load of th_offset is completed * before the load of th_gen. */ atomic_thread_fence_acq(); } while (curr != tk->tk_current || gen == 0 || gen != th->th_gen); return (0); }
/*
 * Report the transport's acknowledgement position via *ack: the
 * atomically sampled sent-byte counter (xp_snt_cnt) minus whatever is
 * still queued, unsent, in the socket's send buffer.
 * NOTE(review): the "bytes handed to the socket" reading of xp_snt_cnt
 * is inferred from its name — confirm against the updater of that field.
 * Always reports success.
 */
static bool_t
svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
{
	uint32_t sent_total;

	sent_total = atomic_load_acq_32(&xprt->xp_snt_cnt);
	*ack = sent_total - sbused(&xprt->xp_socket->so_snd);
	return (TRUE);
}
/*
 * Read the current value of the atomic counter *v, dispatching to the
 * native primitive of the target platform.
 */
inline int ATOMIC_READ(ATOMIC_T *v)
{
#ifdef PLATFORM_LINUX
	return atomic_read(v);
#elif defined(PLATFORM_WINDOWS)
	/*
	 * Plain load; assumed atomic for an aligned 32-bit value on the
	 * supported Windows targets — TODO confirm, no Interlocked read
	 * primitive is used here ("other choice????" per the original
	 * author).
	 */
	return *v;
#elif defined(PLATFORM_FREEBSD)
	/* Load with acquire semantics. */
	return atomic_load_acq_32(v);
#endif
}
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v) { #ifdef PLATFORM_LINUX return atomic_dec_return(v); #elif defined(PLATFORM_WINDOWS) return InterlockedDecrement(v); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,1); return atomic_load_acq_32(v); #endif }
inline int ATOMIC_INC_RETURN(ATOMIC_T *v) { #ifdef PLATFORM_LINUX return atomic_inc_return(v); #elif defined(PLATFORM_WINDOWS) return InterlockedIncrement(v); #elif defined(PLATFORM_FREEBSD) atomic_add_int(v,1); return atomic_load_acq_32(v); #endif }
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i) { #ifdef PLATFORM_LINUX return atomic_sub_return(i,v); #elif defined(PLATFORM_WINDOWS) return InterlockedAdd(v,-i); #elif defined(PLATFORM_FREEBSD) atomic_subtract_int(v,i); return atomic_load_acq_32(v); #endif }
/*
 * Lazily set up the BMan portal for the current CPU and return its
 * handle, or NULL on failure / before the driver is attached.
 *
 * The per-CPU slot sc_dp[cpu].dp_ph doubles as a lock:
 *   0  - portal not yet set up (free to claim),
 *   -1 - another thread is currently initializing it,
 *   else - the already-initialized portal handle.
 * The thread runs pinned to its CPU (sched_pin) for the whole setup so
 * that cpu and the per-CPU slot stay consistent.
 */
t_Handle
bman_portal_setup(struct bman_softc *bsc)
{
	struct dpaa_portals_softc *sc;
	t_BmPortalParam bpp;
	t_Handle portal;
	unsigned int cpu, p;

	/* Return NULL if we're not ready or while detach */
	if (bp_sc == NULL)
		return (NULL);
	sc = bp_sc;

	sched_pin();
	portal = NULL;
	cpu = PCPU_GET(cpuid);

	/*
	 * Check if portal is ready: try to claim the slot by moving it
	 * from 0 (free) to -1 (initializing).  Failure means some other
	 * state is in the slot; inspect it below.
	 */
	while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    0, -1) == 0) {
		p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

		/* Return if portal is already initialized */
		if (p != 0 && p != -1) {
			sched_unpin();
			return ((t_Handle)p);
		}

		/*
		 * Not initialized and "owned" by another thread: yield
		 * the CPU voluntarily and retry the claim.
		 */
		thread_lock(curthread);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);
	}

	/* Map portal registers */
	dpaa_portal_map_registers(sc);

	/* Configure and initialize portal */
	bpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
	bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
	bpp.h_Bm = bsc->sc_bh;
	bpp.swPortalId = cpu;
	bpp.irq = (int)sc->sc_dp[cpu].dp_ires;

	portal = BM_PORTAL_Config(&bpp);
	if (portal == NULL)
		goto err;
	if (BM_PORTAL_Init(portal) != E_OK)
		goto err;

	/* Publish the handle, releasing the -1 "initializing" marker. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    (uint32_t)portal);
	sched_unpin();
	return (portal);

err:
	if (portal != NULL)
		BM_PORTAL_Free(portal);
	/* Reset the slot to 0 so a later caller can retry the setup. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
	sched_unpin();
	return (NULL);
}
/*
 * Lazily set up the QMan portal for the current CPU and return its
 * handle, or NULL on failure / before the driver is attached.
 *
 * The per-CPU slot sc_dp[cpu].dp_ph doubles as a lock:
 *   0  - portal not yet set up (free to claim),
 *   -1 - another thread is currently initializing it,
 *   else - the already-initialized portal handle.
 * The thread runs pinned to its CPU (sched_pin) for the whole setup so
 * that cpu and the per-CPU slot stay consistent.
 */
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
	struct dpaa_portals_softc *sc;
	t_QmPortalParam qpp;
	unsigned int cpu, p;
	t_Handle portal;

	/* Return NULL if we're not ready or while detach */
	if (qp_sc == NULL)
		return (NULL);
	sc = qp_sc;

	sched_pin();
	portal = NULL;
	cpu = PCPU_GET(cpuid);

	/*
	 * Check if portal is ready: try to claim the slot by moving it
	 * from 0 (free) to -1 (initializing).  Failure means some other
	 * state is in the slot; inspect it below.
	 */
	while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    0, -1) == 0) {
		p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

		/* Return if portal is already initialized */
		if (p != 0 && p != -1) {
			sched_unpin();
			return ((t_Handle)p);
		}

		/*
		 * Not initialized and "owned" by another thread: yield
		 * the CPU voluntarily and retry the claim.
		 */
		thread_lock(curthread);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);
	}

	/* Map portal registers */
	dpaa_portal_map_registers(sc);

	/* Configure and initialize portal */
	qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
	qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
	qpp.h_Qm = qsc->sc_qh;
	qpp.swPortalId = cpu;
	qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
	qpp.fdLiodnOffset = 0;
	qpp.f_DfltFrame = qman_received_frame_callback;
	qpp.f_RejectedFrame = qman_rejected_frame_callback;
	qpp.h_App = qsc;

	portal = QM_PORTAL_Config(&qpp);
	if (portal == NULL)
		goto err;
	if (QM_PORTAL_Init(portal) != E_OK)
		goto err;
	if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
		goto err;

	/* Publish the handle, releasing the -1 "initializing" marker. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    (uint32_t)portal);
	sched_unpin();
	return (portal);

err:
	if (portal != NULL)
		QM_PORTAL_Free(portal);
	/* Reset the slot to 0 so a later caller can retry the setup. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
	sched_unpin();
	return (NULL);
}