int syscall_register(int *offset, struct sysent *new_sysent, struct sysent *old_sysent, int flags) { int i; if ((flags & ~SY_THR_STATIC) != 0) return (EINVAL); if (*offset == NO_SYSCALL) { for (i = 1; i < SYS_MAXSYSCALL; ++i) if (sysent[i].sy_call == (sy_call_t *)lkmnosys) break; if (i == SYS_MAXSYSCALL) return (ENFILE); *offset = i; } else if (*offset < 0 || *offset >= SYS_MAXSYSCALL) return (EINVAL); else if (sysent[*offset].sy_call != (sy_call_t *)lkmnosys && sysent[*offset].sy_call != (sy_call_t *)lkmressys) return (EEXIST); KASSERT(sysent[*offset].sy_thrcnt == SY_THR_ABSENT, ("dynamic syscall is not protected")); *old_sysent = sysent[*offset]; new_sysent->sy_thrcnt = SY_THR_ABSENT; sysent[*offset] = *new_sysent; atomic_store_rel_32(&sysent[*offset].sy_thrcnt, flags); return (0); }
/*
 * Lazily create (or fetch) the BMan software portal bound to the current
 * CPU.  Returns the portal handle, or NULL if the portals device is not
 * attached (yet, or anymore) or portal setup fails.
 */
t_Handle
bman_portal_setup(struct bman_softc *bsc)
{
	struct dpaa_portals_softc *sc;
	t_BmPortalParam bpp;
	t_Handle portal;
	unsigned int cpu, p;

	/* Return NULL if we're not ready or while detach */
	if (bp_sc == NULL)
		return (NULL);

	sc = bp_sc;

	/* Stay on this CPU: the portal is a strictly per-CPU resource. */
	sched_pin();
	portal = NULL;
	cpu = PCPU_GET(cpuid);

	/*
	 * Check if portal is ready.
	 *
	 * dp_ph is used as a three-state latch: 0 = uninitialized,
	 * -1 = another thread is currently initializing, any other value
	 * is the published portal handle.  Try to claim the slot by
	 * moving it 0 -> -1 with an acquire CAS.
	 * NOTE(review): the handle round-trips through uint32_t, which
	 * assumes 32-bit pointers on this (Book-E PowerPC) target —
	 * confirm if ever ported to a 64-bit platform.
	 */
	while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    0, -1) == 0) {
		p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);
		/* Return if portal is already initialized */
		if (p != 0 && p != -1) {
			sched_unpin();
			return ((t_Handle)p);
		}
		/*
		 * Not initialized and "owned" by another thread: yield
		 * the CPU voluntarily and retry.
		 */
		thread_lock(curthread);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);
	}

	/* Map portal registers */
	dpaa_portal_map_registers(sc);

	/* Configure and initialize portal */
	bpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
	bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
	bpp.h_Bm = bsc->sc_bh;
	bpp.swPortalId = cpu;
	bpp.irq = (int)sc->sc_dp[cpu].dp_ires;

	portal = BM_PORTAL_Config(&bpp);
	if (portal == NULL)
		goto err;

	if (BM_PORTAL_Init(portal) != E_OK)
		goto err;

	/* Publish the handle with a release store; waiters spin above. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    (uint32_t)portal);

	sched_unpin();

	return (portal);

err:
	if (portal != NULL)
		BM_PORTAL_Free(portal);

	/* Reset the latch to 0 so a later attempt can retry setup. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
	sched_unpin();

	return (NULL);
}
/*
 * Lazily create (or fetch) the QMan software portal bound to the current
 * CPU.  Returns the portal handle, or NULL if the portals device is not
 * attached (yet, or anymore) or portal setup fails.
 */
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
	struct dpaa_portals_softc *sc;
	t_QmPortalParam qpp;
	unsigned int cpu, p;
	t_Handle portal;

	/* Return NULL if we're not ready or while detach */
	if (qp_sc == NULL)
		return (NULL);

	sc = qp_sc;

	/* Stay on this CPU: the portal is a strictly per-CPU resource. */
	sched_pin();
	portal = NULL;
	cpu = PCPU_GET(cpuid);

	/*
	 * Check if portal is ready.
	 *
	 * dp_ph is used as a three-state latch: 0 = uninitialized,
	 * -1 = another thread is currently initializing, any other value
	 * is the published portal handle.  Try to claim the slot by
	 * moving it 0 -> -1 with an acquire CAS.
	 * NOTE(review): the handle round-trips through uint32_t, which
	 * assumes 32-bit pointers on this (Book-E PowerPC) target —
	 * confirm if ever ported to a 64-bit platform.
	 */
	while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    0, -1) == 0) {
		p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);
		/* Return if portal is already initialized */
		if (p != 0 && p != -1) {
			sched_unpin();
			return ((t_Handle)p);
		}
		/*
		 * Not initialized and "owned" by another thread: yield
		 * the CPU voluntarily and retry.
		 */
		thread_lock(curthread);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);
	}

	/* Map portal registers */
	dpaa_portal_map_registers(sc);

	/* Configure and initialize portal */
	qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
	qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
	qpp.h_Qm = qsc->sc_qh;
	qpp.swPortalId = cpu;
	qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
	qpp.fdLiodnOffset = 0;
	qpp.f_DfltFrame = qman_received_frame_callback;
	qpp.f_RejectedFrame = qman_rejected_frame_callback;
	qpp.h_App = qsc;

	portal = QM_PORTAL_Config(&qpp);
	if (portal == NULL)
		goto err;

	if (QM_PORTAL_Init(portal) != E_OK)
		goto err;

	if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
		goto err;

	/* Publish the handle with a release store; waiters spin above. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
	    (uint32_t)portal);

	sched_unpin();

	return (portal);

err:
	if (portal != NULL)
		QM_PORTAL_Free(portal);

	/* Reset the latch to 0 so a later attempt can retry setup. */
	atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
	sched_unpin();

	return (NULL);
}