int platform_start_ap(int cpuid) { uint64_t cores_in_reset; /* * Release the core if it is in reset, and let it rev up a bit. * The real synchronization happens below via octeon_ap_boot. */ cores_in_reset = cvmx_read_csr(CVMX_CIU_PP_RST); if (cores_in_reset & (1ULL << cpuid)) { if (bootverbose) printf ("AP #%d still in reset\n", cpuid); cores_in_reset &= ~(1ULL << cpuid); cvmx_write_csr(CVMX_CIU_PP_RST, (uint64_t)(cores_in_reset)); DELAY(2000); /* Give it a moment to start */ } if (atomic_cmpset_32(&octeon_ap_boot, ~0, cpuid) == 0) return (-1); for (;;) { DELAY(1000); if (atomic_cmpset_32(&octeon_ap_boot, 0, ~0) != 0) return (0); printf("Waiting for cpu%d to start\n", cpuid); } }
static void syscall_thread_drain(struct sysent *se) { u_int32_t cnt, oldcnt; do { oldcnt = se->sy_thrcnt; KASSERT((oldcnt & SY_THR_STATIC) == 0, ("drain on static syscall")); cnt = oldcnt | SY_THR_DRAINING; } while (atomic_cmpset_acq_32(&se->sy_thrcnt, oldcnt, cnt) == 0); while (atomic_cmpset_32(&se->sy_thrcnt, SY_THR_DRAINING, SY_THR_ABSENT) == 0) pause("scdrn", hz/2); }
static void nvme_notify(struct nvme_consumer *cons, struct nvme_controller *ctrlr) { struct nvme_namespace *ns; void *ctrlr_cookie; int cmpset, ns_idx; /* * The consumer may register itself after the nvme devices * have registered with the kernel, but before the * driver has completed initialization. In that case, * return here, and when initialization completes, the * controller will make sure the consumer gets notified. */ if (!ctrlr->is_initialized) return; cmpset = atomic_cmpset_32(&ctrlr->notification_sent, 0, 1); if (cmpset == 0) return; if (cons->ctrlr_fn != NULL) ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr); else ctrlr_cookie = NULL; ctrlr->cons_cookie[cons->id] = ctrlr_cookie; if (ctrlr->is_failed) { if (cons->fail_fn != NULL) (*cons->fail_fn)(ctrlr_cookie); /* * Do not notify consumers about the namespaces of a * failed controller. */ return; } for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) { ns = &ctrlr->ns[ns_idx]; if (cons->ns_fn != NULL) ns->cons_cookie[cons->id] = (*cons->ns_fn)(ns, ctrlr_cookie); } }
/*
 * Drop 'n' references of the given kind from the softc.  When the drop
 * would take the total refcount to zero, the reference lock is taken
 * (unless the caller already holds it, per 'locked') so that waiters
 * sleeping on the lock address can be woken.
 */
static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t prev;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	prev = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(prev >= n, ("refcnt kind underflow"));
#endif

	/*
	 * Fast path: while the count would stay above zero after the
	 * drop, no waiter can become runnable, so skip the lock.
	 */
	while ((prev = ioat->refcnt) > n) {
		if (atomic_cmpset_32(&ioat->refcnt, prev, prev - n) != 0)
			return;
	}

	/* Slow path: the count may reach zero; serialize with waiters. */
	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	prev = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(prev >= n, ("refcnt error"));
	if (prev == n)
		wakeup(IOAT_REFLK);	/* Count hit zero; rouse waiters. */

	if (!locked)
		mtx_unlock(IOAT_REFLK);
}