/*
 * TLBSTATS only: classify a TLB shootdown request as kernel, local and/or
 * remote with respect to the current CPU and bump the matching counters.
 */
static inline void
pmap_tlbstat_count(struct pmap *pm, vaddr_t va, tlbwhy_t why)
{
#ifdef TLBSTATS
	const cpuid_t cid = cpu_index(curcpu());
	bool local = false, remote = false;

	if (va != (vaddr_t)-1LL) {
		atomic_inc_64(&tlbstat_single_req.ev_count);
	}
	if (pm == pmap_kernel()) {
		atomic_inc_64(&tlbstat_kernel[why].ev_count);
		return;
	}

	if (va >= VM_MAXUSER_ADDRESS) {
		remote = kcpuset_isotherset(pm->pm_kernel_cpus, cid);
		local = kcpuset_isset(pm->pm_kernel_cpus, cid);
	}
	remote |= kcpuset_isotherset(pm->pm_cpus, cid);
	local |= kcpuset_isset(pm->pm_cpus, cid);

	if (local) {
		atomic_inc_64(&tlbstat_local[why].ev_count);
	}
	if (remote) {
		atomic_inc_64(&tlbstat_remote[why].ev_count);
	}
#endif
}
/*ARGSUSED*/
void
dcopy_device_channel_notify(dcopy_handle_t handle, int status)
{
	struct dcopy_channel_s *channel;
	dcopy_list_t *poll_list;
	dcopy_cmd_priv_t priv;
	int e;

	ASSERT(status == DCOPY_COMPLETION);
	channel = handle;

	poll_list = &channel->ch_poll_list;

	/*
	 * when we get a completion notification from the device, go through
	 * all of the commands blocking on this channel and see if they have
	 * completed.  Remove the command and wake up the blocked thread if
	 * they have.  Once we hit a command which is still pending, we are
	 * done polling since commands in a channel complete in order.
	 */
	mutex_enter(&poll_list->dl_mutex);
	if (poll_list->dl_cnt != 0) {
		priv = list_head(&poll_list->dl_list);
		while (priv != NULL) {
			atomic_inc_64(&channel->
			    ch_stat.cs_notify_poll.value.ui64);
			e = channel->ch_cb->cb_cmd_poll(
			    channel->ch_channel_private,
			    priv->pr_cmd);
			if (e == DCOPY_PENDING) {
				atomic_inc_64(&channel->
				    ch_stat.cs_notify_pending.value.ui64);
				break;
			}

			poll_list->dl_cnt--;
			list_remove(&poll_list->dl_list, priv);

			mutex_enter(&priv->pr_mutex);
			priv->pr_wait = B_FALSE;
			cv_signal(&priv->pr_cv);
			mutex_exit(&priv->pr_mutex);

			priv = list_head(&poll_list->dl_list);
		}
	}
	mutex_exit(&poll_list->dl_mutex);
}
/*
 * dcopy_cmd_alloc()
 */
int
dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;

	channel = handle;

	atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);

	e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
	    cmd);
	if (e == DCOPY_SUCCESS) {
		priv = (*cmd)->dp_private;
		priv->pr_channel = channel;
		/*
		 * we won't initialize the blocking state until we actually
		 * need to block.
		 */
		priv->pr_block_init = B_FALSE;
	}

	return (e);
}
/*
 * ndi_fmc_insert -
 *	Add a new entry to the specified cache.
 *
 *	This function must be called at or below LOCK_LEVEL
 */
void
ndi_fmc_insert(dev_info_t *dip, int flag, void *resource, void *bus_specific)
{
	struct dev_info *devi = DEVI(dip);
	ndi_fmc_t *fcp;
	ndi_fmcentry_t *fep, **fpp;
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(devi);
	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = devi->devi_fmhdl;
	if (fmhdl == NULL) {
		return;
	}

	if (flag == DMA_HANDLE) {
		if (!DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
			return;
		}
		fcp = fmhdl->fh_dma_cache;
		fpp = &((ddi_dma_impl_t *)resource)->dmai_error.err_fep;
	} else if (flag == ACC_HANDLE) {
		if (!DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
			i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL,
			    DDI_NOSLEEP);
			return;
		}
		fcp = fmhdl->fh_acc_cache;
		fpp = &((ddi_acc_impl_t *)resource)->ahi_err->err_fep;
	}

	fep = kmem_cache_alloc(ndi_fm_entry_cache, KM_NOSLEEP);
	if (fep == NULL) {
		atomic_inc_64(&fmhdl->fh_kstat.fek_fmc_full.value.ui64);
		return;
	}

	/*
	 * Set-up the handle resource and bus_specific information.
	 * Also remember the pointer back to the cache for quick removal.
	 */
	fep->fce_bus_specific = bus_specific;
	fep->fce_resource = resource;
	fep->fce_next = NULL;

	/* Add entry to the end of the active list */
	mutex_enter(&fcp->fc_lock);
	ASSERT(*fpp == NULL);
	*fpp = fep;
	fep->fce_prev = fcp->fc_tail;
	if (fcp->fc_tail != NULL)
		fcp->fc_tail->fce_next = fep;
	else
		fcp->fc_head = fep;
	fcp->fc_tail = fep;
	mutex_exit(&fcp->fc_lock);
}
void
dsl_pool_tx_assign_add_usecs(dsl_pool_t *dp, uint64_t usecs)
{
	uint64_t idx = 0;

	while (((1 << idx) < usecs) && (idx < dp->dp_tx_assign_size - 1))
		idx++;

	atomic_inc_64(&dp->dp_tx_assign_buckets[idx].value.ui64);
}
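/*
 * Hedged sketch of the bucket arithmetic above (standalone, hypothetical
 * names, not from the original source): the histogram index is the smallest
 * idx with (1 << idx) >= usecs, clamped to the last bucket.  For example,
 * usecs = 9 lands in bucket 4 because 2^3 = 8 < 9 <= 16 = 2^4.
 */
static uint64_t
tx_assign_bucket(uint64_t usecs, uint64_t nbuckets)
{
	uint64_t idx = 0;

	while (((1ULL << idx) < usecs) && (idx < nbuckets - 1))
		idx++;
	return (idx);
}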
/*
 * Apply specified callback to all caps contained in the list `l'.
 */
static void
cap_walk(list_t *l, void (*cb)(cpucap_t *, int64_t))
{
	static uint64_t cpucap_walk_gen;
	cpucap_t *cap;

	ASSERT(MUTEX_HELD(&caps_lock));

	for (cap = list_head(l); cap != NULL; cap = list_next(l, cap)) {
		(*cb)(cap, cpucap_walk_gen);
	}

	atomic_inc_64(&cpucap_walk_gen);
}
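/*
 * Hypothetical callback sketch (names and logic are illustrative, not from
 * the original source): because cpucap_walk_gen is bumped once per walk and
 * handed to every callback, a callback can tell one cap_walk() pass apart
 * from the next, e.g. to reset per-pass state exactly once.
 */
static int64_t cap_last_gen = -1;

static void
cap_report(cpucap_t *cap, int64_t gen)
{
	if (gen != cap_last_gen) {
		cap_last_gen = gen;
		/* first cap seen in this walk: reset per-pass aggregates */
	}
	/* ... per-cap work using "cap" ... */
}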
/*
 * dcopy_cmd_post()
 */
int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
	dcopy_handle_t channel;
	int e;

	channel = cmd->dp_private->pr_channel;

	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
		    cmd->dp.copy.cc_size);
	}
	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
	if (e != DCOPY_SUCCESS) {
		return (e);
	}

	return (DCOPY_SUCCESS);
}
/*
 * Per-thread worker for the SPLAT atomic test: repeatedly apply the selected
 * 64-bit atomic operation to the shared counter, then mark the thread as
 * exited and wake the waiting test driver.
 */
static void
splat_atomic_work(void *priv)
{
	atomic_priv_t *ap;
	atomic_op_t op;
	int i;

	ap = (atomic_priv_t *)priv;
	ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);

	spin_lock(&ap->ap_lock);
	op = ap->ap_op;
	wake_up(&ap->ap_waitq);
	spin_unlock(&ap->ap_lock);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully started: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {

		/* Periodically sleep to mix up the ordering */
		if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
			splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
			    "Thread %d sleeping: %lu/%lu\n", op,
			    (long unsigned)ap->ap_atomic,
			    (long unsigned)ap->ap_atomic_exited);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 100);
		}

		switch (op) {
		case SPLAT_ATOMIC_INC_64:
			atomic_inc_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_DEC_64:
			atomic_dec_64(&ap->ap_atomic);
			break;
		case SPLAT_ATOMIC_ADD_64:
			atomic_add_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_SUB_64:
			atomic_sub_64(&ap->ap_atomic, 3);
			break;
		case SPLAT_ATOMIC_ADD_64_NV:
			atomic_add_64_nv(&ap->ap_atomic, 5);
			break;
		case SPLAT_ATOMIC_SUB_64_NV:
			atomic_sub_64_nv(&ap->ap_atomic, 5);
			break;
		default:
			PANIC("Undefined op %d\n", op);
		}
	}

	atomic_inc_64(&ap->ap_atomic_exited);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully exited: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	wake_up(&ap->ap_waitq);
	thread_exit();
}
template<typename T>
static void
increase(T *ptr)
{
	atomic_inc_64(ptr);
}
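// Hedged usage sketch of the increase<T>() wrapper above ("hits" is a
// hypothetical counter, not from the original code).  T must be a 64-bit
// counter type that atomic_inc_64() accepts.
uint64_t hits = 0;
increase(&hits);	// same effect as atomic_inc_64(&hits)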
/*
 * dcopy_cmd_poll()
 */
int
dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;

	priv = cmd->dp_private;
	channel = priv->pr_channel;

	/*
	 * if the caller is trying to block, they needed to post the
	 * command with DCOPY_CMD_INTR set.
	 */
	if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
		return (DCOPY_FAILURE);
	}

	atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);

repoll:
	e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
	if (e == DCOPY_PENDING) {
		/*
		 * if the command is still active, and the blocking flag
		 * is set.
		 */
		if (flags & DCOPY_POLL_BLOCK) {

			/*
			 * if we haven't initialized the state, do it now. A
			 * command can be re-used, so it's possible it's
			 * already been initialized.
			 */
			if (!priv->pr_block_init) {
				priv->pr_block_init = B_TRUE;
				mutex_init(&priv->pr_mutex, NULL,
				    MUTEX_DRIVER, NULL);
				cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
				priv->pr_cmd = cmd;
			}

			/* push it on the list for blocking commands */
			priv->pr_wait = B_TRUE;
			dcopy_list_push(&channel->ch_poll_list, priv);

			mutex_enter(&priv->pr_mutex);
			/*
			 * it's possible we already cleared pr_wait before we
			 * grabbed the mutex.
			 */
			if (priv->pr_wait) {
				cv_wait(&priv->pr_cv, &priv->pr_mutex);
			}
			mutex_exit(&priv->pr_mutex);

			/*
			 * the command has completed, go back and poll so we
			 * get the status.
			 */
			goto repoll;
		}
	}

	return (e);
}
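/*
 * Hedged usage sketch tying together the three dcopy entry points above.
 * Assumptions: "chan" is a channel handle already obtained elsewhere,
 * DCOPY_NOSLEEP is taken from the dcopy interface headers, and filling in
 * the copy source/destination plus freeing the command afterwards are
 * elided.  Allocate a command, post it with interrupt completion enabled,
 * then block until it finishes.
 */
static int
dcopy_copy_blocking(dcopy_handle_t chan)
{
	dcopy_cmd_t cmd;
	int e;

	e = dcopy_cmd_alloc(chan, DCOPY_NOSLEEP, &cmd);
	if (e != DCOPY_SUCCESS)
		return (e);

	cmd->dp_cmd = DCOPY_CMD_COPY;
	cmd->dp_flags = DCOPY_CMD_INTR;	/* required for DCOPY_POLL_BLOCK */
	/* ... fill in cmd->dp.copy source, destination and cc_size ... */

	e = dcopy_cmd_post(cmd);
	if (e != DCOPY_SUCCESS)
		return (e);

	return (dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK));
}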
/*
 * Remove an entry from the specified cache of access or dma mappings
 *
 * This function must be called at or below LOCK_LEVEL.
 */
void
ndi_fmc_remove(dev_info_t *dip, int flag, const void *resource)
{
	ndi_fmc_t *fcp;
	ndi_fmcentry_t *fep;
	struct dev_info *devi = DEVI(dip);
	struct i_ddi_fmhdl *fmhdl;

	ASSERT(devi);
	ASSERT(flag == DMA_HANDLE || flag == ACC_HANDLE);

	fmhdl = devi->devi_fmhdl;
	if (fmhdl == NULL) {
		return;
	}

	/* Find cache entry pointer for this resource */
	if (flag == DMA_HANDLE) {
		if (!DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap)) {
			return;
		}

		fcp = fmhdl->fh_dma_cache;
		ASSERT(fcp);
		mutex_enter(&fcp->fc_lock);
		fep = ((ddi_dma_impl_t *)resource)->dmai_error.err_fep;
		((ddi_dma_impl_t *)resource)->dmai_error.err_fep = NULL;
	} else if (flag == ACC_HANDLE) {
		if (!DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap)) {
			i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL,
			    DDI_NOSLEEP);
			return;
		}

		fcp = fmhdl->fh_acc_cache;
		ASSERT(fcp);
		mutex_enter(&fcp->fc_lock);
		fep = ((ddi_acc_impl_t *)resource)->ahi_err->err_fep;
		((ddi_acc_impl_t *)resource)->ahi_err->err_fep = NULL;
	} else {
		return;
	}

	/*
	 * Resource not in cache, return
	 */
	if (fep == NULL) {
		mutex_exit(&fcp->fc_lock);
		atomic_inc_64(&fmhdl->fh_kstat.fek_fmc_miss.value.ui64);
		return;
	}

	/*
	 * Updates to FM cache pointers require us to hold fc_lock to
	 * synchronize access to the cache for ndi_fmc_insert()
	 * and ndi_fmc_error()
	 */
	if (fep == fcp->fc_head)
		fcp->fc_head = fep->fce_next;
	else
		fep->fce_prev->fce_next = fep->fce_next;
	if (fep == fcp->fc_tail)
		fcp->fc_tail = fep->fce_prev;
	else
		fep->fce_next->fce_prev = fep->fce_prev;
	mutex_exit(&fcp->fc_lock);

	kmem_cache_free(ndi_fm_entry_cache, fep);
}