/*
 * ps_fini: unwind power-service setup.  Unregister the domain-services
 * capabilities, stop the suspend handler thread (if it was started),
 * then detach from the MD event generator.
 */
static void
ps_fini(void)
{
	extern void mdeg_fini(void);

	/*
	 * Stop incoming requests from Zeus
	 */
	(void) ds_cap_fini(&ps_md_cap);
	(void) ds_cap_fini(&ps_shutdown_cap);
	(void) ds_cap_fini(&ps_panic_cap);

	if (ps_suspend_enabled) {
		(void) ds_cap_fini(&ps_suspend_cap);
		if (ps_suspend_thread != NULL) {
			/* Ask the suspend thread to exit, then wait for it. */
			mutex_enter(&ps_suspend_mutex);
			ps_suspend_thread_exit = B_TRUE;
			cv_signal(&ps_suspend_cv);
			mutex_exit(&ps_suspend_mutex);

			thread_join(ps_suspend_thread->t_did);
			ps_suspend_thread = NULL;

			/*
			 * Thread has been joined, so nothing can still be
			 * blocked on these; safe to destroy them now.
			 */
			mutex_destroy(&ps_suspend_mutex);
			cv_destroy(&ps_suspend_cv);
		}
	}

	mdeg_fini();
}
/**
 * Destroy all elements in queue and queue itself. Also notifies squeue_pop().
 * Doesn't deallocate squeue_t — the caller owns that storage.
 *
 * @param sq synchronized queue to destroy
 * @param el_free helper to destroy element's data
 *
 * @note squeue_destroy() will notify the consumer but doesn't guarantee that
 *       it has left squeue_pop() by the time this returns. You need to check
 *       this on your own; it can easily be done by joining the consumer
 *       thread. (Until then the consumer may still reference sq_mutex/sq_cv,
 *       which are destroyed at the bottom of this function.)
 */
void squeue_destroy(squeue_t* sq, void (*el_free)(void* obj)) {
	squeue_el_t* el;
	squeue_el_t* next;

	mutex_lock(&sq->sq_mutex);

	/* Free each element's payload, then the list node itself. */
	el = sq->sq_head;
	while(el != NULL) {
		next = el->s_next;
		el_free(el->s_data);
		mp_free(el);
		el = next;
	}

	/* Mark destroyed and wake everyone blocked on the queue. */
	sq->sq_is_destroyed = B_TRUE;
	cv_notify_all(&sq->sq_cv);
	mutex_unlock(&sq->sq_mutex);

	mutex_destroy(&sq->sq_mutex);
	cv_destroy(&sq->sq_cv);
}
/*
 * configuration clean up
 *
 * Undo attach-time initialization in reverse order.  'level' records how
 * far attach progressed; each case deliberately falls through so that
 * cleaning up level N also performs the cleanup for levels N-1 .. 1.
 */
static void
uftdi_cleanup(uftdi_state_t *uf, int level)
{
	ASSERT(level > 0 && level <= UFTDI_CLEANUP_LEVEL_MAX);

	switch (level) {
	default:
	case 6:
		uftdi_close_pipes(uf);
		/*FALLTHROUGH*/
	case 5:
		usb_unregister_event_cbs(uf->uf_dip, uf->uf_usb_events);
		/*FALLTHROUGH*/
	case 4:
		uftdi_destroy_pm_components(uf);
		/*FALLTHROUGH*/
	case 3:
		/* Sync objects and descriptor state from early attach. */
		mutex_destroy(&uf->uf_lock);
		cv_destroy(&uf->uf_tx_cv);
		usb_free_log_hdl(uf->uf_lh);
		uf->uf_lh = NULL;
		usb_free_descr_tree(uf->uf_dip, uf->uf_dev_data);
		uf->uf_def_ph = NULL;
		/*FALLTHROUGH*/
	case 2:
		usb_client_detach(uf->uf_dip, uf->uf_dev_data);
		/*FALLTHROUGH*/
	case 1:
		/* Finally release the soft state itself. */
		kmem_free(uf, sizeof (*uf));
		break;
	}
}
/*
 * DDI detach entry point: DDI_SUSPEND just quiesces input handling;
 * DDI_DETACH tears the instance down completely.
 */
/*ARGSUSED*/
static int
mouse8042_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct mouse_state *state;

	state = ddi_get_driver_private(dip);

	switch (cmd) {
	case DDI_SUSPEND:
		/* Ignore all data from mouse8042_intr until we fully resume */
		state->ready = 0;
		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * Remove the interrupt first so the handler cannot run
		 * while the locks below are being destroyed.
		 */
		ddi_remove_intr(dip, 0, state->ms_iblock_cookie);
		mouse8042_dip = NULL;
		cv_destroy(&state->reset_cv);
		mutex_destroy(&state->reset_mutex);
		mutex_destroy(&state->ms_mutex);
		ddi_prop_remove_all(dip);
		ddi_regs_map_free(&state->ms_handle);
		ddi_remove_minor_node(dip, NULL);
		kmem_free(state, sizeof (struct mouse_state));
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
void kitchen_destroy(struct kitchen *k) { int i; // Destroy the queue elements while (!q_empty(k->group_list)) { kfree(q_remhead(k->group_list)); } // Destroy the queue q_destroy(k->group_list); // Destroy the cv cv_destroy(k->kitchen_cv); // Destroy the entrance lock lock_destroy(k->kitchen_lock); // Destroy the bowl locks for (i = 0; i < NumBowls; i++) { lock_destroy(k->bowl_locks[i]); } // Destroy the bowl lock array kfree(k->bowl_locks); // Destroy the kitchen kfree(k); // Clear the pointer k = NULL; }
/*
 * Unlock this directory entry and wake anyone who was waiting for it.
 */
void
zfs_dirent_unlock(zfs_dirlock_t *dl)
{
	znode_t *dzp = dl->dl_dzp;
	zfs_dirlock_t **prev_dl, *cur_dl;

	mutex_enter(&dzp->z_lock);

	/* Drop the directory name lock unless this dirlock holds its own. */
	if (!dl->dl_namelock)
		rw_exit(&dzp->z_name_lock);

	/* Shared entry: just drop one reference and keep it alive. */
	if (dl->dl_sharecnt > 1) {
		dl->dl_sharecnt--;
		mutex_exit(&dzp->z_lock);
		return;
	}

	/* Last holder: unlink this entry from the znode's dirlock list. */
	prev_dl = &dzp->z_dirlocks;
	while ((cur_dl = *prev_dl) != dl)
		prev_dl = &cur_dl->dl_next;
	*prev_dl = dl->dl_next;

	/* Wake waiters, then destroy and free the entry. */
	cv_broadcast(&dl->dl_cv);
	mutex_exit(&dzp->z_lock);
	cv_destroy(&dl->dl_cv);
	kmem_free(dl, sizeof (*dl) + dl->dl_namesize);
}
static void udf_discstrat_finish_seq(struct udf_strat_args *args) { struct udf_mount *ump = args->ump; struct strat_private *priv = PRIV(ump); int error; if (ump == NULL) return; /* stop our sheduling thread */ KASSERT(priv->run_thread == 1); priv->run_thread = 0; wakeup(priv->queue_lwp); do { error = tsleep(&priv->run_thread, PRIBIO+1, "udfshedfin", hz); } while (error); /* kthread should be finished now */ /* set back old device strategy method */ VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &priv->old_strategy_setting, FWRITE, NOCRED); /* destroy our pool */ pool_destroy(&priv->desc_pool); mutex_destroy(&priv->discstrat_mutex); cv_destroy(&priv->discstrat_cv); /* free our private space */ free(ump->strategy_private, M_UDFTEMP); ump->strategy_private = NULL; }
/*
 * Terminate the current process.  Closes all file descriptors, records
 * the exit status, and either leaves the proc for the parent to reap
 * (parent still alive) or cleans it up immediately (parent already gone).
 * Never returns.
 */
void sys_exit(int exitcode, bool is_sig)
{
	lock_acquire(curproc->exitlock);

	/* Best-effort close of every possible descriptor. */
	for (int fd = 0; fd < OPEN_MAX; fd++) {
		int err;
		sys_close(fd, &err);
	}

	curproc->exit_flag = true;
	if (is_sig) {
		curproc->exit_code = _MKWAIT_SIG(exitcode);
	} else {
		curproc->exit_code = _MKWAIT_EXIT(exitcode);
	}

	if (proc_ids[curproc->ppid]->exit_flag == false) {
		/* Parent is alive: wake any waitpid() and let it reap us. */
		cv_broadcast(curproc->exitcv, curproc->exitlock);
		lock_release(curproc->exitlock);
	} else {
		/*
		 * Parent already exited — nobody will wait for us, so
		 * clean up now.
		 *
		 * FIX: destroy everything reachable through curproc BEFORE
		 * freeing the proc structure.  The original kfree()'d
		 * proc_ids[curproc->pid] (i.e. this process) and then
		 * called lock_destroy(curproc->exitlock) — a use after
		 * free of the just-released structure.
		 */
		lock_release(curproc->exitlock);
		cv_destroy(curproc->exitcv);
		as_destroy(curproc->p_addrspace);
		curproc->p_addrspace = NULL;
		lock_destroy(curproc->exitlock);
		kfree(proc_ids[curproc->pid]->p_name);
		kfree(proc_ids[curproc->pid]);
		proc_ids[curproc->pid] = NULL;
	}
	thread_exit();
}
/*
 * Smoke test for mutex/cv/softint/callout interaction: schedule a callout
 * and wait under test_mutex until test_done is set (presumably by
 * test_callout/test_softint — confirm in their definitions), then tear
 * everything down again.
 */
int
testcall(struct lwp *l, void *uap, register_t *retval)
{
	printf("test: initializing\n");
	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&test_cv, "testcv");
	test_sih = softint_establish(SOFTINT_MPSAFE | SOFTINT_SERIAL,
	    test_softint, NULL);
	callout_init(&test_ch, CALLOUT_MPSAFE);
	callout_setfunc(&test_ch, test_callout, NULL);

	printf("test: firing\n");
	callout_schedule(&test_ch, hz / 10);

	printf("test: waiting\n");
	mutex_enter(&test_mutex);
	while (!test_done) {
		cv_wait(&test_cv, &test_mutex);
	}
	mutex_exit(&test_mutex);
	printf("test: finished\n");

	/* Tear down in reverse order of creation. */
	callout_destroy(&test_ch);
	softint_disestablish(test_sih);
	mutex_destroy(&test_mutex);
	cv_destroy(&test_cv);

	return 0;
}
void test_cv() { long i; int result; unintr_printf("starting cv test\n"); unintr_printf("threads should print out in reverse order\n"); testcv = cv_create(); testlock = lock_create(); done = 0; testval1 = NTHREADS - 1; for (i = 0; i < NTHREADS; i++) { result = thread_create((void (*)(void *))test_cv_thread, (void *)i); assert(thread_ret_ok(result)); } while (__sync_fetch_and_add(&done, 0) < NTHREADS) { /* this requires thread_yield to be working correctly */ thread_yield(THREAD_ANY); } cv_destroy(testcv); unintr_printf("cv test done\n"); }
/*
 * Remove a kstat: detach its sysctl nodes and free the extended kstat.
 */
void
kstat_delete(kstat_t *ksp)
{
	ekstat_t *e = (ekstat_t *)ksp;
	kmutex_t *lock = ksp->ks_lock;
	int lock_needs_release = 0;

	/*
	 * Destroy the sysctl leaves.  Take the kstat's data lock around
	 * removal unless the caller already holds it.
	 */
	if (ksp->ks_type == KSTAT_TYPE_NAMED) {
		if (lock && MUTEX_NOT_HELD(lock)) {
			mutex_enter(lock);
			lock_needs_release = 1;
		}
		remove_child_sysctls(e);
		if (lock_needs_release) {
			mutex_exit(lock);
		}
	}

	sysctl_unregister_oid(&e->e_oid);

	if (e->e_vals) {
		kfree(e->e_vals, sizeof(sysctl_leaf_t) * e->e_num_vals);
	}

	cv_destroy(&e->e_cv);
	/* e_size covers the whole extended allocation, including ksp. */
	kfree(e, e->e_size);
}
/*
 * Wait for child 'pid' to exit and collect its status.  Returns the pid
 * on success, 0 for a NULL status pointer or a WNOHANG miss, -1 with
 * *err set on error.  Reaps (frees) the child's proc state on success.
 */
pid_t sys_waitpid(pid_t pid, int *status, int options, int *err)
{
	if (status == (int *)0x0) {
		return 0;
	}
	/* Reject known-bad user addresses and unaligned pointers. */
	if (status == (int *)0x40000000 || status == (int *)0x80000000 ||
	    ((int)status & 3) != 0) {
		*err = EFAULT;
		return -1;
	}
	if (options != 0 && options != WNOHANG && options != WUNTRACED) {
		*err = EINVAL;
		return -1;
	}
	if (pid < PID_MIN || pid > PID_MAX_256) {
		*err = ESRCH;
		return -1;
	}
	/*
	 * FIX: the NULL check must precede any dereference of
	 * proc_ids[pid].  The original read proc_ids[pid]->ppid first,
	 * crashing when waiting on a nonexistent/already-reaped pid.
	 */
	if (proc_ids[pid] == NULL) {
		*err = ESRCH;
		return -1;
	}
	if (curproc->pid != proc_ids[pid]->ppid) {
		*err = ECHILD;
		return -1;
	}

	lock_acquire(proc_ids[pid]->exitlock);
	if (proc_ids[pid]->exit_flag == false) {
		if (options == WNOHANG) {
			lock_release(proc_ids[pid]->exitlock);
			return 0;
		} else {
			/* Sleep until the child announces its exit. */
			cv_wait(proc_ids[pid]->exitcv, proc_ids[pid]->exitlock);
		}
	}
	*status = proc_ids[pid]->exit_code;
	lock_release(proc_ids[pid]->exitlock);

	/* Reap the child: destroy sync objects and free all its state. */
	lock_destroy(proc_ids[pid]->exitlock);
	cv_destroy(proc_ids[pid]->exitcv);
	as_destroy(proc_ids[pid]->p_addrspace);
	proc_ids[pid]->p_addrspace = NULL;
	kfree(proc_ids[pid]->p_name);
	kfree(proc_ids[pid]);
	proc_ids[pid] = NULL;

	return pid;
}
/*
 * Release a NUL-terminated session string allocated at strlen+1 bytes.
 * Safe to call with NULL.
 */
static void
iscsit_sess_str_free(char *s)
{
	if (s != NULL)
		kmem_free(s, strlen(s) + 1);
}

/*
 * Free a session object and every resource attached to it: identity
 * strings, task tree, rx PDU queue, connection/event lists, and the
 * session's sync objects.
 */
void
iscsit_sess_destroy(iscsit_sess_t *ist)
{
	idm_refcnt_destroy(&ist->ist_refcnt);

	iscsit_sess_str_free(ist->ist_initiator_name);
	iscsit_sess_str_free(ist->ist_initiator_alias);
	iscsit_sess_str_free(ist->ist_target_name);
	iscsit_sess_str_free(ist->ist_target_alias);

	avl_destroy(&ist->ist_task_list);
	kmem_free(ist->ist_rxpdu_queue, sizeof (iscsit_cbuf_t));
	list_destroy(&ist->ist_conn_list);
	list_destroy(&ist->ist_events);
	cv_destroy(&ist->ist_cv);
	mutex_destroy(&ist->ist_mutex);
	mutex_destroy(&ist->ist_sn_mutex);
	kmem_free(ist, sizeof (*ist));
}
/*
 * Detach the ADB keyboard: stop autopolling and the key-repeat callout,
 * disable and unhook the kbd-layer keyboard under sc_mutex, then destroy
 * the softc's sync primitives.
 */
static int
adb_kbd_detach(device_t dev)
{
	struct adb_kbd_softc *sc;
	keyboard_t *kbd;

	sc = device_get_softc(dev);

	adb_set_autopoll(dev, 0);
	callout_stop(&sc->sc_repeater);

	mtx_lock(&sc->sc_mutex);
	kbd = kbd_get_keyboard(kbd_find_keyboard(KBD_DRIVER_NAME,
	    device_get_unit(dev)));
	kbdd_disable(kbd);
#ifdef KBD_INSTALL_CDEV
	kbd_detach(kbd);
#endif
	kbdd_term(kbd);
	mtx_unlock(&sc->sc_mutex);

	/* No more users of the softc; safe to destroy its sync objects. */
	mtx_destroy(&sc->sc_mutex);
	cv_destroy(&sc->sc_cv);

	return (0);
}
/*
 * Close one end of a fifo.  Drops this opener's reader/writer counts
 * (or forces both to zero on revoke) and, when the last reference goes
 * away, releases the backing socket pair and the fifoinfo itself.
 */
/* ARGSUSED */
static int
fifo_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp;
	struct fifoinfo *fip;
	struct socket *wso, *rso;
	int isrevoke;

	vp = ap->a_vp;
	fip = vp->v_fifoinfo;
	/* Revoke presents as a close with neither FREAD nor FWRITE set. */
	isrevoke = (ap->a_fflag & (FREAD | FWRITE | FNONBLOCK)) == FNONBLOCK;
	wso = fip->fi_writesock;
	rso = fip->fi_readsock;
	solock(wso);
	if (isrevoke) {
		/* Force both directions down regardless of open counts. */
		if (fip->fi_readers != 0) {
			fip->fi_readers = 0;
			socantsendmore(wso);
		}
		if (fip->fi_writers != 0) {
			fip->fi_writers = 0;
			socantrcvmore(rso);
		}
	} else {
		/* Drop this opener's reference on each direction it held. */
		if ((ap->a_fflag & FREAD) && --fip->fi_readers == 0)
			socantsendmore(wso);
		if ((ap->a_fflag & FWRITE) && --fip->fi_writers == 0)
			socantrcvmore(rso);
	}
	if ((fip->fi_readers + fip->fi_writers) == 0) {
		/* Last reference: tear down sockets and the fifoinfo. */
		sounlock(wso);
		(void) soclose(rso);
		(void) soclose(wso);
		cv_destroy(&fip->fi_rcv);
		cv_destroy(&fip->fi_wcv);
		kmem_free(fip, sizeof(*fip));
		vp->v_fifoinfo = NULL;
	} else
		sounlock(wso);
	return (0);
}
/*
 * Close a DSL pool: release the directory/dataset references taken in
 * dsl_pool_open(), destroy the per-txg lists and taskqs, flush this
 * spa's buffers from the ARC, and free the pool structure.
 */
void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}
/*
 * Drop one reference to the shared sleepq globals; the final caller
 * destroys the common mutex and condition variable.
 */
void sleepq_finish(sleepq_t *sq)
{
	initialized_count--;
	if (initialized_count != 0)
		return;

	mutex_destroy(&sq_mtx);
	cv_destroy(&sq_cv);
}
/*
 * Cache destructor for sreq nodes: tear down the per-node sync objects
 * before the backing memory is returned to the allocator.
 */
/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *node = buf;	/* implicit void * conversion */

	mutex_destroy(&node->sn_lock);
	cv_destroy(&node->sn_cv);
}
/*
 * Release every synchronization primitive created for the whalemating
 * problem: one lock per role plus the shared thread-count tracking.
 */
void
whalemating_cleanup()
{
	lock_destroy(male_lock);
	lock_destroy(female_lock);
	lock_destroy(match_maker_lock);
	lock_destroy(thread_count_lock);
	cv_destroy(thread_count_cv);
}
/*
 * Cache destructor for taskq entries.  An entry must not be bound to a
 * thread when its backing memory is released.
 */
/*ARGSUSED*/
static void
taskq_ent_destructor(void *arg, void *obj)
{
	taskq_ent_t *ent = obj;

	ASSERT(ent->tqent_thread == NULL);
	cv_destroy(&ent->tqent_cv);
}
/*
 * xpvtap_drv_fini()
 *    Tear down per-instance driver state: user context first, then the
 *    open-tracking sync objects, then the soft state entry itself.
 */
static void
xpvtap_drv_fini(xpvtap_state_t *state)
{
	xpvtap_user_fini(state);
	cv_destroy(&state->bt_open.bo_exit_cv);
	mutex_destroy(&state->bt_open.bo_mutex);
	(void) ddi_soft_state_free(xpvtap_statep, state->bt_instance);
}
/*
 * Shut down the IPMI softc: drain and destroy the worker taskq first so
 * nothing can still be using the sync objects destroyed afterwards.
 */
void
ipmi_shutdown(struct ipmi_softc *sc)
{
	taskq_destroy(sc->ipmi_kthread);

	cv_destroy(&sc->ipmi_request_added);
	mutex_destroy(&sc->ipmi_lock);
}
/*
 * Free the semaphores, lock, and condition variable used by this
 * whalemating solution.
 */
void
whalemating_cleanup()
{
	sem_destroy(male_sem);
	sem_destroy(female_sem);
	lock_destroy(hold);
	cv_destroy(mate_cv);
}
/*
 * Tear down a firm_event queue: destroy its CV and selinfo, then free
 * the fixed-size event ring.
 */
void
ev_fini(struct evvar *ev)
{
	const size_t qbytes = (size_t)EV_QSIZE * sizeof(struct firm_event);

	cv_destroy(&ev->ev_cv);
	seldestroy(&ev->ev_sel);
	kmem_free(ev->ev_q, qbytes);
}
/*
 * Destructor callback: release the sync primitives embedded in a
 * puffs msgpark before its memory is reclaimed.
 */
static void
nukepark(void *obj, void *privdata)
{
	struct puffs_msgpark *mp = obj;

	cv_destroy(&mp->park_cv);
	lockuninit(&mp->park_mtx);
}
/*
 * Stop the npf worker thread and release its resources.  The structures
 * are only destroyed after kthread_join() guarantees the worker can no
 * longer touch them.
 */
void
npf_worker_sysfini(void)
{
	lwp_t *l = worker_lwp;

	/* Notify the worker and wait for the exit. */
	mutex_enter(&worker_lock);
	/* Clearing worker_lwp presumably signals the worker to exit —
	 * confirm against the worker loop. */
	worker_lwp = NULL;
	cv_broadcast(&worker_cv);
	mutex_exit(&worker_lock);
	kthread_join(l);

	/* LWP has exited, destroy the structures. */
	cv_destroy(&worker_cv);
	cv_destroy(&worker_event_cv);
	mutex_destroy(&worker_lock);
}
/*
 * Free a pppt task along with the buf structures allocated contiguously
 * behind it.
 */
void
pppt_task_free(pppt_task_t *ptask)
{
	/*
	 * NOTE(review): pt_mutex is acquired and then destroyed with no
	 * intervening mutex_exit — presumably to serialize with any
	 * in-flight holder before teardown.  Confirm this is legal for
	 * the platform's mutex_destroy(); many implementations require
	 * the mutex to be unheld when destroyed.
	 */
	mutex_enter(&ptask->pt_mutex);
	mutex_destroy(&ptask->pt_mutex);
	cv_destroy(&ptask->pt_cv);
	kmem_free(ptask, sizeof (pppt_task_t) + sizeof (pppt_buf_t) +
	    sizeof (stmf_data_buf_t));
}
/*
 * smb_net_txl_destructor
 *
 * Transmit list destructor
 */
void
smb_net_txl_destructor(smb_txlst_t *txl)
{
	ASSERT(txl->tl_magic == SMB_TXLST_MAGIC);
	/* Invalidate the structure before destroying its sync objects. */
	txl->tl_magic = 0;
	cv_destroy(&txl->tl_wait_cv);
	mutex_destroy(&txl->tl_mutex);
}
/*
 * Clean up a pidinfo structure.
 *
 * The entry must already have exited and been disowned (no live parent),
 * so nobody can still be waiting on pi_cv.
 */
static void
pidinfo_destroy(struct pidinfo *pi)
{
	KASSERT(pi->pi_exited == true);
	KASSERT(pi->pi_ppid == INVALID_PID);
	cv_destroy(pi->pi_cv);
	kfree(pi);
}
/*
 * Return a socket to its pool cache.
 *
 * NOTE(review): the waiter sanity checks and sync-object teardown are
 * compiled out with #if 0, so only pool_cache_put() is live here —
 * presumably destruction happens in the cache's destructor instead;
 * confirm before re-enabling this block.
 */
void
soput(struct socket *so)
{
#if 0
	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	seldestroy(&so->so_rcv.sb_sel);
	seldestroy(&so->so_snd.sb_sel);
	mutex_obj_free(so->so_lock);
	cv_destroy(&so->so_cv);
	cv_destroy(&so->so_rcv.sb_cv);
	cv_destroy(&so->so_snd.sb_cv);
#endif
	pool_cache_put(socket_cache, so);
}