Example #1
static void
s1394_cmp_fini(s1394_hal_t *hal)
{
	s1394_cmp_hal_t *cmp = &hal->hal_cmp;

	rw_destroy(&cmp->cmp_ompr_rwlock);
	rw_destroy(&cmp->cmp_impr_rwlock);
}
Example #2
static void
zfs_freezfsvfs(zfsvfs_t *zfsvfs)
{
	mutex_destroy(&zfsvfs->z_znodes_lock);
	mutex_destroy(&zfsvfs->z_online_recv_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rrw_destroy(&zfsvfs->z_teardown_lock);
	rw_destroy(&zfsvfs->z_teardown_inactive_lock);
	rw_destroy(&zfsvfs->z_fuid_lock);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
Example #3
int
testcall(struct lwp *l, void *uap, register_t *retval)
{
	int i;

	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&test_rwlock);
	cv_init(&test_cv, "testcv");

	printf("test: creating threads\n");

	test_count = NTHREADS;
	test_exit = 0;

	for (i = 0; i < test_count; i++)
		kthread_create(0, KTHREAD_MPSAFE, NULL, thread1, &primes[i],
		    &test_threads[i], "thread%d", i);

	printf("test: sleeping\n");

	mutex_enter(&test_mutex);
	while (test_count != 0) {
		(void)cv_timedwait(&test_cv, &test_mutex, hz * SECONDS);
		test_exit = 1;
	}
	mutex_exit(&test_mutex);

	printf("test: finished\n");

	cv_destroy(&test_cv);
	rw_destroy(&test_rwlock);
	mutex_destroy(&test_mutex);

	return 0;
}
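For the completion protocol above to terminate, each worker has to decrement test_count under test_mutex and signal test_cv. A minimal sketch of such a worker (an assumption, not the original thread1 body; it assumes primes[] holds ints):

static void
thread1(void *arg)
{
	int prime = *(int *)arg;

	/* ... per-thread work on the prime ... */
	(void) prime;

	/* Tell the waiter in testcall() that this thread is done. */
	mutex_enter(&test_mutex);
	test_count--;
	cv_broadcast(&test_cv);
	mutex_exit(&test_mutex);

	kthread_exit(0);
}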
Example #4
void
mdeg_fini(void)
{
	/*
	 * Flip the enabled switch off to make sure that
	 * no events get dispatched while things are being
	 * torn down.
	 */
	mdeg.enabled = B_FALSE;

	/* destroy the task queue */
	taskq_destroy(mdeg.taskq);

	/*
	 * Deallocate the table of registered clients
	 */
	kmem_free(mdeg.tbl, mdeg.maxclnts * sizeof (mdeg_clnt_t));
	rw_destroy(&mdeg.rwlock);

	/*
	 * Free up the cached MDs.
	 */
	if (mdeg.md_curr)
		(void) md_fini_handle(mdeg.md_curr);

	if (mdeg.md_prev)
		(void) md_fini_handle(mdeg.md_prev);

	mutex_destroy(&mdeg.lock);
}
Example #5
void
rdsv3_sock_exit_data(struct rsock *sk)
{
	struct rdsv3_sock *rs = sk->sk_protinfo;

	RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);

	ASSERT(rs != NULL);
	ASSERT(rdsv3_sk_sock_flag(sk, SOCK_DEAD));

	rs->rs_sk = NULL;

	list_destroy(&rs->rs_send_queue);
	list_destroy(&rs->rs_notify_queue);
	list_destroy(&rs->rs_recv_queue);

	rw_destroy(&rs->rs_recv_lock);
	mutex_destroy(&rs->rs_lock);

	mutex_destroy(&rs->rs_rdma_lock);
	avl_destroy(&rs->rs_rdma_keys);

	mutex_destroy(&rs->rs_conn_lock);
	mutex_destroy(&rs->rs_congested_lock);
	cv_destroy(&rs->rs_congested_cv);

	rdsv3_exit_waitqueue(sk->sk_sleep);
	kmem_free(sk->sk_sleep, sizeof (rdsv3_wait_queue_t));
	mutex_destroy(&sk->sk_lock);

	kmem_cache_free(rdsv3_alloc_cache, sk);
	RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);
}
Example #6
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		ASSERT(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, KM_SLEEP));
	}

	mutex_exit(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);

	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));

	rw_destroy(&tq->tq_threadlock);

	kmem_free(tq, sizeof (taskq_t));
}
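taskq_destroy() above first drains the queue via taskq_wait(), wakes the worker threads, joins them, and only then tears down the locks and frees the structure. A minimal sketch of the surrounding lifecycle (my_task and example are illustrative names, not from the source):

static void
my_task(void *arg)
{
	/* ... one unit of work ... */
}

static void
example(void)
{
	taskq_t *tq;

	tq = taskq_create("example", 4, maxclsyspri, 4, INT_MAX,
	    TASKQ_PREPOPULATE);
	(void) taskq_dispatch(tq, my_task, NULL, TQ_SLEEP);
	taskq_destroy(tq);	/* waits for outstanding tasks itself */
}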
Example #7
void
zcrypt_keystore_fini(spa_t *spa)
{
	void *cookie;
	avl_tree_t *tree;
	zcrypt_keystore_node_t *node;

	if (spa->spa_keystore == NULL)
		return;

	rw_enter(&spa->spa_keystore->sk_lock, RW_WRITER);
	/*
	 * Note we don't bother with the refcnt of the keys in here
	 * because this function can't return failure so we just need to
	 * destroy everything.
	 */
	cookie = NULL;
	tree = &spa->spa_keystore->sk_dslkeys;
	while ((node = avl_destroy_nodes(tree, &cookie)) != NULL) {
		mutex_enter(&node->skn_lock);
		(void) zcrypt_keychain_fini(node->skn_keychain);
		zcrypt_key_free(node->skn_wrapkey);
		mutex_exit(&node->skn_lock);
		bzero(node, sizeof (zcrypt_keystore_node_t));
		kmem_free(node, sizeof (zcrypt_keystore_node_t));
	}
	avl_destroy(tree);

	rw_exit(&spa->spa_keystore->sk_lock);
	rw_destroy(&spa->spa_keystore->sk_lock);
	kmem_free(spa->spa_keystore, sizeof (zcrypt_keystore_t));
	spa->spa_keystore = NULL;
}
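The avl_destroy_nodes() loop with a NULL cookie is the canonical way to empty an AVL tree without rebalancing on every removal; avl_destroy() afterwards frees nothing and only verifies the tree is empty. It pairs with an avl_create() along these lines (a sketch; the compare function and the skn_link field name are assumptions):

	avl_create(&spa->spa_keystore->sk_dslkeys,
	    zcrypt_keystore_node_compare,	/* hypothetical comparator */
	    sizeof (zcrypt_keystore_node_t),
	    offsetof(zcrypt_keystore_node_t, skn_link));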
Example #8
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens)
	return EBUSY;

    /* if a lock is held, give up */
    if (CheckLock(&avc->lock))
	return EBUSY;

    AFS_GUNLOCK();
    pvn_vplist_dirty(AFSTOV(avc), 0, NULL, B_TRUNC | B_INVAL, CRED());
    AFS_GLOCK();

    /* Might as well make the obvious check */
    if (AFSTOV(avc)->v_pages)
	return EBUSY;		/* should be all gone still */

    rw_destroy(&avc->rwlock);
    if (avc->credp) {
	crfree(avc->credp);
	avc->credp = NULL;
    }

    return 0;
}
Example #9
void
mem_range_destroy(void)
{

	if (mem_range_softc.mr_op == NULL)
		return;
	rw_destroy(&mr_lock);
}
Example #10
static int
splat_rwlock_test2(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int i, rc = 0, tq_count = 256;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create several threads allowing tasks to race with each other */
	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
			  maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Schedule N work items to the work queue each of which enters the
	 * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
	 * multiprocessor box these work items will be handled by all available
	 * CPUs.  The task function checks to ensure the tracked shared variable
	 * is always only incremented by one.  Additionally, the rwlock itself
	 * is instrumented such that if any two processors are in the
	 * critical region at the same time the system will panic.  If the
	 * rwlock is implemented correctly this will never happen; that's a pass.
	 */
	for (i = 0; i < tq_count; i++) {
		if (!taskq_dispatch(tq, splat_rwlock_test2_func, rwp, TQ_SLEEP)) {
			splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
				     "Failed to queue task %d\n", i);
			rc = -EINVAL;
		}
	}

	taskq_wait(tq);

	if (rwp->rw_rc == tq_count) {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "correctly entered/exited the rwlock %d times\n",
			     num_online_cpus(), rwp->rw_rc);
	} else {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "only processed %d/%d w rwlock work items\n",
			     num_online_cpus(), rwp->rw_rc, tq_count);
		rc = -EINVAL;
	}

	taskq_destroy(tq);
	rw_destroy(&(rwp->rw_rwlock));
out:
	kfree(rwp);
	return rc;
}
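The comment above describes the task function's job; here is a hypothetical sketch of such a function (not the actual SPLAT splat_rwlock_test2_func) that serializes increments of the shared counter through the write lock:

static void
splat_rwlock_test2_func(void *arg)
{
	rw_priv_t *rwp = (rw_priv_t *)arg;
	int before;

	/* Only one task may be inside the write-locked region at a time. */
	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	before = rwp->rw_rc;
	msleep(1);			/* widen the race window */
	rwp->rw_rc = before + 1;	/* counter must advance by exactly one */
	rw_exit(&rwp->rw_rwlock);
}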
Example #11
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *cdarg)
{
	znode_t *zp = buf;

	ASSERT(zp->z_dirlocks == 0);
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_map_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dbuf == NULL);
	ASSERT(ZTOV(zp)->v_count == 0);
	vn_free(ZTOV(zp));
}
Example #12
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
    znode_t *zp = buf;

    ASSERT(!list_link_active(&zp->z_link_node));
    mutex_destroy(&zp->z_lock);
    rw_destroy(&zp->z_parent_lock);
    rw_destroy(&zp->z_name_lock);
    mutex_destroy(&zp->z_acl_lock);
    rw_destroy(&zp->z_xattr_lock);
    avl_destroy(&zp->z_range_avl);
    mutex_destroy(&zp->z_range_lock);

    ASSERT(zp->z_dirlocks == NULL);
    ASSERT(zp->z_acl_cached == NULL);
    ASSERT(zp->z_xattr_cached == NULL);
}
Example #13
void
vnic_dev_fini(void)
{
	ASSERT(vnic_count == 0);

	rw_destroy(&vnic_lock);
	mod_hash_destroy_idhash(vnic_hash);
	kmem_cache_destroy(vnic_cache);
}
Example #14
static void
kcpc_fini(void)
{
	long hash;

	for (hash = 0; hash < CPC_HASH_BUCKETS; hash++)
		mutex_destroy(&kcpc_ctx_llock[hash]);
	rw_destroy(&kcpc_cpuctx_lock);
}
Example #15
int
ddi_intr_free(ddi_intr_handle_t h)
{
	ddi_intr_handle_impl_t	*hdlp = (ddi_intr_handle_impl_t *)h;
	int			ret;

	DDI_INTR_APIDBG((CE_CONT, "ddi_intr_free: hdlp = %p\n", (void *)hdlp));

	if (hdlp == NULL)
		return (DDI_EINVAL);

	rw_enter(&hdlp->ih_rwlock, RW_WRITER);
	if (((hdlp->ih_flags & DDI_INTR_MSIX_DUP) &&
	    (hdlp->ih_state != DDI_IHDL_STATE_ADDED)) ||
	    ((hdlp->ih_state != DDI_IHDL_STATE_ALLOC) &&
	    (!(hdlp->ih_flags & DDI_INTR_MSIX_DUP)))) {
		rw_exit(&hdlp->ih_rwlock);
		return (DDI_EINVAL);
	}

	/* Set the number of interrupts to free */
	hdlp->ih_scratch1 = 1;

	ret = i_ddi_intr_ops(hdlp->ih_dip, hdlp->ih_dip,
	    DDI_INTROP_FREE, hdlp, NULL);

	rw_exit(&hdlp->ih_rwlock);
	if (ret == DDI_SUCCESS) {
		/* This would be the dup vector */
		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			atomic_dec_32(&hdlp->ih_main->ih_dup_cnt);
		else {
			int	n, curr_type;

			n = i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1;
			curr_type = i_ddi_intr_get_current_type(hdlp->ih_dip);

			i_ddi_intr_set_current_nintrs(hdlp->ih_dip, n);

			if ((i_ddi_irm_supported(hdlp->ih_dip, curr_type)
			    != DDI_SUCCESS) && (n > 0))
				(void) i_ddi_irm_modify(hdlp->ih_dip, n);

			if (hdlp->ih_type & DDI_INTR_TYPE_FIXED)
				i_ddi_set_intr_handle(hdlp->ih_dip,
				    hdlp->ih_inum, NULL);

			i_ddi_intr_devi_fini(hdlp->ih_dip);
			i_ddi_free_intr_phdl(hdlp);
		}
		rw_destroy(&hdlp->ih_rwlock);
		kmem_free(hdlp, sizeof (ddi_intr_handle_impl_t));
	}

	return (ret);
}
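ddi_intr_free() is the final step of the DDI interrupt lifecycle. For context, a condensed sketch of the usual attach/detach sequence (my_isr, example, and state are illustrative; error handling omitted):

static uint_t
my_isr(caddr_t arg1, caddr_t arg2)
{
	/* ... service the interrupt ... */
	return (DDI_INTR_CLAIMED);
}

static void
example(dev_info_t *dip, void *state)
{
	ddi_intr_handle_t hdl;
	int actual;

	/* attach path */
	(void) ddi_intr_alloc(dip, &hdl, DDI_INTR_TYPE_MSI, 0, 1,
	    &actual, DDI_INTR_ALLOC_NORMAL);
	(void) ddi_intr_add_handler(hdl, my_isr, state, NULL);
	(void) ddi_intr_enable(hdl);

	/* detach path */
	(void) ddi_intr_disable(hdl);
	(void) ddi_intr_remove_handler(hdl);
	(void) ddi_intr_free(hdl);
}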
Example #16
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrw_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);
	mutex_destroy(&zsb->z_ctldir_lock);
	avl_destroy(&zsb->z_ctldir_snaps);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
Example #17
/* Called from _fini */
void
rdsib_fini()
{
	/* Stop logging */
	rds_logging_destroy();

	cv_destroy(&rds_dpool.pool_cv);
	mutex_destroy(&rds_dpool.pool_lock);
	cv_destroy(&rds_cpool.pool_cv);
	mutex_destroy(&rds_cpool.pool_lock);

	rw_destroy(&rds_loopback_portmap_lock);

	rw_destroy(&rdsib_statep->rds_hca_lock);
	rw_destroy(&rdsib_statep->rds_sessionlock);
	kmem_free(rdsib_statep, sizeof (rds_state_t));

	rds_transport_ops = NULL;
}
Example #18
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrm_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);
	vmem_free(zsb->z_hold_mtx, sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	zfs_mntopts_free(zsb->z_mntopts);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
Example #19
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	// ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT(ZTOV(zp)->v_data == zp);
	vn_free(ZTOV(zp));
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
}
Example #20
/*ARGSUSED*/
static void
taskq_destructor(void *arg, void *obj)
{
    taskq_t *tq = obj;

    mutex_destroy(&tq->tq_lock);
    rw_destroy(&tq->tq_threadlock);
    cv_destroy(&tq->tq_dispatch_cv);
    cv_destroy(&tq->tq_wait_cv);
}
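This kmem-cache destructor mirrors a constructor that initializes the same four primitives. A sketch of what that constructor plausibly looks like (the function itself is an assumption, not taken from the source):

/*ARGSUSED*/
static int
taskq_constructor(void *buf, void *arg, int kmflags)
{
    taskq_t *tq = buf;

    mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
    rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
    cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
    cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);

    return (0);
}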
Example #21
void
dce_stack_destroy(ip_stack_t *ipst)
{
	int i;
	for (i = 0; i < ipst->ips_dce_hashsize; i++) {
		rw_destroy(&ipst->ips_dce_hash_v4[i].dcb_lock);
		rw_destroy(&ipst->ips_dce_hash_v6[i].dcb_lock);
	}
	kmem_free(ipst->ips_dce_hash_v4,
	    ipst->ips_dce_hashsize * sizeof (dcb_t));
	ipst->ips_dce_hash_v4 = NULL;
	kmem_free(ipst->ips_dce_hash_v6,
	    ipst->ips_dce_hashsize * sizeof (dcb_t));
	ipst->ips_dce_hash_v6 = NULL;
	ipst->ips_dce_hashsize = 0;

	ASSERT(ipst->ips_dce_default->dce_refcnt == 1);
	kmem_cache_free(dce_cache, ipst->ips_dce_default);
	ipst->ips_dce_default = NULL;
}
Example #22
/* detach */
int
url_detach(device_t self, int flags)
{
	struct url_softc *sc = device_private(self);
	struct ifnet *ifp = GET_IFP(sc);
	int s;

	DPRINTF(("%s: %s: enter\n", device_xname(sc->sc_dev), __func__));

	/* Detached before attach finished */
	if (!sc->sc_attached)
		return (0);

	callout_stop(&sc->sc_stat_ch);

	/* Remove any pending tasks */
	usb_rem_task(sc->sc_udev, &sc->sc_tick_task);
	usb_rem_task(sc->sc_udev, &sc->sc_stop_task);

	s = splusb();

	if (--sc->sc_refcnt >= 0) {
		/* Wait for processes to go away */
		usb_detach_waitold(sc->sc_dev);
	}

	if (ifp->if_flags & IFF_RUNNING)
		url_stop(GET_IFP(sc), 1);

	rnd_detach_source(&sc->rnd_source);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
	ether_ifdetach(ifp);
	if_detach(ifp);

#ifdef DIAGNOSTIC
	if (sc->sc_pipe_tx != NULL)
		aprint_debug_dev(self, "detach has active tx endpoint.\n");
	if (sc->sc_pipe_rx != NULL)
		aprint_debug_dev(self, "detach has active rx endpoint.\n");
	if (sc->sc_pipe_intr != NULL)
		aprint_debug_dev(self, "detach has active intr endpoint.\n");
#endif

	sc->sc_attached = 0;

	splx(s);

	rw_destroy(&sc->sc_mii_rwlock);
	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
			   sc->sc_dev);

	return (0);
}
Example #23
static int
splat_rwlock_test6(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_READER);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
		             "rwlock should be read lock: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
	/* With one reader upgrade should never fail */
	rc = rw_tryupgrade(&rwp->rw_rwlock);
	if (!rc) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
			     "rwlock contended preventing upgrade: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
			   "have 0 (not %d) reader and 1 (not %d) writer\n",
			   RW_READ_HELD(&rwp->rw_rwlock),
			   RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
		     "rwlock properly upgraded\n");
#else
	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
		     "rw_tryupgrade() is disabled for this arch\n");
#endif

out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}
Example #24
int
_fini(void)
{
	int	rv;

	rv = mod_remove(&modlinkage);
	if (rv == DDI_SUCCESS) {
		rw_destroy(&bd_lock);
		ddi_soft_state_fini(&bd_state);
	}
	return (rv);
}
Example #25
int
t4_free_l2t(struct l2t_data *d)
{
	int i;

	for (i = 0; i < d->l2t_size; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}
Example #26
void
zfs_sb_free(zfs_sb_t *zsb)
{
	int i, size = zsb->z_hold_size;

	zfs_fuid_destroy(zsb);

	mutex_destroy(&zsb->z_znodes_lock);
	mutex_destroy(&zsb->z_lock);
	list_destroy(&zsb->z_all_znodes);
	rrm_destroy(&zsb->z_teardown_lock);
	rw_destroy(&zsb->z_teardown_inactive_lock);
	rw_destroy(&zsb->z_fuid_lock);
	for (i = 0; i != size; i++) {
		avl_destroy(&zsb->z_hold_trees[i]);
		mutex_destroy(&zsb->z_hold_locks[i]);
	}
	vmem_free(zsb->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zsb->z_hold_locks, sizeof (kmutex_t) * size);
	zfs_mntopts_free(zsb->z_mntopts);
	kmem_free(zsb, sizeof (zfs_sb_t));
}
Example #27
/*
 * iscsi_door_term
 *
 * This function releases the resources allocated to handle the door
 * upcall.  It disconnects from the door if currently connected.
 */
boolean_t
iscsi_door_term(void)
{
	ASSERT(iscsi_door_init);
	if (iscsi_door_init) {
		iscsi_door_init = B_FALSE;
		iscsi_door_unbind();
		rw_destroy(&iscsi_door_lock);
		sema_destroy(&iscsi_door_sema);
		return (B_TRUE);
	}
	return (B_FALSE);
}
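The teardown above implies an initialization counterpart. A sketch of what it plausibly looks like (the lock class, semaphore initial value, and function name are assumptions):

boolean_t
iscsi_door_ini(void)
{
	if (!iscsi_door_init) {
		rw_init(&iscsi_door_lock, NULL, RW_DRIVER, NULL);
		sema_init(&iscsi_door_sema, 1, NULL, SEMA_DRIVER, NULL);
		iscsi_door_init = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}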
Example #28
/* ARGSUSED */
int
ddi_intr_add_softint(dev_info_t *dip, ddi_softint_handle_t *h_p, int soft_pri,
    ddi_intr_handler_t handler, void *arg1)
{
	ddi_softint_hdl_impl_t	*hdlp;
	int			ret;

	DDI_INTR_APIDBG((CE_CONT, "ddi_intr_add_softint: dip = %p, "
	    "softpri = 0x%x\n", (void *)dip, soft_pri));

	if ((dip == NULL) || (h_p == NULL) || (handler == NULL)) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_intr_add_softint: "
		    "invalid arguments"));

		return (DDI_EINVAL);
	}

	/* Validate input arguments */
	if (soft_pri < DDI_INTR_SOFTPRI_MIN ||
	    soft_pri > DDI_INTR_SOFTPRI_MAX) {
		DDI_INTR_APIDBG((CE_CONT, "ddi_intr_add_softint: invalid "
		    "soft_pri input given  = %x\n", soft_pri));
		return (DDI_EINVAL);
	}

	hdlp = (ddi_softint_hdl_impl_t *)kmem_zalloc(
	    sizeof (ddi_softint_hdl_impl_t), KM_SLEEP);

	/* fill up internally */
	rw_init(&hdlp->ih_rwlock, NULL, RW_DRIVER, NULL);
	rw_enter(&hdlp->ih_rwlock, RW_WRITER);
	hdlp->ih_pri = soft_pri;
	hdlp->ih_dip = dip;
	hdlp->ih_cb_func = handler;
	hdlp->ih_cb_arg1 = arg1;
	DDI_INTR_APIDBG((CE_CONT, "ddi_intr_add_softint: hdlp = %p\n",
	    (void *)hdlp));

	/* do the platform specific calls */
	if ((ret = i_ddi_add_softint(hdlp)) != DDI_SUCCESS) {
		rw_exit(&hdlp->ih_rwlock);
		rw_destroy(&hdlp->ih_rwlock);
		kmem_free(hdlp, sizeof (ddi_softint_hdl_impl_t));
		return (ret);
	}

	*h_p = (ddi_softint_handle_t)hdlp;
	rw_exit(&hdlp->ih_rwlock);
	return (ret);
}
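Once added, the soft interrupt is fired with ddi_intr_trigger_softint() and released with ddi_intr_remove_softint(), which undoes the rw_init() and i_ddi_add_softint() work above. A short usage sketch (my_soft_handler and example are illustrative names):

static uint_t
my_soft_handler(caddr_t arg1, caddr_t arg2)
{
	/* ... deferred work runs at soft interrupt priority ... */
	return (DDI_INTR_CLAIMED);
}

static void
example(dev_info_t *dip, void *state)
{
	ddi_softint_handle_t hdl;

	(void) ddi_intr_add_softint(dip, &hdl, DDI_INTR_SOFTPRI_DEFAULT,
	    my_soft_handler, state);
	(void) ddi_intr_trigger_softint(hdl, NULL);
	(void) ddi_intr_remove_softint(hdl);
}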
Example #29
/*ARGSUSED*/
static void
i_dls_destructor(void *buf, void *arg)
{
	dls_impl_t	*dip = buf;

	ASSERT(dip->di_dvp == NULL);
	ASSERT(dip->di_mnh == NULL);
	ASSERT(dip->di_dmap == NULL);
	ASSERT(!dip->di_bound);
	ASSERT(dip->di_rx == NULL);
	ASSERT(dip->di_txinfo == NULL);

	rw_destroy(&(dip->di_lock));
}
Example #30
RTDECL(int)  RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    PRTSEMFASTMUTEXINTERNAL pThis = hFastMtx;
    if (pThis == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMFASTMUTEX_MAGIC, ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    ASMAtomicXchgU32(&pThis->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    rw_destroy(&pThis->Mtx);
    RTMemFree(pThis);

    return VINF_SUCCESS;
}
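Every example above ends in rw_destroy() on a lock that earlier went through rw_init() and some number of rw_enter()/rw_exit() pairs. For reference, a minimal illumos-style lifecycle sketch (counter_example is an illustrative name):

static int counter;
static krwlock_t counter_lock;

void
counter_example(void)
{
	rw_init(&counter_lock, NULL, RW_DRIVER, NULL);

	rw_enter(&counter_lock, RW_WRITER);	/* exclusive access */
	counter++;
	rw_exit(&counter_lock);

	rw_enter(&counter_lock, RW_READER);	/* shared access */
	(void) counter;
	rw_exit(&counter_lock);

	/* Destroy only once no thread can still hold or acquire the lock. */
	rw_destroy(&counter_lock);
}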