Example #1
/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the injected error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}
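All of the examples on this page use the kernel reader/writer lock interface (rwlock(9F) on Solaris/illumos; the SPLAT examples exercise the SPL's compatible implementation). As a reference point, here is a minimal sketch of the lock lifecycle the examples assume; my_lock, my_data, and the my_* functions are placeholder names, not taken from any example.

#include <sys/ksynch.h>	/* krwlock_t, rw_init(), rw_enter(), rw_exit(), rw_destroy() */

static krwlock_t my_lock;	/* placeholder lock protecting my_data */
static int my_data;

void
my_attach(void)
{
	rw_init(&my_lock, NULL, RW_DRIVER, NULL);
}

int
my_get(void)
{
	int v;

	rw_enter(&my_lock, RW_READER);	/* shared: many readers may hold it at once */
	v = my_data;
	rw_exit(&my_lock);
	return (v);
}

void
my_set(int v)
{
	rw_enter(&my_lock, RW_WRITER);	/* exclusive: blocks readers and writers */
	my_data = v;
	rw_exit(&my_lock);
}

void
my_detach(void)
{
	rw_destroy(&my_lock);	/* the lock must not be held here */
}

The contract every example honors is the same: each rw_enter() is paired with exactly one rw_exit() on every path out of the critical section, including error paths.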
Example #2
/*
 * Look up an entry in a directory.
 *
 * NOTE: '.' and '..' are handled as special cases because
 *	no directory entries are actually stored for them.  If this is
 *	the root of a filesystem, then '.zfs' is also treated as a
 *	special pseudo-directory.
 */
int
zfs_dirlook(znode_t *dzp, char *name, vnode_t **vpp)
{
	zfs_dirlock_t *dl;
	znode_t *zp;
	int error = 0;

	if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
		*vpp = ZTOV(dzp);
		VN_HOLD(*vpp);
	} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
		zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
		/*
		 * If we are a snapshot mounted under .zfs, return
		 * the vp for the snapshot directory.
		 */
		if (dzp->z_phys->zp_parent == dzp->z_id &&
		    zfsvfs->z_parent != zfsvfs) {
			error = zfsctl_root_lookup(zfsvfs->z_parent->z_ctldir,
			    "snapshot", vpp, NULL, 0, NULL, kcred);
			return (error);
		}
		rw_enter(&dzp->z_parent_lock, RW_READER);
		error = zfs_zget(zfsvfs, dzp->z_phys->zp_parent, &zp);
		if (error == 0)
			*vpp = ZTOV(zp);
		rw_exit(&dzp->z_parent_lock);
	} else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) {
		*vpp = zfsctl_root(dzp);
	} else {
		error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS | ZSHARED);
		if (error == 0) {
			*vpp = ZTOV(zp);
			zfs_dirent_unlock(dl);
			dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
		}
	}

	return (error);
}
Example #3
int
rds_open_transport_driver()
{
	int ret = 0;

	rw_enter(&rds_transport_lock, RW_WRITER);
	if (rds_transport_handle != NULL) {
		/*
		 * Someone beat us to it.
		 */
		goto done;
	}

	if (ibt_hw_is_present() == 0) {
		ret = ENODEV;
		goto done;
	}

	if (rds_li == NULL) {
		ret = EPROTONOSUPPORT;
		goto done;
	}

	ret = ldi_open_by_name("/devices/ib/rdsib@0:rdsib",
	    FREAD | FWRITE, kcred, &rds_transport_handle, rds_li);
	if (ret != 0) {
		ret = EPROTONOSUPPORT;
		rds_transport_handle = NULL;
		goto done;
	}

	ret = rds_transport_ops->rds_transport_open_ib();
	if (ret != 0) {
		(void) ldi_close(rds_transport_handle, FNDELAY, kcred);
		rds_transport_handle = NULL;
	}
done:
	rw_exit(&rds_transport_lock);
	return (ret);
}
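The function above is a textbook single-exit shape: the writer lock is taken once, every early return jumps to done, and rw_exit() therefore runs exactly once no matter which branch fires. A stripped-down sketch of the same pattern, with hypothetical names (init_lock, init_handle, do_setup):

#include <sys/ksynch.h>

extern int do_setup(void **handlep);	/* hypothetical one-time setup step */

static krwlock_t init_lock;
static void *init_handle;

int
init_once(void)
{
	int ret = 0;

	rw_enter(&init_lock, RW_WRITER);
	if (init_handle != NULL)
		goto done;		/* someone beat us to it */
	ret = do_setup(&init_handle);
	if (ret != 0)
		init_handle = NULL;
done:
	rw_exit(&init_lock);
	return (ret);
}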
Example #4
static int
splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
			krw_t holder_type, krw_t try_type)
{
	int id, rc = 0;

	/* Schedule a task function which will try to acquire the rwlock
	 * using type try_type while the rwlock is being held as holder_type.
	 * The result must match expected_rc for the test to pass. */
	rwp->rw_rc = -EINVAL;
	rwp->rw_type = try_type;

	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_enter(&rwp->rw_rwlock, holder_type);

	id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
	if (id == 0) {
		splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
			     "taskq_dispatch() failed\n");
		rc = -EINVAL;
		goto out;
	}

	taskq_wait_id(tq, id);

	if (rwp->rw_rc != expected_rc)
		rc = -EINVAL;

	splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
		     "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
		     rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
		     rwp->rw_rc, expected_rc,
		     splat_rwlock_test4_name(holder_type));
out:
	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_exit(&rwp->rw_rwlock);

	return rc;
}
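The test above leans on the return convention of the non-blocking variant: rw_tryenter(9F) returns nonzero when the lock is acquired and 0, without ever blocking, when a conflicting hold exists. A small usage sketch; update_if_idle and its parameters are assumed names:

#include <sys/errno.h>
#include <sys/ksynch.h>

int
update_if_idle(krwlock_t *lockp, int *datap, int v)
{
	if (rw_tryenter(lockp, RW_WRITER) == 0)
		return (EBUSY);		/* conflicting hold; refuse to block */
	*datap = v;
	rw_exit(lockp);
	return (0);
}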
Example #5
/*
 * Install an exec module.
 */
static int
mod_installexec(struct modlexec *modl, struct modlinkage *modlp)
{
	struct execsw *eswp;
	struct modctl *mcp;
	char *modname;
	char *magic;
	size_t magic_size;

	/*
	 * See if execsw entry is already allocated.  Can't use findexectype()
	 * because we may get a recursive call to here.
	 */

	if ((eswp = findexecsw(modl->exec_execsw->exec_magic)) == NULL) {
		mcp = mod_getctl(modlp);
		ASSERT(mcp != NULL);
		modname = mcp->mod_modname;
		magic = modl->exec_execsw->exec_magic;
		magic_size = modl->exec_execsw->exec_maglen;
		if ((eswp = allocate_execsw(modname, magic, magic_size)) ==
		    NULL) {
			printf("no unused entries in 'execsw'\n");
			return (ENOSPC);
		}
	}
	if (eswp->exec_func != NULL) {
		printf("exec type %x is already installed\n",
		    *eswp->exec_magic);
		return (EBUSY);		/* it's already there! */
	}

	rw_enter(eswp->exec_lock, RW_WRITER);
	eswp->exec_func = modl->exec_execsw->exec_func;
	eswp->exec_core = modl->exec_execsw->exec_core;
	rw_exit(eswp->exec_lock);

	return (0);
}
Example #6
/*
 * Query the domain table by index, returning the domain string.
 *
 * The returned pointer references the domain string held in an AVL node.
 */
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
	char *domain;

	if (idx == 0 || !zfsvfs->z_use_fuids)
		return (NULL);

	if (!zfsvfs->z_fuid_loaded)
		zfs_fuid_init(zfsvfs);

	rw_enter(&zfsvfs->z_fuid_lock, RW_READER);

	if (zfsvfs->z_fuid_obj || zfsvfs->z_fuid_dirty)
		domain = zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx, idx);
	else
		domain = nulldomain;
	rw_exit(&zfsvfs->z_fuid_lock);

	ASSERT(domain);
	return (domain);
}
Example #7
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}
Example #8
/*
 * Select the raidz operation for the raidz_map.
 * If rm_ops is set to NULL, the original raidz implementation will be used.
 */
void
vdev_raidz_math_get_ops(raidz_map_t *rm)
{
	rw_enter(&vdev_raidz_impl_lock, RW_READER);

	rm->rm_ops = vdev_raidz_used_impl;

#if !defined(_KERNEL)
	if (zfs_vdev_raidz_impl == IMPL_CYCLE) {
		static size_t cycle_impl_idx = 0;
		size_t idx;
		/*
		 * Cycle through all supported new implementations, and
		 * when idx == raidz_supp_impl_cnt, use the original
		 */
		idx = (++cycle_impl_idx) % (raidz_supp_impl_cnt + 1);
		rm->rm_ops = raidz_supp_impl[idx];
	}
#endif

	rw_exit(&vdev_raidz_impl_lock);
}
Example #9
/*
 * Returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * If you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	ASSERT(db != NULL);
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else
		dbuf_rele(db, tag);
	return (err);
}
Example #10
static int
splat_rwlock_test5(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be write lock: %d\n",
			     RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rw_downgrade(&rwp->rw_rwlock);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be read lock: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
		     "rwlock properly downgraded\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}
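rw_downgrade(9F), demonstrated above, atomically converts a write hold into a read hold and cannot fail. The reverse direction exists only as rw_tryupgrade(9F), which returns 0 whenever concurrent readers make the upgrade impossible, so callers must be prepared to drop and reacquire. A hedged sketch of that idiom; revalidate() is a placeholder for rechecking state after the lock was released:

#include <sys/ksynch.h>

extern int revalidate(void);	/* hypothetical: recheck state after reacquiring */

void
modify_path(krwlock_t *lockp)
{
	rw_enter(lockp, RW_READER);
	/* ... inspect state, decide a modification is needed ... */
	if (rw_tryupgrade(lockp) == 0) {
		/*
		 * Upgrade failed because other readers are present.
		 * Drop the lock, retake it as writer, and revalidate:
		 * the state may have changed while the lock was free.
		 */
		rw_exit(lockp);
		rw_enter(lockp, RW_WRITER);
		(void) revalidate();
	}
	/* ... perform the modification as writer ... */
	rw_exit(lockp);
}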
Example #11
int64_t pap_putmsg(int fildes, struct strbuf *ctlptr,
                   struct strbuf *dataptr, int *flagsp)
{
    struct strbuf kctlptr;
    dl_promiscon_req_t *kpromiscon;

    inc_refcnt();

    dcmn_err((CE_CONT, "putmsg() syscall.\n"));

    rw_enter(&config_rwlock, RW_READER);

    if (!config.ppromisc ||
        copyin(ctlptr, &kctlptr, sizeof(struct strbuf)) != 0)
        goto err;

    mutex_enter(&promisc_lock);

    kpromiscon = (dl_promiscon_req_t *) kmem_alloc(kctlptr.len, KM_SLEEP);
    if (!kpromiscon)
        goto err_and_unlock;

    if (copyin(kctlptr.buf, kpromiscon, kctlptr.len) != 0)
        goto err_and_free;

    check_promisc(fildes, kpromiscon);

  err_and_free:
    kmem_free(kpromiscon, kctlptr.len);
  err_and_unlock:
    mutex_exit(&promisc_lock);
  err:

    rw_exit(&config_rwlock);
    dec_refcnt();

    return syscalls[PUTMSG].sc(fildes, ctlptr, dataptr, flagsp);
}
Example #12
/*
 * Increase the statefile size.
 */
static int
cpr_grow_statefile(vnode_t *vp, u_longlong_t newsize)
{
	extern uchar_t cpr_pagecopy[];
	struct inode *ip = VTOI(vp);
	u_longlong_t offset;
	int error, increase;
	ssize_t resid;

	rw_enter(&ip->i_contents, RW_READER);
	increase = (ip->i_size < newsize);
	offset = ip->i_size;
	rw_exit(&ip->i_contents);

	if (increase == 0)
		return (0);

	/*
	 * write to each logical block to reserve disk space
	 */
	error = 0;
	cpr_pagecopy[0] = '1';
	for (; offset < newsize; offset += ip->i_fs->fs_bsize) {
		if (error = vn_rdwr(UIO_WRITE, vp, (caddr_t)cpr_pagecopy,
		    ip->i_fs->fs_bsize, (offset_t)offset, UIO_SYSSPACE, 0,
		    (rlim64_t)MAXOFF_T, CRED(), &resid)) {
			if (error == ENOSPC) {
				cpr_err(CE_WARN, "error %d while reserving "
				    "disk space for statefile %s\n"
				    "wanted %lld bytes, file is %lld short",
				    error, cpr_cprconfig_to_path(),
				    newsize, newsize - offset);
			}
			break;
		}
	}
	return (error);
}
Example #13
/*
 * Remove an exec module.
 */
static int
mod_removeexec(struct modlexec *modl, struct modlinkage *modlp)
{
	struct execsw *eswp;
	struct modctl *mcp;
	char *modname;

	eswp = findexecsw(modl->exec_execsw->exec_magic);
	if (eswp == NULL) {
		mcp = mod_getctl(modlp);
		ASSERT(mcp != NULL);
		modname = mcp->mod_modname;
		cmn_err(CE_WARN, uninstall_err, modname);
		return (EINVAL);
	}
	if (moddebug & MODDEBUG_NOAUL_EXEC ||
	    !rw_tryenter(eswp->exec_lock, RW_WRITER))
		return (EBUSY);
	eswp->exec_func = NULL;
	eswp->exec_core = NULL;
	rw_exit(eswp->exec_lock);
	return (0);
}
Example #14
static void
s1394_cmp_impr_recv_read_request(cmd1394_cmd_t *req)
{
	s1394_hal_t	*hal = req->cmd_callback_arg;
	s1394_cmp_hal_t *cmp = &hal->hal_cmp;

	TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_enter,
	    S1394_TNF_SL_CMP_STACK, "");

	if (req->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
	} else {
		rw_enter(&cmp->cmp_impr_rwlock, RW_READER);
		req->cmd_u.q.quadlet_data = cmp->cmp_impr_val;
		rw_exit(&cmp->cmp_impr_rwlock);
		req->cmd_result = IEEE1394_RESP_COMPLETE;
	}

	(void) s1394_send_response(hal, req);

	TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_read_request_exit,
	    S1394_TNF_SL_CMP_STACK, "");
}
Example #15
static int
nfs4_active_reclaim(void)
{
	int freed;
	int index;
	rnode4_t *rp;

#ifdef DEBUG
	clstat4_debug.a_reclaim.value.ui64++;
#endif
	freed = 0;
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			if (nfs4_active_data_reclaim(rp))
				freed = 1;
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (freed);
}
Example #16
static int
filemon_wrapper_execve(struct lwp * l, struct sys_execve_args * uap,
    register_t * retval)
{
	char fname[MAXPATHLEN];
	int ret;
	int error;
	size_t done;
	struct filemon *filemon;
	
	error = copyinstr(SCARG(uap, path), fname, sizeof(fname), &done);

	if ((ret = sys_execve(l, uap, retval)) == EJUSTRETURN && error == 0) {
		filemon = filemon_lookup(curproc);

		if (filemon) {
			filemon_printf(filemon, "E %d %s\n",
			    curproc->p_pid, fname);
			rw_exit(&filemon->fm_mtx);
		}
	}
	return (ret);
}
Example #17
extern uint32_t
emlxs_nport_count(emlxs_port_t *port)
{
	NODELIST *nlp;
	uint32_t i;
	uint32_t nport_count = 0;

	rw_enter(&port->node_rwlock, RW_READER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			if ((nlp->nlp_DID & 0xFFF000) != 0xFFF000) {
				nport_count++;
			}

			nlp = (NODELIST *)nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	return (nport_count);

} /* emlxs_nport_count() */
Example #18
/*
 * iscsi_door_bind
 *
 * This function tries to connect to the iscsi_door.  If it succeeds,
 * it keeps the vnode.
 */
boolean_t
iscsi_door_bind(
	int		did
)
{
	door_handle_t	new_handle;

	new_handle = door_ki_lookup(did);
	if (new_handle == NULL) {
		/* The lookup failed. */
		return (B_FALSE);
	}

	/* The new handle is stored.  If we had one, it is released. */
	rw_enter(&iscsi_door_lock, RW_WRITER);
	if (iscsi_door_handle != NULL) {
		door_ki_rele(iscsi_door_handle);
	}
	iscsi_door_handle = new_handle;
	rw_exit(&iscsi_door_lock);

	return (B_TRUE);
}
Example #19
static int
xmem_link(struct vnode *dvp, struct vnode *srcvp, char *tnm, struct cred *cred)
{
	struct xmemnode *parent;
	struct xmemnode *from;
	struct xmount *xm = (struct xmount *)VTOXM(dvp);
	int error;
	struct xmemnode *found = NULL;
	struct vnode *realvp;

	if (VOP_REALVP(srcvp, &realvp) == 0)
		srcvp = realvp;

	parent = (struct xmemnode *)VTOXN(dvp);
	from = (struct xmemnode *)VTOXN(srcvp);

	if ((srcvp->v_type == VDIR &&
	    secpolicy_fs_linkdir(cred, dvp->v_vfsp) != 0) ||
	    (from->xn_uid != crgetuid(cred) && secpolicy_basic_link(cred) != 0))
		return (EPERM);

	error = xdirlookup(parent, tnm, &found, cred);
	if (error == 0) {
		ASSERT(found);
		xmemnode_rele(found);
		return (EEXIST);
	}

	if (error != ENOENT)
		return (error);

	rw_enter(&parent->xn_rwlock, RW_WRITER);
	error = xdirenter(xm, parent, tnm, DE_LINK, (struct xmemnode *)NULL,
		from, NULL, (struct xmemnode **)NULL, cred);
	rw_exit(&parent->xn_rwlock);
	return (error);
}
Example #20
int
mdeg_unregister(mdeg_handle_t hdl)
{
	mdeg_clnt_t	*clnt;
	mdeg_handle_t	mdh;

	/*
	 * If the RW lock is held, a client is calling
	 * unregister from its own callback.
	 */
	if (RW_LOCK_HELD(&mdeg.rwlock)) {
		MDEG_DBG("mdeg_unregister: rwlock already held\n");
		return (MDEG_FAILURE);
	}

	/* lookup the client */
	if ((clnt = mdeg_get_client(hdl)) == NULL) {
		return (MDEG_FAILURE);
	}

	rw_enter(&mdeg.rwlock, RW_WRITER);

	MDEG_DBG("client unregistered (0x%lx):\n", hdl);
	MDEG_DUMP_CLNT(clnt);

	/* save the handle to prevent reuse */
	mdh = clnt->hdl;
	bzero(clnt, sizeof (mdeg_clnt_t));

	clnt->hdl = mdh;

	mdeg.nclnts--;

	rw_exit(&mdeg.rwlock);

	return (MDEG_SUCCESS);
}
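The guard above works because of what the held-query macros can and cannot report: RW_WRITE_HELD() is true only when the calling thread owns the lock as writer, while reader holds are anonymous, so RW_READ_HELD() and RW_LOCK_HELD() only say that someone holds the lock. Treating any hold as a possible self-deadlock, as mdeg_unregister() does, is conservative but safe. A short sketch of the distinction:

#include <sys/ksynch.h>

void
show_held_queries(krwlock_t *lockp)
{
	if (RW_WRITE_HELD(lockp)) {
		/* the calling thread holds lockp as writer */
	} else if (RW_READ_HELD(lockp)) {
		/* some thread, not necessarily this one, holds it as reader */
	}

	if (RW_LOCK_HELD(lockp)) {
		/* held in either mode; reader holds cannot be attributed */
	}
}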
Example #21
static void
s1394_cmp_impr_recv_lock_request(cmd1394_cmd_t *req)
{
	s1394_hal_t	*hal = req->cmd_callback_arg;
	s1394_cmp_hal_t *cmp = &hal->hal_cmp;
	boolean_t	notify = B_TRUE;

	TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_enter,
	    S1394_TNF_SL_CMP_STACK, "");

	if ((req->cmd_type != CMD1394_ASYNCH_LOCK_32) ||
	    (req->cmd_u.l32.lock_type != CMD1394_LOCK_COMPARE_SWAP)) {
		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
		notify = B_FALSE;
	} else {
		rw_enter(&cmp->cmp_impr_rwlock, RW_WRITER);
		req->cmd_u.l32.old_value = cmp->cmp_impr_val;
		if (cmp->cmp_impr_val == req->cmd_u.l32.arg_value) {
			/* write only allowed bits */
			cmp->cmp_impr_val = (req->cmd_u.l32.data_value &
			    IEC61883_CMP_IMPR_LOCK_MASK) |
			    (cmp->cmp_impr_val & ~IEC61883_CMP_IMPR_LOCK_MASK);
		}
		rw_exit(&cmp->cmp_impr_rwlock);
		req->cmd_result = IEEE1394_RESP_COMPLETE;
	}

	(void) s1394_send_response(hal, req);

	/* notify all targets */
	if (notify) {
		s1394_cmp_notify_reg_change(hal, T1394_CMP_IMPR, NULL);
	}

	TNF_PROBE_0_DEBUG(s1394_cmp_impr_recv_lock_request_exit,
	    S1394_TNF_SL_CMP_STACK, "");
}
Example #22
int
zcrypt_keystore_remove(spa_t *spa, uint64_t dsobj)
{
	zcrypt_keystore_node_t zk_tofind;
	zcrypt_keystore_node_t *zk;
	avl_tree_t *tree;
	int err = 0;

	ASSERT(spa->spa_keystore != NULL);

	zk_tofind.skn_os = dsobj;
	tree = &spa->spa_keystore->sk_dslkeys;

	rw_enter(&spa->spa_keystore->sk_lock, RW_WRITER);
	zk = avl_find(tree, &zk_tofind, NULL);
	if (zk == NULL) {
		goto out;
	}
	mutex_enter(&zk->skn_lock);

	err = zcrypt_keychain_fini(zk->skn_keychain);
	if (err != 0) {
		mutex_exit(&zk->skn_lock);
		goto out;
	}
	zcrypt_key_free(zk->skn_wrapkey);
	zk->skn_wrapkey = NULL;
	mutex_exit(&zk->skn_lock);
	mutex_destroy(&zk->skn_lock);

	avl_remove(tree, zk);
	kmem_free(zk, sizeof (zcrypt_keystore_node_t));
out:
	rw_exit(&spa->spa_keystore->sk_lock);

	return (err);
}
Example #23
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = SET_ERROR(EIO);
	} else {
		err = dbuf_read(db, NULL, db_flags);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	*dbp = &db->db; /* NULL db plus first field offset is NULL */
	return (err);
}
Example #24
static void
vdev_disk_close_impl(vdev_t *vd, boolean_t ldi_offline)
{
	vdev_disk_t *dvd;

	rw_enter(&vd->vdev_tsd_lock, RW_WRITER);
	dvd = vd->vdev_tsd;

	if (vd->vdev_reopening || dvd == NULL)
		goto out;

	if (dvd->vd_minor != NULL) {
		ddi_devid_str_free(dvd->vd_minor);
		dvd->vd_minor = NULL;
	}

	if (dvd->vd_devid != NULL) {
		ddi_devid_free(dvd->vd_devid);
		dvd->vd_devid = NULL;
	}

	if (dvd->vd_lh != NULL) {
		(void) ldi_close(dvd->vd_lh, spa_mode(vd->vdev_spa), kcred);
		dvd->vd_lh = NULL;
	}

	vd->vdev_delayed_close = B_FALSE;
	/*
	 * If we closed the LDI handle due to an offline notify from LDI,
	 * don't free vd->vdev_tsd or unregister the callbacks here;
	 * the offline finalize callback or a reopen will take care of it.
	 */
	if (!ldi_offline)
		vdev_disk_free_locked(vd);
out:
	rw_exit(&vd->vdev_tsd_lock);
}
Example #25
static void
vdev_disk_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	/*
	 * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see if
	 * the device has been removed.  If this is the case, then we trigger an
	 * asynchronous removal of the device. Otherwise, probe the device and
	 * make sure it's still accessible.
	 */
	if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
		vdev_disk_t *dvd;
		int rc = EIO, state = DKIO_NONE;

		rw_enter(&vd->vdev_tsd_lock, RW_READER);
		dvd = vd->vdev_tsd;
		if (dvd != NULL && dvd->vd_lh != NULL)
			rc = ldi_ioctl(dvd->vd_lh, DKIOCSTATE,
			    (intptr_t)&state, FKIOCTL, kcred, NULL);
		rw_exit(&vd->vdev_tsd_lock);
		if (rc == 0 && state != DKIO_INSERTED) {
			/*
			 * We post the resource as soon as possible, instead of
			 * when the async removal actually happens, because the
			 * DE is using this information to discard previous I/O
			 * errors.
			 */
			zfs_post_remove(zio->io_spa, vd);
			vd->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		} else if (!vd->vdev_delayed_close) {
			vd->vdev_delayed_close = B_TRUE;
		}
	}
}
Example #26
static ldi_handle_t
lx_ptm_lh_remove(uint_t index)
{
	ldi_handle_t	lh;

	rw_enter(&lps.lps_lh_rwlock, RW_WRITER);

	ASSERT(index < lps.lps_lh_count);
	ASSERT(lps.lps_lh_array[index].lph_handle != NULL);
	ASSERT(lps.lps_lh_array[index].lph_lpo->lpo_rops == 0);
	ASSERT(!MUTEX_HELD(&lps.lps_lh_array[index].lph_lpo->lpo_rops_lock));

	/* free the write handle */
	kmem_free(lps.lps_lh_array[index].lph_lpo, sizeof (lx_ptm_ops_t));
	lps.lps_lh_array[index].lph_lpo = NULL;

	/* remove the handle and return it */
	lh = lps.lps_lh_array[index].lph_handle;
	lps.lps_lh_array[index].lph_handle = NULL;
	lps.lps_lh_array[index].lph_pktio = 0;
	lps.lps_lh_array[index].lph_eofed = 0;
	rw_exit(&lps.lps_lh_rwlock);
	return (lh);
}
Example #27
int
dls_bind(dls_channel_t dc, uint32_t sap)
{
	dls_impl_t	*dip = (dls_impl_t *)dc;
	dls_link_t	*dlp;
	uint32_t	dls_sap;

	/*
	 * Check that the value is legal for the media type.
	 */
	if (!mac_sap_verify(dip->di_mh, sap, &dls_sap))
		return (EINVAL);
	if (dip->di_promisc & DLS_PROMISC_SAP)
		dls_sap = DLS_SAP_PROMISC;

	/*
	 * Set up the dls_impl_t to mark it as able to receive packets.
	 */
	rw_enter(&(dip->di_lock), RW_WRITER);
	ASSERT(!dip->di_bound);
	dip->di_sap = sap;
	dip->di_bound = B_TRUE;
	rw_exit(&(dip->di_lock));

	/*
	 * Now bind the dls_impl_t by adding it into the hash table in the
	 * dls_link_t.
	 *
	 * NOTE: This must be done without the dls_impl_t lock being held
	 *	 otherwise deadlock may ensue.
	 */
	dlp = dip->di_dvp->dv_dlp;
	dls_link_add(dlp, dls_sap, dip);

	return (0);
}
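The NOTE in the example above is the classic lock-ordering rule: dls_bind() drops di_lock before calling dls_link_add(), because calling into another subsystem while holding your own lock invites an AB/BA deadlock if that subsystem can take its own lock and then call back toward yours. The hazard in minimal form, with hypothetical locks a and b:

#include <sys/ksynch.h>

static krwlock_t a, b;	/* hypothetical locks owned by two subsystems */

void
thread_one(void)
{
	rw_enter(&a, RW_WRITER);
	rw_enter(&b, RW_WRITER);	/* blocks while thread_two holds b */
	rw_exit(&b);
	rw_exit(&a);
}

void
thread_two(void)
{
	rw_enter(&b, RW_WRITER);
	rw_enter(&a, RW_WRITER);	/* blocks while thread_one holds a: deadlock */
	rw_exit(&a);
	rw_exit(&b);
}

The fix is always one of two things: release your lock before calling out, as dls_bind() does, or define a single global acquisition order for the two locks.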
Example #28
void
e_devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;

	rw_enter(&dcfd->nvf_lock, RW_WRITER);

	for (np = NVF_DEVID_LIST(dcfd); np; np = next) {
		next = NVP_DEVID_NEXT(np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
				    "cleanup: %s\n", np->nvp_devpath));
			NVF_MARK_DIRTY(dcfd);
			nfd_nvp_free_and_unlink(dcfd, NVPLIST(np));
		}
	}

	rw_exit(&dcfd->nvf_lock);

	if (NVF_IS_DIRTY(dcfd))
		wake_nvpflush_daemon();
}
Example #29
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
	int64_t		rval;
	struct sysent	*callp;
	struct sysent	*se = LWP_GETSYSENT(ttolwp(curthread));
	krwlock_t	*module_lock;
	int		code;

	code = curthread->t_sysnum;
	callp = se + code;

	/*
	 * Try to autoload the system call if necessary.
	 */
	module_lock = lock_syscall(se, code);
	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */

	/*
	 * we've locked either the loaded syscall or nosys
	 */
	if (callp->sy_flags & SE_ARGC) {
		int64_t (*sy_call)();

		sy_call = (int64_t (*)())callp->sy_call;
		rval = (*sy_call)(a0, a1, a2, a3, a4, a5);
	} else {
		rval = syscall_ap();
	}

	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
	rw_exit(module_lock);
	return (rval);
}
Example #30
File: taskq.c Project: jpeach/zfsd
static void *
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	boolean_t prealloc;

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		t->tqent_func(t->tqent_arg);
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	mutex_exit(&tq->tq_lock);
	return (NULL);
}
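This last example uses the rwlock as a gate rather than as a data lock: each task function runs under a RW_READER hold of tq_threadlock, so taking the same lock as RW_WRITER blocks until every in-flight task function has returned. A hedged sketch of the barrier this enables; the helper name is hypothetical, and the real taskq code may wait differently (e.g. via tq_wait_cv, as seen above):

/* Hypothetical drain helper built on the RW_READER holds in taskq_thread() */
static void
taskq_thread_barrier(taskq_t *tq)
{
	/*
	 * The writer acquisition cannot be granted until every
	 * concurrently executing tqent_func() has dropped its
	 * RW_READER hold of tq_threadlock.
	 */
	rw_enter(&tq->tq_threadlock, RW_WRITER);
	rw_exit(&tq->tq_threadlock);
}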