Example #1
File: pios.c Project: ColinIanKing/zfs
static int
zpios_threads_run(run_args_t *run_args)
{
	struct task_struct *tsk, **tsks;
	thread_data_t *thr = NULL;
	zpios_time_t *tt = &(run_args->stats.total_time);
	zpios_time_t *tw = &(run_args->stats.wr_time);
	zpios_time_t *tr = &(run_args->stats.rd_time);
	int i, rc = 0, tc = run_args->thread_count;

	tsks = kmem_zalloc(sizeof (struct task_struct *) * tc, KM_SLEEP);

	run_args->threads = kmem_zalloc(sizeof (thread_data_t *)*tc, KM_SLEEP);

	init_waitqueue_head(&run_args->waitq);
	run_args->threads_done = 0;

	/* Create all the needed threads which will sleep until awoken */
	for (i = 0; i < tc; i++) {
		thr = kmem_zalloc(sizeof (thread_data_t), KM_SLEEP);

		thr->thread_no = i;
		thr->run_args = run_args;
		thr->rc = 0;
		mutex_init(&thr->lock, NULL, MUTEX_DEFAULT, NULL);
		run_args->threads[i] = thr;

		tsk = kthread_create(zpios_thread_main, (void *)thr,
		    "%s/%d", "zpios_io", i);
		if (IS_ERR(tsk)) {
			rc = -EINVAL;
			goto taskerr;
		}

		tsks[i] = tsk;
	}

	tt->start = zpios_timespec_now();

	/* Wake up all threads for write phase */
	(void) zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for write phase to complete */
	tw->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tw->stop = zpios_timespec_now();
	(void) zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);

	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.wr_data += thr->stats.wr_data;
		run_args->stats.wr_chunks += thr->stats.wr_chunks;
		mutex_exit(&thr->lock);
	}

	if (rc) {
		/* Wake up all threads and tell them to exit */
		for (i = 0; i < tc; i++) {
			thr = run_args->threads[i];

			mutex_enter(&thr->lock);
			thr->rc = rc;
			mutex_exit(&thr->lock);

			wake_up_process(tsks[i]);
		}
		goto out;
	}

	mutex_enter(&run_args->lock_ctl);
	ASSERT(run_args->threads_done == run_args->thread_count);
	run_args->threads_done = 0;
	mutex_exit(&run_args->lock_ctl);

	/* Wake up all threads for read phase */
	(void) zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for read phase to complete */
	tr->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tr->stop = zpios_timespec_now();
	(void) zpios_upcall(run_args->post, PHASE_POST_READ, run_args, rc);

	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.rd_data += thr->stats.rd_data;
		run_args->stats.rd_chunks += thr->stats.rd_chunks;
		mutex_exit(&thr->lock);
	}
out:
	tt->stop  = zpios_timespec_now();
	tt->delta = zpios_timespec_sub(tt->stop, tt->start);
	tw->delta = zpios_timespec_sub(tw->stop, tw->start);
	tr->delta = zpios_timespec_sub(tr->stop, tr->start);

cleanup:
	kmem_free(tsks, sizeof (struct task_struct *) * tc);
	return (rc);

taskerr:
	/* Destroy all threads that were created successfully */
	for (i = 0; i < tc; i++)
		if (tsks[i] != NULL)
			(void) kthread_stop(tsks[i]);

	goto cleanup;
}
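One detail worth noting in this example (and throughout this page): unlike Linux's kfree(), the illumos/SPL kmem_free() takes the allocation size as a second argument, so the size expression used with kmem_zalloc() must be repeated at free time, as in kmem_free(tsks, sizeof (struct task_struct *) * tc) above. A minimal sketch of that pairing follows; the my_state_t type and helper names are hypothetical, for illustration only.

#include <sys/kmem.h>

typedef struct my_state {		/* hypothetical example structure */
	int	ms_count;
} my_state_t;

static my_state_t *
my_state_alloc(int n)
{
	/* Zero-filled allocation; KM_SLEEP may block until memory is available. */
	return (kmem_zalloc(sizeof (my_state_t) * n, KM_SLEEP));
}

static void
my_state_free(my_state_t *msp, int n)
{
	/* kmem_free() requires the same size that was originally allocated. */
	kmem_free(msp, sizeof (my_state_t) * n);
}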
Example #2
/*
 * Common code for mount and mountroot
 */
int
ext2fs_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct lwp *l = curlwp;
	struct ufsmount *ump;
	struct buf *bp;
	struct ext2fs *fs;
	struct m_ext2fs *m_fs;
	dev_t dev;
	int error, i, ronly;
	kauth_cred_t cred;

	dev = devvp->v_rdev;
	cred = l->l_cred;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		return error;

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	bp = NULL;
	ump = NULL;

	/* Read the superblock from disk, and swap it directly. */
	error = bread(devvp, SBLOCK, SBSIZE, 0, &bp);
	if (error)
		goto out;
	fs = (struct ext2fs *)bp->b_data;
	m_fs = kmem_zalloc(sizeof(struct m_ext2fs), KM_SLEEP);
	e2fs_sbload(fs, &m_fs->e2fs);

	brelse(bp, 0);
	bp = NULL;

	/* Once swapped, validate and fill in the superblock. */
	error = ext2fs_sbfill(m_fs, ronly);
	if (error) {
		kmem_free(m_fs, sizeof(struct m_ext2fs));
		goto out;
	}
	m_fs->e2fs_ronly = ronly;

	ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
	ump->um_fstype = UFS1;
	ump->um_ops = &ext2fs_ufsops;
	ump->um_e2fs = m_fs;

	if (ronly == 0) {
		if (m_fs->e2fs.e2fs_state == E2FS_ISCLEAN)
			m_fs->e2fs.e2fs_state = 0;
		else
			m_fs->e2fs.e2fs_state = E2FS_ERRORS;
		m_fs->e2fs_fmod = 1;
	}

	/* XXX: should be added in ext2fs_sbfill()? */
	m_fs->e2fs_gd = kmem_alloc(m_fs->e2fs_ngdb * m_fs->e2fs_bsize, KM_SLEEP);
	for (i = 0; i < m_fs->e2fs_ngdb; i++) {
		error = bread(devvp,
		    EXT2_FSBTODB(m_fs, m_fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    m_fs->e2fs_bsize, 0, &bp);
		if (error) {
			kmem_free(m_fs->e2fs_gd,
			    m_fs->e2fs_ngdb * m_fs->e2fs_bsize);
			goto out;
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &m_fs->e2fs_gd[
			i * m_fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    m_fs->e2fs_bsize);
		brelse(bp, 0);
		bp = NULL;
	}

	error = ext2fs_cg_verify_and_initialize(devvp, m_fs, ronly);
	if (error) {
		kmem_free(m_fs->e2fs_gd, m_fs->e2fs_ngdb * m_fs->e2fs_bsize);
		goto out;
	}

	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_EXT2FS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = EXT2FS_MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_fs_bshift = m_fs->e2fs_bshift;
	mp->mnt_iflag |= IMNT_DTYPE;
	ump->um_flags = 0;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = EXT2_NINDIR(m_fs);
	ump->um_lognindir = ffs(EXT2_NINDIR(m_fs)) - 1;
	ump->um_bptrtodb = m_fs->e2fs_fsbtodb;
	ump->um_seqinc = 1; /* no frags */
	ump->um_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	ump->um_dirblksiz = m_fs->e2fs_bsize;
	ump->um_maxfilesize = ((uint64_t)0x80000000 * m_fs->e2fs_bsize - 1);
	spec_node_setmountedfs(devvp, mp);
	return 0;

out:
	if (bp != NULL)
		brelse(bp, 0);
	if (ump) {
		kmem_free(ump->um_e2fs, sizeof(struct m_ext2fs));
		kmem_free(ump, sizeof(*ump));
		mp->mnt_data = NULL;
	}
	return error;
}
Example #3
File: fipe_pm.c Project: bahamas10/openzfs
/*
 * Initialize IOAT relative resources.
 */
static int
fipe_ioat_init(void)
{
	char *buf;
	size_t size;

	bzero(&fipe_ioat_ctrl, sizeof (fipe_ioat_ctrl));
	mutex_init(&fipe_ioat_ctrl.ioat_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Allocate memory for IOAT memory copy operation.
	 * The allocated memory should be page aligned to achieve better power
	 * savings.
	 * Don't use ddi_dma_mem_alloc here to keep things simple.  This also
	 * makes quiesce easier.
	 */
	size = PAGESIZE;
	buf = kmem_zalloc(size, KM_SLEEP);
	if ((intptr_t)buf & PAGEOFFSET) {
		kmem_free(buf, PAGESIZE);
		size <<= 1;
		buf = kmem_zalloc(size, KM_SLEEP);
	}
	fipe_ioat_ctrl.ioat_buf_size = size;
	fipe_ioat_ctrl.ioat_buf_start = buf;
	buf = (char *)P2ROUNDUP((intptr_t)buf, PAGESIZE);
	fipe_ioat_ctrl.ioat_buf_virtaddr = buf;
	fipe_ioat_ctrl.ioat_buf_physaddr = hat_getpfnum(kas.a_hat, buf);
	fipe_ioat_ctrl.ioat_buf_physaddr <<= PAGESHIFT;

#ifdef	FIPE_IOAT_BUILTIN
	{
		uint64_t bufpa;
		/* IOAT descriptor data structure copied from ioat.h. */
		struct fipe_ioat_cmd_desc {
			uint32_t	dd_size;
			uint32_t	dd_ctrl;
			uint64_t	dd_src_paddr;
			uint64_t	dd_dest_paddr;
			uint64_t	dd_next_desc;
			uint64_t	dd_res4;
			uint64_t	dd_res5;
			uint64_t	dd_res6;
			uint64_t	dd_res7;
		} *desc;

		/*
		 * Build two IOAT command descriptors and chain them into ring.
		 * Control flags as below:
		 *	0x2: disable source snoop
		 *	0x4: disable destination snoop
		 *	0x0 << 24: memory copy operation
		 * The layout for command descriptors and memory buffers are
		 * organized for power saving effect, please don't change it.
		 */
		buf = fipe_ioat_ctrl.ioat_buf_virtaddr;
		bufpa = fipe_ioat_ctrl.ioat_buf_physaddr;
		fipe_ioat_ctrl.ioat_cmd_physaddr = bufpa;

		/* First command descriptor. */
		desc = (struct fipe_ioat_cmd_desc *)(buf);
		desc->dd_size = 128;
		desc->dd_ctrl = 0x6;
		desc->dd_src_paddr = bufpa + 2048;
		desc->dd_dest_paddr = bufpa + 3072;
		/* Point to second descriptor. */
		desc->dd_next_desc = bufpa + 64;

		/* Second command descriptor. */
		desc = (struct fipe_ioat_cmd_desc *)(buf + 64);
		desc->dd_size = 128;
		desc->dd_ctrl = 0x6;
		desc->dd_src_paddr = bufpa + 2048;
		desc->dd_dest_paddr = bufpa + 3072;
		/* Point to first descriptor. */
		desc->dd_next_desc = bufpa;
	}
#endif	/* FIPE_IOAT_BUILTIN */

	return (0);
}
Example #4
/*
 * Exported interface to register a LDC endpoint with
 * the channel nexus
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int		idx;
	cnex_ldc_t	*cldcp;
	cnex_ldc_t	*new_cldcp;
	int		listsz, num_nodes, num_channels;
	md_t		*mdp = NULL;
	mde_cookie_t	rootnode, *listp = NULL;
	uint64_t	tmp_id;
	uint64_t	rxino = (uint64_t)-1;
	uint64_t	txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int		status, instance;
	dev_info_t	*chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel_endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	new_cldcp = kmem_zalloc(sizeof (*new_cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&new_cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	new_cldcp->id = id;
	new_cldcp->tx.ino = txino;
	new_cldcp->rx.ino = rxino;
	new_cldcp->devclass = devclass;
	new_cldcp->tx.weight = CNEX_TX_INTR_WEIGHT;
	new_cldcp->rx.weight = cnex_class_weight(devclass);
	new_cldcp->dip = chan_dip;

	/*
	 * Add channel to nexus channel list.
	 * Check again to see if channel is already registered since
	 * clist_lock was dropped above.
	 */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			mutex_destroy(&new_cldcp->lock);
			kmem_free(new_cldcp, sizeof (*new_cldcp));
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}
	new_cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = new_cldcp;
	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}
Example #5
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 0, 0, 0);

	dtrace_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);

	/* Register callbacks for linker file load and unload events. */
	dtrace_kld_load_tag = EVENTHANDLER_REGISTER(kld_load,
	    dtrace_kld_load, NULL, EVENTHANDLER_PRI_ANY);
	dtrace_kld_unload_try_tag = EVENTHANDLER_REGISTER(kld_unload_try,
	    dtrace_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock, "dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock, "dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock, "dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
#ifdef DEBUG
	mutex_init(&dtrace_errlock, "dtrace error lock", MUTEX_DEFAULT, NULL);
#endif

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the boot CPU */
	(void) dtrace_cpu_setup(CPU_CONFIG, 0);

	mutex_exit(&cpu_lock);

#if __FreeBSD_version < 800039
	/* Enable device cloning. */
	clone_setup(&dtrace_clones);

	/* Setup device cloning events. */
	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
#else
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
	helper_dev = make_dev(&helper_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
	    "dtrace/helper");
#endif

	return;
}
Example #6
static int
VMBlockMount(struct vfs *vfsp,     // IN: file system to mount
             struct vnode *vnodep, // IN: Vnode that we are mounting on
             struct mounta *mntp,  // IN: Arguments to mount(2) from user
             struct cred *credp)   // IN: Credentials of caller
{
   VMBlockMountInfo *mip;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockMount: entry\n");

   /*
    * These next few checks are done by all other Solaris file systems, so
    * let's follow their lead.
    */
   ret = secpolicy_fs_mount(credp, vnodep, vfsp);
   if (ret) {
      Warning("VMBlockMount: mounting security check failed.\n");
      return ret;
   }

   if (vnodep->v_type != VDIR) {
      Warning("VMBlockMount: not mounting on a directory.\n");
      return ENOTDIR;
   }

   mutex_enter(&vnodep->v_lock);
   if ((mntp->flags & MS_OVERLAY) == 0 &&
       (vnodep->v_count != 1 || (vnodep->v_flag & VROOT))) {
      mutex_exit(&vnodep->v_lock);
      Warning("VMBlockMount: cannot allow unrequested overlay mount.\n");
      return EBUSY;
   }
   mutex_exit(&vnodep->v_lock);

   /*
    * The directory we are redirecting to is specified as the special file
    * since we have no actual device to mount on.  We store that path in the
    * mount information structure (note that there's another allocation inside
    * pn_get() so we must pn_free() that path at unmount time). KM_SLEEP
    * guarantees our memory allocation will succeed (pn_get() uses this flag
    * too).
    */
   mip = kmem_zalloc(sizeof *mip, KM_SLEEP);
   ret = pn_get(mntp->spec,
                (mntp->flags & MS_SYSSPACE) ? UIO_SYSSPACE : UIO_USERSPACE,
                &mip->redirectPath);
   if (ret) {
      Warning("VMBlockMount: could not obtain redirecting directory.\n");
      kmem_free(mip, sizeof *mip);
      return ret;
   }

   /* Do a lookup on the specified path. */
   ret = lookupname(mntp->spec,
                    (mntp->flags & MS_SYSSPACE) ? UIO_SYSSPACE : UIO_USERSPACE,
                    FOLLOW,
                    NULLVPP,
                    &mip->redirectVnode);
   if (ret) {
      Warning("VMBlockMount: could not obtain redirecting directory.\n");
      goto error_lookup;
   }

   if (mip->redirectVnode->v_type != VDIR) {
      Warning("VMBlockMount: not redirecting to a directory.\n");
      ret = ENOTDIR;
      goto error;
   }

   /*
    * Initialize our vfs structure.
    */
   vfsp->vfs_vnodecovered = vnodep;
   vfsp->vfs_flag &= ~VFS_UNMOUNTED;
   vfsp->vfs_flag |= VMBLOCK_VFS_FLAGS;
   vfsp->vfs_bsize = PAGESIZE;
   vfsp->vfs_fstype = vmblockType;
   vfsp->vfs_bcount = 0;
   /* If we had mount options, we'd call vfs_setmntopt with vfsp->vfs_mntopts */

   /* Locate a unique device minor number for this mount. */
   mutex_enter(&vmblockMutex);
   do {
      vfsp->vfs_dev = makedevice(vmblockMajor, vmblockMinor);
      vmblockMinor = (vmblockMinor + 1) & L_MAXMIN32;
   } while (vfs_devismounted(vfsp->vfs_dev));
   mutex_exit(&vmblockMutex);

   vfs_make_fsid(&vfsp->vfs_fsid, vfsp->vfs_dev, vmblockType);
   vfsp->vfs_data = (caddr_t)mip;

   /*
    * Now create the root vnode of the file system.
    */
   ret = VMBlockVnodeGet(&mip->root, mip->redirectVnode,
                         mip->redirectPath.pn_path,
                         mip->redirectPath.pn_pathlen,
                         NULL, vfsp, TRUE);
   if (ret) {
      Warning("VMBlockMount: couldn't create root vnode.\n");
      ret = EFAULT;
      goto error;
   }

   VN_HOLD(vfsp->vfs_vnodecovered);
   return 0;

error:
   /* lookupname() provides a held vnode. */
   VN_RELE(mip->redirectVnode);
error_lookup:
   pn_free(&mip->redirectPath);
   kmem_free(mip, sizeof *mip);
   return ret;
}
Example #7
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd;
	struct dk_minfo_ext dkmext;
	int error;
	dev_t dev;
	int otyp;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		dvd = vd->vdev_tsd;
		goto skip_open;
	}

	dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 */
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (EINVAL);
		}
	}

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {
		ddi_devid_t devid;

		if (vd->vdev_wholedisk == -1ULL) {
			size_t len = strlen(vd->vdev_path) + 3;
			char *buf = kmem_alloc(len, KM_SLEEP);
			ldi_handle_t lh;

			(void) snprintf(buf, len, "%ss0", vd->vdev_path);

			if (ldi_open_by_name(buf, spa_mode(spa), kcred,
			    &lh, zfs_li) == 0) {
				spa_strfree(vd->vdev_path);
				vd->vdev_path = buf;
				vd->vdev_wholedisk = 1ULL;
				(void) ldi_close(lh, spa_mode(spa), kcred);
			} else {
				kmem_free(buf, len);
			}
		}

		error = ldi_open_by_name(vd->vdev_path, spa_mode(spa), kcred,
		    &dvd->vd_lh, zfs_li);

		/*
		 * Compare the devid to the stored value.
		 */
		if (error == 0 && vd->vdev_devid != NULL &&
		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
				error = EINVAL;
				(void) ldi_close(dvd->vd_lh, spa_mode(spa),
				    kcred);
				dvd->vd_lh = NULL;
			}
			ddi_devid_free(devid);
		}

		/*
		 * If we succeeded in opening the device, but 'vdev_wholedisk'
		 * is not yet set, then this must be a slice.
		 */
		if (error == 0 && vd->vdev_wholedisk == -1ULL)
			vd->vdev_wholedisk = 0;
	}

	/*
	 * If we were unable to open by path, or the devid check fails, open by
	 * devid instead.
	 */
	if (error != 0 && vd->vdev_devid != NULL)
		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
		    spa_mode(spa), kcred, &dvd->vd_lh, zfs_li);

	/*
	 * If all else fails, then try opening by physical path (if available)
	 * or the logical path (if we failed due to the devid check).  While not
	 * as reliable as the devid, this will give us something, and the higher
	 * level vdev validation will prevent us from opening the wrong device.
	 */
	if (error) {
		if (vd->vdev_physpath != NULL &&
		    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
			error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);

		/*
		 * Note that we don't support the legacy auto-wholedisk support
		 * as above.  This hasn't been used in a very long time and we
		 * don't need to propagate its oddities to this edge condition.
		 */
		if (error && vd->vdev_path != NULL)
			error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
	}

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	/*
	 * Once a device is opened, verify that the physical device path (if
	 * available) is up to date.
	 */
	if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
	    ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
		char *physpath, *minorname;

		physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		minorname = NULL;
		if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
		    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
		    (vd->vdev_physpath == NULL ||
		    strcmp(vd->vdev_physpath, physpath) != 0)) {
			if (vd->vdev_physpath)
				spa_strfree(vd->vdev_physpath);
			(void) strlcat(physpath, ":", MAXPATHLEN);
			(void) strlcat(physpath, minorname, MAXPATHLEN);
			vd->vdev_physpath = spa_strdup(physpath);
		}
		if (minorname)
			kmem_free(minorname, strlen(minorname) + 1);
		kmem_free(physpath, MAXPATHLEN);
	}

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (EINVAL);
	}

	/*
	 * If we own the whole disk, try to enable disk write caching.
	 * We ignore errors because it's OK if we can't do it.
	 */
	if (vd->vdev_wholedisk == 1) {
		int wce = 1;
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);
	}

	/*
	 * Determine the device's minimum transfer size.
	 * If the ioctl isn't supported, assume DEV_BSIZE.
	 */
	if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT, (intptr_t)&dkmext,
	    FKIOCTL, kcred, NULL) != 0)
		dkmext.dki_pbsize = DEV_BSIZE;

	*ashift = highbit(MAX(dkmext.dki_pbsize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	return (0);
}
Example #8
/*
 * If bridge is PM capable, set up PM state for nexus.
 */
static void
ppb_pwr_setup(ppb_devstate_t *ppb, dev_info_t *pdip)
{
	char *comp_array[5];
	int i;
	ddi_acc_handle_t conf_hdl;
	uint8_t pmcsr_bse;
	uint16_t pmcap;

	/*
	 * Determine if bridge is PM capable.  If not, leave ppb_pwr_p NULL
	 * and return.
	 */
	if (pci_config_setup(pdip, &ppb->ppb_conf_hdl) != DDI_SUCCESS) {

		return;
	}

	conf_hdl = ppb->ppb_conf_hdl;

	/*
	 * Locate and store the power management cap_ptr for future references.
	 */
	if ((PCI_CAP_LOCATE(conf_hdl, PCI_CAP_ID_PM, &ppb->ppb_pm_cap_ptr))
	    == DDI_FAILURE) {
		DEBUG0(DBG_PWR, pdip, "bridge does not support PM. PCI"
		    " PM data structure not found in config header\n");
		pci_config_teardown(&conf_hdl);

		return;
	}

	/*
	 * Allocate PM state structure for ppb.
	 */
	ppb->ppb_pwr_p = (pci_pwr_t *)
	    kmem_zalloc(sizeof (pci_pwr_t), KM_SLEEP);
	ppb->ppb_pwr_p->pwr_fp = 0;

	pmcsr_bse = PCI_CAP_GET8(conf_hdl, NULL, ppb->ppb_pm_cap_ptr,
	    PCI_PMCSR_BSE);

	pmcap = PCI_CAP_GET16(conf_hdl, NULL, ppb->ppb_pm_cap_ptr,
	    PCI_PMCAP);

	if (pmcap == PCI_CAP_EINVAL16 || pmcsr_bse == PCI_CAP_EINVAL8) {
		pci_config_teardown(&conf_hdl);
		return;
	}

	if (pmcap & PCI_PMCAP_D1) {
		DEBUG0(DBG_PWR, pdip, "setup: B1 state supported\n");
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B1_CAPABLE;
	} else {
		DEBUG0(DBG_PWR, pdip, "setup: B1 state NOT supported\n");
	}
	if (pmcap & PCI_PMCAP_D2) {
		DEBUG0(DBG_PWR, pdip, "setup: B2 state supported\n");
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B2_CAPABLE;
	} else {
		DEBUG0(DBG_PWR, pdip, "setup: B2 via D2 NOT supported\n");
	}

	if (pmcsr_bse & PCI_PMCSR_BSE_BPCC_EN) {
		DEBUG0(DBG_PWR, pdip,
		"setup: bridge power/clock control enable\n");
	} else {
		DEBUG0(DBG_PWR, pdip,
		"setup: bridge power/clock control disabled\n");

		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;
		pci_config_teardown(&conf_hdl);

		return;
	}

	/*
	 * PCI states D0 and D3 always are supported for normal PCI
	 * devices.  D1 and D2 are optional which are checked for above.
	 * Bridge function states D0-D3 correspond to secondary bus states
	 * B0-B3, EXCEPT if PCI_PMCSR_BSE_B2_B3 is set.  In this case, setting
	 * the bridge function to D3 will set the bridge bus to state B2 instead
	 * of B3.  D2 will not correspond to B2 (and in fact, probably
	 * won't be D2 capable).  Implicitly, this means that if
	 * PCI_PMCSR_BSE_B2_B3 is set, the bus will not be B3 capable.
	 */
	if (pmcsr_bse & PCI_PMCSR_BSE_B2_B3) {
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B2_CAPABLE;
		DEBUG0(DBG_PWR, pdip, "B2 supported via D3\n");
	} else {
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B3_CAPABLE;
		DEBUG0(DBG_PWR, pdip, "B3 supported via D3\n");
	}

	ppb->ppb_pwr_p->pwr_dip = pdip;
	mutex_init(&ppb->ppb_pwr_p->pwr_mutex, NULL, MUTEX_DRIVER, NULL);

	i = 0;
	comp_array[i++] = "NAME=PCI bridge PM";
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) {
		comp_array[i++] = "0=Clock/Power Off (B3)";
	}
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B2_CAPABLE) {
		comp_array[i++] = "1=Clock Off (B2)";
	}
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B1_CAPABLE) {
		comp_array[i++] = "2=Bus Inactive (B1)";
	}
	comp_array[i++] = "3=Full Power (B0)";

	/*
	 * Create the pm-components property; it does not already exist.
	 */
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, pdip,
	    "pm-components", comp_array, i) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d pm-components prop update failed",
		    ddi_driver_name(pdip), ddi_get_instance(pdip));
		pci_config_teardown(&conf_hdl);
		mutex_destroy(&ppb->ppb_pwr_p->pwr_mutex);
		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;

		return;
	}

	if (ddi_prop_create(DDI_DEV_T_NONE, pdip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(pdip), ddi_get_instance(pdip));

		(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip, "pm-components");
		pci_config_teardown(&conf_hdl);
		mutex_destroy(&ppb->ppb_pwr_p->pwr_mutex);
		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;

		return;
	}

	ppb->ppb_pwr_p->current_lvl =
	    pci_pwr_current_lvl(ppb->ppb_pwr_p);
}
Example #9
int
iscsi_ioctl_set_tunable_param(iscsi_hba_t *ihp, iscsi_tunable_object_t *tpss)
{
	uchar_t *name;
	iscsi_sess_t *isp;
	iscsi_conn_t *icp;
	int	param_id = 0;
	persistent_tunable_param_t *pparam;

	if (tpss->t_oid == ihp->hba_oid) {
		name = ihp->hba_name;
	} else {
		/* get target name */
		name = iscsi_targetparam_get_name(tpss->t_oid);
		if (name == NULL) {
			/* invalid node name */
			return (EINVAL);
		}
	}

	pparam = (persistent_tunable_param_t *)kmem_zalloc(sizeof (*pparam),
	    KM_SLEEP);
	if (persistent_get_tunable_param((char *)name, pparam) == B_FALSE) {
		/* use default value */
		pparam->p_params.recv_login_rsp_timeout =
		    ISCSI_DEFAULT_RX_TIMEOUT_VALUE;
		pparam->p_params.polling_login_delay =
		    ISCSI_DEFAULT_LOGIN_POLLING_DELAY;
		pparam->p_params.conn_login_max =
		    ISCSI_DEFAULT_CONN_DEFAULT_LOGIN_MAX;
	}

	pparam->p_bitmap |= (1 << (tpss->t_param - 1));
	param_id = 1 << (tpss->t_param - 1);
	switch (param_id) {
	case ISCSI_TUNABLE_PARAM_RX_TIMEOUT_VALUE:
		pparam->p_params.recv_login_rsp_timeout =
		    tpss->t_value.v_integer;
		break;
	case ISCSI_TUNABLE_PARAM_LOGIN_POLLING_DELAY:
		pparam->p_params.polling_login_delay =
		    tpss->t_value.v_integer;
		break;
	case ISCSI_TUNABLE_PARAM_CONN_LOGIN_MAX:
		pparam->p_params.conn_login_max =
		    tpss->t_value.v_integer;
		break;
	default:
		break;
	}
	if (persistent_set_tunable_param((char *)name,
	    pparam) == B_FALSE) {
		kmem_free(pparam, sizeof (*pparam));
		return (EINVAL);
	}

	if (tpss->t_oid == ihp->hba_oid) {
		bcopy(&pparam->p_params, &ihp->hba_tunable_params,
		    sizeof (iscsi_tunable_params_t));
	}

	rw_enter(&ihp->hba_sess_list_rwlock, RW_READER);
	for (isp = ihp->hba_sess_list; isp; isp = isp->sess_next) {
		if (isp->sess_type != ISCSI_SESS_TYPE_NORMAL) {
			continue;
		}
		rw_enter(&isp->sess_conn_list_rwlock, RW_READER);
		icp = isp->sess_conn_list;
		while (icp != NULL) {
			if (strcmp((const char *)name,
			    (const char *)isp->sess_name) == 0) {
				bcopy(&pparam->p_params,
				    &icp->conn_tunable_params,
				    sizeof (iscsi_tunable_params_t));
			} else {
				/*
				 * this session connected target
				 * tunable parameters not set,
				 * use initiator's default
				 */
				bcopy(&ihp->hba_tunable_params,
				    &icp->conn_tunable_params,
				    sizeof (iscsi_tunable_params_t));
			}
			icp = icp->conn_next;
		}
		rw_exit(&isp->sess_conn_list_rwlock);
	}
	rw_exit(&ihp->hba_sess_list_rwlock);
	kmem_free(pparam, sizeof (*pparam));
	return (0);
}
Example #10
File: zfs_sa.c Project: avg-I/zfs
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
    dmu_buf_t *db = sa_get_db(hdl);
    znode_t *zp = sa_get_userdata(hdl);
    zfs_sb_t *zsb = ZTOZSB(zp);
    int count = 0;
    sa_bulk_attr_t *bulk, *sa_attrs;
    zfs_acl_locator_cb_t locate = { 0 };
    uint64_t uid, gid, mode, rdev, xattr, parent;
    uint64_t crtime[2], mtime[2], ctime[2];
    zfs_acl_phys_t znode_acl;
    char scanstamp[AV_SCANSTAMP_SZ];
    boolean_t drop_lock = B_FALSE;

    /*
     * No upgrade if ACL isn't cached
     * since we won't know which locks are held
     * and reading the ACL would require special "locked"
     * interfaces that would be messy
     */
    if (zp->z_acl_cached == NULL || S_ISLNK(ZTOI(zp)->i_mode))
        return;

    /*
     * If the z_lock is held and we aren't the owner
     * then just return since we don't want to deadlock
     * trying to update the status of z_is_sa.  This
     * file can then be upgraded at a later time.
     *
     * Otherwise, we know we are doing the
     * sa_update() that caused us to enter this function.
     */
    if (mutex_owner(&zp->z_lock) != curthread) {
        if (mutex_tryenter(&zp->z_lock) == 0)
            return;
        else
            drop_lock = B_TRUE;
    }

    /* First do a bulk query of the attributes that aren't cached */
    bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zsb), NULL, &xattr, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zsb), NULL, &rdev, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zsb), NULL,
                     &znode_acl, 88);

    if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) {
        kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
        goto done;
    }

    /*
     * While the order here doesn't matter, it's best to try and organize
     * it in such a way as to pick up an already existing layout number
     */
    count = 0;
    sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL,
                     &zp->z_size, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zsb),
                     NULL, &zp->z_gen, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zsb), NULL, &uid, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zsb), NULL, &gid, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zsb),
                     NULL, &parent, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zsb), NULL,
                     &zp->z_pflags, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zsb), NULL,
                     zp->z_atime, 16);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zsb), NULL,
                     &mtime, 16);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zsb), NULL,
                     &ctime, 16);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zsb), NULL,
                     &crtime, 16);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zsb), NULL,
                     &zp->z_links, 8);
    if (S_ISBLK(ZTOI(zp)->i_mode) || S_ISCHR(ZTOI(zp)->i_mode))
        SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zsb), NULL,
                         &rdev, 8);
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zsb), NULL,
                     &zp->z_acl_cached->z_acl_count, 8);

    if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
        zfs_acl_xform(zp, zp->z_acl_cached, CRED());

    locate.cb_aclp = zp->z_acl_cached;
    SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zsb),
                     zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);

    if (xattr)
        SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zsb),
                         NULL, &xattr, 8);

    /* if scanstamp then add scanstamp */

    if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
        bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
              scanstamp, AV_SCANSTAMP_SZ);
        SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zsb),
                         NULL, scanstamp, AV_SCANSTAMP_SZ);
        zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
    }

    VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
    VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
            count, tx) == 0);
    if (znode_acl.z_acl_extern_obj)
        VERIFY(0 == dmu_object_free(zsb->z_os,
                                    znode_acl.z_acl_extern_obj, tx));

    zp->z_is_sa = B_TRUE;
    kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * 20);
    kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20);
done:
    if (drop_lock)
        mutex_exit(&zp->z_lock);
}
Example #11
int
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
{
	cpu_t		*cp;
	kcpc_ctx_t	*ctx;
	int		error;

	ctx = kcpc_ctx_alloc();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = cpuid;
	ctx->kc_thread = curthread;

	set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * We must hold cpu_lock to prevent DR, offlining, or unbinding while
	 * we are manipulating the cpu_t and programming the hardware, else
	 * the cpu_t could go away while we're looking at it.
	 */
	mutex_enter(&cpu_lock);
	cp = cpu_get(cpuid);

	if (cp == NULL)
		/*
		 * The CPU could have been DRd out while we were getting set up.
		 */
		goto unbound;

	mutex_enter(&cp->cpu_cpc_ctxlock);

	if (cp->cpu_cpc_ctx != NULL) {
		/*
		 * If this CPU already has a bound set, return an error.
		 */
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	if (curthread->t_bind_cpu != cpuid) {
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}
	cp->cpu_cpc_ctx = ctx;

	/*
	 * Kernel preemption must be disabled while fiddling with the hardware
	 * registers to prevent partial updates.
	 */
	kpreempt_disable();
	ctx->kc_rawtick = KCPC_GET_TICK();
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();

	mutex_exit(&cp->cpu_cpc_ctxlock);
	mutex_exit(&cpu_lock);

	return (0);

unbound:
	mutex_exit(&cpu_lock);
	set->ks_ctx = NULL;
	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	return (EAGAIN);
}
Example #12
File: drm_memory.c Project: nido/pscnv
void *
drm_calloc(size_t nmemb, size_t size, int area)
{
    return (kmem_zalloc(size * nmemb, KM_NOSLEEP));
}
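The multiplication size * nmemb above is handed to kmem_zalloc() with no overflow check, so a large enough nmemb could wrap around and silently under-allocate. A more defensive variant might look like the sketch below; this is illustrative only, not code from the pscnv project, and the _checked name is made up.

void *
drm_calloc_checked(size_t nmemb, size_t size, int area)
{
	/* Refuse requests whose product would overflow size_t. */
	if (size != 0 && nmemb > (size_t)-1 / size)
		return (NULL);

	return (kmem_zalloc(size * nmemb, KM_NOSLEEP));
}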
Example #13
File: drm_memory.c Project: nido/pscnv
void *
drm_alloc(size_t size, int area)
{
    return (kmem_zalloc(1 * size, KM_NOSLEEP));
}
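Both wrappers pass KM_NOSLEEP, so kmem_zalloc() may return NULL under memory pressure instead of sleeping, and every caller must check the result before using it. A hypothetical call site is sketched below; the drm_foo_t type, drm_foo_init() name, and the DRM_MEM_DRIVER area constant are assumptions for illustration.

static int
drm_foo_init(void)
{
	drm_foo_t *foo;		/* hypothetical driver structure */

	foo = drm_alloc(sizeof (*foo), DRM_MEM_DRIVER);
	if (foo == NULL)
		return (ENOMEM);	/* KM_NOSLEEP allocation can fail */

	/* ... initialize and use foo ... */
	return (0);
}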
Example #14
/*
 * Generate the pool's configuration based on the current in-core state.
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
	    spa_version(spa)) == 0);
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
	    spa_name(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    spa_state(spa)) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    txg) == 0);
	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    spa_guid(spa)) == 0);
	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);


#ifdef	_KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname.nodename) == 0);

	if (vd != rvd) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_SPARE,
			    1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_LOG,
			    1ULL) == 0);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
Example #15
/*
 * Save the configuration registers for cdip as a property
 * so that it persists after detach/uninitchild.
 */
int
pci_save_config_regs(dev_info_t *dip)
{
	ddi_acc_handle_t confhdl;
	pci_config_header_state_t *chsp;
	pci_cap_save_desc_t *pci_cap_descp;
	int ret;
	uint32_t i, ncaps, nwords;
	uint32_t *regbuf, *p;
	uint8_t *maskbuf;
	size_t maskbufsz, regbufsz, capbufsz;
	ddi_acc_hdl_t *hp;
	off_t offset = 0;
	uint8_t cap_ptr, cap_id;
	int pcie = 0;
	PMD(PMD_SX, ("pci_save_config_regs %s:%d\n", ddi_driver_name(dip),
	    ddi_get_instance(dip)))

	if (pci_config_setup(dip, &confhdl) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d can't get config handle",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (DDI_FAILURE);
	}
	/*
	 * Determine if it is a PCI Express device. If it is, save the entire
	 * 4k config space, treating it as an array of 32-bit integers.
	 * If it is not, do it in the usual PCI way.
	 */
	cap_ptr = pci_config_get8(confhdl, PCI_BCNF_CAP_PTR);
	/*
	 * Walk the capabilities searching for pci express capability
	 */
	while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		cap_id = pci_config_get8(confhdl,
		    cap_ptr + PCI_CAP_ID);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			pcie = 1;
			break;
		}
		cap_ptr = pci_config_get8(confhdl,
		    cap_ptr + PCI_CAP_NEXT_PTR);
	}

	if (pcie) {
		/* PCI express device. Can have data in all 4k space */
		regbuf = (uint32_t *)kmem_zalloc((size_t)PCIE_CONF_HDR_SIZE,
		    KM_SLEEP);
		p = regbuf;
		/*
		 * Allocate space for mask.
		 * mask size is 128 bytes (4096 / 4 / 8 )
		 */
		maskbufsz = (size_t)((PCIE_CONF_HDR_SIZE/ sizeof (uint32_t)) >>
		    INDEX_SHIFT);
		maskbuf = (uint8_t *)kmem_zalloc(maskbufsz, KM_SLEEP);
		hp = impl_acc_hdl_get(confhdl);
		for (i = 0; i < (PCIE_CONF_HDR_SIZE / sizeof (uint32_t)); i++) {
			if (ddi_peek32(dip, (int32_t *)(hp->ah_addr + offset),
			    (int32_t *)p) == DDI_SUCCESS) {
				/* it is readable register. set the bit */
				maskbuf[i >> INDEX_SHIFT] |=
				    (uint8_t)(1 << (i & BITMASK));
			}
			p++;
			offset += sizeof (uint32_t);
		}

		if ((ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip,
		    SAVED_CONFIG_REGS_MASK, (uchar_t *)maskbuf,
		    maskbufsz)) != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "couldn't create %s property while"
			    "saving config space for %s@%d\n",
			    SAVED_CONFIG_REGS_MASK, ddi_driver_name(dip),
			    ddi_get_instance(dip));
		} else if ((ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE,
		    dip, SAVED_CONFIG_REGS, (uchar_t *)regbuf,
		    (size_t)PCIE_CONF_HDR_SIZE)) != DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    SAVED_CONFIG_REGS_MASK);
			cmn_err(CE_WARN, "%s%d can't update prop %s",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    SAVED_CONFIG_REGS);
		}

		kmem_free(maskbuf, (size_t)maskbufsz);
		kmem_free(regbuf, (size_t)PCIE_CONF_HDR_SIZE);
	} else {
Example #16
/*
 * iscsi_ioctl_sendtgts_get - 0 on success; errno on failure
 *
 */
int
iscsi_ioctl_sendtgts_get(iscsi_hba_t *ihp, iscsi_sendtgts_list_t *stl)
{
#define	ISCSI_SENDTGTS_REQ_STR		"SendTargets=All"

	int			rtn = EFAULT;
	iscsi_status_t		status;
	iscsi_sess_t		*isp;
	iscsi_conn_t		*icp;
	uint32_t		oid;
	char			*data;
	uint32_t		data_len;
	uint32_t		rx_data_len;
	iscsi_sockaddr_t	addr_snd;

	ASSERT(ihp != NULL);
	ASSERT(stl != NULL);

	iscsid_addr_to_sockaddr(stl->stl_entry.e_insize,
	    &stl->stl_entry.e_u, stl->stl_entry.e_port,
	    &addr_snd.sin);

	/* create discovery session */
	rw_enter(&ihp->hba_sess_list_rwlock, RW_WRITER);
	isp = iscsi_sess_create(ihp, iSCSIDiscoveryMethodSendTargets,
	    NULL, SENDTARGETS_DISCOVERY, ISCSI_DEFAULT_TPGT,
	    ISCSI_SUN_ISID_5, ISCSI_SESS_TYPE_DISCOVERY, &oid);
	if (isp == NULL) {
		rw_exit(&ihp->hba_sess_list_rwlock);
		return (1);
	}

	/* create connection */
	rw_enter(&isp->sess_conn_list_rwlock, RW_WRITER);
	status = iscsi_conn_create(&addr_snd.sin, isp, &icp);
	rw_exit(&isp->sess_conn_list_rwlock);

	if (!ISCSI_SUCCESS(status)) {
		(void) iscsi_sess_destroy(isp);
		rw_exit(&ihp->hba_sess_list_rwlock);
		return (1);
	}
	rw_exit(&ihp->hba_sess_list_rwlock);

	/* start login */
	mutex_enter(&icp->conn_state_mutex);
	status = iscsi_conn_online(icp);
	mutex_exit(&icp->conn_state_mutex);

	if (status == ISCSI_STATUS_SUCCESS) {
		data_len = icp->conn_params.max_xmit_data_seg_len;
retry_sendtgts:
		/* alloc/init buffer for SendTargets req/resp */
		data = kmem_zalloc(data_len, KM_SLEEP);
		bcopy(ISCSI_SENDTGTS_REQ_STR, data,
		    sizeof (ISCSI_SENDTGTS_REQ_STR));

		/* execute SendTargets operation */
		status = iscsi_handle_text(icp, data, data_len,
		    sizeof (ISCSI_SENDTGTS_REQ_STR), &rx_data_len);

		/* check if allocated buffer is too small for response */
		if (status == ISCSI_STATUS_DATA_OVERFLOW) {
			kmem_free(data, data_len);
			data_len = rx_data_len;
			goto retry_sendtgts;
		}

		if (ISCSI_SUCCESS(status)) {
			status = iscsi_create_sendtgts_list(icp, data,
			    rx_data_len, stl);
			if (ISCSI_SUCCESS(status)) {
				rtn = 0;
			}
		} else {
			rtn = EFAULT;
		}

		kmem_free(data, data_len);
	} else {
		rtn = EFAULT;
	}

	/*
	 * check if session is still alive.  It may have been destroyed
	 * by a driver unload
	 */
	rw_enter(&ihp->hba_sess_list_rwlock, RW_WRITER);
	if (iscsi_sess_get(oid, ihp, &isp) == 0) {
		(void) iscsi_sess_destroy(isp);
	}
	rw_exit(&ihp->hba_sess_list_rwlock);

	return (rtn);
}
Example #17
File: dmu_zfetch.c Project: Shengliang/zfs
/*
 * This is the prefetch entry point.  It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
{
	zstream_t	zst;
	zstream_t	*newstream;
	boolean_t	fetched;
	int		inserted;
	unsigned int	blkshft;
	uint64_t	blksz;

	if (zfs_prefetch_disable)
		return;

	/* files that aren't ln2 blocksz are only one block -- nothing to do */
	if (!zf->zf_dnode->dn_datablkshift)
		return;

	/* convert offset and size, into blockid and nblocks */
	blkshft = zf->zf_dnode->dn_datablkshift;
	blksz = (1 << blkshft);

	bzero(&zst, sizeof (zstream_t));
	zst.zst_offset = offset >> blkshft;
	zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;

	fetched = dmu_zfetch_find(zf, &zst, prefetched);
	if (fetched) {
		ZFETCHSTAT_BUMP(zfetchstat_hits);
	} else {
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		if ((fetched = dmu_zfetch_colinear(zf, &zst))) {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
		}
	}

	if (!fetched) {
		newstream = dmu_zfetch_stream_reclaim(zf);

		/*
		 * we still couldn't find a stream, drop the lock, and allocate
		 * one if possible.  Otherwise, give up and go home.
		 */
		if (newstream) {
			ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
		} else {
			uint64_t	maxblocks;
			uint32_t	max_streams;
			uint32_t	cur_streams;

			ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
			cur_streams = zf->zf_stream_cnt;
			maxblocks = zf->zf_dnode->dn_maxblkid;

			max_streams = MIN(zfetch_max_streams,
			    (maxblocks / zfetch_block_cap));
			if (max_streams == 0) {
				max_streams++;
			}

			if (cur_streams >= max_streams) {
				return;
			}
			newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE);
		}

		newstream->zst_offset = zst.zst_offset;
		newstream->zst_len = zst.zst_len;
		newstream->zst_stride = zst.zst_len;
		newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
		newstream->zst_cap = zst.zst_len;
		newstream->zst_direction = ZFETCH_FORWARD;
		newstream->zst_last = ddi_get_lbolt();

		mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&zf->zf_rwlock, RW_WRITER);
		inserted = dmu_zfetch_stream_insert(zf, newstream);
		rw_exit(&zf->zf_rwlock);

		if (!inserted) {
			mutex_destroy(&newstream->zst_lock);
			kmem_free(newstream, sizeof (zstream_t));
		}
	}
}
Example #18
/*
 * iscsi_create_sendtgts_list -  Based upon the given data, build a
 * linked list of SendTarget information.  The data passed into this
 * function  is expected to be the data portion(s) of SendTarget text
 * response.
 */
static iscsi_status_t
iscsi_create_sendtgts_list(iscsi_conn_t *icp, char *data, int data_len,
    iscsi_sendtgts_list_t *stl)
{
	char			*line = NULL;
	boolean_t		targetname_added = B_FALSE;
	iscsi_sendtgts_entry_t	*curr_ste = NULL,
	    *prev_ste = NULL;
	struct hostent		*hptr;
	int			error_num;

	/* initialize number of targets found */
	stl->stl_out_cnt = 0;

	if (data_len == 0)
		return (ISCSI_STATUS_SUCCESS);

	while ((line = iscsi_get_next_text(data, data_len, line)) != NULL) {
		if (strncmp(TARGETNAME, line, strlen(TARGETNAME)) == 0) {
			/* check if this is first targetname */
			if (prev_ste != NULL) {
				stl->stl_out_cnt++;
			}
			if (stl->stl_out_cnt >= stl->stl_in_cnt) {
				/*
				 * continue processing the data so that
				 * the total number of targets are known
				 * and the caller can retry with the correct
				 * number of entries in the list
				 */
				continue;
			}
			curr_ste = &(stl->stl_list[stl->stl_out_cnt]);

			/*
			 * This entry will use the IP address and port
			 * that was passed into this routine. If the next
			 * line that we receive is a TargetAddress we will
			 * know to modify this entry with the new IP address,
			 * port and portal group tag. If this state flag
			 * is not set we'll just create a new entry using
			 * only the previous entries targetname.
			 */
			(void) strncpy((char *)curr_ste->ste_name,
			    line + strlen(TARGETNAME),
			    sizeof (curr_ste->ste_name));

			if (icp->conn_base_addr.sin.sa_family == AF_INET) {

				struct sockaddr_in *addr_in =
				    &icp->conn_base_addr.sin4;
				curr_ste->ste_ipaddr.a_addr.i_insize =
				    sizeof (struct in_addr);
				bcopy(&addr_in->sin_addr.s_addr,
				    &curr_ste->ste_ipaddr.a_addr.i_addr,
				    sizeof (struct in_addr));
				curr_ste->ste_ipaddr.a_port =
				    htons(addr_in->sin_port);

			} else {

				struct sockaddr_in6 *addr_in6 =
				    &icp->conn_base_addr.sin6;
				curr_ste->ste_ipaddr.a_addr.i_insize =
				    sizeof (struct in6_addr);
				bcopy(&addr_in6->sin6_addr.s6_addr,
				    &curr_ste->ste_ipaddr.a_addr.i_addr,
				    sizeof (struct in6_addr));
				curr_ste->ste_ipaddr.a_port =
				    htons(addr_in6->sin6_port);
			}
			curr_ste->ste_tpgt = -1;

			targetname_added = B_TRUE;

		} else if (strncmp(TARGETADDRESS, line,
		    strlen(TARGETADDRESS)) == 0) {

			char *in_str,
			    *tmp_buf,
			    *addr_str,
			    *port_str,
			    *tpgt_str;
			int type,
			    tmp_buf_len;
			long result;

			/*
			 * If TARGETADDRESS is the first line of a SendTargets
			 * response (i.e. no TARGETNAME lines preceding), treat
			 * it as an error.  To check this, an assumption is made
			 * that at least one sendtarget_entry_t should exist
			 * prior to entering this code.
			 */
			if (prev_ste == NULL) {
				cmn_err(CE_NOTE, "SendTargets protocol error: "
				    "TARGETADDRESS first");
				return (ISCSI_STATUS_PROTOCOL_ERROR);
			}

			/*
			 * If we can't find an '=' then the sendtargets
			 * response is invalid per spec.  Return an empty list.
			 */
			in_str = strchr(line, '=');
			if (in_str == NULL) {
				return (ISCSI_STATUS_PROTOCOL_ERROR);
			}

			/* move past the '=' */
			in_str++;

			/* Copy  addr, port, and tpgt into temporary buffer */
			tmp_buf_len = strlen(in_str) + 1;
			tmp_buf = kmem_zalloc(tmp_buf_len, KM_SLEEP);
			(void) strncpy(tmp_buf, in_str, tmp_buf_len);

			/*
			 * Parse the addr, port, and tpgt from
			 * sendtarget response
			 */
			if (parse_addr_port_tpgt(tmp_buf, &addr_str, &type,
			    &port_str, &tpgt_str) == B_FALSE) {
				/* Unable to extract addr */
				kmem_free(tmp_buf, tmp_buf_len);
				return (ISCSI_STATUS_PROTOCOL_ERROR);
			}

			/* Now convert string addr to binary */
			hptr = kgetipnodebyname(addr_str, type,
			    AI_ALL, &error_num);
			if (!hptr) {
				/* Unable to get valid address */
				kmem_free(tmp_buf, tmp_buf_len);
				return (ISCSI_STATUS_PROTOCOL_ERROR);
			}

			/* Check if space for response */
			if (targetname_added == B_FALSE) {
				stl->stl_out_cnt++;
				if (stl->stl_out_cnt >= stl->stl_in_cnt) {
					/*
					 * continue processing the data so that
					 * the total number of targets are
					 * known and the caller can retry with
					 * the correct number of entries in
					 * the list
					 */
					kfreehostent(hptr);
					kmem_free(tmp_buf, tmp_buf_len);
					continue;
				}
				curr_ste = &(stl->stl_list[stl->stl_out_cnt]);
				(void) strcpy((char *)curr_ste->ste_name,
				    (char *)prev_ste->ste_name);
			}

			curr_ste->ste_ipaddr.a_addr.i_insize = hptr->h_length;
			bcopy(*hptr->h_addr_list,
			    &(curr_ste->ste_ipaddr.a_addr.i_addr),
			    curr_ste->ste_ipaddr.a_addr.i_insize);
			kfreehostent(hptr);

			if (port_str != NULL) {
				(void) ddi_strtol(port_str, NULL, 0, &result);
				curr_ste->ste_ipaddr.a_port = (short)result;
			} else {
				curr_ste->ste_ipaddr.a_port = ISCSI_LISTEN_PORT;
			}

			if (tpgt_str != NULL) {
				(void) ddi_strtol(tpgt_str, NULL, 0, &result);
				curr_ste->ste_tpgt = (short)result;
			} else {
				cmn_err(CE_NOTE, "SendTargets protocol error: "
				    "TPGT not specified");
				kmem_free(tmp_buf, tmp_buf_len);
				return (ISCSI_STATUS_PROTOCOL_ERROR);
			}

			kmem_free(tmp_buf, tmp_buf_len);

			targetname_added = B_FALSE;

		} else if (strlen(line) != 0) {
			/*
			 * Any other string besides an empty string
			 * is a protocol error
			 */
			cmn_err(CE_NOTE, "SendTargets protocol error: "
			    "unexpected response");
			return (ISCSI_STATUS_PROTOCOL_ERROR);
		}

		prev_ste = curr_ste;
	}

	/*
	 * If a target was found, increment the out count one more time
	 * because it now represents the total number of entries in the
	 * list rather than an index as it was used above.
	 */
	if (prev_ste != NULL) {
		stl->stl_out_cnt++;
	}

	return (ISCSI_STATUS_SUCCESS);
}
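
The TargetAddress branch above leans on parse_addr_port_tpgt(), which is not
included in this example. Per RFC 3720 a SendTargets TargetAddress value has
the form <address>[:<port>],<portal-group-tag>, with IPv6 literals in
brackets. The user-space sketch below shows one way such a value could be
split; the function name and calling convention are illustrative assumptions,
not the driver's actual helper.

#include <string.h>

/*
 * Hypothetical user-space illustration of splitting a SendTargets
 * TargetAddress value ("addr[:port],tpgt" per RFC 3720) into its
 * parts.  IPv6 literals are bracketed, e.g. "[fe80::1]:3260,1".
 * The string is modified in place; *port comes back NULL when no
 * port was given (the default iSCSI port then applies).
 * Returns 0 on success, -1 on a malformed value.
 */
static int
split_target_address(char *val, char **addr, char **port, char **tpgt)
{
	char *comma, *colon, *rbracket;

	*addr = val;
	*port = NULL;

	/* The portal group tag follows the last comma. */
	comma = strrchr(val, ',');
	if (comma == NULL)
		return (-1);
	*comma = '\0';
	*tpgt = comma + 1;

	if (val[0] == '[') {
		/* Bracketed IPv6 literal, optionally followed by ":port". */
		rbracket = strchr(val, ']');
		if (rbracket == NULL)
			return (-1);
		*addr = val + 1;
		*rbracket = '\0';
		colon = (rbracket[1] == ':') ? rbracket + 1 : NULL;
	} else {
		/* IPv4 address or hostname: a single colon marks the port. */
		colon = strchr(val, ':');
	}

	if (colon != NULL) {
		*colon = '\0';
		*port = colon + 1;
	}
	return (0);
}
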
Example #19
0
int
VMBlockVnodeGet(struct vnode **vpp,        // OUT: Filled with address of new vnode
                struct vnode *realVp,      // IN:  Real vnode (assumed held)
                const char *name,          // IN:  Relative name of the file
                size_t nameLen,            // IN:  Size of name
                struct vnode *dvp,         // IN:  Parent directory's vnode
                struct vfs *vfsp,          // IN:  Filesystem structure
                Bool isRoot)               // IN:  If is root directory of fs
{
   VMBlockVnodeInfo *vip;
   struct vnode *vp;
   char *curr;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockVnodeGet: entry\n");

   ASSERT(vpp);
   ASSERT(realVp);
   ASSERT(vfsp);
   ASSERT(name);
   ASSERT(dvp || isRoot);

   vp = vn_alloc(KM_SLEEP);
   if (!vp) {
      return ENOMEM;
   }

   vip = kmem_zalloc(sizeof *vip, KM_SLEEP);
   vp->v_data = (void *)vip;

   /*
    * Store the path that this file redirects to.  For the root vnode we just
    * store the provided path, but for all others we first copy in the parent
    * directory's path.
    */
   curr = vip->name;

   if (!isRoot) {
      VMBlockVnodeInfo *dvip = VPTOVIP(dvp);
      if (dvip->nameLen + 1 + nameLen + 1 >= sizeof vip->name) {
         ret = ENAMETOOLONG;
         goto error;
      }

      memcpy(vip->name, dvip->name, dvip->nameLen);
      vip->name[dvip->nameLen] = '/';
      curr = vip->name + dvip->nameLen + 1;
   }

   if (nameLen + 1 > (sizeof vip->name - (curr - vip->name))) {
      ret = ENAMETOOLONG;
      goto error;
   }

   memcpy(curr, name, nameLen);
   curr[nameLen] = '\0';
   vip->nameLen = nameLen + (curr - vip->name);

   /*
    * We require the caller to have held realVp, so we don't need to VN_HOLD()
    * it here even though we VN_RELE() this vnode in VMBlockVnodePut().
    * Despite seeming awkward, this is more natural since the function that our
    * caller obtained realVp from provided a held vnode.
    */
   vip->realVnode = realVp;

   /*
    * Now we'll initialize the vnode.  We need to set the file type, vnode
    * operations, flags, filesystem pointer, reference count, and device.
    */
   /* The root directory is our only directory; the rest are symlinks. */
   vp->v_type = isRoot ? VDIR : VLNK;

   vn_setops(vp, vmblockVnodeOps);

   vp->v_flag  = VNOMAP | VNOMOUNT | VNOSWAP | (isRoot ? VROOT : 0);
   vp->v_vfsp  = vfsp;
   vp->v_rdev  = NODEV;

   /* Fill in the provided address with the new vnode. */
   *vpp = vp;

   return 0;

error:
   kmem_free(vip, sizeof *vip);
   vn_free(vp);
   return ret;
}
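
VMBlockVnodeGet() builds the redirected path in a fixed-size buffer in two
bounds-checked steps: the parent directory's path plus '/', then the new
component plus its terminator. A self-contained sketch of that concatenation,
with a slightly simplified bounds check, is shown below; the buffer size,
function name, and parameters are invented for illustration and are not part
of the VMBlock sources.

#include <errno.h>
#include <string.h>

#define PATH_BUF 1024	/* illustrative fixed buffer size */

/*
 * Hypothetical helper: build "<parent>/<name>" into a fixed buffer,
 * returning ENAMETOOLONG rather than truncating.  parentLen and nameLen
 * are lengths without the terminating NUL, mirroring the vnode code.
 */
static int
build_redirect_path(char *buf, size_t bufsz,
    const char *parent, size_t parentLen,
    const char *name, size_t nameLen)
{
	/* parent + '/' + name + NUL must fit. */
	if (parentLen + 1 + nameLen + 1 > bufsz)
		return (ENAMETOOLONG);

	memcpy(buf, parent, parentLen);
	buf[parentLen] = '/';
	memcpy(buf + parentLen + 1, name, nameLen);
	buf[parentLen + 1 + nameLen] = '\0';
	return (0);
}
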
Example #20
0
int
iscsi_set_params(iscsi_param_set_t *ils, iscsi_hba_t *ihp, boolean_t persist)
{
	iscsi_login_params_t	*params	= NULL;
	uchar_t			*name	= NULL;
	iscsi_sess_t		*isp	= NULL;
	iscsi_param_get_t	*ilg;
	int			rtn	= 0;

	/* handle special case for Initiator name */
	if (ils->s_param == ISCSI_LOGIN_PARAM_INITIATOR_NAME) {
		(void) strlcpy((char *)ihp->hba_name,
		    (char *)ils->s_value.v_name, ISCSI_MAX_NAME_LEN);
		if (persist) {
			char			*name;
			boolean_t		rval;

			/* save off old Initiator name */
			name = kmem_alloc(ISCSI_MAX_NAME_LEN, KM_SLEEP);
			rval = persistent_initiator_name_get(name,
			    ISCSI_MAX_NAME_LEN);

			(void) persistent_initiator_name_set(
			    (char *)ihp->hba_name);
			if (rval == B_TRUE) {
				/*
				 * check to see if we have login param,
				 * chap param, or authentication params
				 * loaded in persistent that we have to change
				 * the name of
				 */
				persistent_param_t	*pp;
				iscsi_chap_props_t	*chap;
				iscsi_auth_props_t	*auth;

				/* checking login params */
				pp = kmem_zalloc(sizeof (persistent_param_t),
				    KM_SLEEP);
				if (persistent_param_get(name, pp)) {
					rval = persistent_param_clear(name);
					if (rval == B_TRUE) {
						rval = persistent_param_set(
						    (char *)ihp->hba_name, pp);
					}
					if (rval == B_FALSE) {
						rtn = EFAULT;
					}
				}
				kmem_free(pp, sizeof (persistent_param_t));

				/* check chap params */
				chap = kmem_zalloc(sizeof (iscsi_chap_props_t),
				    KM_SLEEP);
				if (persistent_chap_get(name, chap)) {
					rval = persistent_chap_clear(name);
					if (rval == B_TRUE) {
					/*
					 * Update CHAP user name only if the
					 * original username was set to the
					 * initiator node name.  Otherwise
					 * leave it the way it is.
					 */
						int userSize;
						userSize =
						    sizeof (chap->c_user);
						if (strncmp((char *)
						    chap->c_user, name,
						    sizeof (chap->c_user))
						    == 0) {
							bzero(chap->c_user,
							    userSize);
							bcopy((char *)
							    ihp->hba_name,
							    chap->c_user,
							    strlen((char *)
							    ihp->hba_name));
							chap->c_user_len =
							    strlen((char *)
							    ihp->hba_name);

					}
					rval = persistent_chap_set(
					    (char *)ihp->hba_name, chap);
					}
					if (rval == B_FALSE) {
						rtn = EFAULT;
					}
				}
				kmem_free(chap, sizeof (iscsi_chap_props_t));

				/* check authentication params */
				auth = kmem_zalloc(sizeof (iscsi_auth_props_t),
				    KM_SLEEP);
				if (persistent_auth_get(name, auth)) {
					rval = persistent_auth_clear(name);
					if (rval == B_TRUE) {
						rval = persistent_auth_set(
						    (char *)ihp->hba_name,
						    auth);
					}
					if (rval == B_FALSE) {
						rtn = EFAULT;
					}
				}
				kmem_free(auth, sizeof (iscsi_auth_props_t));
			}
			kmem_free(name, ISCSI_MAX_NAME_LEN);
		}
	} else if (ils->s_param == ISCSI_LOGIN_PARAM_INITIATOR_ALIAS) {
		(void) strlcpy((char *)ihp->hba_alias,
		    (char *)ils->s_value.v_name, ISCSI_MAX_NAME_LEN);
		ihp->hba_alias_length =
		    strlen((char *)ils->s_value.v_name);
		if (persist) {
			(void) persistent_alias_name_set(
			    (char *)ihp->hba_alias);
		}
	} else {
		/* switch login based if looking for initiator params */
		if (ils->s_oid == ihp->hba_oid) {
			/* initiator */
			params = &ihp->hba_params;
			name = ihp->hba_name;
			rtn = iscsi_set_param(params, ils);
		} else {
			/* session */
			name = iscsi_targetparam_get_name(ils->s_oid);
			if (name == NULL)
				rtn = EFAULT;

			if (persist && (rtn == 0)) {
				boolean_t		rval;
				persistent_param_t	*pp;

				pp = (persistent_param_t *)
				    kmem_zalloc(sizeof (*pp), KM_SLEEP);
				if (!persistent_param_get((char *)name, pp)) {
					iscsi_set_default_login_params(
					    &pp->p_params);
				}

				pp->p_bitmap |= (1 << ils->s_param);
				rtn = iscsi_set_param(&pp->p_params, ils);
				if (rtn == 0) {
					rval = persistent_param_set(
					    (char *)name, pp);
					if (rval == B_FALSE) {
						rtn = EFAULT;
					}
				}
				kmem_free(pp, sizeof (*pp));
			}

			/*
			 * There may be multiple sessions with different
			 * tpgt values, so loop through the sessions and
			 * update all of them.
			 */
			if (rtn == 0) {
				rw_enter(&ihp->hba_sess_list_rwlock, RW_READER);
				for (isp = ihp->hba_sess_list; isp;
				    isp = isp->sess_next) {
					if (iscsiboot_prop &&
					    isp->sess_boot &&
					    iscsi_chk_bootlun_mpxio(ihp)) {
						/*
						 * MPxIO is enabled, so this
						 * change is possible.  All
						 * changes will be applied
						 * later, after this function
						 * returns.
						 */
						continue;
					}

					if (strncmp((char *)isp->sess_name,
					    (char *)name,
					    ISCSI_MAX_NAME_LEN) == 0) {
						mutex_enter(&isp->
						    sess_state_mutex);
						iscsi_sess_state_machine(isp,
						    ISCSI_SESS_EVENT_N7);
						mutex_exit(&isp->
						    sess_state_mutex);
					}
				}
				rw_exit(&ihp->hba_sess_list_rwlock);
			}

		} /* end of 'else' */

		if (params && persist && (rtn == 0)) {
			boolean_t		rval;
			persistent_param_t	*pp;

			pp = (persistent_param_t *)
			    kmem_zalloc(sizeof (*pp), KM_SLEEP);
			(void) persistent_param_get((char *)name, pp);
			pp->p_bitmap |= (1 << ils->s_param);
			bcopy(params, &pp->p_params, sizeof (*params));
			rval = persistent_param_set((char *)name, pp);
			if (rval == B_FALSE) {
				rtn = EFAULT;
			}
			kmem_free(pp, sizeof (*pp));
		}
		/*
		 * If an initiator parameter was set, modify all associated
		 * sessions that don't already have the parameter
		 * overridden.
		 */
		if ((ils->s_oid == ihp->hba_oid) && (rtn == 0)) {
			ilg = (iscsi_param_get_t *)
			    kmem_alloc(sizeof (*ilg), KM_SLEEP);

			rw_enter(&ihp->hba_sess_list_rwlock, RW_READER);
			for (isp = ihp->hba_sess_list; isp;
			    isp = isp->sess_next) {
				ilg->g_param = ils->s_param;
				params = &isp->sess_params;
				if (iscsi_get_persisted_param(
				    isp->sess_name, ilg, params) != 0) {
					rtn = iscsi_set_param(params, ils);
					if (rtn != 0) {
						break;
					}
					if (iscsiboot_prop &&
					    isp->sess_boot &&
					    iscsi_chk_bootlun_mpxio(ihp)) {
						/*
						 * MPxIO is enabled, so this
						 * change is possible.  Changes
						 * will be applied later, right
						 * after this function returns.
						 */
						continue;
					}

					/*
					 * Notify the session that
					 * the login parameters have
					 * changed.
					 */
					mutex_enter(&isp->
					    sess_state_mutex);
					iscsi_sess_state_machine(isp,
					    ISCSI_SESS_EVENT_N7);
					mutex_exit(&isp->
					    sess_state_mutex);
				}
			}
			kmem_free(ilg, sizeof (*ilg));
			rw_exit(&ihp->hba_sess_list_rwlock);
		}
	}
	return (rtn);
}
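
Each persistent update in iscsi_set_params() follows the same
read-modify-write shape: fetch the stored record (or fall back to defaults),
set the bit for the parameter being overridden in p_bitmap, store the new
value, and write the record back. The toy structure below isolates just the
bitmap bookkeeping; the type and field names are made up for illustration and
do not correspond to the driver's persistent store.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for a persistent login-parameter record. */
typedef struct {
	uint32_t bitmap;	/* one bit per explicitly overridden parameter */
	int	 value[32];	/* the per-parameter values */
} param_record_t;

/* Mark parameter 'id' as explicitly set and record its value. */
static void
param_override(param_record_t *rec, int id, int value)
{
	rec->bitmap |= (1u << id);
	rec->value[id] = value;
}

/* A session only inherits the initiator-wide value when no override bit is set. */
static bool
param_is_overridden(const param_record_t *rec, int id)
{
	return ((rec->bitmap & (1u << id)) != 0);
}
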
Example #21
0
/*
 * cnex_find_chan_dip -- Find the dip of the device corresponding
 * 	to the specified channel. Below are the details on how the dip
 *	is derived.
 *
 *	- In the MD, the cfg-handle is expected to be unique for
 *	  virtual-device nodes that have the same 'name' property value.
 *	  This value is expected to be the same as that of "reg" property
 *	  of the corresponding OBP device node.
 *
 *	- The value of the 'name' property of a virtual-device node
 *	  in the MD is expected to be the same for the corresponding
 *	  OBP device node.
 *
 *	- Find the virtual-device node corresponding to a channel-endpoint
 *	  by walking backwards. Then obtain the values for the 'name' and
 *	  'cfg-handle' properties.
 *
 *	- Walk all the children of the cnex, find a matching dip which
 *	  has the same 'name' and 'reg' property values.
 *
 *	- The channels that have no corresponding device driver are
 *	  treated as if they correspond to the cnex driver,
 *	  that is, return cnex dip for them. This means, the
 *	  cnex acts as an umbrella device driver. Note, this is
 *	  for 'intrstat' statistics purposes only. As a result of this,
 *	  the 'intrstat' shows cnex as the device that is servicing the
 *	  interrupts corresponding to these channels.
 *
 *	  For now, only one such case is known, that is, the channels that
 *	  are used by the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int listsz;
	int num_nodes;
	int num_devs;
	uint64_t cfghdl;
	char *md_name;
	mde_cookie_t *listp;
	dev_info_t *cdip = NULL;

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    " property value = 0x%x\n", chan_id, cfghdl);

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		int *cnex_regspec;
		uint32_t reglen;
		char	*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name",
		    &dev_name) != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			ddi_prop_free(cnex_regspec);
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}
Example #22
0
File: spa_config.c Project: clefru/zfs
static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	size_t buflen;
	char *buf;
	vnode_t *vp;
	int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
	char *temp;
	int err;

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		err = vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
		return (err);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	buf = fnvlist_pack(nvl, &buflen);
	temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

#if defined(__linux__) && defined(_KERNEL)
	/*
	 * Write the configuration to disk.  Due to the complexity involved
	 * in performing a rename from within the kernel the file is truncated
	 * and overwritten in place.  In the event of an error the file is
	 * unlinked to make sure we always have a consistent view of the data.
	 */
	err = vn_open(dp->scd_path, UIO_SYSSPACE, oflags, 0644, &vp, 0, 0);
	if (err == 0) {
		err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0,
		    UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, NULL);
		if (err == 0)
			err = VOP_FSYNC(vp, FSYNC, kcred, NULL);

		(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);

		if (err)
			(void) vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
	}
#else
	/*
	 * Write the configuration to disk.  We need to do the traditional
	 * 'write to temporary file, sync, move over original' to make sure we
	 * always have a consistent view of the data.
	 */
	(void) snprintf(temp, MAXPATHLEN, "%s.tmp", dp->scd_path);

	err = vn_open(temp, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0);
	if (err == 0) {
		err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
		    0, RLIM64_INFINITY, kcred, NULL);
		if (err == 0)
			err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
		if (err == 0)
			err = vn_rename(temp, dp->scd_path, UIO_SYSSPACE);
		(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
	}

	(void) vn_remove(temp, UIO_SYSSPACE, RMFILE);
#endif

	fnvlist_pack_free(buf, buflen);
	kmem_free(temp, MAXPATHLEN);
	return (err);
}
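
The non-Linux branch of spa_config_write() uses the classic write-to-temp,
fsync, rename-over-original sequence so a reader never sees a partially
written cachefile. A user-space POSIX sketch of the same pattern is shown
below; the helper name and the fixed path buffer are illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Hypothetical user-space equivalent: write buf to path atomically by
 * staging it in "<path>.tmp", fsync()ing, then rename()ing over the
 * original.  rename() is atomic on POSIX filesystems, so readers see
 * either the old file or the new one, never a partial write.
 */
static int
write_file_atomic(const char *path, const void *buf, size_t len)
{
	char tmp[1024];
	int fd, rc = -1;

	if (snprintf(tmp, sizeof (tmp), "%s.tmp", path) >= (int)sizeof (tmp))
		return (-1);

	fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return (-1);

	if (write(fd, buf, len) == (ssize_t)len &&
	    fsync(fd) == 0 &&
	    rename(tmp, path) == 0)
		rc = 0;

	(void) close(fd);
	if (rc != 0)
		(void) unlink(tmp);	/* leave no stale temp file behind */
	return (rc);
}
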
Example #23
0
/* PRIVATE, debugging */
int
xfs_qm_internalqcheck(
	xfs_mount_t	*mp)
{
	xfs_ino_t	lastino;
	int		done, count;
	int		i;
	xfs_dqtest_t	*d, *e;
	xfs_dqhash_t	*h1;
	int		error;

	lastino = 0;
	qmtest_hashmask = 32;
	count = 5;
	done = 0;
	qmtest_nfails = 0;

	if (! XFS_IS_QUOTA_ON(mp))
		return XFS_ERROR(ESRCH);

	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);

	mutex_lock(&qcheck_lock);
	/* There should be absolutely no quota activity while this
	   is going on. */
	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				 xfs_qm_internalqcheck_adjust, NULL,
				 0, NULL, BULKSTAT_FG_IGET, &done))) {
			break;
		}
	} while (! done);
	if (error) {
		cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
	}
	cmn_err(CE_DEBUG, "Checking results against system dquots");
	for (i = 0; i < qmtest_hashmask; i++) {
		h1 = &qmtest_udqtab[i];
		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
			xfs_dqtest_cmp(d);
			e = (xfs_dqtest_t *) d->HL_NEXT;
			kmem_free(d);
			d = e;
		}
		h1 = &qmtest_gdqtab[i];
		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
			xfs_dqtest_cmp(d);
			e = (xfs_dqtest_t *) d->HL_NEXT;
			kmem_free(d);
			d = e;
		}
	}

	if (qmtest_nfails) {
		cmn_err(CE_DEBUG, "******** quotacheck failed  ********");
		cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
	} else {
		cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
	}
	kmem_free(qmtest_udqtab);
	kmem_free(qmtest_gdqtab);
	mutex_unlock(&qcheck_lock);
	return (qmtest_nfails);
}
Example #24
0
File: spa_config.c Project: clefru/zfs
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;
	int config_gen_flags = 0;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);

	config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
	if (spa->spa_comment != NULL)
		fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
		    spa->spa_comment);

#ifdef	_KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
	fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);

	if (vd != rvd) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid);
		fnvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid);
		if (vd->vdev_isspare)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL);
		if (vd->vdev_islog)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			fnvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting);

		fnvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS);

		config_gen_flags |= VDEV_CONFIG_MOS;
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID, split_guid);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, config_gen_flags);
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t));
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t));
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t));
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
Example #25
0
File: ehci.c Project: andreiw/polaris
/*
 * ehci_hcdi_pipe_open:
 *
 * Member of HCD Ops structure and called during client specific pipe open.
 * Add the pipe to the data structure representing the device and allocate
 * bandwidth for the pipe if it is an interrupt or isochronous endpoint.
 */
int
ehci_hcdi_pipe_open(
	usba_pipe_handle_data_t	*ph,
	usb_flags_t		flags)
{
	ehci_state_t		*ehcip = ehci_obtain_state(
				    ph->p_usba_device->usb_root_hub_dip);
	usb_ep_descr_t		*epdt = &ph->p_ep;
	int			rval, error = USB_SUCCESS;
	int			kmflag = (flags & USB_FLAGS_SLEEP) ?
				KM_SLEEP : KM_NOSLEEP;
	uchar_t			smask = 0;
	uchar_t			cmask = 0;
	uint_t			pnode = 0;
	ehci_pipe_private_t	*pp;

	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
	    "ehci_hcdi_pipe_open: addr = 0x%x, ep%d",
	    ph->p_usba_device->usb_addr,
	    epdt->bEndpointAddress & USB_EP_NUM_MASK);

	mutex_enter(&ehcip->ehci_int_mutex);
	rval = ehci_state_is_operational(ehcip);
	mutex_exit(&ehcip->ehci_int_mutex);

	if (rval != USB_SUCCESS) {

		return (rval);
	}

	/*
	 * Check and handle root hub pipe open.
	 */
	if (ph->p_usba_device->usb_addr == ROOT_HUB_ADDR) {

		mutex_enter(&ehcip->ehci_int_mutex);
		error = ehci_handle_root_hub_pipe_open(ph, flags);
		mutex_exit(&ehcip->ehci_int_mutex);

		return (error);
	}

	/*
	 * Opening of other pipes excluding root hub pipe are
	 * handled below. Check whether pipe is already opened.
	 */
	if (ph->p_hcd_private) {
		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
		    "ehci_hcdi_pipe_open: Pipe is already opened");

		return (USB_FAILURE);
	}

	/*
	 * A portion of the bandwidth is reserved for the non-periodic
	 * transfers, i.e., control and bulk transfers, in each one
	 * millisecond frame period; usually this is 20% of the frame
	 * period. Hence there is no need to check for the available
	 * bandwidth before adding control or bulk endpoints.
	 *
	 * There is a need to check for the available bandwidth before
	 * adding the periodic transfers, i.e., interrupt and isochronous,
	 * since all these periodic transfers are guaranteed transfers.
	 * Usually 80% of the total frame time is reserved for periodic
	 * transfers.
	 */
	if (EHCI_PERIODIC_ENDPOINT(epdt)) {

		mutex_enter(&ehcip->ehci_int_mutex);
		mutex_enter(&ph->p_mutex);

		error = ehci_allocate_bandwidth(ehcip,
		    ph, &pnode, &smask, &cmask);

		if (error != USB_SUCCESS) {

			USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
			    "ehci_hcdi_pipe_open: Bandwidth allocation failed");

			mutex_exit(&ph->p_mutex);
			mutex_exit(&ehcip->ehci_int_mutex);

			return (error);
		}

		mutex_exit(&ph->p_mutex);
		mutex_exit(&ehcip->ehci_int_mutex);
	}

	/* Create the HCD pipe private structure */
	pp = kmem_zalloc(sizeof (ehci_pipe_private_t), kmflag);

	/*
	 * Return failure if ehci pipe private
	 * structure allocation fails.
	 */
	if (pp == NULL) {

		mutex_enter(&ehcip->ehci_int_mutex);

		/* Deallocate bandwidth */
		if (EHCI_PERIODIC_ENDPOINT(epdt)) {

			mutex_enter(&ph->p_mutex);
			ehci_deallocate_bandwidth(ehcip,
			    ph, pnode, smask, cmask);
			mutex_exit(&ph->p_mutex);
		}

		mutex_exit(&ehcip->ehci_int_mutex);

		return (USB_NO_RESOURCES);
	}

	mutex_enter(&ehcip->ehci_int_mutex);

	/* Save periodic nodes */
	pp->pp_pnode = pnode;

	/* Save start and complete split mask values */
	pp->pp_smask = smask;
	pp->pp_cmask = cmask;

	/* Create prototype for xfer completion condition variable */
	cv_init(&pp->pp_xfer_cmpl_cv, NULL, CV_DRIVER, NULL);

	/* Set the state of pipe as idle */
	pp->pp_state = EHCI_PIPE_STATE_IDLE;

	/* Store a pointer to the pipe handle */
	pp->pp_pipe_handle = ph;

	mutex_enter(&ph->p_mutex);

	/* Store the pointer in the pipe handle */
	ph->p_hcd_private = (usb_opaque_t)pp;

	/* Store a copy of the pipe policy */
	bcopy(&ph->p_policy, &pp->pp_policy, sizeof (usb_pipe_policy_t));

	mutex_exit(&ph->p_mutex);

	/* Allocate the host controller endpoint descriptor */
	pp->pp_qh = ehci_alloc_qh(ehcip, ph, NULL);

	/* Initialize the halting flag */
	pp->pp_halt_state = EHCI_HALT_STATE_FREE;

	/* Create prototype for halt completion condition variable */
	cv_init(&pp->pp_halt_cmpl_cv, NULL, CV_DRIVER, NULL);

	/* Isoch does not use QH, so ignore this */
	if ((pp->pp_qh == NULL) && !(EHCI_ISOC_ENDPOINT(epdt))) {
		ASSERT(pp->pp_qh == NULL);

		USB_DPRINTF_L2(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
		    "ehci_hcdi_pipe_open: QH allocation failed");

		mutex_enter(&ph->p_mutex);

		/* Deallocate bandwidth */
		if (EHCI_PERIODIC_ENDPOINT(epdt)) {

			ehci_deallocate_bandwidth(ehcip,
			    ph, pnode, smask, cmask);
		}

		/* Destroy the xfer completion condition variable */
		cv_destroy(&pp->pp_xfer_cmpl_cv);

		/*
		 * Deallocate the hcd private portion
		 * of the pipe handle.
		 */
		kmem_free(ph->p_hcd_private, sizeof (ehci_pipe_private_t));

		/*
		 * Set the private structure in the
		 * pipe handle equal to NULL.
		 */
		ph->p_hcd_private = NULL;

		mutex_exit(&ph->p_mutex);
		mutex_exit(&ehcip->ehci_int_mutex);

		return (USB_NO_RESOURCES);
	}

	/*
	 * Isoch does not use QH so no need to
	 * restore data toggle or insert QH
	 */
	if (!(EHCI_ISOC_ENDPOINT(epdt))) {
		/* Restore the data toggle information */
		ehci_restore_data_toggle(ehcip, ph);
	}

	/*
	 * Insert the endpoint onto the host controller's
	 * appropriate endpoint list. The host controller
	 * will not schedule this endpoint and will not have
	 * any QTD's to process.  It will also update the pipe count.
	 */
	ehci_insert_qh(ehcip, ph);

	USB_DPRINTF_L4(PRINT_MASK_HCDI, ehcip->ehci_log_hdl,
	    "ehci_hcdi_pipe_open: ph = 0x%p", (void *)ph);

	ehcip->ehci_open_pipe_count++;

	mutex_exit(&ehcip->ehci_int_mutex);

	return (USB_SUCCESS);
}
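
The bandwidth comment in ehci_hcdi_pipe_open() amounts to a fixed budget:
roughly 20% of every one-millisecond frame stays reserved for control and
bulk traffic, so a new interrupt or isochronous endpoint is admitted only if
the already-committed periodic time plus its request fits in the remaining
~80%. The toy check below expresses that rule; the constants and the function
are illustrative, not the driver's real accounting.

#include <stdbool.h>
#include <stdint.h>

#define FRAME_TIME_US		1000	/* one frame period, 1 ms */
#define NONPERIODIC_RESERVE_US	200	/* ~20% kept for control/bulk */

/*
 * Hypothetical admission check: a new periodic endpoint needing
 * 'request_us' per frame is accepted only if the committed periodic
 * time plus the request still leaves the non-periodic reserve intact.
 */
static bool
periodic_bandwidth_fits(uint32_t committed_us, uint32_t request_us)
{
	return (committed_us + request_us <=
	    FRAME_TIME_US - NONPERIODIC_RESERVE_US);
}
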
Example #26
0
File: dsl_dir.c Project: RJVB/zfs
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err != 0)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_PUSHPAGE);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);

			if (err != 0)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strlcpy(dd->dd_myname, tail, MAXNAMELEN);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err != 0)
				goto errout;
		} else {
			(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa), MAXNAMELEN);
		}
		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err != 0)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}
		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_rele(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;

	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}
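
dsl_dir_hold_obj() relies on a construct-then-publish-or-discard pattern: any
thread may build and initialize a candidate dsl_dir_t, but only the first
dmu_buf_set_user_ie() call wins; a loser tears down its copy and adopts the
winner. The generic sketch below shows the same idea with an invented slot
API and C11 atomics; it illustrates the pattern only and is not ZFS code.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct node {
	int value;
} node_t;

/*
 * Hypothetical illustration of "construct, then publish or discard":
 * everyone may build a candidate, but only the first compare-and-swap
 * into *slot wins; losers free their copy and use the winner's object.
 * Mirrors the dmu_buf_set_user_ie() winner check above.
 */
static node_t *
get_or_create(_Atomic(node_t *) *slot, int value)
{
	node_t *mine, *expected, *cur;

	cur = atomic_load(slot);
	if (cur != NULL)
		return (cur);		/* already instantiated */

	mine = malloc(sizeof (*mine));
	if (mine == NULL)
		return (NULL);
	mine->value = value;

	expected = NULL;
	if (atomic_compare_exchange_strong(slot, &expected, mine))
		return (mine);		/* we won the race */

	free(mine);			/* somebody else won; use theirs */
	return (expected);
}
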
Example #27
0
File: fipe_pm.c Project: bahamas10/openzfs
/*
 * Initialize memory power management subsystem.
 * Note: This function should only be called from ATTACH.
 * Note: caller must ensure exclusive access to all fipe_xxx interfaces.
 */
int
fipe_init(dev_info_t *dip)
{
	size_t nsize;
	hrtime_t hrt;

	/* Initialize global control structure. */
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	mutex_init(&fipe_gbl_ctrl.lock, NULL, MUTEX_DRIVER, NULL);

	/* Query power management policy from device property. */
	fipe_pm_policy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    FIPE_PROP_PM_POLICY, fipe_pm_policy);
	if (fipe_pm_policy < 0 || fipe_pm_policy >= FIPE_PM_POLICY_MAX) {
		cmn_err(CE_CONT,
		    "?fipe: invalid power management policy %d.\n",
		    fipe_pm_policy);
		fipe_pm_policy = FIPE_PM_POLICY_BALANCE;
	}
	fipe_profile_curr = &fipe_profiles[fipe_pm_policy];

	/*
	 * Compute unscaled hrtime value corresponding to FIPE_STAT_INTERVAL.
	 * (1 << 36) should be big enough here.
	 */
	hrt = 1ULL << 36;
	scalehrtime(&hrt);
	fipe_idle_ctrl.tick_interval = FIPE_STAT_INTERVAL * (1ULL << 36) / hrt;

	if (fipe_mc_init(dip) != 0) {
		cmn_err(CE_WARN, "!fipe: failed to initialize mc state.");
		goto out_mc_error;
	}
	if (fipe_ioat_init() != 0) {
		cmn_err(CE_NOTE, "!fipe: failed to initialize ioat state.");
		goto out_ioat_error;
	}

	/* Allocate per-CPU structure. */
	nsize = max_ncpus * sizeof (fipe_cpu_state_t);
	nsize += CPU_CACHE_COHERENCE_SIZE;
	fipe_gbl_ctrl.state_buf = kmem_zalloc(nsize, KM_SLEEP);
	fipe_gbl_ctrl.state_size = nsize;
	fipe_cpu_states = (fipe_cpu_state_t *)P2ROUNDUP(
	    (intptr_t)fipe_gbl_ctrl.state_buf, CPU_CACHE_COHERENCE_SIZE);

#ifdef	FIPE_KSTAT_SUPPORT
	fipe_gbl_ctrl.fipe_kstat = kstat_create("fipe", 0, "fipe-pm", "misc",
	    KSTAT_TYPE_NAMED, sizeof (fipe_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (fipe_gbl_ctrl.fipe_kstat == NULL) {
		cmn_err(CE_CONT, "?fipe: failed to create kstat object.\n");
	} else {
		fipe_gbl_ctrl.fipe_kstat->ks_lock = &fipe_gbl_ctrl.lock;
		fipe_gbl_ctrl.fipe_kstat->ks_data = &fipe_kstat;
		fipe_gbl_ctrl.fipe_kstat->ks_update = fipe_kstat_update;
		kstat_install(fipe_gbl_ctrl.fipe_kstat);
	}
#endif	/* FIPE_KSTAT_SUPPORT */

	return (0);

out_ioat_error:
	fipe_mc_fini();
out_mc_error:
	mutex_destroy(&fipe_gbl_ctrl.lock);
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));

	return (-1);
}
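
fipe_init() inverts scalehrtime() by proportion: it scales one large sample
(1 << 36) and then uses the raw-to-scaled ratio to turn the statistics
interval back into unscaled ticks; the large sample keeps the integer
division accurate. The toy version below uses a stand-in scaling function
with a made-up 2.5 GHz clock purely for illustration.

#include <stdint.h>

/*
 * Stand-in for scalehrtime(): converts raw timer ticks to nanoseconds.
 * Here we simply pretend the timer runs at 2.5 GHz (2.5 ticks per ns).
 */
static void
fake_scalehrtime(uint64_t *t)
{
	*t = (*t * 2) / 5;	/* ticks -> ns at 2.5 GHz */
}

/*
 * Convert an interval in nanoseconds to raw ticks by proportion:
 * ticks = interval_ns * sample_ticks / scaled(sample_ticks).
 * Using a large sample (1 << 36) keeps the rounding error small.
 */
static uint64_t
ns_to_raw_ticks(uint64_t interval_ns)
{
	uint64_t sample = 1ULL << 36;

	fake_scalehrtime(&sample);		/* sample is now in ns */
	return (interval_ns * (1ULL << 36) / sample);
}
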
Example #28
0
File: vdev_file.c Project: Zak-Adelman/zfs
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	static vattr_t vattr;
	vdev_file_t *vf;
	struct vnode *vp;
	int error = 0;
    struct vnode *rootdir;

    dprintf("vdev_file_open %p\n", vd->vdev_tsd);

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
#ifdef _KERNEL
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		vf = vd->vdev_tsd;
        vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
        dprintf("skip to open\n");
		goto skip_open;
	}
#endif

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_PUSHPAGE);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');

    /*
      vn_openat(char *pnamep,
      enum uio_seg seg,
      int filemode,
      int createmode,
      struct vnode **vpp,
      enum create crwhy,
      mode_t umask,
      struct vnode *startvp)
      extern int vn_openat(char *pnamep, enum uio_seg seg, int filemode,
      int createmode, struct vnode **vpp, enum create crwhy,
      mode_t umask, struct vnode *startvp);
    */

    rootdir = getrootdir();

    error = vn_openat(vd->vdev_path + 1,
                      UIO_SYSSPACE,
                      spa_mode(vd->vdev_spa) | FOFFMAX,
                      0,
                      &vp,
                      0,
                      0,
                      rootdir
                      );

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;
#ifdef _KERNEL
    vf->vf_vid = vnode_vid(vp);
    dprintf("assigning vid %d\n", vf->vf_vid);

	/*
	 * Make sure it's a regular file.
	 */
	if (!vnode_isreg(vp)) {
        vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
        VN_RELE(vf->vf_vnode);
        return (ENODEV);
    }

#endif

#ifdef _KERNEL
skip_open:
	/*
	 * Determine the physical size of the file.
	 */
	vattr.va_mask = AT_SIZE;
    vn_lock(vf->vf_vnode, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL);
    VN_UNLOCK(vf->vf_vnode);
#endif
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
        VN_RELE(vf->vf_vnode);
		return (error);
	}

	*max_psize = *psize = vattr.va_size;
	*ashift = SPA_MINBLOCKSHIFT;
    VN_RELE(vf->vf_vnode);

	return (0);
}
Example #29
0
File: zfs_vfsops.c Project: dun/zfs
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	zsb->z_hold_mtx = vmem_zalloc(sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ,
	    KM_SLEEP);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	if (zsb->z_hold_mtx != NULL)
		vmem_free(zsb->z_hold_mtx,
		    sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example #30
0
File: dadk.c Project: apprisi/illumos-gate
/* ARGSUSED  */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
	cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev  = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head; /* I had to put it somewhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
			case DADKIO_RWCMD_READ :
				/*FALLTHROUGH*/
			case DADKIO_RWCMD_WRITE:
				rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
				    B_WRITE : B_READ);
				status = dadk_dk_buf_setup(dadkp,
				    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
				    UIO_SYSSPACE : UIO_USERSPACE), rw);
				return (status);
			default:
				return (EINVAL);
			}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
			struct buf *bp;
			int err = 0;
			struct dk_callback *dkc = (struct dk_callback *)arg;
			struct cmpkt *pktp;
			int is_sync = 1;

			mutex_enter(&dadkp->dad_mutex);
			if (dadkp->dad_noflush || !dadkp->dad_wce) {
				err = dadkp->dad_noflush ? ENOTSUP : 0;
				mutex_exit(&dadkp->dad_mutex);
				/*
				 * If a callback was requested: a
				 * callback will always be done if the
				 * caller saw the DKIOCFLUSHWRITECACHE
				 * ioctl return 0, and never done if the
				 * caller saw the ioctl return an error.
				 */
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				return (err);
			}
			mutex_exit(&dadkp->dad_mutex);

			bp = getrbuf(KM_SLEEP);

			bp->b_edev = dev;
			bp->b_dev  = cmpdev(dev);
			bp->b_flags = B_BUSY;
			bp->b_resid = 0;
			bp->b_bcount = 0;
			SET_BP_SEC(bp, 0);

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				struct dk_callback *dkc2 =
				    (struct dk_callback *)kmem_zalloc(
				    sizeof (struct dk_callback), KM_SLEEP);

				bcopy(dkc, dkc2, sizeof (*dkc2));
				bp->b_private = dkc2;
				bp->b_iodone = dadk_flushdone;
				is_sync = 0;
			}

			/*
			 * Setup command pkt
			 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
			 */
			pktp = dadk_pktprep(dadkp, NULL, bp,
			    dadk_iodone, DDI_DMA_SLEEP, NULL);

			pktp->cp_time = DADK_FLUSH_CACHE_TIME;

			*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
			pktp->cp_byteleft = 0;
			pktp->cp_private = NULL;
			pktp->cp_secleft = 0;
			pktp->cp_srtsec = -1;
			pktp->cp_bytexfer = 0;

			CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

			mutex_enter(&dadkp->dad_cmd_mutex);
			dadkp->dad_cmd_count++;
			mutex_exit(&dadkp->dad_cmd_mutex);
			FLC_ENQUE(dadkp->dad_flcobjp, bp);

			if (is_sync) {
				err = biowait(bp);
				freerbuf(bp);
			}
			return (err);
		}
	default:
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
			int ret;

			if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
			    DADK_SILENT)) {
				return (ret);
			}
			mutex_enter(&dadkp->dad_mutex);
			dadkp->dad_iostate = DKIO_EJECTED;
			cv_broadcast(&dadkp->dad_state_cv);
			mutex_exit(&dadkp->dad_mutex);

			return (0);

		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}