Example #1
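/*
 * Allocate the private context shared by the splat kmem cache tests.
 * The structure and a trailing array of 'count' kmem_cache_data_t
 * pointers are obtained with a single vmem_zalloc() call.
 */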
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc, int count)
{
	kmem_cache_priv_t *kcp;

	kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;
	kcp->kcp_kcd_count = count;

	return kcp;
}
Example #2
/*
 * Prepare the txg subsystem: allocate the per-CPU transaction state
 * and initialize the locks, condition variables, and commit callback
 * lists used by the quiesce and sync threads.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	/*
	 * Give each CPU its own lock and, for each of the TXG_SIZE txg
	 * slots, a condition variable and a commit callback list.
	 */
	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	/* CVs coordinating the quiesce and sync threads. */
	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
Example #3
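/*
 * Allocate and populate the run_args_t describing a zpios run.  The
 * structure and its variable-length array of zpios_region_t entries
 * are obtained together, then filled in from the user supplied command.
 */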
static int
zpios_setup_run(run_args_t **run_args, zpios_cmd_t *kcmd, struct file *file)
{
	run_args_t *ra;
	int rc, size;

	size = sizeof (*ra) + kcmd->cmd_region_count * sizeof (zpios_region_t);

	ra = vmem_zalloc(size, KM_SLEEP);
	if (ra == NULL) {
		zpios_print(file, "Unable to vmem_zalloc() %d bytes "
			    "for regions\n", size);
		return (-ENOMEM);
	}

	*run_args = ra;
	strncpy(ra->pool, kcmd->cmd_pool, ZPIOS_NAME_SIZE - 1);
	strncpy(ra->pre, kcmd->cmd_pre, ZPIOS_PATH_SIZE - 1);
	strncpy(ra->post, kcmd->cmd_post, ZPIOS_PATH_SIZE - 1);
	strncpy(ra->log, kcmd->cmd_log, ZPIOS_PATH_SIZE - 1);
	ra->id			= kcmd->cmd_id;
	ra->chunk_size		= kcmd->cmd_chunk_size;
	ra->thread_count	= kcmd->cmd_thread_count;
	ra->region_count	= kcmd->cmd_region_count;
	ra->region_size		= kcmd->cmd_region_size;
	ra->offset		= kcmd->cmd_offset;
	ra->region_noise	= kcmd->cmd_region_noise;
	ra->chunk_noise		= kcmd->cmd_chunk_noise;
	ra->thread_delay	= kcmd->cmd_thread_delay;
	ra->flags		= kcmd->cmd_flags;
	ra->block_size		= kcmd->cmd_block_size;
	ra->stats.wr_data	= 0;
	ra->stats.wr_chunks	= 0;
	ra->stats.rd_data	= 0;
	ra->stats.rd_chunks	= 0;
	ra->region_next		= 0;
	ra->file		= file;
	mutex_init(&ra->lock_work, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ra->lock_ctl, NULL, MUTEX_DEFAULT, NULL);

	(void) zpios_upcall(ra->pre, PHASE_PRE_RUN, ra, 0);

	rc = zpios_dmu_setup(ra);
	if (rc) {
		mutex_destroy(&ra->lock_ctl);
		mutex_destroy(&ra->lock_work);
		vmem_free(ra, size);
		*run_args = NULL;
	}

	return (rc);
}
Example #4
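/*
 * Verify vmem_zalloc() returns zero-filled memory across a range of
 * sizes, doubling from PAGE_SIZE up to 4x the maximum kmem_zalloc()
 * size so both the kmem and vmem allocation paths are exercised.
 */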
static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_zalloc() size to ensure both
	 * the kmem_zalloc() and vmem_zalloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each buffer was zero filled, skipping any
		 * allocations which failed above. */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			if (ptr[i] == NULL)
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						  "%d-byte allocation was "
						  "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
Example #5
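/*
 * Allocate the per-thread context used by the kmem cache tests, with
 * room for 'count' kmem_cache_data_t pointers appended to the structure.
 */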
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
	kmem_cache_thread_t *kct;

	ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
	kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_cache = NULL;
	kct->kct_id = id;
	kct->kct_kcd_count = count;

	return kct;
}
Example #6
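/*
 * Allocate a zfs_sb_t and initialize it from the named objset: the ZPL
 * properties are read, the system attribute (SA) tables are set up, and
 * the well-known master node objects are resolved before the locks and
 * znode list are created.
 */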
int
zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
 * Optional temporary mount options, freed in zfs_sb_free().
	 */
	zsb->z_mntopts = (zmo ? zmo : zfs_mntopts_alloc());

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	/* Resolve the well-known objects stored in the master node. */
	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	/* Array of mutexes used to serialize znode holds; large enough
	 * that it is allocated with vmem_zalloc() rather than kmem. */
	zsb->z_hold_mtx = vmem_zalloc(sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ,
	    KM_SLEEP);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	/* z_hold_mtx is NULL on every goto path above; guard the free
	 * in case an error path is ever added after its allocation. */
	if (zsb->z_hold_mtx != NULL)
		vmem_free(zsb->z_hold_mtx,
		    sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}