Example 1 (score: 0)
/*
 * Tear down the objset used by this run: optionally free the DMU
 * object(s) backing the regions, release our ownership of the objset,
 * and (when DMU_REMOVE is set) destroy the dataset itself.  Errors are
 * reported via zpios_print() but teardown always continues.  The whole
 * phase is timed into run_args->stats.rm_time and bracketed by the
 * PRE_REMOVE/POST_REMOVE upcalls.
 */
static void
zpios_remove_objset(run_args_t *run_args)
{
	zpios_time_t *t = &(run_args->stats.rm_time);
	zpios_region_t *region;
	char name[32];
	int rc = 0, i;

	(void) zpios_upcall(run_args->pre, PHASE_PRE_REMOVE, run_args, 0);
	t->start = zpios_timespec_now();

	/* Must match the dataset name built in zpios_dmu_setup() */
	(void) snprintf(name, sizeof (name), "%s/id_%d",
	    run_args->pool, run_args->id);

	if (run_args->flags & DMU_REMOVE) {
		if (run_args->flags & DMU_FPP) {
			/* File per process: one object per region */
			for (i = 0; i < run_args->region_count; i++) {
				region = &run_args->regions[i];
				rc = zpios_dmu_object_free(run_args,
				    region->obj.os, region->obj.obj);
				if (rc)
					zpios_print(run_args->file, "Error "
					    "removing object %d, %d\n",
					    (int)region->obj.obj, rc);
			}
		} else {
			/* Shared file: all regions use the same object */
			region = &run_args->regions[0];
			rc = zpios_dmu_object_free(run_args,
			    region->obj.os, region->obj.obj);
			if (rc)
				zpios_print(run_args->file, "Error "
				    "removing object %d, %d\n",
				    (int)region->obj.obj, rc);
		}
	}

	/* Drop the ownership taken via dmu_objset_own() during setup */
	dmu_objset_disown(run_args->os, zpios_tag);

	if (run_args->flags & DMU_REMOVE) {
		rc = dsl_destroy_head(name);
		if (rc)
			zpios_print(run_args->file, "Error dsl_destroy_head"
			    "(%s, ...) failed: %d\n", name, rc);
	}

	t->stop  = zpios_timespec_now();
	t->delta = zpios_timespec_sub(t->stop, t->start);
	(void) zpios_upcall(run_args->post, PHASE_POST_REMOVE, run_args, rc);
}
Example 2 (score: 0) — File: pios.c, Project: 64116278/zfs
/*
 * Create one kernel thread per configured I/O worker, then drive the
 * workers through the write phase followed by the read phase.  Threads
 * are created asleep and woken with wake_up_process() at the start of
 * each phase; phase completion is detected by sleeping on
 * run_args->waitq until zpios_thread_done() reports all workers done.
 * Per-thread counters are folded into run_args->stats under each
 * thread's lock after each phase.
 *
 * Returns 0 on success or the first error encountered, either from
 * setup (-ENOMEM/-EINVAL) or from a worker thread.
 */
static int
zpios_threads_run(run_args_t *run_args)
{
	struct task_struct *tsk, **tsks;
	thread_data_t *thr = NULL;
	zpios_time_t *tt = &(run_args->stats.total_time);
	zpios_time_t *tw = &(run_args->stats.wr_time);
	zpios_time_t *tr = &(run_args->stats.rd_time);
	int i, rc = 0, tc = run_args->thread_count;

	tsks = kmem_zalloc(sizeof (struct task_struct *) * tc, KM_SLEEP);
	if (tsks == NULL) {
		rc = -ENOMEM;
		goto cleanup2;
	}

	run_args->threads = kmem_zalloc(sizeof (thread_data_t *)*tc, KM_SLEEP);
	if (run_args->threads == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	init_waitqueue_head(&run_args->waitq);
	run_args->threads_done = 0;

	/* Create all the needed threads which will sleep until awoken */
	for (i = 0; i < tc; i++) {
		thr = kmem_zalloc(sizeof (thread_data_t), KM_SLEEP);
		if (thr == NULL) {
			rc = -ENOMEM;
			goto taskerr;
		}

		thr->thread_no = i;
		thr->run_args = run_args;
		thr->rc = 0;
		mutex_init(&thr->lock, NULL, MUTEX_DEFAULT, NULL);
		run_args->threads[i] = thr;

		tsk = kthread_create(zpios_thread_main, (void *)thr,
		    "%s/%d", "zpios_io", i);
		if (IS_ERR(tsk)) {
			rc = -EINVAL;
			goto taskerr;
		}

		tsks[i] = tsk;
	}

	tt->start = zpios_timespec_now();

	/* Wake up all threads for write phase */
	(void) zpios_upcall(run_args->pre, PHASE_PRE_WRITE, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for write phase to complete */
	tw->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tw->stop = zpios_timespec_now();
	(void) zpios_upcall(run_args->post, PHASE_POST_WRITE, run_args, rc);

	/* Aggregate write statistics and latch the first thread error */
	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.wr_data += thr->stats.wr_data;
		run_args->stats.wr_chunks += thr->stats.wr_chunks;
		mutex_exit(&thr->lock);
	}

	if (rc) {
		/*
		 * Wake up all threads and tell them to exit.  Note that
		 * 'thr' must be reloaded for each thread here; previously
		 * the stale pointer from the loop above was reused, so
		 * only one thread ever saw the error and the rest parked
		 * forever in their post-write schedule().
		 */
		for (i = 0; i < tc; i++) {
			thr = run_args->threads[i];

			mutex_enter(&thr->lock);
			thr->rc = rc;
			mutex_exit(&thr->lock);

			wake_up_process(tsks[i]);
		}
		goto out;
	}

	/* Reset the done count so the read phase can reuse the waitq */
	mutex_enter(&run_args->lock_ctl);
	ASSERT(run_args->threads_done == run_args->thread_count);
	run_args->threads_done = 0;
	mutex_exit(&run_args->lock_ctl);

	/* Wake up all threads for read phase */
	(void) zpios_upcall(run_args->pre, PHASE_PRE_READ, run_args, 0);
	for (i = 0; i < tc; i++)
		wake_up_process(tsks[i]);

	/* Wait for read phase to complete */
	tr->start = zpios_timespec_now();
	wait_event(run_args->waitq, zpios_thread_done(run_args));
	tr->stop = zpios_timespec_now();
	(void) zpios_upcall(run_args->post, PHASE_POST_READ, run_args, rc);

	/* Aggregate read statistics and latch the first thread error */
	for (i = 0; i < tc; i++) {
		thr = run_args->threads[i];

		mutex_enter(&thr->lock);

		if (!rc && thr->rc)
			rc = thr->rc;

		run_args->stats.rd_data += thr->stats.rd_data;
		run_args->stats.rd_chunks += thr->stats.rd_chunks;
		mutex_exit(&thr->lock);
	}
out:
	tt->stop  = zpios_timespec_now();
	tt->delta = zpios_timespec_sub(tt->stop, tt->start);
	tw->delta = zpios_timespec_sub(tw->stop, tw->start);
	tr->delta = zpios_timespec_sub(tr->stop, tr->start);

cleanup:
	kmem_free(tsks, sizeof (struct task_struct *) * tc);
cleanup2:
	/* Returns first encountered thread error (if any) */
	return (rc);

taskerr:
	/* Destroy all threads that were created successfully */
	for (i = 0; i < tc; i++)
		if (tsks[i] != NULL)
			(void) kthread_stop(tsks[i]);

	goto cleanup;
}
Example 3 (score: 0) — File: pios.c, Project: 64116278/zfs
/*
 * Worker thread entry point (started by kthread_create() from
 * zpios_threads_run()).  Runs the write phase: repeatedly pulls a work
 * item via zpios_get_work_item() and issues zpios_dmu_write() calls,
 * accumulating per-thread and per-region statistics.  It then signals
 * completion (threads_done++, wake_up on run_args->waitq) and parks in
 * schedule() until the controller wakes it for the read phase, which
 * mirrors the write loop with zpios_dmu_read() plus optional data
 * verification.  A non-zero thr->rc set by the controller while parked
 * means "abort without reading".  Exits via do_exit(0).
 */
static int
zpios_thread_main(void *data)
{
	thread_data_t *thr = (thread_data_t *)data;
	run_args_t *run_args = thr->run_args;
	zpios_time_t t;
	dmu_obj_t obj;
	__u64 offset;
	__u32 chunk_size;
	zpios_region_t *region;
	char *buf;
	unsigned int random_int;
	int chunk_noise = run_args->chunk_noise;
	int chunk_noise_tmp = 0;
	int thread_delay = run_args->thread_delay;
	int thread_delay_tmp = 0;
	int i, rc = 0;

	/* Randomly skew this thread's chunk size by +/- chunk_noise bytes */
	if (chunk_noise) {
		get_random_bytes(&random_int, sizeof (unsigned int));
		chunk_noise_tmp = (random_int % (chunk_noise * 2))-chunk_noise;
	}

	/*
	 * It's OK to vmem_alloc() this memory because it will be copied
	 * in to the slab and pointers to the slab copy will be setup in
	 * the bio when the IO is submitted.  This of course is not ideal
	 * since we want a zero-copy IO path if possible.  It would be nice
	 * to have direct access to those slab entries.
	 */
	chunk_size = run_args->chunk_size + chunk_noise_tmp;
	buf = (char *)vmem_alloc(chunk_size, KM_SLEEP);
	ASSERT(buf);

	/* Trivial data verification pattern for now. */
	if (run_args->flags & DMU_VERIFY)
		memset(buf, 'z', chunk_size);

	/* Write phase */
	mutex_enter(&thr->lock);
	thr->stats.wr_time.start = zpios_timespec_now();
	mutex_exit(&thr->lock);

	/* Loop until no regions have writable space left */
	while (zpios_get_work_item(run_args, &obj, &offset,
	    &chunk_size, &region, DMU_WRITE)) {
		/* Optional random per-IO delay to desynchronize threads */
		if (thread_delay) {
			get_random_bytes(&random_int, sizeof (unsigned int));
			thread_delay_tmp = random_int % thread_delay;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(thread_delay_tmp); /* In jiffies */
		}

		t.start = zpios_timespec_now();
		rc = zpios_dmu_write(run_args, obj.os, obj.obj,
		    offset, chunk_size, buf);
		t.stop  = zpios_timespec_now();
		t.delta = zpios_timespec_sub(t.stop, t.start);

		if (rc) {
			zpios_print(run_args->file, "IO error while doing "
				    "dmu_write(): %d\n", rc);
			break;
		}

		/* Per-thread write accounting */
		mutex_enter(&thr->lock);
		thr->stats.wr_data += chunk_size;
		thr->stats.wr_chunks++;
		thr->stats.wr_time.delta = zpios_timespec_add(
		    thr->stats.wr_time.delta, t.delta);
		mutex_exit(&thr->lock);

		/* Per-region write accounting (region shared by threads) */
		mutex_enter(&region->lock);
		region->stats.wr_data += chunk_size;
		region->stats.wr_chunks++;
		region->stats.wr_time.delta = zpios_timespec_add(
		    region->stats.wr_time.delta, t.delta);

		/* First time region was accessed */
		if (region->init_offset == offset)
			region->stats.wr_time.start = t.start;

		mutex_exit(&region->lock);
	}

	/* Report write-phase completion to the controller */
	mutex_enter(&run_args->lock_ctl);
	run_args->threads_done++;
	mutex_exit(&run_args->lock_ctl);

	mutex_enter(&thr->lock);
	thr->rc = rc;
	thr->stats.wr_time.stop = zpios_timespec_now();
	mutex_exit(&thr->lock);
	wake_up(&run_args->waitq);

	/* Park until the controller wakes us for the read phase */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();

	/* Check if we should exit (controller propagates errors via rc) */
	mutex_enter(&thr->lock);
	rc = thr->rc;
	mutex_exit(&thr->lock);
	if (rc)
		goto out;

	/* Read phase */
	mutex_enter(&thr->lock);
	thr->stats.rd_time.start = zpios_timespec_now();
	mutex_exit(&thr->lock);

	/* Loop until no regions have readable data left */
	while (zpios_get_work_item(run_args, &obj, &offset,
	    &chunk_size, &region, DMU_READ)) {
		/* Optional random per-IO delay to desynchronize threads */
		if (thread_delay) {
			get_random_bytes(&random_int, sizeof (unsigned int));
			thread_delay_tmp = random_int % thread_delay;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(thread_delay_tmp); /* In jiffies */
		}

		/* Clear the buffer so stale data cannot pass verification */
		if (run_args->flags & DMU_VERIFY)
			memset(buf, 0, chunk_size);

		t.start = zpios_timespec_now();
		rc = zpios_dmu_read(run_args, obj.os, obj.obj,
				    offset, chunk_size, buf);
		t.stop  = zpios_timespec_now();
		t.delta = zpios_timespec_sub(t.stop, t.start);

		if (rc) {
			zpios_print(run_args->file, "IO error while doing "
				    "dmu_read(): %d\n", rc);
			break;
		}

		/* Trivial data verification, expensive! */
		/* NOTE(review): a mismatch is printed but does not set rc,
		 * so verify failures do not fail the run — confirm intended */
		if (run_args->flags & DMU_VERIFY) {
			for (i = 0; i < chunk_size; i++) {
				if (buf[i] != 'z') {
					zpios_print(run_args->file,
					    "IO verify error: %d/%d/%d\n",
					    (int)obj.obj, (int)offset,
					    (int)chunk_size);
					break;
				}
			}
		}

		/* Per-thread read accounting */
		mutex_enter(&thr->lock);
		thr->stats.rd_data += chunk_size;
		thr->stats.rd_chunks++;
		thr->stats.rd_time.delta = zpios_timespec_add(
		    thr->stats.rd_time.delta, t.delta);
		mutex_exit(&thr->lock);

		/* Per-region read accounting (region shared by threads) */
		mutex_enter(&region->lock);
		region->stats.rd_data += chunk_size;
		region->stats.rd_chunks++;
		region->stats.rd_time.delta = zpios_timespec_add(
		    region->stats.rd_time.delta, t.delta);

		/* First time region was accessed */
		if (region->init_offset == offset)
			region->stats.rd_time.start = t.start;

		mutex_exit(&region->lock);
	}

	/* Report read-phase completion to the controller */
	mutex_enter(&run_args->lock_ctl);
	run_args->threads_done++;
	mutex_exit(&run_args->lock_ctl);

	mutex_enter(&thr->lock);
	thr->rc = rc;
	thr->stats.rd_time.stop = zpios_timespec_now();
	mutex_exit(&thr->lock);
	wake_up(&run_args->waitq);

out:
	vmem_free(buf, chunk_size);
	do_exit(0);

	return (rc); /* Unreachable, due to do_exit() */
}
Example 4 (score: 0) — File: pios.c, Project: 64116278/zfs
/*
 * Hand out the next chunk of work for a thread to perform.  Scans the
 * regions starting at run_args->region_next looking for one with at
 * least *chunk_size bytes remaining in the requested direction (flags
 * is DMU_WRITE or DMU_READ).  On success the chosen object, offset and
 * region are returned through the out parameters, the region's offset
 * cursor is advanced, and region_next is bumped (optionally by a
 * random stride when region_noise is set).  Regions found full have
 * their phase stop time latched the first time they are seen full.
 *
 * Returns 1 when a work item was assigned, 0 when all regions are
 * exhausted.  All shared state is protected by run_args->lock_work.
 */
static int
zpios_get_work_item(run_args_t *run_args, dmu_obj_t *obj, __u64 *offset,
		    __u32 *chunk_size, zpios_region_t **region, __u32 flags)
{
	int i, j, count = 0;
	unsigned int random_int;

	mutex_enter(&run_args->lock_work);
	i = run_args->region_next;

	/*
	 * XXX: I don't much care for this chunk selection mechansim
	 * there's the potential to burn a lot of time here doing nothing
	 * useful while holding the global lock.  This could give some
	 * misleading performance results.  I'll fix it latter.
	 */
	while (count < run_args->region_count) {
		__u64 *rw_offset;
		zpios_time_t *rw_time;

		j = i % run_args->region_count;
		*region = &(run_args->regions[j]);

		/* Select the offset cursor and timer for this direction */
		if (flags & DMU_WRITE) {
			rw_offset = &((*region)->wr_offset);
			rw_time = &((*region)->stats.wr_time);
		} else {
			rw_offset = &((*region)->rd_offset);
			rw_time = &((*region)->stats.rd_time);
		}

		/* test if region is fully written */
		if (*rw_offset + *chunk_size > (*region)->max_offset) {
			i++;
			count++;

			/* Latch stop time once, on first seeing it full */
			if (unlikely(rw_time->stop.ts_sec == 0) &&
			    unlikely(rw_time->stop.ts_nsec == 0))
				rw_time->stop = zpios_timespec_now();

			continue;
		}

		*offset = *rw_offset;
		*obj = (*region)->obj;
		*rw_offset += *chunk_size;

		/* update ctl structure */
		if (run_args->region_noise) {
			get_random_bytes(&random_int, sizeof (unsigned int));
			run_args->region_next +=
			    random_int % run_args->region_noise;
		} else {
			run_args->region_next++;
		}

		mutex_exit(&run_args->lock_work);
		return (1);
	}

	/* nothing left to do */
	mutex_exit(&run_args->lock_work);

	return (0);
}
Example 5 (score: 0) — File: pios.c, Project: 64116278/zfs
/*
 * Create and initialize the dataset and DMU object(s) for this run.
 * Creates "<pool>/id_<id>", takes ownership of the objset, allocates
 * either one shared object (default) or one object per region
 * (DMU_FPP), and initializes each region's offset bookkeeping.  On
 * error after creation the dataset is destroyed again.  The phase is
 * timed into run_args->stats.cr_time and bracketed by the
 * PRE_CREATE/POST_CREATE upcalls.
 *
 * Returns 0 on success or a negative error code.
 */
static int
zpios_dmu_setup(run_args_t *run_args)
{
	zpios_time_t *t = &(run_args->stats.cr_time);
	objset_t *os;
	char name[32];
	uint64_t obj = 0ULL;
	int i, rc = 0, rc2;

	(void) zpios_upcall(run_args->pre, PHASE_PRE_CREATE, run_args, 0);
	t->start = zpios_timespec_now();

	(void) snprintf(name, sizeof (name), "%s/id_%d",
	    run_args->pool, run_args->id);
	rc = dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL);
	if (rc) {
		zpios_print(run_args->file, "Error dmu_objset_create(%s, ...) "
			    "failed: %d\n", name, rc);
		goto out;
	}

	rc = dmu_objset_own(name, DMU_OST_OTHER, 0, zpios_tag, &os);
	if (rc) {
		zpios_print(run_args->file, "Error dmu_objset_own(%s, ...) "
			    "failed: %d\n", name, rc);
		goto out_destroy;
	}

	/* Single shared file: allocate the one object up front */
	if (!(run_args->flags & DMU_FPP)) {
		obj = zpios_dmu_object_create(run_args, os);
		if (obj == 0) {
			rc = -EBADF;
			zpios_print(run_args->file, "Error zpios_dmu_"
				    "object_create() failed, %d\n", rc);
			goto out_destroy;
		}
	}

	for (i = 0; i < run_args->region_count; i++) {
		zpios_region_t *region;

		region = &run_args->regions[i];
		mutex_init(&region->lock, NULL, MUTEX_DEFAULT, NULL);

		if (run_args->flags & DMU_FPP) {
			/* File per process */
			region->obj.os  = os;
			region->obj.obj = zpios_dmu_object_create(run_args, os);
			ASSERT(region->obj.obj > 0); /* XXX - Handle this */
			region->wr_offset   = run_args->offset;
			region->rd_offset   = run_args->offset;
			region->init_offset = run_args->offset;
			region->max_offset  = run_args->offset +
			    run_args->region_size;
		} else {
			/* Single shared file */
			region->obj.os  = os;
			region->obj.obj = obj;
			region->wr_offset   = run_args->offset * i;
			region->rd_offset   = run_args->offset * i;
			region->init_offset = run_args->offset * i;
			region->max_offset  = run_args->offset *
			    i + run_args->region_size;
		}
	}

	run_args->os = os;
out_destroy:
	if (rc) {
		/*
		 * NOTE(review): if dmu_objset_own() succeeded before the
		 * failure, the objset is still owned here and is never
		 * disowned on this path — confirm dsl_destroy_head() can
		 * succeed (or a dmu_objset_disown() is needed first).
		 */
		rc2 = dsl_destroy_head(name);
		if (rc2)
			zpios_print(run_args->file, "Error dsl_destroy_head"
				    "(%s, ...) failed: %d\n", name, rc2);
	}
out:
	t->stop  = zpios_timespec_now();
	t->delta = zpios_timespec_sub(t->stop, t->start);
	(void) zpios_upcall(run_args->post, PHASE_POST_CREATE, run_args, rc);

	return (rc);
}