Example #1
/*
 * Must be serialized by caller.
 */
int workqueue_enqueue(struct workqueue *wq, struct io_u *io_u)
{
	struct submit_worker *sw;

	sw = get_submit_worker(wq);
	if (sw) {
		const enum fio_ddir ddir = acct_ddir(io_u);
		struct thread_data *parent = wq->td;

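		/*
		 * Account the issue against the parent thread before handing
		 * the io_u to the worker.
		 */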
		if (ddir_rw(ddir)) {
			parent->io_issues[ddir]++;
			parent->io_issue_bytes[ddir] += io_u->xfer_buflen;
			parent->rate_io_issue_bytes[ddir] += io_u->xfer_buflen;
		}

		pthread_mutex_lock(&sw->lock);
		flist_add_tail(&io_u->verify_list, &sw->work_list);
		sw->seq = ++wq->work_seq;
		sw->flags &= ~SW_F_IDLE;
		pthread_mutex_unlock(&sw->lock);

		pthread_cond_signal(&sw->cond);
		return FIO_Q_QUEUED;
	}

	return FIO_Q_BUSY;
}
Example #2
static void add_reply(uint64_t tag, struct flist_head *list)
{
	struct fio_net_cmd_reply *reply;

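	/*
	 * The reply pointer was stashed in the 64-bit command tag on the
	 * sending side; recover it and queue it on the caller's list.
	 */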
	reply = (struct fio_net_cmd_reply *) (uintptr_t) tag;
	flist_add_tail(&reply->list, list);
}
Example #3
static void fio_server_add_fork_item(pid_t pid, struct flist_head *list)
{
	struct fio_fork_item *ffi;

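	/* Track the forked pid so the server can later reap it and report its status. */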
	ffi = malloc(sizeof(*ffi));
	ffi->exitval = 0;
	ffi->signal = 0;
	ffi->exited = 0;
	ffi->pid = pid;
	flist_add_tail(&ffi->list, list);
}
Example #4
static void trace_add_open_event(struct thread_data *td, int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

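	/* DDIR_INVAL flags this replay entry as a file action rather than real I/O. */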
	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = FIO_LOG_OPEN_FILE;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
Example #5
static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

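	/* Same as the open-only variant above, but the caller supplies the action. */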
	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
Example #6
/*
 * Must be serialized by caller.
 */
void workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work)
{
	struct submit_worker *sw;

	sw = get_submit_worker(wq);
	assert(sw);

	pthread_mutex_lock(&sw->lock);
	flist_add_tail(&work->list, &sw->work_list);
	sw->seq = ++wq->work_seq;
	sw->flags &= ~SW_F_IDLE;
	pthread_mutex_unlock(&sw->lock);

	pthread_cond_signal(&sw->cond);
}
Example #7
static void gopt_changed(struct gopt *gopt)
{
	struct gopt_job_view *gjv = gopt->gjv;

	if (gjv->in_job_switch)
		return;

	/*
	 * Add to changed list. This also prevents the option from being
	 * freed when the widget is destroyed.
	 */
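	/* An empty list node means this option isn't on the changed list yet. */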
	if (flist_empty(&gopt->changed_list)) {
		flist_add_tail(&gopt->changed_list, &gjv->changed_list);
		gopt_dialog_update_apply_button(gjv);
	}
}
Example #8
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
    struct rb_node **p, *parent;
    struct io_piece *ipo, *__ipo;

    ipo = malloc(sizeof(struct io_piece));
    ipo->file = io_u->file;
    ipo->offset = io_u->offset;
    ipo->len = io_u->buflen;

    /*
     * We don't need to sort the entries, if:
     *
     *	Sequential writes, or
     *	Random writes that lay out the file as it goes along
     *
     * For both these cases, just reading back data in the order we
     * wrote it out is the fastest.
     */
    if (!td_random(td) || !td->o.overwrite) {
        INIT_FLIST_HEAD(&ipo->list);
        flist_add_tail(&ipo->list, &td->io_hist_list);
        return;
    }

    RB_CLEAR_NODE(&ipo->rb_node);
    p = &td->io_hist_tree.rb_node;
    parent = NULL;

    /*
     * Sort the entry into the verification list
     */
    while (*p) {
        parent = *p;

        __ipo = rb_entry(parent, struct io_piece, rb_node);
        if (ipo->offset <= __ipo->offset)
            p = &(*p)->rb_left;
        else
            p = &(*p)->rb_right;
    }

    rb_link_node(&ipo->rb_node, parent, p);
    rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
}
Example #9
/*
 * Push IO verification to a separate thread
 */
int verify_io_u_async(struct thread_data *td, struct io_u *io_u)
{
	if (io_u->file)
		put_file_log(td, io_u->file);

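	/* Queue the io_u for the verify thread; freeing it is deferred to that thread. */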
	pthread_mutex_lock(&td->io_u_lock);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		io_u->flags &= ~IO_U_F_IN_CUR_DEPTH;
	}
	flist_add_tail(&io_u->verify_list, &td->verify_list);
	io_u->flags |= IO_U_F_FREE_DEF;
	pthread_mutex_unlock(&td->io_u_lock);

	pthread_cond_signal(&td->verify_cond);
	return 0;
}
Example #10
struct fio_file *add_file_hash(struct fio_file *f)
{
    struct fio_file *alias;

    if (fio_file_hashed(f))
        return NULL;

    INIT_FLIST_HEAD(&f->hash_list);

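    /* The global hash lock serializes lookup and insert, keeping the pair atomic. */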
    fio_mutex_down(hash_lock);

    alias = __lookup_file_hash(f->file_name);
    if (!alias) {
        fio_file_set_hashed(f);
        flist_add_tail(&f->hash_list, &file_hash[hash(f->file_name)]);
    }

    fio_mutex_up(hash_lock);
    return alias;
}
Example #11
/*
 * Push IO verification to a separate thread
 */
int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
{
	struct io_u *io_u = *io_u_ptr;

	pthread_mutex_lock(&td->io_u_lock);

	if (io_u->file)
		put_file_log(td, io_u->file);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		io_u_clear(io_u, IO_U_F_IN_CUR_DEPTH);
	}
	flist_add_tail(&io_u->verify_list, &td->verify_list);
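	/* Clear the caller's pointer: ownership now belongs to the verify thread. */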
	*io_u_ptr = NULL;
	pthread_mutex_unlock(&td->io_u_lock);

	pthread_cond_signal(&td->verify_cond);
	return 0;
}
Example #12
struct fio_file *add_file_hash(struct fio_file *f)
{
	struct fio_file *alias;

	if (f->flags & FIO_FILE_HASHED)
		return NULL;

	INIT_FLIST_HEAD(&f->hash_list);

	fio_mutex_down(hash_lock);

	alias = __lookup_file_hash(f->file_name);
	if (!alias) {
		f->flags |= FIO_FILE_HASHED;
		flist_add_tail(&f->hash_list, &file_hash[hash(f->file_name)]);
	}

	fio_mutex_up(hash_lock);
	return alias;
}
Example #13
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
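	/* An in-flight request that gets requeued was never truly issued; undo the accounting. */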
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
		td->io_issues[__io_u->ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del(&__io_u->list);
	flist_add_tail(&__io_u->list, &td->io_u_requeues);
	td_io_u_unlock(td);
	*io_u = NULL;
}
Example #14
struct io_piece *iohist_hash_add(struct thread_data *td, struct io_piece *ipo)
{
	struct flist_head *iohist_hash = td->iohist_hash;
	struct io_piece *_ipo;
	struct iohist_key key;

	if (io_piece_hashed(ipo))
		return NULL;

	set_iohist_key(&key, ipo->file, ipo->block);
	INIT_FLIST_HEAD(&ipo->hash_list);

	_ipo = __iohist_hash_find(td, &key);
	if (!_ipo) {
		io_piece_set_hashed(ipo);
		flist_add_tail(&ipo->hash_list, &iohist_hash[hash(&key)]);
	}

	return _ipo;
}
Example #15
/*
 * Invoked from our compress helper thread, when logging would have exceeded
 * the specified memory limitation. Compresses the previously stored
 * entries.
 */
static int gz_work(struct tp_work *work)
{
	struct iolog_flush_data *data;
	struct iolog_compress *c;
	struct flist_head list;
	unsigned int seq;
	z_stream stream;
	size_t total = 0;
	int ret;

	INIT_FLIST_HEAD(&list);

	data = container_of(work, struct iolog_flush_data, work);

	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	ret = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
	if (ret != Z_OK) {
		log_err("fio: failed to init gz stream\n");
		return 0;
	}

	seq = ++data->log->chunk_seq;

	stream.next_in = (void *) data->samples;
	stream.avail_in = data->nr_samples * log_entry_sz(data->log);

	dprint(FD_COMPRESS, "deflate input size=%lu, seq=%u\n",
				(unsigned long) stream.avail_in, seq);
	do {
		c = get_new_chunk(seq);
		stream.avail_out = GZ_CHUNK;
		stream.next_out = c->buf;
		ret = deflate(&stream, Z_NO_FLUSH);
		if (ret < 0) {
			log_err("fio: deflate log (%d)\n", ret);
			free_chunk(c);
			goto err;
		}

		c->len = GZ_CHUNK - stream.avail_out;
		flist_add_tail(&c->list, &list);
		total += c->len;
	} while (stream.avail_in);

	stream.next_out = c->buf + c->len;
	stream.avail_out = GZ_CHUNK - c->len;

	ret = deflate(&stream, Z_FINISH);
	if (ret == Z_STREAM_END)
		c->len = GZ_CHUNK - stream.avail_out;
	else {
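		/*
		 * The final chunk filled up before the stream was flushed;
		 * keep allocating chunks until deflate reports Z_STREAM_END.
		 */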
		do {
			c = get_new_chunk(seq);
			stream.avail_out = GZ_CHUNK;
			stream.next_out = c->buf;
			ret = deflate(&stream, Z_FINISH);
			c->len = GZ_CHUNK - stream.avail_out;
			total += c->len;
			flist_add_tail(&c->list, &list);
		} while (ret != Z_STREAM_END);
	}

	dprint(FD_COMPRESS, "deflated to size=%lu\n", (unsigned long) total);

	ret = deflateEnd(&stream);
	if (ret != Z_OK)
		log_err("fio: deflateEnd %d\n", ret);

	free(data->samples);

	if (!flist_empty(&list)) {
		pthread_mutex_lock(&data->log->chunk_lock);
		flist_splice_tail(&list, &data->log->chunk_list);
		pthread_mutex_unlock(&data->log->chunk_lock);
	}

	ret = 0;
done:
	if (work->wait) {
		work->done = 1;
		pthread_cond_signal(&work->cv);
	} else
		free(data);

	return ret;
err:
	while (!flist_empty(&list)) {
		c = flist_first_entry(&list, struct iolog_compress, list);
		flist_del(&c->list);
		free_chunk(c);
	}
	ret = 1;
	goto done;
}
Example #16
void queue_io_piece(struct thread_data *td, struct io_piece *ipo)
{
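	/* Queue the piece for replay and count its length toward the expected total. */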
	flist_add_tail(&ipo->list, &td->io_log_list);
	td->total_io_size += ipo->len;
}
Example #17
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);
	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	      (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset)
			p = &(*p)->rb_left;
		else if (ipo->offset > __ipo->offset)
			p = &(*p)->rb_right;
		else {
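			/*
			 * Overlapping block: drop the stale entry and restart the
			 * walk, since rb_erase() may have rebalanced the tree.
			 */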
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}