Example 1: async_io(), the asynchronous path, with a mempool-allocated struct io and a caller-supplied completion callback
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
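
For context, async_io() is reached through the public dm_io() entry point whenever the caller supplies a notification callback. A hypothetical caller might look like the sketch below; it is not from dm-io.c, it assumes the 3.x-era struct dm_io_request layout with a .bi_rw field, and my_io_done/issue_async_write are invented names:

static void my_io_done(unsigned long error_bits, void *context)
{
	/* One bit is set in error_bits per failed region. */
}

static int issue_async_write(struct dm_io_client *client,
			     struct dm_io_region *region, void *data)
{
	struct dm_io_request req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_KMEM,		/* data is a kernel address */
		.mem.ptr.addr = data,
		.notify.fn = my_io_done,	/* non-NULL selects the async path */
		.notify.context = NULL,
		.client = client,
	};

	return dm_io(&req, 1, region, NULL);
}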
Example 2: sync_io(), blocking on a completion through a helper struct sync_io
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}
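
Example 2 relies on a helper type and callback defined elsewhere in dm-io.c. Reconstructed from the way they are used above, they plausibly look like this (a sketch, not verbatim source):

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);	/* wakes wait_for_completion_io() above */
}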
Example 3: sync_io(), an older variant with a manually aligned on-stack struct io that retries without the barrier flag if the device reports EOPNOTSUPP
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
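
The over-allocate-and-round-up idiom described in the comment is worth seeing in isolation. Here is a minimal, self-contained userspace rendition: PTR_ALIGN is a kernel macro, so it is open-coded; struct io_like and its 64-byte alignment are invented for the demo; the kernel version's volatile is omitted because nothing needs to defeat the optimizer in this toy.

#include <stdio.h>
#include <stdint.h>

struct io_like {
	long a, b;
} __attribute__((aligned(64)));	/* stricter than default stack alignment */

/* Open-coded equivalent of the kernel's PTR_ALIGN(): round p up to a. */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	/*
	 * Over-allocate by (alignment - 1) bytes so that rounding the
	 * pointer up always stays inside the buffer.
	 */
	char buf[sizeof(struct io_like) + __alignof__(struct io_like) - 1];
	struct io_like *io = PTR_ALIGN(buf, __alignof__(struct io_like));

	printf("raw=%p aligned=%p\n", (void *)buf, (void *)io);
	return 0;
}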
Example 4: sync_io(), a sleeper variant with an on-stack struct io and an io_schedule() wait loop on the current task
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
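
The wait loop above only makes sense together with its wake-up side, dec_count(), which lives elsewhere in dm-io.c. Reconstructed from how this version uses io->count and io->sleeper, it plausibly reads as follows (a sketch; details differ between kernel versions):

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			/* Pairs with the io_schedule() loop in sync_io(). */
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}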
Example 5: sync_io(), a variant that waits on an on-stack completion (DECLARE_COMPLETION_ONSTACK) instead of a sleeper task
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
	DECLARE_COMPLETION_ONSTACK(wait);

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->wait = &wait;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&wait);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
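
For symmetry with the asynchronous sketch after Example 1, a hypothetical synchronous caller would leave notify.fn NULL so that dm_io() blocks in sync_io(). Again this assumes the 3.x-era request layout, and read_region_sync is an invented name:

static int read_region_sync(struct dm_io_client *client,
			    struct dm_io_region *region, void *buf)
{
	unsigned long error_bits = 0;
	struct dm_io_request req = {
		.bi_rw = READ,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.notify.fn = NULL,	/* NULL selects the blocking sync_io() path */
		.client = client,
	};
	int r = dm_io(&req, 1, region, &error_bits);

	/* On -EIO, error_bits has one bit set per failed region. */
	return r;
}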