Example #1
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS)
        return;

    /*
     * Note that as of Linux 2.6.31 neither the block device code nor any
     * filesystem implements cancellation of AIO requests.
     * Thus the polling loop below is the normal code path.
     */
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    if (ret == 0) {
        laiocb->ret = -ECANCELED;
        return;
    }

    /*
     * We have to wait for the iocb to finish.
     *
     * The only way to get the iocb status update is by polling the io context.
     * We might be able to do this slightly more optimally by removing the
     * O_NONBLOCK flag.
     */
    while (laiocb->ret == -EINPROGRESS)
        qemu_laio_completion_cb(laiocb->ctx);
}
Example #2
int compat0_1_io_cancel(io_context_t ctx, struct iocb *iocb)
{
	struct io_event event;

	/* FIXME: the old ABI would return the event on the completion queue */
	return io_cancel(ctx, iocb, &event);
}
Example #3
static int
sector_io(struct sbd_context *st, int sector, void *data, int rw)
{
	struct timespec	timeout;
	struct io_event event;
	struct iocb	*ios[1] = { &st->io };
	long		r;

	timeout.tv_sec  = timeout_io;
	timeout.tv_nsec = 0;

	memset(&st->io, 0, sizeof(struct iocb));
	if (rw) {
		io_prep_pwrite(&st->io, st->devfd, data, sector_size, sector_size * sector);
	} else {
		io_prep_pread(&st->io, st->devfd, data, sector_size, sector_size * sector);
	}

	if (io_submit(st->ioctx, 1, ios) != 1) {
		cl_log(LOG_ERR, "Failed to submit IO request! (rw=%d)", rw);
		return -1;
	}

	errno = 0;
	r = io_getevents(st->ioctx, 1L, 1L, &event, &timeout);

	if (r < 0) {
		cl_log(LOG_ERR, "Failed to retrieve IO events (rw=%d)", rw);
		return -1;
	} else if (r < 1L) {
		cl_log(LOG_INFO, "Cancelling IO request due to timeout (rw=%d)", rw);
		r = io_cancel(st->ioctx, ios[0], &event);
		if (r) {
			DBGLOG(LOG_INFO, "Could not cancel IO request (rw=%d)", rw);
			/* Doesn't really matter, debugging information.
			 */
		}
		return -1;
	} else if (r > 1L) {
		cl_log(LOG_ERR, "More than one IO was returned (r=%ld)", r);
		return -1;
	}

	/* IO is happy */
	if (event.res == sector_size) {
		return 0;
	} else {
		cl_log(LOG_ERR, "Short IO (rw=%d, res=%lu, sector_size=%d)",
				rw, event.res, sector_size);
		return -1;
	}
}
Example #4
static void laio_cancel(BlockDriverAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}
Example #5
int main(int argc, char *argv[])
{
	int lc;
	const char *msg;

	io_context_t ctx;

	memset(&ctx, 0, sizeof(ctx));

	if ((msg = parse_opts(argc, argv, NULL, NULL)) != NULL)
		tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		tst_count = 0;

		TEST(io_cancel(ctx, NULL, NULL));

		switch (TEST_RETURN) {
		case 0:
			tst_resm(TFAIL, "call succeeded unexpectedly");
			break;
		case EXP_RET:
			tst_resm(TPASS, "expected failure - "
				 "returned value = %ld : %s", TEST_RETURN,
				 strerror(-TEST_RETURN));
			break;
		case -ENOSYS:
			tst_resm(TCONF, "io_cancel returned ENOSYS");
			break;
		default:
			tst_resm(TFAIL, "unexpected returned value - %s (%i) - "
				 "expected %s (%i)", strerror(-TEST_RETURN),
				 (int)TEST_RETURN, strerror(-EXP_RET), EXP_RET);
			break;
		}

	}

	cleanup();
	tst_exit();
}
Example #6
void sig_term(int sig)
{
	io_cancel();
	terminate = 1;
}
Example #7
static int
check_state(int fd, struct directio_context *ct, int sync, int timeout_secs)
{
	struct timespec	timeout = { .tv_nsec = 5 };
	struct io_event event;
	struct stat	sb;
	int		rc = PATH_UNCHECKED;
	long		r;

	if (fstat(fd, &sb) == 0) {
		LOG(4, "called for %x", (unsigned) sb.st_rdev);
	}
	if (sync > 0) {
		LOG(4, "called in synchronous mode");
		timeout.tv_sec  = timeout_secs;
		timeout.tv_nsec = 0;
	}

	if (!ct->running) {
		struct iocb *ios[1] = { &ct->io };

		LOG(3, "starting new request");
		memset(&ct->io, 0, sizeof(struct iocb));
		io_prep_pread(&ct->io, fd, ct->ptr, ct->blksize, 0);
		if (io_submit(ct->ioctx, 1, ios) != 1) {
			LOG(3, "io_submit error %i", errno);
			return PATH_UNCHECKED;
		}
	}
	ct->running++;

	errno = 0;
	r = io_getevents(ct->ioctx, 1L, 1L, &event, &timeout);

	if (r < 0) {
		LOG(3, "async io getevents returned %li (errno=%s)", r,
		    strerror(errno));
		ct->running = 0;
		rc = PATH_UNCHECKED;
	} else if (r < 1L) {
		if (ct->running > timeout_secs || sync) {
			struct iocb *ios[1] = { &ct->io };

			LOG(3, "abort check on timeout");
			r = io_cancel(ct->ioctx, ios[0], &event);
			/*
			 * Only reset ct->running if we really
			 * could abort the pending I/O
			 */
			if (r)
				LOG(3, "io_cancel error %i", errno);
			else
				ct->running = 0;
			rc = PATH_DOWN;
		} else {
			LOG(3, "async io pending");
			rc = PATH_PENDING;
		}
	} else {
		LOG(3, "io finished %lu/%lu", event.res, event.res2);
		ct->running = 0;
		rc = (event.res == ct->blksize) ? PATH_UP : PATH_DOWN;
	}

	return rc;
}

int libcheck_check (struct checker * c)
{
	int ret;
	struct directio_context * ct = (struct directio_context *)c->context;

	if (!ct)
		return PATH_UNCHECKED;

	ret = check_state(c->fd, ct, c->sync, c->timeout);

	switch (ret)
	{
	case PATH_UNCHECKED:
		MSG(c, MSG_DIRECTIO_UNKNOWN);
		break;
	case PATH_DOWN:
		MSG(c, MSG_DIRECTIO_DOWN);
		break;
	case PATH_UP:
		MSG(c, MSG_DIRECTIO_UP);
		break;
	case PATH_PENDING:
		MSG(c, MSG_DIRECTIO_PENDING);
		break;
	default:
		break;
	}
	return ret;
}
Example #8
static void sig_term(int sig)
{
	io_cancel();
	g_main_loop_quit(event_loop);
}
Example #9
static int do_linux_aio(int fd, uint64_t offset, char *buf, int len,
			struct task *task, int cmd)
{
	struct timespec ts;
	struct aicb *aicb;
	struct iocb *iocb;
	struct io_event event;
	int rv;

	/* I expect this pre-emptively catches the io_submit EAGAIN case */

	aicb = find_callback_slot(task);
	if (!aicb)
		return -ENOENT;

	iocb = &aicb->iocb;

	memset(iocb, 0, sizeof(struct iocb));
	iocb->aio_fildes = fd;
	iocb->aio_lio_opcode = cmd;
	iocb->u.c.buf = buf;
	iocb->u.c.nbytes = len;
	iocb->u.c.offset = offset;

	rv = io_submit(task->aio_ctx, 1, &iocb);
	if (rv < 0) {
		log_taske(task, "aio submit %p:%p:%p rv %d fd %d cmd %d",
			  aicb, iocb, buf, rv, fd, cmd);
		goto out;
	}

	task->io_count++;

	/* don't reuse aicb->iocb or free the buf until we reap the event */
	aicb->used = 1;
	aicb->buf = buf;

	memset(&ts, 0, sizeof(struct timespec));
	ts.tv_sec = task->io_timeout_seconds;
 retry:
	memset(&event, 0, sizeof(event));

	rv = io_getevents(task->aio_ctx, 1, 1, &event, &ts);
	if (rv == -EINTR)
		goto retry;
	if (rv < 0) {
		log_taske(task, "aio getevent %p:%p:%p rv %d",
			  aicb, iocb, buf, rv);
		goto out;
	}
	if (rv == 1) {
		struct iocb *ev_iocb = event.obj;
		struct aicb *ev_aicb = container_of(ev_iocb, struct aicb, iocb);

		ev_aicb->used = 0;

		if (ev_iocb != iocb) {
			log_taske(task, "aio collect %p:%p:%p result %ld:%ld other free",
				  ev_aicb, ev_iocb, ev_aicb->buf, event.res, event.res2);
			free(ev_aicb->buf);
			ev_aicb->buf = NULL;
			goto retry;
		}
		if ((int)event.res < 0) {
			log_taske(task, "aio collect %p:%p:%p result %ld:%ld match res",
				  ev_aicb, ev_iocb, ev_aicb->buf, event.res, event.res2);
			rv = event.res;
			goto out;
		}
		if (event.res != len) {
			log_taske(task, "aio collect %p:%p:%p result %ld:%ld match len %d",
				  ev_aicb, ev_iocb, ev_aicb->buf, event.res, event.res2, len);
			rv = -EMSGSIZE;
			goto out;
		}

		/* standard success case */
		rv = 0;
		goto out;
	}

	/* Timed out waiting for result.  If cancel fails, we could retry
	   io_getevents indefinitely, but that removes the whole point of using
	   aio, which is the timeout.  So, we need to be prepared to reap the
	   event the next time we call io_getevents for a different i/o.  We
	   can't reuse the iocb for this timed-out io until we get an event for
	   it because we need to compare the iocb to event.obj to distinguish
	   events for separate submissions.  (A minimal sketch of this deferred
	   reap follows this example.)

	   <phro> dct: io_cancel doesn't work, in general.  you are very
	   likely going to get -EINVAL from that call */

	task->to_count++;

	log_taske(task, "aio timeout %p:%p:%p sec %d to_count %d",
		  aicb, iocb, buf, task->io_timeout_seconds, task->to_count);

	rv = io_cancel(task->aio_ctx, iocb, &event);
	if (!rv) {
		aicb->used = 0;
		rv = -ECANCELED;
	} else {
		/* aicb->used and aicb->buf both remain set */
		rv = SANLK_AIO_TIMEOUT;

		if (cmd == IO_CMD_PREAD)
			task->read_iobuf_timeout_aicb = aicb;
	}
 out:
	return rv;
}
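The comment above describes deferring the reap of a timed-out request: the iocb stays reserved until a later io_getevents() call returns its event, which is then matched back to its submission via event.obj. Below is a minimal sketch of that pattern, not taken from sanlock or any of the projects above; the struct slot wrapper and the reap_stale_events() helper are hypothetical names invented for illustration.

#include <libaio.h>
#include <time.h>

/* Hypothetical per-request bookkeeping: the iocb is embedded first so that
 * event.obj (a struct iocb *) can be cast back to the owning slot. */
struct slot {
	struct iocb iocb;	/* must stay untouched until its event is reaped */
	int busy;		/* set at submit time, cleared only on reap */
};

/* Drain whatever completions are already queued, without blocking, and
 * release the slots (and buffers) of requests that timed out earlier. */
static void reap_stale_events(io_context_t ctx)
{
	struct io_event events[16];
	struct timespec zero = { 0, 0 };
	int n, i;

	/* min_nr = 0 plus a zero timeout makes this a non-blocking poll */
	n = io_getevents(ctx, 0, 16, events, &zero);
	for (i = 0; i < n; i++) {
		struct slot *s = (struct slot *)events[i].obj;
		s->busy = 0;	/* the slot and its buffer may be reused now */
	}
}

In the sanlock code above the same role is played by aicb->used and by the ev_iocb != iocb branch in the retry loop, which frees the buffer of a previously timed-out submission before looking at the event it actually waited for.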
Example #10
static void sig_hup(int sig)
{
     vtun_syslog(LOG_INFO, "Reestablishing connection");
     io_cancel();
     linker_term = VTUN_SIG_HUP;
}
Example #11
static void sig_term(int sig)
{
     vtun_syslog(LOG_INFO, "Closing connection");
     io_cancel();
     linker_term = VTUN_SIG_TERM;
}
Example #12
static void sig_term(int sig)
{
	syslog(LOG_INFO, "Closing RFCOMM channel");
	io_cancel();
}