Example #1
int read_some_events(struct thread_info *t) {
    struct io_unit *event_io;
    struct io_event *event;
    int nr;
    int i; 
    int min_nr = io_iter;
    struct timeval stop_time;

    if (t->num_global_pending < io_iter)
        min_nr = t->num_global_pending;

#ifdef NEW_GETEVENTS
    nr = io_getevents(t->io_ctx, min_nr, t->num_global_events, t->events, NULL);
#else
    nr = io_getevents(t->io_ctx, t->num_global_events, t->events, NULL);
#endif
    if (nr <= 0)
        return nr;

    gettimeofday(&stop_time, NULL);
    for (i = 0 ; i < nr ; i++) {
        event = t->events + i;
        event_io = (struct io_unit *)((unsigned long)event->obj);
        finish_io(t, event_io, event->res, &stop_time);
    }
    return nr;
}
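Example #1 only drains completions; the requests themselves are prepared and submitted elsewhere in the same program. For orientation, a minimal self-contained sketch of the libaio lifecycle such a drain loop sits inside (illustrative only, not taken from the project above; the file name and sizes are placeholders):

#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    io_context_t ctx = 0;
    struct iocb cb, *cbs[1] = { &cb };
    struct io_event ev;
    void *buf = NULL;
    int fd, rc;

    fd = open("/tmp/aio-demo.dat", O_RDONLY | O_DIRECT);
    if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
        return 1;

    if (io_setup(8, &ctx) < 0)                  /* create the AIO context */
        return 1;
    io_prep_pread(&cb, fd, buf, 4096, 0);       /* describe one 4 KiB read */
    if (io_submit(ctx, 1, cbs) != 1)            /* queue the request */
        return 1;
    rc = io_getevents(ctx, 1, 1, &ev, NULL);    /* block for its completion */
    if (rc == 1)
        printf("read %ld bytes\n", (long)ev.res);

    io_destroy(ctx);
    close(fd);
    free(buf);
    return 0;
}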
Example #2
int file_wait(int thread_id, long nreq)
{ 
  long            i;
  long            nr;
  struct io_event *event;
  sb_aio_oper_t   *oper;
  struct iocb     *iocbp;

  /* Try to read some events */
#ifdef HAVE_OLD_GETEVENTS
  (void)nreq; /* unused */
  nr = io_getevents(aio_ctxts[thread_id].io_ctxt, file_async_backlog,
                    aio_ctxts[thread_id].events, NULL);
#else
  nr = io_getevents(aio_ctxts[thread_id].io_ctxt, nreq, file_async_backlog,
                    aio_ctxts[thread_id].events, NULL);
#endif
  if (nr < 1)
  {
    log_errno(LOG_FATAL, "io_getevents() failed!");
    return 1;
  }

  /* Verify results */
  for (i = 0; i < nr; i++)
  {
    event = (struct io_event *)aio_ctxts[thread_id].events + i;
    iocbp = (struct iocb *)(unsigned long)event->obj;
    oper = (sb_aio_oper_t *)iocbp;
    switch (oper->type) {
      case FILE_OP_TYPE_FSYNC:
        if (event->res != 0)
        {
          log_text(LOG_FATAL, "Asynchronous fsync failed!\n");
          return 1;
        }
        break;
      case FILE_OP_TYPE_READ:
        if ((ssize_t)event->res != oper->len)
        {
          log_text(LOG_FATAL, "Asynchronous read failed!\n");
          return 1;
        }
        break;
      case FILE_OP_TYPE_WRITE:
        if ((ssize_t)event->res != oper->len)
        {
          log_text(LOG_FATAL, "Asynchronous write failed!\n");
          return 1;
        }
        break;
      default:
        break;
    }
    free(oper);
    aio_ctxts[thread_id].nrequests--;
  }
  
  return 0;
}
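The oper = (sb_aio_oper_t *)iocbp cast above only works if the struct iocb is the first member of the operation structure, so that both share an address (presumably how sysbench defines it). A sketch of that layout assumption (field names here are illustrative, not sysbench's actual definition):

#include <libaio.h>
#include <sys/types.h>

struct my_aio_oper
{
  struct iocb iocb;   /* must stay first: (struct iocb *)oper == &oper->iocb */
  int         type;   /* FILE_OP_TYPE_READ / FILE_OP_TYPE_WRITE / ...        */
  ssize_t     len;    /* expected transfer size, compared against event->res */
};

When the iocb is not the first member, the container_of() idiom used in the QEMU examples below (or folly's get_parent_from_member in Example #14) is the safe way to get back to the owning structure.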
Example #3
void fun_read(void *ptr)
{
	long n = MAX_AIO_EVENTS;
	struct stat filestat;
	long long exSize;
	long i;
	long r;

	while (n > 0) {
		r = io_getevents(ctxp, 1, MAX_AIO_EVENTS, ioevents, NULL);
		if (r < 0) 
			fail("io_getevents returned %ld\n", r);

		n -= r;
		for (i = 0; i < r; ++i) {
			if (ioevents[i].obj->u.c.nbytes != BUFSIZE)
				fail("error in block: expacted %d bytes, "
				     "receiced %ld\n", BUFSIZE,
				     ioevents[i].obj->u.c.nbytes);

			exSize = ioevents[i].obj->u.c.offset +
				 ioevents[i].obj->u.c.nbytes;
			fstat(handle, &filestat);
			if (filestat.st_size < exSize)
				fail("write of %lu bytes @%llu finished, "
				     "expected filesize at least %llu, but "
				     "got %ld\n", ioevents[i].obj->u.c.nbytes,
				     ioevents[i].obj->u.c.offset, exSize,
				     filestat.st_size);
		}
	}
}
Example #4
int
__mb_aiom_getevents(mb_aiom_t *aiom, long min_nr, long nr,
                    struct io_event *events, struct timespec *timeout){
    int i;
    int nr_completed;
    aiom_cb_t *aiom_cb;
    struct io_event *event;

    nr_completed = io_getevents(aiom->context, min_nr, nr, events, timeout);
    if (nr_completed < 0) {
        /* io_getevents returns a negative errno value on failure */
        return nr_completed;
    }
    aiom->nr_inflight -= nr_completed;
    aiom->iocount += nr_completed;

    if (aio_tracefile != NULL) {
        fprintf(aio_tracefile,
                "[%d] %d infl %d comp\n",
                tid, aiom->nr_inflight, nr_completed);
    }

    for(i = 0; i < nr_completed; i++){
        event = &events[i];
        aiom_cb = (aiom_cb_t *) event->obj;

        // TODO: callback or something
        if (!(event->res == option.blk_sz && event->res2 == 0)){
            fprintf(stderr, "fatal error: res = %ld, res2 = %ld\n",
                    (long) event->res, (long) event->res2);
        }

        aiom->iowait += mb_elapsed_time_from(&aiom_cb->submit_time);

        mb_res_pool_push(aiom->cbpool, aiom_cb);
    }

    return nr_completed;
}
Example #5
static void qemu_laio_completion_cb(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    while (1) {
        struct io_event events[MAX_EVENTS];
        uint64_t val;
        ssize_t ret;
        struct timespec ts = { 0 };
        int nevents, i;

        do {
            ret = read(s->efd, &val, sizeof(val));
        } while (ret == -1 && errno == EINTR);

        if (ret == -1 && errno == EAGAIN)
            break;

        if (ret != 8)
            break;

        do {
            nevents = io_getevents(s->ctx, val, MAX_EVENTS, events, &ts);
        } while (nevents == -EINTR);

        for (i = 0; i < nevents; i++) {
            struct iocb *iocb = events[i].obj;
            struct qemu_laiocb *laiocb =
                    container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[i]);
            qemu_laio_process_completion(s, laiocb);
        }
    }
}
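The handler above first reads an eventfd to learn how many requests have completed, then reaps that many events with a zero timeout. For this to work, every request must be bound to the eventfd at submission time; with libaio that is done via io_set_eventfd(). A minimal sketch of the submission side (assumed for illustration, not QEMU's actual submission code):

#include <libaio.h>
#include <sys/eventfd.h>

/* Bind one read request to an eventfd so an event loop can poll efd and
 * learn how many completions are ready before calling io_getevents().
 * The iocb must stay alive until its completion is reaped, because
 * io_event.obj points back at it. */
static int submit_with_eventfd(io_context_t ctx, int efd, struct iocb *cb,
                               int fd, void *buf, size_t len, long long off)
{
    io_prep_pread(cb, fd, buf, len, off);
    io_set_eventfd(cb, efd);    /* completion increments the eventfd counter */
    return io_submit(ctx, 1, &cb);
}

The eventfd itself would be created with eventfd(0, EFD_NONBLOCK) and registered with the loop; each 8-byte read() of it returns the number of completions accumulated since the last read and resets the counter.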
Example #6
        void native_linux_aio_provider::get_event()
        {
            struct io_event events[1];
            int ret;

            const char* name = ::dsn::tools::get_service_node_name(node());
            char buffer[128];
            sprintf(buffer, "%s.aio", name);
            task_worker::set_name(buffer);

            while (true)
            {
                ret = io_getevents(_ctx, 1, 1, events, NULL);
                if (ret > 0) // should be 1
                {
                    dassert(ret == 1, "");
                    struct iocb *io = events[0].obj;
                    complete_aio(io, static_cast<int>(events[0].res), static_cast<int>(events[0].res2));
                }
                else
                {
                    dwarn("io_getevents returns %d, you probably want to try on another machine:-(", ret);
                }
            }
        }
Example #7
void file_aio_loop(void) 
{
	struct io_event event[16];
	struct timespec io_ts;
	int res;
	
	io_ts.tv_sec = 0;
	io_ts.tv_nsec = 0;

	res = io_getevents(*aio_queue, 1, 16, event, &io_ts);
	printf("res = %d\r\n", res);
	if(res > 0) 
	{
		int i;
		for(i = 0; i < res; i++) 
		{
			io_callback_t callback = (io_callback_t)event[i].data;
			
			struct iocb *iocb = event[i].obj;
			callback(*aio_queue, iocb, event[i].res, event[i].res2);
		}	
	}
	else if(res < 0) 
	{
		printf("file_aio_loop %s\n", strerror(errno));
	}
}
Example #8
int compat0_1_io_queue_wait(io_context_t ctx, struct timespec *when)
{
	struct timespec timeout;
	if (when)
		timeout = *when;
	return io_getevents(ctx, 0, 0, NULL, when ? &timeout : NULL);
}
Example #9
int aio_read_random(char* file_name)
{
	int ret = 0;
	
	int fd = -1;
	fd = ::open(file_name, O_RDONLY);
	fprintf(stdout, "open %s, fd %d\n", file_name, fd);

	int64_t file_length = ::lseek(fd, 0, SEEK_END);
	int piece_num = (file_length + PIECE_LEN - 1)/ PIECE_LEN;
	int bitfield_size = (piece_num + 8 - 1)/8;
	u_int8_t* bitfield = (u_int8_t*)malloc(bitfield_size);
	memset(bitfield, 0, bitfield_size);

	fprintf(stdout, "file_length=%ld, piece_num=%d, bitfield_size=%d\n", file_length, piece_num, bitfield_size);

	io_context_t myctx;
	memset(&myctx, 0, sizeof(myctx));
	io_queue_init(AIO_MAXIO, &myctx);

	while(1)
	{
		// read piece by random
		int piece_index = rand_index(piece_num);
		int piece_pos = bitfield_find_unset(bitfield, piece_num, piece_index);
		if(piece_pos == -1)
		{
			break;
		}
		//printf("read piece=%d\n", piece_pos);
		struct iocb* io2 = (struct iocb*)malloc(sizeof(struct iocb));
		memset(io2, 0, sizeof(struct iocb));

		u_int8_t* buff = NULL;
		posix_memalign((void **)&buff, getpagesize(), PIECE_LEN);
		io_prep_pread(io2, fd, buff, PIECE_LEN, PIECE_LEN*piece_pos);
		io2->data = (void *)(long)piece_pos;
		io_submit(myctx, 1, &io2);

		struct io_event events[AIO_MAXIO];
		int num = io_getevents(myctx, 0, AIO_MAXIO, events, NULL);
	    //printf("io_request completed %d\n", num);
		for(int i=0;i<num;i++)
		{
			struct iocb *objp = events[i].obj;
			int finish_piece_pos = (int)(long)objp->data;
			//printf("done_piece=%d, res=%ld, res2=%ld\n", finish_piece_pos, events[i].res, events[i].res2);
			//cb(myctx, io2, events[i].res, events[i].res2);
			bitfield_set_one(bitfield, piece_num, finish_piece_pos);
			free(objp->u.c.buf);
			free(objp);
		}
		
	}
	
	io_queue_release(myctx);
	free(bitfield);

	close(fd);
	fd = -1;
	
	return 0;
}
Example #10
static void
blockdev_aio_poll(void *arg)
{
	struct blockdev_aio_io_channel *ch = arg;
	int nr, i;
	enum spdk_bdev_io_status status;
	struct blockdev_aio_task *aio_task;
	struct timespec timeout;

	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;

	nr = io_getevents(ch->io_ctx, 1, ch->queue_depth,
			  ch->events, &timeout);

	if (nr < 0) {
		SPDK_ERRLOG("%s: io_getevents returned %d\n", __func__, nr);
		return;
	}

	for (i = 0; i < nr; i++) {
		aio_task = ch->events[i].data;
		if (ch->events[i].res != aio_task->len) {
			status = SPDK_BDEV_IO_STATUS_FAILED;
		} else {
			status = SPDK_BDEV_IO_STATUS_SUCCESS;
		}

		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(aio_task), status);
	}
}
Example #11
/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * the completion events array and index are kept in qemu_laio_state.  The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed.  When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(s, laiocb);
    }
}
Example #12
File: io_aio.c Project: choki/wlg
/* Static functions */
static void *aio_dequeue(void *arg)
{
    struct io_event event;
    int rtn;
    int resp;
    my_iocb *cbp;

    while(1){
	resp = io_getevents(context, 1, 1, &event, NULL);
	//PRINT("io_getevent resp:%d\n", resp);
	if(resp <= 0){
	    if(resp == 0){
		continue;
	    }else if(resp == -EINTR){
		PRINT("EINTR received\n");
		continue;
	    }
	    PRINT("Error in io_getevents, func:%s, line:%d, err:%d\n", __func__, __LINE__, resp);
	    //exit(1);
	}
	else{
	    cbp = (my_iocb *)event.obj;
	    PRINT("\tFinished qid:%d\n", cbp->qid);
	    clear_id(cbp->qid);
	    pthread_mutex_lock(&aio_req_num_mutex);
	    req_num--;
	    pthread_mutex_unlock(&aio_req_num_mutex);
	}
	//TODO for test
    	//usleep(1000);
    }

}
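A detail worth spelling out, since several of these examples trip over it: libaio's io_getevents() returns the number of reaped events on success and a negative errno value on failure; it does not set errno. A minimal checking pattern (a generic sketch, not tied to any project above):

#include <errno.h>
#include <libaio.h>
#include <stdio.h>
#include <string.h>

static int reap_events(io_context_t ctx, struct io_event *evs, long nr)
{
    int n;

    do {
        n = io_getevents(ctx, 1, nr, evs, NULL);
    } while (n == -EINTR);                      /* interrupted: just retry */

    if (n < 0) {                                /* other failure: -errno */
        fprintf(stderr, "io_getevents: %s\n", strerror(-n));
        return -1;
    }
    return n;                                   /* number of completed events */
}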
Example #13
int
tap_aio_get_events(tap_aio_context_t *ctx)
{
        int nr_events = 0;

        if (!ctx->poll_in_thread)
                nr_events = io_getevents(ctx->aio_ctx, 1,
                                         ctx->max_aio_events, ctx->aio_events, NULL);
        else {
		int r;
		r = read(ctx->completion_fd[0], &nr_events, sizeof(nr_events));
		if (r < 0) {
			if (errno == EAGAIN || errno == EINTR)
				return 0;
			/* This is pretty bad, we'll probably spin */
			DPRINTF("Aargh, read completion_fd failed: %s",
				strerror(errno));
		} else if (r != sizeof(nr_events)) {
			/* Should never happen because sizeof(nr_events)
			 * fits in the guaranteed atomic pipe write size.
			 * Blundering on is slightly nicer than asserting */
			DPRINTF("Aargh, read completion_fd short read %d", r);
		}
	}

        return nr_events;
}
Example #14
Range<AsyncIO::Op**> AsyncIO::doWait(size_t minRequests, size_t maxRequests) {
  io_event events[pending_];
  int count;
  do {
    // Wait forever
    count = io_getevents(ctx_, minRequests, maxRequests, events, nullptr);
  } while (count == -EINTR);
  checkKernelError(count, "AsyncIO: io_getevents failed");
  DCHECK_GE(count, minRequests);  // the man page says so
  DCHECK_LE(count, pending_);

  completed_.clear();
  if (count == 0) {
    return folly::Range<Op**>();
  }

  for (size_t i = 0; i < count; ++i) {
    DCHECK(events[i].obj);
    Op* op = boost::intrusive::get_parent_from_member(
        events[i].obj, &AsyncIOOp::iocb_);
    --pending_;
    op->complete(events[i].res);
    completed_.push_back(op);
  }

  return folly::Range<Op**>(&completed_.front(), count);
}
Example #15
static errcode_t unix_vec_read_blocks(io_channel *channel,
				      struct io_vec_unit *ivus, int count)
{
	int i;
	int ret;
	io_context_t io_ctx;
	struct iocb *iocb = NULL, **iocbs = NULL;
	struct io_event *events = NULL;
	int64_t offset;
	int submitted, completed = 0;

	ret = OCFS2_ET_NO_MEMORY;
	iocb = malloc((sizeof(struct iocb) * count));
	iocbs = malloc((sizeof(struct iocb *) * count));
	events = malloc((sizeof(struct io_event) * count));
	if (!iocb || !iocbs || !events)
		goto out;

	memset(&io_ctx, 0, sizeof(io_ctx));
	ret = io_queue_init(count, &io_ctx);
	if (ret)
		return ret;

	for (i = 0; i < count; ++i) {
		offset = ivus[i].ivu_blkno * channel->io_blksize;
		io_prep_pread(&(iocb[i]), channel->io_fd, ivus[i].ivu_buf,
			      ivus[i].ivu_buflen, offset);
		iocbs[i] = &iocb[i];
	}

resubmit:
	ret = io_submit(io_ctx, count - completed, &iocbs[completed]);
	if (!ret && (count - completed))
		ret = OCFS2_ET_SHORT_READ;
	if (ret < 0)
		goto out;
	submitted = ret;

	ret = io_getevents(io_ctx, submitted, submitted, events, NULL);
	if (ret < 0)
		goto out;

	completed += submitted;
	if (completed < count)
		goto resubmit;

out:
	if (ret >= 0)
		ret = 0;
	if (!ret)
		channel->io_bytes_read += (count * channel->io_blksize);
	free(iocb);
	free(iocbs);
	free(events);
	io_queue_release(io_ctx);

	return ret;
}
Example #16
long test_write(aio_context_t ctx, int fd, long range, int afd) {
	long i, n, r, j;
	u_int64_t eval;
	struct iocb **piocb;
	struct iocb *iocb;
	struct timespec tmo;
	static struct io_event events[NUM_EVENTS];
	static char buf[IORTX_SIZE];

	for (i = 0; i < IORTX_SIZE; i++)
		buf[i] = i & 0xff;
	n = range / IORTX_SIZE;
	iocb = malloc(n * sizeof(struct iocb));
	piocb = malloc(n * sizeof(struct iocb *));
	if (!iocb || !piocb) {
		perror("iocb alloc");
		return -1;
	}
	for (i = 0; i < n; i++) {
		piocb[i] = &iocb[i];
		asyio_prep_pwrite(&iocb[i], fd, buf, sizeof(buf),
				  (n - i - 1) * IORTX_SIZE, afd);
		iocb[i].aio_data = (u_int64_t) i + 1;
	}
	fprintf(stdout, "submitting write request ...\n");
	if (io_submit(ctx, n, piocb) <= 0) {
		perror("io_submit");
		return -1;
	}
	for (i = 0; i < n;) {
		fprintf(stdout, "waiting ... ");
		waitasync(afd, -1);
		eval = 0;
		if (read(afd, &eval, sizeof(eval)) != sizeof(eval))
			perror("read");
		fprintf(stdout, "done! %llu\n", (unsigned long long) eval);
		while (eval > 0) {
			tmo.tv_sec = 0;
			tmo.tv_nsec = 0;
			r = io_getevents(ctx, 1, eval > NUM_EVENTS ? NUM_EVENTS: (long) eval,
					 events, &tmo);
			if (r > 0) {
				for (j = 0; j < r; j++) {
					/* this test does not inspect the individual events */
				}
				i += r;
				eval -= r;
				fprintf(stdout, "test_write got %ld/%ld results so far\n",
					i, n);
			}
		}
	}
	free(iocb);
	free(piocb);

	return n;
}
Example #17
int compat0_1_io_getevents(io_context_t ctx_id, long nr,
		       struct io_event *events,
		       const struct timespec *const_timeout)
{
	struct timespec timeout;
	if (const_timeout)
		timeout = *const_timeout;
	return io_getevents(ctx_id, 1, nr, events,
			const_timeout ? &timeout : NULL);
}
Example #18
static void* aiobe_thread(void* p) {
	struct device* dev = (struct device*)p;
	struct io_event event;
	while(1) {
		int ret=io_getevents(D(dev)->ctx, 1, 1, &event, NULL);
		if (ret!=1)
			break;
		aiobe_complete((struct aiobe_request*) event.data, event.res);
	}
	return NULL;
}
Example #19
static int
sector_io(struct sbd_context *st, int sector, void *data, int rw)
{
	struct timespec	timeout;
	struct io_event event;
	struct iocb	*ios[1] = { &st->io };
	long		r;

	timeout.tv_sec  = timeout_io;
	timeout.tv_nsec = 0;

	memset(&st->io, 0, sizeof(struct iocb));
	if (rw) {
		io_prep_pwrite(&st->io, st->devfd, data, sector_size, sector_size * sector);
	} else {
		io_prep_pread(&st->io, st->devfd, data, sector_size, sector_size * sector);
	}

	if (io_submit(st->ioctx, 1, ios) != 1) {
		cl_log(LOG_ERR, "Failed to submit IO request! (rw=%d)", rw);
		return -1;
	}

	errno = 0;
	r = io_getevents(st->ioctx, 1L, 1L, &event, &timeout);

	if (r < 0 ) {
		cl_log(LOG_ERR, "Failed to retrieve IO events (rw=%d)", rw);
		return -1;
	} else if (r < 1L) {
		cl_log(LOG_INFO, "Cancelling IO request due to timeout (rw=%d)", rw);
		r = io_cancel(st->ioctx, ios[0], &event);
		if (r) {
			DBGLOG(LOG_INFO, "Could not cancel IO request (rw=%d)", rw);
			/* Doesn't really matter, debugging information.
			 */
		}
		return -1;
	} else if (r > 1L) {
		cl_log(LOG_ERR, "More than one IO was returned (r=%ld)", r);
		return -1;
	}

	
	/* IO is happy */
	if (event.res == sector_size) {
		return 0;
	} else {
		cl_log(LOG_ERR, "Short IO (rw=%d, res=%lu, sector_size=%d)",
				rw, event.res, sector_size);
		return -1;
	}
}
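sector_io() assumes st->ioctx and st->devfd were prepared once, elsewhere. A sketch of what that one-time setup might look like (only the ioctx, devfd and io members appear in the code above; everything else here is an assumption, not taken from sbd):

/* Sketch only. io_setup() requires the context value to be zero
 * before the call. */
static int open_sbd_device(struct sbd_context *st, const char *devname)
{
	memset(&st->ioctx, 0, sizeof(st->ioctx));
	if (io_setup(1, &st->ioctx) != 0)
		return -1;

	st->devfd = open(devname, O_RDWR | O_SYNC | O_DIRECT);
	if (st->devfd < 0) {
		io_destroy(st->ioctx);
		return -1;
	}
	return 0;
}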
Example #20
void random_io(int fd, off_t ionum, int access_size, int num_requests)
{
  // (1) Initialize the io_context_t
  io_context_t ctx;
  memset(&ctx, 0, sizeof(io_context_t));
  int r = io_setup(num_requests, &ctx);
  assert(r == 0);

  // (2) Build the iocbs (the I/O requests)
  struct iocb **iocbs = new struct iocb*[num_requests];
  char **bufs = new char*[num_requests];
  for (int i = 0; i < num_requests; i++) {
    iocbs[i] = new struct iocb();
    posix_memalign((void **)&bufs[i], 512, access_size);

    off_t block_number = rand() % ionum;
    io_prep_pread(iocbs[i], fd, bufs[i], access_size, block_number * access_size);
    io_set_callback(iocbs[i], read_done);
  }

  // (3) Submit the I/O requests
  r = io_submit(ctx, num_requests, iocbs);
  assert(r == num_requests);

  // (4) Wait for completed I/O requests and invoke the callback for each finished one
  int cnt = 0;
  while (true) {
    struct io_event events[32];
    int n = io_getevents(ctx, 1, 32, events, NULL);
    if (n > 0)
      cnt += n;

    for (int i = 0; i < n; i++) {
      struct io_event *ev = events + i;
      io_callback_t callback = (io_callback_t)ev->data;
      struct iocb *iocb = ev->obj;
      callback(ctx, iocb, ev->res, ev->res2);
    }

    if (n == 0 || cnt == num_requests)
      break;
  }

  for (int i = 0; i < num_requests; i++) {
    delete iocbs[i];
    free(bufs[i]);
  }
  delete[] iocbs;
  delete[] bufs;
}
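The completion loop above pulls the callback pointer stored by io_set_callback() out of ev->data and invokes it with libaio's io_callback_t signature. The read_done callback itself is not shown; a minimal version might look like this (the name read_done comes from the call above, the body is purely illustrative):

// Assumes <cstdio> and <libaio.h> are included. io_callback_t is:
//   void (*)(io_context_t ctx, struct iocb *iocb, long res, long res2)
static void read_done(io_context_t ctx, struct iocb *iocb, long res, long res2)
{
  (void)ctx;
  if (res2 != 0 || res != (long)iocb->u.c.nbytes) {
    fprintf(stderr, "read at offset %lld failed: res=%ld res2=%ld\n",
            iocb->u.c.offset, res, res2);
    return;
  }
  // res bytes starting at iocb->u.c.buf are now valid
}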
Example #21
JNIEXPORT void JNICALL Java_org_apache_activemq_artemis_jlibaio_LibaioContext_deleteContext(JNIEnv* env, jclass clazz, jobject contextPointer) {
    int i;
    struct io_control * theControl = getIOControl(env, contextPointer);
    if (theControl == NULL) {
      return;
    }

    struct iocb * iocb = getIOCB(theControl);

    if (iocb == NULL) {
        throwIOException(env, "Not enough space in libaio queue");
        return;
    }

    // Submitting a dumb write so the loop finishes
    io_prep_pwrite(iocb, dumbWriteHandler, 0, 0, 0);
    iocb->data = (void *) -1;
    if (!submit(env, theControl, iocb)) {
        return;
    }

    // to make sure the poll has finished
    pthread_mutex_lock(&(theControl->pollLock));
    pthread_mutex_unlock(&(theControl->pollLock));

    // To return any pending IOCBs
    int result = io_getevents(theControl->ioContext, 0, 1, theControl->events, 0);
    for (i = 0; i < result; i++) {
        struct io_event * event = &(theControl->events[i]);
        struct iocb * iocbp = event->obj;
        putIOCB(theControl, iocbp);
    }

    io_queue_release(theControl->ioContext);

    pthread_mutex_destroy(&(theControl->pollLock));
    pthread_mutex_destroy(&(theControl->iocbLock));

    // Releasing each individual iocb
    for (i = 0; i < theControl->queueSize; i++) {
       free(theControl->iocb[i]);
    }

    (*env)->DeleteGlobalRef(env, theControl->thisObject);

    free(theControl->iocb);
    free(theControl->events);
    free(theControl);
}
Example #22
int o2test_aio_query(struct o2test_aio *o2a, long min_nr, long nr)
{
	int ret = 0;
	struct io_event ev;

	ret = io_getevents(o2a->o2a_ctx, min_nr, nr, &ev, NULL);
	if (ret < min_nr) {
		fprintf(stderr, "error %s during %s\n",
			ret < 0 ? strerror(-ret) : "short event count",
			"io_getevents");
		ret = -1;
	}

	return ret;
}
Example #23
int FlatFileReader::FinishRead(void)
{
	u32 bytes;

	int min_nr = 1;
	int max_nr = 1;
	struct io_event events[max_nr];

	int event = io_getevents(m_aio_context, min_nr, max_nr, events, NULL);
	if (event < 1) {
		return -1;
	}

	return 1;
}
Example #24
static ssize_t aio_pwrite(int fd, void *buf, size_t nbytes, off_t offset)
{
    aio_cb.aio_lio_opcode = IOCB_CMD_PWRITE;
    aio_cb.aio_fildes = fd;
    aio_cb.aio_buf = (unsigned long) buf;
    aio_cb.aio_nbytes = nbytes;
    aio_cb.aio_offset = offset;

    if (io_submit(aio_ctx, 1, &aio_cbp) != 1)
        err(1, "aio submit failed");

    if (io_getevents(aio_ctx, 1, 1, &aio_ev, NULL) != 1)
        err(1, "aio getevents failed");

    if (aio_ev.res < 0) {
        errno = -aio_ev.res;
        return -1;
    }

    if (!cached && fdatasync(fd) < 0)
        return -1;

    return aio_ev.res;

#if 0
    aio_cb.aio_lio_opcode = IOCB_CMD_FDSYNC;
    if (io_submit(aio_ctx, 1, &aio_cbp) != 1)
        err(1, "aio fdsync submit failed");

    if (io_getevents(aio_ctx, 1, 1, &aio_ev, NULL) != 1)
        err(1, "aio getevents failed");

    if (aio_ev.res < 0)
        return aio_ev.res;
#endif
}
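Unlike the libaio-based examples, this one fills in the raw kernel struct iocb from <linux/aio_abi.h> (IOCB_CMD_PWRITE, aio_fildes, aio_buf, ...), so the io_submit()/io_getevents() it calls are presumably thin wrappers around the raw system calls rather than libaio functions. A sketch of such wrappers (assumed, not shown in the original; note that syscall() returns -1 and sets errno on failure, which matches the err() calls above):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long io_setup(unsigned nr_events, aio_context_t *ctx)
{
    return syscall(SYS_io_setup, nr_events, ctx);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
    return syscall(SYS_io_submit, ctx, nr, iocbpp);
}

static long io_getevents(aio_context_t ctx, long min_nr, long nr,
                         struct io_event *events, struct timespec *timeout)
{
    return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}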
Example #25
static int swAioLinux_onFinish(swReactor *reactor, swEvent *event)
{
    struct io_event events[SW_AIO_MAX_EVENTS];
    swAio_event aio_ev;
    uint64_t finished_aio;
    struct iocb *aiocb;
    struct timespec tms;
    int i, n;

    if (read(event->fd, &finished_aio, sizeof(finished_aio)) != sizeof(finished_aio))
    {
        swWarn("read() failed. Error: %s[%d]", strerror(errno), errno);
        return SW_ERR;
    }

    while (finished_aio > 0)
    {
        tms.tv_sec = 0;
        tms.tv_nsec = 0;
        n = io_getevents(swoole_aio_context, 1, SW_AIO_MAX_EVENTS, events, &tms);
        if (n > 0)
        {
            for (i = 0; i < n; i++)
            {
                aiocb = (struct iocb *) events[i].obj;
                if ((int) events[i].res < 0)
                {
                    aio_ev.error = abs((int) events[i].res);
                    aio_ev.ret = -1;
                }
                else
                {
                    aio_ev.ret = (int) events[i].res;
                }
                aio_ev.fd = aiocb->aio_fildes;
                aio_ev.type = aiocb->aio_lio_opcode == IOCB_CMD_PREAD ? SW_AIO_READ : SW_AIO_WRITE;
                aio_ev.nbytes = aio_ev.ret;
                aio_ev.offset = aiocb->aio_offset;
                aio_ev.buf = (void *) aiocb->aio_buf;
                aio_ev.task_id = aiocb->aio_reqprio;
                SwooleAIO.callback(&aio_ev);
            }
            finished_aio -= n;
            SwooleAIO.task_num -= n;
        }
    }
    return SW_OK;
}
Example #26
/* Called by each worker thread */
static void* worker_func(void* aio_context)
{
	int num_events = -1, i = 0;
	as_async_info_t *reference;
	struct io_event events[MAXEVENTS];

	while(g_running)
	{
		num_events = io_getevents(*(aio_context_t*)aio_context, 1, MAXEVENTS, (events + 0), NULL);
		for(i = 0; i < num_events; i++)
		{
			reference = (as_async_info_t*)events[i].data;
			process_read(reference);  
		}
	}
	return (0);	
}
Example #27
static PyObject *IOManager_getevents(IOManager *self, PyObject *args) {
   long min_nr;
   PyObject *timeout = NULL, *rv, *ptype, *pval, *ptb;
   IORequest *req;
   int rc, i;
   double timeout_d = 0.0;
   struct timespec tv, *tvp;
   
   if (!PyArg_ParseTuple(args, "lO", &min_nr, &timeout)) return NULL;
   if (min_nr > self->pending_events) {
      PyErr_SetString(PyExc_ValueError, "min_nr too large: insufficient outstanding requests to fulfill.");
      return NULL;
   }
   
   if (timeout == Py_None) tvp = NULL;
   else {
      tvp = &tv;
      timeout_d = PyFloat_AsDouble(timeout);
      if (PyErr_Occurred()) return NULL;
      tv.tv_sec = (long) timeout_d;
      tv.tv_nsec = ((timeout_d - (double)(long) timeout_d) * 1E9);
   }
   
   rc = io_getevents(self->ctx, min_nr, self->nr_events, self->events, tvp);
   if (rc < 0) {
      PyErr_SetFromErrno(PyExc_OSError);
      return NULL;
   }
   
   self->pending_events -= rc;
   if (!(rv = PyTuple_New(rc))) {
      /* Talk about being painted into a corner.*/
      PyErr_Fetch(&ptype, &pval, &ptb);
      for (i = 0; i < rc; i++) Py_DECREF(self->events[i].data);
      PyErr_Restore(ptype, pval, ptb);
      return NULL;
   }
   for (i = 0; i < rc; i++) {
      req = self->events[i].data;
      req->res = self->events[i].res;
      req->res2 = self->events[i].res2;
      PyTuple_SET_ITEM(rv, i, (PyObject*) req);
   }
   return rv;
}
Example #28
JNIEXPORT jint JNICALL Java_org_apache_activemq_artemis_jlibaio_LibaioContext_poll
  (JNIEnv * env, jobject obj, jobject contextPointer, jobjectArray callbacks, jint min, jint max) {
    int i = 0;
    struct io_control * theControl = getIOControl(env, contextPointer);
    if (theControl == NULL) {
      return 0;
    }


    int result = io_getevents(theControl->ioContext, min, max, theControl->events, 0);
    int retVal = result;

    for (i = 0; i < result; i++) {
        struct io_event * event = &(theControl->events[i]);
        struct iocb * iocbp = event->obj;
        int eventResult = (int)event->res;

        #ifdef DEBUG
            fprintf (stdout, "Poll res: %d totalRes=%d\n", eventResult, result);
        #endif

        if (eventResult < 0) {
            #ifdef DEBUG
                fprintf (stdout, "Error: %s\n", strerror(-eventResult));
            #endif

            if (iocbp->data != NULL && iocbp->data != (void *) -1) {
                jstring jstrError = (*env)->NewStringUTF(env, strerror(-eventResult));

                (*env)->CallVoidMethod(env, (jobject)(iocbp->data), errorMethod, (jint)(-eventResult), jstrError);
            }
        }

        if (iocbp->data != NULL && iocbp->data != (void *) -1) {
            (*env)->SetObjectArrayElement(env, callbacks, i, (jobject)iocbp->data);
            // We delete the globalRef after the completion of the callback
            (*env)->DeleteGlobalRef(env, (jobject)iocbp->data);
        }

        putIOCB(theControl, iocbp);
    }

    return retVal;
}
Example #29
File: perf.c Project: ninataki/spdk
static void
aio_check_io(struct ns_worker_ctx *ns_ctx)
{
    int count, i;
    struct timespec timeout;

    timeout.tv_sec = 0;
    timeout.tv_nsec = 0;

    count = io_getevents(ns_ctx->ctx, 1, g_queue_depth, ns_ctx->events, &timeout);
    if (count < 0) {
        fprintf(stderr, "io_getevents error\n");
        exit(1);
    }

    for (i = 0; i < count; i++) {
        task_complete(ns_ctx->events[i].data);
    }
}
Example #30
File: overhead.c Project: spdk/spdk
static void
aio_check_io(void)
{
	int count, i;
	struct timespec timeout;

	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;

	count = io_getevents(g_ns->u.aio.ctx, 1, 1, g_ns->u.aio.events, &timeout);
	if (count < 0) {
		fprintf(stderr, "io_getevents error\n");
		exit(1);
	}

	for (i = 0; i < count; i++) {
		g_ns->current_queue_depth--;
	}
}