Example No. 1
AsyncFile::~AsyncFile()
{
	if (io_queue_release(aioContext))
	{
		throw AIOException(NATIVE_ERROR_CANT_RELEASE_AIO,"Can't release aio");
	}
	if (::close(fileHandle))
	{
		throw AIOException(NATIVE_ERROR_CANT_OPEN_CLOSE_FILE,"Can't close file");
	}
	free(events);
	::pthread_mutex_destroy(&fileMutex);
	::pthread_mutex_destroy(&pollerMutex);
}
JNIEXPORT void JNICALL Java_org_apache_activemq_artemis_jlibaio_LibaioContext_deleteContext(JNIEnv* env, jclass clazz, jobject contextPointer) {
    int i;
    struct io_control * theControl = getIOControl(env, contextPointer);
    if (theControl == NULL) {
      return;
    }

    struct iocb * iocb = getIOCB(theControl);

    if (iocb == NULL) {
        throwIOException(env, "Not enough space in libaio queue");
        return;
    }

    // Submitting a dumb write so the loop finishes
    io_prep_pwrite(iocb, dumbWriteHandler, 0, 0, 0);
    iocb->data = (void *) -1;
    if (!submit(env, theControl, iocb)) {
        return;
    }

    // to make sure the poll has finished
    pthread_mutex_lock(&(theControl->pollLock));
    pthread_mutex_unlock(&(theControl->pollLock));

    // To return any pending IOCBs
    int result = io_getevents(theControl->ioContext, 0, 1, theControl->events, 0);
    for (i = 0; i < result; i++) {
        struct io_event * event = &(theControl->events[i]);
        struct iocb * iocbp = event->obj;
        putIOCB(theControl, iocbp);
    }

    io_queue_release(theControl->ioContext);

    pthread_mutex_destroy(&(theControl->pollLock));
    pthread_mutex_destroy(&(theControl->iocbLock));

    // Releasing each individual iocb
    for (i = 0; i < theControl->queueSize; i++) {
       free(theControl->iocb[i]);
    }

    (*env)->DeleteGlobalRef(env, theControl->thisObject);

    free(theControl->iocb);
    free(theControl->events);
    free(theControl);
}
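
Both snippets in Example No. 1 only reach io_queue_release() on their teardown paths. For orientation, here is a minimal sketch (not taken from either project) of the lifecycle those teardown paths assume: io_queue_init(), io_submit(), io_getevents(), then io_queue_release(). The file name, buffer size, and queue depth are arbitrary placeholders.

/* Minimal lifecycle sketch (not from either project above): create a context,
 * submit one asynchronous write, reap its completion, then release the
 * context the way the teardown code in Example No. 1 does. */
#include <libaio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int lifecycle_sketch(void)
{
    io_context_t ctx;
    struct io_event ev;
    struct iocb cb, *cbs[1] = { &cb };
    static char buf[4096];
    int fd;

    memset(&ctx, 0, sizeof(ctx));              /* io_queue_init() expects a zeroed context */
    if (io_queue_init(8, &ctx) < 0)
        return -1;

    fd = open("scratch.bin", O_RDWR | O_CREAT, 0600);   /* hypothetical scratch file */
    if (fd < 0)
        return -1;

    io_prep_pwrite(&cb, fd, buf, sizeof(buf), 0);       /* one asynchronous write at offset 0 */
    if (io_submit(ctx, 1, cbs) != 1)
        return -1;

    if (io_getevents(ctx, 1, 1, &ev, NULL) < 0)         /* reap the completion before teardown */
        return -1;

    close(fd);
    return io_queue_release(ctx);                       /* finally drop the kernel AIO context */
}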
Example No. 3
int file_async_done(void)
{
  unsigned int i;
  
  if (file_io_mode != FILE_IO_MODE_ASYNC)
    return 0;

  for (i = 0; i < sb_globals.num_threads; i++)
  {
    io_queue_release(aio_ctxts[i].io_ctxt);
    free(aio_ctxts[i].events);
  }
  
  free(aio_ctxts);

  return 0;
}  
Example No. 4
/* this is the meat of the state machine.  There is a list of
 * active operation structs, and as each one finishes its required
 * io it is moved to a list of finished operations.  Once they have
 * all finished whatever stage they were in, they are given the chance
 * to restart and pick a different stage (read/write/random read etc.)
 *
 * various timings are printed in between the stages, along with
 * thread synchronization if there is more than one thread.
 */
int worker(struct thread_info *t)
{
    struct io_oper *oper;
    char *this_stage = NULL;
    struct timeval stage_time;
    int status = 0;
    int iteration = 0;
    int cnt;

    aio_setup(&t->io_ctx, 512);

restart:
    if (num_threads > 1) {
        pthread_mutex_lock(&stage_mutex);
	threads_starting++;
	if (threads_starting == num_threads) {
	    threads_ending = 0;
	    gettimeofday(&global_stage_start_time, NULL);
	    pthread_cond_broadcast(&stage_cond);
	}
	while (threads_starting != num_threads)
	    pthread_cond_wait(&stage_cond, &stage_mutex);
        pthread_mutex_unlock(&stage_mutex);
    }
    if (t->active_opers) {
        this_stage = stage_name(t->active_opers->rw);
	gettimeofday(&stage_time, NULL);
	t->stage_mb_trans = 0;
    }

    cnt = 0;
    /* first we send everything through aio */
    while(t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) {
	if (stonewall && threads_ending) {
	    oper = t->active_opers;
	    oper->stonewalled = 1;
	    oper_list_del(oper, &t->active_opers);
	    oper_list_add(oper, &t->finished_opers);
	} else {
	    run_active_list(t, io_iter,  max_io_submit);
        }
	cnt++;
    }
    if (latency_stats)
        print_latency(t);

    if (completion_latency_stats)
	print_completion_latency(t);

    /* then we wait for all the operations to finish */
    oper = t->finished_opers;
    do {
	if (!oper)
		break;
	io_oper_wait(t, oper);
	oper = oper->next;
    } while(oper != t->finished_opers);

    /* then we do an fsync to get the timing for any future operations
     * right, and check to see if any of these need to get restarted
     */
    oper = t->finished_opers;
    while(oper) {
	if (fsync_stages)
            fsync(oper->fd);
	t->stage_mb_trans += oper_mb_trans(oper);
	if (restart_oper(oper)) {
	    oper_list_del(oper, &t->finished_opers);
	    oper_list_add(oper, &t->active_opers);
	    oper = t->finished_opers;
	    continue;
	}
	oper = oper->next;
	if (oper == t->finished_opers)
	    break;
    } 

    if (t->stage_mb_trans && t->num_files > 0) {
        double seconds = time_since_now(&stage_time);
	fprintf(stderr, "thread %llu %s totals (%.2f MB/s) %.2f MB in %.2fs\n",
	        (unsigned long long)(t - global_thread_info), this_stage,
		t->stage_mb_trans/seconds, t->stage_mb_trans, seconds);
    }

    if (num_threads > 1) {
	pthread_mutex_lock(&stage_mutex);
	threads_ending++;
	if (threads_ending == num_threads) {
	    threads_starting = 0;
	    pthread_cond_broadcast(&stage_cond);
	    global_thread_throughput(t, this_stage);
	}
	while(threads_ending != num_threads)
	    pthread_cond_wait(&stage_cond, &stage_mutex);
	pthread_mutex_unlock(&stage_mutex);
    }
    
    /* someone got restarted, go back to the beginning */
    if (t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) {
	iteration++;
        goto restart;
    }

    /* finally, free all the ram */
    while(t->finished_opers) {
	oper = t->finished_opers;
	oper_list_del(oper, &t->finished_opers);
	status = finish_oper(t, oper);
    }

    if (t->num_global_pending) {
        fprintf(stderr, "global num pending is %d\n", t->num_global_pending);
    }
    io_queue_release(t->io_ctx);
    
    return status;
}
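
The comment at the top of worker() mentions thread synchronization between stages. Below is a hedged sketch of that barrier pattern with illustrative names rather than the ones from aio-stress: each thread bumps a shared counter under a mutex, the last arrival broadcasts, and the rest wait on the condition variable. worker() makes the barrier reusable by keeping two counters (threads_starting / threads_ending) and resetting the opposite one when the last thread arrives; this single-use sketch omits that reset.

/* Sketch of a mutex + condition-variable stage barrier.  Names are
 * illustrative, not taken from aio-stress. */
#include <pthread.h>

static pthread_mutex_t barrier_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  barrier_cond  = PTHREAD_COND_INITIALIZER;
static int arrived;

void stage_barrier(int nthreads)
{
    pthread_mutex_lock(&barrier_mutex);
    arrived++;
    if (arrived == nthreads)
        pthread_cond_broadcast(&barrier_cond);   /* last thread releases the rest */
    while (arrived != nthreads)
        pthread_cond_wait(&barrier_cond, &barrier_mutex);
    pthread_mutex_unlock(&barrier_mutex);
}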
Example No. 5
/* Submit multiple I/O requests in a single batch */
int io_tio(char *pathname, int flag, int n, int operation)
{
	int res, fd = 0, i = 0;
	void *bufptr = NULL;
	off_t offset = 0;
	struct timespec timeout;

	io_context_t myctx;
	struct iocb iocb_array[AIO_MAXIO];
	struct iocb *iocbps[AIO_MAXIO];

	fd = open(pathname, flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
	if (fd < 0) {
		printf("open for %s failed: %s\n", pathname, strerror(errno));
		return -1;
	}

	res = io_queue_init(n, &myctx);
	if (res < 0)
		io_error("io_queue_init", res);

	for (i = 0; i < AIO_MAXIO; i++) {

		switch (operation) {
		case IO_CMD_PWRITE:
			if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
				perror(" posix_memalign failed ");
				return -1;
			}
			memset(bufptr, 0, AIO_BLKSIZE);

			io_prep_pwrite(&iocb_array[i], fd, bufptr,
				       AIO_BLKSIZE, offset);
			io_set_callback(&iocb_array[i], work_done);
			iocbps[i] = &iocb_array[i];
			offset += AIO_BLKSIZE;

			break;
		case IO_CMD_PREAD:
			if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
				perror(" posix_memalign failed ");
				return -1;
			}
			memset(bufptr, 0, AIO_BLKSIZE);

			io_prep_pread(&iocb_array[i], fd, bufptr,
				      AIO_BLKSIZE, offset);
			io_set_callback(&iocb_array[i], work_done);
			iocbps[i] = &iocb_array[i];
			offset += AIO_BLKSIZE;
			break;
		case IO_CMD_POLL:
		case IO_CMD_NOOP:
			break;
		default:
			tst_resm(TFAIL,
				 "Command failed; opcode returned: %d\n",
				 operation);
			return -1;
			break;
		}
	}

	do {
		res = io_submit(myctx, AIO_MAXIO, iocbps);
	} while (res == -EAGAIN);
	if (res < 0) {
		io_error("io_submit tio", res);
	}

	/*
	 * We have submitted all the i/o requests. Wait for them to complete and
	 * call the callbacks.
	 */
	wait_count = AIO_MAXIO;

	timeout.tv_sec = 30;
	timeout.tv_nsec = 0;

	switch (operation) {
	case IO_CMD_PREAD:
	case IO_CMD_PWRITE:
		{
			while (wait_count) {
				res = io_wait_run(myctx, &timeout);
				if (res < 0)
					io_error("io_wait_run", res);
			}
		}
		break;
	}

	close(fd);

	for (i = 0; i < AIO_MAXIO; i++) {
		if (iocb_array[i].u.c.buf != NULL) {
			free(iocb_array[i].u.c.buf);
		}
	}

	io_queue_release(myctx);

	return 0;
}
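
io_wait_run() above drives completions and invokes the callbacks registered with io_set_callback(), which stores the callback pointer in iocb->data. Where only the core io_getevents() call is used, roughly the same loop can be written by hand; the sketch below is an assumption-level equivalent rather than code from the test, and the helper name is made up.

/* Hedged sketch of what the io_wait_run() loop above accomplishes, written
 * directly against io_getevents(): each reaped event dispatches the callback
 * stored in iocb->data (work_done() in the example, which decrements
 * wait_count). */
#include <libaio.h>
#include <time.h>

static int run_callbacks(io_context_t ctx, struct timespec *timeout)
{
    struct io_event events[16];
    int n, i;

    n = io_getevents(ctx, 1, 16, events, timeout);   /* block for at least one completion */
    if (n < 0)
        return n;

    for (i = 0; i < n; i++) {
        struct iocb *done = events[i].obj;
        io_callback_t cb = (io_callback_t)done->data;
        cb(ctx, done, events[i].res, events[i].res2);
    }
    return n;
}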
Example No. 6
void loop_aio(int fd, void *buf)
{
  int res;
  unsigned int elapsed;

  /* i/o context initialization */
  io_context_t myctx;
  memset(&myctx, 0, sizeof(myctx));
  if ((res = io_queue_init(maxinflight, &myctx)))
    io_error("io_queue_init", res);

  copied = 0;
  inflight = 0;

  if (opt_verbose)
    printf("[run %d] start\n", runid);

  while (copied < mycount)
  {
    struct iocb *ioq[maxsubmit];
    int tosubmit = 0;
    unsigned long long int index;
    struct iocb *iocb;
    struct timeval tv1, tv2;

    /* filling a context with io queries */
    while (copied + inflight + tosubmit < mycount &&
      inflight + tosubmit < maxinflight &&
      tosubmit < maxsubmit)
    {
      /* Simultaneous asynchronous operations using the same iocb produce undefined results. */
      iocb = calloc(1, sizeof(struct iocb));

      if (mode_rnd)
        index = (mydevsize / myiosize) * random() / RAND_MAX;
      else
        index = copied + inflight + tosubmit;

      if (mode_write)
        io_prep_pwrite(iocb, fd, buf, myiosize, index * myiosize);
      else
        io_prep_pread(iocb, fd, buf, myiosize, index * myiosize);

      io_set_callback(iocb, io_done);
      ioq[tosubmit] = iocb;
      tosubmit += 1;
    }

    /* if there are available slots for submitting queries, do it */
    if (tosubmit)
    {
      /* submit io and check elapsed time */
      gettimeofday(&tv1, NULL);
      if ((res = io_submit(myctx, tosubmit, ioq)) != tosubmit)
      {
        printf("only %d io submitted\n", res);
        io_error("io_submit write", res);
      }
      gettimeofday(&tv2, NULL);
      elapsed = (tv2.tv_sec - tv1.tv_sec) * 1000 + (tv2.tv_usec - tv1.tv_usec) / 1000;
      if (elapsed > 200)
        printf("warning: io_submit() took %d ms, this is suspicious, maybe nr_request is too low.\n", elapsed);

      /* inflight io += newly submitted io */
      inflight += tosubmit;
    }

    /* handle completed io events */
    if ((res = io_queue_run(myctx)) < 0)
      io_error("io_queue_run", res);

    if (inflight == maxinflight ||
      (inflight && copied + inflight == mycount))
    {
      struct io_event event;
      if ((res = io_getevents(myctx, 1, 1, &event, NULL)) < 0)
        io_error("io_getevents", res);
      if (res != 1)
        errx(1, "no events?");
      ((io_callback_t)event.obj->data)(myctx, event.obj, event.res, event.res2);
    }
  }

  io_queue_release(myctx);
}
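
loop_aio() relies on io_done() (registered via io_set_callback() and also invoked directly on the reaped event) to do the completion bookkeeping, but the callback itself is not part of this excerpt. A hypothetical version consistent with the loop's counters might look like the following; the names copied and inflight follow the example, while their types and the error handling are assumptions.

/* Hypothetical completion callback consistent with loop_aio()'s bookkeeping:
 * account for the finished request and free the per-request iocb that was
 * calloc()'d before submission, since reusing an in-flight iocb is undefined. */
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>

extern unsigned long long copied, inflight;   /* counters shared with loop_aio(); types assumed */

void io_done_sketch(io_context_t ctx, struct iocb *iocb, long res, long res2)
{
    (void)ctx;
    if (res2 != 0 || res < 0)
        fprintf(stderr, "aio request %p failed: res=%ld res2=%ld\n",
                (void *)iocb, res, res2);

    copied += 1;       /* one more block accounted for */
    inflight -= 1;     /* and no longer in flight */
    free(iocb);        /* safe now: the kernel is done with this iocb */
}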
Example No. 7
File: aio02.c Project: kraj/ltp
static int io_tio(char *pathname, int flag, int operation)
{
	int res, fd = 0, i = 0;
	void *bufptr = NULL;
	off_t offset = 0;
	struct timespec timeout;
	struct stat fi_stat;
	size_t alignment;

	io_context_t myctx;
	struct iocb iocb_array[AIO_MAXIO];
	struct iocb *iocbps[AIO_MAXIO];

	fd = SAFE_OPEN(pathname, flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

	/* determine the alignment from the blksize of the underlying device */
	SAFE_FSTAT(fd, &fi_stat);
	alignment = fi_stat.st_blksize;

	res = io_queue_init(AIO_MAXIO, &myctx);

	for (i = 0; i < AIO_MAXIO; i++) {

		switch (operation) {
		case IO_CMD_PWRITE:
			if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
				tst_brk(TBROK | TERRNO, "posix_memalign failed");
				return -1;
			}
			memset(bufptr, 0, AIO_BLKSIZE);

			io_prep_pwrite(&iocb_array[i], fd, bufptr,
					   AIO_BLKSIZE, offset);
			io_set_callback(&iocb_array[i], work_done);
			iocbps[i] = &iocb_array[i];
			offset += AIO_BLKSIZE;

			break;
		case IO_CMD_PREAD:
			if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
				tst_brk(TBROK | TERRNO, "posix_memalign failed");
				return -1;
			}
			memset(bufptr, 0, AIO_BLKSIZE);

			io_prep_pread(&iocb_array[i], fd, bufptr,
					  AIO_BLKSIZE, offset);
			io_set_callback(&iocb_array[i], work_done);
			iocbps[i] = &iocb_array[i];
			offset += AIO_BLKSIZE;
			break;
		default:
			tst_res(TFAIL, "Command failed; opcode returned: %d\n", operation);
			return -1;
			break;
		}
	}

	do {
		res = io_submit(myctx, AIO_MAXIO, iocbps);
	} while (res == -EAGAIN);

	if (res < 0)
		io_error("io_submit tio", res);

	/*
	 * We have submitted all the i/o requests. Wait for them to complete and
	 * call the callbacks.
	 */
	wait_count = AIO_MAXIO;

	timeout.tv_sec = 30;
	timeout.tv_nsec = 0;

	switch (operation) {
	case IO_CMD_PREAD:
	case IO_CMD_PWRITE:
		{
			while (wait_count) {
				res = io_wait_run(myctx, &timeout);
				if (res < 0)
					io_error("io_wait_run", res);
			}
		}
		break;
	}

	SAFE_CLOSE(fd);

	for (i = 0; i < AIO_MAXIO; i++)
		if (iocb_array[i].u.c.buf != NULL)
			free(iocb_array[i].u.c.buf);

	io_queue_release(myctx);

	return 0;
}
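
The alignment step above uses LTP's SAFE_FSTAT() wrapper and takes the file's st_blksize as the posix_memalign() alignment. A plain-libc sketch of the same idea, with a helper name of our own:

/* Plain-libc sketch of the alignment step: read the preferred I/O block size
 * of the already-open file from st_blksize and use it as the alignment for
 * posix_memalign(). */
#include <stdlib.h>
#include <sys/stat.h>

static void *alloc_aligned_block(int fd, size_t size)
{
    struct stat st;
    void *buf = NULL;

    if (fstat(fd, &st) < 0)
        return NULL;

    if (posix_memalign(&buf, (size_t)st.st_blksize, size) != 0)
        return NULL;

    return buf;
}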