// Open the file, create an AIO context plus an eventfd for completion
// notification, submit NUM_EVENTS reads and return the eventfd (-1 on error).
// m_filefd and m_ctx are assumed to be class members; iocbs, iocbps,
// NUM_EVENTS, ALIGN_SIZE, RD_WR_SIZE and aio_callback are assumed to be
// defined elsewhere (see the eventfd/epoll example further below).
int AIORead(std::string path, void *buf, int epfd = -1)
{
    int i;
    struct custom_iocb *iocbp;

    m_filefd = openFile(path);
    if (-1 == m_filefd) {
        return -1;
    }

    m_ctx = 0;
    if (io_setup(8192, &m_ctx)) {
        perror("io_setup");
        return -1;
    }

    int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (efd == -1) {
        perror("eventfd");
        return -1;
    }

    if (posix_memalign(&buf, ALIGN_SIZE, RD_WR_SIZE)) {
        perror("posix_memalign");
        return -1;
    }
    printf("buf: %p\n", buf);

    for (i = 0, iocbp = iocbs; i < NUM_EVENTS; ++i, ++iocbp) {
        iocbps[i] = &iocbp->iocb;
        io_prep_pread(&iocbp->iocb, m_filefd, buf, RD_WR_SIZE, i * RD_WR_SIZE);
        io_set_eventfd(&iocbp->iocb, efd);
        io_set_callback(&iocbp->iocb, aio_callback);
        iocbp->nth_request = i + 1;
    }

    if (io_submit(m_ctx, NUM_EVENTS, iocbps) != NUM_EVENTS) {
        perror("io_submit");
        return -1;
    }
    return efd;
}
/*
 * Read complete callback.
 * Change read iocb into a write iocb and start it.
 */
static void rd_done(io_context_t ctx, struct iocb *iocb, long res, long res2)
{
    /* library needs accessors to look at iocb? */
    int iosize = iocb->u.c.nbytes;
    char *buf = iocb->u.c.buf;
    off_t offset = iocb->u.c.offset;

    if (res2 != 0)
        io_error("aio read", res2);
    if (res != iosize) {
        fprintf(stderr, "read missing bytes expect %lu got %ld\n",
            iocb->u.c.nbytes, res);
        exit(1);
    }

    /* turn read into write */
    if (no_write) {
        --tocopy;
        --busy;
        free_iocb(iocb);
    } else {
        io_prep_pwrite(iocb, dstfd, buf, iosize, offset);
        io_set_callback(iocb, wr_done);
        if (1 != (res = io_submit(ctx, 1, &iocb)))
            io_error("io_submit write", res);
    }
    if (debug)
        write(2, "r", 1);
    if (debug > 1)
        printf("%d", iosize);
}
int file_aio_read(int fd, void *start, int size, long long offset)
{
    struct iocb *cb = malloc(sizeof(struct iocb)); /* remember to free it */
    if (!cb) {
        printf("file_aio_read %s\n", strerror(errno));
        return -1;
    }
    io_prep_pread(cb, fd, start, size, offset);
    io_set_callback(cb, read_done);
    return io_submit(*aio_queue, 1, &cb);
}
void random_io(int fd, off_t ionum, int access_size, int num_requests)
{
    // (1) initialize the io_context_t
    io_context_t ctx;
    memset(&ctx, 0, sizeof(io_context_t));
    int r = io_setup(num_requests, &ctx);
    assert(r == 0);

    // (2) build the iocbs (I/O requests)
    struct iocb **iocbs = new struct iocb*[num_requests];
    char **bufs = new char*[num_requests];
    for (int i = 0; i < num_requests; i++) {
        iocbs[i] = new struct iocb();
        posix_memalign((void **)&bufs[i], 512, access_size);
        off_t block_number = rand() % ionum;
        io_prep_pread(iocbs[i], fd, bufs[i], access_size,
                      block_number * access_size);
        io_set_callback(iocbs[i], read_done);
    }

    // (3) submit the I/O requests
    r = io_submit(ctx, num_requests, iocbs);
    assert(r == num_requests);

    // (4) wait for completions and invoke the callback for each finished request
    int cnt = 0;
    while (true) {
        struct io_event events[32];
        int n = io_getevents(ctx, 1, 32, events, NULL);
        if (n > 0)
            cnt += n;
        for (int i = 0; i < n; i++) {
            struct io_event *ev = events + i;
            io_callback_t callback = (io_callback_t)ev->data;
            struct iocb *iocb = ev->obj;
            callback(ctx, iocb, ev->res, ev->res2);
        }
        if (n == 0 || cnt == num_requests)
            break;
    }

    for (int i = 0; i < num_requests; i++) {
        delete iocbs[i];
        free(bufs[i]);
    }
    delete[] iocbs;
    delete[] bufs;
}
static void rd_done(io_context_t ctx, struct iocb *iocb, long res, long res2)
{
    /* library needs accessors to look at iocb */
    int iosize = iocb->u.c.nbytes;
    char *buf = (char *)iocb->u.c.buf;
    off_t offset = iocb->u.c.offset;
    int tmp;
    char *wrbuff = NULL;

    if (res2 != 0)
        printf("aio read\n");
    if (res != iosize) {
        printf("read missing bytes expect %lu got %ld", iocb->u.c.nbytes, res);
        //exit(1);
    }

    /* turn read into write */
    tmp = posix_memalign((void **)&wrbuff, getpagesize(), AIO_BLKSIZE);
    if (tmp) {   /* posix_memalign returns a positive error number on failure */
        printf("posix_memalign failed\n");
        exit(1);
    }
    /* bound the copy by the allocation size; the data is treated as a string */
    snprintf(wrbuff, iosize, "%s", buf);
    printf("wrbuff-len = %lu: %s\n", strlen(wrbuff), wrbuff);
    free(buf);

    io_prep_pwrite(iocb, odsfd, wrbuff, iosize, offset);
    io_set_callback(iocb, wr_done);
    if (1 != (res = io_submit(ctx, 1, &iocb)))
        printf("io_submit write error\n");
    printf("\nsubmit %ld write request\n", res);
}
int main(int argc, char *argv[])
{
    int sg_fd, k, ok;
    unsigned char inqCmdBlk[INQ_CMD_LEN] = {0x12, 0, 0, 0, INQ_REPLY_LEN, 0};
    unsigned char turCmdBlk[TUR_CMD_LEN] = {0x00, 0, 0, 0, 0, 0};
    unsigned char inqBuff[INQ_REPLY_LEN];
    sg_io_hdr_t io_hdr;
    char *file_name = 0;
    char ebuff[EBUFF_SZ];
    unsigned char sense_buffer[32];
    int do_extra = 0;

    for (k = 1; k < argc; ++k) {
        if (0 == memcmp("-x", argv[k], 2))
            do_extra = 1;
        else if (*argv[k] == '-') {
            printf("Unrecognized switch: %s\n", argv[k]);
            file_name = 0;
            break;
        } else if (0 == file_name)
            file_name = argv[k];
        else {
            printf("too many arguments\n");
            file_name = 0;
            break;
        }
    }
    if (0 == file_name) {
        printf("Usage: 'sg_simple_aio [-x] <sg_device>'\n");
        return 1;
    }

    /* An access mode of O_RDWR is required for the write()/read() interface */
    if ((sg_fd = open(file_name, O_RDWR)) < 0) {
        snprintf(ebuff, EBUFF_SZ, "sg_simple_aio: error opening file: %s", file_name);
        perror(ebuff);
        return 1;
    }
    /* Just to be safe, check we have a new sg device by trying an ioctl */
    if ((ioctl(sg_fd, SG_GET_VERSION_NUM, &k) < 0) || (k < 30000)) {
        printf("sg_simple_aio: %s doesn't seem to be a new sg device\n", file_name);
        close(sg_fd);
        return 1;
    }

    /* Prepare INQUIRY command */
    memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
    io_hdr.interface_id = 'S';
    io_hdr.cmd_len = sizeof(inqCmdBlk);
    /* io_hdr.iovec_count = 0; */  /* memset takes care of this */
    io_hdr.mx_sb_len = sizeof(sense_buffer);
    io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
    io_hdr.dxfer_len = INQ_REPLY_LEN;
    io_hdr.dxferp = inqBuff;
    io_hdr.cmdp = inqCmdBlk;
    io_hdr.sbp = sense_buffer;
    io_hdr.timeout = 20000;     /* 20000 millisecs == 20 seconds */
    /* io_hdr.flags = 0; */     /* take defaults: indirect IO, etc */
    /* io_hdr.pack_id = 0; */
    /* io_hdr.usr_ptr = NULL; */

#if 1
    {
        struct iocb a_iocb;
        struct iocb *iocb_arr[1];
        io_context_t io_ctx;
        int res;

        if (0 != (res = io_queue_init(1, &io_ctx))) {
            printf("io_queue_init: failed %s\n", strerror(-res));
            close(sg_fd);
            return 1;
        }
        iocb_arr[0] = &a_iocb;
        io_prep_pwrite(iocb_arr[0], sg_fd, &io_hdr, sizeof(io_hdr), 0);
        io_set_callback(iocb_arr[0], my_io_callback);
        res = io_submit(io_ctx, 1, iocb_arr);
        if (1 != res) {
            printf("io_submit: returned %d\n", res);
            close(sg_fd);
            return 1;
        }
    }
#else
    if (write(sg_fd, &io_hdr, sizeof(io_hdr)) < 0) {
        perror("sg_simple_aio: Inquiry write error");
        close(sg_fd);
        return 1;
    }
#endif
    /* sleep(3); */
    if (read(sg_fd, &io_hdr, sizeof(io_hdr)) < 0) {
        perror("sg_simple_aio: Inquiry read error");
        close(sg_fd);
        return 1;
    }

    /* now for the error processing */
    ok = 0;
    switch (sg_err_category3(&io_hdr)) {
    case SG_LIB_CAT_CLEAN:
        ok = 1;
        break;
    case SG_LIB_CAT_RECOVERED:
        printf("Recovered error on INQUIRY, continuing\n");
        ok = 1;
        break;
    default: /* won't bother decoding other categories */
        sg_chk_n_print3("INQUIRY command error", &io_hdr);
        break;
    }

    if (ok) { /* output result if it is available */
        char *p = (char *)inqBuff;
        int f = (int)*(p + 7);
        printf("Some of the INQUIRY command's results:\n");
        printf("    %.8s  %.16s  %.4s  ", p + 8, p + 16, p + 32);
        printf("[wide=%d sync=%d cmdque=%d sftre=%d]\n",
               !!(f & 0x20), !!(f & 0x10), !!(f & 2), !!(f & 1));
        /* Extra info, not necessary to look at */
        if (do_extra)
            printf("INQUIRY duration=%u millisecs, resid=%d, msg_status=%d\n",
                   io_hdr.duration, io_hdr.resid, (int)io_hdr.msg_status);
    }

    /* Prepare TEST UNIT READY command */
    memset(&io_hdr, 0, sizeof(sg_io_hdr_t));
    io_hdr.interface_id = 'S';
    io_hdr.cmd_len = sizeof(turCmdBlk);
    io_hdr.mx_sb_len = sizeof(sense_buffer);
    io_hdr.dxfer_direction = SG_DXFER_NONE;
    io_hdr.cmdp = turCmdBlk;
    io_hdr.sbp = sense_buffer;
    io_hdr.timeout = 20000;     /* 20000 millisecs == 20 seconds */

    if (ioctl(sg_fd, SG_IO, &io_hdr) < 0) {
        perror("sg_simple_aio: Test Unit Ready SG_IO ioctl error");
        close(sg_fd);
        return 1;
    }

    /* now for the error processing */
    ok = 0;
    switch (sg_err_category3(&io_hdr)) {
    case SG_LIB_CAT_CLEAN:
        ok = 1;
        break;
    case SG_LIB_CAT_RECOVERED:
        printf("Recovered error on Test Unit Ready, continuing\n");
        ok = 1;
        break;
    default: /* won't bother decoding other categories */
        sg_chk_n_print3("Test Unit Ready command error", &io_hdr);
        break;
    }
    if (ok)
        printf("Test Unit Ready successful so unit is ready!\n");
    else
        printf("Test Unit Ready failed so unit may _not_ be ready!\n");
    if (do_extra)
        printf("TEST UNIT READY duration=%u millisecs, resid=%d, msg_status=%d\n",
               io_hdr.duration, io_hdr.resid, (int)io_hdr.msg_status);

    close(sg_fd);
    return 0;
}
static void *aio_out_thread(void *param)
{
    char *name = (char *) param;
    int status;
    io_context_t ctx = 0;
    struct iocb *queue, *iocb;
    unsigned i;

    status = sink_open(name);
    if (status < 0)
        return 0;
    sink_fd = status;
    pthread_cleanup_push(close_fd, &sink_fd);

    /* initialize i/o queue */
    status = io_setup(aio_out, &ctx);
    if (status < 0) {
        perror("aio_out_thread, io_setup");
        return 0;
    }
    pthread_cleanup_push(queue_release, &ctx);

    if (aio_out == 0)
        aio_out = 1;
    queue = alloca(aio_out * sizeof *iocb);

    /* populate and (re)run the queue */
    for (i = 0, iocb = queue; i < aio_out; i++, iocb++) {
        char *buf = malloc(iosize);

        if (!buf) {
            fprintf(stderr, "%s can't get buffer[%d]\n", __FUNCTION__, i);
            return 0;
        }

        /* data can be processed in out_complete() */
        io_prep_pread(iocb, sink_fd, buf, iosize, 0);
        io_set_callback(iocb, out_complete);
        iocb->key = USB_DIR_OUT;

        status = io_submit(ctx, 1, &iocb);
        if (status < 0) {
            perror(__FUNCTION__);
            break;
        }
        aio_out_pending++;
        if (verbose > 2)
            fprintf(stderr, "%s submit uiocb %p\n", __FUNCTION__, iocb);
    }

    status = io_run(ctx, &aio_out_pending);
    if (status < 0)
        perror("aio_out_thread, io_run");

    /* clean up */
    fflush(stderr);
    pthread_cleanup_pop(1);
    pthread_cleanup_pop(1);

    return 0;
}
static void *aio_in_thread(void *param)
{
    char *name = (char *) param;
    int status;
    io_context_t ctx = 0;
    struct iocb *queue, *iocb;
    unsigned i;

    status = source_open(name);
    if (status < 0)
        return 0;
    source_fd = status;
    pthread_cleanup_push(close_fd, &source_fd);

    /* initialize i/o queue */
    status = io_setup(aio_in, &ctx);
    if (status < 0) {
        perror("aio_in_thread, io_setup");
        return 0;
    }
    pthread_cleanup_push(queue_release, &ctx);

    if (aio_in == 0)
        aio_in = 1;
    queue = alloca(aio_in * sizeof *iocb);

    /* populate and (re)run the queue */
    for (i = 0, iocb = queue; i < aio_in; i++, iocb++) {
        char *buf = malloc(iosize);

        if (!buf) {
            fprintf(stderr, "%s can't get buffer[%d]\n", __FUNCTION__, i);
            return 0;
        }

        /* host receives the data we're writing */
        io_prep_pwrite(iocb, source_fd, buf, fill_in_buf(buf, iosize), 0);
        io_set_callback(iocb, in_complete);
        iocb->key = USB_DIR_IN;

        status = io_submit(ctx, 1, &iocb);
        if (status < 0) {
            perror(__FUNCTION__);
            break;
        }
        aio_in_pending++;
        if (verbose > 2)
            fprintf(stderr, "%s submit uiocb %p\n", __FUNCTION__, iocb);
    }

    status = io_run(ctx, &aio_in_pending);
    if (status < 0)
        perror("aio_in_thread, io_run");

    /* clean up */
    fflush(stderr);
    pthread_cleanup_pop(1);
    pthread_cleanup_pop(1);

    return 0;
}
void loop_aio(int fd, void *buf)
{
    int res;
    unsigned int elapsed;

    /* i/o context initialization */
    io_context_t myctx;
    memset(&myctx, 0, sizeof(myctx));
    if ((res = io_queue_init(maxinflight, &myctx)))
        io_error("io_queue_init", res);

    copied = 0;
    inflight = 0;
    if (opt_verbose)
        printf("[run %d] start\n", runid);

    while (copied < mycount) {
        struct iocb *ioq[maxsubmit];
        int tosubmit = 0;
        unsigned long long int index;
        struct iocb *iocb;
        struct timeval tv1, tv2;

        /* filling a context with io queries */
        while (copied + inflight + tosubmit < mycount &&
               inflight + tosubmit < maxinflight &&
               tosubmit < maxsubmit) {
            /* Simultaneous asynchronous operations using the same iocb
               produce undefined results. */
            iocb = calloc(1, sizeof(struct iocb));

            if (mode_rnd)
                index = (mydevsize / myiosize) * random() / RAND_MAX;
            else
                index = copied + inflight + tosubmit;

            if (mode_write)
                io_prep_pwrite(iocb, fd, buf, myiosize, index * myiosize);
            else
                io_prep_pread(iocb, fd, buf, myiosize, index * myiosize);

            io_set_callback(iocb, io_done);
            ioq[tosubmit] = iocb;
            tosubmit += 1;
        }

        /* if there are available slots for submitting queries, do it */
        if (tosubmit) {
            /* submit io and check elapsed time */
            gettimeofday(&tv1, NULL);
            if ((res = io_submit(myctx, tosubmit, ioq)) != tosubmit) {
                printf("only %d io submitted\n", res);
                io_error("io_submit write", res);
            }
            gettimeofday(&tv2, NULL);
            elapsed = (tv2.tv_sec - tv1.tv_sec) * 1000 +
                      (tv2.tv_usec - tv1.tv_usec) / 1000;
            if (elapsed > 200)
                printf("warning: io_submit() took %d ms, this is suspicious, "
                       "maybe nr_request is too low.\n", elapsed);

            /* inflight io += newly submitted io */
            inflight += tosubmit;
        }

        /* handle completed io events */
        if ((res = io_queue_run(myctx)) < 0)
            io_error("io_queue_run", res);

        if (inflight == maxinflight ||
            (inflight && copied + inflight == mycount)) {
            struct io_event event;
            if ((res = io_getevents(myctx, 1, 1, &event, NULL)) < 0)
                io_error("io_getevents", res);
            if (res != 1)
                errx(1, "no events?");
            ((io_callback_t)event.obj->data)(myctx, event.obj, event.res, event.res2);
        }
    }

    io_queue_release(myctx);
}
int main(int argc, char *const *argv)
{
    int srcfd;
    struct stat st;
    off_t length = 0, offset = 0;
    io_context_t myctx;

    if (argc != 2 || argv[1][0] == '-') {
        fprintf(stderr, "Usage: aioread SOURCE\n");
        exit(1);
    }
    if ((srcfd = open(srcname = argv[1], O_RDONLY)) < 0) {
        perror(srcname);
        exit(1);
    }
    if (fstat(srcfd, &st) < 0) {
        perror("fstat");
        exit(1);
    }
    length = st.st_size;

    /* initialize state machine */
    memset(&myctx, 0, sizeof(myctx));
    io_queue_init(AIO_MAXIO, &myctx);
    tocopy = howmany(length, AIO_BLKSIZE);
    printf("tocopy: %ld times\n", tocopy);

    int i, rc;
    for (i = 0; i < AIO_MAXIO; i++)
        memset(&aiobuf[i], '\0', sizeof(struct mybuf));
    int ps = getpagesize();

    while (tocopy > 0) {
        /* Submit as many reads at once as possible, up to AIO_MAXIO */
        int n = MIN(MIN(AIO_MAXIO - busy, AIO_MAXIO / 2),
                    howmany(length - offset, AIO_BLKSIZE));
        if (n > 0) {
            struct iocb *ioq[n];

            for (i = 0; i < n; i++) {
                struct iocb *io = (struct iocb *)malloc(sizeof(struct iocb));
                int iosize = MIN(length - offset, AIO_BLKSIZE);
                char *buf = NULL;
                posix_memalign((void **)&buf, ps, iosize);
                if (NULL == buf || NULL == io) {
                    fprintf(stderr, "out of memory\n");
                    exit(1);
                }
                io_prep_pread(io, srcfd, buf, iosize, offset);
                io_set_callback(io, rd_done);
                ioq[i] = io;
                offset += iosize;
            }

            rc = io_submit(myctx, n, ioq);
            if (rc < 0)
                io_error("io_submit", rc);
            busy += n;
        }

        /* Handle IO's that have completed */
        rc = io_queue_run(myctx);
        if (rc < 0)
            io_error("io_queue_run", rc);

        /* if we have the maximum number of i/o's in flight,
           wait for one to complete */
        if (busy == AIO_MAXIO) {
            rc = io_queue_wait(myctx, NULL);
            if (rc < 0)
                io_error("io_queue_wait", rc);
        }
    }

    close(srcfd);
    exit(0);
}
int test_aio(char *src_file, char *dst_file)
{
    int length = sizeof("abcdefg");
    char *content = (char *)malloc(length);
    io_context_t myctx;
    int rc;
    char *buff = NULL;
    int offset = 0;
    int num, i, tmp;

    if ((srcfd = open(src_file, O_CREAT | O_RDWR, 0666)) < 0) {
        printf("open srcfile error\n");
        exit(1);
    }
    printf("srcfd=%d\n", srcfd);
    lseek(srcfd, 0, SEEK_SET);
    write(srcfd, "abcdefg", length);
    lseek(srcfd, 0, SEEK_SET);
    read(srcfd, content, length);
    printf("write in the srcfile successful, content is [%s]\n", content);

    if ((odsfd = open(dst_file, O_CREAT | O_RDWR, 0666)) < 0) {
        close(srcfd);
        printf("open odsfile error\n");
        exit(1);
    }

    memset(&myctx, 0, sizeof(myctx));
    io_queue_init(AIO_MAXIO, &myctx);

    struct iocb *io = (struct iocb *)malloc(sizeof(struct iocb));
    int iosize = AIO_BLKSIZE;
    tmp = posix_memalign((void **)&buff, getpagesize(), AIO_BLKSIZE);
    if (tmp) {   /* posix_memalign returns a positive error number on failure */
        printf("posix_memalign error\n");
        exit(1);
    }
    if (NULL == io) {
        printf("io out of memory\n");
        exit(1);
    }

    io_prep_pread(io, srcfd, buff, iosize, offset);
    io_set_callback(io, rd_done);
    printf("START...\n\n");

    rc = io_submit(myctx, 1, &io);
    if (rc < 0)
        printf("io_submit read error\n");
    printf("\nsubmit %d read request\n", rc);

    //m_io_queue_run(myctx);
    struct io_event events[AIO_MAXIO];
    io_callback_t cb;
    int finish_count = 0;
    while (1) {
        num = io_getevents(myctx, 0, AIO_MAXIO, events, NULL);
        printf("\n%d io_request completed\n\n", num);
        for (i = 0; i < num; i++) {
            cb = (io_callback_t)events[i].data;
            struct iocb *io = events[i].obj;
            printf("events[%d].data = %lX, res = %ld, res2 = %ld\n",
                   i, (unsigned long)cb, events[i].res, events[i].res2);
            cb(myctx, io, events[i].res, events[i].res2);
        }
        finish_count += num;
        /* one read plus the write resubmitted from rd_done() */
        if (finish_count >= 2)
            break;
    }
    return 0;
}
static int io_tio(char *pathname, int flag, int operation)
{
    int res, fd = 0, i = 0;
    void *bufptr = NULL;
    off_t offset = 0;
    struct timespec timeout;
    struct stat fi_stat;
    size_t alignment;

    io_context_t myctx;
    struct iocb iocb_array[AIO_MAXIO];
    struct iocb *iocbps[AIO_MAXIO];

    fd = SAFE_OPEN(pathname, flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

    /* determine the alignment from the blksize of the underlying device */
    SAFE_FSTAT(fd, &fi_stat);
    alignment = fi_stat.st_blksize;

    res = io_queue_init(AIO_MAXIO, &myctx);

    for (i = 0; i < AIO_MAXIO; i++) {
        switch (operation) {
        case IO_CMD_PWRITE:
            if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
                tst_brk(TBROK | TERRNO, "posix_memalign failed");
                return -1;
            }
            memset(bufptr, 0, AIO_BLKSIZE);

            io_prep_pwrite(&iocb_array[i], fd, bufptr, AIO_BLKSIZE, offset);
            io_set_callback(&iocb_array[i], work_done);
            iocbps[i] = &iocb_array[i];
            offset += AIO_BLKSIZE;
            break;
        case IO_CMD_PREAD:
            if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
                tst_brk(TBROK | TERRNO, "posix_memalign failed");
                return -1;
            }
            memset(bufptr, 0, AIO_BLKSIZE);

            io_prep_pread(&iocb_array[i], fd, bufptr, AIO_BLKSIZE, offset);
            io_set_callback(&iocb_array[i], work_done);
            iocbps[i] = &iocb_array[i];
            offset += AIO_BLKSIZE;
            break;
        default:
            tst_res(TFAIL, "Command failed; opcode returned: %d\n", operation);
            return -1;
        }
    }

    do {
        res = io_submit(myctx, AIO_MAXIO, iocbps);
    } while (res == -EAGAIN);
    if (res < 0)
        io_error("io_submit tio", res);

    /*
     * We have submitted all the i/o requests. Wait for them to complete and
     * call the callbacks.
     */
    wait_count = AIO_MAXIO;
    timeout.tv_sec = 30;
    timeout.tv_nsec = 0;

    switch (operation) {
    case IO_CMD_PREAD:
    case IO_CMD_PWRITE:
        while (wait_count) {
            res = io_wait_run(myctx, &timeout);
            if (res < 0)
                io_error("io_wait_run", res);
        }
        break;
    }

    SAFE_CLOSE(fd);

    for (i = 0; i < AIO_MAXIO; i++)
        if (iocb_array[i].u.c.buf != NULL)
            free(iocb_array[i].u.c.buf);

    io_queue_release(myctx);

    return 0;
}
int main(int argc, char *argv[])
{
    int efd, fd, epfd;
    io_context_t ctx;
    struct timespec tms;
    struct io_event events[NUM_EVENTS];
    struct custom_iocb iocbs[NUM_EVENTS];
    struct iocb *iocbps[NUM_EVENTS];
    struct custom_iocb *iocbp;
    int i, j, r;
    void *buf;
    struct epoll_event epevent;

    efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (efd == -1) {
        perror("eventfd");
        return 2;
    }

    fd = open(TEST_FILE, O_RDWR | O_CREAT | O_DIRECT, 0644);
    if (fd == -1) {
        perror("open");
        return 3;
    }
    ftruncate(fd, TEST_FILE_SIZE);

    ctx = 0;
    if (io_setup(8192, &ctx)) {
        perror("io_setup");
        return 4;
    }

    if (posix_memalign(&buf, ALIGN_SIZE, RD_WR_SIZE)) {
        perror("posix_memalign");
        return 5;
    }
    printf("buf: %p\n", buf);

    for (i = 0, iocbp = iocbs; i < NUM_EVENTS; ++i, ++iocbp) {
        iocbps[i] = &iocbp->iocb;
        io_prep_pread(&iocbp->iocb, fd, buf, RD_WR_SIZE, i * RD_WR_SIZE);
        io_set_eventfd(&iocbp->iocb, efd);
        io_set_callback(&iocbp->iocb, aio_callback);
        iocbp->nth_request = i + 1;
    }

    if (io_submit(ctx, NUM_EVENTS, iocbps) != NUM_EVENTS) {
        perror("io_submit");
        return 6;
    }

    epfd = epoll_create(1);
    if (epfd == -1) {
        perror("epoll_create");
        return 7;
    }

    epevent.events = EPOLLIN | EPOLLET;
    epevent.data.ptr = NULL;
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &epevent)) {
        perror("epoll_ctl");
        return 8;
    }

    i = 0;
    while (i < NUM_EVENTS) {
        uint64_t finished_aio;

        if (epoll_wait(epfd, &epevent, 1, -1) != 1) {
            perror("epoll_wait");
            return 9;
        }

        if (read(efd, &finished_aio, sizeof(finished_aio)) != sizeof(finished_aio)) {
            perror("read");
            return 10;
        }

        printf("finished io number: %"PRIu64"\n", finished_aio);

        while (finished_aio > 0) {
            tms.tv_sec = 0;
            tms.tv_nsec = 0;
            r = io_getevents(ctx, 1, NUM_EVENTS, events, &tms);
            if (r > 0) {
                for (j = 0; j < r; ++j) {
                    ((io_callback_t)(events[j].data))(ctx, events[j].obj,
                                                      events[j].res, events[j].res2);
                }
                i += r;
                finished_aio -= r;
            }
        }
    }

    close(epfd);
    free(buf);
    io_destroy(ctx);
    close(fd);
    close(efd);
    remove(TEST_FILE);

    return 0;
}
static inline int io_fdsync(io_context_t ctx, struct iocb *iocb,
                            io_callback_t cb, int fd)
{
    io_prep_fdsync(iocb, fd);
    io_set_callback(iocb, cb);
    return io_submit(ctx, 1, &iocb);
}
static inline int io_poll(io_context_t ctx, struct iocb *iocb, io_callback_t cb, int fd, int events) { io_prep_poll(iocb, fd, events); io_set_callback(iocb, cb); return io_submit(ctx, 1, &iocb); }
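A minimal driver sketch (not part of the examples above) showing how a wrapper like io_poll() could be exercised: one poll request is submitted, then io_getevents() reaps the completion and dispatches the callback stored by io_set_callback(). The callback name on_pollin and the use of STDIN_FILENO are assumptions, and submitting a poll iocb requires a kernel with AIO poll support.

/* Sketch: drive the io_poll() wrapper above with a single request. */
#include <libaio.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void on_pollin(io_context_t ctx, struct iocb *iocb, long res, long res2)
{
    /* res holds the returned poll event mask */
    printf("fd %d is readable (res=%ld)\n", iocb->aio_fildes, res);
}

int main(void)
{
    io_context_t ctx = 0;
    struct iocb cb;
    struct io_event ev;

    if (io_queue_init(1, &ctx) < 0)
        return 1;
    /* io_poll() is the inline helper defined above */
    if (io_poll(ctx, &cb, on_pollin, STDIN_FILENO, POLLIN) != 1)
        return 1;
    /* reap one completion and call the callback stored in the iocb */
    if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
        ((io_callback_t)ev.data)(ctx, ev.obj, ev.res, ev.res2);
    io_queue_release(ctx);
    return 0;
}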
int main(int argc, char *argv[])
{
    int efd, fd, epfd;
    io_context_t ctx;
    struct timespec tms;
    struct io_event events[NUM_EVENTS];
    struct custom_iocb iocbs[NUM_EVENTS];
    struct iocb *iocbps[NUM_EVENTS];
    struct custom_iocb *iocbp;
    int i, j, r;
    void *buf;
    struct epoll_event epevent;

    // eventfd used to be notified of AIO completions
    efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (efd == -1) {
        perror("eventfd");
        return 2;
    }

    // Open the test file
    fd = open(TEST_FILE, O_RDWR | O_CREAT | O_DIRECT, 0644);
    if (fd == -1) {
        perror("open");
        return 3;
    }
    // Set the file size first.
    ftruncate(fd, TEST_FILE_SIZE);

    ctx = 0;
    // io_setup: create an asynchronous I/O context capable of holding
    // at least 8192 concurrently in-flight events.
    if (io_setup(8192, &ctx)) {
        perror("io_setup");
        return 4;
    }

    // Allocate RD_WR_SIZE bytes and place the address of the allocated memory
    // in *buf. The address will be a multiple of ALIGN_SIZE.
    if (posix_memalign(&buf, ALIGN_SIZE, RD_WR_SIZE)) {
        perror("posix_memalign");
        return 5;
    }
    printf("buf: %p\n", buf);

    for (i = 0, iocbp = iocbs; i < NUM_EVENTS; ++i, ++iocbp) {
        iocbps[i] = &iocbp->iocb;
        // Inline convenience function that initializes the iocb for an
        // asynchronous read. Afterwards iocbp->iocb holds:
        //   iocb->aio_fildes  = fd
        //   iocb->u.c.buf     = buf
        //   iocb->u.c.nbytes  = RD_WR_SIZE
        //   iocb->u.c.offset  = i * RD_WR_SIZE
        io_prep_pread(&iocbp->iocb, fd, buf, RD_WR_SIZE, i * RD_WR_SIZE);
        io_set_eventfd(&iocbp->iocb, efd);
        // Set up the I/O completion callback function.
        io_set_callback(&iocbp->iocb, aio_callback);
        iocbp->nth_request = i + 1;
    }

    // Submit asynchronous I/O blocks for processing: queue NUM_EVENTS
    // I/O request blocks in the AIO context ctx. iocbps is the array of
    // AIO control blocks to submit.
    if (io_submit(ctx, NUM_EVENTS, iocbps) != NUM_EVENTS) {
        perror("io_submit");
        return 6;
    }

    // Open an epoll file descriptor (epoll is an I/O event notification
    // facility); returns a descriptor referring to the new epoll instance.
    epfd = epoll_create(1);
    if (epfd == -1) {
        perror("epoll_create");
        return 7;
    }

    epevent.events = EPOLLIN | EPOLLET;
    epevent.data.ptr = NULL;
    // epoll_ctl: control interface for an epoll descriptor.
    //   EPOLL_CTL_ADD: the operation to perform
    //   efd:           the target file descriptor
    //   epevent:       describes the object linked to the file descriptor efd
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &epevent)) {
        perror("epoll_ctl");
        return 8;
    }

    i = 0;
    while (i < NUM_EVENTS) {
        uint64_t finished_aio;

        // epoll_wait: wait for an I/O event on an epoll file descriptor.
        //   epfd:    the epoll instance
        //   epevent: receives the events available to the caller
        //   1:       at most one event is returned
        //   -1:      timeout in milliseconds; -1 blocks indefinitely
        if (epoll_wait(epfd, &epevent, 1, -1) != 1) {
            perror("epoll_wait");
            return 9;
        }

        if (read(efd, &finished_aio, sizeof(finished_aio)) != sizeof(finished_aio)) {
            perror("read");
            return 10;
        }

        printf("finished io number: %"PRIu64"\n", finished_aio);

        while (finished_aio > 0) {
            tms.tv_sec = 0;
            tms.tv_nsec = 0;
            // Read asynchronous I/O events from the completion queue and
            // dispatch the callback stored in each iocb.
            r = io_getevents(ctx, 1, NUM_EVENTS, events, &tms);
            if (r > 0) {
                for (j = 0; j < r; ++j) {
                    ((io_callback_t)(events[j].data))(ctx, events[j].obj,
                                                      events[j].res, events[j].res2);
                }
                i += r;
                finished_aio -= r;
            }
        }
    }

    close(epfd);
    free(buf);
    // destroy the asynchronous I/O context
    io_destroy(ctx);
    close(fd);
    close(efd);
    remove(TEST_FILE);

    return 0;
}
/* Submit multiple I/O requests in a single io_submit() call. */
int io_tio(char *pathname, int flag, int n, int operation)
{
    int res, fd = 0, i = 0;
    void *bufptr = NULL;
    off_t offset = 0;
    struct timespec timeout;

    io_context_t myctx;
    struct iocb iocb_array[AIO_MAXIO];
    struct iocb *iocbps[AIO_MAXIO];

    fd = open(pathname, flag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    if (fd <= 0) {
        printf("open for %s failed: %s\n", pathname, strerror(errno));
        return -1;
    }

    res = io_queue_init(n, &myctx);
    //printf(" res = %d \n", res);

    for (i = 0; i < AIO_MAXIO; i++) {
        switch (operation) {
        case IO_CMD_PWRITE:
            if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
                perror(" posix_memalign failed ");
                return -1;
            }
            memset(bufptr, 0, AIO_BLKSIZE);

            io_prep_pwrite(&iocb_array[i], fd, bufptr, AIO_BLKSIZE, offset);
            io_set_callback(&iocb_array[i], work_done);
            iocbps[i] = &iocb_array[i];
            offset += AIO_BLKSIZE;
            break;
        case IO_CMD_PREAD:
            if (posix_memalign(&bufptr, alignment, AIO_BLKSIZE)) {
                perror(" posix_memalign failed ");
                return -1;
            }
            memset(bufptr, 0, AIO_BLKSIZE);

            io_prep_pread(&iocb_array[i], fd, bufptr, AIO_BLKSIZE, offset);
            io_set_callback(&iocb_array[i], work_done);
            iocbps[i] = &iocb_array[i];
            offset += AIO_BLKSIZE;
            break;
        case IO_CMD_POLL:
        case IO_CMD_NOOP:
            break;
        default:
            tst_resm(TFAIL, "Command failed; opcode returned: %d\n", operation);
            return -1;
        }
    }

    do {
        res = io_submit(myctx, AIO_MAXIO, iocbps);
    } while (res == -EAGAIN);
    if (res < 0)
        io_error("io_submit tio", res);

    /*
     * We have submitted all the i/o requests. Wait for at least one to
     * complete and call the callbacks.
     */
    wait_count = AIO_MAXIO;
    timeout.tv_sec = 30;
    timeout.tv_nsec = 0;

    switch (operation) {
    case IO_CMD_PREAD:
    case IO_CMD_PWRITE:
        while (wait_count) {
            res = io_wait_run(myctx, &timeout);
            if (res < 0)
                io_error("io_wait_run", res);
        }
        break;
    }

    close(fd);

    for (i = 0; i < AIO_MAXIO; i++) {
        if (iocb_array[i].u.c.buf != NULL)
            free(iocb_array[i].u.c.buf);
    }

    io_queue_release(myctx);

    return 0;
}
int main(int argc, char *const *argv)
{
    struct stat st;
    off_t length = 0, offset = 0;
    io_context_t myctx;
    int c;
    extern char *optarg;
    extern int optind, opterr, optopt;

    while ((c = getopt(argc, argv, "a:b:df:n:s:wzD:")) != -1) {
        char *endp;

        switch (c) {
        case 'a':   /* alignment of data buffer */
            alignment = strtol(optarg, &endp, 0);
            alignment = (long)scale_by_kmg((long long)alignment, *endp);
            break;
        case 'f':   /* use these open flags */
            if (strcmp(optarg, "LARGEFILE") == 0 ||
                strcmp(optarg, "O_LARGEFILE") == 0) {
                source_open_flag |= O_LARGEFILE;
                dest_open_flag |= O_LARGEFILE;
            } else if (strcmp(optarg, "TRUNC") == 0 ||
                       strcmp(optarg, "O_TRUNC") == 0) {
                dest_open_flag |= O_TRUNC;
            } else if (strcmp(optarg, "SYNC") == 0 ||
                       strcmp(optarg, "O_SYNC") == 0) {
                dest_open_flag |= O_SYNC;
            } else if (strcmp(optarg, "DIRECT") == 0 ||
                       strcmp(optarg, "O_DIRECT") == 0) {
                source_open_flag |= O_DIRECT;
                dest_open_flag |= O_DIRECT;
            } else if (strncmp(optarg, "CREAT", 5) == 0 ||
                       strncmp(optarg, "O_CREAT", 5) == 0) {
                dest_open_flag |= O_CREAT;
            }
            break;
        case 'd':
            debug++;
            break;
        case 'D':
            delay.tv_usec = atoi(optarg);
            break;
        case 'b':   /* block size */
            aio_blksize = strtol(optarg, &endp, 0);
            aio_blksize = (long)scale_by_kmg((long long)aio_blksize, *endp);
            break;
        case 'n':   /* num io */
            aio_maxio = strtol(optarg, &endp, 0);
            break;
        case 's':   /* size to transfer */
            length = strtoll(optarg, &endp, 0);
            length = scale_by_kmg(length, *endp);
            break;
        case 'w':   /* no write */
            no_write = 1;
            break;
        case 'z':   /* write zeros */
            zero = 1;
            break;
        default:
            usage();
        }
    }

    argc -= optind;
    argv += optind;

#ifndef DEBUG
    if (argc < 1)
        usage();
#else
    source_open_flag |= O_DIRECT;
    dest_open_flag |= O_DIRECT;
    aio_blksize = 1;
    aio_maxio = 1;
    srcname = "junkdata";
    dstname = "ff2";
#endif

    if (!zero) {
#ifndef DEBUG
        if ((srcfd = open(srcname = *argv, source_open_flag)) < 0) {
#else
        if ((srcfd = open(srcname, source_open_flag)) < 0) {
#endif
            perror(srcname);
            exit(1);
        }
        argv++;
        argc--;
        length = 1073741824;
#if 0
        if (fstat(srcfd, &st) < 0) {
            perror("fstat");
            exit(1);
        }
        if (length == 0)
            length = st.st_size;
#endif
    }

    if (!no_write) {
        /*
         * We are either copying or writing zeros to dstname
         */
#ifndef DEBUG
        if (argc < 1)
            usage();
        if ((dstfd = open(dstname = *argv, dest_open_flag, 0666)) < 0) {
#else
        if ((dstfd = open(dstname, dest_open_flag, 0666)) < 0) {
#endif
            perror(dstname);
            exit(1);
        }

        if (zero) {
            /*
             * get size of dest, if we are zeroing it.
             * TODO: handle devices.
             */
            if (fstat(dstfd, &st) < 0) {
                perror("fstat");
                exit(1);
            }
            if (length == 0)
                length = st.st_size;
        }
    }

    /* initialize state machine */
    memset(&myctx, 0, sizeof(myctx));
    io_queue_init(aio_maxio, &myctx);
    tocopy = howmany(length, aio_blksize);

    if (init_iocb(aio_maxio, aio_blksize) < 0) {
        fprintf(stderr, "Error allocating the i/o buffers\n");
        exit(1);
    }

    while (tocopy > 0) {
        int i, rc;
        /* Submit as many reads at once as possible, up to aio_maxio */
        int n = MIN(MIN(aio_maxio - busy, aio_maxio),
                    howmany(length - offset, aio_blksize));
        if (n > 0) {
            struct iocb *ioq[n];

            for (i = 0; i < n; i++) {
                struct iocb *io = alloc_iocb();
                int iosize = MIN(length - offset, aio_blksize);

                if (zero) {
                    /*
                     * We are writing zeros to dstfd
                     */
                    io_prep_pwrite(io, dstfd, io->u.c.buf, iosize, offset);
                    io_set_callback(io, wr_done);
                } else {
                    io_prep_pread(io, srcfd, io->u.c.buf, iosize, offset);
                    io_set_callback(io, rd_done);
                }
                ioq[i] = io;
                offset += iosize;
            }

            rc = io_submit(myctx, n, ioq);
            if (rc < 0)
                io_error("io_submit", rc);

            busy += n;
            if (debug > 1)
                printf("io_submit(%d) busy:%d\n", n, busy);
            if (delay.tv_usec) {
                struct timeval t = delay;
                (void)select(0, 0, 0, 0, &t);
            }
        }

        /*
         * We have submitted all the i/o requests. Wait for at least one to
         * complete and call the callbacks.
         */
        count_io_q_waits++;
        rc = io_wait_run(myctx, 0);
        if (rc < 0)
            io_error("io_wait_run", rc);

        if (debug > 1) {
            printf("io_wait_run: rc == %d\n", rc);
            printf("busy:%d aio_maxio:%d tocopy:%d\n",
                   busy, aio_maxio, tocopy);
        }
    }

    if (srcfd != -1)
        close(srcfd);
    if (dstfd != -1)
        close(dstfd);
    exit(0);
}