int main(int ac, char **av)
{
	int afd, fd;
	aio_context_t ctx = 0;
	char const *testfn = "/tmp/eventfd-aio-test.data";

	fprintf(stdout, "creating an eventfd ...\n");
	if ((afd = eventfd(0)) == -1) {
		perror("eventfd");
		return 2;
	}
	fprintf(stdout, "done! eventfd = %d\n", afd);
	if (io_setup(TESTFILE_SIZE / IORTX_SIZE + 256, &ctx)) {
		perror("io_setup");
		return 3;
	}
	if ((fd = open(testfn, O_RDWR | O_CREAT, 0644)) == -1) {
		perror(testfn);
		return 4;
	}
	ftruncate(fd, TESTFILE_SIZE);
	fcntl(afd, F_SETFL, fcntl(afd, F_GETFL, 0) | O_NONBLOCK);

	test_write(ctx, fd, TESTFILE_SIZE, afd);
	test_read(ctx, fd, TESTFILE_SIZE, afd);

	io_destroy(ctx);
	close(fd);
	close(afd);
	remove(testfn);
	return 0;
}
int swAioLinux_init(int max_aio_events)
{
	swoole_aio_context = 0;
	if (io_setup(SW_AIO_MAX_EVENTS, &swoole_aio_context) < 0) {
		swWarn("io_setup() failed. Error: %s[%d]", strerror(errno), errno);
		return SW_ERR;
	}
	if (swPipeNotify_auto(&swoole_aio_pipe, 0, 0) < 0) {
		return SW_ERR;
	}
	swoole_aio_eventfd = swoole_aio_pipe.getFd(&swoole_aio_pipe, 0);

	SwooleG.main_reactor->setHandle(SwooleG.main_reactor, SW_FD_AIO, swAioLinux_onFinish);
	SwooleG.main_reactor->add(SwooleG.main_reactor, swoole_aio_eventfd, SW_FD_AIO);

	SwooleAIO.callback = swAio_callback_test;
	SwooleAIO.destroy = swAioLinux_destroy;
	SwooleAIO.read = swAioLinux_read;
	SwooleAIO.write = swAioLinux_write;
	return SW_OK;
}
void setup_main_loop(void)
{
	signal(SIGPIPE, SIG_IGN);

	state.epoll_fd = epoll_create(1);
	if (state.epoll_fd < 0)
		fail(1, "epoll_create");
	rt_mutex_init(&state.sched_lock);

#if ENABLE_AIO
	int ret;
	static event_t aio_dummy_event;

	state.aio_dummy_event = &aio_dummy_event;
	ret = io_setup(MAX_AIO_EVENTS, &state.aio_ctx);
	if (ret < 0)
		fail2(1, -ret, "io_setup");
	state.aio_eventfd = eventfd(0, EFD_NONBLOCK);
	if (state.aio_eventfd < 0)
		fail(1, "eventfd");
	ret = epoll_ctler(EPOLL_CTL_ADD, state.aio_eventfd, EPOLLIN, state.aio_dummy_event);
	if (ret < 0)
		fail(1, "epoll_ctl eventfd");
#endif
}
int aio_create(void)
{
	int n;

	aio_fd = eventfd(0, 0);
	if (-1 == aio_fd) {
		logerror("create eventaio_fd fail. %s", strerror(errno));
		return aio_fd;
	}
	n = 1;
	if (-1 == ioctl(aio_fd, FIONBIO, &n)) {
		logerror("ioctl fail. %s", strerror(errno));
		close(aio_fd);
		return -1;	/* the fd is already closed; do not hand it back to the caller */
	}
	if (-1 == io_setup(64, &aio_ctx)) {
		logerror("io_setup fail. %s", strerror(errno));
		close(aio_fd);
		return -1;
	}
#if AIO_QUEUE
	aio_queue = listCreate();
#endif
	return aio_fd;
}
CAsyncIO::CAsyncIO()
{
	//m_aioHandler.create();
	m_aioHandler = NULL;
	int z = io_setup(NUM_EVENTS, &m_aioHandler);
	ON_ERROR_PRINT_LASTMSG_AND_DO(z, != , 0, throw "Failed to set up aioContext");
}
//return eventfd
int AIORead(std::string path, void *buf, int epfd = -1)
{
	m_filefd = openFile(path);
	if (-1 == m_filefd) {
		return -1;
	}
	m_ctx = 0;
	if (io_setup(8192, &m_ctx)) {
		perror("io_setup");
		return -1;
	}
	/* note: buf is reassigned to an internally allocated aligned buffer; the
	 * caller's pointer is not updated because buf is passed by value */
	if (posix_memalign(&buf, ALIGN_SIZE, RD_WR_SIZE)) {
		perror("posix_memalign");
		return 5;
	}
	printf("buf: %p\n", buf);

	/* completion notifications for the submitted reads are delivered via this eventfd */
	int efd = eventfd(0, EFD_NONBLOCK);
	if (-1 == efd) {
		perror("eventfd");
		return -1;
	}
	/* i, iocbp, iocbs and iocbps are presumably class members holding the request array */
	for (i = 0, iocbp = iocbs; i < NUM_EVENTS; ++i, ++iocbp) {
		iocbps[i] = &iocbp->iocb;
		io_prep_pread(&iocbp->iocb, m_filefd, buf, RD_WR_SIZE, i * RD_WR_SIZE);
		io_set_eventfd(&iocbp->iocb, efd);
		io_set_callback(&iocbp->iocb, aio_callback);
		iocbp->nth_request = i + 1;
	}
	if (io_submit(m_ctx, NUM_EVENTS, iocbps) != NUM_EVENTS) {
		perror("io_submit");
		return 6;
	}
	return efd;
}
static int init_ns_worker_ctx(void)
{
	if (g_ns->type == ENTRY_TYPE_AIO_FILE) {
#ifdef HAVE_LIBAIO
		g_ns->u.aio.events = calloc(1, sizeof(struct io_event));
		if (!g_ns->u.aio.events) {
			return -1;
		}
		g_ns->u.aio.ctx = 0;
		if (io_setup(1, &g_ns->u.aio.ctx) < 0) {
			free(g_ns->u.aio.events);
			perror("io_setup");
			return -1;
		}
#endif
	} else {
		/*
		 * TODO: If a controller has multiple namespaces, they could all use the same queue.
		 * For now, give each namespace/thread combination its own queue.
		 */
		g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, 0);
		if (!g_ns->u.nvme.qpair) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
			return -1;
		}
	}

	return 0;
}
int main()
{
	int i;
	pthread_t reaperThread;

	/* O_DIRECT I/O requires a suitably aligned buffer */
	posix_memalign((void **)&buffer, BUFFERSIZE, BUFFERSIZE);

	/* Open the file for reading */
	filedes = open(PATH, O_RDONLY | O_DIRECT, 0644);
	if (filedes < 0) {
		printf("Error Opening File\n");
		return 0;
	}

	/* Initialize context */
	io_setup(MAXEVENTS, &context);

	/* Create a separate thread to process results */
	pthread_create(&reaperThread, NULL, Reap, (void *)&i);

	/* Submit Requests for Reading */
	for (i = 0; i < 100; i++) {
		SubmitRead(i);
	}

	/* Wait till reaperThread exits */
	pthread_join(reaperThread, NULL);

	/* Deallocate file descriptor and the context */
	close(filedes);
	io_destroy(context);
	return 0;
}
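The Reap function itself is not shown in this example. A minimal sketch of what such a reaper thread could look like with io_getevents(), assuming the example's global AIO context, its MAXEVENTS constant, and the 100 submitted reads, is:

/* Hypothetical reaper thread, not part of the example above; assumes the
 * global AIO context and that exactly 100 reads were submitted. */
void *Reap(void *arg)
{
	struct io_event events[MAXEVENTS];
	int reaped = 0;

	while (reaped < 100) {
		/* block until at least one completion is available */
		int n = io_getevents(context, 1, MAXEVENTS, events, NULL);
		if (n <= 0)
			break;	/* io_getevents() returns a negative errno on failure */
		for (int i = 0; i < n; i++) {
			/* events[i].obj is the completed iocb; res is the byte
			 * count on success or a negative errno on error */
			if ((long)events[i].res < 0)
				fprintf(stderr, "read failed: %ld\n", (long)events[i].res);
		}
		reaped += n;
	}
	return NULL;
}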
static int __lio_setup_aio_poll(struct tqueue *queue, int qlen)
{
	struct lio *lio = queue->tio_data;
	int err, fd;

	lio->aio_ctx = REQUEST_ASYNC_FD;
	fd = io_setup(qlen, &lio->aio_ctx);
	if (fd < 0) {
		lio->aio_ctx = 0;
		err = -errno;
		if (err == -EINVAL)
			goto fail_fd;
		goto fail;
	}

	lio->event_fd = fd;
	return 0;

fail_fd:
	DPRINTF("Couldn't get fd for AIO poll support. This is probably "
		"because your kernel does not have the aio-poll patch "
		"applied.\n");
fail:
	return err;
}
int tap_aio_setup(tap_aio_context_t *ctx, struct io_event *aio_events, int max_aio_events)
{
	int ret;

	ctx->aio_events = aio_events;
	ctx->max_aio_events = max_aio_events;
	ctx->poll_in_thread = 0;

	ctx->aio_ctx = (io_context_t)REQUEST_ASYNC_FD;
	ret = io_setup(ctx->max_aio_events, &ctx->aio_ctx);
	if (ret < 0 && ret != -EINVAL)
		return ret;
	else if (ret > 0) {
		ctx->pollfd = ret;
		return ctx->pollfd;
	}

	ctx->aio_ctx = (io_context_t)0;
	ret = io_setup(ctx->max_aio_events, &ctx->aio_ctx);
	if (ret < 0)
		return ret;

	if ((ret = pipe(ctx->command_fd)) < 0) {
		DPRINTF("Unable to create command pipe\n");
		return -1;
	}
	if ((ret = pipe(ctx->completion_fd)) < 0) {
		DPRINTF("Unable to create completion pipe\n");
		return -1;
	}
	if ((ret = pthread_create(&ctx->aio_thread, NULL, tap_aio_completion_thread, ctx)) != 0) {
		DPRINTF("Unable to create completion thread\n");
		return -1;
	}

	ctx->pollfd = ctx->completion_fd[0];
	ctx->poll_in_thread = 1;

	tap_aio_continue(ctx);

	return 0;
}
native_linux_aio_provider::native_linux_aio_provider(disk_engine *disk, aio_provider *inner_provider)
	: aio_provider(disk, inner_provider)
{
	memset(&_ctx, 0, sizeof(_ctx));
	auto ret = io_setup(128, &_ctx); // 128 concurrent events
	dassert(ret == 0, "io_setup error, ret = %d", ret);
}
int libcheck_init(struct checker *c)
{
	unsigned long pgsize = getpagesize();
	struct directio_context *ct;
	long flags;

	ct = malloc(sizeof(struct directio_context));
	if (!ct)
		return 1;
	memset(ct, 0, sizeof(struct directio_context));

	if (io_setup(1, &ct->ioctx) != 0) {
		condlog(1, "io_setup failed");
		free(ct);
		return 1;
	}

	if (ioctl(c->fd, BLKBSZGET, &ct->blksize) < 0) {
		MSG(c, "cannot get blocksize, set default");
		ct->blksize = 512;
	}
	if (ct->blksize > 4096) {
		/*
		 * Sanity check for DASD; BSZGET is broken
		 */
		ct->blksize = 4096;
	}
	if (!ct->blksize)
		goto out;

	ct->buf = (unsigned char *)malloc(ct->blksize + pgsize);
	if (!ct->buf)
		goto out;

	flags = fcntl(c->fd, F_GETFL);
	if (flags < 0)
		goto out;
	if (!(flags & O_DIRECT)) {
		flags |= O_DIRECT;
		if (fcntl(c->fd, F_SETFL, flags) < 0)
			goto out;
		ct->reset_flags = 1;
	}

	ct->ptr = (unsigned char *)(((unsigned long)ct->buf + pgsize - 1) & (~(pgsize - 1)));

	/* Successfully initialized, return the context. */
	c->context = (void *)ct;
	return 0;

out:
	if (ct->buf)
		free(ct->buf);
	io_destroy(ct->ioctx);
	free(ct);
	return 1;
}
int main(void)
{
	io_setup();
	interrupt_setup();

	const int msecsDelayPost = 300;

	while (1) {
		_delay_ms(msecsDelayPost);
		overrun++;
	}

	return 0;
}
void trace_queue(struct io *q_iop)
{
	if (q_iop->t.bytes == 0)
		return;

	if (io_setup(q_iop, IOP_Q))
		handle_queue(q_iop);
	else
		io_release(q_iop);
}
static void aio_setup(void)
{
	memset(&aio_ctx, 0, sizeof aio_ctx);
	memset(&aio_cb, 0, sizeof aio_cb);

	if (io_setup(1, &aio_ctx))
		err(2, "aio setup failed");

	make_request = write_test ? aio_pwrite : aio_pread;
}
/*******************************************************************************
 * Function which will perform any remaining platform-specific setup that can
 * occur after the MMU and data cache have been enabled.
 ******************************************************************************/
void bl1_platform_setup(void)
{
	init_nic400();
	init_pcie();

	/* Initialise the IO layer and register platform IO devices */
	io_setup();

	/* Enable and initialize the System level generic timer */
	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN);
}
int main(int argc, char **argv)
{
	pthread_t thread_read;
	pthread_t thread_write;
	int i;
	int ret;

	if (argc != 2)
		fail("only arg should be file name\n");

	for (i = 0; i < BUFSIZE; ++i)
		buf[i] = 'A' + (char)(i % ('Z' - 'A' + 1));
	buf[BUFSIZE - 1] = '\n';

	handle = open(argv[1], O_CREAT | O_TRUNC | O_DIRECT | O_RDWR, 0600);
	if (handle == -1)
		fail("failed to open test file %s, errno: %d\n", argv[1], errno);

	memset(&ctxp, 0, sizeof(ctxp));
	ret = io_setup(MAX_AIO_EVENTS, &ctxp);
	if (ret)
		fail("io_setup returned %d\n", ret);

	for (i = 0; i < MAX_AIO_EVENTS; ++i) {
		iocbs[i] = calloc(1, sizeof(struct iocb));
		if (iocbs[i] == NULL)
			fail("failed to allocate an iocb\n");

		/* iocbs[i]->data = i; */
		iocbs[i]->aio_fildes = handle;
		iocbs[i]->aio_lio_opcode = IO_CMD_PWRITE;
		iocbs[i]->aio_reqprio = 0;
		iocbs[i]->u.c.buf = buf;
		iocbs[i]->u.c.nbytes = BUFSIZE;
		iocbs[i]->u.c.offset = BUFSIZE * i;
	}

	pthread_create(&thread_read, NULL, (void *)&fun_read, NULL);
	pthread_create(&thread_write, NULL, (void *)&fun_writeN, NULL);

	pthread_join(thread_read, NULL);
	pthread_join(thread_write, NULL);

	io_destroy(ctxp);
	close(handle);
	printf("%u iterations of racing extensions and collection passed\n", MAX_AIO_EVENTS);
	return 0;
}
bool FlatFileReader::Open(const wxString& fileName)
{
	m_filename = fileName;

	int err = io_setup(64, &m_aio_context);
	if (err)
		return false;

	m_fd = wxOpen(fileName, O_RDONLY, 0);

	return (m_fd != -1);
}
int leda_aio_init(aio_context_t **ctx_p)
{
	if ((afd = aio_eventfd(0)) == -1) {
		return -1;
	}
	if (io_setup(MAX_AIO_EVENTS, &ctx)) {
		return -1;
	}
	fcntl(afd, F_SETFL, fcntl(afd, F_GETFL, 0) | O_NONBLOCK);

	*ctx_p = &ctx;
	return afd;
}
/*******************************************************************************
 * Function which will evaluate how much of the trusted ram has been gobbled
 * up by BL1 and return the base and size of what's available for loading BL2.
 * It's called after coherency and the MMU have been turned on.
 ******************************************************************************/
void bl1_platform_setup(void)
{
	/* Initialise the IO layer and register platform IO devices */
	io_setup();

	/* Enable and initialize the System level generic timer */
	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_EN);

	/* Initialize the console */
	console_init();

	return;
}
int main(int argc, char *argv[])
{
	if (argc < 2)
		return 1;

	int ctx_id;
	io_context_t ctx = 0;	/* the context must be zeroed before io_setup() */

	if ((ctx_id = io_setup(20, &ctx)) < 0) {
		perror("io_setup");
		return 1;
	}

	return 0;
}
void random_io(int fd, off_t ionum, int access_size, int num_requests)
{
	// (1) Initialize the io_context_t
	io_context_t ctx;
	memset(&ctx, 0, sizeof(io_context_t));
	int r = io_setup(num_requests, &ctx);
	assert(r == 0);

	// (2) Build the iocbs (I/O requests)
	struct iocb **iocbs = new struct iocb*[num_requests];
	char **bufs = new char*[num_requests];
	for (int i = 0; i < num_requests; i++) {
		iocbs[i] = new struct iocb();
		posix_memalign((void **)&bufs[i], 512, access_size);
		off_t block_number = rand() % ionum;
		io_prep_pread(iocbs[i], fd, bufs[i], access_size, block_number * access_size);
		io_set_callback(iocbs[i], read_done);
	}

	// (3) Submit the I/O requests
	r = io_submit(ctx, num_requests, iocbs);
	assert(r == num_requests);

	// (4) Wait for completions and invoke the callback of each finished request
	int cnt = 0;
	while (true) {
		struct io_event events[32];
		int n = io_getevents(ctx, 1, 32, events, NULL);
		if (n > 0)
			cnt += n;
		for (int i = 0; i < n; i++) {
			struct io_event *ev = events + i;
			io_callback_t callback = (io_callback_t)ev->data;
			struct iocb *iocb = ev->obj;
			callback(ctx, iocb, ev->res, ev->res2);
		}
		if (n == 0 || cnt == num_requests)
			break;
	}

	for (int i = 0; i < num_requests; i++) {
		delete iocbs[i];
		free(bufs[i]);
	}
	delete[] iocbs;
	delete[] bufs;
}
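A minimal caller for this routine (not part of the original source) might look like the sketch below; the path, request count, and access size are illustrative, and the file is opened with O_DIRECT since the read buffers are 512-byte aligned:

// Hypothetical driver for random_io(); path and sizes are illustrative.
// Requires _GNU_SOURCE (for O_DIRECT), <fcntl.h>, <unistd.h>, <cstdlib>, <ctime>.
int main()
{
	srand(time(NULL));
	int fd = open("/tmp/aio-test.dat", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	off_t file_size = lseek(fd, 0, SEEK_END);	// size of the test file in bytes
	int access_size = 4096;				// each request reads one 4 KiB block
	// Issue 32 concurrent random reads over the whole file.
	random_io(fd, file_size / access_size, access_size, 32);
	close(fd);
	return 0;
}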
/*
 * Test whether counter overflow is detected and handled correctly.
 *
 * It is not possible to directly overflow the counter using the
 * write() syscall. Overflows occur when the counter is incremented
 * from kernel space, in an irq context, when it is not possible to
 * block the calling thread of execution.
 *
 * The AIO subsystem internally uses the eventfd mechanism for
 * notification of completion of read or write requests. In this test
 * we trigger a counter overflow by setting the counter value to the
 * max possible value initially. When the AIO subsystem notifies
 * through the eventfd counter, the counter overflows.
 *
 * NOTE: If the counter starts from an initial value of 0, it will
 * take decades for an overflow to occur. But since we set the initial
 * value to the max possible counter value, we are able to cause it to
 * overflow with a single increment.
 *
 * When the counter overflows, the following are tested:
 * 1. Check whether a POLLERR event occurs in poll() for the eventfd.
 * 2. Check whether readfd_set/writefd_set is set in select() for the eventfd.
 * 3. The counter value is UINT64_MAX.
 */
static int trigger_eventfd_overflow(int evfd, int *fd, io_context_t *ctx)
{
	int ret;
	struct iocb iocb;
	struct iocb *iocbap[1];
	static char buf[4 * 1024];

	*ctx = 0;
	ret = io_setup(16, ctx);
	if (ret < 0) {
		errno = -ret;
		tst_resm(TINFO | TERRNO, "io_setup error");
		return -1;
	}

	*fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (*fd == -1) {
		tst_resm(TINFO | TERRNO, "open(testfile) failed");
		goto err_io_destroy;
	}

	ret = set_counter(evfd, UINT64_MAX - 1);
	if (ret == -1) {
		tst_resm(TINFO, "error setting counter to UINT64_MAX-1");
		goto err_close_file;
	}

	io_prep_pwrite(&iocb, *fd, buf, sizeof(buf), 0);
	io_set_eventfd(&iocb, evfd);

	iocbap[0] = &iocb;
	ret = io_submit(*ctx, 1, iocbap);
	if (ret < 0) {
		errno = -ret;
		tst_resm(TINFO | TERRNO, "error submitting iocb");
		goto err_close_file;
	}

	return 0;

err_close_file:
	close(*fd);
err_io_destroy:
	io_destroy(*ctx);

	return -1;
}
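The three checks listed in the comment are performed by the test's callers, which are not shown here. A minimal sketch of the first check, the poll()-based one, using a hypothetical helper name and assuming the eventfd and the trigger function above, could look like:

/* Hypothetical helper, not part of the LTP source above: after
 * trigger_eventfd_overflow() succeeds, eventfd(2) documents that a counter
 * overflow makes poll() report POLLERR on the descriptor. */
static int check_overflow_poll(int evfd)
{
	struct pollfd pfd = { .fd = evfd, .events = POLLIN, .revents = 0 };

	/* wait up to 10 s for the AIO completion to bump the counter */
	if (poll(&pfd, 1, 10000) == -1)
		return -1;
	return (pfd.revents & POLLERR) ? 0 : -1;
}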
static void trace_message(struct io *iop)
{
	char scratch[15];
	char msg[iop->t.pdu_len + 1];

	if (!io_setup(iop, IOP_M))
		return;

	memcpy(msg, iop->pdu, iop->t.pdu_len);
	msg[iop->t.pdu_len] = '\0';

	fprintf(msgs_ofp, "%s %5d.%09lu %s\n",
		make_dev_hdr(scratch, 15, iop->dip, 1),
		(int)SECONDS(iop->t.time),
		(unsigned long)NANO_SECONDS(iop->t.time), msg);
}
static int associate_workers_with_ns(void)
{
	struct ns_entry *entry = g_namespaces;
	struct worker_thread *worker = g_workers;
	struct ns_worker_ctx *ns_ctx;
	int i, count;

	count = g_num_namespaces > g_num_workers ? g_num_namespaces : g_num_workers;

	for (i = 0; i < count; i++) {
		ns_ctx = malloc(sizeof(struct ns_worker_ctx));
		if (!ns_ctx) {
			return -1;
		}
		memset(ns_ctx, 0, sizeof(*ns_ctx));

#ifdef HAVE_LIBAIO
		ns_ctx->events = calloc(g_queue_depth, sizeof(struct io_event));
		if (!ns_ctx->events) {
			return -1;
		}
		ns_ctx->ctx = 0;
		if (io_setup(g_queue_depth, &ns_ctx->ctx) < 0) {
			perror("io_setup");
			return -1;
		}
#endif

		printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
		ns_ctx->entry = entry;
		ns_ctx->next = worker->ns_ctx;
		worker->ns_ctx = ns_ctx;

		worker = worker->next;
		if (worker == NULL) {
			worker = g_workers;
		}

		entry = entry->next;
		if (entry == NULL) {
			entry = g_namespaces;
		}
	}

	return 0;
}
static PyObject *IOManager_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
	static char *kwlist[] = {"nr_events", NULL};
	unsigned nr_events;
	IOManager *rv;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "I", kwlist, &nr_events))
		return NULL;
	if (!(rv = (IOManager *)type->tp_alloc(type, 0)))
		return NULL;

	if ((rv->fd = eventfd(0, 0)) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		Py_DECREF(rv);
		return NULL;
	}
	if (fcntl(rv->fd, F_SETFL, O_NONBLOCK)) {
		PyErr_SetFromErrno(PyExc_OSError);
		Py_DECREF(rv);
		return NULL;
	}

	memset(&rv->ctx, 0, sizeof(io_context_t));
	if (io_setup(nr_events, &rv->ctx)) {
		PyErr_SetFromErrno(PyExc_OSError);
		close(rv->fd);
		Py_DECREF(rv);
		return NULL;
	}

	if (!(rv->events = PyMem_Malloc(sizeof(struct io_event) * nr_events))) {
		close(rv->fd);
		io_destroy(rv->ctx);
		Py_DECREF(rv);
		return NULL;
	}
	if (!(rv->cbs = PyMem_Malloc(sizeof(struct iocb *) * nr_events))) {
		close(rv->fd);
		PyMem_Free(rv->events);
		io_destroy(rv->ctx);
		Py_DECREF(rv);
		return NULL;
	}

	rv->nr_events = nr_events;
	rv->pending_events = 0;

	return (void *)rv;
}
/*******************************************************************************
 * Perform platform specific setup. For now just initialize the memory location
 * to use for passing arguments to BL31.
 ******************************************************************************/
void bl2_platform_setup()
{
	/*
	 * Do initial security configuration to allow DRAM/device access. On
	 * Base FVP only DRAM security is programmable (via TrustZone), but
	 * other platforms might have more programmable security devices
	 * present.
	 */
	plat_security_setup();

	/* Initialise the IO layer and register platform IO devices */
	io_setup();

	/*
	 * Ensure that the secure DRAM memory used for passing BL31 arguments
	 * does not overlap with the BL32_BASE.
	 */
	assert(BL32_BASE > TZDRAM_BASE + sizeof(bl31_args_t));

	/* Use the Trusted DRAM for passing args to BL31 */
	bl2_to_bl31_args = (bl31_args_t *)TZDRAM_BASE;

	/* Populate the extents of memory available for loading BL33 */
	bl2_to_bl31_args->bl33_meminfo.total_base = DRAM_BASE;
	bl2_to_bl31_args->bl33_meminfo.total_size = DRAM_SIZE;
	bl2_to_bl31_args->bl33_meminfo.free_base = DRAM_BASE;
	bl2_to_bl31_args->bl33_meminfo.free_size = DRAM_SIZE;
	bl2_to_bl31_args->bl33_meminfo.attr = 0;
	bl2_to_bl31_args->bl33_meminfo.next = 0;

	/*
	 * Populate the extents of memory available for loading BL32.
	 * TODO: We are temporarily executing BL2 from TZDRAM; will eventually
	 * move to Trusted SRAM
	 */
	bl2_to_bl31_args->bl32_meminfo.total_base = BL32_BASE;
	bl2_to_bl31_args->bl32_meminfo.free_base = BL32_BASE;

	bl2_to_bl31_args->bl32_meminfo.total_size =
		(TZDRAM_BASE + TZDRAM_SIZE) - BL32_BASE;
	bl2_to_bl31_args->bl32_meminfo.free_size =
		(TZDRAM_BASE + TZDRAM_SIZE) - BL32_BASE;

	bl2_to_bl31_args->bl32_meminfo.attr = BOT_LOAD;
	bl2_to_bl31_args->bl32_meminfo.next = 0;
}
int aio_initialize(unsigned int max_queue_depth)
{
	int rtn;
	int i;

	max_depth = max_queue_depth;

	rtn = io_setup(125, &context);
	if (rtn < 0) {
		PRINT("Error on setup I/O, func:%s, line:%d, errno=%d\n", __func__, __LINE__, rtn);
		exit(1);
	}

	/* pthread_create() returns a positive error number on failure, not -1 */
	rtn = pthread_create(&thr, NULL, &aio_dequeue, &context);
	if (rtn != 0) {
		PRINT("Error on thread creation, line:%d, errno:%d\n", __LINE__, rtn);
		exit(1);
	}

	return 0;
}
static int blockdev_aio_initialize_io_channel(struct blockdev_aio_io_channel *ch)
{
	ch->queue_depth = 128;

	if (io_setup(ch->queue_depth, &ch->io_ctx) < 0) {
		SPDK_ERRLOG("async I/O context setup failure\n");
		return -1;
	}

	ch->events = calloc(sizeof(struct io_event), ch->queue_depth);
	if (!ch->events) {
		io_destroy(ch->io_ctx);
		return -1;
	}

	return 0;
}
static int afalg_init_aio(afalg_aio *aio)
{
	int r = -1;

	/* Initialise for AIO */
	aio->aio_ctx = 0;
	r = io_setup(MAX_INFLIGHTS, &aio->aio_ctx);
	if (r < 0) {
		ALG_PERR("%s: io_setup error : ", __func__);
		AFALGerr(AFALG_F_AFALG_INIT_AIO, AFALG_R_IO_SETUP_FAILED);
		return 0;
	}

	memset(aio->cbt, 0, sizeof(aio->cbt));
	aio->efd = -1;
	aio->mode = MODE_UNINIT;

	return 1;
}