int main() { char tmpfname[256]; #define BUF_SIZE 111 unsigned char buf[BUF_SIZE]; unsigned char check[BUF_SIZE]; int fd; struct aiocb aiocb; int i; if (sysconf(_SC_ASYNCHRONOUS_IO) < 200112L) return PTS_UNSUPPORTED; snprintf(tmpfname, sizeof(tmpfname), "/tmp/pts_aio_read_5_1_%d", getpid()); unlink(tmpfname); fd = open(tmpfname, O_CREAT | O_RDWR | O_EXCL, S_IRUSR | S_IWUSR); if (fd == -1) { printf(TNAME " Error at open(): %s\n", strerror(errno)); exit(PTS_UNRESOLVED); } unlink(tmpfname); for (i = 0; i < BUF_SIZE; i++) buf[i] = i; if (write(fd, buf, BUF_SIZE) != BUF_SIZE) { printf(TNAME " Error at write(): %s\n", strerror(errno)); exit(PTS_UNRESOLVED); } memset(check, 0xaa, BUF_SIZE); memset(&aiocb, 0, sizeof(struct aiocb)); aiocb.aio_fildes = fd; aiocb.aio_buf = check; aiocb.aio_nbytes = BUF_SIZE; aiocb.aio_lio_opcode = LIO_WRITE; if (aio_read(&aiocb) == -1) { printf(TNAME " Error at aio_read(): %s\n", strerror(errno)); exit(PTS_FAIL); } int err; int ret; /* Wait until end of transaction */ do { usleep(10000); err = aio_error(&aiocb); } while (err == EINPROGRESS); ret = aio_return(&aiocb); if (err != 0) { printf(TNAME " Error at aio_error() : %s\n", strerror(err)); close(fd); exit(PTS_FAIL); } if (ret != BUF_SIZE) { printf(TNAME " Error at aio_return()\n"); close(fd); exit(PTS_FAIL); } /* check it */ for (i = 0; i < BUF_SIZE; i++) { if (buf[i] != check[i]) { printf(TNAME " read values are corrupted\n"); exit(PTS_FAIL); } } close(fd); printf("Test PASSED\n"); return PTS_PASS; }
/**
 * Waits on a kqueue for at least @a cMinReqs submitted AIO requests to
 * complete, harvesting their status and returning their handles.
 *
 * @returns IPRT status code: VINF_SUCCESS, VERR_TIMEOUT when cMillies
 *          elapses, VERR_INTERRUPTED when woken up externally,
 *          VERR_FILE_AIO_NO_REQUEST when nothing is outstanding, or an
 *          errno conversion on kevent() failure.
 * @param   hAioCtx   The AIO context handle.
 * @param   cMinReqs  Minimum number of completions to wait for (0 == 1).
 * @param   cMillies  Timeout in milliseconds or RT_INDEFINITE_WAIT.
 * @param   pahReqs   Where to store the handles of completed requests.
 * @param   cReqs     Size of the @a pahReqs array; must be >= cMinReqs.
 * @param   pcReqs    Where to store the number of handles returned.
 */
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec *pTimeout = NULL;
    struct timespec  Timeout = {0,0};
    uint64_t         StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        /*
         * Fix: clamp the event count to the size of aKEvents.  The old
         * code compared cMinReqs but passed cReqs, so a caller with
         * cReqs > AIO_MAXIMUM_REQUESTS_PER_CONTEXT would let kevent()
         * write past the end of the on-stack array.
         */
        int cRequestsToWait = cReqs < AIO_MAXIMUM_REQUESTS_PER_CONTEXT
                            ? (int)cReqs
                            : AIO_MAXIMUM_REQUESTS_PER_CONTEXT;
        int rcBSD;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we will leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);
            if (cbTransfered < 0)
            {
                /*
                 * Fix: aio_return() returns -1 and sets errno on failure,
                 * so convert errno rather than the -1 return value.
                 */
                pReqInt->Rc = RTErrConvertFromErrno(errno);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done Yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;

        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
/*
 * FreeBSD AIO/kqueue stress test: queues MAX_IOCBS aio_write()s per run
 * with SIGEV_KEVENT notification, randomly cancels a few, then drains
 * the kqueue and verifies each completion transferred a full buffer.
 * Exits 0 when every completion passed, 1 otherwise.
 */
int
main(int argc, char *argv[])
{
	struct aiocb *iocb[MAX_IOCBS], *kq_iocb;
	char *file, pathname[sizeof(PATH_TEMPLATE)+1];
	struct kevent ke, kq_returned;
	struct timespec ts;
	char buffer[32768];
#ifdef DEBUG
	int cancel, error;
#endif
	int failed = 0, fd, kq, pending, result, run;
	int tmp_file = 0;
	unsigned i, j;

	PLAIN_REQUIRE_KERNEL_MODULE("aio", 0);
	PLAIN_REQUIRE_UNSAFE_AIO(0);

	kq = kqueue();
	if (kq < 0) {
		/* fix: typo and embedded newline in the perror() prefix
		 * (perror appends ": <error>\n" itself). */
		perror("No kqueue");
		exit(1);
	}

	/* Either work on a throw-away temp file or on a path given on
	 * the command line. */
	if (argc == 1) {
		strcpy(pathname, PATH_TEMPLATE);
		fd = mkstemp(pathname);
		file = pathname;
		tmp_file = 1;
	} else {
		file = argv[1];
		fd = open(file, O_RDWR|O_CREAT, 0666);
	}
	if (fd == -1)
		err(1, "Can't open %s\n", file);

	for (run = 0; run < MAX_RUNS; run++) {
#ifdef DEBUG
		printf("Run %d\n", run);
#endif
		/* One zeroed control block per slot. */
		for (i = 0; i < nitems(iocb); i++) {
			iocb[i] = (struct aiocb *)calloc(1,
			    sizeof(struct aiocb));
			if (iocb[i] == NULL)
				err(1, "calloc");
		}

		pending = 0;
		for (i = 0; i < nitems(iocb); i++) {
			pending++;
			iocb[i]->aio_nbytes = sizeof(buffer);
			iocb[i]->aio_buf = buffer;
			iocb[i]->aio_fildes = fd;
			/* Non-overlapping offsets per slot and per run. */
			iocb[i]->aio_offset = iocb[i]->aio_nbytes * i * run;

			/* Deliver completion through the kqueue; udata
			 * carries the aiocb pointer back to us. */
			iocb[i]->aio_sigevent.sigev_notify_kqueue = kq;
			iocb[i]->aio_sigevent.sigev_value.sival_ptr = iocb[i];
			iocb[i]->aio_sigevent.sigev_notify = SIGEV_KEVENT;

			result = aio_write(iocb[i]);
			if (result != 0) {
				perror("aio_write");
				printf("Result %d iteration %d\n", result, i);
				exit(1);
			}
#ifdef DEBUG
			printf("WRITE %d is at %p\n", i, iocb[i]);
#endif
			/* Cancel roughly 1 in 64 requests at random to
			 * exercise the cancellation path. */
			result = rand();
			if (result < RAND_MAX/32) {
				if (result > RAND_MAX/64) {
					result = aio_cancel(fd, iocb[i]);
#ifdef DEBUG
					printf("Cancel %d %p result %d\n",
					    i, iocb[i], result);
#endif
					if (result == AIO_CANCELED) {
						aio_return(iocb[i]);
						iocb[i] = NULL;
						pending--;
					}
				}
			}
		}
#ifdef DEBUG
		cancel = nitems(iocb) - pending;
#endif

		i = 0;
		while (pending) {
			/* Poll the kqueue until an event with a non-NULL
			 * udata (a completed aiocb) shows up. */
			for (;;) {
				bzero(&ke, sizeof(ke));
				bzero(&kq_returned, sizeof(ke));
				ts.tv_sec = 0;
				ts.tv_nsec = 1;
				result = kevent(kq, NULL, 0,
				    &kq_returned, 1, &ts);
#ifdef DEBUG
				error = errno;
#endif
				if (result < 0)
					perror("kevent error: ");
				kq_iocb = kq_returned.udata;
#ifdef DEBUG
				/* fix: cast non-pointer kevent fields for
				 * the %p conversions (UB otherwise). */
				printf("kevent %d %d errno %d return.ident %p "
				    "return.data %p return.udata %p %p\n",
				    i, result, error,
				    (void *)kq_returned.ident,
				    (void *)(intptr_t)kq_returned.data,
				    kq_returned.udata, kq_iocb);
#endif
				if (kq_iocb)
					break;
#ifdef DEBUG
				printf("Try again left %d out of %d %d\n",
				    pending, nitems(iocb), cancel);
#endif
			}

			/* Map the returned udata back to its slot index. */
			for (j = 0; j < nitems(iocb) && iocb[j] != kq_iocb;
			    j++)
				;
#ifdef DEBUG
			printf("kq_iocb %p\n", kq_iocb);
			printf("Error Result for %d is %d pending %d\n",
			    j, result, pending);
#endif
			result = aio_return(kq_iocb);
#ifdef DEBUG
			printf("Return Result for %d is %d\n\n", j, result);
#endif
			if (result != sizeof(buffer)) {
				printf("FAIL: run %d, operation %d, result %d "
				    " (errno=%d) should be %zu\n", run, pending,
				    result, errno, sizeof(buffer));
				failed++;
			} else
				printf("PASS: run %d, left %d\n", run,
				    pending - 1);

			free(kq_iocb);
			/* fix: guard against an unknown udata — the search
			 * loop leaves j == nitems(iocb) then, and writing
			 * iocb[j] would be out of bounds. */
			if (j < nitems(iocb))
				iocb[j] = NULL;
			pending--;
			i++;
		}

		for (i = 0; i < nitems(iocb); i++)
			free(iocb[i]);
	}

	if (tmp_file)
		unlink(pathname);

	if (failed != 0)
		printf("FAIL: %d tests failed\n", failed);
	else
		printf("PASS: All tests passed\n");

	exit (failed == 0 ? 0 : 1);
}