/* Deliver completion notification for the finished request REQ: first the
   per-request sigevent from the aiocb itself, then every entry on the
   request's waiting list (synchronous waiters and `lio_listio' group
   notifications).  NOTE(review): the waiting list is shared state, so this
   presumably runs with the AIO request mutex held -- confirm against the
   callers in aio_misc.c.  */
void
internal_function
__aio_notify (struct requestlist *req)
{
  struct waitlist *waitlist;
  struct aiocb *aiocbp = &req->aiocbp->aiocb;

  /* Send the per-request notification (signal or thread, as configured in
     the aiocb's sigevent).  The BROKEN_THREAD_SIGNALS variant must pass the
     originating caller's PID explicitly.  */
#ifdef BROKEN_THREAD_SIGNALS
  if (__aio_notify_only (&aiocbp->aio_sigevent, req->caller_pid) != 0)
#else
  if (__aio_notify_only (&aiocbp->aio_sigevent) != 0)
#endif
    {
      /* Notification itself failed; record that in the aiocb.
	 XXX What shall we do if already an error is set by
	 read/write/fsync?  */
      aiocbp->__error_code = errno;
      aiocbp->__return_value = -1;
    }

  /* Now also notify possibly waiting threads.  */
  waitlist = req->waiting;
  while (waitlist != NULL)
    {
      /* Save the link first: a synchronous waiter may deallocate its
	 on-stack waitlist node as soon as it is woken.  */
      struct waitlist *next = waitlist->next;

      /* Decrement the counter.  This is used in both cases.  */
      --*waitlist->counterp;

      if (waitlist->sigevp == NULL)
	/* A thread blocked in aio_suspend/lio_listio(LIO_WAIT): wake it.  */
	pthread_cond_signal (waitlist->cond);
      else
	/* This is part of an asynchronous `lio_listio' operation.  If
	   this request is the last one, send the signal.  */
	if (*waitlist->counterp == 0)
	  {
#ifdef BROKEN_THREAD_SIGNALS
	    __aio_notify_only (waitlist->sigevp, waitlist->caller_pid);
#else
	    __aio_notify_only (waitlist->sigevp);
#endif
	    /* This is tricky.  See lio_listio.c for the reason why
	       this works.  (Presumably COUNTERP points at the first
	       member of the heap-allocated async_waitlist, so freeing
	       it releases the whole block -- confirm in lio_listio.c.)  */
	    free ((void *) waitlist->counterp);
	  }

      waitlist = next;
    }
}
/* Deliver completion notification for the finished request REQ: the
   per-request sigevent first, then every waiter queued on the request.
   Unlike the older variant, this one also propagates a failure into the
   waiter's RESULT slot so lio_listio(LIO_WAIT) can report EIO.
   NOTE(review): appears to require the AIO request mutex to be held by
   the caller -- confirm against aio_misc.c.  */
void
__aio_notify (struct requestlist *req)
{
  struct waitlist *waitlist;
  struct aiocb *aiocbp = &req->aiocbp->aiocb;

  if (__aio_notify_only (&aiocbp->aio_sigevent) != 0)
    {
      /* Notification failed; record the error in the aiocb.
	 XXX What shall we do if already an error is set by
	 read/write/fsync?  */
      aiocbp->__error_code = errno;
      aiocbp->__return_value = -1;
    }

  /* Now also notify possibly waiting threads.  */
  waitlist = req->waiting;
  while (waitlist != NULL)
    {
      /* Save the link first: the waiter may reuse or release its
	 waitlist node once it is woken.  */
      struct waitlist *next = waitlist->next;

      if (waitlist->sigevp == NULL)
	{
	  /* Synchronous waiter (aio_suspend / lio_listio LIO_WAIT).
	     Tell it the request failed, if it asked for a result.  */
	  if (waitlist->result != NULL && aiocbp->__return_value == -1)
	    *waitlist->result = -1;

#ifdef DONT_NEED_AIO_MISC_COND
	  /* Platform supplies its own wakeup primitive (e.g. futex).  */
	  AIO_MISC_NOTIFY (waitlist);
#else
	  /* Decrement the counter.  */
	  --*waitlist->counterp;

	  pthread_cond_signal (waitlist->cond);
#endif
	}
      else
	/* This is part of an asynchronous `lio_listio' operation.  If
	   this request is the last one, send the signal.  */
	if (--*waitlist->counterp == 0)
	  {
	    __aio_notify_only (waitlist->sigevp);
	    /* This is tricky.  See lio_listio.c for the reason why
	       this works.  (Presumably COUNTERP points at the first
	       member of the heap-allocated async_waitlist, so freeing
	       it releases the whole block -- confirm in lio_listio.c.)  */
	    free ((void *) waitlist->counterp);
	  }

      waitlist = next;
    }
}
/* Shared implementation behind lio_listio/lio_listio64.  Enqueue every
   non-NULL, non-LIO_NOP entry of LIST (NENT elements) as an asynchronous
   request.  With LIO_WAIT, block until all enqueued requests complete;
   with LIO_NOWAIT, return immediately and arrange for SIG to be delivered
   when the last request finishes.  Returns 0 on success, -1 with errno
   set otherwise.  */
static int
lio_listio_internal (int mode, struct aiocb *const list[], int nent,
		     struct sigevent *sig)
{
  struct sigevent defsigev;
  /* One slot per LIST entry; NULL where nothing was enqueued (VLA).  */
  struct requestlist *requests[nent];
  int cnt;
  /* volatile: decremented from __aio_notify on other threads while we
     wait on it below.  */
  volatile int total = 0;
  int result = 0;

  /* A missing SIG means "no notification".  */
  if (sig == NULL)
    {
      defsigev.sigev_notify = SIGEV_NONE;
      sig = &defsigev;
    }

  /* Request the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Now we can enqueue all requests.  Since we already acquired the
     mutex the enqueue function need not do this.  */
  for (cnt = 0; cnt < nent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
      {
	/* For modes without per-request events, suppress the individual
	   aiocb notification; only the group SIG fires.  */
	if (NO_INDIVIDUAL_EVENT_P (mode))
	  list[cnt]->aio_sigevent.sigev_notify = SIGEV_NONE;
	requests[cnt] = __aio_enqueue_request ((aiocb_union *) list[cnt],
					       (list[cnt]->aio_lio_opcode
						| LIO_OPCODE_BASE));

	if (requests[cnt] != NULL)
	  /* Successfully enqueued.  */
	  ++total;
	else
	  /* Signal that we've seen an error.  `errno' and the error code
	     of the aiocb will tell more.  */
	  result = -1;
      }
    else
      requests[cnt] = NULL;

  if (total == 0)
    {
      /* We don't have anything to do except signalling if we work
	 asynchronously.  */

      /* Release the mutex.  We do this before raising a signal since the
	 signal handler might do a `siglongjmp' and then the mutex is
	 locked forever.  */
      pthread_mutex_unlock (&__aio_requests_mutex);

      if (LIO_MODE (mode) == LIO_NOWAIT)
	{
#ifdef BROKEN_THREAD_SIGNALS
	  __aio_notify_only (sig,
			     sig->sigev_notify == SIGEV_SIGNAL
			     ? getpid () : 0);
#else
	  __aio_notify_only (sig);
#endif
	}

      return result;
    }
  else if (LIO_MODE (mode) == LIO_WAIT)
    {
      /* Synchronous mode: link an on-stack waitlist node into every
	 enqueued request and sleep until TOTAL drops to zero.  */
#ifndef DONT_NEED_AIO_MISC_COND
      pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
      int oldstate;
#endif
      struct waitlist waitlist[nent];

      /* Re-count while wiring up the nodes; __aio_notify decrements
	 TOTAL through counterp as each request completes.  */
      total = 0;
      for (cnt = 0; cnt < nent; ++cnt)
	{
	  assert (requests[cnt] == NULL || list[cnt] != NULL);

	  if (requests[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
	    {
#ifndef DONT_NEED_AIO_MISC_COND
	      waitlist[cnt].cond = &cond;
#endif
	      waitlist[cnt].result = &result;
	      waitlist[cnt].next = requests[cnt]->waiting;
	      waitlist[cnt].counterp = &total;
	      /* NULL sigevp marks this as a synchronous waiter.  */
	      waitlist[cnt].sigevp = NULL;
#ifdef BROKEN_THREAD_SIGNALS
	      waitlist[cnt].caller_pid = 0;	/* Not needed.  */
#endif
	      requests[cnt]->waiting = &waitlist[cnt];
	      ++total;
	    }
	}

#ifdef DONT_NEED_AIO_MISC_COND
      AIO_MISC_WAIT (result, total, NULL, 0);
#else
      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are
	 cancellation points we must be careful.  We added entries to
	 the waiting lists which we must remove.  So defer cancellation
	 for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);

      /* Sleep until every request completed or one reported failure
	 (__aio_notify sets RESULT to -1 through waitlist.result).  */
      while (total > 0 && result == 0)
	result = pthread_cond_wait (&cond, &__aio_requests_mutex);

      /* Now it's time to restore the cancellation state.  */
      pthread_setcancelstate (oldstate, NULL);

      /* Release the conditional variable.  */
      if (pthread_cond_destroy (&cond) != 0)
	/* This must never happen.  */
	abort ();
#endif

      /* If any of the I/O requests failed, return -1 and set errno.  */
      if (result != 0)
	{
	  __set_errno (result == EINTR ? EINTR : EIO);
	  result = -1;
	}
    }
  else
    {
      /* LIO_NOWAIT with a group notification: allocate a waitlist on the
	 heap since it must outlive this call.  __aio_notify frees it
	 (via counterp) when the last request completes.  */
      struct async_waitlist *waitlist;

      waitlist = (struct async_waitlist *)
	malloc (sizeof (struct async_waitlist)
		+ (nent * sizeof (struct waitlist)));

      if (waitlist == NULL)
	{
	  /* No memory for the notification bookkeeping; the requests
	     are already enqueued, but we must report the failure.  */
	  __set_errno (EAGAIN);
	  result = -1;
	}
      else
	{
#ifdef BROKEN_THREAD_SIGNALS
	  pid_t caller_pid = sig->sigev_notify == SIGEV_SIGNAL
			     ? getpid () : 0;
#endif
	  total = 0;

	  for (cnt = 0; cnt < nent; ++cnt)
	    {
	      assert (requests[cnt] == NULL || list[cnt] != NULL);

	      if (requests[cnt] != NULL
		  && list[cnt]->aio_lio_opcode != LIO_NOP)
		{
#ifndef DONT_NEED_AIO_MISC_COND
		  waitlist->list[cnt].cond = NULL;
#endif
		  waitlist->list[cnt].result = NULL;
		  waitlist->list[cnt].next = requests[cnt]->waiting;
		  waitlist->list[cnt].counterp = &waitlist->counter;
		  /* Non-NULL sigevp marks this as a lio_listio group
		     waiter; the last completion sends SIGEV.  */
		  waitlist->list[cnt].sigevp = &waitlist->sigev;
#ifdef BROKEN_THREAD_SIGNALS
		  waitlist->list[cnt].caller_pid = caller_pid;
#endif
		  requests[cnt]->waiting = &waitlist->list[cnt];
		  ++total;
		}
	    }

	  waitlist->counter = total;
	  /* Copy SIG: the caller's sigevent (or DEFSIGEV) goes out of
	     scope before the notification fires.  */
	  waitlist->sigev = *sig;
	}
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return result;
}