void aioc_free(FAR struct aio_container_s *aioc)
{
  DEBUGASSERT(aioc);

  /* Put the container back on the free list, protected by the AIO lock. */

  aio_lock();
  dq_addlast(&aioc->aioc_link, &g_aioc_free);
  aio_unlock();

  /* Then post the counting semaphore, announcing that one more free AIO
   * container is available to any waiter in aioc_alloc().
   */

  sem_post(&g_aioc_freesem);
}
/* Detach the AIO control block from a pending-transfer container and
 * recycle the container.  Returns the decanted AIO control block.
 */

FAR struct aiocb *aioc_decant(FAR struct aio_container_s *aioc)
{
  FAR struct aiocb *aiocbp;

  DEBUGASSERT(aioc);

  aio_lock();

  /* Unlink the container from the list of pending transfers */

  dq_rem(&aioc->aioc_link, &g_aio_pending);

  /* Extract the AIO control block, then return the container to the free
   * list.  NOTE(review): aioc_free() re-takes the AIO lock while we still
   * hold it here; this nesting presumes the lock is recursive — confirm
   * against the aio_lock() implementation.
   */

  aiocbp = aioc->aioc_aiocbp;
  aioc_free(aioc);
  aio_unlock();

  return aiocbp;
}
/* Allocate an AIO transfer container, blocking until one is available.
 * Never returns NULL: the counting semaphore guarantees a container is
 * reserved for us before we touch the free list.
 */

FAR struct aio_container_s *aioc_alloc(void)
{
  FAR struct aio_container_s *aioc;

  /* Take a count from the semaphore.  sem_wait() may fail if it is
   * interrupted by a signal; any other failure is unexpected.
   */

  for (; ; )
    {
      if (sem_wait(&g_aioc_freesem) == 0)
        {
          break;
        }

      DEBUGASSERT(get_errno() == EINTR);
    }

  /* Detach the first free container under the AIO lock */

  aio_lock();
  aioc = (FAR struct aio_container_s *)dq_remfirst(&g_aioc_free);
  aio_unlock();

  DEBUGASSERT(aioc);
  return aioc;
}
/* Attempt to cancel asynchronous I/O operations.
 *
 * fildes - File descriptor whose outstanding operations are to be
 *          cancelled.
 * aiocbp - If non-NULL, cancel only the single operation described by this
 *          AIO control block; if NULL, cancel every pending operation on
 *          fildes.
 *
 * Returns AIO_CANCELED if the requested operation(s) were cancelled,
 * AIO_NOTCANCELED if at least one operation could not be cancelled (it had
 * already been started on the worker thread), or AIO_ALLDONE if nothing was
 * pending.
 */

int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
{
  FAR struct aio_container_s *aioc;
  FAR struct aio_container_s *next;
  int status;
  int ret;

  /* Check if a non-NULL aiocbp was provided */

  /* Lock the scheduler so that no I/O events can complete on the worker
   * thread until we complete this operation.  The AIO list lock is held for
   * the whole traversal; aioc_decant() re-takes it (recursively).
   */

  ret = AIO_ALLDONE;
  sched_lock();
  aio_lock();
  if (aiocbp)
    {
      /* Check if the I/O has completed */

      if (aiocbp->aio_result == -EINPROGRESS)
        {
          /* No.. Find the container for this AIO control block */

          for (aioc = (FAR struct aio_container_s *)g_aio_pending.head;
               aioc && aioc->aioc_aiocbp != aiocbp;
               aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink);

          /* Did we find a container for this fildes?  We should; the
           * aio_result says that the transfer is pending.  If not we return
           * AIO_ALLDONE.
           */

          if (aioc)
            {
              /* Yes... attempt to cancel the I/O.  There are two
               * possibilities: (1) the work has already been started and
               * is no longer queued, or (2) the work has not been started
               * and is still in the work queue.  Only the second case can
               * be cancelled.  work_cancel() will return -ENOENT in the
               * first case.
               */

              status = work_cancel(LPWORK, &aioc->aioc_work);
              if (status >= 0)
                {
                  aiocbp->aio_result = -ECANCELED;
                  ret = AIO_CANCELED;
                }
              else
                {
                  ret = AIO_NOTCANCELED;
                }

              /* Remove the container from the list of pending transfers */

              (void)aioc_decant(aioc);
            }
        }
    }
  else
    {
      /* No aiocbp.. cancel all outstanding I/O for the fildes */

      next = (FAR struct aio_container_s *)g_aio_pending.head;
      do
        {
          /* Find the next container with this AIO control block.  'next'
           * was captured before the previous container was decanted, so
           * the traversal survives list removal.
           */

          for (aioc = next;
               aioc && aioc->aioc_aiocbp->aio_fildes != fildes;
               aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink);

          /* Did we find the container?  We should; the aio_result says
           * that the transfer is pending.  If not we return AIO_ALLDONE.
           */

          if (aioc)
            {
              /* Yes... attempt to cancel the I/O.  There are two
               * possibilities: (1) the work has already been started and
               * is no longer queued, or (2) the work has not been started
               * and is still in the work queue.  Only the second case can
               * be cancelled.  work_cancel() will return -ENOENT in the
               * first case.
               */

              status = work_cancel(LPWORK, &aioc->aioc_work);

              /* Remove the container from the list of pending transfers.
               * Capture the successor first: decanting unlinks the node.
               */

              next = (FAR struct aio_container_s *)aioc->aioc_link.flink;
              aiocbp = aioc_decant(aioc);
              DEBUGASSERT(aiocbp);

              if (status >= 0)
                {
                  aiocbp->aio_result = -ECANCELED;

                  /* AIO_NOTCANCELED is sticky: one uncancellable transfer
                   * dominates the return value.
                   */

                  if (ret != AIO_NOTCANCELED)
                    {
                      ret = AIO_CANCELED;
                    }
                }
              else
                {
                  ret = AIO_NOTCANCELED;
                }
            }
        }
      while (aioc);
    }

  aio_unlock();
  sched_unlock();
  return ret;
}
/* Wrap an AIO control block in a container and add it to the list of
 * pending asynchronous I/O.
 *
 * aiocbp - The AIO control block to be contained.  aiocbp->aio_fildes
 *          selects either a file (fs_getfilep) or a socket (sockfd_socket).
 *
 * Returns the new container on success; NULL on failure with the errno
 * value set (EBADF for a bad socket descriptor; fs_getfilep() sets its own
 * errno for a bad file descriptor).
 */

FAR struct aio_container_s *aio_contain(FAR struct aiocb *aiocbp)
{
  FAR struct aio_container_s *aioc;
  union
  {
#ifdef AIO_HAVE_FILEP
    FAR struct file *filep;
#endif
#ifdef AIO_HAVE_PSOCK          /* FIX: was AIO_HAVE_FILEP; psock member vanished
                                * in PSOCK-only configurations. */
    FAR struct socket *psock;
#endif
    FAR void *ptr;
  } u;
#ifdef CONFIG_PRIORITY_INHERITANCE
  struct sched_param param;
#endif

  /* Descriptors below CONFIG_NFILE_DESCRIPTORS are file descriptors;
   * descriptors at or above it are socket descriptors.
   * FIX: the comparison was inverted ('>='), sending socket descriptors to
   * fs_getfilep().
   */

#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  if (aiocbp->aio_fildes < CONFIG_NFILE_DESCRIPTORS)
#endif
#ifdef AIO_HAVE_FILEP
    {
      /* Get the file structure corresponding to the file descriptor. */

      u.filep = fs_getfilep(aiocbp->aio_fildes);
      if (!u.filep)
        {
          /* The errno value has already been set */

          return NULL;
        }
    }
#endif
#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  else
#endif
#ifdef AIO_HAVE_PSOCK
    {
      /* Get the socket structure corresponding to the socket descriptor */

      u.psock = sockfd_socket(aiocbp->aio_fildes);
      if (!u.psock)
        {
          /* Does not set the errno.  EBADF is the most likely explanation. */

          set_errno(EBADF);
          return NULL;
        }
    }
#endif

  /* Allocate the AIO control block container, waiting for one to become
   * available if necessary.  This should never fail.
   */

  aioc = aioc_alloc();
  DEBUGASSERT(aioc);

  /* Initialize the container */

  memset(aioc, 0, sizeof(struct aio_container_s));
  aioc->aioc_aiocbp = aiocbp;
  aioc->u.aioc_filep = u.ptr;
  aioc->aioc_pid = getpid();

#ifdef CONFIG_PRIORITY_INHERITANCE
  /* FIX: was the mojibake '¶m' (mis-decoded HTML entity for '&param'). */

  DEBUGVERIFY(sched_getparam(aioc->aioc_pid, &param));
  aioc->aioc_prio = param.sched_priority;
#endif

  /* Add the container to the pending transfer list. */

  aio_lock();
  dq_addlast(&aioc->aioc_link, &g_aio_pending);
  aio_unlock();

  return aioc;
}