/* If COND is a null pointer, wait until the blocking operation in
   CTX finished and return its error value.  Otherwise, wait until
   COND is satisfied or the operation finished.

   Returns 0 on normal completion (including the COND-satisfied case),
   or the error that aborted the operation.  On any error, every fd in
   CTX's fd table is closed and the engine is told the operation is
   DONE before returning.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond)
{
  gpgme_error_t err = 0;
  int hang = 1;

  do
    {
      /* Block until at least one fd of this context is ready (the
	 final 0 argument selects blocking mode).  NR is the number of
	 signaled fds.  */
      int nr = _gpgme_io_select (ctx->fdt.fds, ctx->fdt.size, 0);
      unsigned int i;

      if (nr < 0)
	{
	  /* An error occurred.  Close all fds in this context, and
	     signal it.  ERRNO is captured before the close loop can
	     clobber it.  */
	  unsigned int idx;

	  err = gpg_error_from_errno (errno);
	  for (idx = 0; idx < ctx->fdt.size; idx++)
	    if (ctx->fdt.fds[idx].fd != -1)
	      _gpgme_io_close (ctx->fdt.fds[idx].fd);
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &err);
	  return err;
	}

      /* Run the I/O callback of every signaled fd; stop early once
	 all NR signaled fds have been serviced.  */
      for (i = 0; i < ctx->fdt.size && nr; i++)
	{
	  if (ctx->fdt.fds[i].fd != -1 && ctx->fdt.fds[i].signaled)
	    {
	      ctx->fdt.fds[i].signaled = 0;
	      assert (nr);
	      nr--;
	      err = _gpgme_run_io_cb (&ctx->fdt.fds[i], 0);
	      if (err)
		{
		  /* An error occurred.  Close all fds in this
		     context, and signal it.  */
		  unsigned int idx;

		  for (idx = 0; idx < ctx->fdt.size; idx++)
		    if (ctx->fdt.fds[idx].fd != -1)
		      _gpgme_io_close (ctx->fdt.fds[idx].fd);
		  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE,
					  &err);
		  return err;
		}
	    }
	}

      /* If no fd is left open, the operation is complete: report the
	 DONE event (ERR is 0 here) and leave the loop.  */
      for (i = 0; i < ctx->fdt.size; i++)
	if (ctx->fdt.fds[i].fd != -1)
	  break;
      if (i == ctx->fdt.size)
	{
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &err);
	  hang = 0;
	}

      /* The caller's condition being satisfied also ends the wait.  */
      if (cond && *cond)
	hang = 0;
    }
  while (hang);

  return 0;
}
/* Perform asynchronous operations in the global event loop (ie, any
   asynchronous operation except key listing and trustitem listing
   operations).  If CTX is not a null pointer, the function will
   return if the asynchronous operation in the context CTX finished.
   Otherwise the function will return if any asynchronous operation
   finished.  If HANG is zero, the function will not block for a long
   time.  Otherwise the function does not return until an operation
   matching CTX finished.

   If a matching context finished, it is returned, and *STATUS is set
   to the error value of the operation in that context.  Otherwise, if
   the timeout expires, NULL is returned and *STATUS is 0.  If an
   error occurs, NULL is returned and *STATUS is set to the error
   value.  *OP_ERR receives the operational error of the finished
   context, if any.  */
gpgme_ctx_t
gpgme_wait_ext (gpgme_ctx_t ctx, gpgme_error_t *status,
		gpgme_error_t *op_err, int hang)
{
  do
    {
      unsigned int i = 0;
      struct ctx_list_item *li;
      struct fd_table fdt;
      int nr;

      /* Collect the active file descriptors from all registered
	 contexts into one combined table.  */
      LOCK (ctx_list_lock);
      for (li = ctx_active_list; li; li = li->next)
	i += li->ctx->fdt.size;
      fdt.fds = malloc (i * sizeof (struct io_select_fd_s));
      /* FIX: malloc (0) may validly return NULL when no context is
	 active; only treat a NULL result as out-of-memory if we
	 actually requested memory.  */
      if (!fdt.fds && i > 0)
	{
	  int saved_err = gpg_error_from_syserror ();
	  UNLOCK (ctx_list_lock);
	  if (status)
	    *status = saved_err;
	  if (op_err)
	    *op_err = 0;
	  return NULL;
	}
      fdt.size = i;
      i = 0;
      for (li = ctx_active_list; li; li = li->next)
	{
	  memcpy (&fdt.fds[i], li->ctx->fdt.fds,
		  li->ctx->fdt.size * sizeof (struct io_select_fd_s));
	  i += li->ctx->fdt.size;
	}
      UNLOCK (ctx_list_lock);

      /* Block until at least one of the collected fds is signaled.  */
      nr = _gpgme_io_select (fdt.fds, fdt.size, 0);
      if (nr < 0)
	{
	  int saved_err = gpg_error_from_syserror ();
	  free (fdt.fds);
	  if (status)
	    *status = saved_err;
	  if (op_err)
	    *op_err = 0;
	  return NULL;
	}

      for (i = 0; i < fdt.size && nr; i++)
	{
	  if (fdt.fds[i].fd != -1 && fdt.fds[i].signaled)
	    {
	      gpgme_ctx_t ictx;
	      gpgme_error_t err = 0;
	      gpgme_error_t local_op_err = 0;
	      struct wait_item_s *item;

	      assert (nr);
	      nr--;

	      /* The context owning this fd is recorded in the wait
		 item attached to the fd entry.  */
	      item = (struct wait_item_s *) fdt.fds[i].opaque;
	      assert (item);
	      ictx = item->ctx;
	      assert (ictx);

	      /* FIX: check the canceled flag of the context that owns
		 the signaled fd (ICTX), not of the caller-supplied
		 CTX.  CTX may legitimately be NULL here ("wait for
		 any"), so the old code dereferenced a null pointer,
		 and with a non-NULL CTX it tested the wrong
		 context.  */
	      LOCK (ictx->lock);
	      if (ictx->canceled)
		err = gpg_error (GPG_ERR_CANCELED);
	      UNLOCK (ictx->lock);

	      if (!err)
		err = _gpgme_run_io_cb (&fdt.fds[i], 0, &local_op_err);
	      if (err || local_op_err)
		{
		  /* An error occurred.  Close all fds in this
		     context, and signal it.  */
		  _gpgme_cancel_with_err (ictx, err, local_op_err);

		  /* Break out of the loop, and retry the select()
		     from scratch, because now all fds should be
		     gone.  */
		  break;
		}
	    }
	}
      free (fdt.fds);

      /* Now some contexts might have finished successfully.  */
      LOCK (ctx_list_lock);
    retry:
      for (li = ctx_active_list; li; li = li->next)
	{
	  gpgme_ctx_t actx = li->ctx;

	  for (i = 0; i < actx->fdt.size; i++)
	    if (actx->fdt.fds[i].fd != -1)
	      break;
	  if (i == actx->fdt.size)
	    {
	      struct gpgme_io_event_done_data data;
	      data.err = 0;
	      data.op_err = 0;

	      /* FIXME: This does not perform too well.  We have to
		 release the lock because the I/O event handler
		 acquires it to remove the context from the active
		 list.  Two alternative strategies are worth
		 considering: Either implement the DONE event handler
		 here in a lock-free manner, or save a list of all
		 contexts to be released and call the DONE events
		 afterwards.  */
	      UNLOCK (ctx_list_lock);
	      _gpgme_engine_io_event (actx->engine, GPGME_EVENT_DONE, &data);
	      LOCK (ctx_list_lock);
	      /* The list was modified while unlocked; rescan from the
		 head.  */
	      goto retry;
	    }
	}
      UNLOCK (ctx_list_lock);

      {
	/* Check if any matching context finished; if so, report it
	   and stop looping.  */
	gpgme_ctx_t dctx = ctx_wait (ctx, status, op_err);

	if (dctx)
	  {
	    ctx = dctx;
	    hang = 0;
	  }
	else if (!hang)
	  {
	    ctx = NULL;
	    if (status)
	      *status = 0;
	    if (op_err)
	      *op_err = 0;
	  }
      }
    }
  while (hang);

  return ctx;
}
/* If COND is a null pointer, wait until the blocking operation in
   CTX finished and return its error value.  Otherwise, wait until
   COND is satisfied or the operation finished.

   Returns 0 on normal completion (including the COND-satisfied case
   and the operational-error case), or the error that aborted the
   operation.  If OP_ERR_P is not NULL, *OP_ERR_P receives the
   operational error of the finished operation (0 if none).  On any
   error the whole context is canceled via _gpgme_cancel_with_err,
   which closes its fds and signals the DONE event.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond,
			  gpgme_error_t *op_err_p)
{
  gpgme_error_t err = 0;
  int hang = 1;

  if (op_err_p)
    *op_err_p = 0;

  do
    {
      /* Block until at least one fd of this context is ready; NR is
	 the number of signaled fds.  */
      int nr = _gpgme_io_select (ctx->fdt.fds, ctx->fdt.size, 0);
      unsigned int i;

      if (nr < 0)
	{
	  /* An error occurred.  Close all fds in this context, and
	     signal it.  */
	  err = gpg_error_from_syserror ();
	  _gpgme_cancel_with_err (ctx, err, 0);

	  return err;
	}

      /* Service every signaled fd, stopping early once all NR have
	 been handled.  */
      for (i = 0; i < ctx->fdt.size && nr; i++)
	{
	  if (ctx->fdt.fds[i].fd != -1 && ctx->fdt.fds[i].signaled)
	    {
	      gpgme_error_t op_err = 0;

	      ctx->fdt.fds[i].signaled = 0;
	      assert (nr);
	      nr--;

	      /* Honor an asynchronous cancellation request before
		 running the callback.  */
	      LOCK (ctx->lock);
	      if (ctx->canceled)
		err = gpg_error (GPG_ERR_CANCELED);
	      UNLOCK (ctx->lock);

	      if (!err)
		err = _gpgme_run_io_cb (&ctx->fdt.fds[i], 0, &op_err);
	      if (err)
		{
		  /* An error occurred.  Close all fds in this context,
		     and signal it.  */
		  _gpgme_cancel_with_err (ctx, err, 0);

		  return err;
		}
	      else if (op_err)
		{
		  /* An operational error occurred.  Cancel the current
		     operation but not the session, and signal it.  */
		  _gpgme_cancel_with_err (ctx, 0, op_err);

		  /* NOTE: This relies on the operational error being
		     generated after the operation really has
		     completed, for example after no further status
		     line output is generated.  Otherwise the
		     following I/O will spill over into the next
		     operation.  */
		  if (op_err_p)
		    *op_err_p = op_err;
		  return 0;
		}
	    }
	}

      /* If no fd is left open, the operation is complete: report a
	 successful DONE event and leave the loop.  */
      for (i = 0; i < ctx->fdt.size; i++)
	if (ctx->fdt.fds[i].fd != -1)
	  break;
      if (i == ctx->fdt.size)
	{
	  struct gpgme_io_event_done_data data;

	  data.err = 0;
	  data.op_err = 0;
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
	  hang = 0;
	}

      /* The caller's condition being satisfied also ends the wait.  */
      if (cond && *cond)
	hang = 0;
    }
  while (hang);

  return 0;
}