/* Cancel the session and/or the pending operation in CTX and deliver
   a DONE event carrying CTX_ERR and OP_ERR to the I/O event handler.
   A non-zero CTX_ERR cancels the whole session; otherwise only the
   currently running operation is cancelled.  Returns an error if the
   engine refused the cancellation.  */
gpgme_error_t
_gpgme_cancel_with_err (gpgme_ctx_t ctx, gpg_error_t ctx_err,
			gpg_error_t op_err)
{
  gpgme_error_t err;
  struct gpgme_io_event_done_data done;

  TRACE_BEG2 (DEBUG_CTX, "_gpgme_cancel_with_err", ctx,
	      "ctx_err=%i, op_err=%i", ctx_err, op_err);

  /* A session error tears down the whole engine; an operational error
     only aborts the running operation.  */
  err = ctx_err ? _gpgme_engine_cancel (ctx->engine)
                : _gpgme_engine_cancel_op (ctx->engine);
  if (err)
    return TRACE_ERR (err);

  done.err = ctx_err;
  done.op_err = op_err;
  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &done);

  return TRACE_ERR (0);
}
/* This handler is used to parse the output of --list-trust-path:

   Format:
     level:keyid:type:recno:ot:val:mc:cc:name:

   With TYPE = U for a user ID
	       K for a key

   The RECNO is either the one of the dir record or the one of the uid
   record.  OT is the usual trust letter and only available on K
   lines.  VAL is the calculated validity.  MC is the marginal trust
   counter and only available on U lines.  CC is the same for the
   complete count.  NAME is the username and only printed on U
   lines.  */
static gpgme_error_t
trustlist_colon_handler (void *priv, char *line)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) priv;
  gpgme_error_t err;
  char *p, *pend;
  int field = 0;
  gpgme_trust_item_t item = NULL;

  if (!line)
    return 0;			/* EOF */

  /* Walk the colon separated fields in place; each colon is
     overwritten with a NUL and PEND advanced past it, so P always
     points at the current field as a C string.  */
  for (p = line; p; p = pend)
    {
      field++;
      pend = strchr (p, ':');
      if (pend)
	*pend++ = 0;

      switch (field)
	{
	case 1: /* level — field 1 comes first, so ITEM is allocated
		   here before any other case can touch it.  */
	  err = _gpgme_trust_item_new (&item);
	  if (err)
	    return err;
	  item->level = atoi (p);
	  break;
	case 2: /* long keyid — copied only when it has exactly the
		   expected length; otherwise left zero-filled.  */
	  if (strlen (p) == DIM(item->keyid) - 1)
	    strcpy (item->keyid, p);
	  break;
	case 3: /* type: 1 = key, 2 = user ID, 0 = unknown.  */
	  item->type = *p == 'K'? 1 : *p == 'U'? 2 : 0;
	  break;
	case 5: /* owner trust (single letter, K lines only).  */
	  item->_owner_trust[0] = *p;
	  break;
	case 6: /* validity (single letter).  */
	  item->_validity[0] = *p;
	  break;
	case 9: /* user ID (U lines only); allocated copy owned by
		   the item.  */
	  item->name = strdup (p);
	  if (!item->name)
	    {
	      /* Save ERRNO before the unref call can clobber it.  */
	      int saved_errno = errno;
	      gpgme_trust_item_unref (item);
	      return gpg_error_from_errno (saved_errno);
	    }
	  break;
	}
    }

  /* Hand the completed item to the user via the I/O event callback;
     ownership passes to the event handler.  */
  if (item)
    _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_NEXT_TRUSTITEM, item);

  return 0;
}
/* I/O event callback for contexts registered with the global event
   loop.  DATA is the context; TYPE_DATA depends on TYPE (a
   gpgme_error_t pointer for GPGME_EVENT_DONE).  */
void
_gpgme_wait_global_event_cb (void *data, gpgme_event_io_t type,
			     void *type_data)
{
  gpgme_ctx_t ctx = (gpgme_ctx_t) data;

  assert (ctx);

  switch (type)
    {
    case GPGME_EVENT_START:
      {
	gpgme_error_t err = ctx_active (ctx);

	if (err)
	  {
	    /* An error occurred.  Close all fds in this context, and
	       send the error in a done event.  */
	    unsigned int idx;

	    /* FIX: iterate with IDX < SIZE; the previous <= bound
	       accessed one entry past the end of the fd table.  */
	    for (idx = 0; idx < ctx->fdt.size; idx++)
	      if (ctx->fdt.fds[idx].fd != -1)
		_gpgme_io_close (ctx->fdt.fds[idx].fd);
	    _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &err);
	  }
      }
      break;

    case GPGME_EVENT_DONE:
      {
	gpgme_error_t *errp = (gpgme_error_t *) type_data;
	assert (errp);
	ctx_done (ctx, *errp);
      }
      break;

    case GPGME_EVENT_NEXT_KEY:
      assert (!"Unexpected event GPGME_EVENT_NEXT_KEY");
      break;

    case GPGME_EVENT_NEXT_TRUSTITEM:
      assert (!"Unexpected event GPGME_EVENT_NEXT_TRUSTITEM");
      break;

    default:
      assert (!"Unexpected event");
      break;
    }
}
/* Perform asynchronous operations in the global event loop (ie, any
   asynchronous operation except key listing and trustitem listing
   operations).  If CTX is not a null pointer, the function will
   return if the asynchronous operation in the context CTX finished.
   Otherwise the function will return if any asynchronous operation
   finished.  If HANG is zero, the function will not block for a long
   time.  Otherwise the function does not return until an operation
   matching CTX finished.

   If a matching context finished, it is returned, and *STATUS is set
   to the error value of the operation in that context.  Otherwise, if
   the timeout expires, NULL is returned and *STATUS is 0.  If an
   error occurs, NULL is returned and *STATUS is set to the error
   value.  */
gpgme_ctx_t
gpgme_wait_ext (gpgme_ctx_t ctx, gpgme_error_t *status,
		gpgme_error_t *op_err, int hang)
{
  do
    {
      unsigned int i = 0;
      struct ctx_list_item *li;
      struct fd_table fdt;
      int nr;

      /* Collect the active file descriptors of all registered
	 contexts into one flat table.  */
      LOCK (ctx_list_lock);
      for (li = ctx_active_list; li; li = li->next)
	i += li->ctx->fdt.size;
      fdt.fds = malloc (i * sizeof (struct io_select_fd_s));
      /* FIX: malloc(0) may legitimately return NULL when no context
	 is active; only treat NULL as an error if memory was actually
	 requested.  */
      if (!fdt.fds && i > 0)
	{
	  int saved_err = gpg_error_from_syserror ();
	  UNLOCK (ctx_list_lock);
	  if (status)
	    *status = saved_err;
	  if (op_err)
	    *op_err = 0;
	  return NULL;
	}
      fdt.size = i;
      i = 0;
      for (li = ctx_active_list; li; li = li->next)
	{
	  memcpy (&fdt.fds[i], li->ctx->fdt.fds,
		  li->ctx->fdt.size * sizeof (struct io_select_fd_s));
	  i += li->ctx->fdt.size;
	}
      UNLOCK (ctx_list_lock);

      nr = _gpgme_io_select (fdt.fds, fdt.size, 0);
      if (nr < 0)
	{
	  int saved_err = gpg_error_from_syserror ();
	  free (fdt.fds);
	  if (status)
	    *status = saved_err;
	  if (op_err)
	    *op_err = 0;
	  return NULL;
	}

      /* Run the I/O callback of every signaled fd.  NR counts the
	 signaled entries still to be handled.  */
      for (i = 0; i < fdt.size && nr; i++)
	{
	  if (fdt.fds[i].fd != -1 && fdt.fds[i].signaled)
	    {
	      gpgme_ctx_t ictx;
	      gpgme_error_t err = 0;
	      gpgme_error_t local_op_err = 0;
	      struct wait_item_s *item;

	      assert (nr);
	      nr--;

	      item = (struct wait_item_s *) fdt.fds[i].opaque;
	      assert (item);
	      ictx = item->ctx;
	      assert (ictx);

	      /* FIX: consult the cancel flag of the context owning
		 this fd (ICTX), not the caller's CTX — CTX may be
		 NULL when waiting on any context, and the canceled
		 context is the fd's owner in any case.  */
	      LOCK (ictx->lock);
	      if (ictx->canceled)
		err = gpg_error (GPG_ERR_CANCELED);
	      UNLOCK (ictx->lock);

	      if (!err)
		err = _gpgme_run_io_cb (&fdt.fds[i], 0, &local_op_err);
	      if (err || local_op_err)
		{
		  /* An error occurred.  Close all fds in this
		     context, and signal it.  */
		  _gpgme_cancel_with_err (ictx, err, local_op_err);
		  /* Break out of the loop, and retry the select()
		     from scratch, because now all fds should be
		     gone.  */
		  break;
		}
	    }
	}
      free (fdt.fds);

      /* Now some contexts might have finished successfully.  */
      LOCK (ctx_list_lock);
    retry:
      for (li = ctx_active_list; li; li = li->next)
	{
	  gpgme_ctx_t actx = li->ctx;

	  for (i = 0; i < actx->fdt.size; i++)
	    if (actx->fdt.fds[i].fd != -1)
	      break;
	  if (i == actx->fdt.size)
	    {
	      struct gpgme_io_event_done_data data;
	      data.err = 0;
	      data.op_err = 0;

	      /* FIXME: This does not perform too well.  We have to
		 release the lock because the I/O event handler
		 acquires it to remove the context from the active
		 list.  Two alternative strategies are worth
		 considering: Either implement the DONE event handler
		 here in a lock-free manner, or save a list of all
		 contexts to be released and call the DONE events
		 afterwards.  */
	      UNLOCK (ctx_list_lock);
	      _gpgme_engine_io_event (actx->engine,
				      GPGME_EVENT_DONE, &data);
	      LOCK (ctx_list_lock);
	      goto retry;
	    }
	}
      UNLOCK (ctx_list_lock);

      {
	gpgme_ctx_t dctx = ctx_wait (ctx, status, op_err);

	if (dctx)
	  {
	    ctx = dctx;
	    hang = 0;
	  }
	else if (!hang)
	  {
	    ctx = NULL;
	    if (status)
	      *status = 0;
	    if (op_err)
	      *op_err = 0;
	  }
      }
    }
  while (hang);

  return ctx;
}
/* If COND is a null pointer, wait until the blocking operation in CTX
   finished and return its error value.  Otherwise, wait until COND is
   satisfied or the operation finished.

   NOTE(review): a second definition of this function with an extra
   OP_ERR_P parameter appears later in this file; presumably only one
   variant is compiled in — confirm which one is current.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond)
{
  gpgme_error_t err = 0;
  int hang = 1;

  do
    {
      /* Select on this context's fd table (third argument 0 —
	 presumably the non-blocking flag; confirm against
	 _gpgme_io_select).  */
      int nr = _gpgme_io_select (ctx->fdt.fds, ctx->fdt.size, 0);
      unsigned int i;

      if (nr < 0)
	{
	  /* An error occurred.  Close all fds in this context, and
	     signal it.  */
	  unsigned int idx;

	  err = gpg_error_from_errno (errno);
	  for (idx = 0; idx < ctx->fdt.size; idx++)
	    if (ctx->fdt.fds[idx].fd != -1)
	      _gpgme_io_close (ctx->fdt.fds[idx].fd);
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &err);
	  return err;
	}

      /* Run the I/O callback of every signaled fd; NR counts the
	 signaled entries still pending so the loop can stop early.  */
      for (i = 0; i < ctx->fdt.size && nr; i++)
	{
	  if (ctx->fdt.fds[i].fd != -1 && ctx->fdt.fds[i].signaled)
	    {
	      ctx->fdt.fds[i].signaled = 0;
	      assert (nr);
	      nr--;
	      err = _gpgme_run_io_cb (&ctx->fdt.fds[i], 0);
	      if (err)
		{
		  /* An error occurred.  Close all fds in this
		     context, and signal it.  */
		  unsigned int idx;

		  for (idx = 0; idx < ctx->fdt.size; idx++)
		    if (ctx->fdt.fds[idx].fd != -1)
		      _gpgme_io_close (ctx->fdt.fds[idx].fd);
		  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE,
					  &err);
		  return err;
		}
	    }
	}

      /* No open fd left means the operation completed; deliver the
	 DONE event (ERR is 0 on this path) and leave the loop.  */
      for (i = 0; i < ctx->fdt.size; i++)
	if (ctx->fdt.fds[i].fd != -1)
	  break;
      if (i == ctx->fdt.size)
	{
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &err);
	  hang = 0;
	}
      /* The caller's condition is checked after each pass.  */
      if (cond && *cond)
	hang = 0;
    }
  while (hang);

  return 0;
}
/* If COND is a null pointer, wait until the blocking operation in CTX
   finished and return its error value.  Otherwise, wait until COND is
   satisfied or the operation finished.  If OP_ERR_P is non-NULL, an
   operational error of the finished operation is stored there while
   the function itself returns 0.  */
gpgme_error_t
_gpgme_wait_on_condition (gpgme_ctx_t ctx, volatile int *cond,
			  gpgme_error_t *op_err_p)
{
  gpgme_error_t err = 0;
  int hang = 1;

  if (op_err_p)
    *op_err_p = 0;

  do
    {
      /* Select on this context's fd table (third argument 0 —
	 presumably the non-blocking flag; confirm against
	 _gpgme_io_select).  */
      int nr = _gpgme_io_select (ctx->fdt.fds, ctx->fdt.size, 0);
      unsigned int i;

      if (nr < 0)
	{
	  /* An error occurred.  Close all fds in this context, and
	     signal it.  */
	  err = gpg_error_from_syserror ();
	  _gpgme_cancel_with_err (ctx, err, 0);
	  return err;
	}

      /* Run the I/O callback of every signaled fd; NR counts the
	 signaled entries still pending so the loop can stop early.  */
      for (i = 0; i < ctx->fdt.size && nr; i++)
	{
	  if (ctx->fdt.fds[i].fd != -1 && ctx->fdt.fds[i].signaled)
	    {
	      gpgme_error_t op_err = 0;

	      ctx->fdt.fds[i].signaled = 0;
	      assert (nr);
	      nr--;

	      /* A cancellation requested from another thread takes
		 precedence over running the callback.  */
	      LOCK (ctx->lock);
	      if (ctx->canceled)
		err = gpg_error (GPG_ERR_CANCELED);
	      UNLOCK (ctx->lock);

	      if (!err)
		err = _gpgme_run_io_cb (&ctx->fdt.fds[i], 0, &op_err);
	      if (err)
		{
		  /* An error occurred.  Close all fds in this
		     context, and signal it.  */
		  _gpgme_cancel_with_err (ctx, err, 0);
		  return err;
		}
	      else if (op_err)
		{
		  /* An operational error occurred.  Cancel the
		     current operation but not the session, and
		     signal it.  */
		  _gpgme_cancel_with_err (ctx, 0, op_err);

		  /* NOTE: This relies on the operational error being
		     generated after the operation really has
		     completed, for example after no further status
		     line output is generated.  Otherwise the
		     following I/O will spill over into the next
		     operation.  */
		  if (op_err_p)
		    *op_err_p = op_err;
		  return 0;
		}
	    }
	}

      /* No open fd left means the operation completed; deliver a
	 DONE event with both error fields cleared and leave the
	 loop.  */
      for (i = 0; i < ctx->fdt.size; i++)
	if (ctx->fdt.fds[i].fd != -1)
	  break;
      if (i == ctx->fdt.size)
	{
	  struct gpgme_io_event_done_data data;
	  data.err = 0;
	  data.op_err = 0;
	  _gpgme_engine_io_event (ctx->engine, GPGME_EVENT_DONE, &data);
	  hang = 0;
	}
      /* The caller's condition is checked after each pass.  */
      if (cond && *cond)
	hang = 0;
    }
  while (hang);

  return 0;
}