/* Initiate teardown. Clear any alloc/free requests, and clear * the alloc->ready flag to stop prep/check from allocating. */ static void interface_teardown (struct alloc_ctx *ctx, char *s, int errnum) { if (ctx->ready) { struct job *job; flux_log (ctx->h, LOG_DEBUG, "alloc: stop due to %s: %s", s, flux_strerror (errnum)); job = queue_first (ctx->queue); while (job) { /* jobs with alloc pending need to go back in the queue * so they will automatically send alloc again. */ if (job->alloc_pending) { assert (job->aux_queue_handle == NULL); if (queue_insert (ctx->inqueue, job, &job->aux_queue_handle) < 0) flux_log_error (ctx->h, "%s: queue_insert", __FUNCTION__); job->alloc_pending = 0; job->alloc_queued = 1; } /* jobs with free pending (much smaller window for this to be true) * need to be picked up again after 'hello'. */ job->free_pending = 0; job = queue_next (ctx->queue); } ctx->ready = false; ctx->active_alloc_count = 0; } }
/* Log a formatted message at LOG_ERR severity with the string form of
 * the caller's errno appended ("message: strerror").
 * The caller's errno is preserved across this call.
 */
void flux_log_verror (flux_t *h, const char *fmt, va_list ap)
{
    int saved_errno = errno;
    char *s = xvasprintf (fmt, ap);

    /* Use saved_errno, not errno: xvasprintf () may clobber errno on
     * its internal allocation path, which would garble the message.
     */
    flux_log (h, LOG_ERR, "%s: %s", s, flux_strerror (saved_errno));
    free (s);
    errno = saved_errno;
}
/* Test whether a job directory exists in the KVS for jobid 'j'.
 * Returns 0 if the directory can be looked up, -1 otherwise
 * (including when no KVS path can be derived for the jobid).
 */
static int jobid_exist (flux_t *h, int64_t j)
{
    jscctx_t *ctx = getctx (h);
    const char *dirpath = jscctx_jobid_path (ctx, j);
    flux_future_t *f;
    int found;

    if (dirpath == NULL)
        return -1;
    f = flux_kvs_lookup (h, FLUX_KVS_READDIR, dirpath);
    found = (f != NULL && flux_kvs_lookup_get_dir (f, NULL) == 0);
    if (!found)
        flux_log (h, LOG_DEBUG, "flux_kvs_lookup(%s): %s",
                  dirpath, flux_strerror (errno));
    flux_future_destroy (f);
    return found ? 0 : -1;
}