/*
 * Deliver one JSON-encoded chunk of child I/O.  If the owning subprocess
 * registered an io callback, wrap the chunk in an envelope object
 * (pid/type/name) and hand it over; otherwise write it to the default
 * stream.  Returns 0 on success, -1 with errno = EINVAL on a parse error.
 */
static int output_handler (zio_t *z, const char *json_str, int len, void *arg)
{
    struct subprocess *p = arg;

    if (!p->io_cb) {
        send_output_to_stream (zio_name (z), json_str);
    } else {
        json_object *obj = json_tokener_parse (json_str);
        if (obj == NULL) {
            errno = EINVAL;
            return -1;
        }
        Jadd_int (obj, "pid", subprocess_pid (p));
        Jadd_str (obj, "type", "io");
        Jadd_str (obj, "name", zio_name (z));
        p->io_cb (p, json_object_to_json_string (obj));
        json_object_put (obj);
    }

    /*
     * Check for process completion in case EOF from I/O stream and process
     * already registered exit
     */
    check_completion (p);
    return (0);
}
/*
 * Forcefully terminate a child job with SIGKILL, reap it, and mark it
 * finished with the given reason code.
 *
 * If the child is already gone (ESRCH) we only run the completion
 * bookkeeping; on any other kill() failure we log it and still attempt
 * the blocking reap, since the child may exit on its own.
 */
static void kill_job(child_process *cp, int reason)
{
	int ret;
	struct rusage ru;

	/* brutal but efficient */
	ret = kill(cp->pid, SIGKILL);
	if (ret < 0) {
		if (errno == ESRCH) {
			/* already dead and reaped; just finish bookkeeping */
			finish_job(cp, reason);
			return;
		}
		wlog("kill(%d, SIGKILL) failed: %s\n", cp->pid, strerror(errno));
	}

	/* blocking reap; wait status is stored in cp->ret */
	ret = wait4(cp->pid, &cp->ret, 0, &ru);
	finish_job(cp, reason);

#ifdef PLAY_NICE_IN_kill_job
	/*
	 * Gentler variant: SIGTERM first, escalate to SIGKILL.
	 * BUG FIX: 'pid' was previously used without a declaration,
	 * which would fail to compile when this section is enabled.
	 */
	int i, sig = SIGTERM;
	int pid = cp->pid;

	for (i = 0; i < 2; i++) {
		/* check one last time if the job is done */
		ret = check_completion(cp, WNOHANG);
		if (!ret || ret == -ECHILD) {
			/* check_completion ran finish_job() */
			return;
		}

		/* not done, so signal it. SIGTERM first and check again */
		errno = 0;
		ret = kill(pid, sig);
		if (ret < 0) {
			finish_job(cp, -errno);
		}
		sig = SIGKILL;
		check_completion(cp, WNOHANG);
		if (ret < 0) {
			finish_job(cp, errno);
		}
		usleep(50000);
	}
#endif /* PLAY_NICE_IN_kill_job */
}
int subprocess_manager_reap_all (struct subprocess_manager *sm) { struct subprocess *p; while ((p = subprocess_manager_wait (sm))) check_completion (p); return (0); }
/*
 * Record a wait(2) status on `p`: when the child exited or was killed by
 * a signal, mark it exited and run EXIT hooks; STATUS hooks always run.
 * Finally poll for overall completion.  Negative statuses are ignored.
 */
static void subprocess_process_wait_status (struct subprocess *p, int status)
{
    int terminated;

    if (status < 0)
        return;

    p->status = status;
    terminated = WIFEXITED (p->status) || WIFSIGNALED (p->status);
    if (terminated) {
        p->exited = 1;
        subprocess_run_hooks (p, p->hooks [SUBPROCESS_EXIT]);
    }
    subprocess_run_hooks (p, p->hooks [SUBPROCESS_STATUS]);
    check_completion (p);
}
/*
 * Drain pending output from one of a child's I/O channels (stdout or
 * stderr) into its accumulation buffer, kept NUL-terminated.  On EOF the
 * fd is unregistered and closed; once both channels hit EOF the job is
 * reaped with a blocking check_completion().
 */
static void gather_output(child_process *cp, iobuf *io)
{
	iobuf *other_io;

	other_io = (io == &cp->outstd) ? &cp->outerr : &cp->outstd;

	for (;;) {
		char buf[4096];
		int rd;

		rd = read(io->fd, buf, sizeof(buf));
		if (rd < 0) {
			if (errno == EINTR)
				continue;
			/*
			 * Read error.  Poll for completion, but do NOT fall
			 * through to the append path: the previous code did,
			 * passing the negative 'rd' to realloc()/memcpy().
			 */
			check_completion(cp, WNOHANG);
			break;
		}

		if (rd > 0) {
			/* append the new data, keeping a trailing NUL */
			char *nbuf = realloc(io->buf, rd + io->len + 1);
			if (!nbuf) {
				/* OOM: drop this chunk; old buffer stays valid */
				break;
			}
			io->buf = nbuf;
			memcpy(&io->buf[io->len], buf, rd);
			io->len += rd;
			io->buf[io->len] = '\0';
		} else {
			/* EOF on this channel */
			iobroker_close(iobs, io->fd);
			io->fd = -1;
			if (other_io->fd < 0) {
				/* both channels closed: blocking reap */
				check_completion(cp, 0);
			} else {
				check_completion(cp, WNOHANG);
			}
		}
		break;
	}
}
/*
 * "What can the harvest hope for, if not for the care
 *  of the Reaper Man?"
 *              -- Terry Pratchett, Reaper Man
 *
 * We end up here no matter if the job is stale (ie, the child is
 * stuck in uninterruptable sleep) or if it's the first time we try
 * to kill it.
 * A job is considered reaped once we reap our direct child, in
 * which case init will become parent of our grandchildren.
 * It's also considered fully reaped if kill() results in ESRCH or
 * EPERM, or if wait()ing for the process group results in ECHILD.
 */
static void kill_job(child_process *cp, int reason)
{
	int ret, status, reaped = 0;
	int pid = cp ? cp->ei->pid : 0;

	/*
	 * first attempt at reaping, so see if we just failed to
	 * notice that things were going wrong here
	 */
	if (reason == ETIME && !check_completion(cp, WNOHANG)) {
		timeouts++;
		wlog("job %d with pid %d reaped at timeout. timeouts=%u; started=%u",
		     cp->id, pid, timeouts, started);
		return;
	}

	/* brutal but efficient: SIGKILL the whole process group */
	if (kill(-cp->ei->pid, SIGKILL) < 0) {
		if (errno == ESRCH) {
			reaped = 1;
		} else {
			wlog("kill(-%d, SIGKILL) failed: %s\n",
			     cp->ei->pid, strerror(errno));
		}
	}

	/*
	 * we must iterate at least once, in case kill() returns
	 * ESRCH when there's zombies
	 */
	do {
		ret = waitpid(cp->ei->pid, &status, WNOHANG);
		if (ret < 0 && errno == EINTR)
			continue;
		if (ret == cp->ei->pid || (ret < 0 && errno == ECHILD)) {
			reaped = 1;
			break;
		}
		if (!ret) {
			struct timeval tv;

			gettimeofday(&tv, NULL);
			/*
			 * stale process (signal may not have been delivered, or
			 * the child can be stuck in uninterruptible sleep). We
			 * can't hang around forever, so just reschedule a new
			 * reap attempt later.
			 */
			if (reason == ESTALE) {
				tv.tv_sec += 5;
				/* BUG FIX: this log string was split by a raw
				 * newline, which is not valid C */
				wlog("Failed to reap child with pid %d. Next attempt @ %lu.%lu",
				     cp->ei->pid, (unsigned long)tv.tv_sec,
				     (unsigned long)tv.tv_usec);
			} else {
				/* BUG FIX: was 'tv.tv_usec = 250000', which made
				 * the rollover check below dead code */
				tv.tv_usec += 250000;
				if (tv.tv_usec > 1000000) {
					tv.tv_usec -= 1000000;
					tv.tv_sec += 1;
				}
				cp->ei->state = ESTALE;
				finish_job(cp, reason);
			}
			squeue_remove(sq, cp->ei->sq_event);
			cp->ei->sq_event = squeue_add_tv(sq, &tv, cp);
			return;
		}
	} while (!reaped);

	if (cp->ei->state != ESTALE)
		finish_job(cp, reason);
	else
		wlog("job %d (pid=%d): Dormant child reaped",
		     cp->id, cp->ei->pid);
	destroy_job(cp);
}
// Per-frame driver: while no effector is active, activate one as soon as
// the start conditions hold; while one is active, deactivate it once it
// reports completion.
void CPPEffectorController::frame_update()
{
    if (!m_effector) {
        if (check_start_conditions())
            activate();
        return;
    }
    if (check_completion())
        deactivate();
}
/*
 * Selftest: populate the breadcrumb wait rbtree with `count` waiters at
 * consecutive seqnos (offset by seqno_bias), then repeatedly advance the
 * mock seqno in growing steps and verify that exactly the retired waiters
 * are completed and removed, leaving the tree consistent at every step.
 * Returns 0 on success or a negative errno on failure.
 */
static int igt_insert_complete(void *arg)
{
	const u32 seqno_bias = 0x1000;
	struct intel_engine_cs *engine = arg;
	struct intel_wait *waiters;
	const int count = 4096;
	unsigned long *bitmap;	/* one bit per still-pending waiter */
	int err = -ENOMEM;
	int n, m;

	mock_engine_reset(engine);

	waiters = kvmalloc_array(count, sizeof(*waiters), GFP_KERNEL);
	if (!waiters)
		goto out_engines;

	bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
			 GFP_KERNEL);
	if (!bitmap)
		goto out_waiters;

	/* Register waiter n for seqno n + seqno_bias and mark it pending. */
	for (n = 0; n < count; n++) {
		intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
		intel_engine_add_wait(engine, &waiters[n]);
		__set_bit(n, bitmap);
	}
	err = check_rbtree(engine, bitmap, waiters, count);
	if (err)
		goto out_bitmap;

	/* On each step, we advance the seqno so that several waiters are then
	 * complete (we increase the seqno by increasingly larger values to
	 * retire more and more waiters at once). All retired waiters should
	 * be woken and removed from the rbtree, and so that we check.
	 */
	for (n = 0; n < count; n = m) {
		/* Doubling step: waiters [n, seqno] retire this iteration. */
		int seqno = 2 * n;

		GEM_BUG_ON(find_first_bit(bitmap, count) != n);

		/* The first pending waiter must not be complete yet. */
		if (intel_wait_complete(&waiters[n])) {
			pr_err("waiter[%d, seqno=%d] completed too early\n",
			       n, waiters[n].seqno);
			err = -EINVAL;
			goto out_bitmap;
		}

		/* complete the following waiters */
		mock_seqno_advance(engine, seqno + seqno_bias);
		for (m = n; m <= seqno; m++) {
			if (m == count)
				break;

			GEM_BUG_ON(!test_bit(m, bitmap));
			__clear_bit(m, bitmap);
		}

		intel_engine_remove_wait(engine, &waiters[n]);
		RB_CLEAR_NODE(&waiters[n].node);

		err = check_rbtree(engine, bitmap, waiters, count);
		if (err) {
			pr_err("rbtree corrupt after seqno advance to %d\n",
			       seqno + seqno_bias);
			goto out_bitmap;
		}

		err = check_completion(engine, bitmap, waiters, count);
		if (err) {
			pr_err("completions after seqno advance to %d failed\n",
			       seqno + seqno_bias);
			goto out_bitmap;
		}
	}

	/* Every waiter retired: the tree must now be empty. */
	err = check_rbtree_empty(engine);
out_bitmap:
	kfree(bitmap);
out_waiters:
	kvfree(waiters);
out_engines:
	mock_engine_flush(engine);
	return err;
}