/* Release the currently-sounding voice, but only if `num` matches the
 * note this synth is playing; both envelopes are stopped together. */
static void stop_note(HmSynth *base, int num)
{
	SineSynth *synth = (SineSynth *)base;

	if (synth->note != num)
		return;

	synth->note = 0;
	env_stop(&synth->env);
	env_stop(&synth->fenv);
}
/* Coroutine body: increment the shared counter forever, calling
 * env_stop() on every iteration once the counter has passed 1000,
 * logging the value, and yielding back to the master scheduler. */
void func(void *arg)
{
	for (;;) {
		counter++;
		if (counter > 1000)
			env_stop();
		log(LOG_INFO, "counter:%d", counter);
		coro_yield(g_mastersched->current_coro);
	}
}
/** Stop CT.
 *
 * @param h		CT handler.
 * @param veid		CT ID.
 * @param param		CT parameters.
 * @param stop_mode	stop mode, one of (M_REBOOT M_HALT M_KILL).
 * @param skip		flag to skip run action script (SKIP_ACTION_SCRIPT)
 * @param action	modules list, used to call cleanup() callback.
 * @return		0 on success.
 */
int vps_stop(vps_handler *h, envid_t veid, vps_param *param, int stop_mode,
	skipFlags skip, struct mod_action *action)
{
	int ret;
	char buf[64];
	/* Fix: "&param" had been mangled into the mojibake "¶m"
	 * (HTML-entity corruption of "&para"); restored throughout. */
	vps_res *res = &param->res;

	if (check_var(res->fs.root, "VE_ROOT is not set"))
		return VZ_VE_ROOT_NOTSET;
	if (!vps_is_run(h, veid)) {
		logger(-1, 0, "Unable to stop: container is not running");
		return 0;
	}
	/* Run the per-CT stop action script unless explicitly skipped. */
	if (!(skip & SKIP_ACTION_SCRIPT)) {
		snprintf(buf, sizeof(buf), VPS_CONF_DIR "%d.%s", veid,
			STOP_PREFIX);
		if (stat_file(buf)) {
			if (vps_exec_script(h, veid, res->fs.root, NULL, NULL,
				buf, NULL, 0))
			{
				return VZ_ACTIONSCRIPT_ERROR;
			}
		}
	}
	/* get CT IP addresses for cleanup */
	get_vps_ip(h, veid, &param->del_res.net.ip);
	if ((ret = env_stop(h, veid, res->fs.root, stop_mode)))
		goto end;
	mod_cleanup(h, veid, action, param);
	/* Cleanup CT IPs */
	run_net_script(veid, DEL, &param->del_res.net.ip, STATE_STOPPING,
		param->res.net.skip_arpdetect);
	ret = vps_umount(h, veid, res->fs.root, skip);
end:
	free_str_param(&param->del_res.net.ip);
	return ret;
}
/** Start CT with a custom environment-creation callback.
 *
 * Mounts the CT if needed, creates the environment via @fn, applies
 * resource setup, runs the start action script, and finally signals
 * the child (via wait_p / old_wait_p) to exec /sbin/init.
 *
 * @param h	CT handler.
 * @param veid	CT ID.
 * @param param	CT parameters.
 * @param skip	flags of steps to skip (e.g. SKIP_ACTION_SCRIPT).
 * @param mod	modules list for setup callbacks.
 * @param fn	environment creation callback.
 * @param data	opaque argument passed to @fn.
 * @return	0 on success.
 */
int vps_start_custom(vps_handler *h, envid_t veid, vps_param *param,
	skipFlags skip, struct mod_action *mod, env_create_FN fn, void *data)
{
	int wait_p[2];
	int old_wait_p[2];
	int err_p[2];
	int ret;
	int err = 0;	/* Fix: was uninitialized when written to
			 * old_wait_p[1] on the early-failure path (UB). */
	char buf[64];
	char *dist_name;
	struct sigaction act;
	/* Fix: "&param" had been mangled into the mojibake "¶m"
	 * (HTML-entity corruption of "&para"); restored. */
	vps_res *res = &param->res;
	dist_actions actions;

	memset(&actions, 0, sizeof(actions));
	if (check_var(res->fs.root, "VE_ROOT is not set"))
		return VZ_VE_ROOT_NOTSET;
	if (vps_is_run(h, veid)) {
		logger(-1, 0, "Container is already running");
		return VZ_VE_RUNNING;
	}
	if ((ret = check_ub(&res->ub)))
		return ret;
	dist_name = get_dist_name(&res->tmpl);
	ret = read_dist_actions(dist_name, DIST_DIR, &actions);
	free(dist_name);
	if (ret)
		return ret;
	logger(0, 0, "Starting container ...");
	if (vps_is_mounted(res->fs.root)) {
		/* if CT is mounted -- umount first, to cleanup mount state */
		vps_umount(h, veid, res->fs.root, skip);
	}
	if (!vps_is_mounted(res->fs.root)) {
		/* increase quota to perform setup */
		quota_inc(&res->dq, 100);
		if ((ret = vps_mount(h, veid, &res->fs, &res->dq, skip)))
			return ret;
		quota_inc(&res->dq, -100);
	}
	/* Fedora 14/15 hacks */
	if (fix_ve_devconsole(res->fs.root) != 0)
		return VZ_FS_BAD_TMPL;
	if (fix_ve_systemd(res->fs.root) != 0)
		return VZ_FS_BAD_TMPL;

	if (pipe(wait_p) < 0) {
		logger(-1, errno, "Can not create pipe");
		return VZ_RESOURCE_ERROR;
	}
	/* old_wait_p is needed for backward compatibility with older kernels,
	 * while for recent ones (that support CPT_SET_LOCKFD2) we use wait_p.
	 *
	 * If old_wait_p is closed without writing any data, it's "OK to go"
	 * signal, and if data are received from old_wait_p it's "no go"
	 * signal". Note that such thing doesn't work if vzctl segfaults,
	 * because in this case the descriptor will be closed without
	 * sending data.
	 */
	if (pipe(old_wait_p) < 0) {
		/* Fix: close wait_p here — previously leaked. */
		close(wait_p[0]);
		close(wait_p[1]);
		logger(-1, errno, "Can not create pipe");
		return VZ_RESOURCE_ERROR;
	}
	if (pipe(err_p) < 0) {
		close(wait_p[0]);
		close(wait_p[1]);
		/* Fix: close old_wait_p here — previously leaked. */
		close(old_wait_p[0]);
		close(old_wait_p[1]);
		logger(-1, errno, "Can not create pipe");
		return VZ_RESOURCE_ERROR;
	}
	/* Ignore SIGPIPE so a dead child doesn't kill us on write(). */
	sigemptyset(&act.sa_mask);
	act.sa_handler = SIG_IGN;
	act.sa_flags = 0;
	sigaction(SIGPIPE, &act, NULL);
	fix_numiptent(&res->ub);
	fix_cpu(&res->cpu);

	ret = vz_env_create(h, veid, res, wait_p, old_wait_p, err_p, fn, data);
	if (ret)
		goto err;
	if ((ret = vps_setup_res(h, veid, &actions, &res->fs, param,
		STATE_STARTING, skip, mod)))
	{
		goto err;
	}
	if (!(skip & SKIP_ACTION_SCRIPT)) {
		snprintf(buf, sizeof(buf), VPS_CONF_DIR "%d.%s", veid,
			START_PREFIX);
		if (stat_file(buf)) {
			if (vps_exec_script(h, veid, res->fs.root, NULL, NULL,
				buf, NULL, 0))
			{
				ret = VZ_ACTIONSCRIPT_ERROR;
				goto err;
			}
		}
	}
	/* Tell the child that it's time to start /sbin/init */
	err = 0;
	if (write(wait_p[1], &err, sizeof(err)) != sizeof(err))
		logger(-1, errno, "Unable to write to waitfd to start init");
	close(wait_p[1]);
	close(old_wait_p[1]);
err:
	free_dist_actions(&actions);
	if (ret) {
		/* Kill environment */
		logger(-1, 0, "Container start failed (try to check kernel "
			"messages, e.g. \"dmesg | tail\")");
		/* Close wait fd without writing anything to it
		 * to signal the child that we have failed to configure
		 * the environment, so it should not start /sbin/init */
		close(wait_p[1]);
		/* Writing data to old_wait_p is the legacy "no go" signal. */
		write(old_wait_p[1], &err, sizeof(err));
		close(old_wait_p[1]);
	} else {
		/* Zero bytes read from err_p means the child reported no
		 * error before (or instead of) exiting the setup phase. */
		if (!read(err_p[0], &ret, sizeof(ret))) {
			if (res->misc.wait == YES) {
				logger(0, 0, "Container start in progress"
					", waiting ...");
				err = vps_execFn(h, veid, res->fs.root,
					wait_on_fifo, NULL, 0);
				if (err) {
					/* Fix: removed stray "\" line-
					 * continuation artifact that had been
					 * spliced into this expression. */
					logger(0, 0, "Container wait failed%s",
						err == VZ_EXEC_TIMEOUT ?
						" - timeout expired" : "");
					ret = VZ_WAIT_FAILED;
				} else {
					logger(0, 0, "Container started"
						" successfully");
				}
			} else {
				logger(0, 0, "Container start in progress...");
			}
		} else {
			if (ret == VZ_FS_BAD_TMPL)
				logger(-1, 0, "Unable to start init, probably"
					" incorrect template");
			logger(-1, 0, "Container start failed");
		}
	}
	if (ret) {
		if (vps_is_run(h, veid))
			env_stop(h, veid, res->fs.root, M_KILL);
		/* restore original quota values */
		vps_set_quota(veid, &res->dq);
		if (vps_is_mounted(res->fs.root))
			vps_umount(h, veid, res->fs.root, skip);
	}
	close(wait_p[0]);
	close(wait_p[1]);
	/* Fix: old_wait_p[0] was never closed (fd leak); the write ends
	 * are already closed above, so these extra closes fail harmlessly
	 * with EBADF, matching the existing wait_p[1] double-close. */
	close(old_wait_p[0]);
	close(old_wait_p[1]);
	close(err_p[0]);
	close(err_p[1]);

	return ret;
}