/* Stop a running CT.
 *
 * Unless stop_mode == M_KILL, first forks a helper that performs a graceful
 * stop via real_env_stop() and polls up to MAX_SHTD_TM seconds for the CT
 * to go down.  On timeout it falls through to a forced M_KILL attempt,
 * polled at 0.5 s intervals.
 *
 * SIGCHLD is temporarily set to SIG_IGN so the forked helpers are reaped
 * automatically (POSIX: ignoring SIGCHLD prevents zombies, hence no wait()
 * below); the previous disposition is restored before returning.
 *
 * @param h         CT handler.
 * @param veid      CT ID.
 * @param root      CT root, passed through to real_env_stop().
 * @param stop_mode one of M_REBOOT, M_HALT, M_KILL.
 * @return 0 on success; VZ_RESOURCE_ERROR if fork() fails,
 *         VZ_STOP_ERROR if the CT is still running after the kill poll.
 */
static int env_stop(vps_handler *h, envid_t veid, const char *root, int stop_mode)
{
	struct sigaction act, actold;
	int i, pid, ret = 0;

	/* Save the current SIGCHLD disposition, then ignore SIGCHLD. */
	sigaction(SIGCHLD, NULL, &actold);
	sigemptyset(&act.sa_mask);
	act.sa_handler = SIG_IGN;
	act.sa_flags = SA_NOCLDSTOP;
	sigaction(SIGCHLD, &act, NULL);

	logger(0, 0, "Stopping container ...");
	if (stop_mode == M_KILL)
		goto kill_vps;

	/* Graceful attempt: the child performs the actual stop. */
	if ((pid = fork()) < 0) {
		logger(-1, errno, "Can not fork");
		ret = VZ_RESOURCE_ERROR;
		/* NOTE(review): "out" logs "operation timed out" even for this
		 * fork failure -- the message is slightly misleading here. */
		goto out;
	} else if (pid == 0) {
		ret = real_env_stop(h, veid, root, stop_mode);
		exit(ret);
	}
	/* Poll once per second; on success skip the forced kill. */
	for (i = 0; i < MAX_SHTD_TM; i++) {
		sleep(1);
		if (!vps_is_run(h, veid)) {
			ret = 0;
			goto out;
		}
	}
	/* Graceful stop timed out -- fall through to the forced kill. */
kill_vps:
	if ((pid = fork()) < 0) {
		ret = VZ_RESOURCE_ERROR;
		logger(-1, errno, "Can not fork");
		/* Skips the result logging at "out"; only restores SIGCHLD. */
		goto err;
	} else if (pid == 0) {
		ret = real_env_stop(h, veid, root, M_KILL);
		exit(ret);
	}
	ret = VZ_STOP_ERROR;	/* assume failure until the CT goes down */
	for (i = 0; i < MAX_SHTD_TM; i++) {
		usleep(500000);	/* 0.5 s between checks */
		if (!vps_is_run(h, veid)) {
			ret = 0;
			break;
		}
	}
out:
	if (ret)
		logger(-1, 0, "Unable to stop container: operation timed out");
	else
		logger(0, 0, "Container was stopped");
err:
	/* Restore the original SIGCHLD disposition. */
	sigaction(SIGCHLD, &actold, NULL);
	return ret;
}
/** Allow/disallow access to devices on host system from CT.
 *
 * Walks the device list; for each entry with a name, creates the device
 * node under the CT root before applying the permission change.  Stops at
 * the first failure and returns its error code.
 *
 * @param h	CT handler.
 * @param veid	CT ID.
 * @param root	CT root.
 * @param dev	devices list.
 * @return 0 on success.
 */
int vps_set_devperm(vps_handler *h, envid_t veid, const char *root, dev_param *dev)
{
	list_head_t *head = &dev->dev;
	dev_res *entry;
	int err = 0;

	if (list_empty(head))
		return 0;
	if (!vps_is_run(h, veid)) {
		logger(-1, 0, "Unable to apply devperm: "
			"container is not running");
		return VZ_VE_NOT_RUNNING;
	}
	logger(0, 0, "Setting devices");
	list_for_each(entry, head, list) {
		/* Named devices need their node created first. */
		if (entry->name != NULL) {
			err = dev_create(root, entry);
			if (err != 0)
				break;
		}
		err = set_devperm(h, veid, entry);
		if (err != 0)
			break;
	}
	return err;
}
/** Apply cpu parameters on running CT.
 *
 * A no-op (returns 0) when no cpu parameter is set.  Otherwise requires
 * the CT to be running and delegates to the handler's setcpus() callback.
 *
 * @param h	CT handler.
 * @param veid	CT ID.
 * @param cpu	cpu parameters.
 * @return 0 on success.
 */
int vps_set_cpu(vps_handler *h, envid_t veid, cpu_param *cpu)
{
	/* At least one parameter must be present for there to be work. */
	if (cpu->limit != NULL || cpu->units != NULL ||
			cpu->weight != NULL || cpu->vcpus != NULL ||
			cpu->mask != NULL)
	{
		if (!vps_is_run(h, veid)) {
			logger(-1, 0, "Unable to apply CPU parameters: "
				"container is not running");
			return VZ_VE_NOT_RUNNING;
		}
		return h->setcpus(h, veid, cpu);
	}
	return 0;
}
/** Stop CT.
 *
 * Runs the per-CT stop action script (unless skipped), stops the
 * environment, then performs module cleanup, removes the CT's IP
 * addresses and unmounts the CT area.
 *
 * @param h		CT handler.
 * @param veid		CT ID.
 * @param param		CT parameters.
 * @param stop_mode	stop mode, one of (M_REBOOT M_HALT M_KILL).
 * @param skip		flag to skip run action script (SKIP_ACTION_SCRIPT)
 * @param action	modules list, used to call cleanup() callback.
 * @return 0 on success.
 */
int vps_stop(vps_handler *h, envid_t veid, vps_param *param, int stop_mode,
	skipFlags skip, struct mod_action *action)
{
	vps_res *res = &param->res;
	char path[64];
	int ret;

	if (check_var(res->fs.root, "VE_ROOT is not set"))
		return VZ_VE_ROOT_NOTSET;
	if (!vps_is_run(h, veid)) {
		/* Already down: report it, but treat as success. */
		logger(-1, 0, "Unable to stop: container is not running");
		return 0;
	}
	if (!(skip & SKIP_ACTION_SCRIPT)) {
		snprintf(path, sizeof(path), VPS_CONF_DIR "%d.%s",
			veid, STOP_PREFIX);
		if (stat_file(path) &&
				vps_exec_script(h, veid, res->fs.root, NULL,
					NULL, path, NULL, 0))
			return VZ_ACTIONSCRIPT_ERROR;
	}
	/* get CT IP addresses for cleanup */
	get_vps_ip(h, veid, &param->del_res.net.ip);
	ret = env_stop(h, veid, res->fs.root, stop_mode);
	if (ret == 0) {
		mod_cleanup(h, veid, action, param);
		/* Cleanup CT IPs */
		run_net_script(veid, DEL, &param->del_res.net.ip,
			STATE_STOPPING, param->res.net.skip_arpdetect);
		ret = vps_umount(h, veid, res->fs.root, skip);
	}
	free_str_param(&param->del_res.net.ip);
	return ret;
}
/** Start CT using a custom environment-create callback.
 *
 * @param h	CT handler.
 * @param veid	CT ID.
 * @param param	CT parameters.
 * @param skip	flags to skip setup stages (SKIP_*).
 * @param mod	modules list, used to call setup() callback.
 * @param fn	callback that creates the environment.
 * @param data	opaque argument passed to fn.
 * @return 0 on success.
 */
int vps_start_custom(vps_handler *h, envid_t veid, vps_param *param,
	skipFlags skip, struct mod_action *mod, env_create_FN fn, void *data)
{
	int wait_p[2];
	int old_wait_p[2];
	int err_p[2];
	int ret;
	/* Fix: initialize err -- the error path below writes it to
	 * old_wait_p[1] and may be reached before "err = 0" is executed. */
	int err = 0;
	char buf[64];
	char *dist_name;
	struct sigaction act;
	vps_res *res = &param->res;
	dist_actions actions;

	memset(&actions, 0, sizeof(actions));
	if (check_var(res->fs.root, "VE_ROOT is not set"))
		return VZ_VE_ROOT_NOTSET;
	if (vps_is_run(h, veid)) {
		logger(-1, 0, "Container is already running");
		return VZ_VE_RUNNING;
	}
	if ((ret = check_ub(&res->ub)))
		return ret;
	dist_name = get_dist_name(&res->tmpl);
	ret = read_dist_actions(dist_name, DIST_DIR, &actions);
	free(dist_name);
	if (ret)
		return ret;
	logger(0, 0, "Starting container ...");
	if (vps_is_mounted(res->fs.root)) {
		/* if CT is mounted -- umount first, to cleanup mount state */
		vps_umount(h, veid, res->fs.root, skip);
	}
	if (!vps_is_mounted(res->fs.root)) {
		/* increase quota to perform setup */
		quota_inc(&res->dq, 100);
		if ((ret = vps_mount(h, veid, &res->fs, &res->dq, skip))) {
			/* Fix: release dist actions on this early return. */
			free_dist_actions(&actions);
			return ret;
		}
		quota_inc(&res->dq, -100);
	}
	/* Fedora 14/15 hacks */
	if (fix_ve_devconsole(res->fs.root) != 0 ||
			fix_ve_systemd(res->fs.root) != 0) {
		free_dist_actions(&actions);
		return VZ_FS_BAD_TMPL;
	}
	if (pipe(wait_p) < 0) {
		logger(-1, errno, "Can not create pipe");
		free_dist_actions(&actions);
		return VZ_RESOURCE_ERROR;
	}
	/* old_wait_p is needed for backward compatibility with older kernels,
	 * while for recent ones (that support CPT_SET_LOCKFD2) we use wait_p.
	 *
	 * If old_wait_p is closed without writing any data, it's "OK to go"
	 * signal, and if data are received from old_wait_p it's "no go"
	 * signal.  Note that such thing doesn't work if vzctl segfaults,
	 * because in this case the descriptor will be closed without
	 * sending data.
	 */
	if (pipe(old_wait_p) < 0) {
		logger(-1, errno, "Can not create pipe");
		/* Fix: wait_p used to leak on this path. */
		close(wait_p[0]);
		close(wait_p[1]);
		free_dist_actions(&actions);
		return VZ_RESOURCE_ERROR;
	}
	if (pipe(err_p) < 0) {
		logger(-1, errno, "Can not create pipe");
		close(wait_p[0]);
		close(wait_p[1]);
		/* Fix: old_wait_p used to leak on this path. */
		close(old_wait_p[0]);
		close(old_wait_p[1]);
		free_dist_actions(&actions);
		return VZ_RESOURCE_ERROR;
	}
	/* Ignore SIGPIPE: the child may die while we hold pipe ends. */
	sigemptyset(&act.sa_mask);
	act.sa_handler = SIG_IGN;
	act.sa_flags = 0;
	sigaction(SIGPIPE, &act, NULL);
	fix_numiptent(&res->ub);
	fix_cpu(&res->cpu);

	ret = vz_env_create(h, veid, res, wait_p, old_wait_p, err_p, fn, data);
	if (ret)
		goto err;
	if ((ret = vps_setup_res(h, veid, &actions, &res->fs, param,
		STATE_STARTING, skip, mod)))
	{
		goto err;
	}
	if (!(skip & SKIP_ACTION_SCRIPT)) {
		snprintf(buf, sizeof(buf), VPS_CONF_DIR "%d.%s", veid,
			START_PREFIX);
		if (stat_file(buf)) {
			if (vps_exec_script(h, veid, res->fs.root, NULL, NULL,
				buf, NULL, 0))
			{
				ret = VZ_ACTIONSCRIPT_ERROR;
				goto err;
			}
		}
	}
	/* Tell the child that it's time to start /sbin/init */
	err = 0;
	if (write(wait_p[1], &err, sizeof(err)) != sizeof(err))
		logger(-1, errno, "Unable to write to waitfd to start init");
	close(wait_p[1]);
	close(old_wait_p[1]);
err:
	free_dist_actions(&actions);
	if (ret) {
		/* Kill environment */
		logger(-1, 0, "Container start failed (try to check kernel "
			"messages, e.g. \"dmesg | tail\")");
		/* Close wait fd without writing anything to it
		 * to signal the child that we have failed to configure
		 * the environment, so it should not start /sbin/init
		 */
		close(wait_p[1]);
		/* Any data on old_wait_p means "no go" (see above). */
		if (write(old_wait_p[1], &err, sizeof(err)) != sizeof(err))
			logger(-1, errno, "Unable to write to old waitfd");
		close(old_wait_p[1]);
	} else {
		/* No data on err_p means the child started init fine;
		 * otherwise it sent its error code.
		 * NOTE(review): assumes vz_env_create() leaves err_p[1]
		 * closed in the parent so EOF can be seen -- confirm. */
		if (!read(err_p[0], &ret, sizeof(ret))) {
			if (res->misc.wait == YES) {
				logger(0, 0, "Container start in progress"
					", waiting ...");
				err = vps_execFn(h, veid, res->fs.root,
					wait_on_fifo, NULL, 0);
				if (err) {
					logger(0, 0, "Container wait failed%s",
						err == VZ_EXEC_TIMEOUT ?
						" - timeout expired" : "");
					ret = VZ_WAIT_FAILED;
				} else {
					logger(0, 0, "Container started"
						" successfully");
				}
			} else {
				logger(0, 0, "Container start in progress...");
			}
		} else {
			if (ret == VZ_FS_BAD_TMPL)
				logger(-1, 0, "Unable to start init, probably"
					" incorrect template");
			logger(-1, 0, "Container start failed");
		}
	}
	if (ret) {
		if (vps_is_run(h, veid))
			env_stop(h, veid, res->fs.root, M_KILL);
		/* restore original quota values */
		vps_set_quota(veid, &res->dq);
		if (vps_is_mounted(res->fs.root))
			vps_umount(h, veid, res->fs.root, skip);
	}
	/* Write ends (wait_p[1], old_wait_p[1]) were already closed on both
	 * the success and the failure branch above, so close only the rest.
	 * Fix: old_wait_p[0] used to leak; wait_p[1] used to be closed twice. */
	close(wait_p[0]);
	close(old_wait_p[0]);
	close(err_p[0]);
	close(err_p[1]);
	return ret;
}