Example #1
static int _move_current_to_root_cgroup(xcgroup_ns_t *ns)
{
	xcgroup_t cg;
	int rc;

	if (xcgroup_create(ns, &cg, "", 0, 0) != XCGROUP_SUCCESS)
		return SLURM_ERROR;

	rc = xcgroup_move_process(&cg, getpid());
	xcgroup_destroy(&cg);

	return rc;
}
extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t devices_cg;

	/* Similarly to task_cgroup_{memory,cpuset}_fini(), we must lock the
	 * root cgroup so we don't race with another job step that is
	 * being started.  */
	if (xcgroup_create(&devices_ns, &devices_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&devices_cg) == XCGROUP_SUCCESS) {
			/* First move slurmstepd to the root devices cg
			 * so we can remove the step/job/user devices
			 * cg's.  */
			xcgroup_move_process(&devices_cg, getpid());
			if (xcgroup_delete(&step_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "devices : %m");
			if (xcgroup_delete(&job_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job devices : %m");
			if (xcgroup_delete(&user_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user devices : %m");
			xcgroup_unlock(&devices_cg);
		} else
			error("task/cgroup: unable to lock root devices : %m");
		xcgroup_destroy(&devices_cg);
	} else
		error("task/cgroup: unable to create root devices : %m");

	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_devices_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_devices_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
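
The comment at the top of the fini function above explains why the root devices cgroup is locked before cleanup: otherwise the deletes could race with another job step that is creating its cgroups at the same moment. As an illustration of that locking idea only (an assumption about the mechanism, not the actual xcgroup_lock() implementation), such a lock can be taken with flock(2) on the cgroup directory:

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

/* Illustrative sketch: take an exclusive advisory lock on a cgroup directory
 * so that cleanup and job-step creation serialize.  The real code uses
 * xcgroup_lock()/xcgroup_unlock(); this only shows the underlying idea. */
static int _lock_cgroup_dir(const char *cg_path)
{
	int fd = open(cg_path, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	if (flock(fd, LOCK_EX) < 0) {
		close(fd);
		return -1;
	}
	/* Caller releases the lock with flock(fd, LOCK_UN) and close(fd). */
	return fd;
}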
Example #3
extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	     job_cgroup_path[0] == '\0' ||
	     jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root memory cg and remove[*]
	 * the step cgroup to move its allocated pages to its parent.
	 *
	 * [*] Calling rmdir(2) on an empty cgroup moves all resident charged
	 *  pages to the parent (i.e. the job cgroup). (If force_empty were
	 *  used instead, only clean pages would be flushed). This keeps
	 *  resident pagecache pages associated with the job. It is expected
	 *  that the job epilog will then optionally force_empty the
	 *  job cgroup (to flush pagecache), and then rmdir(2) the cgroup
	 *  or wait for release notification from kernel.
	 */
	if (xcgroup_create(&memory_ns,&memory_cg,"",0,0) == XCGROUP_SUCCESS) {
		xcgroup_move_process(&memory_cg, getpid());
		xcgroup_destroy(&memory_cg);
		if (xcgroup_delete(&step_memory_cg) != XCGROUP_SUCCESS)
			error("cgroup: rmdir step memcg failed: %m");
	}

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
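
The [*] note above says the job epilog is expected to optionally force_empty the job memory cgroup (to flush pagecache) and then rmdir(2) it. A minimal sketch of what that epilog-side cleanup could look like against the raw cgroup-v1 memory controller interface; the path handling and error reporting here are illustrative assumptions, not SLURM code:

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative sketch: flush reclaimable pagecache charged to a job memory
 * cgroup via memory.force_empty, then remove the (now empty) cgroup so any
 * remaining charges move to its parent. */
static int _epilog_cleanup_job_memcg(const char *job_cg_path)
{
	char file[PATH_MAX];
	int fd;

	snprintf(file, sizeof(file), "%s/memory.force_empty", job_cg_path);
	fd = open(file, O_WRONLY);
	if (fd >= 0) {
		/* Writing any value asks the kernel to reclaim clean pages. */
		if (write(fd, "1", 1) < 0)
			perror("memory.force_empty");
		close(fd);
	}

	/* rmdir(2) succeeds only once the cgroup has no tasks and no
	 * children; the kernel re-parents whatever charges remain. */
	if (rmdir(job_cg_path) < 0) {
		perror("rmdir");
		return -1;
	}
	return 0;
}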
Example #4
extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t cpuset_cg;

	/* Similarly to task_cgroup_memory_fini(), we must lock the
	 * root cgroup so we don't race with another job step that is
	 * being started.  */
	if (xcgroup_create(&cpuset_ns, &cpuset_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&cpuset_cg) == XCGROUP_SUCCESS) {
			int i = 0, npids = 0, cnt = 0;
			pid_t* pids = NULL;
			/* First move slurmstepd to the root cpuset cg
			 * so we can remove the step/job/user cpuset
			 * cg's.  */
			xcgroup_move_process(&cpuset_cg, getpid());

			/* There is a delay in the cgroup system when moving
			 * the pid from one cgroup to another.  This is
			 * usually short, but we need to wait to make sure the
			 * pid is out of the step cgroup, or we will hit an
			 * error that leaves the cgroup unable to be removed.
			 */
			do {
				xcgroup_get_pids(&step_cpuset_cg,
						 &pids, &npids);
				for (i = 0 ; i<npids ; i++)
					if (pids[i] == getpid()) {
						cnt++;
						break;
					}
				xfree(pids);
			} while ((i < npids) && (cnt < MAX_MOVE_WAIT));

			if (cnt < MAX_MOVE_WAIT)
				debug3("Took %d checks before stepd pid was removed from the step cgroup.",
				       cnt);
			else
				error("Pid %d is still in the step cgroup.  It might be left uncleaned after the job.",
				      getpid());

			if (xcgroup_delete(&step_cpuset_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "cpuset : %m");
			if (xcgroup_delete(&job_cpuset_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job cpuset : %m");
			if (xcgroup_delete(&user_cpuset_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user cpuset : %m");
			xcgroup_unlock(&cpuset_cg);
		} else
			error("task/cgroup: unable to lock root cpuset : %m");
		xcgroup_destroy(&cpuset_cg);
	} else
		error("task/cgroup: unable to create root cpuset : %m");

	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_cpuset_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_cpuset_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_cpuset_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&cpuset_ns);

	return SLURM_SUCCESS;
}
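
The polling loop in this example exists because moving a pid between cgroups is not instantaneous: the pid can still be listed in the step cgroup for a short time, and deleting the cgroup during that window fails. A rough equivalent of what that loop checks, expressed against the raw cgroup.procs file instead of xcgroup_get_pids() (path and retry limit are illustrative assumptions):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative sketch: scan the step cgroup's cgroup.procs file until the
 * given pid is no longer listed, giving up after max_checks scans. */
static bool _pid_left_cgroup(const char *cg_path, pid_t pid, int max_checks)
{
	char file[PATH_MAX];
	int i;

	snprintf(file, sizeof(file), "%s/cgroup.procs", cg_path);

	for (i = 0; i < max_checks; i++) {
		FILE *fp = fopen(file, "r");
		int p;
		bool found = false;

		if (!fp)
			return true;	/* cgroup already gone */
		while (fscanf(fp, "%d", &p) == 1)
			if ((pid_t) p == pid)
				found = true;
		fclose(fp);

		if (!found)
			return true;	/* the move has completed */
	}
	return false;	/* still listed after max_checks scans */
}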