Example #1
/*
 * init() is called when the plugin is loaded, before any other functions
 * are called.  Put global initialization here.
 */
extern int init (void)
{
	/* If running on the slurmctld, don't do any of this since
	 * it isn't needed there.
	 */
	if (_run_in_daemon()) {
		jag_common_init(0);

		/* read cgroup configuration */
		if (read_slurm_cgroup_conf(&slurm_cgroup_conf))
			return SLURM_ERROR;

		/* initialize cpuinfo internal data */
		if (xcpuinfo_init() != XCPUINFO_SUCCESS) {
			free_slurm_cgroup_conf(&slurm_cgroup_conf);
			return SLURM_ERROR;
		}

		/* enable cpuacct cgroup subsystem */
		if (jobacct_gather_cgroup_cpuacct_init(&slurm_cgroup_conf) !=
		    SLURM_SUCCESS) {
			xcpuinfo_fini();
			free_slurm_cgroup_conf(&slurm_cgroup_conf);
			return SLURM_ERROR;
		}

		/* enable memory cgroup subsystem */
		if (jobacct_gather_cgroup_memory_init(&slurm_cgroup_conf) !=
		    SLURM_SUCCESS) {
			xcpuinfo_fini();
			free_slurm_cgroup_conf(&slurm_cgroup_conf);
			return SLURM_ERROR;
		}

		/* FIXME: Enable when kernel support ready.
		 *
		 * Enable blkio subsystem.
		 */
		/* if (jobacct_gather_cgroup_blkio_init(&slurm_cgroup_conf) */
		/*     != SLURM_SUCCESS) { */
		/* 	xcpuinfo_fini(); */
		/* 	free_slurm_cgroup_conf(&slurm_cgroup_conf); */
		/* 	return SLURM_ERROR; */
		/* } */
	}

	verbose("%s loaded", plugin_name);
	return SLURM_SUCCESS;
}
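Both init() variants in these examples follow the same staged-initialization pattern: bring each subsystem up in order and, on any failure, unwind everything already initialized in reverse before returning SLURM_ERROR. Below is a minimal standalone sketch of that idiom, using hypothetical subsystem_a/subsystem_b helpers rather than the real SLURM calls.

#include <stdio.h>

#define MY_SUCCESS 0
#define MY_ERROR  -1

/* Hypothetical subsystems standing in for cgroup/cpuinfo setup. */
static int subsystem_a_init(void) { return MY_SUCCESS; }
static void subsystem_a_fini(void) { }
static int subsystem_b_init(void) { return MY_SUCCESS; }

static int plugin_init(void)
{
	if (subsystem_a_init() != MY_SUCCESS)
		return MY_ERROR;

	if (subsystem_b_init() != MY_SUCCESS) {
		/* Unwind what was already brought up, in reverse order,
		 * just as init() above unwinds with xcpuinfo_fini() and
		 * free_slurm_cgroup_conf() when a later step fails. */
		subsystem_a_fini();
		return MY_ERROR;
	}

	return MY_SUCCESS;
}

int main(void)
{
	printf("plugin_init: %d\n", plugin_init());
	return 0;
}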
Example #2
extern int fini (void)
{
    _slurm_cgroup_destroy();
    xcpuinfo_fini();
    free_slurm_cgroup_conf(&slurm_cgroup_conf);
    return SLURM_SUCCESS;
}
Example #3
extern int task_cgroup_cpuset_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	char release_agent_path[PATH_MAX];

	/* initialize cpuinfo internal data */
	if (xcpuinfo_init() != XCPUINFO_SUCCESS) {
		return SLURM_ERROR;
	}

	/* initialize user/job/jobstep cgroup relative paths */
	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	/* initialize cpuset cgroup namespace */
	release_agent_path[0]='\0';
	if (snprintf(release_agent_path,PATH_MAX,"%s/release_cpuset",
		      slurm_cgroup_conf->cgroup_release_agent) >= PATH_MAX) {
		error("task/cgroup: unable to build cpuset release agent path");
		goto error;
	}
	if (xcgroup_ns_create(slurm_cgroup_conf, &cpuset_ns, "/cpuset", "",
			       "cpuset",release_agent_path) !=
	     XCGROUP_SUCCESS) {
		error("task/cgroup: unable to create cpuset namespace");
		goto error;
	}

	/* check that cpuset cgroup namespace is available */
	if (! xcgroup_ns_is_available(&cpuset_ns)) {
		if (slurm_cgroup_conf->cgroup_automount) {
			if (xcgroup_ns_mount(&cpuset_ns)) {
				error("task/cgroup: unable to mount cpuset "
				      "namespace");
				goto clean;
			}
			info("task/cgroup: cpuset namespace is now mounted");
		} else {
			error("task/cgroup: cpuset namespace not mounted. "
			      "aborting");
			goto clean;
		}
	}

	return SLURM_SUCCESS;

clean:
	xcgroup_ns_destroy(&cpuset_ns);

error:
	xcpuinfo_fini();
	return SLURM_ERROR;
}
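task_cgroup_cpuset_init() relies on the common goto-based cleanup idiom: failures that happen after more state exists jump to labels that release progressively more of it (clean destroys the namespace, error only tears down cpuinfo). Here is a compilable sketch of the same shape, with hypothetical cpuinfo/namespace helpers standing in for the xcpuinfo/xcgroup calls.

#include <limits.h>
#include <stdio.h>

/* Hypothetical helpers standing in for the xcpuinfo/xcgroup API. */
static int cpuinfo_init(void)  { return 0; }
static void cpuinfo_fini(void) { }
static int ns_create(const char *agent) { (void) agent; return 0; }
static int ns_mount(void)      { return 0; }
static void ns_destroy(void)   { }

static int cpuset_init_sketch(const char *release_agent_dir)
{
	char agent[PATH_MAX];

	if (cpuinfo_init() != 0)
		return -1;

	/* Reject truncated paths, as the original does by checking
	 * snprintf(...) >= PATH_MAX. */
	if (snprintf(agent, sizeof(agent), "%s/release_cpuset",
		     release_agent_dir) >= (int) sizeof(agent))
		goto error;

	if (ns_create(agent) != 0)
		goto error;

	/* A failure after the namespace exists must release it too,
	 * so it jumps to "clean" instead of "error". */
	if (ns_mount() != 0)
		goto clean;

	return 0;

clean:
	ns_destroy();
error:
	cpuinfo_fini();
	return -1;
}

int main(void)
{
	return cpuset_init_sketch("/tmp/example_release_agent_dir");
}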
Example #4
extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t devices_cg;

	/* Similarly to task_cgroup_{memory,cpuset}_fini(), we must lock the
	 * root cgroup so we don't race with another job step that is
	 * being started.  */
	if (xcgroup_create(&devices_ns, &devices_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&devices_cg) == XCGROUP_SUCCESS) {
			/* First move slurmstepd to the root devices cg
			 * so we can remove the step/job/user devices
			 * cg's.  */
			xcgroup_move_process(&devices_cg, getpid());
			if (xcgroup_delete(&step_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "devices : %m");
			if (xcgroup_delete(&job_devices_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job devices : %m");
			if (xcgroup_delete(&user_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user devices : %m");
			xcgroup_unlock(&devices_cg);
		} else
			error("task/cgroup: unable to lock root devices : %m");
		xcgroup_destroy(&devices_cg);
	} else
		error("task/cgroup: unable to create root devices : %m");

	if ( user_cgroup_path[0] != '\0' )
		xcgroup_destroy(&user_devices_cg);
	if ( job_cgroup_path[0] != '\0' )
		xcgroup_destroy(&job_devices_cg);
	if ( jobstep_cgroup_path[0] != '\0' )
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
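The lock/move/delete dance above exists because a cgroup directory can only be removed with rmdir() once it holds no tasks and no child cgroups, so slurmstepd first moves itself to the root cgroup before deleting the step/job/user levels. The following is a minimal sketch of that underlying cgroup v1 mechanism; the paths are made up, and the real plugin goes through the xcgroup API and takes a lock to avoid racing with a starting step.

#include <stdio.h>
#include <unistd.h>

/*
 * Move the calling process into the parent cgroup, then remove the
 * now-empty child cgroup directory.  Under cgroup v1, rmdir() only
 * succeeds once the cgroup has no tasks and no children.
 */
static int remove_own_cgroup(const char *parent_dir, const char *child_dir)
{
	char procs_path[4096];
	FILE *fp;

	if (snprintf(procs_path, sizeof(procs_path), "%s/cgroup.procs",
		     parent_dir) >= (int) sizeof(procs_path))
		return -1;

	fp = fopen(procs_path, "w");
	if (!fp)
		return -1;
	fprintf(fp, "%d\n", (int) getpid());
	if (fclose(fp) != 0)
		return -1;

	return rmdir(child_dir);
}

int main(void)
{
	/* Hypothetical mount point and step-level cgroup path. */
	return remove_own_cgroup("/sys/fs/cgroup/devices",
				 "/sys/fs/cgroup/devices/slurm/step_example");
}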
Example #5
extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{

	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_cpuset_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_cpuset_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_cpuset_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&cpuset_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
Example #6
/*
 * init() is called when the plugin is loaded, before any other functions
 * are called.  Put global initialization here.
 */
extern int init (void)
{
    /* read cgroup configuration */
    if (read_slurm_cgroup_conf(&slurm_cgroup_conf))
        return SLURM_ERROR;

    /* initialize cpuinfo internal data */
    if (xcpuinfo_init() != XCPUINFO_SUCCESS) {
        free_slurm_cgroup_conf(&slurm_cgroup_conf);
        return SLURM_ERROR;
    }

    /* initialize cgroup internal data */
    if (_slurm_cgroup_init() != SLURM_SUCCESS) {
        xcpuinfo_fini();
        free_slurm_cgroup_conf(&slurm_cgroup_conf);
        return SLURM_ERROR;
    }

    return SLURM_SUCCESS;
}
Example #7
extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{

	if ( user_cgroup_path[0] != '\0' )
		xcgroup_destroy(&user_devices_cg);
	if ( job_cgroup_path[0] != '\0' )
		xcgroup_destroy(&job_devices_cg);
	if ( jobstep_cgroup_path[0] != '\0' )
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
Example #8
extern int task_cgroup_devices_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	uint16_t cpunum;

	/* initialize cpuinfo internal data */
	if ( xcpuinfo_init() != XCPUINFO_SUCCESS )
		return SLURM_ERROR;

	/* initialize user/job/jobstep cgroup relative paths */
	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';
	/* initialize allowed_devices_filename */
	cgroup_allowed_devices_file[0] = '\0';

	if ( get_procs(&cpunum) != 0 ) {
		error("task/cgroup: unable to get a number of CPU");
		goto error;
	}

	(void) gres_plugin_node_config_load(cpunum, conf->node_name, NULL);

	strcpy(cgroup_allowed_devices_file,
	       slurm_cgroup_conf->allowed_devices_file);
	if (xcgroup_ns_create(slurm_cgroup_conf, &devices_ns, "", "devices")
	    != XCGROUP_SUCCESS ) {
		error("task/cgroup: unable to create devices namespace");
		goto error;
	}

	return SLURM_SUCCESS;

error:
	xcgroup_ns_destroy(&devices_ns);
	xcpuinfo_fini();
	return SLURM_ERROR;
}
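task_cgroup_devices_init() copies the configured allowed_devices_file path into a fixed-size buffer with strcpy(); example #3 shows the safer idiom of building the path with snprintf() and checking for truncation. Below is a small standalone illustration of that check; the configuration value shown is just an example.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char cgroup_allowed_devices_file[PATH_MAX];
	/* Hypothetical configured value. */
	const char *conf_value = "/etc/slurm/cgroup_allowed_devices_file.conf";

	if (snprintf(cgroup_allowed_devices_file,
		     sizeof(cgroup_allowed_devices_file), "%s", conf_value)
	    >= (int) sizeof(cgroup_allowed_devices_file)) {
		fprintf(stderr, "allowed devices file path too long\n");
		return 1;
	}

	printf("%s\n", cgroup_allowed_devices_file);
	return 0;
}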