extern void acct_gather_profile_endpoll(void)
{
	int i;

	if (!acct_gather_profile_running) {
		debug2("acct_gather_profile_startpoll: poll already ended!");
		return;
	}

	acct_gather_profile_running = false;

	for (i=0; i < PROFILE_CNT; i++) {
		/* end remote threads */
		slurm_mutex_lock(&acct_gather_profile_timer[i].notify_mutex);
		slurm_cond_signal(&acct_gather_profile_timer[i].notify);
		slurm_mutex_unlock(&acct_gather_profile_timer[i].notify_mutex);
		slurm_cond_destroy(&acct_gather_profile_timer[i].notify);
		acct_gather_profile_timer[i].freq = 0;
		switch (i) {
		case PROFILE_ENERGY:
			break;
		case PROFILE_TASK:
			jobacct_gather_endpoll();
			break;
		case PROFILE_FILESYSTEM:
			break;
		case PROFILE_NETWORK:
			break;
		default:
			fatal("Unhandled profile option %d please update "
			      "slurm_acct_gather_profile.c "
			      "(acct_gather_profile_endpoll)", i);
		}
	}
}
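For context, the signal above wakes per-profile sampling threads that presumably block on the same timer's condition in a loop of roughly this shape (a hedged sketch; the real polling threads live in the plugin code, not here):

	/* Hypothetical worker loop for profile type i: sleep until the
	 * poller signals, re-check the running flag, exit once it clears. */
	slurm_mutex_lock(&acct_gather_profile_timer[i].notify_mutex);
	while (acct_gather_profile_running) {
		slurm_cond_wait(&acct_gather_profile_timer[i].notify,
				&acct_gather_profile_timer[i].notify_mutex);
		/* ... take one sample for this profile type ... */
	}
	slurm_mutex_unlock(&acct_gather_profile_timer[i].notify_mutex);

Because acct_gather_profile_running is cleared before the signal is sent under the mutex, a waiter either observes the cleared flag or is woken by the signal; it cannot sleep through the shutdown, which is what makes the immediate slurm_cond_destroy() safe.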
Example #2
static void _msg_aggr_free(void *x)
{
	msg_aggr_t *object = (msg_aggr_t *)x;
	if (object) {
		slurm_cond_destroy(&object->wait_cond);
		xfree(object);
	}
}
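A destructor of this shape is meant to be passed to the list constructor so each element is freed through it when the list goes away; presumably something like the following call site (an assumption; the list name is taken from msg_aggr_sender_fini() in Example #6 below):

	/* assumed registration of the element destructor */
	msg_collection.msg_aggr_list = list_create(_msg_aggr_free);

Example #3 below shows the same idiom with list_create(destroy_data_info).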
Example #3
File: forward.c Project: artpol84/slurm
/*
 * start_msg_tree  - logic to begin the forward tree and
 *                   accumulate the return codes from the processes
 *                   receiving the forwarded message
 *
 * IN: hl          - hostlist_t   - list of every node to send message to
 * IN: msg         - slurm_msg_t  - message to send.
 * IN: timeout     - int          - how long to wait in milliseconds.
 * RET: List       - List containing the responses of the children
 *                   (if any) we forwarded the message to, with
 *                   entries of type ret_data_info_t.
 */
extern List start_msg_tree(hostlist_t hl, slurm_msg_t *msg, int timeout)
{
	fwd_tree_t fwd_tree;
	pthread_mutex_t tree_mutex;
	pthread_cond_t notify;
	int count = 0;
	List ret_list = NULL;
	int thr_count = 0;
	int host_count = 0;
	hostlist_t* sp_hl;
	int hl_count = 0;

	xassert(hl);
	xassert(msg);

	hostlist_uniq(hl);
	host_count = hostlist_count(hl);

	if (route_g_split_hostlist(hl, &sp_hl, &hl_count,
				   msg->forward.tree_width)) {
		error("unable to split forward hostlist");
		return NULL;
	}
	slurm_mutex_init(&tree_mutex);
	slurm_cond_init(&notify, NULL);

	ret_list = list_create(destroy_data_info);

	memset(&fwd_tree, 0, sizeof(fwd_tree));
	fwd_tree.orig_msg = msg;
	fwd_tree.ret_list = ret_list;
	fwd_tree.timeout = timeout;
	fwd_tree.notify = &notify;
	fwd_tree.p_thr_count = &thr_count;
	fwd_tree.tree_mutex = &tree_mutex;

	_start_msg_tree_internal(NULL, sp_hl, &fwd_tree, hl_count);

	xfree(sp_hl);

	slurm_mutex_lock(&tree_mutex);

	count = list_count(ret_list);
	debug2("Tree head got back %d looking for %d", count, host_count);
	while (thr_count > 0) {
		slurm_cond_wait(&notify, &tree_mutex);
		count = list_count(ret_list);
		debug2("Tree head got back %d", count);
	}
	xassert(count >= host_count);	/* If this fires, the tree head did not
					 * get all responses even though no
					 * forward threads remain active! */
	slurm_mutex_unlock(&tree_mutex);

	slurm_mutex_destroy(&tree_mutex);
	slurm_cond_destroy(&notify);

	return ret_list;
}
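The thr_count/notify pairing above is a classic completion-count pattern: each forwarding thread decrements the shared counter under tree_mutex and signals notify before exiting, so the head's while loop cannot miss the final wakeup and the xassert can check that every host answered. A minimal self-contained sketch of the same pattern in bare pthreads (all names are illustrative, not the Slurm internals):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_mutex_t tree_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  notify     = PTHREAD_COND_INITIALIZER;
static int             thr_count  = NTHREADS;

static void *forward_worker(void *arg)
{
	(void)arg;
	/* ... send the message and collect the response here ... */
	pthread_mutex_lock(&tree_mutex);
	thr_count--;			/* report completion ... */
	pthread_cond_signal(&notify);	/* ... and wake the tree head */
	pthread_mutex_unlock(&tree_mutex);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, forward_worker, NULL);

	/* Tree-head side: hold the mutex and sleep until the counter
	 * reaches zero; the while loop also absorbs spurious wakeups. */
	pthread_mutex_lock(&tree_mutex);
	while (thr_count > 0)
		pthread_cond_wait(&notify, &tree_mutex);
	pthread_mutex_unlock(&tree_mutex);

	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	printf("all %d forward threads finished\n", NTHREADS);
	return 0;
}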
Example #4
File: forward.c Project: artpol84/slurm
void destroy_forward_struct(forward_struct_t *forward_struct)
{
	if (forward_struct) {
		xfree(forward_struct->buf);
		slurm_mutex_destroy(&forward_struct->forward_mutex);
		slurm_cond_destroy(&forward_struct->notify);
		xfree(forward_struct);
	}
}
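destroy_forward_struct() releases exactly what the allocation side must set up; a hedged sketch of the matching init (field names come from the frees above, the call site itself is an assumption):

	forward_struct_t *forward_struct = xmalloc(sizeof(forward_struct_t));
	slurm_mutex_init(&forward_struct->forward_mutex);
	slurm_cond_init(&forward_struct->notify, NULL);
	/* ->buf is filled in later and eventually released by
	 * destroy_forward_struct() via xfree() */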
Example #5
extern int fini(void)
{
	_end_container_thread();

	/* free up some memory */
	slurm_mutex_destroy(&notify_mutex);
	slurm_cond_destroy(&notify);
	slurm_mutex_destroy(&thread_mutex);

	return SLURM_SUCCESS;
}
Example #6
extern void msg_aggr_sender_fini(void)
{
	if (!msg_collection.running)
		return;

	slurm_mutex_lock(&msg_collection.mutex);
	/* Clear the flag and signal under the same mutex the sender
	 * thread waits on, so the shutdown wakeup cannot be lost. */
	msg_collection.running = 0;
	slurm_cond_signal(&msg_collection.cond);
	slurm_mutex_unlock(&msg_collection.mutex);

	pthread_join(msg_collection.thread_id, NULL);
	msg_collection.thread_id = (pthread_t) 0;

	slurm_cond_destroy(&msg_collection.cond);
	/* signal and clear the waiting list */
	slurm_mutex_lock(&msg_collection.aggr_mutex);
	_handle_msg_aggr_ret(0, 1);
	FREE_NULL_LIST(msg_collection.msg_aggr_list);
	slurm_mutex_unlock(&msg_collection.aggr_mutex);
	FREE_NULL_LIST(msg_collection.msg_list);
	slurm_mutex_destroy(&msg_collection.mutex);
}
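Two ordering details above generalize: the running flag is cleared and the condition signaled while holding the mutex the sender thread waits on, and slurm_cond_destroy() runs only after pthread_join() has proven no waiter remains. A stripped-down, self-contained sketch of that shutdown sequence in bare pthreads (thread body and names are assumptions):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static bool running = true;
static pthread_t sender_tid;

/* Sender loop: sleeps until work arrives or fini clears the flag. */
static void *sender(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mutex);
	while (running)
		pthread_cond_wait(&cond, &mutex);
	pthread_mutex_unlock(&mutex);
	return NULL;
}

static void sender_fini(void)
{
	pthread_mutex_lock(&mutex);
	running = false;		/* flag cleared under the lock ... */
	pthread_cond_signal(&cond);	/* ... so this wakeup cannot be lost */
	pthread_mutex_unlock(&mutex);

	pthread_join(sender_tid, NULL);	/* sender has fully exited ... */
	pthread_cond_destroy(&cond);	/* ... so destroying is now safe */
	pthread_mutex_destroy(&mutex);
}

int main(void)
{
	pthread_create(&sender_tid, NULL, sender, NULL);
	sender_fini();			/* wake, join, then destroy */
	return 0;
}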
Example #7
File: scancel.c Project: adammoody/slurm
/* _cancel_jobs - filter then cancel jobs or job steps per request */
static int _cancel_jobs(int filter_cnt)
{
	int rc = 0;

	slurm_attr_init(&attr);
	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
		error("pthread_attr_setdetachstate error %m");

	slurm_mutex_init(&num_active_threads_lock);
	slurm_cond_init(&num_active_threads_cond, NULL);

	_cancel_jobs_by_state(JOB_PENDING, filter_cnt, &rc);
	/* Wait for any cancel of pending jobs to complete before starting
	 * cancellation of running jobs so that we don't have a race condition
	 * with pending jobs getting scheduled while running jobs are also
	 * being cancelled. */
	slurm_mutex_lock(&num_active_threads_lock);
	while (num_active_threads > 0) {
		slurm_cond_wait(&num_active_threads_cond,
				&num_active_threads_lock);
	}
	slurm_mutex_unlock(&num_active_threads_lock);

	_cancel_jobs_by_state(JOB_END, filter_cnt, &rc);
	/* Wait for any spawned threads that have not finished */
	slurm_mutex_lock(&num_active_threads_lock);
	while (num_active_threads > 0) {
		slurm_cond_wait(&num_active_threads_cond,
				&num_active_threads_lock);
	}
	slurm_mutex_unlock(&num_active_threads_lock);

	slurm_attr_destroy(&attr);
	slurm_mutex_destroy(&num_active_threads_lock);
	slurm_cond_destroy(&num_active_threads_cond);

	return rc;
}
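Both wait loops above depend on every cancel thread decrementing num_active_threads under the same lock and signaling before it exits; a hedged sketch of that worker-side epilogue (the helper name is hypothetical, the real decrement sits inside scancel's per-job cancel threads):

	/* Assumed epilogue run by each detached cancel thread so the
	 * wait loops in _cancel_jobs() can make progress. */
	static void _decrement_active_threads(void)
	{
		slurm_mutex_lock(&num_active_threads_lock);
		num_active_threads--;
		slurm_cond_signal(&num_active_threads_cond);
		slurm_mutex_unlock(&num_active_threads_lock);
	}

Because the threads are created detached (see pthread_attr_setdetachstate above), the counter-plus-condition pair is the only way the parent can learn they have finished; there is nothing to pthread_join().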