Example #1
static int
_build_min_max_32_string(char *buffer, int buf_size,
			 uint32_t min, uint32_t max,
			 bool range, bool use_suffix)
{
	char tmp_min[8];
	char tmp_max[8];

	if (use_suffix) {
		convert_num_unit((float)min, tmp_min, sizeof(tmp_min),
				 UNIT_NONE);
		convert_num_unit((float)max, tmp_max, sizeof(tmp_max),
				 UNIT_NONE);
	} else {
		snprintf(tmp_min, sizeof(tmp_min), "%u", min);
		snprintf(tmp_max, sizeof(tmp_max), "%u", max);
	}

	if (max == min)
		return snprintf(buffer, buf_size, "%s", tmp_max);
	else if (range) {
		if (max == INFINITE)
			return snprintf(buffer, buf_size, "%s-infinite",
					tmp_min);
		else
			return snprintf(buffer, buf_size, "%s-%s",
					tmp_min, tmp_max);
	} else
		return snprintf(buffer, buf_size, "%s+", tmp_min);
}
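A hypothetical caller (not part of the project code above, and only compilable in the same translation unit since the helper is static) showing the four output shapes the function can produce; INFINITE is SLURM's sentinel for an unbounded maximum:

	char buf[32];

	_build_min_max_32_string(buf, sizeof(buf), 4, 4, true, false);        /* "4"          */
	_build_min_max_32_string(buf, sizeof(buf), 2, 8, true, false);        /* "2-8"        */
	_build_min_max_32_string(buf, sizeof(buf), 2, INFINITE, true, false); /* "2-infinite" */
	_build_min_max_32_string(buf, sizeof(buf), 2, 8, false, false);       /* "2+"         */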
Example #2
int _print_nodes_ai(sinfo_data_t * sinfo_data, int width,
		    bool right_justify, char *suffix)
{
	char id[FORMAT_STRING_SIZE];
	char tmpa[8];
	char tmpi[8];
	if (sinfo_data) {
		if(params.cluster_flags & CLUSTER_FLAG_BG) {
			convert_num_unit((float)sinfo_data->nodes_alloc,
					 tmpa, sizeof(tmpa), UNIT_NONE);
			convert_num_unit((float)sinfo_data->nodes_idle,
					 tmpi, sizeof(tmpi), UNIT_NONE);
		} else {
			snprintf(tmpa, sizeof(tmpa), "%d",
				 sinfo_data->nodes_alloc);
			snprintf(tmpi, sizeof(tmpi), "%d",
				 sinfo_data->nodes_idle);
		}
		snprintf(id, FORMAT_STRING_SIZE, "%s/%s",
		         tmpa, tmpi);
		_print_str(id, width, right_justify, true);
	} else
		_print_str("NODES(A/I)", width, right_justify, true);

	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #3
static void _sprint_range(char *str, uint32_t str_size,
			  uint32_t lower, uint32_t upper)
{
	char tmp[128];
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	if (cluster_flags & CLUSTER_FLAG_BG) {
		convert_num_unit((float)lower, tmp, sizeof(tmp), UNIT_NONE,
				 NO_VAL, CONVERT_NUM_UNIT_EXACT);
	} else {
		snprintf(tmp, sizeof(tmp), "%u", lower);
	}
	if (upper > 0) {
		char tmp2[128];
		if (cluster_flags & CLUSTER_FLAG_BG) {
			convert_num_unit((float)upper, tmp2, sizeof(tmp2),
					 UNIT_NONE, NO_VAL,
					 CONVERT_NUM_UNIT_EXACT);
		} else {
			snprintf(tmp2, sizeof(tmp2), "%u", upper);
		}
		snprintf(str, str_size, "%s-%s", tmp, tmp2);
	} else
		snprintf(str, str_size, "%s", tmp);

}
Example #4
static void _update_block_record(sview_block_info_t *block_ptr,
				 GtkTreeStore *treestore)
{
	char cnode_cnt[20], cnode_cnt2[20];
	char *tmp_char = NULL, *tmp_char2 = NULL, *tmp_char3 = NULL;

	convert_num_unit((float)block_ptr->cnode_cnt, cnode_cnt,
			 sizeof(cnode_cnt), UNIT_NONE, NO_VAL,
			 working_sview_config.convert_flags);
	if (cluster_flags & CLUSTER_FLAG_BGQ) {
		convert_num_unit((float)block_ptr->cnode_err_cnt, cnode_cnt2,
				 sizeof(cnode_cnt2), UNIT_NONE, NO_VAL,
				 working_sview_config.convert_flags);
		tmp_char3 = xstrdup_printf("%s/%s", cnode_cnt, cnode_cnt2);
	} else
		tmp_char3 = cnode_cnt;

	tmp_char = conn_type_string_full(block_ptr->bg_conn_type);
	tmp_char2 = _set_running_job_str(block_ptr->job_list, 0);
	/* Combining these records provides a slight performance improvement */
	gtk_tree_store_set(treestore, &block_ptr->iter_ptr,
			   SORTID_BLOCK,        block_ptr->bg_block_name,
			   SORTID_COLOR,
				sview_colors[block_ptr->color_inx],
			   SORTID_COLOR_INX,    block_ptr->color_inx,
			   SORTID_CONN,		tmp_char,
			   SORTID_IMAGEMLOADER, block_ptr->imagemloader,
			   SORTID_JOB,          tmp_char2,
			   SORTID_NODE_INX,     block_ptr->mp_inx,
			   SORTID_NODE_CNT,     tmp_char3,
			   SORTID_NODELIST,     block_ptr->mp_str,
			   SORTID_PARTITION,    block_ptr->slurm_part_name,
			   SORTID_REASON,       block_ptr->reason,
			   SORTID_SMALL_BLOCK,  block_ptr->small_block,
			   SORTID_STATE,
				bg_block_state_string(block_ptr->state),
			   SORTID_UPDATED,      1,
			   -1);
	xfree(tmp_char);
	xfree(tmp_char2);
	if (cluster_flags & CLUSTER_FLAG_BGQ)
		xfree(tmp_char3);

	if (cluster_flags & CLUSTER_FLAG_BGP) {
		gtk_tree_store_set(treestore, &block_ptr->iter_ptr,
				   SORTID_IMAGERAMDISK, block_ptr->imageramdisk,
				   SORTID_IMAGELINUX,   block_ptr->imagelinux,
				   -1);
	} else if (cluster_flags & CLUSTER_FLAG_BGL) {
		gtk_tree_store_set(treestore, &block_ptr->iter_ptr,
				   SORTID_IMAGERAMDISK, block_ptr->imageramdisk,
				   SORTID_IMAGELINUX,   block_ptr->imagelinux,
				   SORTID_IMAGEBLRTS,   block_ptr->imageblrts,
				   SORTID_USE,
					node_use_string(block_ptr->bg_node_use),
				   -1);
	}

	return;
}
Example #5
/* updates the burst buffer record on sview */
static void _update_bb_record(sview_bb_info_t *sview_bb_info_ptr,
			      GtkTreeStore *treestore)
{
	char tmp_create_time[40];
	char tmp_size[20], tmp_user_id[60], bb_name_id[32];
	char *tmp_state, *tmp_user_name;
	burst_buffer_resv_t *bb_ptr = sview_bb_info_ptr->bb_ptr;

	if (bb_ptr->name) {
		strncpy(bb_name_id, bb_ptr->name, sizeof(bb_name_id));
	} else if (bb_ptr->array_task_id == NO_VAL) {
		convert_num_unit(bb_ptr->job_id, bb_name_id,
				 sizeof(bb_name_id),
				 UNIT_NONE, working_sview_config.convert_flags);
	} else {
		snprintf(bb_name_id, sizeof(bb_name_id),
			 "%u_%u(%u)",
			 bb_ptr->array_job_id,
			 bb_ptr->array_task_id,
			 bb_ptr->job_id);
	}

	if (bb_ptr->create_time) {
		slurm_make_time_str((time_t *)&bb_ptr->create_time,
				    tmp_create_time, sizeof(tmp_create_time));
	} else {
		time_t now = time(NULL);
		slurm_make_time_str(&now, tmp_create_time,
				    sizeof(tmp_create_time));
	}

	_get_size_str(tmp_size, sizeof(tmp_size), bb_ptr->size);

	tmp_state = bb_state_string(bb_ptr->state);

	tmp_user_name = uid_to_string(bb_ptr->user_id);
	snprintf(tmp_user_id, sizeof(tmp_user_id), "%s(%u)", tmp_user_name,
		 bb_ptr->user_id);
	xfree(tmp_user_name);

	/* Combining these records provides a slight performance improvement */
	gtk_tree_store_set(treestore, &sview_bb_info_ptr->iter_ptr,
			   SORTID_COLOR,
			   sview_colors[sview_bb_info_ptr->color_inx],
			   SORTID_COLOR_INX,     sview_bb_info_ptr->color_inx,
			   SORTID_PLUGIN,        sview_bb_info_ptr->plugin,
			   SORTID_ACCOUNT,       bb_ptr->account,
			   SORTID_CREATE_TIME,   tmp_create_time,
			   SORTID_NAME,          bb_name_id,
			   SORTID_PARTITION,     bb_ptr->partition,
			   SORTID_POOL,          bb_ptr->pool,
			   SORTID_QOS,           bb_ptr->qos,
			   SORTID_SIZE,          tmp_size,
			   SORTID_STATE,         tmp_state,
			   SORTID_UPDATED,       1,
			   SORTID_USERID,        tmp_user_id,
			   -1);

	return;
}
Example #6
File: print.c Project: Cray/slurm
int _print_job_num_nodes(job_info_t * job, int width, bool right_justify,
			 char* suffix)
{
	uint32_t node_cnt = 0;
	char tmp_char[8];

	if (job == NULL)	/* Print the Header instead */
		_print_str("NODES", width, right_justify, true);
	else {
		if (params.cluster_flags & CLUSTER_FLAG_BG)
			select_g_select_jobinfo_get(job->select_jobinfo,
						    SELECT_JOBDATA_NODE_CNT,
						    &node_cnt);

		if ((node_cnt == 0) || (node_cnt == NO_VAL))
			node_cnt = _get_node_cnt(job);

		if (params.cluster_flags & CLUSTER_FLAG_BG)
			convert_num_unit((float)node_cnt, tmp_char,
					 sizeof(tmp_char), UNIT_NONE);
		else
			snprintf(tmp_char, sizeof(tmp_char), "%d", node_cnt);

		_print_str(tmp_char, width, right_justify, true);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #7
static void _display_info_bb(List info_list, popup_info_t *popup_win)
{
	specific_info_t *spec_info = popup_win->spec_info;
	char *name = (char *)spec_info->search_info->gchar_data;
	//int found = 0;
	burst_buffer_resv_t *bb_ptr = NULL;
	GtkTreeView *treeview = NULL;
	ListIterator itr = NULL;
	sview_bb_info_t *sview_bb_info = NULL;
	int update = 0;
	char bb_name_id[32];

	if (!spec_info->search_info->gchar_data) {
		//info = xstrdup("No pointer given!");
		goto finished;
	}

	if (!spec_info->display_widget) {
		treeview = create_treeview_2cols_attach_to_table(
			popup_win->table);
		spec_info->display_widget =
			gtk_widget_ref(GTK_WIDGET(treeview));
	} else {
		treeview = GTK_TREE_VIEW(spec_info->display_widget);
		update = 1;
	}

	itr = list_iterator_create(info_list);
	while ((sview_bb_info = (sview_bb_info_t*) list_next(itr))) {
		bb_ptr = sview_bb_info->bb_ptr;

		if (bb_ptr->name) {
			strcpy(bb_name_id, bb_ptr->name);
		} else if (bb_ptr->array_task_id == NO_VAL) {
			convert_num_unit(bb_ptr->job_id,
					 bb_name_id,
					 sizeof(bb_name_id),
					 UNIT_NONE,
					 working_sview_config.convert_flags);
		} else {
			snprintf(bb_name_id, sizeof(bb_name_id),
				 "%u_%u(%u)",
				 bb_ptr->array_job_id,
				 bb_ptr->array_task_id,
				 bb_ptr->job_id);
		}

		if (!xstrcmp(bb_name_id, name)) {
			_layout_bb_record(treeview, sview_bb_info, update);
			break;
		}
	}
	list_iterator_destroy(itr);
	gtk_widget_show(spec_info->display_widget);

finished:

	return;
}
Example #8
/* Cpus, allocated/idle/other/total */
int _print_cpus_aiot(sinfo_data_t * sinfo_data, int width,
		     bool right_justify, char *suffix)
{
	char id[FORMAT_STRING_SIZE];
	char tmpa[8];
	char tmpi[8];
	char tmpo[8];
	char tmpt[8];
	if (sinfo_data) {
		if (params.cluster_flags & CLUSTER_FLAG_BG) {
			convert_num_unit((float)sinfo_data->cpus_alloc,
					 tmpa, sizeof(tmpa), UNIT_NONE,
					 params.convert_flags);
			convert_num_unit((float)sinfo_data->cpus_idle,
					 tmpi, sizeof(tmpi), UNIT_NONE,
					 params.convert_flags);
			convert_num_unit((float)sinfo_data->cpus_other,
					 tmpo, sizeof(tmpo), UNIT_NONE,
					 params.convert_flags);
			convert_num_unit((float)sinfo_data->cpus_total,
					 tmpt, sizeof(tmpt), UNIT_NONE,
					 params.convert_flags);
		} else {
			snprintf(tmpa, sizeof(tmpa), "%u",
				 sinfo_data->cpus_alloc);
			snprintf(tmpi, sizeof(tmpi), "%u",
				 sinfo_data->cpus_idle);
			snprintf(tmpo, sizeof(tmpo), "%u",
				 sinfo_data->cpus_other);
			snprintf(tmpt, sizeof(tmpt), "%u",
				 sinfo_data->cpus_total);
		}
		snprintf(id, FORMAT_STRING_SIZE, "%s/%s/%s/%s",
			 tmpa, tmpi, tmpo, tmpt);
		_print_str(id, width, right_justify, true);
	} else
		_print_str("CPUS(A/I/O/T)", width, right_justify, true);
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #9
static int
_build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max,
			 bool range)
{
	char tmp_min[8];
	char tmp_max[8];
	convert_num_unit((float)min, tmp_min, sizeof(tmp_min), UNIT_NONE);
	convert_num_unit((float)max, tmp_max, sizeof(tmp_max), UNIT_NONE);

	if (max == min)
		return snprintf(buffer, buf_size, "%s", tmp_max);
	else if (range) {
		if (max == (uint16_t) INFINITE)
			return snprintf(buffer, buf_size, "%s-infinite",
					tmp_min);
		else
			return snprintf(buffer, buf_size, "%s-%s",
					tmp_min, tmp_max);
	} else
		return snprintf(buffer, buf_size, "%s+", tmp_min);
}
Example #10
File: print.c Project: Cray/slurm
static void _print_small_double(
	char *outbuf, int buf_size, double dub, int units)
{
	if (fuzzy_equal(dub, NO_VAL))
		return;

	if (dub > 1)
		convert_num_unit((float)dub, outbuf, buf_size, units);
	else if (dub > 0)
		snprintf(outbuf, buf_size, "%.2fM", dub);
	else
		snprintf(outbuf, buf_size, "0");
}
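A hypothetical check of the branches above (buffer and inputs are illustrative only): values in (0, 1] are printed as "%.2fM", zero prints "0", anything greater than 1 is routed through convert_num_unit() with the caller-supplied units, and NO_VAL leaves the buffer untouched:

	char buf[16];

	_print_small_double(buf, sizeof(buf), 0.25, UNIT_MEGA); /* -> "0.25M" */
	_print_small_double(buf, sizeof(buf), 0.0,  UNIT_MEGA); /* -> "0" */
	_print_small_double(buf, sizeof(buf), 42.0, UNIT_MEGA); /* -> convert_num_unit() output */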
Example #11
File: print.c Project: Cray/slurm
int _print_job_num_sct(job_info_t * job, int width, bool right_justify,
			 char* suffix)
{
	char sockets[10];
	char cores[10];
	char threads[10];
	char sct[(10+1)*3];
	if (job) {
		if (job->sockets_per_node == (uint16_t) NO_VAL)
			strcpy(sockets, "*");
		else
			convert_num_unit((float)job->sockets_per_node, sockets,
					sizeof(sockets), UNIT_NONE);
		if (job->cores_per_socket == (uint16_t) NO_VAL)
			strcpy(cores, "*");
		else
			convert_num_unit((float)job->cores_per_socket, cores,
					sizeof(cores), UNIT_NONE);
		if (job->threads_per_core == (uint16_t) NO_VAL)
			strcpy(threads, "*");
		else
			convert_num_unit((float)job->threads_per_core, threads,
					sizeof(threads), UNIT_NONE);
		sct[0] = '\0';
		strcat(sct, sockets);
		strcat(sct, ":");
		strcat(sct, cores);
		strcat(sct, ":");
		strcat(sct, threads);
		_print_str(sct, width, right_justify, true);
	} else {
		_print_str("S:C:T", width, right_justify, true);
	}

	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #12
File: print.c Project: Cray/slurm
int _print_pn_min_cpus(job_info_t * job, int width, bool right_justify,
			char* suffix)
{
	char tmp_char[8];

	if (job == NULL)	/* Print the Header instead */
		_print_str("MIN_CPUS", width, right_justify, true);
	else {
		convert_num_unit((float)job->pn_min_cpus, tmp_char,
				 sizeof(tmp_char), UNIT_NONE);
		_print_str(tmp_char, width, right_justify, true);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #13
static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
				GtkTreeStore *treestore)
{
	char tmp_duration[40], tmp_end[40], tmp_nodes[40], tmp_start[40];
	char *tmp_flags;
	reserve_info_t *resv_ptr = sview_resv_info_ptr->resv_ptr;

	secs2time_str((uint32_t)difftime(resv_ptr->end_time,
					 resv_ptr->start_time),
		      tmp_duration, sizeof(tmp_duration));

	slurm_make_time_str((time_t *)&resv_ptr->end_time, tmp_end,
			    sizeof(tmp_end));

	tmp_flags = reservation_flags_string(resv_ptr->flags);

	convert_num_unit((float)resv_ptr->node_cnt,
			 tmp_nodes, sizeof(tmp_nodes), UNIT_NONE);

	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_start,
			    sizeof(tmp_start));

	/* Combining these records provides a slight performance improvement */
	gtk_tree_store_set(treestore, &sview_resv_info_ptr->iter_ptr,
			   SORTID_ACCOUNTS,   resv_ptr->accounts,
			   SORTID_COLOR,
				sview_colors[sview_resv_info_ptr->color_inx],
			   SORTID_COLOR_INX,  sview_resv_info_ptr->color_inx,
			   SORTID_DURATION,   tmp_duration,
			   SORTID_FEATURES,   resv_ptr->features,
			   SORTID_FLAGS,      tmp_flags,
			   SORTID_LICENSES,   resv_ptr->licenses,
			   SORTID_NAME,       resv_ptr->name,
			   SORTID_NODE_CNT,   tmp_nodes,
			   SORTID_NODE_INX,   resv_ptr->node_inx,
			   SORTID_NODELIST,   resv_ptr->node_list,
			   SORTID_PARTITION,  resv_ptr->partition,
			   SORTID_TIME_START, tmp_start,
			   SORTID_TIME_END,   tmp_end,
			   SORTID_UPDATED,    1,
			   SORTID_USERS,      resv_ptr->users,
			   -1);

	xfree(tmp_flags);

	return;
}
Example #14
static void _update_block_record(sview_block_info_t *block_ptr,
				 GtkTreeStore *treestore, GtkTreeIter *iter)
{
	char job_running[20], cnode_cnt[20];

	if (block_ptr->job_running > NO_JOB_RUNNING)
		snprintf(job_running, sizeof(job_running),
			 "%d", block_ptr->job_running);
	else
		snprintf(job_running, sizeof(job_running), "-");

	convert_num_unit((float)block_ptr->cnode_cnt, cnode_cnt, sizeof(cnode_cnt),
			 UNIT_NONE);

	/* Combining these records provides a slight performance improvement */
	gtk_tree_store_set(treestore, iter,
			   SORTID_BLOCK,        block_ptr->bg_block_name,
			   SORTID_COLOR,
				sview_colors[block_ptr->color_inx],
			   SORTID_COLOR_INX,    block_ptr->color_inx,
			   SORTID_CONN,
				conn_type_string(block_ptr->bg_conn_type),
			   SORTID_IMAGERAMDISK, block_ptr->imageramdisk,
			   SORTID_IMAGELINUX,   block_ptr->imagelinux,
			   SORTID_IMAGEMLOADER, block_ptr->imagemloader,
			   SORTID_JOB,          job_running,
			   SORTID_NODE_INX,     block_ptr->bp_inx,
			   SORTID_MP_STR,        cnode_cnt,
			   SORTID_NODELIST,     block_ptr->mp_str,
			   SORTID_PARTITION,    block_ptr->slurm_part_name,
			   SORTID_SMALL_BLOCK,  block_ptr->small_block,
			   SORTID_STATE,
				bg_block_state_string(block_ptr->state),
			   SORTID_USER,         block_ptr->bg_user_name,
			   SORTID_UPDATED,      1,
			   -1);

	if (cluster_flags & CLUSTER_FLAG_BGL) {
		gtk_tree_store_set(treestore, iter,
				   SORTID_IMAGEBLRTS,   block_ptr->imageblrts,
				   SORTID_USE,
					node_use_string(block_ptr->bg_node_use),
				   -1);
	}

	return;
}
Example #15
File: print.c Project: Cray/slurm
int _print_job_num_cpus(job_info_t * job, int width, bool right, char* suffix)
{
	char tmp_char[18];
	if (job == NULL)	/* Print the Header instead */
		_print_str("CPUS", width, right, true);
	else {
		if (params.cluster_flags & CLUSTER_FLAG_BG)
			convert_num_unit((float)job->num_cpus, tmp_char,
					 sizeof(tmp_char), UNIT_NONE);
		else
			snprintf(tmp_char, sizeof(tmp_char),
				 "%u", job->num_cpus);
		_print_str(tmp_char, width, right, true);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #16
File: print.c Project: Cray/slurm
int _print_threads(job_info_t * job, int width, bool right_justify,
		       char* suffix)
{
	char tmp_char[8];

	if (job == NULL)	/* Print the Header instead */
		_print_str("THREADS_PER_CORE", width, right_justify, true);
	else {
		if (job->threads_per_core == (uint16_t) NO_VAL)
			strcpy(tmp_char, "*");
		else
			convert_num_unit((float)job->threads_per_core, tmp_char,
					sizeof(tmp_char), UNIT_NONE);
		_print_str(tmp_char, width, right_justify, true);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #17
int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
		   bool right_justify, char *suffix)
{
	char id[FORMAT_STRING_SIZE];
	char tmp[8];
	if (sinfo_data) {
		if(params.cluster_flags & CLUSTER_FLAG_BG)
			convert_num_unit((float)sinfo_data->nodes_total,
					 tmp, sizeof(tmp), UNIT_NONE);
		else
			snprintf(tmp, sizeof(tmp), "%d",
				 sinfo_data->nodes_total);
		snprintf(id, FORMAT_STRING_SIZE, "%s", tmp);
		_print_str(id, width, right_justify, true);
	} else
		_print_str("NODES", width, right_justify, true);

	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #18
File: print.c Project: Cray/slurm
int _print_pn_min_memory(job_info_t * job, int width, bool right_justify,
			  char* suffix)
{
	char min_mem[10];
	char tmp_char[21];

	if (job == NULL)	/* Print the Header instead */
		_print_str("MIN_MEMORY", width, right_justify, true);
	else {
		tmp_char[0] = '\0';
		job->pn_min_memory &= (~MEM_PER_CPU);
		convert_num_unit((float)job->pn_min_memory, min_mem,
				 sizeof(min_mem), UNIT_NONE);
		strcat(tmp_char, min_mem);
		_print_str(tmp_char, width, right_justify, true);
	}

	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
Example #19
void print_fields(slurmdb_step_rec_t *step)
{
	print_field_t *field = NULL;
	int curr_inx = 1;
	char outbuf[FORMAT_STRING_SIZE];

	list_iterator_reset(print_fields_itr);
	while ((field = list_next(print_fields_itr))) {
		char *tmp_char = NULL;

		memset(&outbuf, 0, sizeof(outbuf));
		switch(field->type) {
		case PRINT_AVECPU:

			tmp_char = _elapsed_time((long)step->stats.cpu_ave, 0);

			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_ACT_CPUFREQ:

			convert_num_unit2((double)step->stats.act_cpufreq,
					  outbuf, sizeof(outbuf), UNIT_KILO,
					  NO_VAL, 1000, params.convert_flags &
					  (~CONVERT_NUM_UNIT_EXACT));

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_CONSUMED_ENERGY:
			if (!fuzzy_equal(step->stats.consumed_energy, NO_VAL)) {
				convert_num_unit2((double)
						  step->stats.consumed_energy,
						  outbuf, sizeof(outbuf),
						  UNIT_NONE, NO_VAL, 1000,
						  params.convert_flags &
						  (~CONVERT_NUM_UNIT_EXACT));
			}
			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_CONSUMED_ENERGY_RAW:
			field->print_routine(field,
					     step->stats.consumed_energy,
					     (curr_inx == field_count));
			break;
		case PRINT_AVEDISKREAD:
			_print_small_double(outbuf, sizeof(outbuf),
					    step->stats.disk_read_ave,
					    UNIT_MEGA);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_AVEDISKWRITE:
			_print_small_double(outbuf, sizeof(outbuf),
					    step->stats.disk_write_ave,
					    UNIT_MEGA);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_AVEPAGES:
			convert_num_unit((double)step->stats.pages_ave, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_AVERSS:
			convert_num_unit((double)step->stats.rss_ave, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_AVEVSIZE:
			convert_num_unit((double)step->stats.vsize_ave, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_JOBID:
			if (step->stepid == SLURM_BATCH_SCRIPT)
				snprintf(outbuf, sizeof(outbuf), "%u.batch",
					 step->job_ptr->jobid);
			else if (step->stepid == SLURM_EXTERN_CONT)
				snprintf(outbuf, sizeof(outbuf), "%u.extern",
					 step->job_ptr->jobid);
			else
				snprintf(outbuf, sizeof(outbuf), "%u.%u",
					 step->job_ptr->jobid,
					 step->stepid);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXDISKREAD:
			_print_small_double(outbuf, sizeof(outbuf),
					    step->stats.disk_read_max,
					    UNIT_MEGA);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXDISKREADNODE:
			tmp_char = find_hostname(
					step->stats.disk_read_max_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MAXDISKREADTASK:
			field->print_routine(field,
					     step->stats.disk_read_max_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXDISKWRITE:
			_print_small_double(outbuf, sizeof(outbuf),
					    step->stats.disk_write_max,
					    UNIT_MEGA);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXDISKWRITENODE:
			tmp_char = find_hostname(
					step->stats.disk_write_max_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MAXDISKWRITETASK:
			field->print_routine(field,
					     step->stats.disk_write_max_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXPAGES:
			convert_num_unit((double)step->stats.pages_max, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXPAGESNODE:
			tmp_char = find_hostname(
					step->stats.pages_max_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MAXPAGESTASK:
			field->print_routine(field,
					     step->stats.pages_max_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXRSS:
			convert_num_unit((double)step->stats.rss_max, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXRSSNODE:
			tmp_char = find_hostname(
					step->stats.rss_max_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MAXRSSTASK:
			field->print_routine(field,
					     step->stats.rss_max_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXVSIZE:
			convert_num_unit((double)step->stats.vsize_max, outbuf,
					 sizeof(outbuf), UNIT_KILO, NO_VAL,
					 params.convert_flags);

			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_MAXVSIZENODE:
			tmp_char = find_hostname(
					step->stats.vsize_max_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MAXVSIZETASK:
			field->print_routine(field,
					     step->stats.vsize_max_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_MINCPU:
			tmp_char = _elapsed_time((long)step->stats.cpu_min, 0);

			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MINCPUNODE:
			tmp_char = find_hostname(
					step->stats.cpu_min_nodeid,
					step->nodes);
			field->print_routine(field,
					     tmp_char,
					     (curr_inx == field_count));
			xfree(tmp_char);
			break;
		case PRINT_MINCPUTASK:
			field->print_routine(field,
					     step->stats.cpu_min_taskid,
					     (curr_inx == field_count));
			break;
		case PRINT_NODELIST:
			field->print_routine(field,
					     step->nodes,
					     (curr_inx == field_count));
			break;
		case PRINT_NTASKS:
			field->print_routine(field,
					     step->ntasks,
					     (curr_inx == field_count));
			break;
		case PRINT_PIDS:
			field->print_routine(field,
					     step->pid_str,
					     (curr_inx == field_count));
			break;
		case PRINT_REQ_CPUFREQ_MIN:
			cpu_freq_to_string(outbuf, sizeof(outbuf),
					   step->req_cpufreq_min);
			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_REQ_CPUFREQ_MAX:
			cpu_freq_to_string(outbuf, sizeof(outbuf),
					   step->req_cpufreq_max);
			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		case PRINT_REQ_CPUFREQ_GOV:
			cpu_freq_to_string(outbuf, sizeof(outbuf),
					   step->req_cpufreq_gov);
			field->print_routine(field,
					     outbuf,
					     (curr_inx == field_count));
			break;
		default:
			break;
		}
		curr_inx++;
	}
	printf("\n");
}
Example #20
/*
 * slurm_sprint_job_step_info - output information about a specific Slurm
 *	job step based upon message as loaded using slurm_get_job_steps
 * IN job_ptr - an individual job step information record pointer
 * IN one_liner - print as a single line if true
 * RET out - char * containing formatted output (must be freed after call)
 *           NULL is returned on failure.
 */
char *
slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
			    int one_liner )
{
	char tmp_node_cnt[40];
	char time_str[32];
	char limit_str[32];
	char tmp_line[128];
	char *out = NULL;
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	/****** Line 1 ******/
	slurm_make_time_str ((time_t *)&job_step_ptr->start_time, time_str,
		sizeof(time_str));
	if (job_step_ptr->time_limit == INFINITE)
		sprintf(limit_str, "UNLIMITED");
	else
		secs2time_str ((time_t)job_step_ptr->time_limit * 60,
				limit_str, sizeof(limit_str));
	snprintf(tmp_line, sizeof(tmp_line),
		 "StepId=%u.%u UserId=%u StartTime=%s TimeLimit=%s",
		 job_step_ptr->job_id, job_step_ptr->step_id,
		 job_step_ptr->user_id, time_str, limit_str);
	out = xstrdup(tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 2 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		 "State=%s ",
		 job_state_string(job_step_ptr->state));
	xstrcat(out, tmp_line);
	if (cluster_flags & CLUSTER_FLAG_BG) {
		char *io_nodes;
		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
					    SELECT_JOBDATA_IONODES,
					    &io_nodes);
		if (io_nodes) {
			snprintf(tmp_line, sizeof(tmp_line),
				 "Partition=%s MidplaneList=%s[%s] Gres=%s",
				 job_step_ptr->partition,
				 job_step_ptr->nodes, io_nodes,
				 job_step_ptr->gres);
			xfree(io_nodes);
		} else
			snprintf(tmp_line, sizeof(tmp_line),
				 "Partition=%s MidplaneList=%s Gres=%s",
				 job_step_ptr->partition,
				 job_step_ptr->nodes,
				 job_step_ptr->gres);
	} else {
		snprintf(tmp_line, sizeof(tmp_line),
			"Partition=%s NodeList=%s Gres=%s",
			job_step_ptr->partition, job_step_ptr->nodes,
			job_step_ptr->gres);
	}
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 3 ******/
	if (cluster_flags & CLUSTER_FLAG_BGQ) {
		uint32_t nodes = 0;
		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
					    SELECT_JOBDATA_NODE_CNT,
					    &nodes);
		convert_num_unit((float)nodes, tmp_node_cnt,
				 sizeof(tmp_node_cnt), UNIT_NONE);
	} else {
		convert_num_unit((float)_nodes_in_list(job_step_ptr->nodes),
				 tmp_node_cnt, sizeof(tmp_node_cnt),
				 UNIT_NONE);
	}

	snprintf(tmp_line, sizeof(tmp_line),
		"Nodes=%s Tasks=%u Name=%s Network=%s",
		 tmp_node_cnt, job_step_ptr->num_tasks, job_step_ptr->name,
		job_step_ptr->network);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 4 ******/
	snprintf(tmp_line, sizeof(tmp_line),
		"ResvPorts=%s Checkpoint=%u CheckpointDir=%s",
		 job_step_ptr->resv_ports,
		 job_step_ptr->ckpt_interval, job_step_ptr->ckpt_dir);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 5 ******/
	if (job_step_ptr->cpu_freq == NO_VAL) {
		snprintf(tmp_line, sizeof(tmp_line), 
			 "CPUFreqReq=Default\n\n");
	} else if (job_step_ptr->cpu_freq & CPU_FREQ_RANGE_FLAG) {
		switch (job_step_ptr->cpu_freq) 
		{
		case CPU_FREQ_LOW :
			snprintf(tmp_line, sizeof(tmp_line),
				 "CPUFreqReq=Low\n\n");
			break;
		case CPU_FREQ_MEDIUM :
			snprintf(tmp_line, sizeof(tmp_line),
				 "CPUFreqReq=Medium\n\n");
			break;
		case CPU_FREQ_HIGH :
			snprintf(tmp_line, sizeof(tmp_line),
				 "CPUFreqReq=High\n\n");
			break;
		default :
			snprintf(tmp_line, sizeof(tmp_line),
				 "CPUFreqReq=Unknown\n\n");
		}
	} else {
		snprintf(tmp_line, sizeof(tmp_line),
			 "CPUFreqReq=%u\n\n", job_step_ptr->cpu_freq);
	}
	xstrcat(out, tmp_line);

	return out;
}
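A minimal usage sketch, assuming the steps were loaded with slurm_get_job_steps() into a job_step_info_response_msg_t (function and field names follow the public SLURM API of this era; verify them against your slurm.h). The returned string comes from xstrdup(), so it is released with xfree() here, per the "must be freed after call" note above:

job_step_info_response_msg_t *steps = NULL;

if (slurm_get_job_steps((time_t) 0, NO_VAL, NO_VAL, &steps, SHOW_ALL)
    == SLURM_SUCCESS) {
	uint32_t i;
	for (i = 0; i < steps->job_step_count; i++) {
		char *txt = slurm_sprint_job_step_info(&steps->job_steps[i], 0);
		if (txt) {
			printf("%s", txt);
			xfree(txt);	/* caller owns the formatted string */
		}
	}
	slurm_free_job_step_info_response_msg(steps);
}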
Example #21
/*
 * slurm_sprint_partition_info - output information about a specific Slurm
 *	partition based upon message as loaded using slurm_load_partitions
 * IN part_ptr - an individual partition information record pointer
 * IN one_liner - print as a single line if true
 * RET out - char * containing formatted output (must be freed after call)
 *           NULL is returned on failure.
 */
char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
				    int one_liner )
{
	char tmp[16];
	char *out = NULL;
	char *allow_deny, *value;
	uint16_t force, preempt_mode, val;
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
	char *line_end = (one_liner) ? " " : "\n   ";

	/****** Line 1 ******/

	xstrfmtcat(out, "PartitionName=%s", part_ptr->name);
	xstrcat(out, line_end);

	/****** Line 2 ******/

	if ((part_ptr->allow_groups == NULL) ||
	    (part_ptr->allow_groups[0] == '\0'))
		xstrcat(out, "AllowGroups=ALL");
	else {
		xstrfmtcat(out, "AllowGroups=%s", part_ptr->allow_groups);
	}

	if (part_ptr->allow_accounts || !part_ptr->deny_accounts) {
		allow_deny = "Allow";
		if ((part_ptr->allow_accounts == NULL) ||
		    (part_ptr->allow_accounts[0] == '\0'))
			value = "ALL";
		else
			value = part_ptr->allow_accounts;
	} else {
		allow_deny = "Deny";
		value = part_ptr->deny_accounts;
	}
	xstrfmtcat(out, " %sAccounts=%s", allow_deny, value);

	if (part_ptr->allow_qos || !part_ptr->deny_qos) {
		allow_deny = "Allow";
		if ((part_ptr->allow_qos == NULL) ||
		    (part_ptr->allow_qos[0] == '\0'))
			value = "ALL";
		else
			value = part_ptr->allow_qos;
	} else {
		allow_deny = "Deny";
		value = part_ptr->deny_qos;
	}
	xstrfmtcat(out, " %sQos=%s", allow_deny, value);
	xstrcat(out, line_end);

	/****** Line 3 ******/
	if (part_ptr->allow_alloc_nodes == NULL)
		xstrcat(out, "AllocNodes=ALL");
	else
		xstrfmtcat(out, "AllocNodes=%s", part_ptr->allow_alloc_nodes);

	if (part_ptr->alternate != NULL) {
		xstrfmtcat(out, " Alternate=%s", part_ptr->alternate);
	}

	if (part_ptr->flags & PART_FLAG_DEFAULT)
		xstrcat(out, " Default=YES");
	else
		xstrcat(out, " Default=NO");

	if (part_ptr->qos_char)
		xstrfmtcat(out, " QoS=%s", part_ptr->qos_char);
	else
		xstrcat(out, " QoS=N/A");

	xstrcat(out, line_end);

	/****** Line 4 added here for BG partitions only
	 ****** to maintain alphabetized output ******/

	if (cluster_flags & CLUSTER_FLAG_BG) {
		xstrfmtcat(out, "Midplanes=%s", part_ptr->nodes);
		xstrcat(out, line_end);
	}

	/****** Line 5 ******/

	if (part_ptr->default_time == INFINITE)
		xstrcat(out, "DefaultTime=UNLIMITED");
	else if (part_ptr->default_time == NO_VAL)
		xstrcat(out, "DefaultTime=NONE");
	else {
		char time_line[32];
		secs2time_str(part_ptr->default_time * 60, time_line,
			sizeof(time_line));
		xstrfmtcat(out, "DefaultTime=%s", time_line);
	}

	if (part_ptr->flags & PART_FLAG_NO_ROOT)
		xstrcat(out, " DisableRootJobs=YES");
	else
		xstrcat(out, " DisableRootJobs=NO");

	if (part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)
		xstrcat(out, " ExclusiveUser=YES");
	else
		xstrcat(out, " ExclusiveUser=NO");

	xstrfmtcat(out, " GraceTime=%u", part_ptr->grace_time);

	if (part_ptr->flags & PART_FLAG_HIDDEN)
		xstrcat(out, " Hidden=YES");
	else
		xstrcat(out, " Hidden=NO");

	xstrcat(out, line_end);

	/****** Line 6 ******/

	if (part_ptr->max_nodes == INFINITE)
		xstrcat(out, "MaxNodes=UNLIMITED");
	else {
		if (cluster_flags & CLUSTER_FLAG_BG) {
			convert_num_unit((float)part_ptr->max_nodes, tmp,
					 sizeof(tmp), UNIT_NONE, NO_VAL,
					 CONVERT_NUM_UNIT_EXACT);
			xstrfmtcat(out, "MaxNodes=%s", tmp);
		} else
			xstrfmtcat(out, "MaxNodes=%u", part_ptr->max_nodes);

	}

	if (part_ptr->max_time == INFINITE)
		xstrcat(out, " MaxTime=UNLIMITED");
	else {
		char time_line[32];
		secs2time_str(part_ptr->max_time * 60, time_line,
			      sizeof(time_line));
		xstrfmtcat(out, " MaxTime=%s", time_line);
	}

	if (cluster_flags & CLUSTER_FLAG_BG) {
		convert_num_unit((float)part_ptr->min_nodes, tmp, sizeof(tmp),
				 UNIT_NONE, NO_VAL, CONVERT_NUM_UNIT_EXACT);
		xstrfmtcat(out, " MinNodes=%s", tmp);
	} else
		xstrfmtcat(out, " MinNodes=%u", part_ptr->min_nodes);

	if (part_ptr->flags & PART_FLAG_LLN)
		xstrcat(out, " LLN=YES");
	else
		xstrcat(out, " LLN=NO");

	if (part_ptr->max_cpus_per_node == INFINITE)
		xstrcat(out, " MaxCPUsPerNode=UNLIMITED");
	else {
		xstrfmtcat(out, " MaxCPUsPerNode=%u",
			   part_ptr->max_cpus_per_node);
	}

	xstrcat(out, line_end);

	/****** Line added here for non BG nodes
	 to keep with alphabetized output******/

	if (!(cluster_flags & CLUSTER_FLAG_BG)) {
		xstrfmtcat(out, "Nodes=%s", part_ptr->nodes);
		xstrcat(out, line_end);
	}

	/****** Line 7 ******/

	xstrfmtcat(out, "PriorityJobFactor=%u", part_ptr->priority_job_factor);
	xstrfmtcat(out, " PriorityTier=%u", part_ptr->priority_tier);

	if (part_ptr->flags & PART_FLAG_ROOT_ONLY)
		xstrcat(out, " RootOnly=YES");
	else
		xstrcat(out, " RootOnly=NO");

	if (part_ptr->flags & PART_FLAG_REQ_RESV)
		xstrcat(out, " ReqResv=YES");
	else
		xstrcat(out, " ReqResv=NO");

	force = part_ptr->max_share & SHARED_FORCE;
	val = part_ptr->max_share & (~SHARED_FORCE);
	if (val == 0)
		xstrcat(out, " OverSubscribe=EXCLUSIVE");
	else if (force)
		xstrfmtcat(out, " OverSubscribe=FORCE:%u", val);
	else if (val == 1)
		xstrcat(out, " OverSubscribe=NO");
	else
		xstrfmtcat(out, " OverSubscribe=YES:%u", val);

	xstrcat(out, line_end);

	/****** Line ******/
	if (part_ptr->over_time_limit == NO_VAL16)
		xstrfmtcat(out, "OverTimeLimit=NONE");
	else if (part_ptr->over_time_limit == (uint16_t) INFINITE)
		xstrfmtcat(out, "OverTimeLimit=UNLIMITED");
	else
		xstrfmtcat(out, "OverTimeLimit=%u", part_ptr->over_time_limit);

	preempt_mode = part_ptr->preempt_mode;
	if (preempt_mode == NO_VAL16)
		preempt_mode = slurm_get_preempt_mode(); /* use cluster param */
	xstrfmtcat(out, " PreemptMode=%s", preempt_mode_string(preempt_mode));

	xstrcat(out, line_end);

	/****** Line ******/
	if (part_ptr->state_up == PARTITION_UP)
		xstrcat(out, "State=UP");
	else if (part_ptr->state_up == PARTITION_DOWN)
		xstrcat(out, "State=DOWN");
	else if (part_ptr->state_up == PARTITION_INACTIVE)
		xstrcat(out, "State=INACTIVE");
	else if (part_ptr->state_up == PARTITION_DRAIN)
		xstrcat(out, "State=DRAIN");
	else
		xstrcat(out, "State=UNKNOWN");

	if (cluster_flags & CLUSTER_FLAG_BG) {
		convert_num_unit((float)part_ptr->total_cpus, tmp, sizeof(tmp),
				 UNIT_NONE, NO_VAL, CONVERT_NUM_UNIT_EXACT);
		xstrfmtcat(out, " TotalCPUs=%s", tmp);
	} else
		xstrfmtcat(out, " TotalCPUs=%u", part_ptr->total_cpus);


	if (cluster_flags & CLUSTER_FLAG_BG) {
		convert_num_unit((float)part_ptr->total_nodes, tmp, sizeof(tmp),
				 UNIT_NONE, NO_VAL, CONVERT_NUM_UNIT_EXACT);
		xstrfmtcat(out, " TotalNodes=%s", tmp);
	} else
		xstrfmtcat(out, " TotalNodes=%u", part_ptr->total_nodes);

	xstrfmtcat(out, " SelectTypeParameters=%s",
		   select_type_param_string(part_ptr->cr_type));

	xstrcat(out, line_end);

	/****** Line 9 ******/
	if (part_ptr->def_mem_per_cpu & MEM_PER_CPU) {
		if (part_ptr->def_mem_per_cpu == MEM_PER_CPU) {
			xstrcat(out, "DefMemPerCPU=UNLIMITED");
		} else {
			xstrfmtcat(out, "DefMemPerCPU=%"PRIu64"",
				   part_ptr->def_mem_per_cpu & (~MEM_PER_CPU));
		}
	} else if (part_ptr->def_mem_per_cpu == 0) {
		xstrcat(out, "DefMemPerNode=UNLIMITED");
	} else {
		xstrfmtcat(out, "DefMemPerNode=%"PRIu64"", part_ptr->def_mem_per_cpu);
	}

	if (part_ptr->max_mem_per_cpu & MEM_PER_CPU) {
		if (part_ptr->max_mem_per_cpu == MEM_PER_CPU) {
			xstrcat(out, " MaxMemPerCPU=UNLIMITED");
		} else {
			xstrfmtcat(out, " MaxMemPerCPU=%"PRIu64"",
				   part_ptr->max_mem_per_cpu & (~MEM_PER_CPU));
		}
	} else if (part_ptr->max_mem_per_cpu == 0) {
		xstrcat(out, " MaxMemPerNode=UNLIMITED");
	} else {
		xstrfmtcat(out, " MaxMemPerNode=%"PRIu64"", part_ptr->max_mem_per_cpu);
	}

	/****** Line 10 ******/
	if (part_ptr->billing_weights_str) {
		xstrcat(out, line_end);

		xstrfmtcat(out, "TRESBillingWeights=%s",
			   part_ptr->billing_weights_str);
	}

	if (one_liner)
		xstrcat(out, "\n");
	else
		xstrcat(out, "\n\n");

	return out;
}
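Similarly, a hypothetical caller for the partition variant, assuming the records were loaded with slurm_load_partitions() (again, treat the exact API names as an assumption to check against your headers):

partition_info_msg_t *parts = NULL;

if (slurm_load_partitions((time_t) 0, &parts, SHOW_ALL) == SLURM_SUCCESS) {
	uint32_t i;
	for (i = 0; i < parts->record_count; i++) {
		char *txt = slurm_sprint_partition_info(&parts->partition_array[i], 0);
		if (txt) {
			printf("%s", txt);
			xfree(txt);	/* formatted output must be freed after the call */
		}
	}
	slurm_free_partition_info_msg(parts);
}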
Example #22
/* Function creates the record menu when you double click on a record */
static void _layout_bb_record(GtkTreeView *treeview,
			      sview_bb_info_t *sview_bb_info, int update)
{
	GtkTreeIter iter;
	char time_buf[20], tmp_user_id[60], tmp_size[20];
	char bb_name_id[32];
	char *tmp_state, *tmp_user_name;
	burst_buffer_resv_t *bb_ptr = sview_bb_info->bb_ptr;
	GtkTreeStore *treestore;

	treestore = GTK_TREE_STORE(gtk_tree_view_get_model(treeview));

	if (bb_ptr->name) {
		strncpy(bb_name_id, bb_ptr->name, sizeof(bb_name_id));
	} else if (bb_ptr->array_task_id == NO_VAL) {
		convert_num_unit(bb_ptr->job_id, bb_name_id,
				 sizeof(bb_name_id),
				 UNIT_NONE, working_sview_config.convert_flags);
	} else {
		snprintf(bb_name_id, sizeof(bb_name_id),
			 "%u_%u(%u)",
			 bb_ptr->array_job_id,
			 bb_ptr->array_task_id,
			 bb_ptr->job_id);
	}
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_NAME),
				   bb_name_id);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_PLUGIN),
				   sview_bb_info->plugin);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_ACCOUNT),
				   bb_ptr->account);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_PARTITION),
				   bb_ptr->partition);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_POOL),
				   bb_ptr->pool);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_QOS),
				   bb_ptr->qos);

	tmp_state = bb_state_string(bb_ptr->state);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_STATE),
				   tmp_state);

	_get_size_str(tmp_size, sizeof(tmp_size), bb_ptr->size);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_SIZE),
				   tmp_size);

	if (bb_ptr->create_time) {
		slurm_make_time_str((time_t *)&bb_ptr->create_time, time_buf,
				    sizeof(time_buf));
	} else {
		time_t now = time(NULL);
		slurm_make_time_str(&now, time_buf, sizeof(time_buf));
	}
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_CREATE_TIME),
				   time_buf);

	tmp_user_name = uid_to_string(bb_ptr->user_id);
	snprintf(tmp_user_id, sizeof(tmp_user_id), "%s(%u)", tmp_user_name,
		 bb_ptr->user_id);
	xfree(tmp_user_name);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_bb,
						 SORTID_USERID),
				   tmp_user_id);
}
Example #23
static List _create_bb_info_list(burst_buffer_info_msg_t *bb_info_ptr)
{
	static List info_list = NULL;
	List last_list = NULL;
	ListIterator last_list_itr = NULL;
	int i, j, pos = 0;
	static burst_buffer_info_msg_t *last_bb_info_ptr = NULL;
	sview_bb_info_t *sview_bb_info_ptr = NULL;
	burst_buffer_info_t *bb_ptr;
	burst_buffer_resv_t *bb_resv_ptr = NULL;
	char bb_name_id[32] = "";

	if (info_list && (bb_info_ptr == last_bb_info_ptr))
		return info_list;

	last_bb_info_ptr = bb_info_ptr;
	if (info_list)
		last_list = info_list;
	info_list = list_create(_bb_info_list_del);

	for (i = 0, bb_ptr = bb_info_ptr->burst_buffer_array;
	     i < bb_info_ptr->record_count; i++, bb_ptr++) {

		for (j = 0, bb_resv_ptr = bb_ptr->burst_buffer_resv_ptr;
		     j < bb_ptr->buffer_count; j++, bb_resv_ptr++) {

			/* Find any existing record for this burst buffer */
			if (last_list) {
				last_list_itr = list_iterator_create(last_list);
				while ((sview_bb_info_ptr =
					list_next(last_list_itr))) {
					if (bb_resv_ptr->job_id &&
					    (bb_resv_ptr->job_id != 
					     sview_bb_info_ptr->bb_ptr->job_id))
						continue;
					if (bb_resv_ptr->name &&
					    xstrcmp(sview_bb_info_ptr->bb_name,
						    bb_resv_ptr->name))
						continue;
					if (xstrcmp(sview_bb_info_ptr->plugin,
						    bb_ptr->name))
						continue;
					list_remove(last_list_itr);
					_bb_info_free(sview_bb_info_ptr);
					break;
				}
				list_iterator_destroy(last_list_itr);
			} else {
				sview_bb_info_ptr = NULL;
			}

			if (bb_resv_ptr->name) {
				strncpy(bb_name_id, bb_resv_ptr->name,
					sizeof(bb_name_id));
			} else if (bb_resv_ptr->array_task_id == NO_VAL) {
				convert_num_unit(bb_resv_ptr->job_id,
						 bb_name_id,
						 sizeof(bb_name_id),
						 UNIT_NONE,
						 working_sview_config.
						 convert_flags);
			} else {
				snprintf(bb_name_id, sizeof(bb_name_id),
					 "%u_%u(%u)",
					 bb_resv_ptr->array_job_id,
					 bb_resv_ptr->array_task_id,
					 bb_resv_ptr->job_id);
			}

			if (!sview_bb_info_ptr) {	/* Need new record */
				sview_bb_info_ptr =
					xmalloc(sizeof(sview_bb_info_t));
			}
			sview_bb_info_ptr->bb_ptr = bb_resv_ptr;
			sview_bb_info_ptr->bb_name = xstrdup(bb_name_id);
			strcpy(bb_name_id, "");	/* Clear bb_name_id */
			sview_bb_info_ptr->color_inx = pos % sview_colors_cnt;
			sview_bb_info_ptr->plugin = xstrdup(bb_ptr->name);
			sview_bb_info_ptr->pos = pos++;
			list_append(info_list, sview_bb_info_ptr);
		}
	}

	FREE_NULL_LIST(last_list);
	return info_list;
}
Example #24
/*
 * slurm_sprint_partition_info - output information about a specific Slurm
 *	partition based upon message as loaded using slurm_load_partitions
 * IN part_ptr - an individual partition information record pointer
 * IN one_liner - print as a single line if true
 * RET out - char * containing formatted output (must be freed after call)
 *           NULL is returned on failure.
 */
char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
				    int one_liner )
{
	char tmp1[16], tmp2[16];
	char tmp_line[MAXHOSTRANGELEN];
	char *out = NULL;
	char *allow_deny, *value;
	uint16_t force, preempt_mode, val;
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	/****** Line 1 ******/

	snprintf(tmp_line, sizeof(tmp_line),
		 "PartitionName=%s",
		 part_ptr->name);
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 2 ******/

	if ((part_ptr->allow_groups == NULL) ||
	    (part_ptr->allow_groups[0] == '\0'))
		sprintf(tmp_line, "AllowGroups=ALL");
	else {
		snprintf(tmp_line, sizeof(tmp_line),
			 "AllowGroups=%s", part_ptr->allow_groups);
	}
	xstrcat(out, tmp_line);

	if (part_ptr->allow_accounts || !part_ptr->deny_accounts) {
		allow_deny = "Allow";
		if ((part_ptr->allow_accounts == NULL) ||
		    (part_ptr->allow_accounts[0] == '\0'))
			value = "ALL";
		else
			value = part_ptr->allow_accounts;
	} else {
		allow_deny = "Deny";
		value = part_ptr->deny_accounts;
	}
	snprintf(tmp_line, sizeof(tmp_line),
		 " %sAccounts=%s", allow_deny, value);
	xstrcat(out, tmp_line);

	if (part_ptr->allow_qos || !part_ptr->deny_qos) {
		allow_deny = "Allow";
		if ((part_ptr->allow_qos == NULL) ||
		    (part_ptr->allow_qos[0] == '\0'))
			value = "ALL";
		else
			value = part_ptr->allow_qos;
	} else {
		allow_deny = "Deny";
		value = part_ptr->deny_qos;
	}
	snprintf(tmp_line, sizeof(tmp_line),
		 " %sQos=%s", allow_deny, value);
	xstrcat(out, tmp_line);

	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 3 ******/
	if (part_ptr->allow_alloc_nodes == NULL)
		snprintf(tmp_line, sizeof(tmp_line), "AllocNodes=%s","ALL");
	else
		snprintf(tmp_line, sizeof(tmp_line), "AllocNodes=%s",
			 part_ptr->allow_alloc_nodes);
	xstrcat(out, tmp_line);

	if (part_ptr->alternate != NULL) {
		snprintf(tmp_line, sizeof(tmp_line), " Alternate=%s",
			 part_ptr->alternate);
		xstrcat(out, tmp_line);
	}

	if (part_ptr->flags & PART_FLAG_DEFAULT)
		sprintf(tmp_line, " Default=YES");
	else
		sprintf(tmp_line, " Default=NO");
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 4 added here for BG partitions only
	 ****** to maintain alphabetized output ******/

	if (cluster_flags & CLUSTER_FLAG_BG) {
		snprintf(tmp_line, sizeof(tmp_line), "BasePartitions=%s",
			 part_ptr->nodes);
		xstrcat(out, tmp_line);
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n   ");
	}

	/****** Line 5 ******/

	if (part_ptr->default_time == INFINITE)
		sprintf(tmp_line, "DefaultTime=UNLIMITED");
	else if (part_ptr->default_time == NO_VAL)
		sprintf(tmp_line, "DefaultTime=NONE");
	else {
		char time_line[32];
		secs2time_str(part_ptr->default_time * 60, time_line,
			sizeof(time_line));
		sprintf(tmp_line, "DefaultTime=%s", time_line);
	}
	xstrcat(out, tmp_line);
	if (part_ptr->flags & PART_FLAG_NO_ROOT)
		sprintf(tmp_line, " DisableRootJobs=YES");
	else
		sprintf(tmp_line, " DisableRootJobs=NO");
	xstrcat(out, tmp_line);
	sprintf(tmp_line, " GraceTime=%u", part_ptr->grace_time);
	xstrcat(out, tmp_line);
	if (part_ptr->flags & PART_FLAG_HIDDEN)
		sprintf(tmp_line, " Hidden=YES");
	else
		sprintf(tmp_line, " Hidden=NO");
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 6 ******/

	if (part_ptr->max_nodes == INFINITE)
		sprintf(tmp_line, "MaxNodes=UNLIMITED");
	else {
		if (cluster_flags & CLUSTER_FLAG_BG)
			convert_num_unit((float)part_ptr->max_nodes,
					 tmp1, sizeof(tmp1), UNIT_NONE);
		else
			snprintf(tmp1, sizeof(tmp1),"%u", part_ptr->max_nodes);

		sprintf(tmp_line, "MaxNodes=%s", tmp1);
	}
	xstrcat(out, tmp_line);
	if (part_ptr->max_time == INFINITE)
		sprintf(tmp_line, " MaxTime=UNLIMITED");
	else {
		char time_line[32];
		secs2time_str(part_ptr->max_time * 60, time_line,
			      sizeof(time_line));
		sprintf(tmp_line, " MaxTime=%s", time_line);
	}
	xstrcat(out, tmp_line);
	if (cluster_flags & CLUSTER_FLAG_BG)
		convert_num_unit((float)part_ptr->min_nodes, tmp1, sizeof(tmp1),
				 UNIT_NONE);
	else
		snprintf(tmp1, sizeof(tmp1), "%u", part_ptr->min_nodes);
	sprintf(tmp_line, " MinNodes=%s", tmp1);
	xstrcat(out, tmp_line);
	if (part_ptr->max_cpus_per_node == INFINITE)
		sprintf(tmp_line, " MaxCPUsPerNode=UNLIMITED");
	else {
		sprintf(tmp_line, " MaxCPUsPerNode=%u",
			part_ptr->max_cpus_per_node);
	}
	xstrcat(out, tmp_line);

	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line added here for non BG nodes
	 to keep with alphabetized output******/

	if (!(cluster_flags & CLUSTER_FLAG_BG)) {
		snprintf(tmp_line, sizeof(tmp_line), "Nodes=%s",
			 part_ptr->nodes);
		xstrcat(out, tmp_line);
		if (one_liner)
			xstrcat(out, " ");
		else
			xstrcat(out, "\n   ");
	}

	/****** Line 7 ******/

	sprintf(tmp_line, "Priority=%u", part_ptr->priority);
	xstrcat(out, tmp_line);
	if (part_ptr->flags & PART_FLAG_ROOT_ONLY)
		sprintf(tmp_line, " RootOnly=YES");
	else
		sprintf(tmp_line, " RootOnly=NO");
	xstrcat(out, tmp_line);
	if (part_ptr->flags & PART_FLAG_REQ_RESV)
		sprintf(tmp_line, " ReqResv=YES");
	else
		sprintf(tmp_line, " ReqResv=NO");
	xstrcat(out, tmp_line);

	force = part_ptr->max_share & SHARED_FORCE;
	val = part_ptr->max_share & (~SHARED_FORCE);
	if (val == 0)
		xstrcat(out, " Shared=EXCLUSIVE");
	else if (force) {
		sprintf(tmp_line, " Shared=FORCE:%u", val);
		xstrcat(out, tmp_line);
	} else if (val == 1)
		xstrcat(out, " Shared=NO");
	else {
		sprintf(tmp_line, " Shared=YES:%u", val);
		xstrcat(out, tmp_line);
	}
	preempt_mode = part_ptr->preempt_mode;
	if (preempt_mode == (uint16_t) NO_VAL)
		preempt_mode = slurm_get_preempt_mode(); /* use cluster param */
	snprintf(tmp_line, sizeof(tmp_line), " PreemptMode=%s",
		 preempt_mode_string(preempt_mode));
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 8 ******/

	if (part_ptr->state_up == PARTITION_UP)
		sprintf(tmp_line, "State=UP");
	else if (part_ptr->state_up == PARTITION_DOWN)
		sprintf(tmp_line, "State=DOWN");
	else if (part_ptr->state_up == PARTITION_INACTIVE)
		sprintf(tmp_line, "State=INACTIVE");
	else if (part_ptr->state_up == PARTITION_DRAIN)
		sprintf(tmp_line, "State=DRAIN");
	else
		sprintf(tmp_line, "State=UNKNOWN");

	xstrcat(out, tmp_line);

	if (cluster_flags & CLUSTER_FLAG_BG)
		convert_num_unit((float)part_ptr->total_cpus, tmp1,
				 sizeof(tmp1), UNIT_NONE);
	else
		snprintf(tmp1, sizeof(tmp1), "%u", part_ptr->total_cpus);

	sprintf(tmp_line, " TotalCPUs=%s", tmp1);
	xstrcat(out, tmp_line);

	if (cluster_flags & CLUSTER_FLAG_BG)
		convert_num_unit((float)part_ptr->total_nodes, tmp2,
				 sizeof(tmp2), UNIT_NONE);
	else
		snprintf(tmp2, sizeof(tmp2), "%u", part_ptr->total_nodes);

	sprintf(tmp_line, " TotalNodes=%s", tmp2);
	xstrcat(out, tmp_line);

	if (part_ptr->cr_type & CR_CORE)
		sprintf(tmp_line, " SelectTypeParameters=CR_CORE");
	else if (part_ptr->cr_type & CR_SOCKET)
		sprintf(tmp_line, " SelectTypeParameters=CR_SOCKET");
	else
		sprintf(tmp_line, " SelectTypeParameters=N/A");
	xstrcat(out, tmp_line);
	if (one_liner)
		xstrcat(out, " ");
	else
		xstrcat(out, "\n   ");

	/****** Line 9 ******/
	if (part_ptr->def_mem_per_cpu & MEM_PER_CPU) {
		snprintf(tmp_line, sizeof(tmp_line), "DefMemPerCPU=%u",
			 part_ptr->def_mem_per_cpu & (~MEM_PER_CPU));
		xstrcat(out, tmp_line);

	} else if (part_ptr->def_mem_per_cpu == 0) {
		xstrcat(out, "DefMemPerNode=UNLIMITED");
	} else {
		snprintf(tmp_line, sizeof(tmp_line), "DefMemPerNode=%u",
			 part_ptr->def_mem_per_cpu);
		xstrcat(out, tmp_line);
	}

	if (part_ptr->max_mem_per_cpu & MEM_PER_CPU) {
		snprintf(tmp_line, sizeof(tmp_line), " MaxMemPerCPU=%u",
			 part_ptr->max_mem_per_cpu & (~MEM_PER_CPU));
		xstrcat(out, tmp_line);

	} else if (part_ptr->max_mem_per_cpu == 0) {
		xstrcat(out, " MaxMemPerNode=UNLIMITED");
	} else {
		snprintf(tmp_line, sizeof(tmp_line), " MaxMemPerNode=%u",
			 part_ptr->max_mem_per_cpu);
		xstrcat(out, tmp_line);
	}

	if (one_liner)
		xstrcat(out, "\n");
	else
		xstrcat(out, "\n\n");

	return out;
}
Example #25
static void _layout_resv_record(GtkTreeView *treeview,
				sview_resv_info_t *sview_resv_info,
				int update)
{
	GtkTreeIter iter;
	char time_buf[20], power_buf[20];
	reserve_info_t *resv_ptr = sview_resv_info->resv_ptr;
	char *temp_char = NULL;

	GtkTreeStore *treestore =
		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_ACCOUNTS),
				   resv_ptr->accounts);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_BURST_BUFFER),
				   resv_ptr->burst_buffer);

	convert_num_unit((float)resv_ptr->core_cnt,
			 time_buf, sizeof(time_buf), UNIT_NONE);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_CORE_CNT),
				   time_buf);
	secs2time_str((uint32_t)difftime(resv_ptr->end_time,
					 resv_ptr->start_time),
		      time_buf, sizeof(time_buf));
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_DURATION),
				   time_buf);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_FEATURES),
				   resv_ptr->features);

	temp_char = reservation_flags_string(resv_ptr->flags);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_FLAGS),
				   temp_char);
	xfree(temp_char);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_LICENSES),
				   resv_ptr->licenses);

	/* NOTE: node_cnt in reservation info from slurmctld ONE number */
	convert_num_unit((float)resv_ptr->node_cnt,
			 time_buf, sizeof(time_buf), UNIT_NONE);
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_NODE_CNT),
				   time_buf);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_NODELIST),
				   resv_ptr->node_list);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_PARTITION),
				   resv_ptr->partition);

	slurm_make_time_str((time_t *)&resv_ptr->end_time, time_buf,
			    sizeof(time_buf));
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_TIME_END),
				   time_buf);
	slurm_make_time_str((time_t *)&resv_ptr->start_time, time_buf,
			    sizeof(time_buf));
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_TIME_START),
				   time_buf);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_TRES),
				   resv_ptr->tres_str);

	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_USERS),
				   resv_ptr->users);

	if ((resv_ptr->resv_watts == NO_VAL) || (resv_ptr->resv_watts == 0)) {
		snprintf(power_buf, sizeof(power_buf), "0");
	} else if ((resv_ptr->resv_watts % 1000000) == 0) {
		snprintf(power_buf, sizeof(power_buf), "%uM",
			 resv_ptr->resv_watts / 1000000);
	} else if ((resv_ptr->resv_watts % 1000) == 0) {
		snprintf(power_buf, sizeof(power_buf), "%uK",
			 resv_ptr->resv_watts / 1000);
	} else {
		snprintf(power_buf, sizeof(power_buf), "%u",
			 resv_ptr->resv_watts);
	}
	add_display_treestore_line(update, treestore, &iter,
				   find_col_name(display_data_resv,
						 SORTID_WATTS),
				   power_buf);
}
static int _print_text_part(partition_info_t *part_ptr,
			    db2_block_info_t *db2_info_ptr)
{
	int printed = 0;
	int tempxcord;
	int prefixlen;
	int i = 0;
	int width = 0;
	char *nodes = NULL, time_buf[20], *conn_str = NULL;
	char tmp_cnt[8];
	char tmp_char[8];

	if (params.cluster_flags & CLUSTER_FLAG_BG)
		convert_num_unit((float)part_ptr->total_nodes, tmp_cnt,
				 sizeof(tmp_cnt), UNIT_NONE, NO_VAL,
				 CONVERT_NUM_UNIT_EXACT);
	else
		snprintf(tmp_cnt, sizeof(tmp_cnt), "%u", part_ptr->total_nodes);

	if (!params.commandline) {
		mvwprintw(text_win,
			  main_ycord,
			  main_xcord, "%c",
			  part_ptr->flags);
		main_xcord += 4;

		if (part_ptr->name) {
			mvwprintw(text_win,
				  main_ycord,
				  main_xcord, "%.9s",
				  part_ptr->name);
			main_xcord += 10;
			if (params.display != BGPART) {
				char *tmp_state;
				if (part_ptr->state_up == PARTITION_INACTIVE)
					tmp_state = "inact";
				else if (part_ptr->state_up == PARTITION_UP)
					tmp_state = "up";
				else if (part_ptr->state_up == PARTITION_DOWN)
					tmp_state = "down";
				else if (part_ptr->state_up == PARTITION_DRAIN)
					tmp_state = "drain";
				else
					tmp_state = "unk";
				mvwprintw(text_win, main_ycord, main_xcord,
					  tmp_state);
				main_xcord += 7;

				if (part_ptr->max_time == INFINITE)
					snprintf(time_buf, sizeof(time_buf),
						 "infinite");
				else {
					secs2time_str((part_ptr->max_time
						       * 60),
						      time_buf,
						      sizeof(time_buf));
				}

				width = strlen(time_buf);
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord + (9 - width),
					  "%s",
					  time_buf);
				main_xcord += 11;
			}
		} else
			main_xcord += 10;

		if (params.display == BGPART) {
			if (db2_info_ptr) {
				char *job_running = _set_running_job_str(
					db2_info_ptr->job_list, 1);
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "%.16s",
					  db2_info_ptr->bg_block_name);
				main_xcord += 18;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "%.7s",
					  bg_block_state_string(
						  db2_info_ptr->state));
				main_xcord += 8;

				snprintf(tmp_char, sizeof(tmp_char),
					 "%s", job_running);
				xfree(job_running);

				mvwprintw(text_win,
					  main_ycord,
					  main_xcord,
					  "%.8s", tmp_char);
				main_xcord += 8;

				conn_str = conn_type_string_full(
					db2_info_ptr->bg_conn_type);
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "%.7s",
					  conn_str);
				xfree(conn_str);
				main_xcord += 8;

			} else {
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 18;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 8;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 8;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 9;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 7;
				mvwprintw(text_win,
					  main_ycord,
					  main_xcord, "?");
				main_xcord += 10;
			}
		}
		mvwprintw(text_win,
			  main_ycord,
			  main_xcord, "%5s", tmp_cnt);

		main_xcord += 7;

		tempxcord = main_xcord;

		if (params.display == BGPART)
			nodes = part_ptr->allow_groups;
		else
			nodes = part_ptr->nodes;
		i = 0;
		prefixlen = i;
		while (nodes && nodes[i]) {
			width = getmaxx(text_win) - 1 - main_xcord;

			if (!prefixlen && i && (nodes[i] == '[') &&
			    (nodes[i - 1] == ','))
				prefixlen = i + 1;

			if (i && (nodes[i - 1] == ',') && ((width - 12) <= 0)) {
				main_ycord++;
				main_xcord = tempxcord + prefixlen;
			} else if (main_xcord >= getmaxx(text_win)) {
				main_ycord++;
				main_xcord = tempxcord + prefixlen;
			}

			if ((printed = mvwaddch(text_win,
						main_ycord,
						main_xcord,
						nodes[i])) < 0)
				return printed;
			main_xcord++;

			i++;
		}
		if ((params.display == BGPART) && db2_info_ptr &&
		    (db2_info_ptr->ionode_str)) {
			mvwprintw(text_win,
				  main_ycord,
				  main_xcord, "[%s]",
				  db2_info_ptr->ionode_str);
		}

		main_xcord = 1;
		main_ycord++;
	} else {
		if (part_ptr->name) {
			printf("%9.9s ", part_ptr->name);

			if (params.display != BGPART) {
				if (part_ptr->state_up == PARTITION_INACTIVE)
					printf(" inact ");
				else if (part_ptr->state_up == PARTITION_UP)
					printf("   up ");
				else if (part_ptr->state_up == PARTITION_DOWN)
					printf(" down ");
				else if (part_ptr->state_up == PARTITION_DRAIN)
					printf(" drain ");
				else
					printf(" unk ");

				if (part_ptr->max_time == INFINITE)
					snprintf(time_buf, sizeof(time_buf),
						 "infinite");
				else {
					secs2time_str((part_ptr->max_time
						       * 60),
						      time_buf,
						      sizeof(time_buf));
				}

				printf("%9.9s ", time_buf);
			}
		}

		if (params.display == BGPART) {
			if (db2_info_ptr) {
				char *job_running = _set_running_job_str(
					db2_info_ptr->job_list, 1);
				printf("%16.16s ",
				       db2_info_ptr->bg_block_name);
				printf("%-7.7s ",
				       bg_block_state_string(
					       db2_info_ptr->state));

				printf("%8.8s ", job_running);
				xfree(job_running);

				conn_str = conn_type_string_full(
					db2_info_ptr->bg_conn_type);
				printf("%8.8s ", conn_str);
				xfree(conn_str);
			}
		}

		printf("%5s ", tmp_cnt);

		if (params.display == BGPART)
			nodes = part_ptr->allow_groups;
		else
			nodes = part_ptr->nodes;

		if ((params.display == BGPART) && db2_info_ptr &&
		    (db2_info_ptr->ionode_str)) {
			printf("%s[%s]\n", nodes, db2_info_ptr->ionode_str);
		} else
			printf("%s\n",nodes);
	}
	return printed;
}
Example #27
0
/*
 * slurm_sprint_block_info - output information about a specific Bluegene
 *	block based upon the message loaded using slurm_load_block_info
 * IN block_ptr - an individual block information record pointer
 * IN one_liner - print as a single line if true
 * RET out - char * containing formatted output (must be freed after call)
 *           NULL is returned on failure.
 */
char *slurm_sprint_block_info(
	block_info_t * block_ptr, int one_liner)
{
	int j;
	char tmp1[16], tmp2[16], *tmp_char = NULL;
	char *out = NULL;
	char *line_end = "\n   ";
	uint32_t cluster_flags = slurmdb_setup_cluster_flags();

	if (one_liner)
		line_end = " ";

	/****** Line 1 ******/
	convert_num_unit((float)block_ptr->cnode_cnt, tmp1, sizeof(tmp1),
			 UNIT_NONE, NO_VAL, CONVERT_NUM_UNIT_EXACT);
	if (cluster_flags & CLUSTER_FLAG_BGQ) {
		convert_num_unit((float)block_ptr->cnode_err_cnt, tmp2,
				 sizeof(tmp2), UNIT_NONE, NO_VAL,
				 CONVERT_NUM_UNIT_EXACT);
		tmp_char = xstrdup_printf("%s/%s", tmp1, tmp2);
	} else
		tmp_char = tmp1;

	out = xstrdup_printf("BlockName=%s TotalNodes=%s State=%s%s",
			     block_ptr->bg_block_id, tmp_char,
			     bg_block_state_string(block_ptr->state),
			     line_end);
	if (cluster_flags & CLUSTER_FLAG_BGQ)
		xfree(tmp_char);
	/****** Line 2 ******/
	j = 0;
	if (block_ptr->job_list)
		j = list_count(block_ptr->job_list);

	if (!j)
		xstrcat(out, "JobRunning=NONE ");
	else if (j == 1) {
		block_job_info_t *block_job = list_peek(block_ptr->job_list);
		xstrfmtcat(out, "JobRunning=%u ", block_job->job_id);
	} else
		xstrcat(out, "JobRunning=Multiple ");

	tmp_char = conn_type_string_full(block_ptr->conn_type);
	xstrfmtcat(out, "ConnType=%s", tmp_char);
	xfree(tmp_char);
	if (cluster_flags & CLUSTER_FLAG_BGL)
		xstrfmtcat(out, " NodeUse=%s",
			   node_use_string(block_ptr->node_use));

	xstrcat(out, line_end);

	/****** Line 3 ******/
	if (block_ptr->ionode_str)
		xstrfmtcat(out, "MidPlanes=%s[%s] MPIndices=",
			   block_ptr->mp_str, block_ptr->ionode_str);
	else
		xstrfmtcat(out, "MidPlanes=%s MPIndices=",
			   block_ptr->mp_str);
	for (j = 0;
	     (block_ptr->mp_inx && (block_ptr->mp_inx[j] != -1));
	     j+=2) {
		if (j > 0)
			xstrcat(out, ",");
		xstrfmtcat(out, "%d-%d", block_ptr->mp_inx[j],
			   block_ptr->mp_inx[j+1]);
	}
	xstrcat(out, line_end);

	/****** Line 4 ******/
	xstrfmtcat(out, "MloaderImage=%s%s",
		   block_ptr->mloaderimage, line_end);

	if (cluster_flags & CLUSTER_FLAG_BGL) {
		/****** Line 5 ******/
		xstrfmtcat(out, "BlrtsImage=%s%s", block_ptr->blrtsimage,
			   line_end);
		/****** Line 6 ******/
		xstrfmtcat(out, "LinuxImage=%s%s", block_ptr->linuximage,
			   line_end);
		/****** Line 7 ******/
		xstrfmtcat(out, "RamdiskImage=%s", block_ptr->ramdiskimage);
	} else if (cluster_flags & CLUSTER_FLAG_BGP) {
		/****** Line 5 ******/
		xstrfmtcat(out, "CnloadImage=%s%s", block_ptr->linuximage,
			   line_end);
		/****** Line 6 ******/
		xstrfmtcat(out, "IoloadImage=%s", block_ptr->ramdiskimage);
	}

	if (block_ptr->reason)
		xstrfmtcat(out, "Reason=%s%s",
			   block_ptr->reason, line_end);

	if (one_liner)
		xstrcat(out, "\n");
	else
		xstrcat(out, "\n\n");

	return out;
}
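
A hedged usage sketch for the formatter above. It assumes the companion calls slurm_load_block_info() and slurm_free_block_info_msg() from the same Bluegene API, relies on the same SLURM headers as the code above, and frees each returned string as the header comment requires:

/* Usage sketch: load all block records, print each one, free everything.
 * Assumes slurm_load_block_info()/slurm_free_block_info_msg() are the
 * matching loader and cleanup calls for this API. */
static void _print_all_blocks(void)
{
	block_info_msg_t *block_msg = NULL;
	uint32_t i;

	if (slurm_load_block_info((time_t) 0, &block_msg, SHOW_ALL)) {
		slurm_perror("slurm_load_block_info");
		return;
	}

	for (i = 0; i < block_msg->record_count; i++) {
		char *out = slurm_sprint_block_info(
			&block_msg->block_array[i], 0);
		if (out) {
			printf("%s", out);
			xfree(out);	/* the formatted string must be freed */
		}
	}
	slurm_free_block_info_msg(block_msg);
}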
Example #28
0
extern int sacctmgr_list_cluster(int argc, char *argv[])
{
	int rc = SLURM_SUCCESS;
	slurmdb_cluster_cond_t *cluster_cond =
		xmalloc(sizeof(slurmdb_cluster_cond_t));
	List cluster_list;
	int i=0;
	ListIterator itr = NULL;
	ListIterator itr2 = NULL;
	slurmdb_cluster_rec_t *cluster = NULL;
	char *tmp_char = NULL;

	int field_count = 0;

	print_field_t *field = NULL;

	List format_list = list_create(slurm_destroy_char);
	List print_fields_list; /* elements are of type print_field_t */

	slurmdb_init_cluster_cond(cluster_cond, 0);
	cluster_cond->cluster_list = list_create(slurm_destroy_char);
	for (i=0; i<argc; i++) {
		int command_len = strlen(argv[i]);
		if (!strncasecmp(argv[i], "Where", MAX(command_len, 5))
		    || !strncasecmp(argv[i], "Set", MAX(command_len, 3)))
			i++;
		_set_cond(&i, argc, argv, cluster_cond, format_list);
	}

	if(exit_code) {
		slurmdb_destroy_cluster_cond(cluster_cond);
		list_destroy(format_list);
		return SLURM_ERROR;
	}

	if(!list_count(format_list)) {
		slurm_addto_char_list(format_list,
				      "Cl,Controlh,Controlp,RPC");
		if(!without_limits)
			slurm_addto_char_list(format_list,
					      "Fa,GrpJ,GrpN,GrpS,MaxJ,MaxN,"
					      "MaxS,MaxW,QOS,DefaultQOS");
	}

	cluster_cond->with_deleted = with_deleted;

	print_fields_list = sacctmgr_process_format_list(format_list);
	list_destroy(format_list);

	if(exit_code) {
		slurmdb_destroy_cluster_cond(cluster_cond);
		list_destroy(print_fields_list);
		return SLURM_ERROR;
	}

	cluster_list = acct_storage_g_get_clusters(db_conn, my_uid,
						   cluster_cond);
	slurmdb_destroy_cluster_cond(cluster_cond);

	if(!cluster_list) {
		exit_code=1;
		fprintf(stderr, " Problem with query.\n");
		list_destroy(print_fields_list);
		return SLURM_ERROR;
	}

	itr = list_iterator_create(cluster_list);
	itr2 = list_iterator_create(print_fields_list);
	print_fields_header(print_fields_list);

	field_count = list_count(print_fields_list);

	while((cluster = list_next(itr))) {
		int curr_inx = 1;
		slurmdb_association_rec_t *assoc = cluster->root_assoc;
		/* Set up the working cluster rec so node counts and node
		 * names are handled correctly. */
		working_cluster_rec = cluster;
		while((field = list_next(itr2))) {
			switch(field->type) {
			case PRINT_CLUSTER:
				field->print_routine(field,
						     cluster->name,
						     (curr_inx == field_count));
				break;
			case PRINT_CHOST:
				field->print_routine(field,
						     cluster->control_host,
						     (curr_inx == field_count));
				break;
			case PRINT_CPORT:
				field->print_routine(field,
						     cluster->control_port,
						     (curr_inx == field_count));
				break;
			case PRINT_CLASS:
				field->print_routine(field,
						     get_classification_str(
							     cluster->
							     classification),
						     (curr_inx == field_count));
				break;
			case PRINT_CPUS:
			{
				char tmp_char[9];
				convert_num_unit((float)cluster->cpu_count,
						 tmp_char, sizeof(tmp_char),
						 UNIT_NONE);
				field->print_routine(field,
						     tmp_char,
						     (curr_inx == field_count));
				break;
			}
			case PRINT_DQOS:
				if(!g_qos_list) {
					g_qos_list = acct_storage_g_get_qos(
						db_conn,
						my_uid,
						NULL);
				}
				tmp_char = slurmdb_qos_str(g_qos_list,
							   assoc->def_qos_id);
				field->print_routine(
					field,
					tmp_char,
					(curr_inx == field_count));
				break;
			case PRINT_FAIRSHARE:
				field->print_routine(
					field,
					assoc->shares_raw,
					(curr_inx == field_count));
				break;
			case PRINT_FLAGS:
			{
				char *tmp_char = slurmdb_cluster_flags_2_str(
					cluster->flags);
				field->print_routine(
					field,
					tmp_char,
					(curr_inx == field_count));
				xfree(tmp_char);
				break;
			}
			case PRINT_GRPC:
				field->print_routine(field,
						     assoc->grp_cpus,
						     (curr_inx == field_count));
				break;
			case PRINT_GRPJ:
				field->print_routine(field,
						     assoc->grp_jobs,
						     (curr_inx == field_count));
				break;
			case PRINT_GRPN:
				field->print_routine(field,
						     assoc->grp_nodes,
						     (curr_inx == field_count));
				break;
			case PRINT_GRPS:
				field->print_routine(field,
						     assoc->grp_submit_jobs,
						     (curr_inx == field_count));
				break;
			case PRINT_MAXCM:
				field->print_routine(
					field,
					assoc->max_cpu_mins_pj,
					(curr_inx == field_count));
				break;
			case PRINT_MAXC:
				field->print_routine(field,
						     assoc->max_cpus_pj,
						     (curr_inx == field_count));
				break;
			case PRINT_MAXJ:
				field->print_routine(field,
						     assoc->max_jobs,
						     (curr_inx == field_count));
				break;
			case PRINT_MAXN:
				field->print_routine(field,
						     assoc->max_nodes_pj,
						     (curr_inx == field_count));
				break;
			case PRINT_MAXS:
				field->print_routine(field,
						     assoc->max_submit_jobs,
						     (curr_inx == field_count));
				break;
			case PRINT_MAXW:
				field->print_routine(
					field,
					assoc->max_wall_pj,
					(curr_inx == field_count));
				break;

			case PRINT_NODECNT:
			{
				hostlist_t hl = hostlist_create(cluster->nodes);
				int cnt = 0;
				if(hl) {
					cnt = hostlist_count(hl);
					hostlist_destroy(hl);
				}
				field->print_routine(
					field,
					cnt,
					(curr_inx == field_count));
				break;
			}
			case PRINT_CLUSTER_NODES:
				field->print_routine(
					field,
					cluster->nodes,
					(curr_inx == field_count));
				break;
			case PRINT_QOS:
				if(!g_qos_list)
					g_qos_list = acct_storage_g_get_qos(
						db_conn, my_uid, NULL);

				field->print_routine(field,
						     g_qos_list,
						     assoc->qos_list,
						     (curr_inx == field_count));
				break;
			case PRINT_QOS_RAW:
				field->print_routine(field,
						     assoc->qos_list,
						     (curr_inx == field_count));
				break;
			case PRINT_RPC_VERSION:
				field->print_routine(
					field,
					cluster->rpc_version,
					(curr_inx == field_count));
				break;
			case PRINT_SELECT:
				field->print_routine(
					field,
					cluster->plugin_id_select,
					(curr_inx == field_count));
				break;
			default:
				field->print_routine(
					field, NULL,
					(curr_inx == field_count));
				break;
			}
			curr_inx++;
		}
		list_iterator_reset(itr2);
		printf("\n");
	}
	/* clear the working cluster rec */
	working_cluster_rec = NULL;

	list_iterator_destroy(itr2);
	list_iterator_destroy(itr);
	list_destroy(cluster_list);
	list_destroy(print_fields_list);

	return rc;
}
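
The NODECNT column above is computed by expanding the cluster's node expression with the hostlist API. The same three calls, isolated into a small sketch that assumes the same SLURM headers as the code above (the helper name _count_cluster_nodes is made up for illustration):

/* Count the nodes in an expression such as "tux[0-127]" using the same
 * hostlist_create()/hostlist_count()/hostlist_destroy() calls as the
 * PRINT_NODECNT case above. */
static int _count_cluster_nodes(const char *node_expr)
{
	hostlist_t hl;
	int cnt = 0;

	if (!node_expr)
		return 0;
	hl = hostlist_create(node_expr);
	if (hl) {
		cnt = hostlist_count(hl);
		hostlist_destroy(hl);
	}
	return cnt;
}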
Example #29
0
static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
				GtkTreeStore *treestore)
{
	char tmp_duration[40], tmp_end[40], tmp_nodes[40], tmp_start[40];
	char tmp_cores[40], power_buf[40];
	char *tmp_flags;
	reserve_info_t *resv_ptr = sview_resv_info_ptr->resv_ptr;

	secs2time_str((uint32_t)difftime(resv_ptr->end_time,
					 resv_ptr->start_time),
		      tmp_duration, sizeof(tmp_duration));

	slurm_make_time_str((time_t *)&resv_ptr->end_time, tmp_end,
			    sizeof(tmp_end));

	tmp_flags = reservation_flags_string(resv_ptr->flags);

	convert_num_unit((float)resv_ptr->core_cnt,
			 tmp_cores, sizeof(tmp_cores), UNIT_NONE, NO_VAL,
			 working_sview_config.convert_flags);

	convert_num_unit((float)resv_ptr->node_cnt,
			 tmp_nodes, sizeof(tmp_nodes), UNIT_NONE, NO_VAL,
			 working_sview_config.convert_flags);

	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_start,
			    sizeof(tmp_start));

	if ((resv_ptr->resv_watts == NO_VAL) || (resv_ptr->resv_watts == 0)) {
		snprintf(power_buf, sizeof(power_buf), "0");
	} else if ((resv_ptr->resv_watts % 1000000) == 0) {
		snprintf(power_buf, sizeof(power_buf), "%uM",
			 resv_ptr->resv_watts / 1000000);
	} else if ((resv_ptr->resv_watts % 1000) == 0) {
		snprintf(power_buf, sizeof(power_buf), "%uK",
			 resv_ptr->resv_watts / 1000);
	} else {
		snprintf(power_buf, sizeof(power_buf), "%u",
			 resv_ptr->resv_watts);
	}

	/* Combining these records provides a slight performance improvement */
	gtk_tree_store_set(treestore, &sview_resv_info_ptr->iter_ptr,
			   SORTID_ACCOUNTS,   resv_ptr->accounts,
			   SORTID_BURST_BUFFER, resv_ptr->burst_buffer,
			   SORTID_COLOR,
				sview_colors[sview_resv_info_ptr->color_inx],
			   SORTID_COLOR_INX,  sview_resv_info_ptr->color_inx,
			   SORTID_CORE_CNT,   tmp_cores,
			   SORTID_DURATION,   tmp_duration,
			   SORTID_FEATURES,   resv_ptr->features,
			   SORTID_FLAGS,      tmp_flags,
			   SORTID_LICENSES,   resv_ptr->licenses,
			   SORTID_NAME,       resv_ptr->name,
			   SORTID_NODE_CNT,   tmp_nodes,
			   SORTID_NODE_INX,   resv_ptr->node_inx,
			   SORTID_NODELIST,   resv_ptr->node_list,
			   SORTID_PARTITION,  resv_ptr->partition,
			   SORTID_TIME_START, tmp_start,
			   SORTID_TIME_END,   tmp_end,
			   SORTID_TRES,       resv_ptr->tres_str,
			   SORTID_UPDATED,    1,
			   SORTID_USERS,      resv_ptr->users,
			   SORTID_WATTS,      power_buf,
			   -1);

	xfree(tmp_flags);

	return;
}
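
The reservation watts value above is abbreviated only when it divides evenly into megawatts or kilowatts; otherwise the raw number is printed. A minimal standalone sketch of that rule, assuming NO_VAL matches SLURM's "not set" sentinel (the helper name _format_resv_watts is hypothetical; the source inlines this logic):

#include <stdio.h>
#include <stdint.h>

#ifndef NO_VAL
#define NO_VAL (0xfffffffe)	/* assumed to match SLURM's "not set" sentinel */
#endif

/* Render watts as "<n>M", "<n>K", or "<n>" exactly as the code above does. */
static void _format_resv_watts(uint32_t watts, char *buf, size_t buf_size)
{
	if ((watts == NO_VAL) || (watts == 0))
		snprintf(buf, buf_size, "0");
	else if ((watts % 1000000) == 0)
		snprintf(buf, buf_size, "%uM", watts / 1000000);
	else if ((watts % 1000) == 0)
		snprintf(buf, buf_size, "%uK", watts / 1000);
	else
		snprintf(buf, buf_size, "%u", watts);
}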
Example #30
0
static bg_record_t *_find_matching_block(List block_list,
					 struct job_record* job_ptr,
					 bitstr_t* slurm_block_bitmap,
					 select_ba_request_t *request,
					 uint32_t max_cpus,
					 int *allow, int check_image,
					 int overlap_check,
					 List overlapped_list,
					 uint16_t query_mode)
{
	bg_record_t *bg_record = NULL;
	ListIterator itr = NULL;
	char tmp_char[256];

	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
		info("number of blocks to check: %d state %d "
		     "asking for %u-%u cpus",
		     list_count(block_list),
		     query_mode, request->procs, max_cpus);

	itr = list_iterator_create(block_list);
	while ((bg_record = list_next(itr))) {
		/* If test_only we want to fall through to tell the
		   scheduler that it is runnable just not right now.
		*/

		/* The job running could be reset so set it back up
		   here if there is a job_ptr
		*/
		if (bg_record->job_ptr)
			bg_record->job_running = bg_record->job_ptr->job_id;

		/* The block is messed up somehow (job_running ==
		 * BLOCK_ERROR_STATE or BG_BLOCK_ERROR_FLAG set); ignore it. */
		if ((bg_record->job_running == BLOCK_ERROR_STATE)
		    || (bg_record->state & BG_BLOCK_ERROR_FLAG)) {
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("block %s is in an error "
				     "state (can't use)",
				     bg_record->bg_block_id);
			continue;
		} else if ((bg_conf->layout_mode == LAYOUT_DYNAMIC)
			   || ((!SELECT_IS_CHECK_FULL_SET(query_mode)
				|| SELECT_IS_MODE_RUN_NOW(query_mode))
			       && (bg_conf->layout_mode != LAYOUT_DYNAMIC))) {
			if (bg_record->free_cnt) {
				/* No reason to look at a block that
				   is being freed unless we are
				   running static and looking at the
				   full set.
				*/
				if (bg_conf->slurm_debug_flags
				    & DEBUG_FLAG_BG_PICK)
					info("block %s being free for other "
					     "job(s), skipping",
					     bg_record->bg_block_id);
				continue;
			} else if ((bg_record->job_running != NO_JOB_RUNNING)
				   && (bg_record->job_running
				       != job_ptr->job_id)) {
				/* Look here if you are trying to run now or
				   if you aren't looking at the full set.  We
				   don't continue on running blocks for the
				   full set because we are seeing if the job
				   can ever run so look here.
				*/
				if (bg_conf->slurm_debug_flags
				    & DEBUG_FLAG_BG_PICK)
					info("block %s in use by %s job %d",
					     bg_record->bg_block_id,
					     bg_record->user_name,
					     bg_record->job_running);
				continue;
			}
		}

		/* Check processor count */
		if ((bg_record->cpu_cnt < request->procs)
		    || ((max_cpus != NO_VAL)
			&& (bg_record->cpu_cnt > max_cpus))) {
			/* We use the processor count per block here
			   mostly to see if we can run on a smaller block.
			*/
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
				convert_num_unit((float)bg_record->cpu_cnt,
						 tmp_char,
						 sizeof(tmp_char), UNIT_NONE);
				info("block %s CPU count (%s) not suitable",
				     bg_record->bg_block_id,
				     tmp_char);
			}
			continue;
		}

		/*
		 * Next we check that this block's bitmap is within
		 * the set of nodes which the job can use.
		 * Nodes not available for the job could be down,
		 * drained, allocated to some other job, or in some
		 * SLURM block not available to this job.
		 */
		if (!bit_super_set(bg_record->mp_bitmap, slurm_block_bitmap)) {
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
				char *temp = bitmap2node_name(
					bg_record->mp_bitmap);
				char *temp2 = bitmap2node_name(
					slurm_block_bitmap);
				info("bg block %s has nodes not "
				     "usable by this job %s %s",
				     bg_record->bg_block_id, temp, temp2);
				xfree(temp);
				xfree(temp2);
			}
			continue;
		}

		/*
		 * Ensure that any required nodes are in this BG block
		 */
		if (job_ptr->details->req_node_bitmap
		    && (!bit_super_set(job_ptr->details->req_node_bitmap,
				       bg_record->mp_bitmap))) {
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("bg block %s lacks required nodes",
				     bg_record->bg_block_id);
			continue;
		}

		if (_check_for_booted_overlapping_blocks(
			    block_list, itr, bg_record,
			    overlap_check, overlapped_list, query_mode))
			continue;

		if (check_image) {
#ifdef HAVE_BGL
			if (request->blrtsimage &&
			    strcasecmp(request->blrtsimage,
				       bg_record->blrtsimage)) {
				*allow = 1;
				continue;
			}
#endif
#ifdef HAVE_BG_L_P
			if (request->linuximage &&
			    strcasecmp(request->linuximage,
				       bg_record->linuximage)) {
				*allow = 1;
				continue;
			}

			if (request->ramdiskimage &&
			    strcasecmp(request->ramdiskimage,
				       bg_record->ramdiskimage)) {
				*allow = 1;
				continue;
			}
#endif
			if (request->mloaderimage &&
			    strcasecmp(request->mloaderimage,
				       bg_record->mloaderimage)) {
				*allow = 1;
				continue;
			}
		}

		/***********************************************/
		/* check the connection type specified matches */
		/***********************************************/
		if ((request->conn_type[0] != bg_record->conn_type[0])
		    && (request->conn_type[0] != SELECT_NAV)) {
#ifdef HAVE_BGP
			if (request->conn_type[0] >= SELECT_SMALL) {
				/* We only want to reboot blocks when we
				   have to, so skip blocks that are
				   already booted when a small block is
				   requested.
				*/
				if (check_image
				    && (bg_record->state
					== BG_BLOCK_INITED)) {
					*allow = 1;
					continue;
				}
				goto good_conn_type;
			} else if (bg_record->conn_type[0] >= SELECT_SMALL) {
				/* Since we already checked that the cpu
				   count was acceptable, we are looking
				   for a block in a range that includes
				   both small and regular blocks, so we
				   can just continue on.
				*/
				goto good_conn_type;
			}
#endif
			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
				info("bg block %s conn-type not usable "
				     "asking for %s bg_record is %s",
				     bg_record->bg_block_id,
				     conn_type_string(request->conn_type[0]),
				     conn_type_string(bg_record->conn_type[0]));
			continue;
		}
#ifdef HAVE_BGP
	good_conn_type:
#endif
		/*****************************************/
		/* match up geometry as "best" possible  */
		/*****************************************/
		if ((request->geometry[0] != (uint16_t)NO_VAL)
		    && (!_check_rotate_geo(bg_record->geo, request->geometry,
					   request->rotate)))
			continue;

		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
			info("we found one! %s", bg_record->bg_block_id);
		break;
	}
	list_iterator_destroy(itr);

	return bg_record;
}
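
The node-availability check in the loop above hinges on bit_super_set(): a block is only a candidate if every midplane bit it sets is also set in the job's usable-node bitmap. A tiny illustration, assuming SLURM's bitstring calls (bit_alloc, bit_set, bit_super_set, bit_free) behave as they are used above and that the same headers are available:

/* Illustration of the superset test: returns 1 when every bit set in
 * "needed" (the block's midplanes) is also set in "usable" (the nodes
 * the job may use).  Assumes SLURM's bitstring API. */
static int _block_fits_usable_nodes(void)
{
	bitstr_t *usable = bit_alloc(8);
	bitstr_t *needed = bit_alloc(8);
	int fits;

	bit_set(usable, 0);
	bit_set(usable, 1);
	bit_set(usable, 2);

	bit_set(needed, 1);
	bit_set(needed, 2);	/* needed is a subset of usable */

	fits = bit_super_set(needed, usable);	/* evaluates to 1 here */

	bit_free(usable);
	bit_free(needed);
	return fits;
}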