Esempio n. 1
0
/* Load current job table information into *job_buffer_pptr.
 *
 * Caches the previous response in the file-scope old_job_info_ptr so that
 * subsequent calls can ask the controller only for changes since the last
 * update.  When job_id is non-zero a single job is fetched via
 * slurm_load_job(); otherwise all jobs are fetched via slurm_load_jobs().
 *
 * Returns SLURM_SUCCESS on success (including the NO_CHANGE_IN_DATA case,
 * where the cached buffer is reused) or a SLURM error code.  On success
 * *job_buffer_pptr aliases the cached buffer retained in old_job_info_ptr;
 * the caller must not free it directly. */
extern int
scontrol_load_job(job_info_msg_t ** job_buffer_pptr, uint32_t job_id)
{
	int error_code;
	/* Flags used on the previous call; 0xffff cannot match any real
	 * flag combination, so the first call always forces a full load. */
	static uint16_t last_show_flags = 0xffff;
	uint16_t show_flags = 0;
	job_info_msg_t * job_info_ptr = NULL;

	/* Translate the global command-line flags into SHOW_* bits. */
	if (all_flag)
		show_flags |= SHOW_ALL;

	if (detail_flag) {
		show_flags |= SHOW_DETAIL;
		if (detail_flag > 1)
			show_flags |= SHOW_DETAIL2;
	}
	if (federation_flag)
		show_flags |= SHOW_FEDERATION;
	if (local_flag)
		show_flags |= SHOW_LOCAL;
	if (sibling_flag)
		show_flags |= SHOW_FEDERATION | SHOW_SIBLING;

	if (old_job_info_ptr) {
		/* Different flags invalidate the cache: zeroing last_update
		 * forces the controller to send a full snapshot. */
		if (last_show_flags != show_flags)
			old_job_info_ptr->last_update = (time_t) 0;
		if (job_id) {
			error_code = slurm_load_job(&job_info_ptr, job_id,
						    show_flags);
		} else {
			/* Incremental load: only changes since the cached
			 * buffer's timestamp are returned. */
			error_code = slurm_load_jobs(
				old_job_info_ptr->last_update,
				&job_info_ptr, show_flags);
		}
		/* Fresh data replaces the cache; "no change" keeps it. */
		if (error_code == SLURM_SUCCESS)
			slurm_free_job_info_msg (old_job_info_ptr);
		else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
			job_info_ptr = old_job_info_ptr;
			error_code = SLURM_SUCCESS;
			if (quiet_flag == -1)
 				printf ("slurm_load_jobs no change in data\n");
		}
	} else if (job_id) {
		error_code = slurm_load_job(&job_info_ptr, job_id, show_flags);
	} else {
		error_code = slurm_load_jobs((time_t) NULL, &job_info_ptr,
					     show_flags);
	}

	if (error_code == SLURM_SUCCESS) {
		old_job_info_ptr = job_info_ptr;
		/* A single-job response must not seed an incremental reload
		 * of all jobs next time, so reset its timestamp. */
		if (job_id)
			old_job_info_ptr->last_update = (time_t) 0;
		last_show_flags  = show_flags;
		*job_buffer_pptr = job_info_ptr;
	}

	return error_code;
}
Esempio n. 2
0
// Get bar summaries for cluster nodes
void ClusterMenu::get_lines() {
    // First we set the time of this update
    last_update = std::chrono::steady_clock::now();

    // Call SLURM API to write node information to pointer
    // Free pointer memory first if it has been previously set
    if (node_info_buffer_ptr != NULL) {
        slurm_free_node_info_msg(node_info_buffer_ptr);
    }
    slurm_load_node ((time_t) NULL, &node_info_buffer_ptr, SHOW_ALL);

    // Create a NodeContainer struct and populate with node information
    node_container.populate_nodes_from_slurm(node_info_buffer_ptr);

    // Call API function, pass job_info_ptr as reference (double pointer); flags must be SHOW_DETAIL to get job allocations
    // Free pointer memory first if it has been previously set
    if (job_info_buffer_ptr != NULL) {
        slurm_free_job_info_msg(job_info_buffer_ptr);
    }
    slurm_load_jobs((time_t) NULL, &job_info_buffer_ptr, SHOW_DETAIL);

    // Populate nodes with job allocations
    node_container.populate_job_allocations_from_slurm(job_info_buffer_ptr);

    // Get line content
    lines = node_container.get_node_bar_summary(32);

    // Record largest line for later use in horizontal scrolling
    get_longest_line();
}
Esempio n. 3
0
/* Return the number of nodes allocated to the job with the given id.
 * Falls back to 1 when the job cannot be found, its node list cannot be
 * parsed, or the job data cannot be fetched from the controller. */
static int _get_job_size(uint32_t job_id)
{
	job_info_msg_t *msg = NULL;
	int idx;
	int size = 1;

	if (slurm_load_jobs((time_t) 0, &msg, SHOW_ALL)) {
		slurm_perror("slurm_load_jobs");
		return 1;
	}

	for (idx = 0; idx < msg->record_count; idx++) {
		job_info_t *job = &msg->job_array[idx];
		hostlist_t hl;

		if (job->job_id != job_id)
			continue;

		/* Found the job: count the hosts in its node expression. */
		hl = hostlist_create(job->nodes);
		if (hl) {
			size = hostlist_count(hl);
			hostlist_destroy(hl);
		}
		break;
	}
	slurm_free_job_info_msg(msg);

#if _DEBUG
	printf("Size is %d\n", size);
#endif
	return size;
}
Esempio n. 4
0
/*
 * Load job information into *job_buffer_pptr.
 *
 * When job_id is non-zero only that job is fetched via slurm_load_job();
 * otherwise all jobs are fetched.  (BUG FIX: the original accepted job_id
 * but never used it, so it always loaded every job.)
 *
 * Returns SLURM_SUCCESS on success, in which case the caller owns the
 * buffer and must release it with slurm_free_job_info_msg(); on failure
 * the SLURM error code is returned and *job_buffer_pptr is untouched.
 */
int load_job(job_info_msg_t ** job_buffer_pptr, uint32_t job_id) {

    int error_code;
    uint16_t show_flags = 0;
    job_info_msg_t * job_info_ptr = NULL;

    if (job_id)
        error_code = slurm_load_job(&job_info_ptr, job_id, show_flags);
    else
        error_code = slurm_load_jobs((time_t) NULL, &job_info_ptr,
                                     show_flags);

    if (error_code == SLURM_SUCCESS) {
        *job_buffer_pptr = job_info_ptr;
    }
    return error_code;
}
Esempio n. 5
0
/* Build a hostlist of the nodes belonging to the requested SLURM job(s).
 *
 * joblist is the user-supplied list of job ids (may contain "all"); when
 * it is NULL the job id is taken from the environment via _slurm_jobid().
 * Returns a uniq'd hostlist of matching nodes, or NULL when nothing
 * matched (or no job id was available). */
static hostlist_t _slurm_wcoll (List joblist)
{
    int i;
    hostlist_t hl = NULL;
    job_info_msg_t * msg;
    int32_t envjobid = 0;
    int alljobids = 0;

    /* No explicit job list and no usable job id from the environment:
     * nothing to collect. */
    if ((joblist == NULL) && (envjobid = _slurm_jobid()) < 0)
        return (NULL);

    /* NOTE(review): errx presumably terminates the process; if it can
     * return, msg would be used uninitialized below — verify. */
    if (slurm_load_jobs((time_t) NULL, &msg, 1) < 0) 
        errx ("Unable to contact slurm controller: %s\n", 
              slurm_strerror (errno));

    /*
     *  Check for "all" in joblist
     */
    alljobids = _alljobids_requested (joblist);

    for (i = 0; i < msg->record_count; i++) {
        job_info_t *j = &msg->job_array[i];

        if (alljobids && j->job_state == JOB_RUNNING)
            hl = _hl_append (hl, j->nodes);
        else if (!joblist && (j->job_id == envjobid)) {
            /*
             *  Only use SLURM_JOBID environment variable if user
             *   didn't override with -j option
             */
            hl = hostlist_create (j->nodes);
            break;
        }
        else if (_jobid_requested (joblist, j->job_id)) {
            hl = _hl_append (hl, j->nodes);
            /* 
             * Exit when there is no more jobids to search
             * (NOTE(review): _jobid_requested apparently consumes the
             * matched id from joblist — confirm in its definition.)
             */
            if (list_count (joblist) == 0)
                break;
        }
    }
    
    slurm_free_job_info_msg (msg);

    /* Collapse duplicates: a node may appear in several selected jobs. */
    if (hl)
        hostlist_uniq (hl);

    return (hl);
}
Esempio n. 6
0
/* _load_job_records - load all job information for filtering
 * and verification
 */
static void
_load_job_records (void)
{
	int error_code;

	/* We need the fill job array string representation for identifying
	 * and killing job arrays */
	setenv("SLURM_BITSTR_LEN", "0", 1);
	error_code = slurm_load_jobs ((time_t) NULL, &job_buffer_ptr, 1);

	if (error_code) {
		slurm_perror ("slurm_load_jobs error");
		exit (1);
	}
}
Esempio n. 7
0
/* main is used here for testing purposes only: load all jobs, print
 * them to stdout, and free the buffer. */
int 
main (int argc, char *argv[]) 
{
	static time_t last_update_time = (time_t) NULL;
	job_info_msg_t *msg = NULL;
	int rc;

	rc = slurm_load_jobs(last_update_time, &msg, 1);
	if (rc) {
		slurm_perror("slurm_load_jobs");
		return (rc);
	}

	slurm_print_job_info_msg(stdout, msg, 1);

	slurm_free_job_info_msg(msg);
	return (0);
}
Esempio n. 8
0
int main() {
    // Initialise container for all node information
    NodeContainer node_container;


    // Declare a pointer to which the SLURM API writes node information
    node_info_msg_t * node_info_buffer_ptr = NULL;
    // Call SLURM API to write node information to pointer
    slurm_load_node((time_t) NULL, &node_info_buffer_ptr, SHOW_ALL);

    // Create a NodeContainer struct and populate with node information
    node_container.populate_nodes_from_slurm(node_info_buffer_ptr);


    // Declare a pointer to which the SLURM API writes job information
    job_info_msg_t * job_info_buffer_ptr = NULL;
    // Call API function, pass job_info_ptr as reference (double pointer); flags must be SHOW_DETAIL to get job allocations
    slurm_load_jobs((time_t) NULL, &job_info_buffer_ptr, SHOW_DETAIL);

    // Populate nodes with job allocations
    node_container.populate_job_allocations_from_slurm(job_info_buffer_ptr);


    // Get lines for output
    std::vector<std::string> lines = node_container.get_node_bar_summary(32);

    // Print output
    for (std::vector<std::string>::iterator it = lines.begin(); it != lines.end(); ++it) {
        printf("%s\n", it->c_str());
    }


    // Clean up and nicely deallocated memory for SLURM pointers
    slurm_free_node_info_msg(node_info_buffer_ptr);
    slurm_free_job_info_msg(job_info_buffer_ptr);
}
Esempio n. 9
0
File: squeue.c Project: VURM/slurm
/* _print_job - print the specified job's information.
 *
 * Loads job data (one job when exactly one id is listed, otherwise all
 * jobs), caching the previous response in the static old_job_ptr so that
 * later calls can request only changes since the last update.  clear_old
 * forces a full reload by zeroing the cached timestamp.
 * Returns SLURM_SUCCESS or SLURM_ERROR. */
static int
_print_job ( bool clear_old )
{
	static job_info_msg_t * old_job_ptr = NULL, * new_job_ptr;
	int error_code;
	uint16_t show_flags = 0;
	uint32_t job_id = 0;

	if (params.all_flag || (params.job_list && list_count(params.job_list)))
		show_flags |= SHOW_ALL;

	/* We require detail data when CPUs are requested */
	if (params.format && strstr(params.format, "C"))
		show_flags |= SHOW_DETAIL;

	/* A job list with exactly one entry selects the single-job path. */
	if (params.job_list && (list_count(params.job_list) == 1)) {
		ListIterator iterator;
		uint32_t *job_id_ptr;
		iterator = list_iterator_create(params.job_list);
		job_id_ptr = list_next(iterator);
		job_id = *job_id_ptr;
		list_iterator_destroy(iterator);
	}

	if (old_job_ptr) {
		if (clear_old)
			old_job_ptr->last_update = 0;
		if (job_id) {
			error_code = slurm_load_job(
				&new_job_ptr, job_id,
				show_flags);
		} else {
			/* Incremental load since the cached timestamp. */
			error_code = slurm_load_jobs(
				old_job_ptr->last_update,
				&new_job_ptr, show_flags);
		}
		/* Fresh data replaces the cache; "no change" keeps it. */
		if (error_code ==  SLURM_SUCCESS)
			slurm_free_job_info_msg( old_job_ptr );
		else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
			error_code = SLURM_SUCCESS;
			new_job_ptr = old_job_ptr;
		}
	} else if (job_id) {
		error_code = slurm_load_job(&new_job_ptr, job_id, show_flags);
	} else {
		error_code = slurm_load_jobs((time_t) NULL, &new_job_ptr,
					     show_flags);
	}

	if (error_code) {
		slurm_perror ("slurm_load_jobs error");
		return SLURM_ERROR;
	}
	old_job_ptr = new_job_ptr;
	/* A single-job response must not seed an incremental reload of all
	 * jobs next time, so reset its timestamp. */
	if (job_id)
		old_job_ptr->last_update = (time_t) 0;

	if (params.verbose) {
		printf ("last_update_time=%ld\n",
		        (long) new_job_ptr->last_update);
	}

	/* Fall back to the default squeue formats when none was given. */
	if (params.format == NULL) {
		if (params.long_list)
			params.format = "%.7i %.9P %.8j %.8u %.8T %.10M %.9l "
				"%.6D %R";
		else
			params.format = "%.7i %.9P %.8j %.8u  %.2t %.10M %.6D %R";
	}
	if (params.format_list == NULL)
		parse_format(params.format);

	print_jobs_array( new_job_ptr->job_array, new_job_ptr->record_count ,
			  params.format_list ) ;
	return SLURM_SUCCESS;
}
int main()
{

//===========================================================================================
// Declarations
//===========================================================================================
	
	int i,j;
	int i2,j2;
	
	job_info_msg_t *job_ptr;	
	partition_info_msg_t *prt_ptr = NULL;
	node_info_msg_t *node_ptr = NULL;

	int err = SLURM_SUCCESS;
	err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
	err = slurm_load_jobs((time_t) NULL, &job_ptr, 0);	

	Linked_List_Node* job_llist;
	struct json_object *partition = json_object_new_object();
	struct json_object *node = json_object_new_object();


//===========================================================================================
// Filling hash tables
//===========================================================================================

	j2 = 0;	
	i2 = 0;

	//fill node_job hash
	if (job_ptr->record_count > 0) {

		for (i = 0; i < job_ptr->record_count; i++) {
			
			
			j2=0;
			while( job_ptr->job_array[i].node_inx[j2] >= 0){
				
				i2 = 0;
								
				for(i2 = job_ptr->job_array[i].node_inx[j2];i2 <= job_ptr->job_array[i].node_inx[j2+1];i2++) {

					node_job_put(node_ptr->node_array[i2].name,job_ptr -> job_array[i]);
				}
				j2+=2;
			}
		
		}
	}
	

//============================================================================================
// Creating Output in Json
//============================================================================================

	// create json	

	int total_node_unknown = 0;
	int total_node_down = 0;
	int total_node_idle = 0;
	int total_node_allocated = 0;
	
	int current_node_unknown;
	int current_node_down;
	int current_node_idle;
	int current_node_allocated;

	int current_job_running;
	int current_job_waiting;
	int current_job_stopped;


	if(prt_ptr -> record_count > 0){	
	
		for (i = 0; i < prt_ptr->record_count; i++) {


			current_node_unknown = 0;
			current_node_down = 0;
			current_node_idle = 0;
			current_node_allocated = 0;

			current_job_running = 0;
			current_job_waiting = 0;			
			current_job_stopped = 0;
			
			
			int j2=0;
			while( prt_ptr->partition_array[i].node_inx[j2] >= 0){
				
				int i2 = 0;

				for(i2 = prt_ptr->partition_array[i].node_inx[j2];i2 <= prt_ptr->partition_array[i].node_inx[j2+1];i2++) {


					if(node_ptr->node_array[i2].node_state == 5 || node_ptr->node_array[i2].node_state == 3){   total_node_allocated++;   current_node_allocated++;}
					else if(node_ptr->node_array[i2].node_state == 1){ 	total_node_down++;      current_node_down++;}
					else if(node_ptr->node_array[i2].node_state == 2 || node_ptr->node_array[i2].node_state == 6){ 	total_node_idle++;      current_node_idle++;}
					else { 	total_node_unknown++; current_node_unknown++;}
	
					job_llist = node_job_get(node_ptr->node_array[i2].name); 			//get job name	
					while( job_llist != NULL){
						

						if(job_llist->value_job.job_state == 2 || job_llist->value_job.job_state == 8 || job_llist->value_job.job_state == 0) 	current_job_waiting++;
						else if(job_llist->value_job.job_state == 1) 	current_job_running++;
						else	current_job_stopped++;
					
						job_llist = job_llist->next;			
									
					}
				}	
				j2+=2;		
	
			}
			
			json_object_object_add(node, "Allocated", json_object_new_int(current_node_allocated));
			json_object_object_add(node, "Down", json_object_new_int(current_node_down));
			json_object_object_add(node, "Idle", json_object_new_int(current_node_idle));
			json_object_object_add(node, "Unknown", json_object_new_int(current_node_unknown));
			json_object_object_add(node, "Running", json_object_new_int(current_job_running));
			json_object_object_add(node, "Wating", json_object_new_int(current_job_waiting));
			json_object_object_add(node, "Stopped", json_object_new_int(current_job_stopped));

			json_object_object_add(partition, prt_ptr->partition_array[i].name , node);

		}
	
	}		
	
	node = json_object_new_object();
	json_object_object_add(node, "Unknown", json_object_new_int(total_node_unknown));
	json_object_object_add(node, "Down", json_object_new_int(total_node_down));
	json_object_object_add(node, "Idle", json_object_new_int(total_node_idle));
	json_object_object_add(node, "Allocated", json_object_new_int(total_node_allocated));
	
	json_object_object_add(partition, "General", node);
	printf("Content-type: text/html\n\n%s",json_object_to_json_string(partition));

	slurm_free_partition_info_msg(prt_ptr);
	slurm_free_node_info_msg(node_ptr);
	return 1;


}
Esempio n. 11
0
/* Fetch and render the job table for the smap/curses display.
 *
 * Caches the previous response in the static job_info_ptr so subsequent
 * calls can request only changes since the last update.  Jobs are drawn
 * in two passes: first active jobs (with node-grid highlighting), then
 * pending jobs (shown as "waiting...").  Uses the globals params,
 * text_win, main_ycord, text_line_cnt, letters and colors. */
extern void get_job(void)
{
	int error_code = -1, i, recs;
	/* Persist across calls so scrolling state survives refreshes. */
	static int printed_jobs = 0;
	static int count = 0;
	static job_info_msg_t *job_info_ptr = NULL, *new_job_ptr = NULL;
	job_info_t *job_ptr = NULL;
	uint16_t show_flags = 0;
	bitstr_t *nodes_req = NULL;
	static uint16_t last_flags = 0;

	if (params.all_flag)
		show_flags |= SHOW_ALL;
	if (job_info_ptr) {
		/* Changed flags invalidate the cache: zeroing last_update
		 * forces a full snapshot from the controller. */
		if (show_flags != last_flags)
			job_info_ptr->last_update = 0;
		error_code = slurm_load_jobs(job_info_ptr->last_update,
					     &new_job_ptr, show_flags);
		/* Fresh data replaces the cache; "no change" keeps it. */
		if (error_code == SLURM_SUCCESS)
			slurm_free_job_info_msg(job_info_ptr);
		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
			error_code = SLURM_SUCCESS;
			new_job_ptr = job_info_ptr;
		}
	} else
		error_code = slurm_load_jobs((time_t) NULL, &new_job_ptr,
					     show_flags);

	last_flags = show_flags;
	if (error_code) {
		/* Report the error but fall through: the display is still
		 * redrawn from whatever data (if any) is available. */
		if (quiet_flag != 1) {
			if (!params.commandline) {
				mvwprintw(text_win,
					  main_ycord, 1,
					  "slurm_load_jobs: %s",
					  slurm_strerror(slurm_get_errno()));
				main_ycord++;
			} else {
				printf("slurm_load_jobs: %s\n",
				       slurm_strerror(slurm_get_errno()));
			}
		}
	}

	if (!params.no_header)
		_print_header_job();

	if (new_job_ptr)
		recs = new_job_ptr->record_count;
	else
		recs = 0;

	/* Clamp the scroll position if the job list shrank. */
	if (!params.commandline)
		if ((text_line_cnt+printed_jobs) > count)
			text_line_cnt--;
	printed_jobs = 0;
	count = 0;

	if (params.hl)
		nodes_req = get_requested_node_bitmap();
	/* Pass 1: jobs that still occupy nodes (pending jobs included to
	 * keep indices stable; completed jobs are skipped). */
	for (i = 0; i < recs; i++) {
		job_ptr = &(new_job_ptr->job_array[i]);
		if (!IS_JOB_PENDING(job_ptr)   && !IS_JOB_RUNNING(job_ptr) &&
		    !IS_JOB_SUSPENDED(job_ptr) && !IS_JOB_COMPLETING(job_ptr))
			continue;	/* job has completed */
		/* With -h/--nodes, show only jobs overlapping the request. */
		if (nodes_req) {
			int overlap = 0;
			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
			inx2bitstr(loc_bitmap, job_ptr->node_inx);
			overlap = bit_overlap(loc_bitmap, nodes_req);
			FREE_NULL_BITMAP(loc_bitmap);
			if (!overlap)
				continue;
		}

		if (job_ptr->node_inx[0] != -1) {
			/* node_inx holds [first,last] index pairs terminated
			 * by a negative value; recompute num_nodes from it
			 * and mark the grid cells for this job. */
			int j = 0;
			job_ptr->num_nodes = 0;
			while (job_ptr->node_inx[j] >= 0) {
				job_ptr->num_nodes +=
					(job_ptr->node_inx[j + 1] + 1) -
					 job_ptr->node_inx[j];
				set_grid_inx(job_ptr->node_inx[j],
					     job_ptr->node_inx[j + 1], count);
				j += 2;
			}

			/* num_cpus is repurposed to carry the display letter
			 * for this job (one of 62 symbols, 6 colors). */
			if (!params.commandline) {
				if ((count >= text_line_cnt) &&
				    (printed_jobs < (getmaxy(text_win) - 4))) {
					job_ptr->num_cpus =
						(int)letters[count%62];
					wattron(text_win,
						COLOR_PAIR(colors[count%6]));
					_print_text_job(job_ptr);
					wattroff(text_win,
						 COLOR_PAIR(colors[count%6]));
					printed_jobs++;
				}
			} else {
				job_ptr->num_cpus = (int)letters[count%62];
				_print_text_job(job_ptr);
			}
			count++;
		}
		if (count == 128)
			count = 0;
	}

	/* Pass 2: pending jobs, rendered with a "waiting..." node list. */
	for (i = 0; i < recs; i++) {
		job_ptr = &(new_job_ptr->job_array[i]);

		if (!IS_JOB_PENDING(job_ptr))
			continue;	/* job has completed */

		if (!params.commandline) {
			if ((count>=text_line_cnt) &&
			    (printed_jobs < (getmaxy(text_win) - 4))) {
				xfree(job_ptr->nodes);
				job_ptr->nodes = xstrdup("waiting...");
				job_ptr->num_cpus = (int) letters[count%62];
				wattron(text_win,
					COLOR_PAIR(colors[count%6]));
				_print_text_job(job_ptr);
				wattroff(text_win,
					 COLOR_PAIR(colors[count%6]));
				printed_jobs++;
			}
		} else {
			xfree(job_ptr->nodes);
			job_ptr->nodes = xstrdup("waiting...");
			job_ptr->num_cpus = (int) letters[count%62];
			_print_text_job(job_ptr);
			printed_jobs++;
		}
		count++;

		if (count == 128)
			count = 0;
	}

	if (params.commandline && params.iterate)
		printf("\n");

	if (!params.commandline)
		main_ycord++;

	/* Retain the current response as the cache for the next call. */
	job_info_ptr = new_job_ptr;
	return;
}
Esempio n. 12
0
/* _print_job - print the specified job's information.
 *
 * Loads job data (one job, one user's jobs, or all jobs depending on
 * params), caching the previous response in the static old_job_ptr so
 * later calls can request only changes since the last update.  clear_old
 * forces a full reload by zeroing the cached timestamp.
 * Returns SLURM_SUCCESS or SLURM_ERROR. */
static int
_print_job ( bool clear_old )
{
	static job_info_msg_t * old_job_ptr = NULL, * new_job_ptr;
	int error_code;
	uint16_t show_flags = 0;

	if (params.all_flag || (params.job_list && list_count(params.job_list)))
		show_flags |= SHOW_ALL;

	/* We require detail data when CPUs are requested */
	if (params.format && strstr(params.format, "C"))
		show_flags |= SHOW_DETAIL;

	if (old_job_ptr) {
		if (clear_old)
			old_job_ptr->last_update = 0;
		if (params.job_id) {
			error_code = slurm_load_job(
				&new_job_ptr, params.job_id,
				show_flags);
		} else if (params.user_id) {
			error_code = slurm_load_job_user(&new_job_ptr,
							 params.user_id,
							 show_flags);
		} else {
			/* Incremental load since the cached timestamp. */
			error_code = slurm_load_jobs(
				old_job_ptr->last_update,
				&new_job_ptr, show_flags);
		}
		/* Fresh data replaces the cache; "no change" keeps it. */
		if (error_code ==  SLURM_SUCCESS)
			slurm_free_job_info_msg( old_job_ptr );
		else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
			error_code = SLURM_SUCCESS;
			new_job_ptr = old_job_ptr;
		}
	} else if (params.job_id) {
		error_code = slurm_load_job(&new_job_ptr, params.job_id,
					    show_flags);
	} else if (params.user_id) {
		error_code = slurm_load_job_user(&new_job_ptr, params.user_id,
						 show_flags);
	} else {
		error_code = slurm_load_jobs((time_t) NULL, &new_job_ptr,
					     show_flags);
	}

	if (error_code) {
		slurm_perror ("slurm_load_jobs error");
		return SLURM_ERROR;
	}
	old_job_ptr = new_job_ptr;
	/* BUG FIX: the condition read (params.job_id || params.job_id);
	 * the second operand was clearly meant to be params.user_id, so
	 * that a filtered (single-job OR single-user) response also
	 * invalidates the timestamp — a partial buffer must never seed an
	 * incremental reload of all jobs. */
	if (params.job_id || params.user_id)
		old_job_ptr->last_update = (time_t) 0;

	if (params.verbose) {
		printf ("last_update_time=%ld records=%u\n",
			(long) new_job_ptr->last_update,
			new_job_ptr->record_count);
	}

	/* Fall back to the default squeue formats when none was given. */
	if (!params.format && !params.format_long) {
		if (params.long_list) {
			xstrcat(params.format,
				"%.18i %.9P %.8j %.8u %.8T %.10M %.9l %.6D %R");
		} else {
			xstrcat(params.format,
				"%.18i %.9P %.8j %.8u %.2t %.10M %.6D %R");
		}
	}

	if (!params.format_list) {
		if (params.format)
			parse_format(params.format);
		else if (params.format_long)
			parse_long_format(params.format_long);
	}

	print_jobs_array(new_job_ptr->job_array, new_job_ptr->record_count,
			 params.format_list) ;
	return SLURM_SUCCESS;
}
Esempio n. 13
0
/* Fill *p_p_batch_queues with one bridge_batch_queue_t per SLURM
 * partition (optionally filtered by batch_queue_name), including
 * per-queue job counts and derived min/max core limits.
 *
 * If *p_p_batch_queues is NULL an array is allocated (caller frees);
 * otherwise at most *p_batch_queues_nb existing slots are filled.
 * Returns 0 on success, -1 on failure. */
int
get_batch_queues(bridge_batch_manager_t* p_batch_manager,
                 bridge_batch_queue_t** p_p_batch_queues,
                 int* p_batch_queues_nb, char* batch_queue_name)
{
    int fstatus=-1;

    int i,j;

    int queue_nb=0;
    int stored_queue_nb=0;

    bridge_batch_queue_t* bn;

    partition_info_msg_t* ppim;
    partition_info_t* ppi;

    job_info_msg_t* pjim;
    job_info_t* pji;

    node_info_msg_t* pnim;
    node_info_t* pni;

    /* get slurm partition infos */
    if (slurm_load_partitions(0,&ppim,SHOW_ALL) != 0) {
        DEBUG3_LOGGER("unable to get slurm partitions infos");
        ppim=NULL;
        goto exit;
    }

    /* get nodes status */
    if(slurm_load_node(0,&pnim,SHOW_ALL)) {
        DEBUG3_LOGGER("unable to get nodes informations");
        slurm_free_partition_info_msg(ppim);
        pnim=NULL;
        goto exit;
    }

    /* get slurm job infos */
    if (slurm_load_jobs(0,&pjim,SHOW_ALL) != 0) {
        DEBUG3_LOGGER("unable to get allocations informations");
        slurm_free_partition_info_msg(ppim);
        slurm_free_node_info_msg(pnim);
        goto exit;
    }

    /* build/initialize storage structures */
    queue_nb = ppim->record_count;
    if (*p_p_batch_queues != NULL) {
        /* caller-provided array: never fill beyond its capacity */
        if (*p_batch_queues_nb < queue_nb)
            queue_nb=*p_batch_queues_nb;
    }
    else {
        *p_p_batch_queues = (bridge_batch_queue_t*)
                            malloc(queue_nb*(sizeof(bridge_batch_queue_t)+1));
        if (*p_p_batch_queues == NULL) {
            *p_batch_queues_nb = 0;
            queue_nb = *p_batch_queues_nb;
        }
        else {
            *p_batch_queues_nb = queue_nb;
        }
    }
    stored_queue_nb=0;

    /* fill queue structures */
    for (i=0; i<ppim->record_count && stored_queue_nb<queue_nb; i++) {

        /* get partition pointer */
        ppi=ppim->partition_array+i;

        if (ppi->name == NULL)
            continue;

        /* queue name filter */
        if (batch_queue_name != NULL &&
                strcmp(batch_queue_name,ppi->name) != 0)
            continue;

        bn = &(*p_p_batch_queues)[stored_queue_nb];

        /* put default values */
        init_batch_queue(p_batch_manager,bn);

        /* queue Name */
        bn->name=strdup(ppi->name);

        /* BUG FIX: the flag test used '|' (always non-zero, so every
         * queue was reported as the default one); '&' actually tests
         * the PART_FLAG_DEFAULT bit. */
        bn->default_queue = (uint32_t) ( ppi->flags & PART_FLAG_DEFAULT);
        bn->priority = (uint32_t) ppi->priority;

        /* queue activity */
        if(ppi->state_up == PARTITION_UP) {
            bn->activity = BRIDGE_BATCH_QUEUE_ACTIVITY_ACTIVE ;
            bn->state = BRIDGE_BATCH_QUEUE_STATE_OPENED ;
        } else if (ppi->state_up == PARTITION_DRAIN) {
            bn->activity = BRIDGE_BATCH_QUEUE_ACTIVITY_ACTIVE ;
            bn->state = BRIDGE_BATCH_QUEUE_STATE_CLOSED ;
        } else if (ppi->state_up == PARTITION_DOWN) {
            bn->activity = BRIDGE_BATCH_QUEUE_ACTIVITY_INACTIVE ;
            bn->state = BRIDGE_BATCH_QUEUE_STATE_OPENED ;
        } else if (ppi->state_up == PARTITION_INACTIVE) {
            bn->activity = BRIDGE_BATCH_QUEUE_ACTIVITY_INACTIVE ;
            bn->state = BRIDGE_BATCH_QUEUE_STATE_CLOSED ;
        } else {
            bn->activity = BRIDGE_BATCH_QUEUE_ACTIVITY_UNKNOWN ;
            bn->state = BRIDGE_BATCH_QUEUE_STATE_UNKNOWN ;
        }

        /* max times (SLURM stores minutes; bridge wants seconds) */
        if ( ppi->max_time != INFINITE )
            bn->seq_time_max = (uint32_t) ppi->max_time * 60 ;
        else
            bn->seq_time_max = NO_LIMIT;
        bn->par_time_max = bn->seq_time_max ;

        /* count this partition's jobs by base state */
        for ( j=0 ; j < pjim->record_count ; j++ ) {

            pji=pjim->job_array+j;

            if ( strcmp(pji->partition,ppi->name) != 0 )
                continue;

            switch ( pji->job_state & JOB_STATE_BASE ) {
            case JOB_PENDING :
                bn->jobs_nb++;
                bn->pending_jobs_nb++;
                break;
            case JOB_RUNNING :
                bn->jobs_nb++;
                bn->running_jobs_nb++;
                break;
            case JOB_SUSPENDED :
                bn->jobs_nb++;
                bn->syssuspended_jobs_nb++;
                break;
            }

        }

        /* Slurm does not provide information about Min and Max cpus per
         * partition. So we use the following method :
         *
         * if partition->name ~= /.*_seq/ min=max=1
         * otherwise, calculate it using MinNodes, MaxNodes and nodes
         * informations
         */

        int done = 0 ;
        char * p;
        p = rindex(ppi->name,'_');
        if ( p != NULL ) {
            if ( strcmp(p+1,"seq") == 0 ) {
                done = 1;
                bn->par_cores_nb_min = 1;
                bn->par_cores_nb_max = 1;
            }
        }

        if ( ! done ) {
            /* use partition nodes information to build the min and max */
            /* number of cores (only min and max nodes number are provided */
            /* by slurm so we have to build this information) */
            uint32_t max_cpus_per_node=0;
            uint32_t min_cpus_per_node=-1;
            bridge_nodelist_t list1,list2;
            bridge_nodelist_init(&list1,NULL,0);
            bridge_nodelist_add_nodes(&list1,ppi->nodes);
            for ( j=0 ; j < pnim->record_count ; j++ ) {
                pni=pnim->node_array+j;
                bridge_nodelist_init(&list2,NULL,0);
                bridge_nodelist_add_nodes(&list2,pni->name);
                if(bridge_nodelist_intersects(&list1,&list2)==0) {
                    bridge_nodelist_free_contents(&list2);
                    continue;
                }
                if ( pni->cpus > max_cpus_per_node )
                    max_cpus_per_node = pni->cpus ;
                if ( pni->cpus < min_cpus_per_node )
                    min_cpus_per_node = pni->cpus ;
                bridge_nodelist_free_contents(&list2);
            }
            bridge_nodelist_free_contents(&list1);

            if ( max_cpus_per_node > 0 && ppi->max_nodes != INFINITE )
                bn->par_cores_nb_max = max_cpus_per_node * ppi->max_nodes ;
            if ( min_cpus_per_node < (uint32_t) -1 && ppi->min_nodes > 1 )
                bn->par_cores_nb_min = min_cpus_per_node * ppi->min_nodes ;
        }

        stored_queue_nb++;
    }

    fstatus=0;

    /* free slurm informations */
    slurm_free_job_info_msg(pjim);
    slurm_free_node_info_msg(pnim);
    slurm_free_partition_info_msg(ppim);

    /* shrink the array to the number of queues actually stored.
     * BUG FIX: never overwrite the pointer with realloc's result
     * directly — on failure the original block would have leaked and
     * the caller left with a NULL pointer but valid entries. */
    if(stored_queue_nb<queue_nb) {
        bridge_batch_queue_t* tmp = (bridge_batch_queue_t*)
                          realloc(*p_p_batch_queues,
                                  stored_queue_nb*(sizeof(bridge_batch_queue_t)+1));
        if (tmp != NULL || stored_queue_nb == 0)
            *p_p_batch_queues = tmp;
        /* on shrink failure the original (larger) buffer stays valid */
        *p_batch_queues_nb = stored_queue_nb;
    }

exit:

    return fstatus;
}
Esempio n. 14
0
/* Snapshot all SLURM jobs: dump a human-readable record file
 * (simqsnap.out.<timestamp>) and a binary trace file.
 * Returns 0 on success, -1/exit(-1) on failure. */
int
main(int argc, char* argv[]) {
	job_info_msg_t*   jobListPtr = NULL;
	slurm_job_info_t* job_ptr = NULL;
	int               rv, cnt, ix;
	struct tm*        localTime;

	procArgs(argc, argv);

	now       = time(NULL);
	localTime = localtime(&now);

	/* Time-stamped output name, e.g. simqsnap.out.20240101T120000 */
	sprintf(outfname, "simqsnap.out.%04d%02d%02dT%02d%02d%02d",
			localTime->tm_year+1900, localTime->tm_mon+1,
			localTime->tm_mday, localTime->tm_hour,
			localTime->tm_min, localTime->tm_sec);

	printf("Output file name: (%s)\n", outfname);

	if (! (fp = fopen(outfname,"w"))) {
		printf("Can NOT open output file %s.\nAbort!\n", outfname);
		exit(-1);
	}

	if((trace_file = open(trace_file_name, O_CREAT | O_RDWR, S_IRUSR |
			S_IWUSR | S_IRGRP | S_IROTH)) < 0) {
		printf("Error opening file %s\n", trace_file_name);
		fclose(fp);	/* BUG FIX: fp leaked on this error path */
		return -1;
	}

	printf( "========================================================\n");
	printf( "========================================================\n\n");

	printf (    "Now: %s", asctime(localTime));
	fprintf(fp, "Now:              %ld\n", now);

	rv = slurm_load_jobs((time_t)NULL, &jobListPtr, SHOW_ALL);
	if (rv != SLURM_PROTOCOL_SUCCESS) {
		/* BUG FIX: typo "Couldn not" in the error message */
		printf("Error!  Could not read Slurm jobs.\nAbort!");
		fclose(fp);
		close(trace_file);
		exit(-1);
	} else {
		cnt = jobListPtr->record_count;

		printf("Record count: %d\n", cnt);
		fprintf(fp, "Number of records %d\n", cnt);
	}

	job_ptr = jobListPtr->job_array;

	for( ix = 0; ix < cnt; ix++ ) {

		fprintf(fp,"JOB RECORD %d) %u\n", ix+1, job_ptr->job_id);
		recordRecord(job_ptr);
		if (recordJob(job_ptr) < 0) {
			printf("Warning! Job # %d did not get properly written "
			       "to %s\n", ix, trace_file_name);
		}

		++job_ptr;
	}

	/* BUG FIX: release resources that were previously leaked */
	slurm_free_job_info_msg(jobListPtr);
	fclose(fp);
	close(trace_file);
	free (trace_file_name);

	return 0;
}