/*
 * init_scheduling_cycle - per-cycle fair-share bookkeeping and job ordering.
 *
 * When fair share is enabled: credits each group in last_running with the
 * usage its job accumulated since the previous cycle, resets temp_usage,
 * applies any overdue half-life decays to the fair-share tree, and
 * periodically writes usage to disk.  Then (independently of fair share)
 * updates starvation state, sorts queues/jobs as configured, and primes the
 * job iterator via next_job(..., INITIALIZE).
 *
 * NOTE(review): relies on file-scope state defined elsewhere in this file
 * (last_running/last_running_size, last_decay, last_sync, cstat, conf) —
 * presumably persisted between scheduling cycles; confirm against the rest
 * of the file.
 *
 *   sinfo - the server/universe info for this scheduling cycle
 *
 * returns 1 (success)
 */
int init_scheduling_cycle(server_info *sinfo)
  {
  group_info *user;  /* the user (group) for the running jobs of the last cycle */
  queue_info *qinfo; /* used to cycle through the queues to sort the jobs */
  char decayed = 0;  /* boolean: have we decayed usage? */
  time_t t;          /* used in decaying fair share */
  int i, j;

  if (cstat.fair_share)
    {
    if (last_running != NULL)
      {
      /* add the usage which was accumulated between the last cycle and this
       * one and calculate a new value
       */
      for (i = 0; i < last_running_size ; i++)
        {
        job_info** jobs;
        user = last_running[i].ginfo;
#if HIGH_PRECISION_FAIRSHARE
        jobs = sinfo -> jobs; /* check all jobs (exiting, completed, running) */
#else
        jobs = sinfo -> running_jobs; /* check only running */
#endif

        /* find this cycle's incarnation of the job we saw running last
         * cycle, matching by job name among jobs in an accountable state */
        for (j = 0; jobs[j] != NULL; j++)
          {
          if (jobs[j] -> is_completed || jobs[j] -> is_exiting ||
              jobs[j] -> is_running)
            if (!strcmp(last_running[i].name, jobs[j] -> name))
              break;
          }

        if (jobs[j] != NULL)
          {
          /* credit only the delta accrued since the snapshot taken last
           * cycle, not the job's total usage */
          user -> usage +=
            calculate_usage_value(jobs[j] -> resused) -
            calculate_usage_value(last_running[i].resused);
          }
        }

      /* assign usage into temp usage since temp usage is used for usage
       * calculations.  Temp usage starts at usage and can be modified later.
       */
      for (i = 0; i < last_running_size; i++)
        last_running[i].ginfo -> temp_usage = last_running[i].ginfo -> usage;
      }

    /* The half life for the fair share tree might have passed since the last
     * scheduling cycle.  For that matter, several half lives could have
     * passed.  If this is the case, perform as many decays as necessary.
     */
    t = cstat.current_time;

    while (t - last_decay > conf.half_life)
      {
      sched_log(PBSEVENT_DEBUG2, PBS_EVENTCLASS_SERVER, "",
                "Decaying Fairshare Tree");
      decay_fairshare_tree(conf.group_root);
      t -= conf.half_life;
      decayed = 1;
      }

    if (decayed)
      {
      /* set last_decay to the time the most recent half-life should
       * actually have occurred, so the next decay is not delayed by the
       * gap between cycles */
      last_decay = cstat.current_time -
        (cstat.current_time - last_decay) % conf.half_life;
      }

    /* periodically persist accumulated usage so it survives restarts */
    if (cstat.current_time - last_sync > conf.sync_time)
      {
      write_usage();
      last_sync = cstat.current_time;
      sched_log(PBSEVENT_DEBUG2, PBS_EVENTCLASS_SERVER, "", "Usage Sync");
      }
    }

  if (cstat.help_starving_jobs)
    cstat.starving_job = update_starvation(sinfo -> jobs);

  /* sort queues by priority if requested */
  if (cstat.sort_queues)
    qsort(sinfo -> queues, sinfo -> num_queues, sizeof(queue_info *),
          cmp_queue_prio_dsc);

  if (cstat.sort_by[0].sort != NO_SORT)
    {
    if (cstat.by_queue || cstat.round_robin)
      {
      /* per-queue ordering: sort each queue's job list independently */
      for (i = 0; i < sinfo -> num_queues; i++)
        {
        qinfo = sinfo -> queues[i];
        qsort(qinfo -> jobs, qinfo -> sc.total, sizeof(job_info *), cmp_sort);
        }
      }
    else
      /* server-wide ordering: one sort over the full job list */
      qsort(sinfo -> jobs, sinfo -> sc.total, sizeof(job_info *), cmp_sort);
    }

  next_job(sinfo, INITIALIZE);

  return 1; /* SUCCESS */
  }
/*
 *
 * update_usage_on_run - charge a user's usage information when one of
 *                       their jobs starts to run
 *
 * jinfo - the job which just started running
 *
 * returns nothing
 *
 */
void update_usage_on_run(job_info *jinfo)
  {
  group_info *owner = jinfo -> ginfo;

  /* charge the requested resources against the in-cycle (temporary) usage
   * so subsequent fair-share decisions this cycle see the job as running */
  owner -> temp_usage += calculate_usage_value(jinfo -> resreq);
  }