// we're going to contact this project for reasons other than work fetch;
// decide if we should piggy-back a work fetch request.
//
void WORK_FETCH::compute_work_request(PROJECT* p) {
    clear_request();
    if (config.fetch_minimal_work && gstate.had_or_requested_work) return;
    if (p->dont_request_more_work) return;
    if (p->non_cpu_intensive) {
        if (!has_a_job(p)) {
            cpu_work_fetch.req_secs = 1;
        }
        return;
    }

    // See if this is the project we'd ask for work anyway.
    // Temporarily clear resource backoffs,
    // since we're going to contact this project in any case.
    //
    double cpu_save = p->cpu_pwf.backoff_time;
    double cuda_save = p->cuda_pwf.backoff_time;
    double ati_save = p->ati_pwf.backoff_time;
    p->cpu_pwf.backoff_time = 0;
    p->cuda_pwf.backoff_time = 0;
    p->ati_pwf.backoff_time = 0;
    PROJECT* pbest = choose_project();
    p->cpu_pwf.backoff_time = cpu_save;
    p->cuda_pwf.backoff_time = cuda_save;
    p->ati_pwf.backoff_time = ati_save;
    if (p == pbest) {
        // Ask for work for all devices w/ a shortfall.
        // Otherwise we can have a situation where a GPU is idle,
        // we ask only for GPU work, and the project never has any
        //
        work_fetch.set_all_requests(pbest);
        return;
    }

    // if not, don't request any work
    //
    clear_request();
}
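
// Illustrative sketch (not part of the original source): compute_work_request()
// is intended for the case where the client is about to contact a project for
// some other reason and wants to decide whether to piggy-back a work request
// on that RPC. The helper name and the contacting_for_other_reason flag below
// are hypothetical assumptions for illustration; the real call site may differ.
//
static void piggyback_work_request_sketch(PROJECT* p, bool contacting_for_other_reason) {
    if (!contacting_for_other_reason) return;
    // Fills in per-resource requests only if p is the project that
    // choose_project() would pick anyway; otherwise the request stays cleared.
    work_fetch.compute_work_request(p);
}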
// Choose the best project to ask for work for this resource,
// given the specific criterion
//
PROJECT* RSC_WORK_FETCH::choose_project(int criterion) {
    PROJECT* pbest = NULL;

    switch (criterion) {
    case FETCH_IF_IDLE_INSTANCE:
        if (nidle_now == 0) return NULL;
        break;
    case FETCH_IF_MAJOR_SHORTFALL:
        if (saturated_time > gstate.work_buf_min()) return NULL;
        break;
    case FETCH_IF_MINOR_SHORTFALL:
        if (saturated_time > gstate.work_buf_total()) return NULL;
        break;
    case FETCH_IF_PROJECT_STARVED:
        if (deadline_missed_instances >= ninstances) return NULL;
        break;
    }

    for (unsigned i=0; i<gstate.projects.size(); i++) {
        PROJECT* p = gstate.projects[i];
        if (p->pwf.cant_fetch_work_reason) continue;
        if (!project_state(p).may_have_work) continue;
        RSC_PROJECT_WORK_FETCH& rpwf = project_state(p);
        if (rpwf.anon_skip) continue;
        switch (criterion) {
        case FETCH_IF_MINOR_SHORTFALL:
            if (wacky_dcf(p)) continue;
            if (!p->resource_share) continue;
            break;
        case FETCH_IF_MAJOR_SHORTFALL:
            if (wacky_dcf(p)) continue;
            if (!p->resource_share) continue;
            break;
        case FETCH_IF_PROJECT_STARVED:
            if (p->sched_priority < 0) continue;
            if (rpwf.nused_total >= ninstances) continue;
            if (!p->resource_share) continue;
            break;
        }
        if (pbest) {
            if (!p->resource_share) {
                continue;
            }
            if (pbest->sched_priority > p->sched_priority) {
                continue;
            }
        }
        pbest = p;
    }
    if (!pbest) return NULL;

    // decide how much work to request from each resource
    //
    work_fetch.clear_request();
    switch (criterion) {
    case FETCH_IF_IDLE_INSTANCE:
    case FETCH_IF_MAJOR_SHORTFALL:
        set_request(pbest);
        break;
    case FETCH_IF_PROJECT_STARVED:
        set_request(pbest);
        break;
    case FETCH_IF_MINOR_SHORTFALL:
        // in this case, potentially request work for all resources
        //
        if (pbest->sched_priority < 0) {
            set_request(pbest);
        } else {
            work_fetch.set_all_requests(pbest);
        }
        break;
    }

    // in principle there should be a nonzero request.
    // check, just in case
    //
    if (!req_secs && !req_instances) {
        if (log_flags.work_fetch_debug) {
            msg_printf(pbest, MSG_INFO,
                "[work_fetch] error: project chosen but zero request"
            );
        }
        return 0;
    }

    if (log_flags.work_fetch_debug) {
        msg_printf(pbest, MSG_INFO,
            "[work_fetch] chosen: %s %s: %.2f inst, %.2f sec",
            criterion_name(criterion), rsc_name(rsc_type),
            req_instances, req_secs
        );
    }
    return pbest;
}
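
// Illustrative sketch (not part of the original source): a caller could try
// the fetch criteria in decreasing order of urgency and stop at the first
// project that yields a request. The helper name, the exact criteria ordering,
// and the rwf parameter are assumptions for illustration only; the actual
// driver loop in the client may differ.
//
static PROJECT* choose_project_by_urgency_sketch(RSC_WORK_FETCH& rwf) {
    static const int criteria[] = {
        FETCH_IF_IDLE_INSTANCE,     // an instance of this resource is idle now
        FETCH_IF_MAJOR_SHORTFALL,   // buffered work is below the min buffer
        FETCH_IF_MINOR_SHORTFALL,   // buffered work is below the total buffer
        FETCH_IF_PROJECT_STARVED    // a project is using fewer instances than it could
    };
    for (unsigned i=0; i<sizeof(criteria)/sizeof(criteria[0]); i++) {
        PROJECT* p = rwf.choose_project(criteria[i]);
        if (p) return p;
    }
    return NULL;
}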