// handle a workunit which has new results
//
// Older variant of the validator's per-WU driver: assumes the WU has no
// canonical result yet, tries to form a quorum from the successful results,
// validates them as a set, grants credit, and schedules the WU's next
// transition.  IDs are plain int here ("%d" formats) — contrast with the
// DB_ID_TYPE/"%lu" variant elsewhere in this file.
//
// Returns 0 on success; a nonzero error code aborts processing of this WU
// (the caller decides whether to exit).
//
int handle_wu(
    DB_VALIDATOR_ITEM_SET& validator,
    std::vector<VALIDATOR_ITEM>& items
) {
    // NOTE(review): canonical_result_index is declared but never used in
    // this variant — presumably left over from the canonical-result branch
    // present in the newer version; confirm before removing.
    int canonical_result_index = -1;
    bool update_result, retry;
    TRANSITION_TIME transition_time = NO_CHANGE;   // what to do with wu.transition_time at 'leave:'
    int retval = 0, canonicalid = 0, x;
    double credit = 0;
    unsigned int i;

    // all items belong to the same WU; use the first as the WU record
    WORKUNIT& wu = items[0].wu;
    g_wup = &wu;
    vector<RESULT> results;
    vector<DB_HOST_APP_VERSION> host_app_versions, host_app_versions_orig;
    int nsuccess_results;

    // Here if WU doesn't have a canonical result yet.
    // Try to get one
    log_messages.printf(MSG_NORMAL,
        "[WU#%d %s] handle_wu(): No canonical result yet\n",
        wu.id, wu.name
    );
    ++log_messages;   // increase log indent for the rest of this WU

    // make a vector of the successful results,
    // and a parallel vector of host_app_versions
    //
    for (i=0; i<items.size(); i++) {
        RESULT& result = items[i].res;
        if ((result.server_state == RESULT_SERVER_STATE_OVER) &&
            (result.outcome == RESULT_OUTCOME_SUCCESS)
        ) {
            results.push_back(result);
            DB_HOST_APP_VERSION hav;
            retval = hav_lookup(hav, result.hostid,
                generalized_app_version_id(result.app_version_id, result.appid)
            );
            if (retval) {
                hav.host_id=0;   // flag that it's missing
            }
            // keep an original copy so update_validator() can write a diff later
            host_app_versions.push_back(hav);
            host_app_versions_orig.push_back(hav);
        }
    }
    log_messages.printf(MSG_DEBUG,
        "[WU#%d %s] Found %d successful results\n",
        wu.id, wu.name, (int)results.size()
    );
    if (results.size() >= (unsigned int)wu.min_quorum) {
        log_messages.printf(MSG_DEBUG,
            "[WU#%d %s] Enough for quorum, checking set.\n",
            wu.id, wu.name
        );

        double dummy;
        // project-supplied set check; may pick a canonical result and/or
        // ask for a retry (e.g. transient file access failure)
        retval = check_set(results, wu, canonicalid, dummy, retry);
        if (retval) {
            log_messages.printf(MSG_CRITICAL,
                "[WU#%d %s] check_set returned %d, exiting\n",
                wu.id, wu.name, retval
            );
            return retval;
        }
        if (retry) transition_time = DELAYED;

        if (credit_from_wu) {
            retval = get_credit_from_wu(wu, results, credit);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[WU#%d %s] get_credit_from_wu returned %d\n",
                    wu.id, wu.name, retval
                );
                return retval;
            }
        }

        if (canonicalid) {
            // compute credit across the whole set (also updates statistics)
            retval = assign_credit_set(
                wu, results, app, app_versions, host_app_versions,
                max_granted_credit, credit
            );
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[WU#%d %s] assign_credit_set() returned %d\n",
                    wu.id, wu.name, retval
                );
                transition_time = DELAYED;
                goto leave;
            }
        }
        if (max_granted_credit && credit>max_granted_credit) {
            credit = max_granted_credit;   // clamp to the configured cap
        }

        // scan results.
        // update as needed, and count the # of results
        // that are still outcome=SUCCESS
        // (some may have changed to VALIDATE_ERROR)
        //
        nsuccess_results = 0;
        for (i=0; i<results.size(); i++) {
            RESULT& result = results[i];
            DB_HOST_APP_VERSION& hav = host_app_versions[i];
            DB_HOST_APP_VERSION& hav_orig = host_app_versions_orig[i];
            update_result = false;
            bool update_host = false;
            if (result.outcome == RESULT_OUTCOME_VALIDATE_ERROR) {
                transition_time = IMMEDIATE;
                update_result = true;
            } else {
                nsuccess_results++;
            }

            DB_HOST host;
            HOST host_initial;
            // only VALID/INVALID outcomes need the host record;
            // fall-through across the two cases is intentional
            switch (result.validate_state) {
            case VALIDATE_STATE_VALID:
            case VALIDATE_STATE_INVALID:
                retval = host.lookup_id(result.hostid);
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "[RESULT#%d] lookup of host %d failed %d\n",
                        result.id, result.hostid, retval
                    );
                    continue;   // skip this result entirely
                }
                host_initial = host;   // snapshot for update_diff_validator()
            }

            switch (result.validate_state) {
            case VALIDATE_STATE_VALID:
                update_result = true;
                update_host = true;
                retval = is_valid(host, result, wu, host_app_versions[i]);
                if (retval) {
                    // non-fatal: log and continue granting credit
                    log_messages.printf(MSG_DEBUG,
                        "[RESULT#%d %s] is_valid() failed: %d\n",
                        result.id, result.name, retval
                    );
                }
                grant_credit(host, result.sent_time, result.cpu_time, result.granted_credit);
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%d %s] Valid; granted %f credit [HOST#%d]\n",
                    result.id, result.name, result.granted_credit, result.hostid
                );
                break;
            case VALIDATE_STATE_INVALID:
                update_result = true;
                update_host = true;
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%d %s] Invalid [HOST#%d]\n",
                    result.id, result.name, result.hostid
                );
                is_invalid(host_app_versions[i]);
                break;
            case VALIDATE_STATE_INIT:
                // checked but no verdict yet: mark inconclusive so it is
                // not re-tested until more results arrive
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%d %s] Inconclusive [HOST#%d]\n",
                    result.id, result.name, result.hostid
                );
                result.validate_state = VALIDATE_STATE_INCONCLUSIVE;
                update_result = true;
                break;
            }

            // persist changes; hav.host_id==0 means the lookup failed above
            // NOTE(review): retval from these two updates is not checked here
            if (hav.host_id) {
                retval = hav.update_validator(hav_orig);
            }
            if (update_host) {
                retval = host.update_diff_validator(host_initial);
            }
            if (update_result) {
                retval = validator.update_result(result);
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "[RESULT#%d %s] result.update() failed: %d\n",
                        result.id, result.name, retval
                    );
                }
            }
        }
        if (canonicalid) {
            // if we found a canonical result,
            // trigger the assimilator, but do NOT trigger
            // the transitioner - doing so creates a race condition
            //
            transition_time = NEVER;
            log_messages.printf(MSG_DEBUG,
                "[WU#%d %s] Found a canonical result: id=%d\n",
                wu.id, wu.name, canonicalid
            );
            wu.canonical_resultid = canonicalid;
            wu.canonical_credit = credit;
            wu.assimilate_state = ASSIMILATE_READY;

            // don't need to send any more results
            //
            for (i=0; i<items.size(); i++) {
                RESULT& result = items[i].res;
                if (result.server_state != RESULT_SERVER_STATE_UNSENT) {
                    continue;
                }
                result.server_state = RESULT_SERVER_STATE_OVER;
                result.outcome = RESULT_OUTCOME_DIDNT_NEED;
                retval = validator.update_result(result);
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "[RESULT#%d %s] result.update() failed: %d\n",
                        result.id, result.name, retval
                    );
                }
            }
        } else {
            // here if no consensus.
            // check if #success results is too large
            //
            if (nsuccess_results > wu.max_success_results) {
                wu.error_mask |= WU_ERROR_TOO_MANY_SUCCESS_RESULTS;
                transition_time = IMMEDIATE;
            }

            // if #success results >= target_nresults,
            // we need more results, so bump target_nresults
            // NOTE: nsuccess_results should never be > target_nresults,
            // but accommodate that if it should happen
            //
            if (nsuccess_results >= wu.target_nresults) {
                wu.target_nresults = nsuccess_results+1;
                transition_time = IMMEDIATE;
            }
        }
    }

leave:
    --log_messages;   // restore log indent
    // apply the transition-time decision accumulated above
    switch (transition_time) {
    case IMMEDIATE:
        wu.transition_time = time(0);
        break;
    case DELAYED:
        // retry in 6 hours, but never push an earlier transition later
        x = time(0) + 6*3600;
        if (x < wu.transition_time) wu.transition_time = x;
        break;
    case NEVER:
        wu.transition_time = INT_MAX;
        break;
    case NO_CHANGE:
        break;
    }
    wu.need_validate = 0;

    retval = validator.update_workunit(wu);
    if (retval) {
        log_messages.printf(MSG_CRITICAL,
            "[WU#%d %s] update_workunit() failed: %d; exiting\n",
            wu.id, wu.name, retval
        );
        return retval;
    }
    return 0;
}
// A result timed out; penalize the corresponding host_app_version // static int result_timed_out( TRANSITIONER_ITEM res_item, TRANSITIONER_ITEM& wu_item ) { DB_HOST_APP_VERSION hav; char query[512], clause[512]; int gavid = generalized_app_version_id( res_item.res_app_version_id, wu_item.appid ); int retval = hav_lookup(hav, res_item.res_hostid, gavid); if (retval) { log_messages.printf(MSG_NORMAL, "result_timed_out(): hav_lookup failed: %s\n", boincerror(retval) ); return 0; } hav.turnaround.update_var( (double)wu_item.delay_bound, HAV_AVG_THRESH, HAV_AVG_WEIGHT, HAV_AVG_LIMIT ); int n = hav.max_jobs_per_day; if (n == 0) { n = config.daily_result_quota; } if (n > config.daily_result_quota) { n = config.daily_result_quota; } n -= 1; if (n < 1) { n = 1; } if (config.debug_quota) { log_messages.printf(MSG_NORMAL, "[quota] max_jobs_per_day for %d; %d->%d\n", gavid, hav.max_jobs_per_day, n ); } hav.max_jobs_per_day = n; hav.consecutive_valid = 0; sprintf(query, "turnaround_n=%.15e, turnaround_avg=%.15e, turnaround_var=%.15e, turnaround_q=%.15e, max_jobs_per_day=%d, consecutive_valid=%d", hav.turnaround.n, hav.turnaround.avg, hav.turnaround.var, hav.turnaround.q, hav.max_jobs_per_day, hav.consecutive_valid ); sprintf(clause, "host_id=%d and app_version_id=%d", hav.host_id, hav.app_version_id ); retval = hav.update_fields_noid(query, clause); if (retval) { log_messages.printf(MSG_CRITICAL, "CRITICAL result_timed_out(): hav updated failed: %s\n", boincerror(retval) ); } return 0; }
// handle a workunit which has new results
//
// Newer variant of the validator's per-WU driver (DB_ID_TYPE ids, "%lu"
// formats, dry_run support).  Two modes:
//  - WU already has a canonical result: compare each unchecked result
//    against it with check_pair(), grant the canonical result's credit to
//    new valid results.
//  - No canonical result yet: collect "viable" results, run check_set()
//    to look for a quorum/canonical result, decide credit (by set
//    computation, WU field, runtime, or none), and update every result.
// In both modes the accumulated transition_time decision is applied at
// 'leave:' and the WU row is written back (unless dry_run).
//
// Returns 0 on success; nonzero aborts processing of this WU.
//
int handle_wu(
    DB_VALIDATOR_ITEM_SET& validator,
    std::vector<VALIDATOR_ITEM>& items
) {
    int canonical_result_index = -1;
    bool update_result, retry;
    TRANSITION_TIME transition_time = NO_CHANGE;   // applied at 'leave:'
    int retval = 0, x;
    DB_ID_TYPE canonicalid = 0;
    double credit = 0;
    unsigned int i;

    // all items belong to the same WU; use the first as the WU record
    WORKUNIT& wu = items[0].wu;
    g_wup = &wu;

    if (wu.canonical_resultid) {
        log_messages.printf(MSG_NORMAL,
            "[WU#%lu %s] Already has canonical result %lu\n",
            wu.id, wu.name, wu.canonical_resultid
        );
        ++log_messages;   // increase log indent for the rest of this WU

        // Here if WU already has a canonical result.
        // Get unchecked results and see if they match the canonical result
        //
        for (i=0; i<items.size(); i++) {
            RESULT& result = items[i].res;
            if (result.id == wu.canonical_resultid) {
                canonical_result_index = i;
            }
        }
        if (canonical_result_index == -1) {
            // canonical result not in this item set (e.g. already purged);
            // nothing we can compare against
            log_messages.printf(MSG_CRITICAL,
                "[WU#%lu %s] Can't find canonical result %lu\n",
                wu.id, wu.name, wu.canonical_resultid
            );
            return 0;
        }

        RESULT& canonical_result = items[canonical_result_index].res;

        // scan this WU's results, and check the unchecked ones
        //
        for (i=0; i<items.size(); i++) {
            RESULT& result = items[i].res;

            // only finished, successful, not-yet-judged results are checked
            if (result.server_state != RESULT_SERVER_STATE_OVER) continue;
            if (result.outcome != RESULT_OUTCOME_SUCCESS) continue;
            switch (result.validate_state) {
            case VALIDATE_STATE_INIT:
            case VALIDATE_STATE_INCONCLUSIVE:
                break;
            default:
                continue;
            }
            log_messages.printf(MSG_NORMAL,
                "[WU#%lu] handle_wu(): testing result %lu\n",
                wu.id, result.id
            );

            check_pair(result, canonical_result, retry);
            if (retry) {
                // this usually means an NFS mount has failed;
                // arrange to try again later.
                //
                transition_time = DELAYED;
                goto leave;
            }
            update_result = false;
            if (result.outcome == RESULT_OUTCOME_VALIDATE_ERROR) {
                update_result = true;
            }

            // this might be last result, so let transitioner
            // trigger file delete etc. if needed
            //
            transition_time = IMMEDIATE;

            DB_HOST host;
            retval = host.lookup_id(result.hostid);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[RESULT#%lu] lookup of host %lu failed: %s\n",
                    result.id, result.hostid, boincerror(retval)
                );
                continue;   // skip this result entirely
            }
            HOST host_initial = host;   // snapshot for update_diff_validator()

            bool update_hav = false;
            DB_HOST_APP_VERSION hav;
            retval = hav_lookup(hav, result.hostid,
                generalized_app_version_id(result.app_version_id, result.appid)
            );
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[RESULT#%lu %s] hav_lookup returned %d\n",
                    result.id, result.name, retval
                );
                hav.host_id = 0;   // flag that it's missing
            }
            DB_HOST_APP_VERSION hav_orig = hav;
            // assign_credit_set() takes vectors; wrap the single hav/result
            vector<DB_HOST_APP_VERSION> havv;
            havv.push_back(hav);

            vector<RESULT> rv;
            switch (result.validate_state) {
            case VALIDATE_STATE_VALID:
                update_result = true;
                update_hav = true;
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%lu %s] pair_check() matched: setting result to valid\n",
                    result.id, result.name
                );
                retval = is_valid(host, result, wu, havv[0]);
                if (retval) {
                    // non-fatal: log and continue
                    log_messages.printf(MSG_NORMAL,
                        "[RESULT#%lu %s] is_valid() error: %s\n",
                        result.id, result.name, boincerror(retval)
                    );
                }
                // do credit computation, but grant credit of canonical result
                //
                rv.push_back(result);
                assign_credit_set(
                    wu, rv, app, app_versions, havv, max_granted_credit, credit
                );
                if (!no_credit) {
                    result.granted_credit = canonical_result.granted_credit;
                    grant_credit(host, result.sent_time, result.granted_credit);
                    if (config.credit_by_app) {
                        grant_credit_by_app(result, result.granted_credit);
                    }
                }
                break;
            case VALIDATE_STATE_INVALID:
                update_result = true;
                update_hav = true;
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%lu %s] pair_check() didn't match: setting result to invalid\n",
                    result.id, result.name
                );
                is_invalid(havv[0]);
            }
            if (hav.host_id && update_hav) {
                if (dry_run) {
                    log_messages.printf(MSG_NORMAL, "DB not updated (dry run)\n");
                } else {
                    log_messages.printf(MSG_NORMAL,
                        "[HOST#%lu AV#%lu] [outlier=%d] Updating HAV in DB. pfc.n=%f->%f\n",
                        havv[0].host_id, havv[0].app_version_id,
                        result.runtime_outlier, hav_orig.pfc.n, havv[0].pfc.n
                    );
                    // write havv[0] (modified by is_valid/assign_credit_set/
                    // is_invalid above), diffed against the original snapshot
                    retval=havv[0].update_validator(hav_orig);
                    if (retval) {
                        log_messages.printf(MSG_CRITICAL,
                            "[HOST#%lu AV%lu] hav.update_validator() failed: %s\n",
                            hav.host_id, hav.app_version_id, boincerror(retval)
                        );
                    }
                }
            }
            // NOTE(review): host update is unconditional here (no dry_run
            // guard, retval unchecked) — confirm whether that is intended
            host.update_diff_validator(host_initial);
            if (update_result) {
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%lu %s] granted_credit %f\n",
                    result.id, result.name, result.granted_credit
                );
                if (dry_run) {
                    log_messages.printf(MSG_NORMAL, "DB not updated (dry run)\n");
                } else {
                    retval = validator.update_result(result);
                    if (retval) {
                        log_messages.printf(MSG_CRITICAL,
                            "[RESULT#%lu %s] Can't update result: %s\n",
                            result.id, result.name, boincerror(retval)
                        );
                    }
                }
            }
        }
    } else {
        // Here if WU doesn't have a canonical result yet.
        // Try to get one

        vector<RESULT> viable_results;
        vector<DB_HOST_APP_VERSION> host_app_versions, host_app_versions_orig;

        log_messages.printf(MSG_NORMAL,
            "[WU#%lu %s] handle_wu(): No canonical result yet\n",
            wu.id, wu.name
        );
        ++log_messages;   // increase log indent for the rest of this WU

        // make a vector of the "viable" (i.e. possibly canonical) results,
        // and a parallel vector of host_app_versions
        //
        for (i=0; i<items.size(); i++) {
            RESULT& result = items[i].res;
            if (result.server_state != RESULT_SERVER_STATE_OVER) continue;
            if (result.outcome != RESULT_OUTCOME_SUCCESS) continue;
            if (result.validate_state == VALIDATE_STATE_INVALID) continue;
            viable_results.push_back(result);
            DB_HOST_APP_VERSION hav;
            retval = hav_lookup(hav, result.hostid,
                generalized_app_version_id(result.app_version_id, result.appid)
            );
            if (retval) {
                hav.host_id=0;   // flag that it's missing
            }
            // keep an original copy so update_validator() can write a diff later
            host_app_versions.push_back(hav);
            host_app_versions_orig.push_back(hav);
        }
        log_messages.printf(MSG_DEBUG,
            "[WU#%lu %s] Found %d viable results\n",
            wu.id, wu.name, (int)viable_results.size()
        );
        if (viable_results.size() >= (unsigned int)wu.min_quorum) {
            log_messages.printf(MSG_DEBUG,
                "[WU#%lu %s] Enough for quorum, checking set.\n",
                wu.id, wu.name
            );

            double dummy;
            // project-supplied set check; may pick a canonical result and/or
            // ask for a retry (e.g. transient file access failure)
            retval = check_set(viable_results, wu, canonicalid, dummy, retry);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[WU#%lu %s] check_set() error: %s\n",
                    wu.id, wu.name, boincerror(retval)
                );
                return retval;
            }
            if (retry) transition_time = DELAYED;

            // if we found a canonical instance, decide on credit
            //
            if (canonicalid) {
                // always do the credit calculation, to update statistics,
                // even if we're granting credit a different way
                //
                retval = assign_credit_set(
                    wu, viable_results, app, app_versions,
                    host_app_versions, max_granted_credit, credit
                );
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "[WU#%lu %s] assign_credit_set(): %s\n",
                        wu.id, wu.name, boincerror(retval)
                    );
                    transition_time = DELAYED;
                    goto leave;
                }

                // credit source priority: WU field > runtime > none;
                // otherwise keep the assign_credit_set() value
                if (credit_from_wu) {
                    retval = get_credit_from_wu(wu, viable_results, credit);
                    if (retval) {
                        log_messages.printf(MSG_CRITICAL,
                            "[WU#%lu %s] get_credit_from_wu(): credit not specified in WU\n",
                            wu.id, wu.name
                        );
                        credit = 0;
                    }
                } else if (credit_from_runtime) {
                    credit = 0;
                    for (i=0; i<viable_results.size(); i++) {
                        RESULT& result = viable_results[i];
                        if (result.id == canonicalid) {
                            DB_HOST host;
                            retval = host.lookup_id(result.hostid);
                            if (retval) {
                                log_messages.printf(MSG_CRITICAL,
                                    "[WU#%lu %s] host %lu lookup failed\n",
                                    wu.id, wu.name, result.hostid
                                );
                                break;
                            }
                            // clamp bogus or excessive runtimes to max_runtime
                            double runtime = result.elapsed_time;
                            if (runtime <=0 || runtime > max_runtime) {
                                runtime = max_runtime;
                            }
                            credit = result.flops_estimate * runtime * COBBLESTONE_SCALE;
                            log_messages.printf(MSG_NORMAL,
                                "[WU#%lu][RESULT#%lu] credit_from_runtime %.2f = %.0fs * %.2fGFLOPS\n",
                                wu.id, result.id, credit, runtime, result.flops_estimate/1e9
                            );
                            break;
                        }
                    }
                } else if (no_credit) {
                    credit = 0;
                }
                if (max_granted_credit && credit>max_granted_credit) {
                    credit = max_granted_credit;   // clamp to the configured cap
                }
            }

            // scan the viable results.
            // update as needed,
            // and count the # of results that are still viable
            // (some may now have outcome VALIDATE_ERROR,
            // or validate_state INVALID)
            //
            int n_viable_results = 0;
            for (i=0; i<viable_results.size(); i++) {
                RESULT& result = viable_results[i];
                DB_HOST_APP_VERSION& hav = host_app_versions[i];
                DB_HOST_APP_VERSION& hav_orig = host_app_versions_orig[i];

                update_result = false;
                bool update_host = false;
                if (result.outcome != RESULT_OUTCOME_SUCCESS
                    || result.validate_state == VALIDATE_STATE_INVALID
                ) {
                    transition_time = IMMEDIATE;
                    update_result = true;
                } else {
                    n_viable_results++;
                }

                DB_HOST host;
                HOST host_initial;
                // only VALID/INVALID outcomes need the host record;
                // fall-through across the two cases is intentional
                switch (result.validate_state) {
                case VALIDATE_STATE_VALID:
                case VALIDATE_STATE_INVALID:
                    retval = host.lookup_id(result.hostid);
                    if (retval) {
                        log_messages.printf(MSG_CRITICAL,
                            "[RESULT#%lu] lookup of host %lu: %s\n",
                            result.id, result.hostid, boincerror(retval)
                        );
                        continue;   // skip this result entirely
                    }
                    host_initial = host;   // snapshot for update_diff_validator()
                }

                switch (result.validate_state) {
                case VALIDATE_STATE_VALID:
                    update_result = true;
                    update_host = true;
                    retval = is_valid(host, result, wu, host_app_versions[i]);
                    if (retval) {
                        // non-fatal: log and continue
                        log_messages.printf(MSG_DEBUG,
                            "[RESULT#%lu %s] is_valid() failed: %s\n",
                            result.id, result.name, boincerror(retval)
                        );
                    }
                    if (!no_credit) {
                        result.granted_credit = credit;
                        grant_credit(host, result.sent_time, credit);
                        log_messages.printf(MSG_NORMAL,
                            "[RESULT#%lu %s] Valid; granted %f credit [HOST#%lu]\n",
                            result.id, result.name, result.granted_credit, result.hostid
                        );
                        if (config.credit_by_app) {
                            grant_credit_by_app(result, credit);
                        }
                    }
                    break;
                case VALIDATE_STATE_INVALID:
                    update_result = true;
                    update_host = true;
                    log_messages.printf(MSG_NORMAL,
                        "[RESULT#%lu %s] Invalid [HOST#%lu]\n",
                        result.id, result.name, result.hostid
                    );
                    is_invalid(host_app_versions[i]);
                    break;
                case VALIDATE_STATE_INIT:
                    // checked but no verdict yet: mark inconclusive so it is
                    // not re-tested until more results arrive
                    log_messages.printf(MSG_NORMAL,
                        "[RESULT#%lu %s] Inconclusive [HOST#%lu]\n",
                        result.id, result.name, result.hostid
                    );
                    result.validate_state = VALIDATE_STATE_INCONCLUSIVE;
                    update_result = true;
                    break;
                }

                if (dry_run) {
                    log_messages.printf(MSG_NORMAL, "DB not updated (dry run)\n");
                } else {
                    // persist hav/host/result changes;
                    // hav.host_id==0 means the lookup failed earlier
                    if (hav.host_id) {
                        log_messages.printf(MSG_NORMAL,
                            "[HOST#%lu AV#%lu] [outlier=%d] Updating HAV in DB. pfc.n=%f->%f\n",
                            hav.host_id, hav.app_version_id,
                            result.runtime_outlier, hav_orig.pfc.n, hav.pfc.n
                        );
                        retval = hav.update_validator(hav_orig);
                        if (retval) {
                            log_messages.printf(MSG_CRITICAL,
                                "[HOST#%lu AV%lu] hav.update_validator() failed: %s\n",
                                hav.host_id, hav.app_version_id, boincerror(retval)
                            );
                        }
                    }
                    if (update_host) {
                        retval = host.update_diff_validator(host_initial);
                        if (retval) {
                            log_messages.printf(MSG_CRITICAL,
                                "[HOST#%lu] host.update_diff_validator() failed: %s\n",
                                host.id, boincerror(retval)
                            );
                        }
                    }
                    if (update_result) {
                        retval = validator.update_result(result);
                        if (retval) {
                            log_messages.printf(MSG_CRITICAL,
                                "[RESULT#%lu %s] result.update() failed: %s\n",
                                result.id, result.name, boincerror(retval)
                            );
                        }
                    }
                }
            }

            if (canonicalid) {
                // if we found a canonical result,
                // trigger the assimilator, but do NOT trigger
                // the transitioner - doing so creates a race condition
                //
                transition_time = NEVER;
                log_messages.printf(MSG_DEBUG,
                    "[WU#%lu %s] Found a canonical result: id=%lu\n",
                    wu.id, wu.name, canonicalid
                );
                wu.canonical_resultid = canonicalid;
                wu.canonical_credit = credit;
                wu.assimilate_state = ASSIMILATE_READY;

                // don't need to send any more results
                //
                for (i=0; i<items.size(); i++) {
                    RESULT& result = items[i].res;
                    if (result.server_state != RESULT_SERVER_STATE_UNSENT) {
                        continue;
                    }
                    result.server_state = RESULT_SERVER_STATE_OVER;
                    result.outcome = RESULT_OUTCOME_DIDNT_NEED;
                    if (dry_run) {
                        log_messages.printf(MSG_NORMAL, "DB not updated (dry run)\n");
                    } else {
                        retval = validator.update_result(result);
                        if (retval) {
                            log_messages.printf(MSG_CRITICAL,
                                "[RESULT#%lu %s] result.update() failed: %s\n",
                                result.id, result.name, boincerror(retval)
                            );
                        }
                    }
                }
            } else {
                // here if no consensus.
                // check if #viable results is too large
                //
                if (n_viable_results > wu.max_success_results) {
                    wu.error_mask |= WU_ERROR_TOO_MANY_SUCCESS_RESULTS;
                    transition_time = IMMEDIATE;
                }

                // if #viable results >= target_nresults,
                // we need more results, so bump target_nresults
                // NOTE: n_viable_results should never be > target_nresults,
                // but accommodate that if it should happen
                //
                if (n_viable_results >= wu.target_nresults) {
                    wu.target_nresults = n_viable_results+1;
                    transition_time = IMMEDIATE;
                }
            }
        }
    }

leave:
    --log_messages;   // restore log indent
    // apply the transition-time decision accumulated above
    switch (transition_time) {
    case IMMEDIATE:
        wu.transition_time = time(0);
        break;
    case DELAYED:
        // retry in 6 hours, but never push an earlier transition later
        x = time(0) + 6*3600;
        if (x < wu.transition_time) wu.transition_time = x;
        break;
    case NEVER:
        wu.transition_time = INT_MAX;
        break;
    case NO_CHANGE:
        break;
    }
    wu.need_validate = 0;

    if (dry_run) {
        log_messages.printf(MSG_NORMAL, "DB not updated (dry run)\n");
    } else {
        retval = validator.update_workunit(wu);
        if (retval) {
            log_messages.printf(MSG_CRITICAL,
                "[WU#%lu %s] update_workunit() failed: %s\n",
                wu.id, wu.name, boincerror(retval)
            );
            return retval;
        }
    }
    return 0;
}