// wait for the transitioner to create a result for the given WU. // This keeps us from getting infinitely far ahead of the transitioner // (e.g. if the transitioner isn't running) // void wait_for_results(int wu_id) { DB_RESULT result; int count, retval; char buf[256]; sprintf(buf, "where workunitid=%d", wu_id); while (1) { retval = result.count(count, buf); log_messages.printf(MSG_DEBUG, "result.count for %d returned %d, error: %s\n", wu_id, count, boincerror(retval) ); if (retval) { log_messages.printf(MSG_CRITICAL, "result.count: %s\n", boincerror(retval)); exit(1); } if (count > 0) return; sleep(10); check_stop_daemons(); } }
int count_results(char* query, int& n) { DB_RESULT result; return result.count(n, query); }
// Do checks that require DB access for whether we can send this job, // and return: // 0 if OK to send // 1 if can't send to this host // 2 if can't send to ANY host // int slow_check( WU_RESULT& wu_result, // the job cache entry. // We may refresh its hr_class and app_version_id fields. APP* app, BEST_APP_VERSION* bavp // the app version to be used ) { int n, retval; DB_RESULT result; char buf[256]; WORKUNIT& wu = wu_result.workunit; // Don't send if we've already sent a result of this WU to this user. // if (config.one_result_per_user_per_wu) { sprintf(buf, "where workunitid=%d and userid=%d", wu.id, g_reply->user.id ); retval = result.count(n, buf); if (retval) { log_messages.printf(MSG_CRITICAL, "send_work: can't get result count (%s)\n", boincerror(retval) ); return 1; } else { if (n>0) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [USER#%d] already has %d result(s) for [WU#%u]\n", g_reply->user.id, n, wu.id ); } return 1; } } } else if (config.one_result_per_host_per_wu) { // Don't send if we've already sent a result of this WU to this host. // We only have to check this if we don't send one result per user. // sprintf(buf, "where workunitid=%d and hostid=%d", wu.id, g_reply->host.id ); retval = result.count(n, buf); if (retval) { log_messages.printf(MSG_CRITICAL, "send_work: can't get result count (%s)\n", boincerror(retval) ); return 1; } else { if (n>0) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [HOST#%d] already has %d result(s) for [WU#%u]\n", g_reply->host.id, n, wu.id ); } return 1; } } } // Checks that require looking up the WU. 
// Lump these together so we only do 1 lookup // if (app_hr_type(*app) || app->homogeneous_app_version) { DB_WORKUNIT db_wu; db_wu.id = wu.id; int vals[3]; retval = db_wu.get_field_ints( "hr_class, app_version_id, error_mask", 3, vals ); if (retval) { log_messages.printf(MSG_CRITICAL, "can't get fields for [WU#%u]: %s\n", db_wu.id, boincerror(retval) ); return 1; } // check wu.error_mask // if (vals[2] != 0) { return 2; } if (app_hr_type(*app)) { wu.hr_class = vals[0]; if (already_sent_to_different_hr_class(wu, *app)) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [HOST#%d] [WU#%u %s] is assigned to different HR class\n", g_reply->host.id, wu.id, wu.name ); } // Mark the workunit as infeasible. // This ensures that jobs already assigned to an HR class // are processed first. // wu_result.infeasible_count++; return 1; } } if (app->homogeneous_app_version) { int wu_avid = vals[1]; wu.app_version_id = wu_avid; if (wu_avid && wu_avid != bavp->avp->id) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [HOST#%d] [WU#%u %s] is assigned to different app version\n", g_reply->host.id, wu.id, wu.name ); } wu_result.infeasible_count++; return 1; } } } return 0; }
// do slow checks (ones that require DB access) // static bool slow_check(WU_RESULT& wu_result, WORKUNIT& wu, APP* app) { int n, retval; DB_RESULT result; char buf[256]; // Don't send if we've already sent a result of this WU to this user. // if (config.one_result_per_user_per_wu) { sprintf(buf, "where workunitid=%d and userid=%d", wu_result.workunit.id, g_reply->user.id ); retval = result.count(n, buf); if (retval) { log_messages.printf(MSG_CRITICAL, "send_work: can't get result count (%d)\n", retval ); return false; } else { if (n>0) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [USER#%d] already has %d result(s) for [WU#%d]\n", g_reply->user.id, n, wu_result.workunit.id ); } return false; } } } else if (config.one_result_per_host_per_wu) { // Don't send if we've already sent a result // of this WU to this host. // We only have to check this // if we don't send one result per user. // sprintf(buf, "where workunitid=%d and hostid=%d", wu_result.workunit.id, g_reply->host.id ); retval = result.count(n, buf); if (retval) { log_messages.printf(MSG_CRITICAL, "send_work: can't get result count (%d)\n", retval ); return false; } else { if (n>0) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [HOST#%d] already has %d result(s) for [WU#%d]\n", g_reply->host.id, n, wu_result.workunit.id ); } return false; } } } if (app_hr_type(*app)) { if (already_sent_to_different_platform_careful( wu_result.workunit, *app )) { if (config.debug_send) { log_messages.printf(MSG_NORMAL, "[send] [HOST#%d] [WU#%d %s] is assigned to different platform\n", g_reply->host.id, wu.id, wu.name ); } // Mark the workunit as infeasible. // This ensures that jobs already assigned to a platform // are processed first. // wu_result.infeasible_count++; return false; } } return true; }
// Count the results matching the given SQL clause, storing the count
// in n.  Returns zero on success, else a DB error code.
//
int count_results(char* query, int& n) {
    DB_RESULT result;
    // result.count() already returns 0 on success, so the previous
    // "if (retval) return retval; return 0;" dance was redundant;
    // forward its return value directly.
    return result.count(n, query);
}
// DB-backed infeasibility check for a cached job.
// Returns true if the job must NOT be sent to this host:
// either a duplicate-result rule applies, the duplicate query failed,
// or the WU is committed to a different platform class.
// sreq/reply are unused here; the globals g_reply/ssp are used instead.
//
bool wu_is_infeasible_slow(
    WU_RESULT& wu_result, SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply
) {
    char buf[256];
    int retval;
    int n;
    DB_RESULT result;

    // Don't send if we've already sent a result of this WU to this user.
    //
    if (config.one_result_per_user_per_wu) {
        sprintf(buf,
            "where workunitid=%d and userid=%d",
            wu_result.workunit.id, g_reply->user.id
        );
        retval = result.count(n, buf);
        if (retval) {
            // query failed: err on the side of not sending
            log_messages.printf(MSG_CRITICAL,
                "send_work: can't get result count (%d)\n", retval
            );
            return true;
        } else {
            if (n>0) {
                if (config.debug_send) {
                    log_messages.printf(MSG_NORMAL,
                        "[send] send_work: user %d already has %d result(s) for WU %d\n",
                        g_reply->user.id, n, wu_result.workunit.id
                    );
                }
                return true;
            }
        }
    } else if (config.one_result_per_host_per_wu) {
        // Don't send if we've already sent a result
        // of this WU to this host.
        // We only have to check this
        // if we don't send one result per user.
        //
        sprintf(buf,
            "where workunitid=%d and hostid=%d",
            wu_result.workunit.id, g_reply->host.id
        );
        retval = result.count(n, buf);
        if (retval) {
            log_messages.printf(MSG_CRITICAL,
                "send_work: can't get result count (%d)\n", retval
            );
            return true;
        } else {
            if (n>0) {
                if (config.debug_send) {
                    log_messages.printf(MSG_NORMAL,
                        "[send] send_work: host %d already has %d result(s) for WU %d\n",
                        g_reply->host.id, n, wu_result.workunit.id
                    );
                }
                return true;
            }
        }
    }

    // NOTE(review): lookup_app() result is dereferenced below without a
    // null check — presumably cached jobs always have a valid appid;
    // verify against how the job cache is populated.
    APP* app = ssp->lookup_app(wu_result.workunit.appid);
    // NOTE(review): this copies the workunit; any fields the HR check
    // updates on `wu` are lost (cf. the variant that passes
    // wu_result.workunit by reference) — confirm this is intended.
    WORKUNIT wu = wu_result.workunit;
    if (app_hr_type(*app)) {
        if (already_sent_to_different_platform_careful(wu, *app)) {
            if (config.debug_send) {
                log_messages.printf(MSG_NORMAL,
                    "[send] [HOST#%d] [WU#%d %s] WU is infeasible (assigned to different platform)\n",
                    g_reply->host.id, wu.id, wu.name
                );
            }
            // Mark the workunit as infeasible.
            // This ensures that jobs already assigned to a platform
            // are processed first.
            //
            wu_result.infeasible_count++;
            return true;
        }
    }
    return false;
}