int main(int argc, char** argv) { HR_INFO hri; int retval; for (int i=0; i<argc; i++) { if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { usage(argv); } } check_stop_daemons(); retval = config.parse_file(); if (retval) { log_messages.printf(MSG_CRITICAL, "Can't parse config.xml: %s\n", boincerror(retval) ); exit(1); } retval = boinc_db.open( config.db_name, config.db_host, config.db_user, config.db_passwd ); if (retval) { log_messages.printf(MSG_CRITICAL, "Can't open DB\n"); exit(1); } log_messages.printf(MSG_NORMAL, "Starting\n"); boinc_db.set_isolation_level(READ_UNCOMMITTED); hri.init(); hri.scan_db(); hri.write_file(); hri.perf_info.write_file(); log_messages.printf(MSG_NORMAL, "Finished\n"); }
// write a summary of feeder state to stderr // void show_state(int) { ssp->show(stderr); if (config.hr_allocate_slots) { hr_info.show(stderr); } }
// see if we're using HR, and if so initialize the necessary data structures // void hr_init() { int i, retval; bool apps_differ = false; bool some_app_uses_hr = false; int hrt, hr_type0 = ssp->apps[0].homogeneous_redundancy; using_hr = false; for (i=0; i<ssp->napps; i++) { hrt = ssp->apps[i].homogeneous_redundancy; if (hrt <0 || hrt >= HR_NTYPES) { log_messages.printf(MSG_CRITICAL, "HR type %d out of range for app %d\n", hrt, i ); exit(1); } if (hrt) some_app_uses_hr = true; if (hrt != hr_type0) apps_differ = true; } if (config.homogeneous_redundancy) { log_messages.printf(MSG_NORMAL, "config HR is %d\n", config.homogeneous_redundancy ); hrt = config.homogeneous_redundancy; if (hrt < 0 || hrt >= HR_NTYPES) { log_messages.printf(MSG_CRITICAL, "Main HR type %d out of range\n", hrt ); exit(1); } if (some_app_uses_hr) { log_messages.printf(MSG_CRITICAL, "You can specify HR at global or app level, but not both\n" ); exit(1); } for (i=0; i<ssp->napps; i++) { ssp->apps[i].homogeneous_redundancy = config.homogeneous_redundancy; ssp->apps[i].weight = 1; } } else { if (some_app_uses_hr) { if (apps_differ && !all_apps) { log_messages.printf(MSG_CRITICAL, "You must use --allapps if apps have different HR\n" ); exit(1); } } else { return; // HR not being used } } using_hr = true; if (config.hr_allocate_slots) { hr_info.init(); retval = hr_info.read_file(); if (retval) { log_messages.printf(MSG_CRITICAL, "Can't read HR info file: %s\n", boincerror(retval) ); exit(1); } // find the weight for each HR type // for (i=0; i<ssp->napps; i++) { hrt = ssp->apps[i].homogeneous_redundancy; hr_info.type_weights[hrt] += ssp->apps[i].weight; hr_info.type_being_used[hrt] = true; } // compute the slot allocations for HR classes // hr_info.allocate(ssp->max_wu_results); hr_info.show(stderr); } }
// Enumerate jobs from the DB until we find one that is not already in the
// shared-memory work array.
// If we find one, return true (the job is left in 'wi').
// If we reach the end of the enumeration for the second time during this
// array scan, return false.
//
static bool get_job_from_db(
    DB_WORK_ITEM& wi,       // enumerator to get job from
    int app_index,          // if using --allapps, the app index
    int& enum_phase,        // in/out: ENUM_FIRST_PASS/ENUM_SECOND_PASS/ENUM_OVER
    int& ncollisions        // in/out: count of results already in the array
) {
    bool collision;
    int retval, j, enum_size;
    char select_clause[256];

    // With --allapps each app gets its own select clause (restricted to
    // its appid) and its own enumeration batch size.
    if (all_apps) {
        sprintf(select_clause, "%s and r1.appid=%lu",
            mod_select_clause, ssp->apps[app_index].id
        );
        enum_size = enum_sizes[app_index];
    } else {
        safe_strcpy(select_clause, mod_select_clause);
        enum_size = enum_limit;
    }
    int hrt = ssp->apps[app_index].homogeneous_redundancy;

    while (1) {
        // With HR slot allocation we must see all candidate jobs (no custom
        // ordering) so quota accounting per HR class works; otherwise use
        // the configured order clause.
        if (hrt && config.hr_allocate_slots) {
            retval = wi.enumerate_all(enum_size, select_clause);
        } else {
            retval = wi.enumerate(enum_size, select_clause, order_clause);
        }
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                // If DB server dies, exit;
                // so /start (run from crontab) will restart us eventually.
                //
                log_messages.printf(MSG_CRITICAL, "DB connection lost, exiting\n"
                );
                exit(0);
            }

            // We've reached the end of the result set.
            //
            switch (enum_phase) {
            case ENUM_FIRST_PASS:
                enum_phase = ENUM_SECOND_PASS;
                ncollisions = 0;
                    // disregard collisions - maybe we'll find new jobs
                break;
            case ENUM_SECOND_PASS:
                enum_phase = ENUM_OVER;
                return false;
            }
            log_messages.printf(MSG_NORMAL,
                "restarted enumeration for appid %lu\n",
                ssp->apps[app_index].id
            );
        } else {
            // Check for invalid application ID
            //
            if (!ssp->lookup_app(wi.wu.appid)) {
#if 0
                log_messages.printf(MSG_CRITICAL,
                    "result [RESULT#%u] has bad appid %d; clean up your DB!\n",
                    wi.res_id, wi.wu.appid
                );
#endif
                continue;
            }

            // if the WU had an error, mark result as DIDNT_NEED
            //
            if (wi.wu.error_mask) {
                char buf[256];
                DB_RESULT result;
                result.id = wi.res_id;
                sprintf(buf, "server_state=%d, outcome=%d",
                    RESULT_SERVER_STATE_OVER,
                    RESULT_OUTCOME_DIDNT_NEED
                );
                result.update_field(buf);
                log_messages.printf(MSG_NORMAL,
                    "[RESULT#%lu] WU had error, marking as DIDNT_NEED\n",
                    wi.res_id
                );
                continue;
            }

            // Check for collision (i.e. this result already is in the array)
            //
            collision = false;
            for (j=0; j<ssp->max_wu_results; j++) {
                if (ssp->wu_results[j].state != WR_STATE_EMPTY
                    && ssp->wu_results[j].resultid == wi.res_id
                ) {
                    // If the result is already in shared mem,
                    // and another instance of the WU has been sent,
                    // bump the infeasible count to encourage
                    // it to get sent more quickly
                    //
                    if (ssp->wu_results[j].infeasible_count == 0) {
                        if (wi.wu.hr_class > 0) {
                            ssp->wu_results[j].infeasible_count++;
                        }
                    }
                    ncollisions++;
                    collision = true;
                    log_messages.printf(MSG_DEBUG,
                        "result [RESULT#%lu] already in array\n", wi.res_id
                    );
                    break;
                }
            }
            if (collision) {
                continue;
            }

            // if using HR, check whether we've exceeded quota for this class
            //
            if (hrt && config.hr_allocate_slots) {
                if (!hr_info.accept(hrt, wi.wu.hr_class)) {
                    log_messages.printf(MSG_DEBUG,
                        "rejecting [RESULT#%lu] because HR class %d/%d over quota\n",
                        wi.res_id, hrt, wi.wu.hr_class
                    );
                    continue;
                }
            }
            return true;
        }
    }
    return false;   // never reached
}
// write a summary of feeder state to stderr // void show_state(int) { ssp->show(stderr); hr_info.show(stderr); }