int main() {
    if (boinc_db.open("predictor", "boinc", NULL, NULL)) {
        printf("Open failed\n");
        return 1;   // nonzero exit code: the DB connection failed
    }
    DB_WORKUNIT workunit;
    char buf[256];
    while (!workunit.enumerate()) {
        printf("workunit %d wsn %d\n", workunit.id, workunit.workseq_next);
        DB_RESULT result;
        sprintf(buf, "where workunitid=%d", workunit.id);
        if (!result.enumerate(buf)) {
            DB_HOST host;
            sprintf(buf, "where id=%d", result.hostid);
            if (!host.enumerate(buf)) {
                // seed the next work sequence number from the
                // host's OS and CPU scores
                workunit.workseq_next = OS(host) + CPU(host);
                if (workunit.update()) printf("Update failed!\n");
            }
        }
    }
    return 0;
}
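// OS() and CPU() are not defined in this listing. A minimal sketch of
// plausible scoring helpers, assuming they map the host's platform and
// processor vendor to small integers (the scores are illustrative):
//
static int OS(HOST& host) {
    // os_name is a free-form string reported by the client
    if (strstr(host.os_name, "Linux"))   return 1;
    if (strstr(host.os_name, "Windows")) return 2;
    if (strstr(host.os_name, "Darwin"))  return 3;
    return 0;
}

static int CPU(HOST& host) {
    if (strstr(host.p_vendor, "Intel")) return 10;
    if (strstr(host.p_vendor, "AMD"))   return 20;
    return 0;
}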
int main_loop(APP& app) {
    DB_WORKUNIT wu;
    DB_RESULT canonical_result, result;
    char buf[256];
    char buf2[256];
    int retval;
    task_t task;

    while (1) {
        check_stop_daemons();
        sprintf(buf, "where appid=%d and assimilate_state=%d",
            app.id, ASSIMILATE_READY
        );
        // fill in the fields of the current workunit
        retval = wu.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            // nothing ready yet: wait and retry instead of
            // reparsing the stale wu from the previous iteration
            sleep(SLEEP_INTERVAL);
            continue;
        }
        // parse the task fields out of the workunit name
        sscanf(wu.name, "%[^_]_%[^_]_%d_%*d_%d",
            task.app_name, task.name, &task.timestamp, &task.size
        );
        // build the list of results belonging to this task
        vector<RESULT> results;
        if (strlen(task.name) > 0) {
            sprintf(buf,
                "INNER JOIN workunit ON result.id = workunit.canonical_resultid "
                "WHERE workunit.name like \"%%_%s_%%\" and workunit.assimilate_state=%d",
                task.name, ASSIMILATE_READY
            );
            while (!result.enumerate(buf)) {
                results.push_back(result);
            }
        }
        // merge the task's results once all of them have arrived
        if ((results.size() == (size_t)task.size) && (task.size != 0)) {
            log_messages.printf(MSG_NORMAL, "[%s] Assimilating task\n", task.name);
            retval = rmerge(task, results);
            if (retval) {
                log_messages.printf(MSG_CRITICAL, "[%s] Assimilation failed\n", task.name);
            } else {
                // update the database records
                if (update_db) {
                    sprintf(buf, "assimilate_state=%d, transition_time=%d",
                        ASSIMILATE_DONE, (int)time(0)
                    );
                    sprintf(buf2,
                        "appid=%d and assimilate_state=%d and name like \"%%_%s_%%\"",
                        app.id, ASSIMILATE_READY, task.name
                    );
                    wu.update_fields_noid(buf, buf2);
                    boinc_db.commit_transaction();
                }
                log_messages.printf(MSG_NORMAL, "[%s] Task assimilated\n", task.name);
                // reset all per-task state
                wu.clear();
                memset(&task, 0, sizeof(task));
                results.clear();
            }
        }
        sleep(SLEEP_INTERVAL);
    }
}
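// task_t is not defined in this listing. A minimal sketch of the struct
// implied by the sscanf format above (field sizes are assumptions; the
// second main_loop variant further below additionally parses id, uid,
// login, and extension):
//
typedef struct {
    char app_name[64];   // first "_"-delimited token of wu.name
    char name[64];       // task name
    int  timestamp;      // creation time encoded in the name
    int  size;           // number of workunits the task was split into
} task_t;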
// collect information and call delete_antiques_from_dir()
// for every relevant directory
//
static int delete_antiques() {
    DB_WORKUNIT wu;
    time_t t = 0;
    int ret = 0;

    // t = one day before the create time of the oldest WU,
    // or 32 days ago, whichever is earlier
    //
    t = time(0) - 32*86400;
    if (!wu.enumerate("order by id limit 1") && (t > wu.create_time)) {
        t = wu.create_time - 86400;
    }

    // find the numerical userid of the web server (apache) account
    //
    struct passwd *apache_info = getpwnam(config.httpd_user);
    if (!apache_info) {
        log_messages.printf(MSG_CRITICAL,
            "Couldn't find httpd_user '%s' in passwd\n",
            config.httpd_user
        );
        return -1;
    }

    log_messages.printf(MSG_DEBUG,
        "delete_antiques(): "
        "Deleting files older than epoch %lu (%s) with userid %u\n",
        (unsigned long)t, actime(t), apache_info->pw_uid
    );

    // if fanout is configured, scan every fanout directory;
    // otherwise just the plain upload directory
    //
    if (config.uldl_dir_fanout) {
        for (int d = 0; d < config.uldl_dir_fanout; d++) {
            char buf[270];
            snprintf(buf, sizeof(buf), "%s/%x", config.upload_dir, d);
            log_messages.printf(MSG_DEBUG,
                "delete_antiques(): scanning upload fanout directory '%s'\n",
                buf
            );
            ret = delete_antiques_from_dir(buf, t, apache_info->pw_uid);
            if (ret < 0) return ret;
        }
    } else {
        log_messages.printf(MSG_DEBUG,
            "delete_antiques(): scanning upload directory '%s'\n",
            config.upload_dir
        );
        ret = delete_antiques_from_dir(config.upload_dir, t, apache_info->pw_uid);
    }
    return ret;
}
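// delete_antiques_from_dir() is defined elsewhere in BOINC's antique
// file deleter. A minimal sketch of the scan-and-delete logic, under
// the assumption that a file counts as "antique" if it is a regular
// file owned by the web server account and older than the cutoff:
//
#include <stdio.h>
#include <string.h>
#include <dirent.h>
#include <sys/stat.h>
#include <unistd.h>

static int delete_antiques_from_dir_sketch(
    const char* dirpath, time_t cutoff, uid_t uid
) {
    DIR* dir = opendir(dirpath);
    if (!dir) return -1;
    struct dirent* ent;
    int ndeleted = 0;
    while ((ent = readdir(dir))) {
        if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) continue;
        char path[1024];
        snprintf(path, sizeof(path), "%s/%s", dirpath, ent->d_name);
        struct stat st;
        if (stat(path, &st)) continue;          // unreadable: skip
        if (!S_ISREG(st.st_mode)) continue;     // not a regular file: skip
        if (st.st_uid != uid) continue;         // not owned by httpd: skip
        if (st.st_mtime >= cutoff) continue;    // too young: keep
        if (!unlink(path)) ndeleted++;
    }
    closedir(dir);
    return ndeleted;
}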
// returns number of files found & added, or negative for error.
//
int find_antique_files() {
    char buf[256];
    DB_WORKUNIT wu;

    check_stop_daemons();

    // Find the oldest workunit. We could add
    // "where file_delete_state!=FILE_DELETE_DONE" to the query,
    // but this might create some race condition
    // with the 'regular' file delete mechanism,
    // so better to do it like this.
    //
    sprintf(buf, "order by id limit 1");
    if (!wu.enumerate(buf)) {
        // Don't ever delete files younger than a month.
        //
        int days = 1 + (time(0) - wu.create_time)/86400;
        if (days < 31) days = 31;
        return add_antiques_to_list(days);
    }
    return 0;
}
// return true if we changed the file_delete_state of a WU or a result
//
bool do_pass(bool retry_error) {
    DB_WORKUNIT wu;
    DB_RESULT result;
    bool did_something = false;
    char buf[256];
    char clause[256];
    int retval, new_state;

    check_stop_daemons();

    strcpy(clause, "");
    if (id_modulus) {
        sprintf(clause, " and id %% %d = %d ", id_modulus, id_remainder);
    }
    if (dont_delete_batches) {
        strcat(clause, " and batch <= 0 ");
    }
    if (appid) {
        sprintf(buf, " and appid = %d ", appid);
        strcat(clause, buf);
    }
    sprintf(buf, "where file_delete_state=%d %s limit %d",
        retry_error ? FILE_DELETE_ERROR : FILE_DELETE_READY,
        clause, WUS_PER_ENUM
    );
    while (do_input_files) {
        // enumerate() runs the query on its first call and only fetches
        // rows afterwards, so reusing buf inside the loop is safe
        retval = wu.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            break;
        }
        if (preserve_wu_files) {
            retval = 0;
        } else {
            retval = wu_delete_files(wu);
        }
        if (retval) {
            new_state = FILE_DELETE_ERROR;
            log_messages.printf(MSG_CRITICAL,
                "[WU#%d] file deletion failed: %s\n",
                wu.id, boincerror(retval)
            );
        } else {
            new_state = FILE_DELETE_DONE;
        }
        if (new_state != wu.file_delete_state) {
            sprintf(buf, "file_delete_state=%d", new_state);
            retval = wu.update_field(buf);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[WU#%d] update failed: %s\n",
                    wu.id, boincerror(retval)
                );
            } else {
                log_messages.printf(MSG_DEBUG,
                    "[WU#%d] file_delete_state updated\n", wu.id
                );
                did_something = true;
            }
        }
    }
    sprintf(buf, "where file_delete_state=%d %s limit %d",
        retry_error ? FILE_DELETE_ERROR : FILE_DELETE_READY,
        clause, RESULTS_PER_ENUM
    );
    while (do_output_files) {
        retval = result.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            break;
        }
        if (preserve_result_files) {
            retval = 0;
        } else {
            retval = result_delete_files(result);
        }
        if (retval) {
            new_state = FILE_DELETE_ERROR;
            log_messages.printf(MSG_CRITICAL,
                "[RESULT#%d] file deletion failed: %s\n",
                result.id, boincerror(retval)
            );
        } else {
            new_state = FILE_DELETE_DONE;
        }
        if (new_state != result.file_delete_state) {
            sprintf(buf, "file_delete_state=%d", new_state);
            retval = result.update_field(buf);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[RESULT#%d] update failed: %s\n",
                    result.id, boincerror(retval)
                );
            } else {
                log_messages.printf(MSG_DEBUG,
                    "[RESULT#%d] file_delete_state updated\n", result.id
                );
                did_something = true;
            }
        }
    }
    return did_something;
}
// assimilate all WUs that need it;
// return nonzero (true) if we did anything
//
bool do_pass(APP& app) {
    DB_WORKUNIT wu;
    DB_RESULT canonical_result, result;
    bool did_something = false;
    char buf[256];
    char mod_clause[256];
    int retval;
    int num_assimilated = 0;

    check_stop_daemons();

    if (wu_id_modulus) {
        sprintf(mod_clause, " and workunit.id %% %d = %d ",
            wu_id_modulus, wu_id_remainder
        );
    } else {
        strcpy(mod_clause, "");
    }
    sprintf(buf, "where appid=%d and assimilate_state=%d %s limit %d",
        app.id, ASSIMILATE_READY, mod_clause,
        one_pass_N_WU ? one_pass_N_WU : 1000
    );
    while (1) {
        retval = wu.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            break;
        }
        vector<RESULT> results;     // must be inside while()!

        // for testing purposes, pretend we did nothing
        //
        if (update_db) {
            did_something = true;
        }
        log_messages.printf(MSG_DEBUG,
            "[%s] assimilating WU %d; state=%d\n",
            wu.name, wu.id, wu.assimilate_state
        );

        sprintf(buf, "where workunitid=%d", wu.id);
        canonical_result.clear();
        bool found = false;
        while (1) {
            retval = result.enumerate(buf);
            if (retval) {
                if (retval != ERR_DB_NOT_FOUND) {
                    log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                    exit(0);
                }
                break;
            }
            results.push_back(result);
            if (result.id == wu.canonical_resultid) {
                canonical_result = result;
                found = true;
            }
        }

        // If no canonical result found and WU had no other errors,
        // something is wrong, e.g. result records got deleted prematurely.
        // This is probably unrecoverable, so mark the WU as having
        // an assimilation error and keep going.
        //
        if (!found && !wu.error_mask) {
            log_messages.printf(MSG_CRITICAL,
                "[%s] no canonical result\n", wu.name
            );
            wu.error_mask = WU_ERROR_NO_CANONICAL_RESULT;
            sprintf(buf, "error_mask=%d", wu.error_mask);
            wu.update_field(buf);
        }

        retval = assimilate_handler(wu, results, canonical_result);
        if (retval && retval != DEFER_ASSIMILATION) {
            log_messages.printf(MSG_CRITICAL,
                "[%s] handler error: %s; exiting\n",
                wu.name, boincerror(retval)
            );
            exit(retval);
        }

        if (update_db) {
            // defer assimilation until the next result is returned
            //
            int assimilate_state = ASSIMILATE_DONE;
            if (retval == DEFER_ASSIMILATION) {
                assimilate_state = ASSIMILATE_INIT;
            }
            sprintf(buf, "assimilate_state=%d, transition_time=%d",
                assimilate_state, (int)time(0)
            );
            retval = wu.update_field(buf);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "[%s] update failed: %s\n",
                    wu.name, boincerror(retval)
                );
                exit(1);
            }
        }
        num_assimilated++;
    }
    if (did_something) {
        boinc_db.commit_transaction();
    }
    if (num_assimilated) {
        log_messages.printf(MSG_NORMAL,
            "Assimilated %d workunits.\n", num_assimilated
        );
    }
    return did_something;
}
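// assimilate_handler() is the project-specific hook called above; each
// project links its own implementation against this do_pass(). A minimal
// sketch in the style of BOINC's sample_dummy_assimilator, which only
// logs what it sees:
//
int assimilate_handler(
    WORKUNIT& wu, vector<RESULT>& results, RESULT& canonical_result
) {
    if (wu.canonical_resultid) {
        // a canonical result exists: this is where a real handler
        // would copy or parse its output files
        log_messages.printf(MSG_NORMAL,
            "[%s] canonical result %d\n", wu.name, canonical_result.id
        );
    } else {
        // the WU errored out; results may be empty
        log_messages.printf(MSG_NORMAL,
            "[%s] error mask %d, %d results\n",
            wu.name, wu.error_mask, (int)results.size()
        );
    }
    return 0;   // nonzero would make do_pass() exit
}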
// return true if did anything
//
bool do_pass() {
    int retval = 0;

    // Numbers of workunits/results purged in this single pass of do_pass().
    // Since do_pass() may be invoked multiple times,
    // corresponding global variables store the overall totals.
    //
    int do_pass_purged_workunits = 0;
    int do_pass_purged_results = 0;

    // Check whether we got a stop signal.
    // Note that if we do catch a stop signal here,
    // we call an exit handler that closes [and optionally compresses]
    // files before returning to the OS.
    //
    check_stop_daemons();

    bool did_something = false;
    DB_WORKUNIT wu;
    char buf[256];

    if (min_age_days) {
        char timestamp[15];
        mysql_timestamp(dtime() - min_age_days*86400., timestamp);
        sprintf(buf,
            "where file_delete_state=%d and mod_time<'%s' limit %d",
            FILE_DELETE_DONE, timestamp, DB_QUERY_LIMIT
        );
    } else {
        sprintf(buf, "where file_delete_state=%d limit %d",
            FILE_DELETE_DONE, DB_QUERY_LIMIT
        );
    }

    int n = 0;
    while (1) {
        retval = wu.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            break;
        }
        if (strstr(wu.name, "nodelete")) continue;
        did_something = true;

        // if the archives have not already been opened, open them
        //
        if (!no_archive && !wu_stream) {
            open_all_archives();
        }

        retval = purge_and_archive_results(wu, n);
        do_pass_purged_results += n;

        if (!no_archive) {
            retval = archive_wu(wu);
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "Failed to write workunit %d to XML file\n", wu.id
                );
                exit(5);
            }
            log_messages.printf(MSG_DEBUG,
                "Archived workunit [%d] to a file\n", wu.id
            );
        }

        // purge the workunit from the DB
        //
        if (!dont_delete) {
            retval = wu.delete_from_db();
            if (retval) {
                log_messages.printf(MSG_CRITICAL,
                    "Can't delete workunit [%d] from database: %d\n",
                    wu.id, retval
                );
                exit(6);
            }
        }
        log_messages.printf(MSG_DEBUG,
            "Purged workunit [%d] from database\n", wu.id
        );

        if (config.enable_assignment) {
            DB_ASSIGNMENT asg;
            char buf2[256];
            sprintf(buf2, "workunitid=%d", wu.id);
            asg.delete_from_db_multi(buf2);
        }

        purged_workunits++;
        do_pass_purged_workunits++;
        wu_stored_in_file++;

        if (!no_archive) {
            fflush(NULL);
            // if the file holds the max # of workunits, close and compress it.
            // This sets the file pointers to NULL.
            //
            if (max_wu_per_file && wu_stored_in_file >= max_wu_per_file) {
                close_all_archives();
                wu_stored_in_file = 0;
            }
        }
        if (time_to_quit()) {
            break;
        }
    }

    if (do_pass_purged_workunits) {
        log_messages.printf(MSG_NORMAL,
            "Archived %d workunits and %d results\n",
            do_pass_purged_workunits, do_pass_purged_results
        );
    }
    if (did_something && wu_stored_in_file > 0) {
        log_messages.printf(MSG_DEBUG,
            "Currently open archive files contain %d workunits\n",
            wu_stored_in_file
        );
    }

    // report "did anything" only if the batch was more than half full,
    // so the caller knows another pass is likely worthwhile
    //
    return do_pass_purged_workunits > DB_QUERY_LIMIT/2;
}
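// Sketch of how a purge daemon might drive do_pass() (a hypothetical
// driver; db_purge's real main() also handles one-pass flags, archive
// setup, and signal handlers):
//
int purge_loop_sketch() {
    while (1) {
        // do_pass() returns true only when it purged more than half a
        // batch, i.e. more work is probably waiting
        if (!do_pass()) {
            sleep(600);     // idle: poll again later (interval assumed)
        }
        check_stop_daemons();
    }
}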
int main_loop(APP& app) {
    DB_WORKUNIT wu;
    DB_RESULT canonical_result, result;
    DB_APP_VERSION app_version;
    // http://boinc.berkeley.edu/doxygen/server/html/classDB__APP__VERSION.html
    char buf[256];
    char buf2[256];
    int retval;
    task_t task;
    MYSQL_RES* mysql_result;
    MYSQL_ROW row;

    while (1) {
        check_stop_daemons();
        // error_mask 16 = WU_ERROR_CANCELLED: skip cancelled workunits
        sprintf(buf,
            "WHERE appid=%d AND assimilate_state=%d AND error_mask<>16",
            app.id, ASSIMILATE_READY
        );
        // fill in the fields of the current workunit
        retval = wu.enumerate(buf);
        if (retval) {
            if (retval != ERR_DB_NOT_FOUND) {
                log_messages.printf(MSG_DEBUG, "DB connection lost, exiting\n");
                exit(0);
            }
            // nothing ready yet: wait and retry instead of
            // reparsing the stale wu from the previous iteration
            sleep(SLEEP_INTERVAL);
            continue;
        }
        // parse the task fields out of the workunit name
        sscanf(wu.name, "%[^_]_%d_%d_%[^_]_%d_%*d_%d.%[^_]",
            task.app_name, &task.id, &task.uid, task.name,
            &task.timestamp, &task.size, task.extension
        );
        // look up the owner's login in the frontend database
        sprintf(buf, "SELECT login FROM user WHERE id=%d", task.uid);
        mysql_query(frontend_db, buf);
        mysql_result = mysql_store_result(frontend_db);
        if ((row = mysql_fetch_row(mysql_result)) != NULL) {
            strcpy(task.login, row[0]);
        }
        mysql_free_result(mysql_result);
        // build the list of results belonging to this task
        vector<RESULT> results;
        if (strlen(task.name) > 0) {
            sprintf(buf,
                "INNER JOIN workunit ON result.id = workunit.canonical_resultid "
                "WHERE workunit.name like \"%%_%d_%d_%s_%%\" "
                "and workunit.assimilate_state=%d and workunit.error_mask<>16",
                task.id, task.uid, task.name, ASSIMILATE_READY
            );
            while (!result.enumerate(buf)) {
                results.push_back(result);
            }
        }
        // merge the task's results once all of them have arrived
        if ((results.size() == (size_t)task.size) && (task.size != 0)) {
            log_messages.printf(MSG_NORMAL, "[%d_%s] Assimilating task\n",
                task.uid, task.name
            );
            retval = handle_result(task, results);
            if (retval) {
                log_messages.printf(MSG_CRITICAL, "[%d_%s] Assimilation failed\n",
                    task.uid, task.name
                );
            } else {
                // update the database records
                if (update_db) {
                    sprintf(buf, "assimilate_state=%d, transition_time=%d",
                        ASSIMILATE_DONE, (int)time(0)
                    );
                    sprintf(buf2,
                        "appid=%d and assimilate_state=%d and name like \"%%_%d_%d_%s_%%\"",
                        app.id, ASSIMILATE_READY, task.id, task.uid, task.name
                    );
                    wu.update_fields_noid(buf, buf2);
                    boinc_db.commit_transaction();
                    // update the plankton records on the frontend
                    update_plankton(task, app_version);
                    update_plankton_percent(results, task);
                }
                log_messages.printf(MSG_NORMAL, "[%d_%s] Task assimilated\n",
                    task.uid, task.name
                );
                // reset all per-task state
                wu.clear();
                memset(&task, 0, sizeof(task));
                results.clear();
            }
        } else {
            if (results.size()) {
                // task incomplete: just refresh the progress indicator
                update_plankton_percent(results, task);
            }
        }
        sleep(SLEEP_INTERVAL);
    }
}
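// Worked example of the name convention parsed above. The sample name
// is hypothetical: "fish_7_42_plankton-run_1600000000_0_5.zip" decodes
// to app_name="fish", id=7, uid=42, name="plankton-run",
// timestamp=1600000000, (replica index skipped), size=5, extension="zip".
//
#include <stdio.h>
int parse_demo() {
    char app_name[64], name[64], extension[16];
    int id, uid, timestamp, size;
    const char* wu_name = "fish_7_42_plankton-run_1600000000_0_5.zip";
    int nfields = sscanf(wu_name, "%[^_]_%d_%d_%[^_]_%d_%*d_%d.%[^_]",
        app_name, &id, &uid, name, &timestamp, &size, extension
    );
    return nfields == 7 ? 0 : -1;   // all seven assigned fields should match
}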