/* Load one Problem slot with a fresh packet.
**
** Order of attempts:
**   1) pull a packet from the in-core/disk buffers (__loadapacket),
**   2) if the buffers are empty and fetching is permitted, run a network
**      BufferUpdate() and retry the buffer load once,
**   3) otherwise fall back to generating a random packet, but only if at
**      least one open, non-disabled project advertises
**      PROJECTFLAG_RANDOM_BLOCKS.
**
** Out-parameters:
**   *load_needed        - 0 if the slot was (or could be) filled, else
**                         NOLOAD_NORANDOM or NOLOAD_NONEWBLOCKS.
**   *loaded_for_contest - contest/project id the slot was loaded for.
**   *bufupd_pending     - BUFFERUPDATE_* bits set/cleared to tell the caller
**                         which buffer operations are still wanted.
** Returns 1 if the Problem was successfully loaded, else 0.
*/
static unsigned int __IndividualProblemLoad( Problem *thisprob,
                    unsigned int prob_i, Client *client, int *load_needed,
                    unsigned load_problem_count,
                    unsigned int *loaded_for_contest,
                    int *bufupd_pending )
{
  unsigned int did_load = 0;
  int retry_due_to_failed_loadstate = 0;

  TRACE_OUT((+1, "__IndivProbLoad()\n"));

  do /* while (retry_due_to_failed_loadstate) */
  {
    TRACE_OUT((0, "do /* while (retry_due_to_failed_loadstate) */\n"));
    WorkRecord wrdata;
    /* NOTE(review): bit 4 of connectoften appears to request a buffer update
    ** when the current contest's buffer is exhausted - confirm against the
    ** connectoften flag documentation. */
    int update_on_current_contest_exhaust_flag = (client->connectoften & 4);
    long bufcount;
    int may_do_random_blocks, random_project = -1, proj_i;
    struct timeval tv;

    /* Scan projects in the user's preferred order and remember the first
    ** open, non-disabled one that allows random blocks. */
    may_do_random_blocks = 0;
    for (proj_i = 0; proj_i < PROJECT_COUNT; ++proj_i)
    {
      int projectid = client->project_order_map[proj_i];
      if ((ProjectGetFlags(projectid) & PROJECTFLAG_RANDOM_BLOCKS) &&
          ((client->project_state[projectid] &
            (PROJECTSTATE_USER_DISABLED | PROJECTSTATE_CLOSED)) == 0))
      {
        may_do_random_blocks = 1;
        random_project = projectid;
        break;
      }
    }

    if (!may_do_random_blocks && client->last_buffupd_failed_time != 0 &&
        CliClock(&tv) == 0)
    {
      // If no random work is allowed and if buffer updates are temporarily
      // disabled (retry delay not elapsed), then exit early to prevent the
      // client from doing unecessary file I/O operations until updates are
      // enabled.
      if (!tv.tv_sec)
        tv.tv_sec++;  /* avoid a zero clock reading */
      if (client->last_buffupd_failed_time + client->buffupd_retry_delay
            > tv.tv_sec)
      {
        *load_needed = NOLOAD_NORANDOM;
        return 0; /* Retry delay not elapsed : did nothing */
      }
    }

    retry_due_to_failed_loadstate = 0;
    /* First attempt: take a packet straight from the buffers. */
    bufcount = __loadapacket( client, &wrdata, 1, prob_i,
                              update_on_current_contest_exhaust_flag );

    if (bufcount < 0 && client->nonewblocks == 0)
    {
      /* Buffers are empty and fetching new blocks is allowed: try a network
      ** update, then retry the buffer load once. */
      if (CheckExitRequestTriggerNoIO())
        break;
      //Log("3. BufferUpdate(client,(BUFFERUPDATE_FETCH|BUFFERUPDATE_FLUSH),0)\n");
      TRACE_BUFFUPD((0, "BufferUpdate: reason = __IndividualProblemLoad && no more blocks\n"));
      int didupdate =
          BufferUpdate(client,(BUFFERUPDATE_FETCH|BUFFERUPDATE_FLUSH),0);
      if (CheckExitRequestTriggerNoIO())
        break;
      if (!(didupdate < 0))
      {
        /* clear the pending bits for whatever the update accomplished */
        if (didupdate!=0)
          *bufupd_pending&=~(didupdate&(BUFFERUPDATE_FLUSH|BUFFERUPDATE_FETCH));
        if ((didupdate & BUFFERUPDATE_FETCH) != 0) /* fetched successfully */
          bufcount = __loadapacket( client, &wrdata, 0, prob_i,
                                    update_on_current_contest_exhaust_flag );
      }
    }

    /* Decide whether (and how) this slot can be filled. */
    *load_needed = 0;
    if (bufcount >= 0) /* load from file suceeded */
      *load_needed = 0;
    else if (!may_do_random_blocks || client->blockcount < 0)
      *load_needed = NOLOAD_NORANDOM; /* -1 */
    else if (client->nonewblocks)
      *load_needed = NOLOAD_NONEWBLOCKS;
    else /* using randoms is permitted */
      *load_needed = 0;

    TRACE_OUT((0, "bufcount = %ld, load_needed = %d, may_do_randoms = %d\n",
                  bufcount, *load_needed, may_do_random_blocks));
    TRACE_BUFFUPD((0, "__Indiv...Load: bufcount = %ld, load_needed = %d\n",
                      bufcount, *load_needed));

    if (*load_needed == 0)
    {
      u32 timeslice = 0x10000;
      int expected_cpu = 0, expected_core = 0;
      int expected_os = 0, expected_build = 0;
      const ContestWork *work = &wrdata.work;
      int res = -1;

#if (defined(INIT_TIMESLICE) && (INIT_TIMESLICE >= 64))
      timeslice = INIT_TIMESLICE;
#endif

      if (bufcount < 0) /* normal load from buffer failed */
      { /* so generate random */
        work = CONTESTWORK_MAGIC_RANDOM;
        *loaded_for_contest = random_project;
      }
      else
      {
        *loaded_for_contest = (unsigned int)(wrdata.contest);
        // If a user gets a packet already started by another user,
        // force a restart to occur (may happen when the input buffer
        // is shared).
        if (strcmp(wrdata.id, "*****@*****.**") == 0 ||
            strncmp(wrdata.id, client->id, sizeof(wrdata.id)-1) == 0)
        {
          /* same (or anonymous) owner: resume with the recorded state */
          expected_cpu = wrdata.cpu;
          expected_core = wrdata.core;
          expected_os = wrdata.os;
          expected_build = wrdata.build;
        }
        work = &wrdata.work;

        /* if the total number of packets in buffers is less than the number
           of crunchers running then post a fetch request. This means that
           the effective minimum threshold is always >= num crunchers
        */
        if (((unsigned long)(bufcount)) < (load_problem_count - prob_i))
        {
          *bufupd_pending |= BUFFERUPDATE_FETCH;
        }
      }

      /* loadstate can fail if it selcore fails or the previous problem */
      /* hadn't been purged, or the contest isn't available or ... */
      res = ProblemLoadState( thisprob, work, *loaded_for_contest, timeslice,
                              expected_cpu, expected_core,
                              expected_os, expected_build, client );
      if (res != 0)
      {
        /* The problem with LoadState() failing is that it implicitely
        ** causes the block to be discarded, which means, that the
        ** keyserver network will reissue it - a senseless undertaking
        ** if the data itself is invalid.
        */
        if (res != -2)
        {
          retry_due_to_failed_loadstate = 1; /* loop again: try next packet */
        }
        else
        {
          // should never happen
          Log("Serious ProblemLoadState() error! Aborting!\n");
          RaiseExitRequestTrigger();
        }
      }
      else
      {
        *load_needed = 0;
        did_load = 1;
        ClientEventSyncPost( CLIEVENT_PROBLEM_STARTED, &prob_i, sizeof(prob_i) );

        /* With many crunchers, per-problem "Loaded" lines are combined
        ** elsewhere; only log individually at or below the threshold. */
        if (load_problem_count <= COMBINEMSG_THRESHOLD)
        {
          ProblemInfo info;
          if (ProblemGetInfo( thisprob, &info, P_INFO_S_PERMIL | P_INFO_SIGBUF
                              | P_INFO_DCOUNT | P_INFO_EXACT_PE ) != -1)
          {
            const char *extramsg = "";
            char ddonebuf[15];
            char perdone[32];
            *loaded_for_contest = thisprob->pub_data.contest;
            if (thisprob->pub_data.was_reset)
              extramsg="\nPacket was from a different user/core/client cpu/os/build.";
            else if (info.s_permille > 0 && info.s_permille < 1000)
            {
              /* e.g. permille 123 -> " (12.30% done)" */
              sprintf(perdone, " (%u.%u0%% done)",
                      (info.s_permille/10), (info.s_permille%10));
              extramsg = perdone;
            }
            else if (info.dcounthi || info.dcountlo)
            {
              strcat( strcat( strcpy(perdone, " ("),
                      U64stringify(ddonebuf, sizeof(ddonebuf),
                                   info.dcounthi, info.dcountlo,
                                   2, info.unit)), " done)");
              extramsg = perdone;
            }
            if (load_problem_count > 1)
            {
              Log("%s #%c: Loaded %s%s%s\n",
                  info.name, ProblemLetterId(prob_i),
                  ((thisprob->pub_data.is_random)?("random "):("")),
                  info.sigbuf, extramsg );
            }
            else
            {
              Log("%s: Loaded %s%s%s\n",
                  info.name,
                  ((thisprob->pub_data.is_random)?("random "):("")),
                  info.sigbuf, extramsg );
            }
          } /* if (thisprob->GetProblemInfo(...) != -1) */
        } /* if (load_problem_count <= COMBINEMSG_THRESHOLD) */
      } /* if (LoadState(...) != -1) */
    } /* if (*load_needed == 0) */
  } while (retry_due_to_failed_loadstate);

  TRACE_OUT((-1, "__IndivProbLoad() => %d\n", did_load));
  return did_load;
}
/* Run a selftest ('t'), stresstest ('s') or benchmark (any other value of
** 'which', typically 'b') for contest cont_i.
**
** in_corenum selects a single core to run (>= 0) or all cores (< 0); an
** out-of-range in_corenum falls back to testing all cores. The function
** temporarily overwrites selcorestatics.user_cputype[]/corenum[] to force
** each core selection and restores both afterwards. For benchmarks it also
** tracks the fastest core and the rate of the preselected ("default") core
** and logs a comparison summary.
**
** Returns the last SelfTest/StressTest/TBenchmark result: > 0 on success,
** <= 0 on failure/unsupported, or -1 if nothing could be run.
*/
static long __bench_or_test( Client *client, int which,
                             unsigned int cont_i, unsigned int benchsecs,
                             int in_corenum )
{
  long rc = -1;
  /* FIXME: without -devicenum, test/bench will be run on GPU 0 only */
  int device = hackGetUsedDeviceIndex(client, 0);

  if (selcore_initlev > 0       /* core table is initialized? */
      && cont_i < CONTEST_COUNT) /* valid contest id? */
  {
    /* save current state */
    int user_cputype = selcorestatics.user_cputype[cont_i];
    int corenum = selcorestatics.corenum[cont_i];
    int coreidx, corecount = corecount_for_contest( cont_i );
    int fastest = -1;
    int hardcoded = selcoreGetPreselectedCoreForProject(cont_i, device);
    u32 bestrate_hi = 0, bestrate_lo = 0, refrate_hi = 0, refrate_lo = 0;

    rc = 0; /* assume nothing done */
    for (coreidx = 0; coreidx < corecount; coreidx++)
    {
      /* only bench/test cores that won't be automatically substituted */
      if (apply_selcore_substitution_rules(cont_i, coreidx, device) == coreidx)
      {
        if (in_corenum < 0)
          selcorestatics.user_cputype[cont_i] = coreidx; /* as if user set it */
        else
        {
          if( in_corenum < corecount )
          {
            /* single requested core: run it, then terminate the loop */
            selcorestatics.user_cputype[cont_i] = in_corenum;
            coreidx = corecount;
          }
          else /* invalid core selection, test them all */
          {
            selcorestatics.user_cputype[cont_i] = coreidx;
            in_corenum = -1;
          }
        }
        selcorestatics.corenum[cont_i] = -1; /* reset to show name */

        if (which == 't') /* selftest */
          rc = SelfTest( client, cont_i );
        else if (which == 's') /* stresstest */
          rc = StressTest( client, cont_i );
        else /* benchmark: remember default-core rate and overall best rate */
        {
          u32 temprate_hi, temprate_lo;
          rc = TBenchmark( client, cont_i, benchsecs, 0,
                           &temprate_hi, &temprate_lo );
          if (rc > 0 && selcorestatics.corenum[cont_i] == hardcoded)
          {
            refrate_hi = temprate_hi;
            refrate_lo = temprate_lo;
          }
          /* 64-bit rate comparison done as (hi,lo) pair */
          if (rc > 0 && (temprate_hi > bestrate_hi ||
              (temprate_hi == bestrate_hi && temprate_lo > bestrate_lo)))
          {
            bestrate_hi = temprate_hi;
            bestrate_lo = temprate_lo;
            fastest = selcorestatics.corenum[cont_i];
          }
        }
#if (CLIENT_OS != OS_WIN32 || !defined(SMC))
        if (rc <= 0) /* failed (<0) or not supported (0) */
          break; /* stop */
#else
        // HACK! to ignore failed benchmark for x86 rc5 smc core #7 if
        // started from menu and another cruncher is active in background.
        if (rc <= 0) /* failed (<0) or not supported (0) */
        {
          if ( which == 'b' && cont_i == RC5 && coreidx == 7 )
            ; /* continue */
          else
            break; /* stop */
        }
#endif
      }
    } /* for (coreidx = 0; coreidx < corecount; coreidx++) */

    /* restore the state saved on entry */
    selcorestatics.user_cputype[cont_i] = user_cputype;
    selcorestatics.corenum[cont_i] = corenum;

    /* Summarize the results if multiple cores have been benchmarked (#4108) */
#if (CLIENT_CPU != CPU_CELLBE)
    /* Not applicable for Cell due to PPU/SPU core selection hacks */
    if (in_corenum < 0 && fastest >= 0 &&
        (bestrate_hi != 0 || bestrate_lo != 0))
    {
      /* default-core rate as a percentage of the best rate; 64-bit (hi,lo)
      ** rates are folded into doubles (4294967296.0 == 2^32) */
      double percent = 100.0 *
          ((double)refrate_hi * 4294967296.0 + (double)refrate_lo) /
          ((double)bestrate_hi * 4294967296.0 + (double)bestrate_lo);
      char bestrate_str[32], refrate_str[32];

      U64stringify(bestrate_str, sizeof(bestrate_str),
                   bestrate_hi, bestrate_lo, 2, CliGetContestUnitFromID(cont_i));
      U64stringify(refrate_str, sizeof(refrate_str),
                   refrate_hi, refrate_lo, 2, CliGetContestUnitFromID(cont_i));
      Log("%s benchmark summary :\n"
          "Default core : #%d (%s) %s/sec\n"
          "Fastest core : #%d (%s) %s/sec\n",
          CliGetContestNameFromID(cont_i), hardcoded,
          (hardcoded >= 0 ? selcoreGetDisplayName(cont_i, hardcoded) : "undefined"),
          refrate_str,
          fastest, selcoreGetDisplayName(cont_i, fastest), bestrate_str);
      if (percent < 100 && hardcoded >= 0 && hardcoded != fastest)
      {
        if (percent >= 97)
        {
          Log("Core #%d is marginally faster than the default core.\n"
              "Testing variability might lead to pick one or the other.\n",
              fastest);
        }
        else
        {
          Log("Core #%d is significantly faster than the default core.\n"
#if (CLIENT_CPU != CPU_CUDA && CLIENT_CPU != CPU_ATI_STREAM && CLIENT_CPU != CPU_OPENCL)
              "Please file a bug report along with the output of\n-cpuinfo.\n"
#else
              "The GPU core selection has been made as a tradeoff between core speed\n"
              "and responsiveness of the graphical desktop.\n"
              "Please file a bug report along with the output of -gpuinfo\n"
              "only if the the faster core selection does not degrade graphics performance.\n"
#endif
              "Changes in cores and selection are frequently made,\n"
              "so be sure to test with the latest client version,\n"
              "typically a pre-release, before filing a bug report.\n",
              fastest);
        }
      }
    }
#endif // CPU_CELLBE

#if (CLIENT_OS == OS_RISCOS) && defined(HAVE_X86_CARD_SUPPORT)
    if (rc > 0 && cont_i == RC5 &&
        GetNumberOfDetectedProcessors() > 1) /* have x86 card */
    {
      Problem *prob = ProblemAlloc(); /* so bench/test gets threadnum+1 */
                                      /* FIXME: not true anymore */
      rc = -1; /* assume alloc failed */
      if (prob)
      {
        Log("RC5: using x86 core.\n" );
        if (which != 's') /* bench */
          rc = TBenchmark( client, cont_i, benchsecs, 0 );
        else
          rc = SelfTest( client, cont_i );
        ProblemFree(prob);
      }
    }
#endif
  } /* if (cont_i < CONTEST_COUNT) */

  return rc;
}
/* Save (and unload) the work held in one Problem slot.
**
** If the problem is finished (RESULT_FOUND/RESULT_NOTHING), errored, force-
** unloaded, or flagged for discard, its state is retrieved, written back to
** the out-buffers via PutBufferRecord() (unless discarded), a per-packet
** "Saved"/"Completed"/"Discarded" line is logged, and the Problem object is
** purged.
**
** Out-parameters:
**   *is_empty       - 1 if the slot is (or is about to be) free.
**   *contest        - contest id the record belonged to (0 if none).
**   *bufupd_pending - BUFFERUPDATE_FLUSH is OR'ed in when a flush is wanted.
** unconditional_unload forces the unload even for unfinished work;
** abortive_action makes the final purge non-blocking (thread might be hung).
** Returns 1 if a record was saved to the buffers ("did_save"), else 0.
*/
static unsigned int __IndividualProblemSave( Problem *thisprob,
                unsigned int prob_i, Client *client, int *is_empty,
                unsigned load_problem_count, unsigned int *contest,
                int *bufupd_pending, int unconditional_unload,
                int abortive_action )
{
  unsigned int did_save = 0;
  /* NOTE(review): prob_i is referenced below; presumably this macro exists
  ** for build configurations where those uses compile out - confirm. */
  DNETC_UNUSED_PARAM(prob_i);

  *contest = 0;
  *is_empty = 1; /* assume not initialized */

  if ( ProblemIsInitialized(thisprob) )
  {
    WorkRecord wrdata;
    int resultcode;
    unsigned int cont_i;
    memset( (void *)&wrdata, 0, sizeof(WorkRecord));
    /* peek at the problem state first; the purge happens at the bottom */
    resultcode = ProblemRetrieveState( thisprob, &wrdata.work, &cont_i, 0, 0 );
    *is_empty = 0; /* assume problem is in use */

    if (resultcode == RESULT_FOUND || resultcode == RESULT_NOTHING
        || unconditional_unload || resultcode < 0 /* core error */
        || (thisprob->pub_data.loaderflags &
            (PROBLDR_DISCARD|PROBLDR_FORCEUNLOAD)) != 0)
    {
      int finito = (resultcode==RESULT_FOUND || resultcode==RESULT_NOTHING);
      const char *action_msg = 0;
      const char *reason_msg = 0;
      int discarded = 0;
      char ratebuf[32];
      char dcountbuf[64]; /* we use this as scratch space too */
      struct timeval tv;
      ProblemInfo info;
      info.rate.ratebuf = ratebuf;
      info.rate.size = sizeof(ratebuf);

      *contest = cont_i;
      *is_empty = 1; /* will soon be */

      /* stamp the record with the environment that crunched it */
      wrdata.contest = cont_i;
      wrdata.resultcode = resultcode;
      wrdata.cpu = FILEENTRY_CPU(thisprob->pub_data.client_cpu);
      wrdata.os = FILEENTRY_OS;
      wrdata.build = FILEENTRY_BUILD;
      wrdata.core = FILEENTRY_CORE(thisprob->pub_data.coresel);
      strncpy( wrdata.id, client->id , sizeof(wrdata.id));
      wrdata.id[sizeof(wrdata.id)-1]=0; /* strncpy may not NUL-terminate */

      if (finito)
      {
        /* completed packets carry this client's real identity */
        wrdata.os = CLIENT_OS;
#if (CLIENT_OS == OS_RISCOS)
        if (prob_i == 1)
          wrdata.cpu = CPU_X86;
        else
#endif
        wrdata.cpu = CLIENT_CPU;
        wrdata.build = CLIENT_VERSION;
        wrdata.core = FILEENTRY_CORE(thisprob->pub_data.coresel);
        ClientEventSyncPost( CLIEVENT_PROBLEM_FINISHED, &prob_i, sizeof(prob_i) );
      }

      /* decide the fate of the record */
      if ((thisprob->pub_data.loaderflags & PROBLDR_DISCARD)!=0)
      {
        action_msg = "Discarded";
        reason_msg = "project disabled/closed";
        discarded = 1;
      }
      else if (resultcode < 0)
      {
        action_msg = "Discarded";
        reason_msg = "core error";
        discarded = 1;
      }
      else if (PutBufferRecord( client, &wrdata ) < 0)
      {
        action_msg = "Discarded";
        reason_msg = "buffer error - unable to save";
        discarded = 1;
      }
      else
      {
        did_save = 1;
        if (client->nodiskbuffers)
          *bufupd_pending |= BUFFERUPDATE_FLUSH;
        if (__check_outbufthresh_limit( client, cont_i, -1, 0,bufupd_pending))
        { /* adjust bufupd_pending if outthresh has been crossed */
          //Log("1. *bufupd_pending |= BUFFERUPDATE_FLUSH;\n");
        }
        if (load_problem_count > COMBINEMSG_THRESHOLD)
          ; /* nothing */ /* per-packet lines are combined elsewhere */
        else if (thisprob->pub_data.was_truncated)
        {
          action_msg = "Skipped";
          discarded = 1;
          reason_msg = thisprob->pub_data.was_truncated;
        }
        else if (!finito)
          action_msg = "Saved";
        else
          action_msg = "Completed";
      }

      if (ProblemGetInfo( thisprob, &info, P_INFO_E_TIME | P_INFO_SWUCOUNT
                          | P_INFO_C_PERMIL | P_INFO_SIGBUF | P_INFO_RATEBUF
                          | P_INFO_TCOUNT | P_INFO_CCOUNT | P_INFO_DCOUNT
                          | P_INFO_EXACT_PE ) != -1)
      {
        char info_name[32];
        tv.tv_sec = info.elapsed_secs;
        tv.tv_usec = info.elapsed_usecs;
        if (load_problem_count > 1)
        {
          sprintf(info_name, "%s #%c", info.name, ProblemLetterId(prob_i));
        }
        else
          strcpy(info_name, info.name);
        /* only genuinely completed, non-test packets count toward stats */
        if (finito && !discarded && !info.is_test_packet)
          CliAddContestInfoSummaryData(cont_i,info.ccounthi,info.ccountlo,&tv,info.swucount);
        if (action_msg)
        {
          if (reason_msg) /* was discarded */
          {
            //[....] Discarded CSC 12345678:ABCDEF00 4*2^28
            //       (project disabled/closed)
            Log("%s: %s %s%c(%s)\n", info_name, action_msg, info.sigbuf,
                ((strlen(reason_msg)>10)?('\n'):(' ')), reason_msg );
          }
          else
          {
            U64stringify(dcountbuf, (15<sizeof(dcountbuf))?15:sizeof(dcountbuf),
                         info.dcounthi, info.dcountlo, 2, info.unit);
            if (finito && info.is_test_packet) /* finished test packet */
              strcat( strcpy( dcountbuf,"Test: RESULT_"),
                      ((resultcode==RESULT_NOTHING)?("NOTHING"):("FOUND")) );
            else if (finito) /* finished non-test packet */
            {
              char *p = strrchr(info.sigbuf,':'); /* HACK! to supress too long */
              if (p) *p = '\0';                   /* crypto "Completed" lines */
              sprintf( dcountbuf, "%u.%02u stats units",
                       info.swucount/100, info.swucount%100);
            }
            else if (info.c_permille > 0)
              sprintf( dcountbuf, "%u.%u0%% done",
                       info.c_permille/10, info.c_permille%10);
            else
              strcat( dcountbuf, " done" );
            //[....] RC5: Saved 12345678:ABCDEF00 4*2^28 (5.20% done)
            //       1.23:45:67:89 - [987,654,321 keys/s]
            //[....] OGR: Saved 25/1-6-13-8-16-18 (12.34 Mnodes done)
            //       1.23:45:67:89 - [987,654,321 nodes/s]
            //[....] RC5: Completed 68E0D85A:A0000000 4*2^28 (4.00 stats units)
            //       1.23:45:67:89 - [987,654,321 keys/s]
            //[....] OGR: Completed 22/1-3-5-7 (12.30 stats units)
            //       1.23:45:67:89 - [987,654,321 nodes/s]
            //[....] OGR: 25/1-2-4-5-8-10 [12,345,578,910 nodes]
            Log("%s: %s %s (%s)\n%s - [%s/s]\n",
                info_name, action_msg, info.sigbuf, dcountbuf,
                CliGetTimeString( &tv, 2 ), info.rate.ratebuf );
            if (finito && info.show_exact_iterations_done)
            {
              Log("%s: %s [%s]\n", info_name, info.sigbuf,
                  ProblemComputeRate(cont_i, 0, 0, info.tcounthi, info.tcountlo,
                                     0, 0, dcountbuf, sizeof(dcountbuf)));
            }
          } /* if (reason_msg) else */
        } /* if (action_msg) */
      } /* if (thisprob->GetProblemInfo( ... ) != -1) */

      /* we can purge the object now */
      /* we don't wait when aborting. thread might be hung */
      ProblemRetrieveState( thisprob, NULL, NULL, 1,
                            abortive_action /*==dontwait*/ );
    } /* unload needed */
  } /* is initialized */

  return did_save;
}