// Miner loop to manage a single processor (with possibly multiple threads per processor) void minerloop_scanhash(struct thr_info *mythr) { struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *api = cgpu->drv; struct timeval tv_start, tv_end; struct timeval tv_hashes, tv_worktime; uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff; int64_t hashes; struct work *work; const bool primary = (!mythr->device_thread) || mythr->primary_thread; #ifdef HAVE_PTHREAD_CANCEL pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL); #endif while (likely(!cgpu->shutdown)) { mythr->work_restart = false; request_work(mythr); work = get_and_prepare_work(mythr); if (!work) break; timer_set_now(&work->tv_work_start); do { thread_reportin(mythr); /* Only allow the mining thread to be cancelled when * it is not in the driver code. */ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); timer_set_now(&tv_start); hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce); timer_set_now(&tv_end); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_testcancel(); thread_reportin(mythr); timersub(&tv_end, &tv_start, &tv_hashes); if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &max_nonce : NULL)) goto disabled; if (unlikely(mythr->work_restart)) { /* Apart from device_thread 0, we stagger the * starting of every next thread to try and get * all devices busy before worrying about * getting work for their extra threads */ if (!primary) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = 250 * mythr->device_thread * 1000000; nanosleep(&rgtp, NULL); } break; } if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED)) disabled: mt_disable(mythr); timersub(&tv_end, &work->tv_work_start, &tv_worktime); } while (!abandon_work(work, &tv_worktime, cgpu->max_hashes)); free_work(work); } }
/*
 * Per-thread preparation hook for the CPU driver.
 *
 * Reports the thread in (so the watchdog sees it as alive) and signals
 * success; no other setup is required for CPU mining threads.
 */
static bool cpu_thread_prepare(struct thr_info *thr)
{
	thread_reportin(thr);

	return true;
}