static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
	struct cgpu_info *bitforce = thr->cgpu;
	bool send_ret;
	int64_t ret;

	send_ret = bitforce_send_work(thr, work);

	if (!restart_wait(bitforce->sleep_ms))
		return 0;

	bitforce->wait_ms = bitforce->sleep_ms;
	if (send_ret) {
		bitforce->polling = true;
		ret = bitforce_get_result(thr, work);
		bitforce->polling = false;
	} else
		ret = -1;

	if (ret == -1) {
		ret = 0;
		applog(LOG_ERR, "BFL%i: Comms error", bitforce->device_id);
		bitforce->device_last_not_well = time(NULL);
		bitforce->device_not_well_reason = REASON_DEV_COMMS_ERROR;
		bitforce->dev_comms_error_count++;
		bitforce->hw_errors++;
		/* empty read buffer */
		bitforce_clear_buffer(bitforce);
	}
	return ret;
}
static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
	struct cgpu_info *bitforce = thr->cgpu;
	bool send_ret;
	int64_t ret;

	send_ret = bitforce_send_work(thr, work);

	if (!restart_wait(bitforce->sleep_ms))
		return 0;

	bitforce->wait_ms = bitforce->sleep_ms;
	if (send_ret) {
		bitforce->polling = true;
		ret = bitforce_get_result(thr, work);
		bitforce->polling = false;
	} else
		ret = -1;

	if (ret == -1) {
		ret = 0;
		applog(LOG_ERR, "%s%i: Comms error", bitforce->drv->name, bitforce->device_id);
		dev_error(bitforce, REASON_DEV_COMMS_ERROR);
		bitforce->hw_errors++;
		/* empty read buffer */
		bitforce_initialise(bitforce, true);
	}
	return ret;
}
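/*
 * Both bitforce_scanhash() variants above sleep for the device's estimated
 * hash time via restart_wait(), which returns early (and the driver returns
 * zero hashes) when a work restart arrives.  The real helper lives elsewhere
 * in the tree; the following is only a minimal standalone sketch of that
 * pattern using a plain pthread condition variable -- all sketch_* names are
 * hypothetical and this is not cgminer's implementation.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t sketch_restart_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sketch_restart_cond = PTHREAD_COND_INITIALIZER;
static bool sketch_restart_flagged;

/* Returns non-zero if the full timeout elapsed, 0 if interrupted by a restart */
static int sketch_restart_wait(unsigned int mstime)
{
	struct timespec abstime;
	int rc = 1;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += mstime / 1000;
	abstime.tv_nsec += (long)(mstime % 1000) * 1000000L;
	if (abstime.tv_nsec >= 1000000000L) {
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&sketch_restart_lock);
	while (!sketch_restart_flagged) {
		if (pthread_cond_timedwait(&sketch_restart_cond, &sketch_restart_lock, &abstime) == ETIMEDOUT)
			break;
	}
	if (sketch_restart_flagged)
		rc = 0;
	pthread_mutex_unlock(&sketch_restart_lock);
	return rc;
}

/* The flusher side sets the flag and broadcasts when new work invalidates the
 * current job: */
static void sketch_flag_restart(void)
{
	pthread_mutex_lock(&sketch_restart_lock);
	sketch_restart_flagged = true;
	pthread_cond_broadcast(&sketch_restart_cond);
	pthread_mutex_unlock(&sketch_restart_lock);
}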
static int64_t hfa_scanwork(struct thr_info *thr)
{
	struct cgpu_info *hashfast = thr->cgpu;
	struct hashfast_info *info = hashfast->device_data;
	int64_t hashes;
	int jobs, ret;

	if (unlikely(hashfast->usbinfo.nodev)) {
		applog(LOG_WARNING, "HFA %d: device disappeared, disabling",
		       hashfast->device_id);
		return -1;
	}

	if (unlikely(thr->work_restart)) {
restart:
		thr->work_restart = false;
		ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), 0, (uint8_t *)NULL, 0);
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "HFA %d: Failed to reset after write failure, disabling",
				       hashfast->device_id);
				return -1;
			}
		}
	}

	jobs = hfa_jobs(info);

	if (!jobs) {
		ret = restart_wait(thr, 100);
		if (unlikely(!ret))
			goto restart;
		jobs = hfa_jobs(info);
	}

	if (jobs) {
		applog(LOG_DEBUG, "HFA %d: Sending %d new jobs", hashfast->device_id,
		       jobs);
	}

	while (jobs-- > 0) {
		struct hf_hash_usb op_hash_data;
		struct work *work;
		uint64_t intdiff;
		int i, sequence;
		uint32_t *p;

		/* This is a blocking function if there's no work */
		work = get_work(thr, thr->id);

		/* Assemble the data frame and send the OP_HASH packet */
		memcpy(op_hash_data.midstate, work->midstate, sizeof(op_hash_data.midstate));
		memcpy(op_hash_data.merkle_residual, work->data + 64, 4);
		p = (uint32_t *)(work->data + 64 + 4);
		op_hash_data.timestamp = *p++;
		op_hash_data.bits = *p++;
		op_hash_data.starting_nonce = 0;
		op_hash_data.nonce_loops = 0;
		op_hash_data.ntime_loops = 0;

		/* Set the number of leading zeroes to look for based on diff.
		 * Diff 1 = 32, Diff 2 = 33, Diff 4 = 34 etc. */
		intdiff = (uint64_t)work->device_diff;
		for (i = 31; intdiff; i++, intdiff >>= 1);
		op_hash_data.search_difficulty = i;
		op_hash_data.group = 0;
		if ((sequence = info->hash_sequence_head + 1) >= info->num_sequence)
			sequence = 0;
		ret = hfa_send_frame(hashfast, OP_HASH, sequence, (uint8_t *)&op_hash_data, sizeof(op_hash_data));
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "HFA %d: Failed to reset after write failure, disabling",
				       hashfast->device_id);
				return -1;
			}
		}

		mutex_lock(&info->lock);
		info->hash_sequence_head = sequence;
		info->works[info->hash_sequence_head] = work;
		mutex_unlock(&info->lock);

		applog(LOG_DEBUG, "HFA %d: OP_HASH sequence %d search_difficulty %d work_difficulty %g",
		       hashfast->device_id, info->hash_sequence_head,
		       op_hash_data.search_difficulty, work->work_difficulty);
	}

	mutex_lock(&info->lock);
	hashes = info->hash_count;
	info->hash_count = 0;
	mutex_unlock(&info->lock);

	return hashes;
}
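/*
 * Standalone sketch of the search_difficulty calculation used in
 * hfa_scanwork() above: the device is told how many leading zero bits to
 * require in a hash before reporting it, so difficulty 1 maps to 32 bits and
 * each doubling of the difficulty adds one more bit (32 + floor(log2(diff))).
 * The helper name is illustrative only, not part of the driver.
 */
#include <stdint.h>

static int sketch_search_difficulty(uint64_t intdiff)
{
	int i;

	/* Same loop as the driver: count how many times the diff can be halved */
	for (i = 31; intdiff; i++, intdiff >>= 1)
		;
	return i; /* diff 1 -> 32, diff 2 -> 33, diff 3 -> 33, diff 4 -> 34, ... */
}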
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct libztex_device *ztex;
	unsigned char sendbuf[44];
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	memcpy(sendbuf, work->data + 64, 12);
	memcpy(sendbuf + 12, work->midstate, 32);

	ztex_selectFpga(ztex, cgpu->proc_id);
	i = libztex_sendHashData(ztex, sendbuf);
	if (i < 0) {
		// Something wrong happened in send
		applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, retrying", cgpu->proc_repr, i);
		cgsleep_ms(500);
		i = libztex_sendHashData(ztex, sendbuf);
		if (i < 0) {
			// And there's nothing we can do about it
			ztex_disable(thr);
			applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, giving up", cgpu->proc_repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

	applog(LOG_DEBUG, "%"PRIpreprv": sent hashdata", cgpu->proc_repr);

	lastnonce = calloc(1, sizeof(uint32_t) * ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate lastnonce[%d]", cgpu->proc_repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate backlog[%d]", cgpu->proc_repr, backlog_max);
		free(lastnonce);
		return -1;
	}

	overflow = false;
	int count = 0;

	applog(LOG_DEBUG, "%"PRIpreprv": entering poll loop", cgpu->proc_repr);
	while (!(overflow || thr->work_restart)) {
		count++;

		if (!restart_wait(thr, 250)) {
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}

		ztex_selectFpga(ztex, cgpu->proc_id);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			// Something wrong happened in read
			applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, retrying", cgpu->proc_repr, i);
			cgsleep_ms(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				// And there's nothing we can do about it
				ztex_disable(thr);
				applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, giving up", cgpu->proc_repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}

		dclk_gotNonces(&ztex->dclk);

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			if (nonce > noncecnt)
				noncecnt = nonce;

			if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) {
				applog(LOG_DEBUG, "%"PRIpreprv": overflow nonce=%08x lastnonce=%08x", cgpu->proc_repr, nonce, lastnonce[i]);
				overflow = true;
			} else
				lastnonce[i] = nonce;

			if (!ztex_checkNonce(cgpu, work, &hdata[i])) {
				// do not count errors in the first 500ms after sendHashData (2x250 wait time)
				if (count > 2)
					dclk_errorCount(&ztex->dclk, 1.0 / ztex->numNonces);

				inc_hw_errors_only(thr);
			}

			for (j = 0; j <= ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];
				if (nonce == ztex->offsNonces)
					continue;

				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					backlog[backlog_p++] = nonce;
					if (backlog_p >= backlog_max)
						backlog_p = 0;

					work->blk.nonce = 0xffffffff;
					if (!j || test_nonce(work, nonce, false))
						submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%"PRIpreprv": submitted %08x (from N%dE%d)", cgpu->proc_repr, nonce, i, j);
				}
			}
		}
	}

	dclk_preUpdate(&ztex->dclk);

	if (!ztex_updateFreq(thr)) {
		// Something really serious happened, so mark this thread as dead!
		free(lastnonce);
		free(backlog);
		return -1;
	}

	applog(LOG_DEBUG, "%"PRIpreprv": exit %1.8X", cgpu->proc_repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}
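/*
 * Standalone sketch of the per-core overflow test in ztex_scanhash() above.
 * The FPGA reports its current nonce counter on each poll; the scan is
 * treated as exhausted once the space remaining above the counter is smaller
 * than the last advance, or once the counter has wrapped back below the
 * previous reading.  The helper name is illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_nonce_overflowed(uint32_t nonce, uint32_t lastnonce)
{
	return ((0xffffffff - nonce) < (nonce - lastnonce)) || nonce < lastnonce;
}

/*
 * sketch_nonce_overflowed(0x10000000, 0x08000000) == false  (plenty of space left)
 * sketch_nonce_overflowed(0xfff00000, 0x80000000) == true   (nearly exhausted)
 * sketch_nonce_overflowed(0x00000100, 0xff000000) == true   (counter wrapped)
 */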
static int64_t hfa_scanwork(struct thr_info *thr)
{
	struct cgpu_info *hashfast = thr->cgpu;
	struct hashfast_info *info = hashfast->device_data;
	int jobs, ret, cycles = 0;
	int64_t hashes;

	if (unlikely(hashfast->usbinfo.nodev)) {
		applog(LOG_WARNING, "%s %d: device disappeared, disabling",
		       hashfast->drv->name, hashfast->device_id);
		return -1;
	}

	if (unlikely(last_getwork - hashfast->last_device_valid_work > 60)) {
		applog(LOG_WARNING, "%s %d: No valid hashes for over 1 minute, attempting to reset",
		       hashfast->drv->name, hashfast->device_id);
		if (info->hash_clock_rate > HFA_CLOCK_DEFAULT) {
			info->hash_clock_rate -= 5;
			if (info->hash_clock_rate < opt_hfa_hash_clock)
				opt_hfa_hash_clock = info->hash_clock_rate;
			applog(LOG_WARNING, "%s %d: Decreasing clock speed to %d with reset",
			       hashfast->drv->name, hashfast->device_id, info->hash_clock_rate);
		}
		ret = hfa_reset(hashfast, info);
		if (!ret) {
			applog(LOG_ERR, "%s %d: Failed to reset after hash failure, disabling",
			       hashfast->drv->name, hashfast->device_id);
			return -1;
		}
		applog(LOG_NOTICE, "%s %d: Reset successful", hashfast->drv->name,
		       hashfast->device_id);
	}

	if (unlikely(thr->work_restart)) {
restart:
		info->last_restart = time(NULL);
		thr->work_restart = false;
		ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), 0, (uint8_t *)NULL, 0);
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "%s %d: Failed to reset after write failure, disabling",
				       hashfast->drv->name, hashfast->device_id);
				return -1;
			}
		}
		/* Give a full allotment of jobs after a restart, not waiting
		 * for the status update telling us how much to give. */
		jobs = info->usb_init_base.inflight_target;
	} else {
		/* Only adjust die clocks if there's no restart since two
		 * restarts back to back get ignored. */
		hfa_temp_clock(hashfast, info);
		jobs = hfa_jobs(hashfast, info);
	}

	/* Wait on restart_wait for up to 0.5 seconds or submit jobs as soon as
	 * they're required. */
	while (!jobs && ++cycles < 5) {
		ret = restart_wait(thr, 100);
		if (unlikely(!ret))
			goto restart;
		jobs = hfa_jobs(hashfast, info);
	}

	if (jobs) {
		applog(LOG_DEBUG, "%s %d: Sending %d new jobs", hashfast->drv->name, hashfast->device_id,
		       jobs);
	}

	while (jobs-- > 0) {
		struct hf_hash_usb op_hash_data;
		struct work *work;
		uint64_t intdiff;
		int i, sequence;
		uint32_t *p;

		/* This is a blocking function if there's no work */
		work = get_work(thr, thr->id);

		/* Assemble the data frame and send the OP_HASH packet */
		memcpy(op_hash_data.midstate, work->midstate, sizeof(op_hash_data.midstate));
		memcpy(op_hash_data.merkle_residual, work->data + 64, 4);
		p = (uint32_t *)(work->data + 64 + 4);
		op_hash_data.timestamp = *p++;
		op_hash_data.bits = *p++;
		op_hash_data.starting_nonce = 0;
		op_hash_data.nonce_loops = 0;
		op_hash_data.ntime_loops = 0;

		/* Set the number of leading zeroes to look for based on diff.
		 * Diff 1 = 32, Diff 2 = 33, Diff 4 = 34 etc. */
		intdiff = (uint64_t)work->device_diff;
		for (i = 31; intdiff; i++, intdiff >>= 1);
		op_hash_data.search_difficulty = i;
		op_hash_data.group = 0;
		if ((sequence = info->hash_sequence_head + 1) >= info->num_sequence)
			sequence = 0;
		ret = hfa_send_frame(hashfast, OP_HASH, sequence, (uint8_t *)&op_hash_data, sizeof(op_hash_data));
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "%s %d: Failed to reset after write failure, disabling",
				       hashfast->drv->name, hashfast->device_id);
				return -1;
			}
		}

		mutex_lock(&info->lock);
		info->hash_sequence_head = sequence;
		info->works[info->hash_sequence_head] = work;
		mutex_unlock(&info->lock);

		applog(LOG_DEBUG, "%s %d: OP_HASH sequence %d search_difficulty %d work_difficulty %g",
		       hashfast->drv->name, hashfast->device_id, info->hash_sequence_head,
		       op_hash_data.search_difficulty, work->work_difficulty);
	}

	/* Only count 2/3 of the hashes to smooth out the hashrate for cycles
	 * that have no hashes added. */
	mutex_lock(&info->lock);
	hashes = info->hash_count / 3 * 2;
	info->calc_hashes += hashes;
	info->hash_count -= hashes;
	mutex_unlock(&info->lock);

	return hashes;
}
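/*
 * Standalone sketch of the hashrate smoothing at the end of hfa_scanwork()
 * above: each call reports only 2/3 of the accumulated hash count and leaves
 * the remainder for later calls, so cycles that add no new hashes still
 * report something and the rate decays geometrically instead of dropping to
 * zero.  Names here are illustrative only.
 */
#include <stdint.h>

struct sketch_hash_acc {
	int64_t hash_count;	/* hashes accumulated since the last report */
};

static int64_t sketch_report_hashes(struct sketch_hash_acc *acc)
{
	int64_t hashes = acc->hash_count / 3 * 2;

	acc->hash_count -= hashes;
	return hashes;
	/* e.g. 900000 pending -> reports 600000, then 200000, then 66666, ...
	 * over successive calls if nothing new is added */
}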
static int64_t bitfury_scanHash(struct thr_info *thr)
{
	static struct bitfury_device *devices; // TODO Move somewhere to appropriate place
	int chip_n;
	int chip;
	uint64_t hashes = 0;
	unsigned char line[2048];
	char stat_lines[32][256] = {0};
	static int first = 0; // TODO Move to detect()
	int i;
	static int shift_number = 1;
	static struct timeval spi_started;
	struct timeval now;
	struct cgpu_info *cgpu = thr->cgpu;

	devices = thr->cgpu->devices;
	chip_n = thr->cgpu->chip_n;

	/* One-off chip initialisation on the first call */
	if (!first) {
		for (i = 0; i < chip_n; i++)
			devices[i].osc6_bits = 50;
		set_chip_opts(devices, chip_n);
		for (i = 0; i < chip_n; i++)
			send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits);
		cgtime(&spi_started);
	}
	first = 1;

	/* Let the chips run for at least 800ms since the last SPI transfer,
	 * but abandon the queued work if a restart interrupts the wait */
	cgtime(&now);
	int wait = 1000000 * (now.tv_sec - spi_started.tv_sec) + now.tv_usec - spi_started.tv_usec;
	if (wait < 800000) {
		//cgsleep_ms((800000-wait)/1000);
		if (restart_wait(thr, (800000 - wait) / 1000) != ETIMEDOUT) {
			// purge work
			for (chip = 0; chip < chip_n; chip++) {
				if (devices[chip].bfwork.work != NULL)
					work_completed(thr->cgpu, devices[chip].bfwork.work);
				devices[chip].bfwork.work = NULL;
				devices[chip].bfwork.results_n = 0;
				devices[chip].bfwork.results_sent = 0;
			}
		}
	}

	/* Ensure every chip has a queued work item and payload before the transfer */
	for (chip = 0; chip < chip_n; chip++) {
		devices[chip].job_switched = 0;
		if (!devices[chip].bfwork.work) {
			devices[chip].bfwork.work = get_queued(thr->cgpu);
			if (devices[chip].bfwork.work == NULL)
				return 0;
			work_to_payload(&(devices[chip].bfwork.payload), devices[chip].bfwork.work);
		}
	}

	cgtime(&spi_started);
	libbitfury_sendHashData(devices, chip_n);

	chip = 0;
	int high = 0;
	double aveg = 0.0;
	int total = 0;
	int futures = 0;
	for (; chip < chip_n; chip++) {
		if (devices[chip].job_switched) {
			int i = 0;
			struct work *work = devices[chip].bfwork.work;
			struct work *owork = devices[chip].obfwork.work;
			struct work *o2work = devices[chip].o2bfwork.work;

			if (owork)
				i += submit_work(&devices[chip].obfwork, thr);
			if (o2work)
				i += submit_work(&devices[chip].o2bfwork, thr);
			if (work)
				i += submit_work(&devices[chip].bfwork, thr);

			high = high > i ? high : i;
			total += i;
			devices[chip].job_switched = 0;

			/* Retire the oldest work and rotate the three-deep pipeline */
			if (o2work)
				work_completed(thr->cgpu, o2work);
			//printf("%d %d %d\n",devices[chip].o2bfwork.results_n,devices[chip].obfwork.results_n,devices[chip].bfwork.results_n);
			memcpy(&(devices[chip].o2bfwork), &(devices[chip].obfwork), sizeof(struct bitfury_work));
			memcpy(&(devices[chip].obfwork), &(devices[chip].bfwork), sizeof(struct bitfury_work));
			devices[chip].bfwork.work = NULL;
			devices[chip].bfwork.results_n = 0;
			devices[chip].bfwork.results_sent = 0;

			hashes += 0xffffffffull * i;
		}
		/*
		if (shift_number % 100 == 0) {
			int len = strlen(stat_lines[devices[chip].slot]);
			snprintf(stat_lines[devices[chip].slot]+len, 256-len, "%d: %d/%d ", chip, devices[chip].nonces_found/devices[chip].nonce_errors);
		}
		*/
	}
	aveg = (double)total / chip_n;
	//applog(LOG_WARNING, "high: %d aver: %4.2f total %d futures %d", high, aveg, total, futures);
	if (shift_number % 100 == 0) {
		/*
		applog(LOG_WARNING, stat_lines[0]);
		applog(LOG_WARNING, stat_lines[1]);
		applog(LOG_WARNING, stat_lines[2]);
		applog(LOG_WARNING, stat_lines[3]);
		*/
	}
	shift_number++;

	return hashes;
}
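/*
 * Illustrative sketch of the three-deep work pipeline rotated inside
 * bitfury_scanHash() above: a nonce coming back from a chip may belong to the
 * job currently loaded, the previous one, or the one before that, so three
 * generations (bfwork, obfwork, o2bfwork) are kept per chip and the oldest is
 * only retired once a job switch is observed.  The type and helper below are
 * simplified stand-ins, not the driver's own structures.
 */
#include <stddef.h>

struct sketch_work;	/* opaque stand-in for struct work */

struct sketch_chip_pipeline {
	struct sketch_work *cur;	/* job in the chip right now */
	struct sketch_work *prev;	/* previous job, may still yield nonces */
	struct sketch_work *prev2;	/* oldest job, safe to retire on rotation */
};

/* Shift the pipeline down one slot after a job switch; the returned pointer is
 * what the driver would hand to work_completed(). */
static struct sketch_work *sketch_rotate_pipeline(struct sketch_chip_pipeline *p)
{
	struct sketch_work *retired = p->prev2;

	p->prev2 = p->prev;
	p->prev = p->cur;
	p->cur = NULL;	/* refilled from get_queued() on the next pass */
	return retired;
}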