/* Detached worker thread: sanity-check the nonce count reported by the OpenCL
 * kernel, submit each candidate nonce, then release the work and the pc_data
 * container (both owned by this thread).  Always returns NULL. */
static void *postcalc_hash(void *userdata)
{
	struct pc_data *pcd = (struct pc_data *)userdata;
	struct thr_info *thr = pcd->thr;
	unsigned int entry = 0;
	/* FOUND serves double duty: it is the index of the count slot in res[]
	 * and a bitmask that bounds any valid count value. */
	int found = FOUND;

	/* Nobody joins this thread; detach so its resources are reclaimed on exit. */
	pthread_detach(pthread_self());

	/* To prevent corrupt values in FOUND from trying to read beyond the
	 * end of the res[] array */
	if (unlikely(pcd->res[found] & ~found)) {
		applog(LOG_WARNING, "%s%d: invalid nonce count - HW error",
		       thr->cgpu->drv->name, thr->cgpu->device_id);
		hw_errors++;
		thr->cgpu->hw_errors++;
		/* Clamp the count into range so the loop below stays in bounds. */
		pcd->res[found] &= found;
	}

	/* Submit every nonce slot the kernel reported. */
	for (entry = 0; entry < pcd->res[found]; entry++) {
		uint32_t nonce = pcd->res[entry];
		applog(LOG_DEBUG, "OCL NONCE %u found in slot %d", nonce, entry);
		submit_nonce(thr, pcd->work, nonce);
	}

	/* This thread owns both the work item and the pc_data wrapper. */
	discard_work(pcd->work);
	free(pcd);

	return NULL;
}
/* Poll a GridSeed (GC3355) device for nonce reports until new work arrives or
 * the read times out, submitting each nonce and tracking per-chip statistics.
 * Returns an estimated hash count for the elapsed interval, or -1 on a
 * communication error / unrecognized response. */
static int64_t gridseed_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
	struct cgpu_info *gridseed = thr->cgpu;
	GRIDSEED_INFO *info = gridseed->device_data;
	unsigned char buf[GRIDSEED_READ_SIZE];
	int ret = 0;
	struct timeval old_scanhash_time = info->scanhash_time;
	int elapsed_ms;

	while (!thr->work_restart &&
	       (ret = gc3355_get_data(gridseed, buf, GRIDSEED_READ_SIZE)) == 0) {
		/* NOTE(review): accepting either header byte (0x55 OR 0x20) looks
		 * permissive; a nonce report presumably starts 0x55 0x20 — confirm
		 * against the GC3355 protocol before tightening to &&. */
		if (buf[0] == 0x55 || buf[1] == 0x20) {
			uint32_t nonce = le32toh(*(uint32_t *)(buf+4));
			/* Map the nonce to the chip that searched that sub-range. */
			uint32_t chip = nonce / ((uint32_t)0xffffffff / info->chips);

			/* Clamp: nonces at the very top of the range would otherwise
			 * index one slot past the per-chip arrays, since
			 * (0xffffffff / chips) * chips can be <= 0xffffffff. */
			if (chip >= (uint32_t)info->chips)
				chip = info->chips - 1;

			info->nonce_count[chip]++;
			if (!submit_nonce(thr, work, nonce))
				info->error_count[chip]++;
		} else {
			applog(LOG_ERR, "Unrecognized response from %i", gridseed->device_id);
			return -1;
		}
	}

	/* A timeout just means no nonce this interval; anything else is fatal. */
	if (ret != 0 && ret != LIBUSB_ERROR_TIMEOUT) {
		applog(LOG_ERR, "No response from %i", gridseed->device_id);
		return -1;
	}

	cgtime(&info->scanhash_time);
	elapsed_ms = ms_tdiff(&info->scanhash_time, &old_scanhash_time);

	/* Estimate hashes done from configured frequency, chip count and time. */
	return GRIDSEED_HASH_SPEED * (double)elapsed_ms * (double)(info->freq * info->chips);
}
/* Submit every result that has not been sent yet, advancing the
 * results_sent cursor as we go.  Returns how many were submitted. */
int submit_work(struct bitfury_work *w, struct thr_info *thr)
{
	const int *nonces = w->results;
	int submitted = 0;
	int idx;

	for (idx = w->results_sent; idx < w->results_n; idx++) {
		submit_nonce(thr, w->work, bswap_32(nonces[idx]));
		++w->results_sent;
		++submitted;
	}

	return submitted;
}
/* CPU mining entry point: repeatedly scan the nonce range with the algorithm
 * selected for this work item, submitting any hit and resuming the scan after
 * it, until the scan function returns without a hit.
 * Returns the number of nonces scanned (0 if none). */
static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce)
{
	uint32_t first_nonce = work->blk.nonce;
	uint32_t last_nonce;
	bool rc;

CPUSearch:
	/* last_nonce is an in/out parameter of the scan function; reset before
	 * each attempt. */
	last_nonce = first_nonce;
	rc = false;

	/* scan nonces for a proof-of-work hash */
	{
		/* Pick the scan routine for this work's algorithm; fall back to
		 * the generic scanner. */
		sha256_func func = scanhash_generic;
		switch (work_mining_algorithm(work)->algo) {
#ifdef USE_SCRYPT
			case POW_SCRYPT:
				func = scanhash_scrypt;
				break;
#endif
#ifdef USE_SHA256D
			case POW_SHA256D:
				/* Optimized SHA256d kernels only handle difficulty >= 1. */
				if (work->nonce_diff >= 1.)
					func = sha256_funcs[opt_algo];
				break;
#endif
			default:
				break;
		}
		/* sha256_funcs[opt_algo] may be NULL if the chosen kernel is
		 * unavailable; bail out rather than call through NULL. */
		if (unlikely(!func))
			applogr(0, LOG_ERR, "%"PRIpreprv": Unknown mining algorithm", thr->cgpu->proc_repr);
		rc = (*func)(
			thr,
			work,
			max_nonce,
			&last_nonce,
			work->blk.nonce
		);
	}

	/* if nonce found, submit work */
	if (unlikely(rc)) {
		applog(LOG_DEBUG, "%"PRIpreprv" found something?", thr->cgpu->proc_repr);
		/* The winning nonce sits at byte offset 76 of the block header. */
		submit_nonce(thr, work, le32toh(*(uint32_t*)&work->data[76]));
		work->blk.nonce = last_nonce + 1;
		/* Keep scanning the remainder of the range. */
		goto CPUSearch;
	}
	else
	if (unlikely(last_nonce == first_nonce))
		return 0;

	work->blk.nonce = last_nonce + 1;
	return last_nonce - first_nonce + 1;
}
static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce) { unsigned char hash1[64]; uint32_t first_nonce = work->blk.nonce; uint32_t last_nonce; bool rc; memcpy(&hash1[0], &hash1_init[0], sizeof(hash1)); CPUSearch: last_nonce = first_nonce; rc = false; /* scan nonces for a proof-of-work hash */ { sha256_func func = sha256_funcs[opt_algo]; rc = (*func)( thr, work->midstate, work->data, hash1, work->hash, work->target, max_nonce, &last_nonce, work->blk.nonce ); } /* if nonce found, submit work */ if (unlikely(rc)) { applog(LOG_DEBUG, "%"PRIpreprv" found something?", thr->cgpu->proc_repr); submit_nonce(thr, work, le32toh(*(uint32_t*)&work->data[76])); work->blk.nonce = last_nonce + 1; goto CPUSearch; } else if (unlikely(last_nonce == first_nonce)) return 0; work->blk.nonce = last_nonce + 1; return last_nonce - first_nonce + 1; }
/* Poll a serial BitForce (BFL) device for job results.  Sends "ZFX" status
 * requests until the device stops reporting busy, applies adaptive sleep
 * tuning, then parses and submits any nonces from the reply.
 * Returns bitforce->nonces on completion, 0 on restart/timeout/idle/error,
 * -1 if the device fd is not open. */
static int64_t bitforce_get_result(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	int fdDev = bitforce->device_fd;
	unsigned int delay_time_ms;
	struct timeval elapsed;
	struct timeval now;
	char pdevbuf[0x100];
	char *pnoncebuf;
	uint32_t nonce;

	if (!fdDev)
		return -1;

	/* Poll until the device gives a non-"busy" reply or we time out. */
	while (1) {
		if (unlikely(thr->work_restart))
			return 0;
		mutex_lock(&bitforce->device_mutex);
		BFwrite(fdDev, "ZFX", 3);
		BFgets(pdevbuf, sizeof(pdevbuf), fdDev);
		mutex_unlock(&bitforce->device_mutex);

		gettimeofday(&now, NULL);
		timersub(&now, &bitforce->work_start_tv, &elapsed);
		if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) {
			applog(LOG_ERR, "BFL%i: took %dms - longer than %dms", bitforce->device_id, tv_to_ms(elapsed), BITFORCE_LONG_TIMEOUT_MS);
			return 0;
		}

		/* "B..." means still busy; empty reply means throttling. */
		if (pdevbuf[0] && strncasecmp(pdevbuf, "B", 1)) /* BFL does not respond during throttling */
			break;

		/* if BFL is throttling, no point checking so quickly */
		delay_time_ms = (pdevbuf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS);
		nmsleep(delay_time_ms);
		bitforce->wait_ms += delay_time_ms;
	}

	if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) {
		/* Device took too long - treat as overheating. */
		applog(LOG_ERR, "BFL%i: took %dms - longer than %dms", bitforce->device_id, tv_to_ms(elapsed), BITFORCE_TIMEOUT_MS);
		bitforce->device_last_not_well = time(NULL);
		bitforce->device_not_well_reason = REASON_DEV_OVER_HEAT;
		bitforce->dev_over_heat_count++;

		if (!pdevbuf[0]) /* Only return if we got nothing after timeout - there still may be results */
			return 0;
	} else if (!strncasecmp(pdevbuf, "N", 1)) {/* Hashing complete (NONCE-FOUND or NO-NONCE) */
		/* Simple timing adjustment. Allow a few polls to cope with
		 * OS timer delays being variably reliable. wait_ms will
		 * always equal sleep_ms when we've waited greater than or
		 * equal to the result return time.*/
		delay_time_ms = bitforce->sleep_ms;
		if (bitforce->wait_ms > bitforce->sleep_ms + (WORK_CHECK_INTERVAL_MS * 2))
			bitforce->sleep_ms += (bitforce->wait_ms - bitforce->sleep_ms) / 2;
		else if (bitforce->wait_ms == bitforce->sleep_ms) {
			if (bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS;
			else if (bitforce->sleep_ms > BITFORCE_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= BITFORCE_CHECK_INTERVAL_MS;
		}

		if (delay_time_ms != bitforce->sleep_ms)
			applog(LOG_DEBUG, "BFL%i: Wait time changed to: %d, waited %u", bitforce->device_id, bitforce->sleep_ms, bitforce->wait_ms);

		/* Work out the average time taken. Float for calculation, uint for display */
		bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT;
		bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5);
	}

	applog(LOG_DEBUG, "BFL%i: waited %dms until %s", bitforce->device_id, bitforce->wait_ms, pdevbuf);
	/* pdevbuf[2] is '-' for "NO-NONCE" but 'N' for "NONCE-FOUND". */
	if (!strncasecmp(&pdevbuf[2], "-", 1))
		return bitforce->nonces;   /* No valid nonce found */
	else if (!strncasecmp(pdevbuf, "I", 1))
		return 0;	/* Device idle */
	else if (strncasecmp(pdevbuf, "NONCE-FOUND", 11)) {
		/* Anything else here is a garbled reply - count it as a HW error. */
		bitforce->hw_errors++;
		applog(LOG_WARNING, "BFL%i: Error: Get result reports: %s", bitforce->device_id, pdevbuf);
		bitforce_clear_buffer(bitforce);
		return 0;
	}

	/* Nonces are comma-separated 8-hex-digit values starting at offset 12
	 * (after "NONCE-FOUND:"). */
	pnoncebuf = &pdevbuf[12];

	while (1) {
		hex2bin((void*)&nonce, pnoncebuf, 4);
#ifndef __BIG_ENDIAN__
		nonce = swab32(nonce);
#endif
		/* A nonce outside the range we assigned means the firmware's
		 * nonce-range support is broken; fall back to full-range work. */
		if (unlikely(bitforce->nonce_range && (nonce >= work->blk.nonce ||
			(work->blk.nonce > 0 && nonce < work->blk.nonce - bitforce->nonces - 1)))) {
			applog(LOG_WARNING, "BFL%i: Disabling broken nonce range support", bitforce->device_id);
			bitforce->nonce_range = false;
			work->blk.nonce = 0xffffffff;
			bitforce->sleep_ms *= 5;
			bitforce->kname = KNAME_WORK;
		}

		submit_nonce(thr, work, nonce);
		if (strncmp(&pnoncebuf[8], ",", 1))
			break;
		pnoncebuf += 9;
	}

	return bitforce->nonces;
}
static bool ztex_checkNonce(struct libztex_device *ztex, struct work *work, struct libztex_hash_data *hdata) { uint32_t *data32 = (uint32_t *)(work->data); unsigned char swap[80]; uint32_t *swap32 = (uint32_t *)swap; unsigned char hash1[32]; unsigned char hash2[32]; uint32_t *hash2_32 = (uint32_t *)hash2; int i; #if defined(__BIGENDIAN__) || defined(MIPSEB) hdata->nonce = swab32(hdata->nonce); hdata->hash7 = swab32(hdata->hash7); #endif work->data[64 + 12 + 0] = (hdata->nonce >> 0) & 0xff; work->data[64 + 12 + 1] = (hdata->nonce >> 8) & 0xff; work->data[64 + 12 + 2] = (hdata->nonce >> 16) & 0xff; work->data[64 + 12 + 3] = (hdata->nonce >> 24) & 0xff; for (i = 0; i < 80 / 4; i++) swap32[i] = swab32(data32[i]); sha2(swap, 80, hash1, false); sha2(hash1, 32, hash2, false); #if defined(__BIGENDIAN__) || defined(MIPSEB) if (hash2_32[7] != ((hdata->hash7 + 0x5be0cd19) & 0xFFFFFFFF)) { #else if (swab32(hash2_32[7]) != ((hdata->hash7 + 0x5be0cd19) & 0xFFFFFFFF)) { #endif ztex->errorCount[ztex->freqM] += 1.0 / ztex->numNonces; applog(LOG_DEBUG, "%s: checkNonce failed for %0.8X", ztex->repr, hdata->nonce); return false; } return true; } static int64_t ztex_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce) { struct libztex_device *ztex; unsigned char sendbuf[44]; int i, j, k; uint32_t *backlog; int backlog_p = 0, backlog_max; uint32_t *lastnonce; uint32_t nonce, noncecnt = 0; bool overflow, found; struct libztex_hash_data hdata[GOLDEN_BACKLOG]; if (thr->cgpu->deven == DEV_DISABLED) return -1; ztex = thr->cgpu->device_ztex; memcpy(sendbuf, work->data + 64, 12); memcpy(sendbuf + 12, work->midstate, 32); ztex_selectFpga(ztex); i = libztex_sendHashData(ztex, sendbuf); if (i < 0) { // Something wrong happened in send applog(LOG_ERR, "%s: Failed to send hash data with err %d, retrying", ztex->repr, i); nmsleep(500); i = libztex_sendHashData(ztex, sendbuf); if (i < 0) { // And there's nothing we can do about it ztex_disable(thr); applog(LOG_ERR, "%s: 
Failed to send hash data with err %d, giving up", ztex->repr, i); ztex_releaseFpga(ztex); return -1; } } ztex_releaseFpga(ztex); applog(LOG_DEBUG, "%s: sent hashdata", ztex->repr); lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces); if (lastnonce == NULL) { applog(LOG_ERR, "%s: failed to allocate lastnonce[%d]", ztex->repr, ztex->numNonces); return -1; } /* Add an extra slot for detecting dupes that lie around */ backlog_max = ztex->numNonces * (2 + ztex->extraSolutions); backlog = calloc(1, sizeof(uint32_t) * backlog_max); if (backlog == NULL) { applog(LOG_ERR, "%s: failed to allocate backlog[%d]", ztex->repr, backlog_max); return -1; } overflow = false; applog(LOG_DEBUG, "%s: entering poll loop", ztex->repr); while (!(overflow || thr->work_restart)) { nmsleep(250); if (thr->work_restart) { applog(LOG_DEBUG, "%s: New work detected", ztex->repr); break; } ztex_selectFpga(ztex); i = libztex_readHashData(ztex, &hdata[0]); if (i < 0) { // Something wrong happened in read applog(LOG_ERR, "%s: Failed to read hash data with err %d, retrying", ztex->repr, i); nmsleep(500); i = libztex_readHashData(ztex, &hdata[0]); if (i < 0) { // And there's nothing we can do about it ztex_disable(thr); applog(LOG_ERR, "%s: Failed to read hash data with err %d, giving up", ztex->repr, i); free(lastnonce); free(backlog); ztex_releaseFpga(ztex); return -1; } } ztex_releaseFpga(ztex); if (thr->work_restart) { applog(LOG_DEBUG, "%s: New work detected", ztex->repr); break; } ztex->errorCount[ztex->freqM] *= 0.995; ztex->errorWeight[ztex->freqM] = ztex->errorWeight[ztex->freqM] * 0.995 + 1.0; for (i = 0; i < ztex->numNonces; i++) { nonce = hdata[i].nonce; #if defined(__BIGENDIAN__) || defined(MIPSEB) nonce = swab32(nonce); #endif if (nonce > noncecnt) noncecnt = nonce; if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) { applog(LOG_DEBUG, "%s: overflow nonce=%0.8x lastnonce=%0.8x", ztex->repr, nonce, lastnonce[i]); overflow = true; } else lastnonce[i] = nonce; 
#if !(defined(__BIGENDIAN__) || defined(MIPSEB)) nonce = swab32(nonce); #endif if (!ztex_checkNonce(ztex, work, &hdata[i])) { thr->cgpu->hw_errors++; continue; } for (j=0; j<=ztex->extraSolutions; j++) { nonce = hdata[i].goldenNonce[j]; if (nonce > 0) { found = false; for (k = 0; k < backlog_max; k++) { if (backlog[k] == nonce) { found = true; break; } } if (!found) { applog(LOG_DEBUG, "%s: Share found N%dE%d", ztex->repr, i, j); backlog[backlog_p++] = nonce; if (backlog_p >= backlog_max) backlog_p = 0; #if defined(__BIGENDIAN__) || defined(MIPSEB) nonce = swab32(nonce); #endif work->blk.nonce = 0xffffffff; submit_nonce(thr, work, nonce); applog(LOG_DEBUG, "%s: submitted %0.8x", ztex->repr, nonce); } } } } } ztex->errorRate[ztex->freqM] = ztex->errorCount[ztex->freqM] / ztex->errorWeight[ztex->freqM] * (ztex->errorWeight[ztex->freqM] < 100? ztex->errorWeight[ztex->freqM] * 0.01: 1.0); if (ztex->errorRate[ztex->freqM] > ztex->maxErrorRate[ztex->freqM]) ztex->maxErrorRate[ztex->freqM] = ztex->errorRate[ztex->freqM]; if (!ztex_updateFreq(ztex)) { // Something really serious happened, so mark this thread as dead! 
free(lastnonce); free(backlog); return -1; } applog(LOG_DEBUG, "%s: exit %1.8X", ztex->repr, noncecnt); work->blk.nonce = 0xffffffff; free(lastnonce); free(backlog); return noncecnt; } static void ztex_statline_before(char *buf, struct cgpu_info *cgpu) { if (cgpu->deven == DEV_ENABLED) { tailsprintf(buf, "%s-%d | ", cgpu->device_ztex->snString, cgpu->device_ztex->fpgaNum+1); tailsprintf(buf, "%0.1fMHz | ", cgpu->device_ztex->freqM1 * (cgpu->device_ztex->freqM + 1)); } } static bool ztex_prepare(struct thr_info *thr) { struct timeval now; struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex = cgpu->device_ztex; gettimeofday(&now, NULL); get_datestamp(cgpu->init, &now); ztex_selectFpga(ztex); if (libztex_configureFpga(ztex) != 0) { libztex_resetFpga(ztex); ztex_releaseFpga(ztex); applog(LOG_ERR, "%s: Disabling!", thr->cgpu->device_ztex->repr); thr->cgpu->deven = DEV_DISABLED; return true; } ztex->freqM = ztex->freqMaxM+1;; //ztex_updateFreq(ztex); libztex_setFreq(ztex, ztex->freqMDefault); ztex_releaseFpga(ztex); applog(LOG_DEBUG, "%s: prepare", ztex->repr); return true; } static void ztex_shutdown(struct thr_info *thr) { if (thr->cgpu->device_ztex != NULL) { if (thr->cgpu->device_ztex->fpgaNum == 0) pthread_mutex_destroy(&thr->cgpu->device_ztex->mutex); applog(LOG_DEBUG, "%s: shutdown", thr->cgpu->device_ztex->repr); libztex_destroy_device(thr->cgpu->device_ztex); thr->cgpu->device_ztex = NULL; } } static void ztex_disable(struct thr_info *thr) { applog(LOG_ERR, "%s: Disabling!", thr->cgpu->device_ztex->repr); devices[thr->cgpu->device_id]->deven = DEV_DISABLED; ztex_shutdown(thr); }
/* Poll a USB BitForce (BFL) device for job results via work-status requests,
 * apply adaptive sleep tuning, then parse and submit any nonces returned.
 * Returns bitforce->nonces on completion, 0 on restart/timeout/idle/error. */
static int64_t bitforce_get_result(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned int delay_time_ms;
	struct timeval elapsed;
	struct timeval now;
	char buf[BITFORCE_BUFSIZ+1];
	int amount;
	char *pnoncebuf;
	uint32_t nonce;

	/* Poll until the device gives a non-"busy" reply or we time out. */
	while (1) {
		if (unlikely(thr->work_restart))
			return 0;

		mutex_lock(&bitforce->device_mutex);
		usb_write(bitforce, BITFORCE_WORKSTATUS, BITFORCE_WORKSTATUS_LEN, &amount, C_REQUESTWORKSTATUS);
		usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETWORKSTATUS);
		mutex_unlock(&bitforce->device_mutex);

		cgtime(&now);
		timersub(&now, &bitforce->work_start_tv, &elapsed);
		if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) {
			applog(LOG_ERR, "%s%i: took %ldms - longer than %dms",
				bitforce->drv->name, bitforce->device_id,
				tv_to_ms(elapsed), BITFORCE_LONG_TIMEOUT_MS);
			return 0;
		}

		/* "B..." means still busy; empty reply means throttling. */
		if (amount > 0 && buf[0] && strncasecmp(buf, "B", 1)) /* BFL does not respond during throttling */
			break;

		/* if BFL is throttling, no point checking so quickly */
		delay_time_ms = (buf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS);
		nmsleep(delay_time_ms);
		bitforce->wait_ms += delay_time_ms;
	}

	if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) {
		/* Took too long: record an over-heat event. */
		applog(LOG_ERR, "%s%i: took %ldms - longer than %dms",
			bitforce->drv->name, bitforce->device_id,
			tv_to_ms(elapsed), BITFORCE_TIMEOUT_MS);
		dev_error(bitforce, REASON_DEV_OVER_HEAT);

		/* Only return if we got nothing after timeout - there still may be results */
		if (amount == 0)
			return 0;
	} else if (!strncasecmp(buf, BITFORCE_EITHER, BITFORCE_EITHER_LEN)) {
		/* Simple timing adjustment. Allow a few polls to cope with
		 * OS timer delays being variably reliable. wait_ms will
		 * always equal sleep_ms when we've waited greater than or
		 * equal to the result return time.*/
		delay_time_ms = bitforce->sleep_ms;

		if (bitforce->wait_ms > bitforce->sleep_ms + (WORK_CHECK_INTERVAL_MS * 2))
			bitforce->sleep_ms += (bitforce->wait_ms - bitforce->sleep_ms) / 2;
		else if (bitforce->wait_ms == bitforce->sleep_ms) {
			if (bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS;
			else if (bitforce->sleep_ms > BITFORCE_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= BITFORCE_CHECK_INTERVAL_MS;
		}

		if (delay_time_ms != bitforce->sleep_ms)
			applog(LOG_DEBUG, "%s%i: Wait time changed to: %d, waited %u",
				bitforce->drv->name, bitforce->device_id,
				bitforce->sleep_ms, bitforce->wait_ms);

		/* Work out the average time taken. Float for calculation, uint for display */
		bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT;
		bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5);
	}

	applog(LOG_DEBUG, "%s%i: waited %dms until %s",
		bitforce->drv->name, bitforce->device_id,
		bitforce->wait_ms, buf);
	if (!strncasecmp(buf, BITFORCE_NO_NONCE, BITFORCE_NO_NONCE_MATCH))
		return bitforce->nonces;   /* No valid nonce found */
	else if (!strncasecmp(buf, BITFORCE_IDLE, BITFORCE_IDLE_MATCH))
		return 0;	/* Device idle */
	else if (strncasecmp(buf, BITFORCE_NONCE, BITFORCE_NONCE_LEN)) {
		/* Unrecognized reply: count a HW error and reinitialise. */
		bitforce->hw_errors++;
		applog(LOG_WARNING, "%s%i: Error: Get result reports: %s",
			bitforce->drv->name, bitforce->device_id, buf);
		bitforce_initialise(bitforce, true);
		return 0;
	}

	/* Nonces are comma-separated 8-hex-digit values starting at offset 12. */
	pnoncebuf = &buf[12];

	while (1) {
		hex2bin((void*)&nonce, pnoncebuf, 4);
#ifndef __BIG_ENDIAN__
		nonce = swab32(nonce);
#endif
		/* A nonce outside the assigned range means the firmware's
		 * nonce-range support is broken; fall back to full-range work. */
		if (unlikely(bitforce->nonce_range && (nonce >= work->blk.nonce ||
			(work->blk.nonce > 0 && nonce < work->blk.nonce - bitforce->nonces - 1)))) {
			applog(LOG_WARNING, "%s%i: Disabling broken nonce range support",
				bitforce->drv->name, bitforce->device_id);
			bitforce->nonce_range = false;
			work->blk.nonce = 0xffffffff;
			bitforce->sleep_ms *= 5;
			bitforce->kname = KNAME_WORK;
		}

		submit_nonce(thr, work, nonce);
		if (strncmp(&pnoncebuf[8], ",", 1))
			break;
		pnoncebuf += 9;
	}

	return bitforce->nonces;
}
/* Send the current work to the Ztex FPGA, then poll it for golden nonces
 * until the device wraps its nonce range or new work arrives.  Tracks
 * per-frequency error statistics, suppressing errors seen in the first
 * 500ms after a job change, and dedupes submissions via a backlog ring.
 * Returns the highest nonce seen (approximate hash count), or -1 on error. */
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce)
{
	struct libztex_device *ztex;
	unsigned char sendbuf[44];
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	/* 12 tail bytes of the header plus the midstate form the job. */
	memcpy(sendbuf, work->data + 64, 12);
	memcpy(sendbuf + 12, work->midstate, 32);

	ztex_selectFpga(ztex);
	i = libztex_sendHashData(ztex, sendbuf);
	if (i < 0) {
		/* Something wrong happened in send; retry once after a pause. */
		applog(LOG_ERR, "%s: Failed to send hash data with err %d, retrying", ztex->repr, i);
		nmsleep(500);
		i = libztex_sendHashData(ztex, sendbuf);
		if (i < 0) {
			/* And there's nothing we can do about it */
			ztex_disable(thr);
			applog(LOG_ERR, "%s: Failed to send hash data with err %d, giving up", ztex->repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

	applog(LOG_DEBUG, "%s: sent hashdata", ztex->repr);

	lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%s: failed to allocate lastnonce[%d]", ztex->repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%s: failed to allocate backlog[%d]", ztex->repr, backlog_max);
		/* Fix: lastnonce was leaked on this error path. */
		free(lastnonce);
		return -1;
	}

	overflow = false;
	int count = 0;
	int validNonces = 0;
	double errorCount = 0;

	applog(LOG_DEBUG, "%s: entering poll loop", ztex->repr);
	while (!(overflow || thr->work_restart)) {
		count++;

		/* Sleep ~250ms in 10ms slices so work restarts abort quickly. */
		int sleepcount = 0;
		while (thr->work_restart == 0 && sleepcount < 25) {
			nmsleep(10);
			sleepcount += 1;
		}

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		ztex_selectFpga(ztex);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			/* Something wrong happened in read; retry once. */
			applog(LOG_ERR, "%s: Failed to read hash data with err %d, retrying", ztex->repr, i);
			nmsleep(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				/* And there's nothing we can do about it */
				ztex_disable(thr);
				applog(LOG_ERR, "%s: Failed to read hash data with err %d, giving up", ztex->repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		/* Exponential decay of the error statistics per poll. */
		ztex->errorCount[ztex->freqM] *= 0.995;
		ztex->errorWeight[ztex->freqM] = ztex->errorWeight[ztex->freqM] * 0.995 + 1.0;

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			if (nonce > noncecnt)
				noncecnt = nonce;
			/* Detect the device wrapping its nonce counter. */
			if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) {
				applog(LOG_DEBUG, "%s: overflow nonce=%0.8x lastnonce=%0.8x", ztex->repr, nonce, lastnonce[i]);
				overflow = true;
			} else
				lastnonce[i] = nonce;

			/* Fix: compare against this slot's hash7, not always slot 0's
			 * (hdata->hash7 == hdata[0].hash7, wrong for numNonces > 1). */
			if (ztex_checkNonce(work, nonce) != (hdata[i].hash7 + 0x5be0cd19)) {
				applog(LOG_DEBUG, "%s: checkNonce failed for %0.8X", ztex->repr, nonce);

				// do not count errors in the first 500ms after sendHashData (2x250 wait time)
				if (count > 2) {
					thr->cgpu->hw_errors++;
					errorCount += (1.0 / ztex->numNonces);
				}
			}
			else
				validNonces++;

			for (j=0; j<=ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];
				if (nonce == ztex->offsNonces)
					continue;
				// precheck the extraSolutions since they often fail
				if (j > 0 && ztex_checkNonce(work, nonce) != 0)
					continue;
				/* Skip nonces already submitted (backlog ring). */
				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					applog(LOG_DEBUG, "%s: Share found N%dE%d", ztex->repr, i, j);
					backlog[backlog_p++] = nonce;
					if (backlog_p >= backlog_max)
						backlog_p = 0;
					work->blk.nonce = 0xffffffff;
					submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%s: submitted %0.8x", ztex->repr, nonce);
				}
			}
		}
	}

	// only add the errorCount if we had at least some valid nonces or
	// had no valid nonces in the last round
	if (errorCount > 0.0) {
		if (ztex->nonceCheckValid > 0 && validNonces == 0) {
			applog(LOG_ERR, "%s: resetting %.1f errors", ztex->repr, errorCount);
		}
		else {
			ztex->errorCount[ztex->freqM] += errorCount;
		}
	}

	// remember the number of valid nonces for the check in the next round
	ztex->nonceCheckValid = validNonces;

	/* Error rate is weighted down until enough samples are accumulated. */
	ztex->errorRate[ztex->freqM] = ztex->errorCount[ztex->freqM] / ztex->errorWeight[ztex->freqM] * (ztex->errorWeight[ztex->freqM] < 100? ztex->errorWeight[ztex->freqM] * 0.01: 1.0);
	if (ztex->errorRate[ztex->freqM] > ztex->maxErrorRate[ztex->freqM])
		ztex->maxErrorRate[ztex->freqM] = ztex->errorRate[ztex->freqM];

	if (!ztex_updateFreq(ztex)) {
		// Something really serious happened, so mark this thread as dead!
		free(lastnonce);
		free(backlog);
		return -1;
	}

	applog(LOG_DEBUG, "%s: exit %1.8X", ztex->repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}
static int64_t bitfury_scanHash(struct thr_info *thr) { static struct bitfury_device *devices; // TODO Move somewhere to appropriate place int chip_n; int chip; uint64_t hashes = 0; struct timeval now; unsigned char line[2048]; int short_stat = 10; static time_t short_out_t; int long_stat = 1800; static time_t long_out_t; int long_long_stat = 60 * 30; static time_t long_long_out_t; static first = 0; //TODO Move to detect() int i; devices = thr->cgpu->devices; chip_n = thr->cgpu->chip_n; if (!first) { for (i = 0; i < chip_n; i++) { devices[i].osc6_bits = 54; } for (i = 0; i < chip_n; i++) { send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits); } } first = 1; for (chip = 0; chip < chip_n; chip++) { devices[chip].job_switched = 0; if(!devices[chip].work) { devices[chip].work = get_queued(thr->cgpu); if (devices[chip].work == NULL) { return 0; } work_to_payload(&(devices[chip].payload), devices[chip].work); } } libbitfury_sendHashData(devices, chip_n); nmsleep(5); cgtime(&now); chip = 0; for (;chip < chip_n; chip++) { if (devices[chip].job_switched) { int i,j; int *res = devices[chip].results; struct work *work = devices[chip].work; struct work *owork = devices[chip].owork; struct work *o2work = devices[chip].o2work; i = devices[chip].results_n; for (j = i - 1; j >= 0; j--) { if (owork) { submit_nonce(thr, owork, bswap_32(res[j])); devices[chip].stat_ts[devices[chip].stat_counter++] = now.tv_sec; if (devices[chip].stat_counter == BITFURY_STAT_N) { devices[chip].stat_counter = 0; } } if (o2work) { // TEST //submit_nonce(thr, owork, bswap_32(res[j])); } } devices[chip].results_n = 0; devices[chip].job_switched = 0; if (devices[chip].old_nonce && o2work) { submit_nonce(thr, o2work, bswap_32(devices[chip].old_nonce)); i++; } if (devices[chip].future_nonce) { submit_nonce(thr, work, bswap_32(devices[chip].future_nonce)); i++; } if (o2work) work_completed(thr->cgpu, o2work); devices[chip].o2work = devices[chip].owork; devices[chip].owork = 
devices[chip].work; devices[chip].work = NULL; hashes += 0xffffffffull * i; } } if (now.tv_sec - short_out_t > short_stat) { int shares_first = 0, shares_last = 0, shares_total = 0; char stat_lines[32][256] = {0}; int len, k; double gh[32][8] = {0}; double ghsum = 0, gh1h = 0, gh2h = 0; unsigned strange_counter = 0; for (chip = 0; chip < chip_n; chip++) { int shares_found = calc_stat(devices[chip].stat_ts, short_stat, now); double ghash; len = strlen(stat_lines[devices[chip].slot]); ghash = shares_to_ghashes(shares_found, short_stat); gh[devices[chip].slot][chip & 0x07] = ghash; snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz); if(short_out_t && ghash < 0.5) { applog(LOG_WARNING, "Chip_id %d FREQ CHANGE\n", chip); send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits - 1); nmsleep(1); send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits); } shares_total += shares_found; shares_first += chip < 4 ? shares_found : 0; shares_last += chip > 3 ? 
shares_found : 0; strange_counter += devices[chip].strange_counter; devices[chip].strange_counter = 0; } sprintf(line, "vvvvwww SHORT stat %ds: wwwvvvv", short_stat); applog(LOG_WARNING, line); sprintf(line, "stranges: %u", strange_counter); applog(LOG_WARNING, line); for(i = 0; i < 32; i++) if(strlen(stat_lines[i])) { len = strlen(stat_lines[i]); ghsum = 0; gh1h = 0; gh2h = 0; for(k = 0; k < 4; k++) { gh1h += gh[i][k]; gh2h += gh[i][k+4]; ghsum += gh[i][k] + gh[i][k+4]; } snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i); applog(LOG_WARNING, stat_lines[i]); } short_out_t = now.tv_sec; } if (now.tv_sec - long_out_t > long_stat) { int shares_first = 0, shares_last = 0, shares_total = 0; char stat_lines[32][256] = {0}; int len, k; double gh[32][8] = {0}; double ghsum = 0, gh1h = 0, gh2h = 0; for (chip = 0; chip < chip_n; chip++) { int shares_found = calc_stat(devices[chip].stat_ts, long_stat, now); double ghash; len = strlen(stat_lines[devices[chip].slot]); ghash = shares_to_ghashes(shares_found, long_stat); gh[devices[chip].slot][chip & 0x07] = ghash; snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz); shares_total += shares_found; shares_first += chip < 4 ? shares_found : 0; shares_last += chip > 3 ? shares_found : 0; } sprintf(line, "!!!_________ LONG stat %ds: ___________!!!", long_stat); applog(LOG_WARNING, line); for(i = 0; i < 32; i++) if(strlen(stat_lines[i])) { len = strlen(stat_lines[i]); ghsum = 0; gh1h = 0; gh2h = 0; for(k = 0; k < 4; k++) { gh1h += gh[i][k]; gh2h += gh[i][k+4]; ghsum += gh[i][k] + gh[i][k+4]; } snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i); applog(LOG_WARNING, stat_lines[i]); } long_out_t = now.tv_sec; } return hashes; }
/* Send the current work to the Ztex FPGA, then poll for golden nonces until
 * the device wraps its nonce range or new work arrives.  Uses the dclk
 * dynamic-clocking module for error accounting and dedupes submissions via
 * a backlog ring.  Returns the highest nonce seen (approximate hash count),
 * or -1 on error. */
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce)
{
	struct cgpu_info *cgpu = thr->cgpu;
	struct libztex_device *ztex;
	unsigned char sendbuf[44];
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	/* 12 tail bytes of the header plus the midstate form the job. */
	memcpy(sendbuf, work->data + 64, 12);
	memcpy(sendbuf + 12, work->midstate, 32);

	ztex_selectFpga(ztex, cgpu->proc_id);
	i = libztex_sendHashData(ztex, sendbuf);
	if (i < 0) {
		// Something wrong happened in send; retry once after a pause
		applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, retrying", cgpu->proc_repr, i);
		cgsleep_ms(500);
		i = libztex_sendHashData(ztex, sendbuf);
		if (i < 0) {
			// And there's nothing we can do about it
			ztex_disable(thr);
			applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, giving up", cgpu->proc_repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

	applog(LOG_DEBUG, "%"PRIpreprv": sent hashdata", cgpu->proc_repr);

	lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate lastnonce[%d]", cgpu->proc_repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%"PRIpreprv": failed to allocate backlog[%d]", cgpu->proc_repr, backlog_max);
		free(lastnonce);
		return -1;
	}

	overflow = false;
	/* count suppresses error accounting in the first two poll rounds. */
	int count = 0;

	applog(LOG_DEBUG, "%"PRIpreprv": entering poll loop", cgpu->proc_repr);
	while (!(overflow || thr->work_restart)) {
		count++;

		/* restart_wait sleeps up to 250ms; returns false on work restart. */
		if (!restart_wait(thr, 250)) {
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}

		ztex_selectFpga(ztex, cgpu->proc_id);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			// Something wrong happened in read; retry once
			applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, retrying", cgpu->proc_repr, i);
			cgsleep_ms(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				// And there's nothing we can do about it
				ztex_disable(thr);
				applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, giving up", cgpu->proc_repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr);
			break;
		}

		dclk_gotNonces(&ztex->dclk);

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			if (nonce > noncecnt)
				noncecnt = nonce;
			/* Detect the device wrapping its nonce counter. */
			if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) {
				applog(LOG_DEBUG, "%"PRIpreprv": overflow nonce=%08x lastnonce=%08x", cgpu->proc_repr, nonce, lastnonce[i]);
				overflow = true;
			} else
				lastnonce[i] = nonce;

			if (!ztex_checkNonce(cgpu, work, &hdata[i])) {
				// do not count errors in the first 500ms after sendHashData (2x250 wait time)
				if (count > 2)
					dclk_errorCount(&ztex->dclk, 1.0 / ztex->numNonces);
				inc_hw_errors_only(thr);
			}

			for (j=0; j<=ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];
				if (nonce == ztex->offsNonces)
					continue;
				/* Skip nonces already submitted (backlog ring). */
				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					backlog[backlog_p++] = nonce;
					if (backlog_p >= backlog_max)
						backlog_p = 0;
					work->blk.nonce = 0xffffffff;
					/* extraSolutions (j > 0) often fail; pre-test them. */
					if (!j || test_nonce(work, nonce, false))
						submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%"PRIpreprv": submitted %08x (from N%dE%d)", cgpu->proc_repr, nonce, i, j);
				}
			}
		}
	}

	dclk_preUpdate(&ztex->dclk);

	if (!ztex_updateFreq(thr)) {
		// Something really serious happened, so mark this thread as dead!
		free(lastnonce);
		free(backlog);
		return -1;
	}

	applog(LOG_DEBUG, "%"PRIpreprv": exit %1.8X", cgpu->proc_repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}
static int64_t serial_fpga_scanwork(struct thr_info *thr) { struct cgpu_info *serial_fpga; int fd; int ret; struct FPGA_INFO *info; unsigned char ob_bin[44], nonce_buf[SERIAL_READ_SIZE]; char *ob_hex; uint32_t nonce; int64_t hash_count; struct timeval tv_start, tv_finish, elapsed, tv_end, diff; int curr_hw_errors, i, j; uint32_t * ob; ob = (uint32_t *)ob_bin; int count; double Hs, W, fullnonce; int read_count; int64_t estimate_hashes; uint32_t values; int64_t hash_count_range; struct work *work; applog(LOG_DEBUG, "serial_fpga_scanwork..."); if (thr->cgpu->deven == DEV_DISABLED) return -1; serial_fpga = thr->cgpu; info = serial_fpga->device_data; work = get_work(thr, thr->id); if (info->device_fd == -1) { applog(LOG_INFO, "Attemping to Reopen Serial FPGA on %s", serial_fpga->device_path); fd = serial_open(serial_fpga->device_path, SERIAL_IO_SPEED, SERIAL_READ_TIMEOUT, false); if (unlikely(-1 == fd)) { applog(LOG_ERR, "Failed to open Serial FPGA on %s", serial_fpga->device_path); return -1; } else info->device_fd = fd; } fd = info->device_fd; memset(ob_bin, 0, sizeof(ob_bin)); // Currently, extra nonces are not supported // memset((unsigned char*)work->data + 144, 0, 12); // // calc_midstate(work); memcpy(ob_bin, work->midstate, 32); // Midstate memcpy(ob_bin + 32, work->data + 128, 12); // Remaining Bytes From Block Header // Send Bytes To FPGA In Reverse Order unsigned char swap[44]; uint32_t * sw; sw = (uint32_t *)swap; for (j=0; j<8; j++) { sw[j] = swab32(ob[j]); } memcpy(swap + 32, ob_bin + 32, 12); for (j=0; j<44; j++) { ob_bin[j] = swap[j]; } //unsigned char* b = (unsigned char*)(ob_bin); //applog(LOG_WARNING, "swap: %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", b[28],b[29],b[30],b[31],b[32],b[33],b[34],b[35],b[36],b[37],b[38],b[39],b[40],b[41],b[42],b[43]); //applog(LOG_WARNING, "swap: 
%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7],b[8],b[9],b[10],b[11],b[12],b[13],b[14],b[15],b[16],b[17],b[18],b[19],b[20],b[21],b[22],b[23],b[24],b[25],b[26],b[27],b[28],b[29],b[30],b[31],b[32],b[33],b[34],b[35],b[36],b[37],b[38],b[39],b[40],b[41],b[42],b[43]); //#ifndef WIN32 // tcflush(fd, TCOFLUSH); //#endif // Send Data To FPGA ret = write(fd, ob_bin, sizeof(ob_bin)); if (ret != sizeof(ob_bin)) { applog(LOG_ERR, "%s%i: Serial Send Error (ret=%d)", serial_fpga->drv->name, serial_fpga->device_id, ret); serial_fpga_close(thr); dev_error(serial_fpga, REASON_DEV_COMMS_ERROR); return 0; } if (opt_debug) { ob_hex = bin2hex(ob_bin, sizeof(ob_bin)); applog(LOG_DEBUG, "Serial FPGA %d sent: %s", serial_fpga->device_id, ob_hex); free(ob_hex); } elapsed.tv_sec = 0; elapsed.tv_usec = 0; cgtime(&tv_start); applog(LOG_DEBUG, "%s%i: Begin Scan For Nonces", serial_fpga->drv->name, serial_fpga->device_id); while (thr && !thr->work_restart) { memset(nonce_buf,0,4); // Check Serial Port For 1/10 Sec For Nonce ret = read(fd, nonce_buf, SERIAL_READ_SIZE); // Calculate Elapsed Time cgtime(&tv_end); timersub(&tv_end, &tv_start, &elapsed); if (ret == 0) { // No Nonce Found if (elapsed.tv_sec > info->timeout) { applog(LOG_DEBUG, "%s%i: End Scan For Nonces - Time = %d sec", serial_fpga->drv->name, serial_fpga->device_id, elapsed.tv_sec); break; } continue; } else if (ret < SERIAL_READ_SIZE) { applog(LOG_ERR, "%s%i: Serial Read Error (ret=%d)", serial_fpga->drv->name, serial_fpga->device_id, ret); serial_fpga_close(thr); dev_error(serial_fpga, REASON_DEV_COMMS_ERROR); break; } memcpy((char *)&nonce, nonce_buf, SERIAL_READ_SIZE); #if !defined (__BIG_ENDIAN__) && !defined(MIPSEB) nonce = swab32(nonce); #endif curr_hw_errors = serial_fpga->hw_errors; applog(LOG_INFO, "%s%i: Nonce Found - %08X (%5.1fMhz)", 
serial_fpga->drv->name, serial_fpga->device_id, nonce, (double)(1/(info->Hs * 1000000))); submit_nonce(thr, work, nonce); // Update Hashrate if (serial_fpga->hw_errors == curr_hw_errors) info->Hs = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / (double)nonce; } // Estimate Number Of Hashes hash_count = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs; free_work(work); return hash_count; }
/*
 * Send 76 bytes of work to the ztex FPGA (KRAMBLE litecoin protocol),
 * then poll for golden nonces until overflow is forced or new work
 * arrives.  Duplicate solutions are suppressed via a small backlog
 * ring; per-frequency error statistics are updated for the dynamic
 * clocking logic.
 *
 * Returns the highest nonce seen (hash progress estimate) or -1 on a
 * fatal device error (the thread is disabled in that case).
 */
static int64_t ztex_scanhash(struct thr_info *thr, struct work *work,
			     __maybe_unused int64_t max_nonce)
{
	// int numbytes = 80;	// KRAMBLE 80 byte protocol
	int numbytes = 76;	// KRAMBLE 76 byte protocol
	struct libztex_device *ztex;
	unsigned char sendbuf[80];	// KRAMBLE
	int i, j, k;
	uint32_t *backlog;
	int backlog_p = 0, backlog_max;
	uint32_t *lastnonce;
	uint32_t nonce, noncecnt = 0;
	bool overflow, found;
	struct libztex_hash_data hdata[GOLDEN_BACKLOG];

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	ztex = thr->cgpu->device_ztex;

	// memcpy(sendbuf, work->data + 64, 12);
	// memcpy(sendbuf + 12, work->midstate, 32);
	memcpy(sendbuf, work->data, numbytes);	// KRAMBLE

	ztex_selectFpga(ztex);
	i = libztex_sendHashData(ztex, sendbuf, numbytes);
	if (i < 0) {
		/* Something wrong happened in send; pause and retry once */
		applog(LOG_ERR, "%s: Failed to send hash data with err %d, retrying", ztex->repr, i);
		nmsleep(500);
		i = libztex_sendHashData(ztex, sendbuf, numbytes);
		if (i < 0) {
			/* And there's nothing we can do about it */
			ztex_disable(thr);
			applog(LOG_ERR, "%s: Failed to send hash data with err %d, giving up", ztex->repr, i);
			ztex_releaseFpga(ztex);
			return -1;
		}
	}
	ztex_releaseFpga(ztex);

#if 0	// KRAMBLE
	char *data = bin2hex(work->data, sizeof(work->data));
	applog(LOG_INFO, "%s: sent data %s", ztex->repr, data);
#endif

	lastnonce = calloc(1, sizeof(uint32_t) * ztex->numNonces);
	if (lastnonce == NULL) {
		applog(LOG_ERR, "%s: failed to allocate lastnonce[%d]", ztex->repr, ztex->numNonces);
		return -1;
	}

	/* Add an extra slot for detecting dupes that lie around */
	backlog_max = ztex->numNonces * (2 + ztex->extraSolutions);
	backlog = calloc(1, sizeof(uint32_t) * backlog_max);
	if (backlog == NULL) {
		applog(LOG_ERR, "%s: failed to allocate backlog[%d]", ztex->repr, backlog_max);
		free(lastnonce);	/* fix: lastnonce was leaked on this path */
		return -1;
	}

	overflow = false;
	int count = 0;
	int validNonces = 0;
	double errorCount = 0;

	applog(LOG_DEBUG, "%s: entering poll loop", ztex->repr);
	while (!(overflow || thr->work_restart)) {
		count++;

		/* Sleep ~250ms in small chunks so work_restart is noticed quickly */
		int sleepcount = 0;
		while (thr->work_restart == 0 && sleepcount < 25) {
			nmsleep(10);
			sleepcount += 1;
		}

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		ztex_selectFpga(ztex);
		i = libztex_readHashData(ztex, &hdata[0]);
		if (i < 0) {
			/* Something wrong happened in read; pause and retry once */
			applog(LOG_ERR, "%s: Failed to read hash data with err %d, retrying", ztex->repr, i);
			nmsleep(500);
			i = libztex_readHashData(ztex, &hdata[0]);
			if (i < 0) {
				/* And there's nothing we can do about it */
				ztex_disable(thr);
				applog(LOG_ERR, "%s: Failed to read hash data with err %d, giving up", ztex->repr, i);
				free(lastnonce);
				free(backlog);
				ztex_releaseFpga(ztex);
				return -1;
			}
		}
		ztex_releaseFpga(ztex);

		if (thr->work_restart) {
			applog(LOG_DEBUG, "%s: New work detected", ztex->repr);
			break;
		}

		/* Exponential decay of the per-frequency error statistics */
		ztex->errorCount[ztex->freqM] *= 0.995;
		ztex->errorWeight[ztex->freqM] = ztex->errorWeight[ztex->freqM] * 0.995 + 1.0;

		for (i = 0; i < ztex->numNonces; i++) {
			nonce = hdata[i].nonce;
			// by smartbitcoin: fix the verilog bug which sends
			// INT_MAX as nonce sometimes.
			// NB: cgminer nonce counter was int64, ztex driver int32.
			if (nonce > noncecnt && nonce < 1 << 28)
				noncecnt = nonce;

			// KRAMBLE don't overflow if nonce == 0 (eg if fpga is not hashing)
			if ((((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) && nonce) {
				applog(LOG_INFO, "%s: overflow nonce=%08x lastnonce=%08x", ztex->repr, nonce, lastnonce[i]);
				// overflow = true;	// KRAMBLE disabled at litecoin hash rates
			} else
				lastnonce[i] = nonce;

			// KRAMBLE force overflow every 1M nonces (single core
			// needs the larger range to avoid duplicates)
			if (nonce > 0x00100000)
				overflow = true;

			/* fix: was hdata->hash7 (always element 0) while the
			 * loop iterates over hdata[i] */
			if (ztex_checkNonce(work, nonce) != (hdata[i].hash7)) {
				applog(LOG_INFO, "%s: checkNonce failed for %08X hash7 %08X",
				       ztex->repr, nonce, hdata[i].hash7);
				/* do not count errors in the first 500ms after
				 * sendHashData (2x250 wait time) */
				if (count > 2) {
					thr->cgpu->hw_errors++;
					errorCount += (1.0 / ztex->numNonces);
				}
			} else
				validNonces++;

			for (j = 0; j <= ztex->extraSolutions; j++) {
				nonce = hdata[i].goldenNonce[j];
				if (nonce == ztex->offsNonces)
					continue;
				/* precheck the extraSolutions since they often fail */
				if (j > 0 && ztex_checkNonce(work, nonce) != 0)
					continue;
				/* suppress duplicates already in the backlog ring */
				found = false;
				for (k = 0; k < backlog_max; k++) {
					if (backlog[k] == nonce) {
						found = true;
						break;
					}
				}
				if (!found) {
					applog(LOG_INFO, "%s: Share found %08x", ztex->repr, nonce);	// KRAMBLE useful to show its working
					backlog[backlog_p++] = nonce;
					if (backlog_p >= backlog_max)
						backlog_p = 0;
					work->blk.nonce = 0xffffffff;
					submit_nonce(thr, work, nonce);
					applog(LOG_DEBUG, "%s: submitted %08x", ztex->repr, nonce);
				}
			}
		}
	}

	/* only add the errorCount if we had at least some valid nonces or
	 * had no valid nonces in the last round */
	if (errorCount > 0.0) {
		if (ztex->nonceCheckValid > 0 && validNonces == 0) {
			applog(LOG_ERR, "%s: resetting %.1f errors", ztex->repr, errorCount);
		} else {
			ztex->errorCount[ztex->freqM] += errorCount;
		}
	}
	/* remember the number of valid nonces for the check in the next round */
	ztex->nonceCheckValid = validNonces;

	ztex->errorRate[ztex->freqM] = ztex->errorCount[ztex->freqM] / ztex->errorWeight[ztex->freqM] *
		(ztex->errorWeight[ztex->freqM] < 100 ? ztex->errorWeight[ztex->freqM] * 0.01 : 1.0);
	if (ztex->errorRate[ztex->freqM] > ztex->maxErrorRate[ztex->freqM])
		ztex->maxErrorRate[ztex->freqM] = ztex->errorRate[ztex->freqM];

	// KRAMBLE skip ztex_updateFreq if lockClock flag set (ie --ztex-clock
	// set initial and max freq to the same value)
	if (!ztex->lockClock) {
		if (!ztex_updateFreq(ztex)) {
			/* Something really serious happened, so mark this thread as dead! */
			free(lastnonce);
			free(backlog);
			return -1;
		}
	}

	applog(LOG_DEBUG, "%s: exit %1.8X", ztex->repr, noncecnt);

	work->blk.nonce = 0xffffffff;

	free(lastnonce);
	free(backlog);

	return noncecnt;
}