/*
 * Per-thread OpenCL setup: allocates the shared blank result buffer (once),
 * builds the CL state for this thread's virtual GPU, and disables the device
 * on failure. Returns false when the thread must not start.
 */
static bool opencl_thread_prepare(struct thr_info *thr)
{
	char name[256];
	struct timeval now;
	struct cgpu_info *cgpu = thr->cgpu;
	int gpu = cgpu->device_id;
	int virtual_gpu = cgpu->virtual_gpu;
	int i = thr->id;
	/* Shared across all GPU threads: the "try again" hint is printed once. */
	static bool failmessage = false;
	int buffersize = BUFFERSIZE;

	/* blank_res is a process-wide zeroed scratch buffer; first thread in
	 * allocates it, later threads reuse it. */
	if (!blank_res)
		blank_res = (uint32_t *)calloc(buffersize, 1);
	if (!blank_res) {
		/* NOTE(review): message says "opencl_thread_init" but this is
		 * opencl_thread_prepare — historical, left as-is. */
		applog(LOG_ERR, "Failed to calloc in opencl_thread_init");
		return false;
	}

	strcpy(name, "");
	applog(LOG_INFO, "Init GPU thread %i GPU %i virtual GPU %i", i, gpu, virtual_gpu);
	/* initCl fills `name` with the device's kernel/device description. */
	clStates[i] = initCl(virtual_gpu, name, sizeof(name), &cgpu->algorithm);
	if (!clStates[i]) {
#ifdef HAVE_CURSES
		if (use_curses)
			enable_curses();
#endif
		applog(LOG_ERR, "Failed to init GPU thread %d, disabling device %d", i, gpu);
		if (!failmessage) {
			applog(LOG_ERR, "Restarting the GPU from the menu will not fix this.");
			applog(LOG_ERR, "Re-check your configuration and try restarting.");
			failmessage = true;
#ifdef HAVE_CURSES
			char *buf;
			if (use_curses) {
				/* Block until the user acknowledges so the error
				 * isn't lost behind the curses UI. */
				buf = curses_input("Press enter to continue");
				if (buf)
					free(buf);
			}
#endif
		}
		/* Mark the device dead so the scheduler skips it. */
		cgpu->deven = DEV_DISABLED;
		cgpu->status = LIFE_NOSTART;
		dev_error(cgpu, REASON_DEV_NOSTART);
		return false;
	}

	if (!cgpu->name)
		cgpu->name = strdup(name);
	if (!cgpu->kernelname)
		cgpu->kernelname = strdup("ckolivas");
	applog(LOG_INFO, "initCl() finished. Found %s", name);
	cgtime(&now);
	get_datestamp(cgpu->init, sizeof(cgpu->init), &now);
	have_opencl = true;

	return true;
}
/*
 * Drain nonce reports from the GridSeed until the read times out or work is
 * restarted, then return an estimated hash count based on elapsed wall time
 * (the device gives no hash counter of its own).
 */
static int64_t gridseed_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
	struct cgpu_info *gridseed = thr->cgpu;
	GRIDSEED_INFO *info = gridseed->device_data;
	unsigned char buf[GRIDSEED_READ_SIZE];
	int ret = 0;
	struct timeval old_scanhash_time = info->scanhash_time;
	int elapsed_ms;

	while (!thr->work_restart &&
	       (ret = gc3355_get_data(gridseed, buf, GRIDSEED_READ_SIZE)) == 0) {
		/* NOTE(review): this accepts a packet when EITHER header byte
		 * matches (0x55 or 0x20). Device responses normally start with
		 * the 0x55 0x20 pair; `&&` may have been intended — confirm
		 * against the GC3355 protocol before changing. */
		if (buf[0] == 0x55 || buf[1] == 0x20) {
			uint32_t nonce = le32toh(*(uint32_t *)(buf+4));
			/* Map the nonce back to the chip that owns that slice
			 * of the nonce space, for per-chip statistics. */
			uint32_t chip = nonce / ((uint32_t)0xffffffff / info->chips);
			info->nonce_count[chip]++;
			if (!submit_nonce(thr, work, nonce))
				info->error_count[chip]++;
		} else {
			applog(LOG_ERR, "Unrecognized response from %i", gridseed->device_id);
			return -1;
		}
	}
	/* A timeout is the normal "no more data" exit; anything else is fatal. */
	if (ret != 0 && ret != LIBUSB_ERROR_TIMEOUT) {
		applog(LOG_ERR, "No response from %i", gridseed->device_id);
		return -1;
	}

	cgtime(&info->scanhash_time);
	elapsed_ms = ms_tdiff(&info->scanhash_time, &old_scanhash_time);
	/* Estimated hashes = rate-per-ms-per-MHz-per-chip * time * freq * chips. */
	return GRIDSEED_HASH_SPEED * (double)elapsed_ms *
		(double)(info->freq * info->chips);
}
/*
 * Core log sink: routes a preformatted message to syslog when enabled,
 * otherwise prefixes a timestamp and writes to stderr (when not a tty)
 * and the curses log window. Recurses once per day-change to emit a
 * "Log date is now ..." marker when dates are omitted from each line.
 */
void _applog(int prio, const char *str, bool force)
{
#ifdef HAVE_SYSLOG_H
	if (use_syslog) {
		syslog(prio, "%s", str);
	}
#else
	/* Dummy branch so the `else` below pairs correctly when syslog
	 * support is compiled out. */
	if (0) {}
#endif
	else {
		char datetime[64];
		struct timeval tv = {0, 0};
		struct tm *tm;

		cgtime(&tv);

		const time_t tmp_time = tv.tv_sec;
		/* NOTE(review): localtime() returns a shared static buffer and
		 * is not thread-safe — assumes logging is serialized upstream. */
		tm = localtime(&tmp_time);

		/* Day changed: emit a one-off date marker (recursive call)
		 * before logging the message itself. */
		if (opt_log_show_date && (last_date_output_day != tm->tm_mday)) {
			last_date_output_day = tm->tm_mday;
			char date_output_str[64];
			snprintf(date_output_str, sizeof(date_output_str),
				"Log date is now %d-%02d-%02d",
				tm->tm_year + 1900,
				tm->tm_mon + 1,
				tm->tm_mday);
			_applog(prio, date_output_str, force);
		}

		if (opt_log_show_date) {
			snprintf(datetime, sizeof(datetime),
				"[%d-%02d-%02d %02d:%02d:%02d] ",
				tm->tm_year + 1900,
				tm->tm_mon + 1,
				tm->tm_mday,
				tm->tm_hour,
				tm->tm_min,
				tm->tm_sec);
		} else {
			snprintf(datetime, sizeof(datetime),
				"[%02d:%02d:%02d] ",
				tm->tm_hour,
				tm->tm_min,
				tm->tm_sec);
		}

		/* Only output to stderr if it's not going to the screen as well */
		if (!isatty(fileno((FILE *)stderr))) {
			fprintf(stderr, "%s%s\n", datetime, str);	/* atomic write to stderr */
			fflush(stderr);
		}

		my_log_curses(prio, datetime, str, force);
	}
}
/*
 * Arm the GridSeed for a new work item: stamp the scan-start time, reset
 * the LTC core, flush any stale USB data, then push the task down.
 */
static bool gridseed_prepare_work(struct thr_info __maybe_unused *thr, struct work *work)
{
	struct cgpu_info *dev = thr->cgpu;
	GRIDSEED_INFO *state = dev->device_data;

	cgtime(&state->scanhash_time);
	gc3355_send_cmds(dev, str_ltc_reset);
	usb_buffer_clear(dev);

	return gridseed_send_task(dev, work);
}
static bool bitforce_thread_prepare(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct timeval now; cgtime(&now); get_datestamp(bitforce->init, &now); return true; }
static bool bitfury_prepare(struct thr_info *thr) { struct timeval now; struct cgpu_info *cgpu = thr->cgpu; cgtime(&now); get_datestamp(cgpu->init, &now); applog(LOG_INFO, "INFO bitfury_prepare"); return true; }
static bool spondoolies_prepare_sp30(struct thr_info *thr) { struct cgpu_info *spondoolies_sp30 = thr->cgpu; struct timeval now; assert(spondoolies_sp30); cgtime(&now); /* FIXME: Vladik */ #if NEED_FIX get_datestamp(spondoolies_sp30->init, &now); #endif return true; }
/*
 * Main mining thread entry point. Runs driver init, waits for the start
 * notification, executes the driver's miner loop (or the generic scanhash
 * loop), and on exit marks this processor — and any slave processors that
 * share this thread — dead before running driver shutdown.
 */
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;

	/* Allow this thread to be cancelled at any point (used by GPU reinit). */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	char threadname[20];
	snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns);
	RenameThread(threadname);

	if (drv->thread_init && !drv->thread_init(mythr)) {
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		/* Propagate the failure to threadless slave processors that
		 * depend on this thread. */
		for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc)
			dev_error(slave, REASON_THREAD_FAIL_INIT);
		__thr_being_msg(LOG_ERR, mythr, "failure, exiting");
		goto out;
	}

	thread_reportout(mythr);

	applog(LOG_DEBUG, "Popping ping in miner thread");
	notifier_read(mythr->notifier);  // Wait for a notification to start

	cgtime(&cgpu->cgminer_stats.start_tv);
	if (drv->minerloop)
		drv->minerloop(mythr);
	else
		minerloop_scanhash(mythr);
	__thr_being_msg(LOG_NOTICE, mythr, "shutting down");

out: ;
	/* Mark this processor and its threadless slaves dead. */
	struct cgpu_info *proc = cgpu;
	do {
		proc->deven = DEV_DISABLED;
		proc->status = LIFE_DEAD2;
	} while ( (proc = proc->next_proc) && !proc->threads);
	mythr->getwork = 0;
	mythr->has_pth = false;
	/* Brief grace period before driver teardown. */
	cgsleep_ms(1000);
	if (drv->thread_shutdown)
		drv->thread_shutdown(mythr);
	notifier_destroy(mythr->notifier);

	return NULL;
}
/*
 * Return microseconds elapsed since the first call (the first call itself
 * returns 0). Not thread-safe: first_usec is unsynchronized static state.
 *
 * Fix: the original computed tv.tv_sec * 1000000 in tv_sec's own (possibly
 * 32-bit) type before assigning to the 64-bit accumulator, which overflows
 * for any realistic epoch time on 32-bit longs. Widen before multiplying.
 */
unsigned long usec_stamp(void)
{
	static unsigned long long int first_usec = 0;
	struct timeval tv;
	unsigned long long int curr_usec;

	cgtime(&tv);
	/* Widen tv_sec to 64 bits BEFORE the multiply to avoid overflow. */
	curr_usec = (unsigned long long)tv.tv_sec * 1000000ULL +
		    (unsigned long long)tv.tv_usec;
	if (first_usec == 0) {
		first_usec = curr_usec;
		curr_usec = 0;
	} else
		curr_usec -= first_usec;
	/* NOTE: return type is unsigned long; on 32-bit platforms the delta
	 * wraps after ~71 minutes, as it always has for callers. */
	return curr_usec;
}
static bool hfa_prepare(struct thr_info *thr) { struct cgpu_info *hashfast = thr->cgpu; struct hashfast_info *info = hashfast->device_data; struct timeval now; mutex_init(&info->lock); if (pthread_create(&info->read_thr, NULL, hfa_read, (void *)thr)) quit(1, "Failed to pthread_create read thr in hfa_prepare"); cgtime(&now); get_datestamp(hashfast->init, sizeof(hashfast->init), &now); return true; }
/*
 * Core log sink (logfile-capable variant): routes to syslog when enabled,
 * otherwise timestamps the message and writes it to stderr (when not a tty),
 * to an optional logfile (opened lazily on first use), and to the curses
 * log window.
 */
void _applog(int prio, const char *str, bool force)
{
#ifdef HAVE_SYSLOG_H
	if (use_syslog) {
		syslog(prio, "%s", str);
	}
#else
	/* Dummy branch so the `else` below pairs correctly when syslog
	 * support is compiled out. */
	if (0) {}
#endif
	else {
		char datetime[64];
		struct timeval tv = {0, 0};
		struct tm *tm;

		cgtime(&tv);

		const time_t tmp_time = tv.tv_sec;
		/* NOTE(review): localtime() is not thread-safe — assumes
		 * logging is serialized upstream. */
		tm = localtime(&tmp_time);

		snprintf(datetime, sizeof(datetime), " [%d-%02d-%02d %02d:%02d:%02d] ",
			tm->tm_year + 1900,
			tm->tm_mon + 1,
			tm->tm_mday,
			tm->tm_hour,
			tm->tm_min,
			tm->tm_sec);

		/* Only output to stderr if it's not going to the screen as well */
		if (!isatty(fileno((FILE *)stderr))) {
			fprintf(stderr, "%s%s\n", datetime, str);	/* atomic write to stderr */
			fflush(stderr);
		}

		if(g_logfile_enable) {
			/* Lazy open; the handle is kept for the process
			 * lifetime and flushed per message. */
			if(!g_log_file) {
				g_log_file = fopen(g_logfile_path, g_logfile_openflag);
			}
			if(g_log_file) {
				fwrite(datetime, strlen(datetime), 1, g_log_file);
				fwrite(str, strlen(str), 1, g_log_file);
				fwrite("\n", 1, 1, g_log_file);
				fflush(g_log_file);
			}
		}

		my_log_curses(prio, datetime, str, force);
	}
}
static bool hfa_prepare(struct thr_info *thr) { struct cgpu_info *hashfast = thr->cgpu; struct hashfast_info *info = hashfast->device_data; struct timeval now; mutex_init(&info->lock); if (pthread_create(&info->read_thr, NULL, hfa_read, (void *)thr)) quit(1, "Failed to pthread_create read thr in hfa_prepare"); cgtime(&now); get_datestamp(hashfast->init, sizeof(hashfast->init), &now); hashfast->last_device_valid_work = time(NULL); info->resets = 0; hfa_set_fanspeed(hashfast, info, opt_hfa_fan_default); return true; }
/*
 * Duplicate-nonce detector. Keeps a time-limited list of (work_id, nonce)
 * pairs per device; returns true (and logs) when the same nonce for the
 * same work has been seen within dup->timelimit seconds. New entries are
 * recorded and expired entries recycled under the free-list write lock.
 */
bool isdupnonce(struct cgpu_info *cgpu, struct work *work, uint32_t nonce)
{
	struct dupdata *dup = (struct dupdata *)(cgpu->dup_data);
	struct timeval now;
	bool unique = true;
	K_ITEM *item;

	/* No dup tracking configured for this device: nothing is a dup. */
	if (!dup)
		return false;

	cgtime(&now);
	dup->checked++;
	K_WLOCK(dup->nfree_list);
	/* Scan newest-to-oldest for a matching (work, nonce) pair. */
	item = dup->nonce_list->tail;
	while (unique && item) {
		if (DATAN(item)->work_id == work->id && DATAN(item)->nonce == nonce) {
			unique = false;
			applog(LOG_WARNING, "%s%d: Duplicate nonce %08x",
					cgpu->drv->name, cgpu->device_id, nonce);
		} else
			item = item->prev;
	}
	if (unique) {
		/* Record the new pair at the head with its timestamp. */
		item = k_unlink_head(dup->nfree_list);
		DATAN(item)->work_id = work->id;
		DATAN(item)->nonce = nonce;
		memcpy(&(DATAN(item)->when), &now, sizeof(now));
		k_add_head(dup->nonce_list, item);
	}
	/* Expire entries older than the time limit back onto the free list. */
	item = dup->nonce_list->tail;
	while (item && tdiff(&(DATAN(item)->when), &now) > dup->timelimit) {
		item = k_unlink_tail(dup->nonce_list);
		k_add_head(dup->nfree_list, item);
		item = dup->nonce_list->tail;
	}
	K_WUNLOCK(dup->nfree_list);

	if (!unique)
		dup->dup++;

	return !unique;
}
/*
 * Core log sink (minimal variant): routes to syslog when enabled, otherwise
 * timestamps the message and writes it to stderr (when not a tty) and the
 * curses log window.
 *
 * Fix: the timestamp was built with unbounded sprintf(); use snprintf() so
 * the write can never run past datetime[64], matching the other _applog
 * variants in this codebase.
 */
void _applog(int prio, const char *str)
{
#ifdef HAVE_SYSLOG_H
	if (use_syslog) {
		syslog(prio, "%s", str);
	}
#else
	/* Dummy branch so the `else` below pairs correctly when syslog
	 * support is compiled out. */
	if (0) {}
#endif
	else {
		char datetime[64];
		struct timeval tv = {0, 0};
		struct tm *tm;

		cgtime(&tv);

		const time_t tmp_time = tv.tv_sec;
		/* NOTE(review): localtime() is not thread-safe — assumes
		 * logging is serialized upstream. */
		tm = localtime(&tmp_time);

		/* Bounded format — previously an unbounded sprintf(). */
		snprintf(datetime, sizeof(datetime), " [%d-%02d-%02d %02d:%02d:%02d] ",
			tm->tm_year + 1900,
			tm->tm_mon + 1,
			tm->tm_mday,
			tm->tm_hour,
			tm->tm_min,
			tm->tm_sec);

		/* Only output to stderr if it's not going to the screen as well */
		if (!isatty(fileno((FILE *)stderr))) {
			fprintf(stderr, "%s%s\n", datetime, str);	/* atomic write to stderr */
			fflush(stderr);
		}

		my_log_curses(prio, datetime, str);
	}
}
/* We have only one thread that ever re-initialises GPUs, thus if any GPU
 * init command fails due to a completely wedged GPU, the thread will never
 * return, unable to harm other GPUs. If it does return, it means we only had
 * a soft failure and then the reinit_gpu thread is ready to tackle another
 * GPU */
void *reinit_gpu(void *userdata)
{
	struct thr_info *mythr = userdata;
	struct cgpu_info *cgpu;
	struct thr_info *thr;
	struct timeval now;
	char name[256];
	int thr_id;
	int gpu;

	pthread_detach(pthread_self());

select_cgpu:
	/* Block until a sick GPU is queued for reinit; NULL means shut down. */
	cgpu = tq_pop(mythr->q, NULL);
	if (!cgpu)
		goto out;

	if (clDevicesNum() != nDevs) {
		applog(LOG_WARNING, "Hardware not reporting same number of active devices, will not attempt to restart GPU");
		goto out;
	}

	gpu = cgpu->device_id;

	/* Pass 1: kill off every mining thread attached to this GPU. */
	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
		thr = get_thread(thr_id);
		cgpu = thr->cgpu;
		if (cgpu->drv->drv_id != DRIVER_opencl)
			continue;
		if (dev_from_id(thr_id) != gpu)
			continue;

		thr = get_thread(thr_id);
		if (!thr) {
			applog(LOG_WARNING, "No reference to thread %d exists", thr_id);
			continue;
		}

		thr->rolling = thr->cgpu->rolling = 0;
		/* Reports the last time we tried to revive a sick GPU */
		cgtime(&thr->sick);
		if (!pthread_cancel(thr->pth)) {
			applog(LOG_WARNING, "Thread %d still exists, killing it off", thr_id);
		} else
			applog(LOG_WARNING, "Thread %d no longer exists", thr_id);
	}

	/* Pass 2: rebuild each thread's queue and CL state, then restart it. */
	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
		int virtual_gpu;

		thr = get_thread(thr_id);
		cgpu = thr->cgpu;
		if (cgpu->drv->drv_id != DRIVER_opencl)
			continue;
		if (dev_from_id(thr_id) != gpu)
			continue;

		virtual_gpu = cgpu->virtual_gpu;
		/* Lose this ram cause we may get stuck here! */
		//tq_freeze(thr->q);

		thr->q = tq_new();
		if (!thr->q)
			quit(1, "Failed to tq_new in reinit_gpu");

		/* Lose this ram cause we may dereference in the dying thread! */
		//free(clState);

		applog(LOG_INFO, "Reinit GPU thread %d", thr_id);
		clStates[thr_id] = initCl(virtual_gpu, name, sizeof(name));
		if (!clStates[thr_id]) {
			applog(LOG_ERR, "Failed to reinit GPU thread %d", thr_id);
			/* Soft failure: give up on this GPU, wait for the next. */
			goto select_cgpu;
		}
		applog(LOG_INFO, "initCl() finished. Found %s", name);

		if (unlikely(thr_info_create(thr, NULL, miner_thread, thr))) {
			applog(LOG_ERR, "thread %d create failed", thr_id);
			return NULL;
		}
		applog(LOG_WARNING, "Thread %d restarted", thr_id);
	}

	cgtime(&now);
	get_datestamp(cgpu->init, sizeof(cgpu->init), &now);

	/* Pass 3: release the restarted threads to start mining. */
	for (thr_id = 0; thr_id < mining_threads; ++thr_id) {
		thr = get_thread(thr_id);
		cgpu = thr->cgpu;
		if (cgpu->drv->drv_id != DRIVER_opencl)
			continue;
		if (dev_from_id(thr_id) != gpu)
			continue;

		cgsem_post(&thr->sem);
	}

	goto select_cgpu;
out:
	return NULL;
}
static int64_t avalon2_scanhash(struct thr_info *thr) { struct avalon2_pkg send_pkg; struct timeval current_stratum; struct pool *pool; struct cgpu_info *avalon2 = thr->cgpu; struct avalon2_info *info = avalon2->device_data; int64_t h; uint32_t tmp, range, start; int i; if (thr->work_restart || thr->work_update || !info->first) { applog(LOG_DEBUG, "Avalon2: New stratum: restart: %d, update: %d, first: %d", thr->work_restart, thr->work_update, info->first); thr->work_update = false; thr->work_restart = false; get_work(thr, thr->id); /* Make sure pool is ready */ pool = current_pool(); if (!pool->has_stratum) quit(1, "Avalon2: Miner Manager have to use stratum pool"); if (pool->coinbase_len > AVA2_P_COINBASE_SIZE) { applog(LOG_ERR, "Avalon2: Miner Manager pool coinbase length have to less then %d", AVA2_P_COINBASE_SIZE); return 0; } if (pool->merkles > AVA2_P_MERKLES_COUNT) { applog(LOG_ERR, "Avalon2: Miner Manager merkles have to less then %d", AVA2_P_MERKLES_COUNT); return 0; } cgtime(&info->last_stratum); cg_rlock(&pool->data_lock); info->pool_no = pool->pool_no; copy_pool_stratum(pool); avalon2_stratum_pkgs(info->fd, pool, thr); cg_runlock(&pool->data_lock); /* Configuer the parameter from outside */ adjust_fan(info); info->set_voltage = opt_avalon2_voltage_min; info->set_frequency = opt_avalon2_freq_min; /* Set the Fan, Voltage and Frequency */ memset(send_pkg.data, 0, AVA2_P_DATA_LEN); tmp = be32toh(info->fan_pwm); memcpy(send_pkg.data, &tmp, 4); applog(LOG_ERR, "Avalon2: Temp max: %d, Cut off temp: %d", get_current_temp_max(info), opt_avalon2_overheat); if (get_current_temp_max(info) >= opt_avalon2_overheat) tmp = encode_voltage(0); else tmp = encode_voltage(info->set_voltage); tmp = be32toh(tmp); memcpy(send_pkg.data + 4, &tmp, 4); tmp = be32toh(info->set_frequency); memcpy(send_pkg.data + 8, &tmp, 4); /* Configure the nonce2 offset and range */ range = 0xffffffff / total_devices; start = range * avalon2->device_id; tmp = be32toh(start); memcpy(send_pkg.data 
+ 12, &tmp, 4); tmp = be32toh(range); memcpy(send_pkg.data + 16, &tmp, 4); /* Package the data */ avalon2_init_pkg(&send_pkg, AVA2_P_SET, 1, 1); while (avalon2_send_pkg(info->fd, &send_pkg, thr) != AVA2_SEND_OK) ; if (unlikely(info->first < 2)) info->first++; } /* Stop polling the device if there is no stratum in 3 minutes, network is down */ cgtime(¤t_stratum); if (tdiff(¤t_stratum, &(info->last_stratum)) > (double)(3.0 * 60.0)) return 0; polling(thr); h = 0; for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { h += info->enable[i] ? (info->local_work[i] - info->hw_work[i]) : 0; } return h * 0xffffffff; }
/*
 * Ztex FPGA thread preparation: stamp the init time, configure the FPGA
 * bitstream (disabling the device on failure), parse the --ztex-clock
 * option (KRAMBLE patch) to set initial/max clock, then program the clock.
 * --ztex-clock takes "freqM:freqMaxM" in MHz; comma-separated values apply
 * to successive devices. Internally clocks are stored in 4 MHz units.
 */
static bool ztex_prepare(struct thr_info *thr)
{
	struct timeval now;
	struct cgpu_info *cgpu = thr->cgpu;
	struct libztex_device *ztex = cgpu->device_ztex;

	cgtime(&now);
	get_datestamp(cgpu->init, &now);

	ztex_selectFpga(ztex);
	if (libztex_configureFpga(ztex) != 0) {
		libztex_resetFpga(ztex);
		ztex_releaseFpga(ztex);
		applog(LOG_ERR, "%s: Disabling!", thr->cgpu->device_ztex->repr);
		thr->cgpu->deven = DEV_DISABLED;
		/* Device is disabled but the thread itself prepared OK. */
		return true;
	}

	// KRAMBLE Handle options, based on get_options in driver-icarus.c
	// Use as --ztex-clock freqM:freqMaxM
	// Multiple comma separated vaues are allowed eg 160:180,180:184

	{	// Bare block to isolate variables
		char err_buf[BUFSIZ+1];
		char buf[BUFSIZ+1];
		char *ptr, *comma, *colon, *colon2;
		size_t max;
		int i, tmp;

		/* Each prepared device consumes the next comma-separated field. */
		int this_option_offset = ++option_offset;

		if (opt_ztex_clock == NULL)
			buf[0] = '\0';
		else {
			/* Walk to this device's field. */
			ptr = opt_ztex_clock;
			for (i = 0; i < this_option_offset; i++) {
				comma = strchr(ptr, ',');
				if (comma == NULL)
					break;
				ptr = comma + 1;
			}

			comma = strchr(ptr, ',');
			if (comma == NULL)
				max = strlen(ptr);
			else
				max = comma - ptr;

			if (max > BUFSIZ)
				max = BUFSIZ;
			strncpy(buf, ptr, max);
			buf[max] = '\0';
		}

		if (*buf) {
			colon = strchr(buf, ':');
			if (colon)
				*(colon++) = '\0';

			if (*buf) {
				tmp = atoi(buf);
				if (tmp >= 100 && tmp <= 250)
					ztex->freqM = ztex->freqMDefault = tmp/4 - 1;	// NB 4Mhz units
				else {
					/* NOTE(review): format string has no %s yet `buf`
					 * is passed — extra arg is ignored; harmless but
					 * the intent was probably to include the value. */
					sprintf(err_buf, "Invalid ztex_clock must be between 100 and 250", buf);
					quit(1, err_buf);
				}
			}

			if (colon && *colon) {
				tmp = atoi(colon);
				if (tmp >= 100 && tmp <= 250) {
					if (tmp/4 - 1 >= ztex->freqM) {
						ztex->freqMaxM = tmp/4 - 1;	// NB 4Mhz units
						// If both initial and max were set, and were the same, lock the clock
						if (ztex->freqMDefault == ztex->freqMaxM)
							ztex->lockClock = 1;
					}
					else {
						/* NOTE(review): same unused-%s issue as above. */
						sprintf(err_buf, "Invalid ztex_clock max must be less than min", buf);
						quit(1, err_buf);
					}
				}
				else {
					sprintf(err_buf, "Invalid ztex_clock must be between 100 and 250", buf);
					quit(1, err_buf);
				}
			}
		}

	}	// End bare block

	ztex->freqM = ztex->freqMaxM+1;	// KRAMBLE is in original
	// ztex_updateFreq(ztex);	// KRAMBLE Was already commented out in original

#if 1
	libztex_setFreq(ztex, ztex->freqMDefault);	// KRAMBLE PRODUCTION CODE
#else
	// KRAMBLE build customised settings for a specific board
	if (ztex->repr[strlen(ztex->repr)-1] == '4')
		libztex_setFreq(ztex, ztex->freqMDefault-1);	// Run it 4MHz slower
	else
		libztex_setFreq(ztex, ztex->freqMDefault);
#endif

	ztex_releaseFpga(ztex);
	applog(LOG_DEBUG, "%s: prepare", ztex->repr);
	return true;
}
/*
 * Poll the BitForce unit for work results. Loops requesting work status
 * (backing off while the device reports 'B'usy/throttling), adapts the
 * per-work sleep time toward the device's actual completion time, then
 * parses and submits any comma-separated nonces from the reply.
 * Returns the nonce-range size credited as hashes, or 0 on timeout/idle/error.
 */
static int64_t bitforce_get_result(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned int delay_time_ms;
	struct timeval elapsed;
	struct timeval now;
	char buf[BITFORCE_BUFSIZ+1];
	int amount;
	char *pnoncebuf;
	uint32_t nonce;

	while (1) {
		if (unlikely(thr->work_restart))
			return 0;

		mutex_lock(&bitforce->device_mutex);
		usb_write(bitforce, BITFORCE_WORKSTATUS, BITFORCE_WORKSTATUS_LEN, &amount, C_REQUESTWORKSTATUS);
		usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETWORKSTATUS);
		mutex_unlock(&bitforce->device_mutex);

		cgtime(&now);
		timersub(&now, &bitforce->work_start_tv, &elapsed);

		/* Hard cap: give up on this work entirely. */
		if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) {
			applog(LOG_ERR, "%s%i: took %ldms - longer than %dms", bitforce->drv->name,
			       bitforce->device_id, tv_to_ms(elapsed), BITFORCE_LONG_TIMEOUT_MS);
			return 0;
		}

		if (amount > 0 && buf[0] && strncasecmp(buf, "B", 1)) /* BFL does not respond during throttling */
			break;

		/* if BFL is throttling, no point checking so quickly */
		delay_time_ms = (buf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS);
		nmsleep(delay_time_ms);
		bitforce->wait_ms += delay_time_ms;
	}

	if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) {
		applog(LOG_ERR, "%s%i: took %ldms - longer than %dms", bitforce->drv->name,
		       bitforce->device_id, tv_to_ms(elapsed), BITFORCE_TIMEOUT_MS);
		dev_error(bitforce, REASON_DEV_OVER_HEAT);

		/* Only return if we got nothing after timeout - there still may be results */
		if (amount == 0)
			return 0;
	} else if (!strncasecmp(buf, BITFORCE_EITHER, BITFORCE_EITHER_LEN)) {
		/* Simple timing adjustment. Allow a few polls to cope with
		 * OS timer delays being variably reliable. wait_ms will
		 * always equal sleep_ms when we've waited greater than or
		 * equal to the result return time.*/
		delay_time_ms = bitforce->sleep_ms;

		if (bitforce->wait_ms > bitforce->sleep_ms + (WORK_CHECK_INTERVAL_MS * 2))
			bitforce->sleep_ms += (bitforce->wait_ms - bitforce->sleep_ms) / 2;
		else if (bitforce->wait_ms == bitforce->sleep_ms) {
			if (bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS;
			else if (bitforce->sleep_ms > BITFORCE_CHECK_INTERVAL_MS)
				bitforce->sleep_ms -= BITFORCE_CHECK_INTERVAL_MS;
		}

		if (delay_time_ms != bitforce->sleep_ms)
			applog(LOG_DEBUG, "%s%i: Wait time changed to: %d, waited %u", bitforce->drv->name,
			       bitforce->device_id, bitforce->sleep_ms, bitforce->wait_ms);

		/* Work out the average time taken. Float for calculation, uint for display */
		bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT;
		bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5);
	}

	applog(LOG_DEBUG, "%s%i: waited %dms until %s", bitforce->drv->name,
	       bitforce->device_id, bitforce->wait_ms, buf);
	if (!strncasecmp(buf, BITFORCE_NO_NONCE, BITFORCE_NO_NONCE_MATCH))
		return bitforce->nonces;   /* No valid nonce found */
	else if (!strncasecmp(buf, BITFORCE_IDLE, BITFORCE_IDLE_MATCH))
		return 0;	/* Device idle */
	else if (strncasecmp(buf, BITFORCE_NONCE, BITFORCE_NONCE_LEN)) {
		bitforce->hw_errors++;
		applog(LOG_WARNING, "%s%i: Error: Get result reports: %s",
		       bitforce->drv->name, bitforce->device_id, buf);
		bitforce_initialise(bitforce, true);
		return 0;
	}

	/* Reply is "NONCE-FOUND:xxxxxxxx[,yyyyyyyy...]"; hex nonces start at
	 * offset 12 and are 8 chars each, comma separated. */
	pnoncebuf = &buf[12];

	while (1) {
		hex2bin((void*)&nonce, pnoncebuf, 4);
#ifndef __BIG_ENDIAN__
		nonce = swab32(nonce);
#endif
		/* A nonce outside the range we asked for means the firmware's
		 * nonce-range support is broken: fall back to full-range work. */
		if (unlikely(bitforce->nonce_range && (nonce >= work->blk.nonce ||
			(work->blk.nonce > 0 && nonce < work->blk.nonce - bitforce->nonces - 1)))) {
				applog(LOG_WARNING, "%s%i: Disabling broken nonce range support",
				       bitforce->drv->name, bitforce->device_id);
				bitforce->nonce_range = false;
				work->blk.nonce = 0xffffffff;
				bitforce->sleep_ms *= 5;
				bitforce->kname = KNAME_WORK;
		}

		submit_nonce(thr, work, nonce);
		if (strncmp(&pnoncebuf[8], ",", 1))
			break;
		pnoncebuf += 9;
	}

	return bitforce->nonces;
}
/*
 * Send one unit of work to a BitForce device. Handshakes with a send-work
 * (or send-range) request, retries while the device reports busy, falls
 * back from nonce-range to full-range mode if the firmware rejects it,
 * then transmits the 60- or 68-byte payload framed by ">>>>>>>>" markers
 * (8-byte marker + 32-byte midstate + 12 data bytes [+ 8-byte nonce range]).
 */
static bool bitforce_send_work(struct thr_info *thr, struct work *work)
{
	struct cgpu_info *bitforce = thr->cgpu;
	unsigned char ob[70];
	char buf[BITFORCE_BUFSIZ+1];
	int err, amount;
	char *s;
	char *cmd;
	int len;

re_send:
	if (bitforce->nonce_range) {
		cmd = BITFORCE_SENDRANGE;
		len = BITFORCE_SENDRANGE_LEN;
	} else {
		cmd = BITFORCE_SENDWORK;
		len = BITFORCE_SENDWORK_LEN;
	}

	mutex_lock(&bitforce->device_mutex);
	if ((err = usb_write(bitforce, cmd, len, &amount, C_REQUESTSENDWORK)) < 0 || amount != len) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%i: request send work failed (%d:%d)",
		       bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_REQUESTSENDWORKSTATUS)) < 0) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%d: read request send work status failed (%d:%d)",
		       bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	/* 'B' prefix means busy: wait and retry the whole request. */
	if (amount == 0 || !buf[0] || !strncasecmp(buf, "B", 1)) {
		mutex_unlock(&bitforce->device_mutex);
		nmsleep(WORK_CHECK_INTERVAL_MS);
		goto re_send;
	} else if (unlikely(strncasecmp(buf, "OK", 2))) {
		mutex_unlock(&bitforce->device_mutex);
		if (bitforce->nonce_range) {
			/* Firmware rejected the range command: drop to
			 * full-range mode permanently and retry. */
			applog(LOG_WARNING, "%s%i: Does not support nonce range, disabling",
			       bitforce->drv->name, bitforce->device_id);
			bitforce->nonce_range = false;
			bitforce->sleep_ms *= 5;
			bitforce->kname = KNAME_WORK;
			goto re_send;
		}
		applog(LOG_ERR, "%s%i: Error: Send work reports: %s",
		       bitforce->drv->name, bitforce->device_id, buf);
		return false;
	}

	/* Build the payload: marker + midstate + remaining header bytes. */
	sprintf((char *)ob, ">>>>>>>>");
	memcpy(ob + 8, work->midstate, 32);
	memcpy(ob + 8 + 32, work->data + 64, 12);
	if (!bitforce->nonce_range) {
		sprintf((char *)ob + 8 + 32 + 12, ">>>>>>>>");
		work->blk.nonce = bitforce->nonces = 0xffffffff;
		len = 60;
	} else {
		uint32_t *nonce;

		nonce = (uint32_t *)(ob + 8 + 32 + 12);
		*nonce = htobe32(work->blk.nonce);
		nonce = (uint32_t *)(ob + 8 + 32 + 12 + 4);
		/* Split work up into 1/5th nonce ranges */
		bitforce->nonces = 0x33333332;
		*nonce = htobe32(work->blk.nonce + bitforce->nonces);
		work->blk.nonce += bitforce->nonces + 1;
		sprintf((char *)ob + 8 + 32 + 12 + 8, ">>>>>>>>");
		len = 68;
	}

	if ((err = usb_write(bitforce, (char *)ob, len, &amount, C_SENDWORK)) < 0 || amount != len) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%i: send work failed (%d:%d)",
		       bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_SENDWORKSTATUS)) < 0) {
		mutex_unlock(&bitforce->device_mutex);
		applog(LOG_ERR, "%s%d: read send work status failed (%d:%d)",
		       bitforce->drv->name, bitforce->device_id, amount, err);
		return false;
	}

	mutex_unlock(&bitforce->device_mutex);

	if (opt_debug) {
		s = bin2hex(ob + 8, 44);
		applog(LOG_DEBUG, "%s%i: block data: %s",
		       bitforce->drv->name, bitforce->device_id, s);
		free(s);
	}

	if (amount == 0 || !buf[0]) {
		applog(LOG_ERR, "%s%i: Error: Send block data returned empty string/timed out",
		       bitforce->drv->name, bitforce->device_id);
		return false;
	}

	if (unlikely(strncasecmp(buf, "OK", 2))) {
		applog(LOG_ERR, "%s%i: Error: Send block data reports: %s",
		       bitforce->drv->name, bitforce->device_id, buf);
		return false;
	}

	/* Work accepted: start the completion timer used by get_result. */
	cgtime(&bitforce->work_start_tv);
	return true;
}
static void hashratio_update_work(struct cgpu_info *hashratio) { struct hashratio_info *info = hashratio->device_data; struct thr_info *thr = hashratio->thr[0]; struct hashratio_pkg send_pkg; uint32_t tmp, range, start; struct work *work; struct pool *pool; applog(LOG_DEBUG, "hashratio: New stratum: restart: %d, update: %d", thr->work_restart, thr->work_update); thr->work_update = false; thr->work_restart = false; work = get_work(thr, thr->id); /* Make sure pool is ready */ discard_work(work); /* Don't leak memory */ pool = current_pool(); if (!pool->has_stratum) quit(1, "hashratio: Miner Manager have to use stratum pool"); if (pool->coinbase_len > HRTO_P_COINBASE_SIZE) quit(1, "hashratio: Miner Manager pool coinbase length have to less then %d", HRTO_P_COINBASE_SIZE); if (pool->merkles > HRTO_P_MERKLES_COUNT) quit(1, "hashratio: Miner Manager merkles have to less then %d", HRTO_P_MERKLES_COUNT); info->pool_no = pool->pool_no; cgtime(&info->last_stratum); cg_rlock(&pool->data_lock); info->pool_no = pool->pool_no; copy_pool_stratum(info, pool); hashratio_stratum_pkgs(hashratio, pool); cg_runlock(&pool->data_lock); /* Configure the parameter from outside */ memset(send_pkg.data, 0, HRTO_P_DATA_LEN); // fan. We're not measuring temperature so set a safe but not max value info->fan_pwm = HRTO_PWM_MAX * 2 / 3; tmp = be32toh(info->fan_pwm); memcpy(send_pkg.data, &tmp, 4); // freq tmp = be32toh(info->default_freq); memcpy(send_pkg.data + 4, &tmp, 4); applog(LOG_DEBUG, "set freq: %d", info->default_freq); /* Configure the nonce2 offset and range */ range = 0xffffffff / (total_devices + 1); start = range * (hashratio->device_id + 1); tmp = be32toh(start); memcpy(send_pkg.data + 8, &tmp, 4); tmp = be32toh(range); memcpy(send_pkg.data + 12, &tmp, 4); /* Package the data */ hashratio_init_pkg(&send_pkg, HRTO_P_SET, 1, 1); hashratio_send_pkgs(hashratio, &send_pkg); }
/*
 * Probe a USB device to see if it is a BitForce unit. Sends an identify
 * request with exponential-backoff retries inside REINIT_TIME_MAX, allows
 * one full second attempt if the first reply is unrecognised, defers
 * "SHA256 SC" devices to the BFLSC driver, extracts the device name from
 * the ">>>ID: ...>>>" reply, and registers the cgpu on success.
 */
static bool bitforce_detect_one(struct libusb_device *dev, struct usb_find_devices *found)
{
	char buf[BITFORCE_BUFSIZ+1];
	int err, amount;
	char *s;
	struct timeval init_start, init_now;
	int init_sleep, init_count;
	bool ident_first;

	struct cgpu_info *bitforce = usb_alloc_cgpu(&bitforce_drv, 1);

	if (!usb_init(bitforce, dev, found))
		goto shin;

	// Allow 2 complete attempts if the 1st time returns an unrecognised reply
	ident_first = true;

retry:
	init_count = 0;
	init_sleep = REINIT_TIME_FIRST_MS;
	cgtime(&init_start);

reinit:
	bitforce_initialise(bitforce, false);
	if ((err = usb_write(bitforce, BITFORCE_IDENTIFY, BITFORCE_IDENTIFY_LEN, &amount, C_REQUESTIDENTIFY)) < 0 || amount != BITFORCE_IDENTIFY_LEN) {
		applog(LOG_ERR, "%s detect (%s) send identify request failed (%d:%d)",
		       bitforce->drv->dname, bitforce->device_path, amount, err);
		goto unshin;
	}

	if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETIDENTIFY)) < 0 || amount < 1) {
		init_count++;
		cgtime(&init_now);
		/* Keep retrying with doubling sleeps until the retry window
		 * is exhausted. */
		if (us_tdiff(&init_now, &init_start) <= REINIT_TIME_MAX) {
			if (init_count == 2) {
				applog(LOG_WARNING, "%s detect (%s) 2nd init failed (%d:%d) - retrying",
				       bitforce->drv->dname, bitforce->device_path, amount, err);
			}
			nmsleep(init_sleep);
			if ((init_sleep * 2) <= REINIT_TIME_MAX_MS)
				init_sleep *= 2;
			goto reinit;
		}

		if (init_count > 0)
			applog(LOG_WARNING, "%s detect (%s) init failed %d times %.2fs",
			       bitforce->drv->dname, bitforce->device_path, init_count, tdiff(&init_now, &init_start));

		if (err < 0) {
			applog(LOG_ERR, "%s detect (%s) error identify reply (%d:%d)",
			       bitforce->drv->dname, bitforce->device_path, amount, err);
		} else {
			applog(LOG_ERR, "%s detect (%s) empty identify reply (%d)",
			       bitforce->drv->dname, bitforce->device_path, amount);
		}

		goto unshin;
	}
	buf[amount] = '\0';

	if (unlikely(!strstr(buf, "SHA256"))) {
		if (ident_first) {
			applog(LOG_WARNING, "%s detect (%s) didn't recognise '%s' trying again ...",
			       bitforce->drv->dname, bitforce->device_path, buf);
			ident_first = false;
			goto retry;
		}
		applog(LOG_ERR, "%s detect (%s) didn't recognise '%s' on 2nd attempt",
		       bitforce->drv->dname, bitforce->device_path, buf);
		goto unshin;
	}

	if (strstr(buf, "SHA256 SC")) {
#ifdef USE_BFLSC
		applog(LOG_DEBUG, "SC device detected, will defer to BFLSC driver");
#else
		applog(LOG_WARNING, "SC device detected but no BFLSC support compiled in!");
#endif
		goto unshin;
	}

	/* Reply shape: ">>>ID: <name>>>>"; carve out <name>. */
	if (likely((!memcmp(buf, ">>>ID: ", 7)) && (s = strstr(buf + 3, ">>>")))) {
		s[0] = '\0';
		bitforce->name = strdup(buf + 7);
	} else {
		bitforce->name = (char *)blank;
	}

	// We have a real BitForce!
	applog(LOG_DEBUG, "%s (%s) identified as: '%s'",
	       bitforce->drv->dname, bitforce->device_path, bitforce->name);

	/* Initially enable support for nonce range and disable it later if it
	 * fails */
	if (opt_bfl_noncerange) {
		bitforce->nonce_range = true;
		bitforce->sleep_ms = BITFORCE_SLEEP_MS;
		bitforce->kname = KNAME_RANGE;
	} else {
		bitforce->sleep_ms = BITFORCE_SLEEP_MS * 5;
		bitforce->kname = KNAME_WORK;
	}

	if (!add_cgpu(bitforce))
		goto unshin;

	update_usb_stats(bitforce);

	mutex_init(&bitforce->device_mutex);

	return true;

unshin:
	usb_uninit(bitforce);

shin:
	/* Don't free the shared `blank` sentinel. */
	if (bitforce->name != blank) {
		free(bitforce->name);
		bitforce->name = NULL;
	}

	bitforce = usb_free_cgpu(bitforce);

	return false;
}
/*
 * Serial FPGA scanwork: sends a 44-byte task (byte-swapped midstate + last
 * 12 header bytes) over the serial port, then polls for 4-byte nonces until
 * the per-device timeout or a work restart. The per-hash time estimate
 * (info->Hs) is updated from each valid nonce and used to convert elapsed
 * time into an estimated hash count. Reopens the port lazily if closed.
 *
 * NOTE(review): several locals (tv_finish, diff, Hs, W, fullnonce,
 * read_count, estimate_hashes, values, hash_count_range, count, i) are
 * declared but never used — leftovers from the icarus driver this was
 * derived from.
 */
static int64_t serial_fpga_scanwork(struct thr_info *thr)
{
	struct cgpu_info *serial_fpga;
	int fd;
	int ret;
	struct FPGA_INFO *info;

	unsigned char ob_bin[44], nonce_buf[SERIAL_READ_SIZE];
	char *ob_hex;
	uint32_t nonce;
	int64_t hash_count;
	struct timeval tv_start, tv_finish, elapsed, tv_end, diff;
	int curr_hw_errors, i, j;
	uint32_t * ob;
	ob = (uint32_t *)ob_bin;

	int count;
	double Hs, W, fullnonce;
	int read_count;
	int64_t estimate_hashes;
	uint32_t values;
	int64_t hash_count_range;
	struct work *work;

	applog(LOG_DEBUG, "serial_fpga_scanwork...");

	if (thr->cgpu->deven == DEV_DISABLED)
		return -1;

	serial_fpga = thr->cgpu;
	info = serial_fpga->device_data;

	work = get_work(thr, thr->id);

	/* Lazily (re)open the serial port if a previous error closed it. */
	if (info->device_fd == -1) {

		applog(LOG_INFO, "Attemping to Reopen Serial FPGA on %s", serial_fpga->device_path);
		fd = serial_open(serial_fpga->device_path, SERIAL_IO_SPEED, SERIAL_READ_TIMEOUT, false);
		if (unlikely(-1 == fd)) {
			applog(LOG_ERR, "Failed to open Serial FPGA on %s", serial_fpga->device_path);
			return -1;
		}
		else
			info->device_fd = fd;
	}

	fd = info->device_fd;

	memset(ob_bin, 0, sizeof(ob_bin));

	// Currently, extra nonces are not supported
	//	memset((unsigned char*)work->data + 144, 0, 12);
	//
	//	calc_midstate(work);

	memcpy(ob_bin, work->midstate, 32);          // Midstate
	memcpy(ob_bin + 32, work->data + 128, 12);   // Remaining Bytes From Block Header

	// Send Bytes To FPGA In Reverse Order
	unsigned char swap[44];
	uint32_t * sw;
	sw = (uint32_t *)swap;
	for (j=0; j<8; j++) {
		sw[j] = swab32(ob[j]);
	}
	memcpy(swap + 32, ob_bin + 32, 12);

	for (j=0; j<44; j++) {
		ob_bin[j] = swap[j];
	}

	// (debug hex-dump applog calls removed to comments' graveyard —
	//  they printed the swapped payload byte-by-byte)

	//#ifndef WIN32
	//	tcflush(fd, TCOFLUSH);
	//#endif

	// Send Data To FPGA
	ret = write(fd, ob_bin, sizeof(ob_bin));

	if (ret != sizeof(ob_bin)) {
		applog(LOG_ERR, "%s%i: Serial Send Error (ret=%d)", serial_fpga->drv->name, serial_fpga->device_id, ret);
		serial_fpga_close(thr);
		dev_error(serial_fpga, REASON_DEV_COMMS_ERROR);
		return 0;
	}

	if (opt_debug) {
		ob_hex = bin2hex(ob_bin, sizeof(ob_bin));
		applog(LOG_DEBUG, "Serial FPGA %d sent: %s",
		       serial_fpga->device_id, ob_hex);
		free(ob_hex);
	}

	elapsed.tv_sec = 0;
	elapsed.tv_usec = 0;
	cgtime(&tv_start);

	applog(LOG_DEBUG, "%s%i: Begin Scan For Nonces", serial_fpga->drv->name, serial_fpga->device_id);
	while (thr && !thr->work_restart) {

		memset(nonce_buf,0,4);

		// Check Serial Port For 1/10 Sec For Nonce
		ret = read(fd, nonce_buf, SERIAL_READ_SIZE);

		// Calculate Elapsed Time
		cgtime(&tv_end);
		timersub(&tv_end, &tv_start, &elapsed);

		if (ret == 0) {		// No Nonce Found
			if (elapsed.tv_sec > info->timeout) {
				applog(LOG_DEBUG, "%s%i: End Scan For Nonces - Time = %d sec",
				       serial_fpga->drv->name, serial_fpga->device_id, elapsed.tv_sec);
				break;
			}
			continue;
		}
		else if (ret < SERIAL_READ_SIZE) {
			/* Short read (or read error, ret < 0): treat as a
			 * comms failure and close the port for reopen later. */
			applog(LOG_ERR, "%s%i: Serial Read Error (ret=%d)", serial_fpga->drv->name, serial_fpga->device_id, ret);
			serial_fpga_close(thr);
			dev_error(serial_fpga, REASON_DEV_COMMS_ERROR);
			break;
		}

		memcpy((char *)&nonce, nonce_buf, SERIAL_READ_SIZE);
#if !defined (__BIG_ENDIAN__) && !defined(MIPSEB)
		nonce = swab32(nonce);
#endif

		curr_hw_errors = serial_fpga->hw_errors;

		applog(LOG_INFO, "%s%i: Nonce Found - %08X (%5.1fMhz)", serial_fpga->drv->name, serial_fpga->device_id, nonce, (double)(1/(info->Hs * 1000000)));
		submit_nonce(thr, work, nonce);

		// Update Hashrate
		/* Only trust the timing when the nonce was valid (no new HW
		 * error was recorded by submit_nonce). */
		if (serial_fpga->hw_errors == curr_hw_errors)
			info->Hs = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / (double)nonce;

	}

	// Estimate Number Of Hashes
	hash_count = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs;

	free_work(work);
	return hash_count;
}
static int64_t bitfury_scanHash(struct thr_info *thr) { static struct bitfury_device *devices; // TODO Move somewhere to appropriate place int chip_n; int chip; uint64_t hashes = 0; unsigned char line[2048]; char stat_lines[32][256] = {0}; static first = 0; //TODO Move to detect() int i; static int shift_number = 1; static struct timeval spi_started; struct timeval now; struct cgpu_info *cgpu = thr->cgpu; devices = thr->cgpu->devices; chip_n = thr->cgpu->chip_n; if (!first) { for (i = 0; i < chip_n; i++) { devices[i].osc6_bits = 50; } set_chip_opts(devices, chip_n); for (i = 0; i < chip_n; i++) { send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits); } cgtime(&spi_started); } first = 1; cgtime(&now); int wait=1000000*(now.tv_sec-spi_started.tv_sec)+now.tv_usec-spi_started.tv_usec; if(wait<800000){ //cgsleep_ms((800000-wait)/1000); if(restart_wait(thr, (800000-wait)/1000) != ETIMEDOUT) { //purge work for (;chip < chip_n; chip++) { if(devices[chip].bfwork.work != NULL) { work_completed(thr->cgpu, devices[chip].bfwork.work); } devices[chip].bfwork.work = NULL; devices[chip].bfwork.results_n = 0; devices[chip].bfwork.results_sent = 0; } } } for (chip = 0; chip < chip_n; chip++) { devices[chip].job_switched = 0; if(!devices[chip].bfwork.work) { devices[chip].bfwork.work = get_queued(thr->cgpu); if (devices[chip].bfwork.work == NULL) { return 0; } work_to_payload(&(devices[chip].bfwork.payload), devices[chip].bfwork.work); } } cgtime(&spi_started); libbitfury_sendHashData(devices, chip_n); chip = 0; int high = 0; double aveg = 0.0; int total = 0; int futures =0; for (;chip < chip_n; chip++) { if (devices[chip].job_switched) { int i=0; struct work *work = devices[chip].bfwork.work; struct work *owork = devices[chip].obfwork.work; struct work *o2work = devices[chip].o2bfwork.work; if (owork) i+=submit_work(&devices[chip].obfwork, thr); if (o2work) i+=submit_work(&devices[chip].o2bfwork, thr); if (work) i+=submit_work(&devices[chip].bfwork, thr); high = high 
> i?high:i; total+=i; devices[chip].job_switched = 0; if (o2work) work_completed(thr->cgpu, o2work); //printf("%d %d %d\n",devices[chip].o2bfwork.results_n,devices[chip].obfwork.results_n,devices[chip].bfwork.results_n); memcpy (&(devices[chip].o2bfwork),&(devices[chip].obfwork),sizeof(struct bitfury_work)); memcpy (&(devices[chip].obfwork),&(devices[chip].bfwork),sizeof(struct bitfury_work)); devices[chip].bfwork.work = NULL; devices[chip].bfwork.results_n = 0; devices[chip].bfwork.results_sent = 0; hashes += 0xffffffffull * i; } /* if(shift_number % 100 == 0) { int len = strlen(stat_lines[devices[chip].slot]); snprintf(stat_lines[devices[chip].slot]+len,256-len,"%d: %d/%d ",chip,devices[chip].nonces_found/devices[chip].nonce_errors); } */ } aveg = (double) total / chip_n; //applog(LOG_WARNING, "high: %d aver: %4.2f total %d futures %d", high, aveg,total,futures); if(shift_number % 100 == 0) { /* applog(LOG_WARNING,stat_lines[0]); applog(LOG_WARNING,stat_lines[1]); applog(LOG_WARNING,stat_lines[2]); applog(LOG_WARNING,stat_lines[3]); */ } shift_number++; return hashes; }
static bool opencl_thread_prepare(struct thr_info *thr) { char name[256]; struct timeval now; struct cgpu_info *cgpu = thr->cgpu; int gpu = cgpu->device_id; int virtual_gpu = cgpu->virtual_gpu; int i = thr->id; static bool failmessage = false; int buffersize = BUFFERSIZE; if (!blank_res) blank_res = calloc(buffersize, 1); if (!blank_res) { applog(LOG_ERR, "Failed to calloc in opencl_thread_init"); return false; } strcpy(name, ""); applog(LOG_INFO, "Init GPU thread %i GPU %i virtual GPU %i", i, gpu, virtual_gpu); clStates[i] = initCl(virtual_gpu, name, sizeof(name)); if (!clStates[i]) { #ifdef HAVE_CURSES if (use_curses) enable_curses(); #endif applog(LOG_ERR, "Failed to init GPU thread %d, disabling device %d", i, gpu); if (!failmessage) { applog(LOG_ERR, "Restarting the GPU from the menu will not fix this."); applog(LOG_ERR, "Try restarting sgminer."); failmessage = true; #ifdef HAVE_CURSES char *buf; if (use_curses) { buf = curses_input("Press enter to continue"); if (buf) free(buf); } #endif } cgpu->deven = DEV_DISABLED; cgpu->status = LIFE_NOSTART; dev_error(cgpu, REASON_DEV_NOSTART); return false; } if (!cgpu->name) cgpu->name = strdup(name); if (!cgpu->kname) { switch (clStates[i]->chosen_kernel) { case KL_ALEXKARNEW: cgpu->kname = ALEXKARNEW_KERNNAME; break; case KL_ALEXKAROLD: cgpu->kname = ALEXKAROLD_KERNNAME; break; case KL_CKOLIVAS: cgpu->kname = CKOLIVAS_KERNNAME; break; case KL_ZUIKKIS: cgpu->kname = ZUIKKIS_KERNNAME; break; case KL_PSW: cgpu->kname = PSW_KERNNAME; break; case KL_DARKCOIN: cgpu->kname = DARKCOIN_KERNNAME; break; case KL_QUBITCOIN: cgpu->kname = QUBITCOIN_KERNNAME; break; case KL_QUARKCOIN: cgpu->kname = QUARKCOIN_KERNNAME; break; default: break; } } applog(LOG_INFO, "initCl() finished. Found %s", name); cgtime(&now); get_datestamp(cgpu->init, sizeof(cgpu->init), &now); return true; }
static int64_t bitfury_scanHash(struct thr_info *thr) { static struct bitfury_device *devices; // TODO Move somewhere to appropriate place int chip_n; int chip; uint64_t hashes = 0; struct timeval now; unsigned char line[2048]; int short_stat = 10; static time_t short_out_t; int long_stat = 1800; static time_t long_out_t; int long_long_stat = 60 * 30; static time_t long_long_out_t; static first = 0; //TODO Move to detect() int i; devices = thr->cgpu->devices; chip_n = thr->cgpu->chip_n; if (!first) { for (i = 0; i < chip_n; i++) { devices[i].osc6_bits = 54; } for (i = 0; i < chip_n; i++) { send_reinit(devices[i].slot, devices[i].fasync, devices[i].osc6_bits); } } first = 1; for (chip = 0; chip < chip_n; chip++) { devices[chip].job_switched = 0; if(!devices[chip].work) { devices[chip].work = get_queued(thr->cgpu); if (devices[chip].work == NULL) { return 0; } work_to_payload(&(devices[chip].payload), devices[chip].work); } } libbitfury_sendHashData(devices, chip_n); nmsleep(5); cgtime(&now); chip = 0; for (;chip < chip_n; chip++) { if (devices[chip].job_switched) { int i,j; int *res = devices[chip].results; struct work *work = devices[chip].work; struct work *owork = devices[chip].owork; struct work *o2work = devices[chip].o2work; i = devices[chip].results_n; for (j = i - 1; j >= 0; j--) { if (owork) { submit_nonce(thr, owork, bswap_32(res[j])); devices[chip].stat_ts[devices[chip].stat_counter++] = now.tv_sec; if (devices[chip].stat_counter == BITFURY_STAT_N) { devices[chip].stat_counter = 0; } } if (o2work) { // TEST //submit_nonce(thr, owork, bswap_32(res[j])); } } devices[chip].results_n = 0; devices[chip].job_switched = 0; if (devices[chip].old_nonce && o2work) { submit_nonce(thr, o2work, bswap_32(devices[chip].old_nonce)); i++; } if (devices[chip].future_nonce) { submit_nonce(thr, work, bswap_32(devices[chip].future_nonce)); i++; } if (o2work) work_completed(thr->cgpu, o2work); devices[chip].o2work = devices[chip].owork; devices[chip].owork = 
devices[chip].work; devices[chip].work = NULL; hashes += 0xffffffffull * i; } } if (now.tv_sec - short_out_t > short_stat) { int shares_first = 0, shares_last = 0, shares_total = 0; char stat_lines[32][256] = {0}; int len, k; double gh[32][8] = {0}; double ghsum = 0, gh1h = 0, gh2h = 0; unsigned strange_counter = 0; for (chip = 0; chip < chip_n; chip++) { int shares_found = calc_stat(devices[chip].stat_ts, short_stat, now); double ghash; len = strlen(stat_lines[devices[chip].slot]); ghash = shares_to_ghashes(shares_found, short_stat); gh[devices[chip].slot][chip & 0x07] = ghash; snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz); if(short_out_t && ghash < 0.5) { applog(LOG_WARNING, "Chip_id %d FREQ CHANGE\n", chip); send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits - 1); nmsleep(1); send_freq(devices[chip].slot, devices[chip].fasync, devices[chip].osc6_bits); } shares_total += shares_found; shares_first += chip < 4 ? shares_found : 0; shares_last += chip > 3 ? 
shares_found : 0; strange_counter += devices[chip].strange_counter; devices[chip].strange_counter = 0; } sprintf(line, "vvvvwww SHORT stat %ds: wwwvvvv", short_stat); applog(LOG_WARNING, line); sprintf(line, "stranges: %u", strange_counter); applog(LOG_WARNING, line); for(i = 0; i < 32; i++) if(strlen(stat_lines[i])) { len = strlen(stat_lines[i]); ghsum = 0; gh1h = 0; gh2h = 0; for(k = 0; k < 4; k++) { gh1h += gh[i][k]; gh2h += gh[i][k+4]; ghsum += gh[i][k] + gh[i][k+4]; } snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i); applog(LOG_WARNING, stat_lines[i]); } short_out_t = now.tv_sec; } if (now.tv_sec - long_out_t > long_stat) { int shares_first = 0, shares_last = 0, shares_total = 0; char stat_lines[32][256] = {0}; int len, k; double gh[32][8] = {0}; double ghsum = 0, gh1h = 0, gh2h = 0; for (chip = 0; chip < chip_n; chip++) { int shares_found = calc_stat(devices[chip].stat_ts, long_stat, now); double ghash; len = strlen(stat_lines[devices[chip].slot]); ghash = shares_to_ghashes(shares_found, long_stat); gh[devices[chip].slot][chip & 0x07] = ghash; snprintf(stat_lines[devices[chip].slot] + len, 256 - len, "%.1f-%3.0f ", ghash, devices[chip].mhz); shares_total += shares_found; shares_first += chip < 4 ? shares_found : 0; shares_last += chip > 3 ? shares_found : 0; } sprintf(line, "!!!_________ LONG stat %ds: ___________!!!", long_stat); applog(LOG_WARNING, line); for(i = 0; i < 32; i++) if(strlen(stat_lines[i])) { len = strlen(stat_lines[i]); ghsum = 0; gh1h = 0; gh2h = 0; for(k = 0; k < 4; k++) { gh1h += gh[i][k]; gh2h += gh[i][k+4]; ghsum += gh[i][k] + gh[i][k+4]; } snprintf(stat_lines[i] + len, 256 - len, "- %2.1f + %2.1f = %2.1f slot %i ", gh1h, gh2h, ghsum, i); applog(LOG_WARNING, stat_lines[i]); } long_out_t = now.tv_sec; } return hashes; }
/* Run one scanhash pass on the GPU: optionally retune intensity based on
 * measured kernel time, enqueue the kernel, read the result buffer back
 * asynchronously, and hand any found nonces off for async post-processing.
 * Returns the number of hashes scanned this pass, or -1 on any CL error.
 * NOTE: the enqueue(CL_FALSE)/clFinish pairing below is order-critical. */
static int64_t opencl_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce)
{
	const int thr_id = thr->id;
	struct opencl_thread_data *thrdata = thr->cgpu_data;
	struct cgpu_info *gpu = thr->cgpu;
	_clState *clState = clStates[thr_id];
	const cl_kernel *kernel = &clState->kernel;
	const int dynamic_us = opt_dynamic_interval * 1000;

	cl_int status;
	size_t globalThreads[1];
	size_t localThreads[1] = { clState->wsize };
	int64_t hashes;
	int found = FOUND;
	int buffersize = BUFFERSIZE;

	/* Windows' timer resolution is only 15ms so oversample 5x */
	if (gpu->dynamic && (++gpu->intervals * dynamic_us) > 70000) {
		struct timeval tv_gpuend;
		double gpu_us;

		cgtime(&tv_gpuend);
		/* Average per-interval GPU time since tv_gpustart; step intensity
		 * down if kernels run longer than the target, up if well under. */
		gpu_us = us_tdiff(&tv_gpuend, &gpu->tv_gpustart) / gpu->intervals;
		if (gpu_us > dynamic_us) {
			if (gpu->intensity > MIN_INTENSITY)
				--gpu->intensity;
		} else if (gpu_us < dynamic_us / 2) {
			if (gpu->intensity < MAX_INTENSITY)
				++gpu->intensity;
		}
		memcpy(&(gpu->tv_gpustart), &tv_gpuend, sizeof(struct timeval));
		gpu->intervals = 0;
	}

	/* Derive hashes/global work size from vector width, shader count and
	 * the current (x/raw)intensity settings. */
	set_threads_hashes(clState->vwidth, clState->compute_shaders, &hashes, globalThreads, localThreads[0], &gpu->intensity, &gpu->xintensity, &gpu->rawintensity);
	if (hashes > gpu->max_hashes)
		gpu->max_hashes = hashes;

	status = thrdata->queue_kernel_parameters(clState, &work->blk, globalThreads[0]);
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error: clSetKernelArg of all params failed.");
		return -1;
	}

	/* With goffset support the nonce base is passed as the global work
	 * offset; otherwise the kernel is assumed to derive it itself. */
	if (clState->goffset) {
		size_t global_work_offset[1];

		global_work_offset[0] = work->blk.nonce;
		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, global_work_offset, globalThreads, localThreads, 0, NULL, NULL);
	} else
		status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL, globalThreads, localThreads, 0, NULL, NULL);
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error %d: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)", status);
		return -1;
	}

	/* Non-blocking read (CL_FALSE); completed by the clFinish below. */
	status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0, buffersize, thrdata->res, 0, NULL, NULL);
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error: clEnqueueReadBuffer failed error %d. (clEnqueueReadBuffer)", status);
		return -1;
	}

	/* The amount of work scanned can fluctuate when intensity changes
	 * and since we do this one cycle behind, we increment the work more
	 * than enough to prevent repeating work */
	work->blk.nonce += gpu->max_hashes;

	/* This finish flushes the readbuffer set with CL_FALSE in clEnqueueReadBuffer */
	clFinish(clState->commandQueue);

	/* FOUND entry is used as a counter to say how many nonces exist */
	if (thrdata->res[found]) {
		/* Clear the buffer again */
		status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0, buffersize, blank_res, 0, NULL, NULL);
		if (unlikely(status != CL_SUCCESS)) {
			applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed.");
			return -1;
		}
		applog(LOG_DEBUG, "GPU %d found something?", gpu->device_id);
		/* Hand the snapshot of res off for async CPU-side verification,
		 * then clear our host copy for the next pass. */
		postcalc_hash_async(thr, work, thrdata->res);
		memset(thrdata->res, 0, buffersize);
		/* This finish flushes the writebuffer set with CL_FALSE in clEnqueueWriteBuffer */
		clFinish(clState->commandQueue);
	}

	return hashes;
}
static bool opencl_thread_prepare(struct thr_info *thr) { char name[256]; struct timeval now; struct cgpu_info *cgpu = thr->cgpu; int gpu = cgpu->device_id; int virtual_gpu = cgpu->virtual_gpu; int i = thr->id; static bool failmessage = false; int buffersize = opt_scrypt ? SCRYPT_BUFFERSIZE : BUFFERSIZE; if (!blank_res) blank_res = calloc(buffersize, 1); if (!blank_res) { applog(LOG_ERR, "Failed to calloc in opencl_thread_init"); return false; } strcpy(name, ""); applog(LOG_INFO, "Init GPU thread %i GPU %i virtual GPU %i", i, gpu, virtual_gpu); clStates[i] = initCl(virtual_gpu, name, sizeof(name)); if (!clStates[i]) { #ifdef HAVE_CURSES if (use_curses) enable_curses(); #endif applog(LOG_ERR, "Failed to init GPU thread %d, disabling device %d", i, gpu); if (!failmessage) { applog(LOG_ERR, "Restarting the GPU from the menu will not fix this."); applog(LOG_ERR, "Try restarting cgminer."); failmessage = true; #ifdef HAVE_CURSES char *buf; if (use_curses) { buf = curses_input("Press enter to continue"); if (buf) free(buf); } #endif } cgpu->deven = DEV_DISABLED; cgpu->status = LIFE_NOSTART; dev_error(cgpu, REASON_DEV_NOSTART); return false; } if (!cgpu->name) cgpu->name = strdup(name); if (!cgpu->kname) { switch (clStates[i]->chosen_kernel) { case KL_DIABLO: cgpu->kname = "diablo"; break; case KL_DIAKGCN: cgpu->kname = "diakgcn"; break; case KL_PHATK: cgpu->kname = "phatk"; break; #ifdef USE_SCRYPT case KL_SCRYPT: cgpu->kname = "scrypt"; break; #endif case KL_POCLBM: cgpu->kname = "poclbm"; break; default: break; } } applog(LOG_INFO, "initCl() finished. Found %s", name); cgtime(&now); get_datestamp(cgpu->init, sizeof(cgpu->init), &now); have_opencl = true; return true; }