static int avalon2_stratum_pkgs(int fd, struct pool *pool, struct thr_info *thr) { /* FIXME: what if new stratum arrive when writing */ struct avalon2_pkg pkg; int i, a, b, tmp; unsigned char target[32]; int job_id_len; /* Send out the first stratum message STATIC */ applog(LOG_DEBUG, "Avalon2: Pool stratum message STATIC: %ld, %d, %d, %d, %d", pool->swork.cb_len, pool->nonce2_offset, pool->n2size, pool->merkle_offset, pool->swork.merkles); memset(pkg.data, 0, AVA2_P_DATA_LEN); tmp = be32toh(pool->swork.cb_len); memcpy(pkg.data, &tmp, 4); tmp = be32toh(pool->nonce2_offset); memcpy(pkg.data + 4, &tmp, 4); tmp = be32toh(pool->n2size); memcpy(pkg.data + 8, &tmp, 4); tmp = be32toh(pool->merkle_offset); memcpy(pkg.data + 12, &tmp, 4); tmp = be32toh(pool->swork.merkles); memcpy(pkg.data + 16, &tmp, 4); tmp = be32toh((int)pool->swork.diff); memcpy(pkg.data + 20, &tmp, 4); tmp = be32toh((int)pool->pool_no); memcpy(pkg.data + 24, &tmp, 4); avalon2_init_pkg(&pkg, AVA2_P_STATIC, 1, 1); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; set_target(target, pool->swork.diff); memcpy(pkg.data, target, 32); if (opt_debug) { char *target_str; target_str = bin2hex(target, 32); applog(LOG_DEBUG, "Avalon2: Pool stratum target: %s", target_str); free(target_str); } avalon2_init_pkg(&pkg, AVA2_P_TARGET, 1, 1); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; applog(LOG_DEBUG, "Avalon2: Pool stratum message JOBS_ID: %s", pool->swork.job_id); memset(pkg.data, 0, AVA2_P_DATA_LEN); job_id_len = strlen(pool->swork.job_id); job_id_len = job_id_len >= 4 ? 
4 : job_id_len; for (i = 0; i < job_id_len; i++) { pkg.data[i] = *(pool->swork.job_id + strlen(pool->swork.job_id) - 4 + i); } avalon2_init_pkg(&pkg, AVA2_P_JOB_ID, 1, 1); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; a = pool->swork.cb_len / AVA2_P_DATA_LEN; b = pool->swork.cb_len % AVA2_P_DATA_LEN; applog(LOG_DEBUG, "Avalon2: Pool stratum message COINBASE: %d %d", a, b); for (i = 0; i < a; i++) { memcpy(pkg.data, pool->coinbase + i * 32, 32); avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, a + (b ? 1 : 0)); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; } if (b) { memset(pkg.data, 0, AVA2_P_DATA_LEN); memcpy(pkg.data, pool->coinbase + i * 32, b); avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, i + 1); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; } b = pool->swork.merkles; applog(LOG_DEBUG, "Avalon2: Pool stratum message MERKLES: %d", b); for (i = 0; i < b; i++) { memset(pkg.data, 0, AVA2_P_DATA_LEN); memcpy(pkg.data, pool->swork.merkle_bin[i], 32); avalon2_init_pkg(&pkg, AVA2_P_MERKLES, i + 1, b); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; } applog(LOG_DEBUG, "Avalon2: Pool stratum message HEADER: 4"); for (i = 0; i < 4; i++) { memset(pkg.data, 0, AVA2_P_HEADER); memcpy(pkg.data, pool->header_bin + i * 32, 32); avalon2_init_pkg(&pkg, AVA2_P_HEADER, i + 1, 4); while (avalon2_send_pkg(fd, &pkg, thr) != AVA2_SEND_OK) ; } return 0; }
static void hashratio_update_work(struct cgpu_info *hashratio) { struct hashratio_info *info = hashratio->device_data; struct thr_info *thr = hashratio->thr[0]; struct hashratio_pkg send_pkg; uint32_t tmp, range, start; struct work *work; struct pool *pool; applog(LOG_DEBUG, "hashratio: New stratum: restart: %d, update: %d", thr->work_restart, thr->work_update); thr->work_update = false; thr->work_restart = false; work = get_work(thr, thr->id); /* Make sure pool is ready */ discard_work(work); /* Don't leak memory */ pool = current_pool(); if (!pool->has_stratum) quit(1, "hashratio: Miner Manager have to use stratum pool"); if (pool->coinbase_len > HRTO_P_COINBASE_SIZE) quit(1, "hashratio: Miner Manager pool coinbase length have to less then %d", HRTO_P_COINBASE_SIZE); if (pool->merkles > HRTO_P_MERKLES_COUNT) quit(1, "hashratio: Miner Manager merkles have to less then %d", HRTO_P_MERKLES_COUNT); info->pool_no = pool->pool_no; cgtime(&info->last_stratum); cg_rlock(&pool->data_lock); info->pool_no = pool->pool_no; copy_pool_stratum(info, pool); hashratio_stratum_pkgs(hashratio, pool); cg_runlock(&pool->data_lock); /* Configure the parameter from outside */ memset(send_pkg.data, 0, HRTO_P_DATA_LEN); // fan. We're not measuring temperature so set a safe but not max value info->fan_pwm = HRTO_PWM_MAX * 2 / 3; tmp = be32toh(info->fan_pwm); memcpy(send_pkg.data, &tmp, 4); // freq tmp = be32toh(info->default_freq); memcpy(send_pkg.data + 4, &tmp, 4); applog(LOG_DEBUG, "set freq: %d", info->default_freq); /* Configure the nonce2 offset and range */ range = 0xffffffff / (total_devices + 1); start = range * (hashratio->device_id + 1); tmp = be32toh(start); memcpy(send_pkg.data + 8, &tmp, 4); tmp = be32toh(range); memcpy(send_pkg.data + 12, &tmp, 4); /* Package the data */ hashratio_init_pkg(&send_pkg, HRTO_P_SET, 1, 1); hashratio_send_pkgs(hashratio, &send_pkg); }
/* Probe whether the CM1 firmware supports dynamic clocking.
 * The dynclock probe command is sent twice, then one nonce reply is read.
 * A reply of 0 means the firmware consumed the command (supported); the
 * two known values below are the hash OF the command itself, meaning the
 * FPGA treated it as work (unsupported).  Anything else is logged and
 * conservatively treated as unsupported. */
bool cairnsmore_supports_dynclock(int fd)
{
	if (!cairnsmore_send_cmd(fd, 0, 1, true))
		return false;
	if (!cairnsmore_send_cmd(fd, 0, 1, true))
		return false;
	uint32_t nonce = 0;
	{
		struct timeval tv_finish;
		/* Minimal thr_info stub so icarus_gets can be used outside a
		 * real mining thread (no work-restart plumbing). */
		struct thr_info dummy = {
			.work_restart = false,
			.work_restart_fd = -1,
		};
		icarus_gets((unsigned char*)&nonce, fd, &tv_finish, &dummy, 1);
	}
	applog(LOG_DEBUG, "Cairnsmore dynclock detection... Got %08x", nonce);
	switch (nonce) {
		case 0x00949a6f:  // big endian
		case 0x6f9a9400:  // little endian
			// Hashed the command, so it's not supported
			return false;
		default:
			applog(LOG_WARNING, "Unexpected nonce from dynclock probe: %08x", be32toh(nonce));
			return false;
		case 0:
			return true;
	}
}

/* From here on, cairnsmore_send_cmd calls default the trailing
 * probe/wait argument to false. */
#define cairnsmore_send_cmd(fd, cmd, data) cairnsmore_send_cmd(fd, cmd, data, false)

/* dclk callback: reprogram the FPGA clock multiplier to bestM and scale
 * the hash-time estimate (Hs) to match the new frequency.
 * Returns false if the command could not be sent. */
static bool cairnsmore_change_clock_func(struct thr_info *thr, int bestM)
{
	struct cgpu_info *cm1 = thr->cgpu;
	struct ICARUS_INFO *info = cm1->cgpu_data;

	if (unlikely(!cairnsmore_send_cmd(cm1->device_fd, 0, bestM)))
		return false;

	// Adjust Hs expectations for frequency change
	info->Hs = info->Hs * (double)bestM / (double)info->dclk.freqM;

	char repr[0x10];
	sprintf(repr, "%s %u", cm1->api->name, cm1->device_id);
	/* freqM values appear to be in 2.5 MHz units (see the * 2.5 below
	 * and the / 2.5 conversions in cairnsmore_init) */
	dclk_msg_freqchange(repr, 2.5 * (double)info->dclk.freqM, 2.5 * (double)bestM, NULL);
	info->dclk.freqM = bestM;

	return true;
}

/* Per-thread init: detect dynclock support and, when present, set up the
 * dclk scaler, program the default clock, and adjust work division since
 * the dynclock firmware exposes each FPGA as its own device.
 * Always returns true. */
static bool cairnsmore_init(struct thr_info *thr)
{
	struct cgpu_info *cm1 = thr->cgpu;
	struct ICARUS_INFO *info = cm1->cgpu_data;
	struct icarus_state *state = thr->cgpu_data;

	if (cairnsmore_supports_dynclock(cm1->device_fd)) {
		info->dclk_change_clock_func = cairnsmore_change_clock_func;

		dclk_prepare(&info->dclk);
		/* Clock constants are MHz; freqM fields are 2.5 MHz steps */
		info->dclk.freqMinM = CAIRNSMORE1_MINIMUM_CLOCK / 2.5;
		info->dclk.freqMaxM = CAIRNSMORE1_MAXIMUM_CLOCK / 2.5;
		info->dclk.freqM =
		info->dclk.freqMDefault = CAIRNSMORE1_DEFAULT_CLOCK / 2.5;
		cairnsmore_send_cmd(cm1->device_fd, 0, info->dclk.freqM);
		applog(LOG_WARNING, "%s %u: Frequency set to %u MHz (range: %u-%u)",
		       cm1->api->name, cm1->device_id,
		       CAIRNSMORE1_DEFAULT_CLOCK, CAIRNSMORE1_MINIMUM_CLOCK, CAIRNSMORE1_MAXIMUM_CLOCK
		);
		// The dynamic-clocking firmware connects each FPGA as its own device
		if (!(info->user_set & 1)) {
			info->work_division = 1;
			if (!(info->user_set & 2))
				info->fpga_count = 1;
		}
	} else {
		applog(LOG_WARNING, "%s %u: Frequency scaling not supported",
		       cm1->api->name, cm1->device_id
		);
	}
	// Commands corrupt the hash state, so next scanhash is a firstrun
	state->firstrun = true;

	return true;
}

/* Rebrand an already-detected Icarus device as a Cairnsmore1: install the
 * CM1 timing constants, switch to the cairnsmore api vtable, renumber and
 * re-run init. */
void convert_icarus_to_cairnsmore(struct cgpu_info *cm1)
{
	struct ICARUS_INFO *info = cm1->cgpu_data;
	info->Hs = CAIRNSMORE1_HASH_TIME;
	info->fullnonce = info->Hs * (((double)0xffffffff) + 1);
	info->timing_mode = MODE_LONG;
	info->do_icarus_timing = true;
	cm1->api = &cairnsmore_api;
	renumber_cgpu(cm1);
	cairnsmore_init(cm1->thr[0]);
}

/* RPC status API: report the current frequency, but only when dynclock is
 * active (freqM nonzero). */
static struct api_data *cairnsmore_api_extra_device_status(struct cgpu_info *cm1)
{
	struct ICARUS_INFO *info = cm1->cgpu_data;
	struct api_data *root = NULL;

	if (info->dclk.freqM) {
		double frequency = 2.5 * info->dclk.freqM;
		root = api_add_freq(root, "Frequency", &frequency, true);
	}

	return root;
}

/* Identify: flash the device (cmd 1 on, sleep 5s, cmd 1 off).  Only works
 * on dynclock firmware; returns false otherwise. */
static bool cairnsmore_identify(struct cgpu_info *cm1)
{
	struct ICARUS_INFO *info = cm1->cgpu_data;
	if (!info->dclk.freqM)
		return false;  /* non-dynclock firmware: no LED command */
	cairnsmore_send_cmd(cm1->device_fd, 1, 1);
	sleep(5);
	cairnsmore_send_cmd(cm1->device_fd, 1, 0);
	cm1->flash_led = true;
	return true;
}

extern struct device_api icarus_api;

/* Build the cairnsmore api vtable: clone the Icarus driver and override
 * only the Cairnsmore-specific hooks. */
static void cairnsmore_api_init()
{
	cairnsmore_api = icarus_api;
	cairnsmore_api.dname = "cairnsmore";
	cairnsmore_api.name = "ECM";
	cairnsmore_api.api_detect = cairnsmore_detect;
	cairnsmore_api.thread_init = cairnsmore_init;
	cairnsmore_api.identify_device = cairnsmore_identify;
	cairnsmore_api.get_api_extra_device_status = cairnsmore_api_extra_device_status;
}
/* Parse one HRTO_READ_SIZE packet received from the Hashratio and act on
 * it: submit found nonces, record status/fan/temperature, acknowledge
 * detection packets.  Returns the packet type, or HRTO_GETS_ERROR for a
 * malformed packet.
 * NOTE(review): the `out:` label this function jumps to (and its closing
 * return) lies beyond this excerpt. */
static int decode_pkg(struct thr_info *thr, struct hashratio_ret *ar, uint8_t *pkg)
{
	struct cgpu_info *hashratio = thr->cgpu;
	struct hashratio_info *info = hashratio->device_data;
	/* pool_stratum holds the previous stratum session so late nonces can
	 * still be matched against it */
	struct pool *pool, *real_pool, *pool_stratum = &info->pool;

	unsigned int expected_crc;
	unsigned int actual_crc;
	uint32_t nonce, nonce2, miner;
	int pool_no;
	uint8_t job_id[4];
	int tmp;

	int type = HRTO_GETS_ERROR;

	memcpy((uint8_t *)ar, pkg, HRTO_READ_SIZE);

	// applog(LOG_DEBUG, "pkg.type, hex: %02x, dec: %d", ar->type, ar->type);
	/* Valid packets start with the H1/H2 magic bytes */
	if (ar->head[0] == HRTO_H1 && ar->head[1] == HRTO_H2) {
		/* CRC trailer is little-endian: low byte first */
		expected_crc = crc16(ar->data, HRTO_P_DATA_LEN);
		actual_crc = (ar->crc[0] & 0xff) | ((ar->crc[1] & 0xff) << 8);

		type = ar->type;
		applog(LOG_DEBUG, "hashratio: %d: expected crc(%04x), actual_crc(%04x)", type, expected_crc, actual_crc);
		if (expected_crc != actual_crc)
			goto out;

		switch(type) {
		case HRTO_P_NONCE:
			applog(LOG_DEBUG, "Hashratio: HRTO_P_NONCE");
			/* Payload layout: miner(4) pool_no(4) nonce2(4)
			 * nonce(4) job_id(4), all big-endian */
			memcpy(&miner, ar->data + 0, 4);
			memcpy(&pool_no, ar->data + 4, 4);
			memcpy(&nonce2, ar->data + 8, 4);
			/* Calc time ar->data + 12 */
			memcpy(&nonce, ar->data + 12, 4);
			memcpy(job_id, ar->data + 16, 4);

			miner = be32toh(miner);
			pool_no = be32toh(pool_no);
			if (miner >= HRTO_DEFAULT_MINERS ||
			    pool_no >= total_pools ||
			    pool_no < 0) {
				applog(LOG_DEBUG, "hashratio: Wrong miner/pool/id no %d,%d", miner, pool_no);
				break;
			} else
				info->matching_work[miner]++;
			nonce2 = be32toh(nonce2);
			nonce = be32toh(nonce);

			/* NOTE(review): job_id is a raw 4-byte array with no
			 * NUL terminator, yet it is printed with %s here and
			 * compared below — presumably job_idcmp only looks at
			 * 4 bytes; confirm, and consider %.4s for the log. */
			applog(LOG_DEBUG, "hashratio: Found! [%s] %d:(%08x) (%08x)",
			       job_id, pool_no, nonce2, nonce);

			real_pool = pool = pools[pool_no];
			if (job_idcmp(job_id, pool->swork.job_id)) {
				/* Job id mismatch: try the cached previous stratum session */
				if (!job_idcmp(job_id, pool_stratum->swork.job_id)) {
					applog(LOG_DEBUG, "Hashratio: Match to previous stratum! (%s)", pool_stratum->swork.job_id);
					pool = pool_stratum;
				} else {
					applog(LOG_DEBUG, "Hashratio Cannot match to any stratum! (%s)", pool->swork.job_id);
					break;
				}
			}

			submit_nonce2_nonce(thr, pool, real_pool, nonce2, nonce, 0);
			break;
		case HRTO_P_STATUS:
			applog(LOG_DEBUG, "Hashratio: HRTO_P_STATUS");
			memcpy(&tmp, ar->data, 4);
			tmp = be32toh(tmp);
			/* NOTE(review): (tmp & 0x00f0) >> 8 is always 0 — the
			 * mask and shift do not overlap.  Looks like a
			 * mask/shift mismatch (perhaps (tmp & 0xff00) >> 8 was
			 * intended); confirm against the device protocol. */
			info->temp = (tmp & 0x00f0) >> 8;
			if (info->temp_max < info->temp) {
				info->temp_max = info->temp;
			}
			// info->temp[1] = tmp & 0xffff;

			/* Two fan readings packed into one 32-bit word */
			memcpy(&tmp, ar->data + 4, 4);
			tmp = be32toh(tmp);
			info->fan[0] = tmp >> 16;
			info->fan[1] = tmp & 0xffff;

			// local_work
			memcpy(&tmp, ar->data + 8, 4);
			tmp = be32toh(tmp);
			info->local_work = tmp;
			info->local_works += tmp;

			// hw_work
			memcpy(&tmp, ar->data + 12, 4);
			tmp = be32toh(tmp);
			info->hw_works += tmp;

			hashratio->temp = info->temp;
			break;
		case HRTO_P_ACKDETECT:
			applog(LOG_DEBUG, "Hashratio: HRTO_P_ACKDETECT");
			break;
		case HRTO_P_ACK:
			applog(LOG_DEBUG, "Hashratio: HRTO_P_ACK");
			break;
		case HRTO_P_NAK:
			applog(LOG_DEBUG, "Hashratio: HRTO_P_NAK");
			break;
		default:
			applog(LOG_DEBUG, "Hashratio: HRTO_GETS_ERROR");
			type = HRTO_GETS_ERROR;
			break;
		}
	}
/* Initialise the Hashratio's FTDI UART via vendor control transfers:
 * reset, latency timer, line format, baud rate, then modem and flow
 * control (the last two are deliberately issued twice, mirroring the
 * stock init sequence).  Each step logs its error code at debug level,
 * and the function bails out silently as soon as the device disappears
 * (usbinfo.nodev). */
static void hashratio_initialise(struct cgpu_info *hashratio)
{
	int err, interface;

	if (hashratio->usbinfo.nodev)
		return;

	interface = usb_interface(hashratio);

	// Reset
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_RESET,
			   interface, C_RESET);
	applog(LOG_DEBUG, "%s%i: reset got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set latency
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_LATENCY, HASHRATIO_LATENCY,
			   interface, C_LATENCY);
	applog(LOG_DEBUG, "%s%i: latency got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set data (line format: data bits / parity / stop bits)
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, FTDI_VALUE_DATA_AVA,
			   interface, C_SETDATA);
	applog(LOG_DEBUG, "%s%i: data got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set the baud; the FTDI baud index's high byte is combined with the
	// interface number in wIndex
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_AVA,
			   (FTDI_INDEX_BAUD_AVA & 0xff00) | interface, C_SETBAUD);
	applog(LOG_DEBUG, "%s%i: setbaud got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set Modem Control
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, FTDI_VALUE_MODEM,
			   interface, C_SETMODEM);
	applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set Flow Control
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, FTDI_VALUE_FLOW,
			   interface, C_SETFLOW);
	applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	/* hashratio repeats the following */
	// Set Modem Control
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, FTDI_VALUE_MODEM,
			   interface, C_SETMODEM);
	applog(LOG_DEBUG, "%s%i: setmodemctrl 2 got err %d", hashratio->drv->name, hashratio->device_id, err);

	if (hashratio->usbinfo.nodev)
		return;

	// Set Flow Control
	err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, FTDI_VALUE_FLOW,
			   interface, C_SETFLOW);
	applog(LOG_DEBUG, "%s%i: setflowctrl 2 got err %d", hashratio->drv->name, hashratio->device_id, err);
}
// Algo benchmark, crash-safe, system-dependent stage static double bench_algo_stage2( enum sha256_algos algo ) { // Here, the gig is to safely run a piece of code that potentially // crashes. Unfortunately, the Right Way (tm) to do this is rather // heavily platform dependent :( double rate = -1.23457; #if defined(unix) // Make a pipe: [readFD, writeFD] int pfd[2]; int r = pipe(pfd); if (r<0) { perror("pipe - failed to create pipe for --algo auto"); exit(1); } // Make pipe non blocking set_non_blocking(pfd[0], 1); set_non_blocking(pfd[1], 1); // Don't allow a crashing child to kill the main process sighandler_t sr0 = signal(SIGPIPE, SIG_IGN); sighandler_t sr1 = signal(SIGPIPE, SIG_IGN); if (SIG_ERR==sr0 || SIG_ERR==sr1) { perror("signal - failed to edit signal mask for --algo auto"); exit(1); } // Fork a child to do the actual benchmarking pid_t child_pid = fork(); if (child_pid<0) { perror("fork - failed to create a child process for --algo auto"); exit(1); } // Do the dangerous work in the child, knowing we might crash if (0==child_pid) { // TODO: some umask trickery to prevent coredumps // Benchmark this algorithm double r = bench_algo_stage3(algo); // We survived, send result to parent and bail int loop_count = 0; while (1) { ssize_t bytes_written = write(pfd[1], &r, sizeof(r)); int try_again = (0==bytes_written || (bytes_written<0 && EAGAIN==errno)); int success = (sizeof(r)==(size_t)bytes_written); if (success) break; if (!try_again) { perror("write - child failed to write benchmark result to pipe"); exit(1); } if (5<loop_count) { applog(LOG_ERR, "child tried %d times to communicate with parent, giving up", loop_count); exit(1); } ++loop_count; sleep(1); } exit(0); } // Parent waits for a result from child int loop_count = 0; while (1) { // Wait for child to die int status; int r = waitpid(child_pid, &status, WNOHANG); if ((child_pid==r) || (r<0 && ECHILD==errno)) { // Child died somehow. 
Grab result and bail double tmp; ssize_t bytes_read = read(pfd[0], &tmp, sizeof(tmp)); if (sizeof(tmp)==(size_t)bytes_read) rate = tmp; break; } else if (r<0) { perror("bench_algo: waitpid failed. giving up."); exit(1); } // Give up on child after a ~60s if (60<loop_count) { kill(child_pid, SIGKILL); waitpid(child_pid, &status, 0); break; } // Wait a bit longer ++loop_count; sleep(1); } // Close pipe r = close(pfd[0]); if (r<0) { perror("close - failed to close read end of pipe for --algo auto"); exit(1); } r = close(pfd[1]); if (r<0) { perror("close - failed to close read end of pipe for --algo auto"); exit(1); } #elif defined(WIN32) // Get handle to current exe HINSTANCE module = GetModuleHandle(0); if (!module) { applog(LOG_ERR, "failed to retrieve module handle"); exit(1); } // Create a unique name char unique_name[32]; snprintf( unique_name, sizeof(unique_name)-1, "cgminer-%p", (void*)module ); // Create and init a chunked of shared memory HANDLE map_handle = CreateFileMapping( INVALID_HANDLE_VALUE, // use paging file NULL, // default security attributes PAGE_READWRITE, // read/write access 0, // size: high 32-bits 4096, // size: low 32-bits unique_name // name of map object ); if (NULL==map_handle) { applog(LOG_ERR, "could not create shared memory"); exit(1); } void *shared_mem = MapViewOfFile( map_handle, // object to map view of FILE_MAP_WRITE, // read/write access 0, // high offset: map from 0, // low offset: beginning 0 // default: map entire file ); if (NULL==shared_mem) { applog(LOG_ERR, "could not map shared memory"); exit(1); } SetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name); CopyMemory(shared_mem, &rate, sizeof(rate)); // Get path to current exe char cmd_line[256 + MAX_PATH]; const size_t n = sizeof(cmd_line)-200; DWORD size = GetModuleFileName(module, cmd_line, n); if (0==size) { applog(LOG_ERR, "failed to retrieve module path"); exit(1); } // Construct new command line based on that char *p = strlen(cmd_line) + cmd_line; sprintf(p, " 
--bench-algo %d", algo); SetEnvironmentVariable("CGMINER_BENCH_ALGO", "1"); // Launch a debug copy of cgminer STARTUPINFO startup_info; PROCESS_INFORMATION process_info; ZeroMemory(&startup_info, sizeof(startup_info)); ZeroMemory(&process_info, sizeof(process_info)); startup_info.cb = sizeof(startup_info); BOOL ok = CreateProcess( NULL, // No module name (use command line) cmd_line, // Command line NULL, // Process handle not inheritable NULL, // Thread handle not inheritable FALSE, // Set handle inheritance to FALSE DEBUG_ONLY_THIS_PROCESS,// We're going to debug the child NULL, // Use parent's environment block NULL, // Use parent's starting directory &startup_info, // Pointer to STARTUPINFO structure &process_info // Pointer to PROCESS_INFORMATION structure ); if (!ok) { applog(LOG_ERR, "CreateProcess failed with error %d\n", GetLastError() ); exit(1); } // Debug the child (only clean way to catch exceptions) while (1) { // Wait for child to do something DEBUG_EVENT debug_event; ZeroMemory(&debug_event, sizeof(debug_event)); BOOL ok = WaitForDebugEvent(&debug_event, 60 * 1000); if (!ok) break; // Decide if event is "normal" int go_on = CREATE_PROCESS_DEBUG_EVENT== debug_event.dwDebugEventCode || CREATE_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode || EXIT_THREAD_DEBUG_EVENT == debug_event.dwDebugEventCode || EXCEPTION_DEBUG_EVENT == debug_event.dwDebugEventCode || LOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode || OUTPUT_DEBUG_STRING_EVENT == debug_event.dwDebugEventCode || UNLOAD_DLL_DEBUG_EVENT == debug_event.dwDebugEventCode; if (!go_on) break; // Some exceptions are also "normal", apparently. 
if (EXCEPTION_DEBUG_EVENT== debug_event.dwDebugEventCode) { int go_on = EXCEPTION_BREAKPOINT== debug_event.u.Exception.ExceptionRecord.ExceptionCode; if (!go_on) break; } // If nothing unexpected happened, let child proceed ContinueDebugEvent( debug_event.dwProcessId, debug_event.dwThreadId, DBG_CONTINUE ); } // Clean up child process TerminateProcess(process_info.hProcess, 1); CloseHandle(process_info.hProcess); CloseHandle(process_info.hThread); // Reap return value and cleanup CopyMemory(&rate, shared_mem, sizeof(rate)); (void)UnmapViewOfFile(shared_mem); (void)CloseHandle(map_handle); #else // Not linux, not unix, not WIN32 ... do our best rate = bench_algo_stage3(algo); #endif // defined(unix) // Done return rate; }
/* Fold an OP_USB_STATS1 payload from the device into the driver's running
 * totals and dump the per-report values at debug level.
 * h points at the received header; the hf_usb_stats1 payload follows it
 * directly in memory (hence the (h + 1) cast).
 * Counters are accumulated with +=, so the device presumably reports
 * per-interval deltas rather than absolute totals — confirm against the
 * HashFast protocol spec.  Peak buffer counts are tracked as maxima. */
static void hfa_update_stats1(struct cgpu_info *hashfast, struct hashfast_info *info,
			      struct hf_header *h)
{
	struct hf_long_usb_stats1 *s1 = &info->stats1;
	struct hf_usb_stats1 *sd = (struct hf_usb_stats1 *)(h + 1);

	/* Accumulate USB RX/TX counters into the long-lived totals */
	s1->usb_rx_preambles += sd->usb_rx_preambles;
	s1->usb_rx_receive_byte_errors += sd->usb_rx_receive_byte_errors;
	s1->usb_rx_bad_hcrc += sd->usb_rx_bad_hcrc;

	s1->usb_tx_attempts += sd->usb_tx_attempts;
	s1->usb_tx_packets += sd->usb_tx_packets;
	s1->usb_tx_timeouts += sd->usb_tx_timeouts;
	s1->usb_tx_incompletes += sd->usb_tx_incompletes;
	s1->usb_tx_endpointstalled += sd->usb_tx_endpointstalled;
	s1->usb_tx_disconnected += sd->usb_tx_disconnected;
	s1->usb_tx_suspended += sd->usb_tx_suspended;
#if 0
	/* We don't care about UART stats so they're not in our struct */
	s1->uart_tx_queue_dma += sd->uart_tx_queue_dma;
	s1->uart_tx_interrupts += sd->uart_tx_interrupts;

	s1->uart_rx_preamble_ints += sd->uart_rx_preamble_ints;
	s1->uart_rx_missed_preamble_ints += sd->uart_rx_missed_preamble_ints;
	s1->uart_rx_header_done += sd->uart_rx_header_done;
	s1->uart_rx_data_done += sd->uart_rx_data_done;
	s1->uart_rx_bad_hcrc += sd->uart_rx_bad_hcrc;
	s1->uart_rx_bad_dma += sd->uart_rx_bad_dma;
	s1->uart_rx_short_dma += sd->uart_rx_short_dma;
	s1->uart_rx_buffers_full += sd->uart_rx_buffers_full;
#endif

	/* High-water marks, not accumulators */
	if (sd->max_tx_buffers > s1->max_tx_buffers)
		s1->max_tx_buffers = sd->max_tx_buffers;
	if (sd->max_rx_buffers > s1->max_rx_buffers)
		s1->max_rx_buffers = sd->max_rx_buffers;

	applog(LOG_DEBUG, "HFA %d: OP_USB_STATS1:", hashfast->device_id);
	applog(LOG_DEBUG, "      usb_rx_preambles:                %6d", sd->usb_rx_preambles);
	applog(LOG_DEBUG, "      usb_rx_receive_byte_errors:      %6d", sd->usb_rx_receive_byte_errors);
	applog(LOG_DEBUG, "      usb_rx_bad_hcrc:                 %6d", sd->usb_rx_bad_hcrc);

	applog(LOG_DEBUG, "      usb_tx_attempts:                 %6d", sd->usb_tx_attempts);
	applog(LOG_DEBUG, "      usb_tx_packets:                  %6d", sd->usb_tx_packets);
	applog(LOG_DEBUG, "      usb_tx_timeouts:                 %6d", sd->usb_tx_timeouts);
	applog(LOG_DEBUG, "      usb_tx_incompletes:              %6d", sd->usb_tx_incompletes);
	applog(LOG_DEBUG, "      usb_tx_endpointstalled:          %6d", sd->usb_tx_endpointstalled);
	applog(LOG_DEBUG, "      usb_tx_disconnected:             %6d", sd->usb_tx_disconnected);
	applog(LOG_DEBUG, "      usb_tx_suspended:                %6d", sd->usb_tx_suspended);
#if 0
	applog(LOG_DEBUG, "      uart_tx_queue_dma:               %6d", sd->uart_tx_queue_dma);
	applog(LOG_DEBUG, "      uart_tx_interrupts:              %6d", sd->uart_tx_interrupts);

	applog(LOG_DEBUG, "      uart_rx_preamble_ints:           %6d", sd->uart_rx_preamble_ints);
	applog(LOG_DEBUG, "      uart_rx_missed_preamble_ints:    %6d", sd->uart_rx_missed_preamble_ints);
	applog(LOG_DEBUG, "      uart_rx_header_done:             %6d", sd->uart_rx_header_done);
	applog(LOG_DEBUG, "      uart_rx_data_done:               %6d", sd->uart_rx_data_done);
	applog(LOG_DEBUG, "      uart_rx_bad_hcrc:                %6d", sd->uart_rx_bad_hcrc);
	applog(LOG_DEBUG, "      uart_rx_bad_dma:                 %6d", sd->uart_rx_bad_dma);
	applog(LOG_DEBUG, "      uart_rx_short_dma:               %6d", sd->uart_rx_short_dma);
	applog(LOG_DEBUG, "      uart_rx_buffers_full:            %6d", sd->uart_rx_buffers_full);
#endif

	applog(LOG_DEBUG, "      max_tx_buffers:                  %6d", sd->max_tx_buffers);
	applog(LOG_DEBUG, "      max_rx_buffers:                  %6d", sd->max_rx_buffers);
}
nfsstat4 nfs_op_link(struct nfs_cxn *cxn, const LINK4args *args, struct list_head *writes, struct rpc_write **wr) { nfsstat4 status; struct nfs_inode *dir_ino = NULL, *src_ino = NULL; struct nfs_buf newname; uint64_t before = 0, after = 0; DB_TXN *txn; DB_ENV *dbenv = srv.fsdb.env; int rc; newname.len = args->newname.utf8string_len; newname.val = args->newname.utf8string_val; if (debugging) applog(LOG_INFO, "op LINK (%.*s)", newname.len, newname.val); /* verify input parameters */ if (!valid_fh(cxn->current_fh) || !valid_fh(cxn->save_fh)) { status = NFS4ERR_NOFILEHANDLE; goto out; } if (newname.len > SRV_MAX_NAME) { status = NFS4ERR_NAMETOOLONG; goto out; } /* open transaction */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { status = NFS4ERR_IO; dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto out; } /* read source inode's directory inode */ dir_ino = inode_fhdec(txn, cxn->current_fh, 0); if (!dir_ino) { status = NFS4ERR_NOFILEHANDLE; goto out_abort; } /* make sure target is a directory */ if (dir_ino->type != NF4DIR) { status = NFS4ERR_NOTDIR; goto out_abort; } /* read source inode */ src_ino = inode_fhdec(txn, cxn->save_fh, 0); if (!src_ino) { status = NFS4ERR_NOFILEHANDLE; goto out_abort; } /* make sure source is a not a directory */ if (src_ino->type == NF4DIR) { status = NFS4ERR_ISDIR; goto out_abort; } before = dir_ino->version; /* add directory entry */ status = dir_add(txn, dir_ino, &newname, src_ino); if (status != NFS4_OK) goto out_abort; after = dir_ino->version; /* update source inode */ src_ino->n_link++; if (inode_touch(txn, src_ino)) { status = NFS4ERR_IO; goto out_abort; } /* close transaction */ rc = txn->commit(txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); status = NFS4ERR_IO; goto out; } out: WR32(status); if (status == NFS4_OK) { WR32(1); /* cinfo.atomic */ WR64(before); /* cinfo.before */ WR64(after); /* cinfo.after */ } inode_free(src_ino); inode_free(dir_ino); return status; out_abort: if (txn->abort(txn)) 
dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); goto out; }
nfsstat4 nfs_op_remove(struct nfs_cxn *cxn, const REMOVE4args *args, struct list_head *writes, struct rpc_write **wr) { nfsstat4 status = NFS4_OK; struct nfs_inode *dir_ino = NULL, *target_ino = NULL; struct nfs_buf target; change_info4 cinfo = { true, 0, 0 }; DB_TXN *txn = NULL; DB_ENV *dbenv = srv.fsdb.env; int rc; nfsino_t de_inum; target.len = args->target.utf8string_len; target.val = args->target.utf8string_val; if (debugging) applog(LOG_INFO, "op REMOVE ('%.*s')", target.len, target.val); if (target.len > SRV_MAX_NAME) { status = NFS4ERR_NAMETOOLONG; goto out; } if (!valid_utf8string(&target)) { status = NFS4ERR_INVAL; goto out; } if (has_dots(&target)) { status = NFS4ERR_BADNAME; goto out; } rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { status = NFS4ERR_IO; dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto out; } /* reference container directory */ status = dir_curfh(txn, cxn, &dir_ino, DB_RMW); if (status != NFS4_OK) goto out_abort; /* lookup target name in directory */ status = dir_lookup(txn, dir_ino, &target, 0, &de_inum); if (status != NFS4_OK) goto out_abort; /* reference target inode */ target_ino = inode_getdec(txn, de_inum, DB_RMW); if (!target_ino) { status = NFS4ERR_NOENT; goto out_abort; } /* prevent root dir deletion */ if (target_ino->inum == INO_ROOT) { status = NFS4ERR_INVAL; goto out_abort; } /* prevent removal of non-empty dirs */ if ((target_ino->type == NF4DIR) && !dir_is_empty(txn, target_ino)) { status = NFS4ERR_NOTEMPTY; goto out_abort; } /* remove target inode from directory */ rc = fsdb_dirent_del(&srv.fsdb, txn, dir_ino->inum, &target, 0); if (rc) { status = NFS4ERR_IO; goto out_abort; } /* record directory change info */ cinfo.before = dir_ino->version; rc = inode_touch(txn, dir_ino); if (rc) { status = NFS4ERR_IO; goto out_abort; } cinfo.after = dir_ino->version; /* remove link, possibly deleting inode */ rc = inode_unlink(txn, target_ino); if (rc) { status = NFS4ERR_IO; goto out_abort; } rc = txn->commit(txn, 0); if (rc) 
{ dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); status = NFS4ERR_IO; goto out; } out: WR32(status); if (status == NFS4_OK) { WR32(cinfo.atomic ? 1 : 0); /* cinfo.atomic */ WR64(cinfo.before); /* cinfo.before */ WR64(cinfo.after); /* cinfo.after */ } inode_free(dir_ino); inode_free(target_ino); return status; out_abort: if (txn->abort(txn)) dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); goto out; }
int clDevicesNum(void) { cl_int status; char pbuff[256]; cl_uint numDevices; cl_uint numPlatforms; int most_devices = -1; cl_platform_id *platforms; cl_platform_id platform = NULL; unsigned int i, mdplatform = 0; status = clGetPlatformIDs(0, NULL, &numPlatforms); /* If this fails, assume no GPUs. */ if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: clGetPlatformsIDs failed (no OpenCL SDK installed?)", status); return -1; } if (numPlatforms == 0) { applog(LOG_ERR, "clGetPlatformsIDs returned no platforms (no OpenCL SDK installed?)"); return -1; } platforms = (cl_platform_id *)alloca(numPlatforms*sizeof(cl_platform_id)); status = clGetPlatformIDs(numPlatforms, platforms, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Ids. (clGetPlatformsIDs)", status); return -1; } for (i = 0; i < numPlatforms; i++) { if (opt_platform_id >= 0 && (int)i != opt_platform_id) continue; status = clGetPlatformInfo( platforms[i], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Info. 
(clGetPlatformInfo)", status); return -1; } platform = platforms[i]; applog(LOG_INFO, "CL Platform %d vendor: %s", i, pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(pbuff), pbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform %d name: %s", i, pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_VERSION, sizeof(pbuff), pbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform %d version: %s", i, pbuff); status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices); if (status != CL_SUCCESS) { applog(LOG_INFO, "Error %d: Getting Device IDs (num)", status); continue; } applog(LOG_INFO, "Platform %d devices: %d", i, numDevices); if ((int)numDevices > most_devices) { most_devices = numDevices; mdplatform = i; } if (numDevices) { unsigned int j; cl_device_id *devices = (cl_device_id *)malloc(numDevices*sizeof(cl_device_id)); clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, NULL); for (j = 0; j < numDevices; j++) { clGetDeviceInfo(devices[j], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL); applog(LOG_INFO, "\t%i\t%s", j, pbuff); } free(devices); } } if (opt_platform_id < 0) opt_platform_id = mdplatform;; return most_devices; }
nfsstat4 nfs_op_lookup(struct nfs_cxn *cxn, const LOOKUP4args *args, struct list_head *writes, struct rpc_write **wr) { nfsstat4 status = NFS4_OK; struct nfs_inode *ino = NULL; bool printed = false; struct nfs_buf objname; nfsino_t inum; DB_TXN *txn = NULL; DB_ENV *dbenv = srv.fsdb.env; int rc; objname.len = args->objname.utf8string_len; objname.val = args->objname.utf8string_val; if (!objname.len) { status = NFS4ERR_INVAL; goto out; } if (!objname.val) { status = NFS4ERR_BADXDR; goto out; } if (objname.len > SRV_MAX_NAME) { status = NFS4ERR_NAMETOOLONG; goto out; } rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { status = NFS4ERR_IO; dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto out; } status = dir_curfh(txn, cxn, &ino, 0); if (status != NFS4_OK) { if ((status == NFS4ERR_NOTDIR) && (ino->type == NF4LNK)) status = NFS4ERR_SYMLINK; goto out_abort; } status = dir_lookup(txn, ino, &objname, 0, &inum); if (status != NFS4_OK) goto out_abort; rc = txn->commit(txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); status = NFS4ERR_IO; goto out; } fh_set(&cxn->current_fh, inum); if (debugging) { applog(LOG_INFO, "op LOOKUP ('%.*s') -> %016llX", objname.len, objname.val, (unsigned long long) cxn->current_fh.inum); printed = true; } out: if (!printed) { if (debugging) applog(LOG_INFO, "op LOOKUP ('%.*s')", objname.len, objname.val); } WR32(status); inode_free(ino); return status; out_abort: if (txn->abort(txn)) dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); goto out; }
/*
 * Initialise an OpenCL state object for GPU 'gpu': select the platform
 * and device, create context/queue, choose a kernel implementation,
 * build (or load a cached binary of) the program, optionally BFI_INT
 * patch the compiled ELF, save the binary cache, and create the
 * kernel plus its buffers.  Copies the device name into 'name'
 * (at most nameSize bytes).  Returns the new _clState or NULL on error.
 *
 * NOTE(review): most error paths return NULL without freeing clState,
 * devices, extensions, devoclver, source, binaries or binary_sizes —
 * leaks on failure paths appear accepted here since callers typically
 * abort on a NULL return; confirm before tightening.
 */
_clState *initCl(unsigned int gpu, char *name, size_t nameSize)
{
	_clState *clState = calloc(1, sizeof(_clState));
	bool patchbfi = false, prog_built = false;
	struct cgpu_info *cgpu = &gpus[gpu];
	cl_platform_id platform = NULL;
	char pbuff[256], vbuff[255];
	cl_platform_id* platforms;
	cl_uint preferred_vwidth;
	cl_device_id *devices;
	cl_uint numPlatforms;
	cl_uint numDevices;
	cl_int status;

	/* enumerate platforms and pick the one chosen by opt_platform_id */
	status = clGetPlatformIDs(0, NULL, &numPlatforms);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Getting Platforms. (clGetPlatformsIDs)", status);
		return NULL;
	}

	platforms = (cl_platform_id *)alloca(numPlatforms*sizeof(cl_platform_id));
	status = clGetPlatformIDs(numPlatforms, platforms, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Getting Platform Ids. (clGetPlatformsIDs)", status);
		return NULL;
	}

	if (opt_platform_id >= (int)numPlatforms) {
		applog(LOG_ERR, "Specified platform that does not exist");
		return NULL;
	}

	status = clGetPlatformInfo(platforms[opt_platform_id], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Getting Platform Info. (clGetPlatformInfo)", status);
		return NULL;
	}
	platform = platforms[opt_platform_id];

	if (platform == NULL) {
		perror("NULL platform found!\n");
		return NULL;
	}

	applog(LOG_INFO, "CL Platform vendor: %s", pbuff);
	status = clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(pbuff), pbuff, NULL);
	if (status == CL_SUCCESS)
		applog(LOG_INFO, "CL Platform name: %s", pbuff);
	/* vbuff (platform version) is reused below for SDK detection */
	status = clGetPlatformInfo(platform, CL_PLATFORM_VERSION, sizeof(vbuff), vbuff, NULL);
	if (status == CL_SUCCESS)
		applog(LOG_INFO, "CL Platform version: %s", vbuff);

	/* enumerate GPU devices on the chosen platform and select 'gpu' */
	status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Getting Device IDs (num)", status);
		return NULL;
	}

	if (numDevices > 0 ) {
		devices = (cl_device_id *)malloc(numDevices*sizeof(cl_device_id));

		/* Now, get the device list data */

		status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, NULL);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: Getting Device IDs (list)", status);
			return NULL;
		}

		applog(LOG_INFO, "List of devices:");

		unsigned int i;
		for (i = 0; i < numDevices; i++) {
			status = clGetDeviceInfo(devices[i], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL);
			if (status != CL_SUCCESS) {
				applog(LOG_ERR, "Error %d: Getting Device Info", status);
				return NULL;
			}

			applog(LOG_INFO, "\t%i\t%s", i, pbuff);
		}

		if (gpu < numDevices) {
			status = clGetDeviceInfo(devices[gpu], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL);
			if (status != CL_SUCCESS) {
				applog(LOG_ERR, "Error %d: Getting Device Info", status);
				return NULL;
			}

			applog(LOG_INFO, "Selected %i: %s", gpu, pbuff);
			/* NOTE(review): strncpy may leave 'name' unterminated
			 * when the device name fills nameSize — confirm callers
			 * pass a pre-zeroed buffer */
			strncpy(name, pbuff, nameSize);
		} else {
			applog(LOG_ERR, "Invalid GPU %i", gpu);
			return NULL;
		}

	} else return NULL;

	/* create context and command queue on the selected device */
	cl_context_properties cps[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform, 0 };

	clState->context = clCreateContextFromType(cps, CL_DEVICE_TYPE_GPU, NULL, NULL, &status);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Creating Context. (clCreateContextFromType)", status);
		return NULL;
	}

	/////////////////////////////////////////////////////////////////
	// Create an OpenCL command queue
	/////////////////////////////////////////////////////////////////
	clState->commandQueue = clCreateCommandQueue(clState->context, devices[gpu],
						     CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &status);
	if (status != CL_SUCCESS) /* Try again without OOE enable */
		clState->commandQueue = clCreateCommandQueue(clState->context, devices[gpu], 0 , &status);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Creating Command Queue. (clCreateCommandQueue)", status);
		return NULL;
	}

	/* Check for BFI INT support. Hopefully people don't mix devices with
	 * and without it! */
	char * extensions = malloc(1024);
	const char * camo = "cl_amd_media_ops";
	char *find;

	status = clGetDeviceInfo(devices[gpu], CL_DEVICE_EXTENSIONS, 1024, (void *)extensions, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_EXTENSIONS", status);
		return NULL;
	}
	find = strstr(extensions, camo);
	if (find)
		clState->hasBitAlign = true;

	/* Check for OpenCL >= 1.0 support, needed for global offset parameter usage. */
	char * devoclver = malloc(1024);
	const char * ocl10 = "OpenCL 1.0";
	const char * ocl11 = "OpenCL 1.1";

	status = clGetDeviceInfo(devices[gpu], CL_DEVICE_VERSION, 1024, (void *)devoclver, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_VERSION", status);
		return NULL;
	}
	/* absence of the "OpenCL 1.0"/"OpenCL 1.1" substrings implies a newer version */
	find = strstr(devoclver, ocl10);
	if (!find) {
		clState->hasOpenCL11plus = true;
		find = strstr(devoclver, ocl11);
		if (!find)
			clState->hasOpenCL12plus = true;
	}

	status = clGetDeviceInfo(devices[gpu], CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, sizeof(cl_uint), (void *)&preferred_vwidth, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT", status);
		return NULL;
	}
	applog(LOG_DEBUG, "Preferred vector width reported %d", preferred_vwidth);

	status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), (void *)&clState->max_work_size, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE", status);
		return NULL;
	}
	applog(LOG_DEBUG, "Max work group size reported %d", (int)(clState->max_work_size));

	status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE , sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_MEM_ALLOC_SIZE", status);
		return NULL;
	}
	applog(LOG_DEBUG, "Max mem alloc size is %lu", (long unsigned int)(cgpu->max_alloc));

	/* Create binary filename based on parameters passed to opencl
	 * compiler to ensure we only load a binary that matches what would
	 * have otherwise created. The filename is:
	 * name + kernelname +/- g(offset) + v + vectors + w + work_size + l + sizeof(long) + .bin
	 * For scrypt the filename is:
	 * name + kernelname + g + lg + lookup_gap + tc + thread_concurrency + w + work_size + l + sizeof(long) + .bin
	 */
	char binaryfilename[255];
	char filename[255];
	char numbuf[16];

	/* choose a kernel implementation based on algorithm, device name
	 * and detected SDK version, unless one was forced via cgpu->kernel */
	if (cgpu->kernel == KL_NONE) {
		if (opt_scrypt) {
			applog(LOG_INFO, "Selecting scrypt kernel");
			clState->chosen_kernel = KL_SCRYPT;
		} else if (opt_kryptohash) {
			applog(LOG_INFO, "Selecting kryptohash kernel");
			clState->chosen_kernel = KL_KRYPTOHASH;
		} else if (!strstr(name, "Tahiti") &&
			/* Detect all 2.6 SDKs not with Tahiti and use diablo kernel */
			(strstr(vbuff, "844.4") ||  // Linux 64 bit ATI 2.6 SDK
			 strstr(vbuff, "851.4") ||  // Windows 64 bit ""
			 strstr(vbuff, "831.4") ||
			 strstr(vbuff, "898.1") ||  // 12.2 driver SDK
			 strstr(vbuff, "923.1") ||  // 12.4
			 strstr(vbuff, "938.2") ||  // SDK 2.7
			 strstr(vbuff, "1113.2"))) {// SDK 2.8
			applog(LOG_INFO, "Selecting diablo kernel");
			clState->chosen_kernel = KL_DIABLO;
		/* Detect all 7970s, older ATI and NVIDIA and use poclbm */
		} else if (strstr(name, "Tahiti") || !clState->hasBitAlign) {
			applog(LOG_INFO, "Selecting poclbm kernel");
			clState->chosen_kernel = KL_POCLBM;
		/* Use phatk for the rest R5xxx R6xxx */
		} else {
			applog(LOG_INFO, "Selecting phatk kernel");
			clState->chosen_kernel = KL_PHATK;
		}
		cgpu->kernel = clState->chosen_kernel;
	} else {
		clState->chosen_kernel = cgpu->kernel;
		if (clState->chosen_kernel == KL_PHATK &&
		    (strstr(vbuff, "844.4") || strstr(vbuff, "851.4") ||
		     strstr(vbuff, "831.4") || strstr(vbuff, "898.1") ||
		     strstr(vbuff, "923.1") || strstr(vbuff, "938.2") ||
		     strstr(vbuff, "1113.2"))) {
			applog(LOG_WARNING, "WARNING: You have selected the phatk kernel.");
			applog(LOG_WARNING, "You are running SDK 2.6+ which performs poorly with this kernel.");
			applog(LOG_WARNING, "Downgrade your SDK and delete any .bin files before starting again.");
			applog(LOG_WARNING, "Or allow cgminer to automatically choose a more suitable kernel.");
		}
	}

	/* For some reason 2 vectors is still better even if the card says
	 * otherwise, and many cards lie about their max so use 256 as max
	 * unless explicitly set on the command line. Tahiti prefers 1 */
	if (strstr(name, "Tahiti"))
		preferred_vwidth = 1;
	else if (preferred_vwidth > 2)
		preferred_vwidth = 2;

	/* map the chosen kernel to its .cl source file and cache-file stem */
	switch (clState->chosen_kernel) {
		case KL_POCLBM:
			strcpy(filename, POCLBM_KERNNAME".cl");
			strcpy(binaryfilename, POCLBM_KERNNAME);
			break;
		case KL_PHATK:
			strcpy(filename, PHATK_KERNNAME".cl");
			strcpy(binaryfilename, PHATK_KERNNAME);
			break;
		case KL_DIAKGCN:
			strcpy(filename, DIAKGCN_KERNNAME".cl");
			strcpy(binaryfilename, DIAKGCN_KERNNAME);
			break;
		case KL_SCRYPT:
			strcpy(filename, SCRYPT_KERNNAME".cl");
			strcpy(binaryfilename, SCRYPT_KERNNAME);
			/* Scrypt only supports vector 1 */
			cgpu->vwidth = 1;
			break;
		case KL_KRYPTOHASH:
			strcpy(filename, KRYPTOHASH_KERNNAME".cl");
			strcpy(binaryfilename, KRYPTOHASH_KERNNAME);
			/* Kryptohash only supports vector 1 */
			cgpu->vwidth = 1;
			break;
		case KL_NONE: /* Shouldn't happen */
		case KL_DIABLO:
			strcpy(filename, DIABLO_KERNNAME".cl");
			strcpy(binaryfilename, DIABLO_KERNNAME);
			break;
	}

	if (cgpu->vwidth)
		clState->vwidth = cgpu->vwidth;
	else {
		clState->vwidth = preferred_vwidth;
		cgpu->vwidth = preferred_vwidth;
	}

	/* goffset: use the kernel's global work offset parameter where supported */
	if (((clState->chosen_kernel == KL_POCLBM || clState->chosen_kernel == KL_DIABLO || clState->chosen_kernel == KL_DIAKGCN) &&
	     clState->vwidth == 1 && clState->hasOpenCL11plus) || opt_scrypt)
		clState->goffset = true;

	if (cgpu->work_size && cgpu->work_size <= clState->max_work_size)
		clState->wsize = cgpu->work_size;
	else if (opt_scrypt)
		clState->wsize = 256;
	else if (strstr(name, "Tahiti"))
		clState->wsize = 64;
	else
		clState->wsize = (clState->max_work_size <= 256 ? clState->max_work_size : 256) / clState->vwidth;
	cgpu->work_size = clState->wsize;

#ifdef USE_SCRYPT
	/* derive scrypt lookup gap and thread concurrency from device
	 * memory/shader counts unless overridden on the command line */
	if (opt_scrypt) {
		if (!cgpu->opt_lg) {
			applog(LOG_DEBUG, "GPU %d: selecting lookup gap of 2", gpu);
			cgpu->lookup_gap = 2;
		} else
			cgpu->lookup_gap = cgpu->opt_lg;

		if (!cgpu->opt_tc) {
			unsigned int sixtyfours;

			sixtyfours =  cgpu->max_alloc / 131072 / 64 - 1;
			cgpu->thread_concurrency = sixtyfours * 64;
			if (cgpu->shaders && cgpu->thread_concurrency > cgpu->shaders) {
				cgpu->thread_concurrency -= cgpu->thread_concurrency % cgpu->shaders;
				if (cgpu->thread_concurrency > cgpu->shaders * 5)
					cgpu->thread_concurrency = cgpu->shaders * 5;
			}
			applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %d", gpu, (int)(cgpu->thread_concurrency));
		} else
			cgpu->thread_concurrency = cgpu->opt_tc;
	}
#endif
#ifdef USE_KRYPTOHASH
	if (opt_kryptohash) {
		if (!cgpu->shaders) {
			applog(LOG_DEBUG, "GPU %d: selecting shaders value of 256", gpu);
			cgpu->shaders = 256;
		}
		/* NOTE(review): this second test repeats !cgpu->shaders, which
		 * is always false after the block above sets shaders to 256;
		 * the log text suggests it was meant to test !cgpu->shaders_mul
		 * — confirm and fix upstream */
		if (!cgpu->shaders) {
			applog(LOG_DEBUG, "GPU %d: selecting shaders multiplier value of 8", gpu);
			cgpu->shaders_mul = 8;
		}
	}
#endif

	FILE *binaryfile;
	size_t *binary_sizes;
	char **binaries;
	int pl;
	char *source = file_contents(filename, &pl);
	size_t sourceSize[] = {(size_t)pl};
	cl_uint slot, cpnd;

	slot = cpnd = 0;

	if (!source)
		return NULL;

	binary_sizes = calloc(sizeof(size_t) * MAX_GPUDEVICES * 4, 1);
	if (unlikely(!binary_sizes)) {
		applog(LOG_ERR, "Unable to calloc binary_sizes");
		return NULL;
	}
	binaries = calloc(sizeof(char *) * MAX_GPUDEVICES * 4, 1);
	if (unlikely(!binaries)) {
		applog(LOG_ERR, "Unable to calloc binaries");
		return NULL;
	}

	/* finish composing the cache filename from the build parameters */
	strcat(binaryfilename, name);
	if (clState->goffset)
		strcat(binaryfilename, "g");
	if (opt_scrypt) {
#ifdef USE_SCRYPT
		sprintf(numbuf, "lg%utc%u", cgpu->lookup_gap, (unsigned int)cgpu->thread_concurrency);
		strcat(binaryfilename, numbuf);
#endif
	} else {
		sprintf(numbuf, "v%d", clState->vwidth);
		strcat(binaryfilename, numbuf);
	}
	sprintf(numbuf, "w%d", (int)clState->wsize);
	strcat(binaryfilename, numbuf);
	sprintf(numbuf, "l%d", (int)sizeof(long));
	strcat(binaryfilename, numbuf);
	strcat(binaryfilename, ".bin");

	/* try to load a previously compiled binary for this configuration */
	binaryfile = fopen(binaryfilename, "rb");
	if (!binaryfile) {
		applog(LOG_DEBUG, "No binary found, generating from source");
	} else {
		struct stat binary_stat;

		if (unlikely(stat(binaryfilename, &binary_stat))) {
			applog(LOG_DEBUG, "Unable to stat binary, generating from source");
			fclose(binaryfile);
			goto build;
		}
		if (!binary_stat.st_size)
			goto build;

		binary_sizes[slot] = binary_stat.st_size;
		binaries[slot] = (char *)calloc(binary_sizes[slot], 1);
		if (unlikely(!binaries[slot])) {
			applog(LOG_ERR, "Unable to calloc binaries");
			fclose(binaryfile);
			return NULL;
		}

		if (fread(binaries[slot], 1, binary_sizes[slot], binaryfile) != binary_sizes[slot]) {
			applog(LOG_ERR, "Unable to fread binaries");
			fclose(binaryfile);
			free(binaries[slot]);
			goto build;
		}

		clState->program = clCreateProgramWithBinary(clState->context, 1, &devices[gpu], &binary_sizes[slot], (const unsigned char **)binaries, &status, NULL);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithBinary)", status);
			fclose(binaryfile);
			free(binaries[slot]);
			goto build;
		}

		fclose(binaryfile);
		applog(LOG_DEBUG, "Loaded binary image %s", binaryfilename);

		goto built;
	}

	/////////////////////////////////////////////////////////////////
	// Load CL file, build CL program object, create CL kernel object
	/////////////////////////////////////////////////////////////////

build:
	clState->program = clCreateProgramWithSource(clState->context, 1, (const char **)&source, sourceSize, &status);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithSource)", status);
		return NULL;
	}

	/* create a cl program executable for all the devices specified */
	char *CompilerOptions = calloc(1, 256);

#ifdef USE_SCRYPT
	if (opt_scrypt)
		sprintf(CompilerOptions, "-D LOOKUP_GAP=%d -D CONCURRENT_THREADS=%d -D WORKSIZE=%d",
			cgpu->lookup_gap, (unsigned int)cgpu->thread_concurrency, (int)clState->wsize);
	else
#endif
#ifdef USE_KRYPTOHASH
	if (opt_kryptohash)
		sprintf(CompilerOptions, "-D WORKSIZE=%d", (int)clState->wsize);
	else
#endif
	{
		sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d -D WORKVEC=%d",
			(int)clState->wsize, clState->vwidth, (int)clState->wsize * clState->vwidth);
	}
	applog(LOG_DEBUG, "Setting worksize to %d", (int)(clState->wsize));
	if (clState->vwidth > 1)
		applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->vwidth);

	/* BITALIGN / BFI_INT: only for pre-GCN AMD parts on older SDKs */
	if (clState->hasBitAlign && !opt_kryptohash) {
		strcat(CompilerOptions, " -D BITALIGN");
		applog(LOG_DEBUG, "cl_amd_media_ops found, setting BITALIGN");
		if (!clState->hasOpenCL12plus &&
		    (strstr(name, "Cedar") ||
		     strstr(name, "Redwood") ||
		     strstr(name, "Juniper") ||
		     strstr(name, "Cypress" ) ||
		     strstr(name, "Hemlock" ) ||
		     strstr(name, "Caicos" ) ||
		     strstr(name, "Turks" ) ||
		     strstr(name, "Barts" ) ||
		     strstr(name, "Cayman" ) ||
		     strstr(name, "Antilles" ) ||
		     strstr(name, "Wrestler" ) ||
		     strstr(name, "Zacate" ) ||
		     strstr(name, "WinterPark" )))
			patchbfi = true;
	} else
		applog(LOG_DEBUG, "cl_amd_media_ops not found, will not set BITALIGN");

	if (patchbfi) {
		strcat(CompilerOptions, " -D BFI_INT");
		applog(LOG_DEBUG, "BFI_INT patch requiring device found, patched source with BFI_INT");
	} else
		applog(LOG_DEBUG, "BFI_INT patch requiring device not found, will not BFI_INT patch");

	if (clState->goffset)
		strcat(CompilerOptions, " -D GOFFSET");

	if (!clState->hasOpenCL11plus)
		strcat(CompilerOptions, " -D OCL1");

	applog(LOG_DEBUG, "CompilerOptions: %s", CompilerOptions);
	status = clBuildProgram(clState->program, 1, &devices[gpu], CompilerOptions , NULL, NULL);
	free(CompilerOptions);

	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Building Program (clBuildProgram)", status);
		size_t logSize;
		status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);

		char *log = malloc(logSize);
		status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, logSize, log, NULL);
		applog(LOG_ERR, "%s", log);
		return NULL;
	}

	prog_built = true;

#ifdef __APPLE__
	/* OSX OpenCL breaks reading off binaries with >1 GPU so always build
	 * from source. */
	goto built;
#endif

	/* extract the compiled binary so it can be cached on disk */
	status = clGetProgramInfo(clState->program, CL_PROGRAM_NUM_DEVICES, sizeof(cl_uint), &cpnd, NULL);
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error %d: Getting program info CL_PROGRAM_NUM_DEVICES. (clGetProgramInfo)", status);
		return NULL;
	}

	status = clGetProgramInfo(clState->program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t)*cpnd, binary_sizes, NULL);
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error %d: Getting program info CL_PROGRAM_BINARY_SIZES. (clGetProgramInfo)", status);
		return NULL;
	}

	/* The actual compiled binary ends up in a RANDOM slot! Grr, so we have
	 * to iterate over all the binary slots and find where the real program
	 * is. What the heck is this!? */
	for (slot = 0; slot < cpnd; slot++)
		if (binary_sizes[slot])
			break;

	/* copy over all of the generated binaries. */
	applog(LOG_DEBUG, "Binary size for gpu %d found in binary slot %d: %d", gpu, slot, (int)(binary_sizes[slot]));
	if (!binary_sizes[slot]) {
		applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, FAIL!");
		return NULL;
	}
	binaries[slot] = calloc(sizeof(char) * binary_sizes[slot], 1);
	status = clGetProgramInfo(clState->program, CL_PROGRAM_BINARIES, sizeof(char *) * cpnd, binaries, NULL );
	if (unlikely(status != CL_SUCCESS)) {
		applog(LOG_ERR, "Error %d: Getting program info. CL_PROGRAM_BINARIES (clGetProgramInfo)", status);
		return NULL;
	}

	/* Patch the kernel if the hardware supports BFI_INT but it needs to
	 * be hacked in */
	if (patchbfi) {
		unsigned remaining = binary_sizes[slot];
		char *w = binaries[slot];
		unsigned int start, length;

		/* Find 2nd incidence of .text, and copy the program's
		 * position and length at a fixed offset from that. Then go
		 * back and find the 2nd incidence of \x7ELF (rewind by one
		 * from ELF) and then patch the opcocdes */
		if (!advance(&w, &remaining, ".text"))
			goto build;
		w++; remaining--;
		if (!advance(&w, &remaining, ".text")) {
			/* 32 bit builds only one ELF */
			w--; remaining++;
		}
		memcpy(&start, w + 285, 4);
		memcpy(&length, w + 289, 4);
		w = binaries[slot]; remaining = binary_sizes[slot];
		if (!advance(&w, &remaining, "ELF"))
			goto build;
		w++; remaining--;
		if (!advance(&w, &remaining, "ELF")) {
			/* 32 bit builds only one ELF */
			w--; remaining++;
		}
		w--; remaining++;
		w += start; remaining -= start;
		applog(LOG_DEBUG, "At %p (%u rem. bytes), to begin patching",
			w, remaining);
		patch_opcodes(w, length);

		/* the patched binary must replace the unpatched program */
		status = clReleaseProgram(clState->program);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: Releasing program. (clReleaseProgram)", status);
			return NULL;
		}

		clState->program = clCreateProgramWithBinary(clState->context, 1, &devices[gpu], &binary_sizes[slot], (const unsigned char **)&binaries[slot], &status, NULL);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithBinary)", status);
			return NULL;
		}

		/* Program needs to be rebuilt */
		prog_built = false;
	}

	free(source);

	/* Save the binary to be loaded next time */
	binaryfile = fopen(binaryfilename, "wb");
	if (!binaryfile) {
		/* Not a fatal problem, just means we build it again next time */
		applog(LOG_DEBUG, "Unable to create file %s", binaryfilename);
	} else {
		if (unlikely(fwrite(binaries[slot], 1, binary_sizes[slot], binaryfile) != binary_sizes[slot])) {
			applog(LOG_ERR, "Unable to fwrite to binaryfile");
			return NULL;
		}
		fclose(binaryfile);
	}
built:
	if (binaries[slot])
		free(binaries[slot]);
	free(binaries);
	free(binary_sizes);

	applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %d vectors and worksize %d",
	       filename, clState->hasBitAlign ? "" : "out", clState->vwidth, (int)(clState->wsize));

	if (!prog_built) {
		/* create a cl program executable for all the devices specified */
		status = clBuildProgram(clState->program, 1, &devices[gpu], NULL, NULL, NULL);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: Building Program (clBuildProgram)", status);
			size_t logSize;
			status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);

			char *log = malloc(logSize);
			status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, logSize, log, NULL);
			applog(LOG_ERR, "%s", log);
			return NULL;
		}
	}

	/* get a kernel object handle for a kernel with the given name */
	clState->kernel = clCreateKernel(clState->program, "search", &status);
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: Creating Kernel from program. (clCreateKernel)", status);
		return NULL;
	}

#ifdef USE_SCRYPT
	if (opt_scrypt) {
		size_t ipt = (1024 / cgpu->lookup_gap + (1024 % cgpu->lookup_gap > 0));
		size_t bufsize = 128 * ipt * cgpu->thread_concurrency;

		/* Use the max alloc value which has been rounded to a power of
		 * 2 greater >= required amount earlier */
		if (bufsize > cgpu->max_alloc) {
			applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu",
			       gpu, (long unsigned int)(cgpu->max_alloc));
			applog(LOG_WARNING, "Your scrypt settings come to %d", (int)bufsize);
		}
		applog(LOG_DEBUG, "Creating scrypt buffer sized %d", (int)bufsize);
		clState->padbufsize = bufsize;

		/* This buffer is weird and might work to some degree even if
		 * the create buffer call has apparently failed, so check if we
		 * get anything back before we call it a failure. */
		clState->padbuffer8 = NULL;
		clState->padbuffer8 = clCreateBuffer(clState->context, CL_MEM_READ_WRITE, bufsize, NULL, &status);
		if (status != CL_SUCCESS && !clState->padbuffer8) {
			applog(LOG_ERR, "Error %d: clCreateBuffer (padbuffer8), decrease TC or increase LG", status);
			return NULL;
		}

		clState->CLbuffer0 = clCreateBuffer(clState->context, CL_MEM_READ_ONLY, 128, NULL, &status);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: clCreateBuffer (CLbuffer0)", status);
			return NULL;
		}
		clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, SCRYPT_BUFFERSIZE, NULL, &status);
	} else
#endif
#ifdef USE_KRYPTOHASH
	if (opt_kryptohash) {
		size_t bufsize = 65536 * cgpu->shaders * cgpu->shaders_mul;

		/* Use the max alloc value which has been rounded to a power of
		 * 2 greater >= required amount earlier */
		if (bufsize > cgpu->max_alloc) {
			applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu",
			       gpu, (long unsigned int)(cgpu->max_alloc));
			applog(LOG_WARNING, "Your kryptohash settings come to %d", (int)bufsize);
		}
		applog(LOG_DEBUG, "Creating kryptohash buffer size %d", (int)bufsize);
		clState->padbufsize = bufsize;

		clState->scratchpad = NULL;
		clState->scratchpad = clCreateBuffer(clState->context, CL_MEM_READ_WRITE, bufsize, NULL, &status);
		if (status != CL_SUCCESS && !clState->scratchpad) {
			applog(LOG_ERR, "Error %d: clCreateBuffer (scratchpad), decrease shaders multiplier", status);
			return NULL;
		}

		clState->kryptohash_CLbuffer = clCreateBuffer(clState->context, CL_MEM_READ_ONLY, KRYPTOHASH_INBUFFER_SZ, NULL, &status);
		if (status != CL_SUCCESS) {
			applog(LOG_ERR, "Error %d: clCreateBuffer (kryptohash_CLbuffer)", status);
			return NULL;
		}
		clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, KRYPTOHASH_OUTBUFFER_SZ, NULL, &status);
	} else
#endif
	{
		clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, BUFFERSIZE, NULL, &status);
	}
	if (status != CL_SUCCESS) {
		applog(LOG_ERR, "Error %d: clCreateBuffer (outputBuffer)", status);
		return NULL;
	}

	return clState;
}
static int64_t avalon2_scanhash(struct thr_info *thr) { struct avalon2_pkg send_pkg; struct pool *pool; struct cgpu_info *avalon2 = thr->cgpu; struct avalon2_info *info = avalon2->device_data; int64_t h; uint32_t tmp, range, start; int i; if (thr->work_restart || thr->work_update || info->first) { info->new_stratum = true; applog(LOG_DEBUG, "Avalon2: New stratum: restart: %d, update: %d, first: %d", thr->work_restart, thr->work_update, info->first); thr->work_update = false; thr->work_restart = false; if (unlikely(info->first)) info->first = false; get_work(thr, thr->id); /* Make sure pool is ready */ pool = current_pool(); if (!pool->has_stratum) quit(1, "Avalon2: Miner Manager have to use stratum pool"); if (pool->swork.cb_len > AVA2_P_COINBASE_SIZE) quit(1, "Avalon2: Miner Manager pool coinbase length have to less then %d", AVA2_P_COINBASE_SIZE); if (pool->swork.merkles > AVA2_P_MERKLES_COUNT) quit(1, "Avalon2: Miner Manager merkles have to less then %d", AVA2_P_MERKLES_COUNT); info->diff = (int)pool->swork.diff - 1; info->pool_no = pool->pool_no; cg_wlock(&pool->data_lock); avalon2_stratum_pkgs(info->fd, pool, thr); cg_wunlock(&pool->data_lock); /* Configuer the parameter from outside */ info->fan_pwm = opt_avalon2_fan_min; info->set_voltage = opt_avalon2_voltage_min; info->set_frequency = opt_avalon2_freq_min; /* Set the Fan, Voltage and Frequency */ memset(send_pkg.data, 0, AVA2_P_DATA_LEN); tmp = be32toh(info->fan_pwm); memcpy(send_pkg.data, &tmp, 4); /* http://www.onsemi.com/pub_link/Collateral/ADP3208D.PDF */ tmp = rev8((0x78 - info->set_voltage / 125) << 1 | 1) << 8; tmp = be32toh(tmp); memcpy(send_pkg.data + 4, &tmp, 4); tmp = be32toh(info->set_frequency); memcpy(send_pkg.data + 8, &tmp, 4); /* Configure the nonce2 offset and range */ range = 0xffffffff / total_devices; start = range * avalon2->device_id; tmp = be32toh(start); memcpy(send_pkg.data + 12, &tmp, 4); tmp = be32toh(range); memcpy(send_pkg.data + 16, &tmp, 4); /* Package the data */ 
avalon2_init_pkg(&send_pkg, AVA2_P_SET, 1, 1); while (avalon2_send_pkg(info->fd, &send_pkg, thr) != AVA2_SEND_OK) ; info->new_stratum = false; } polling(thr); h = 0; for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { h += info->local_work[i]; } return h * 0xffffffff; }
static bool avalon2_detect_one(const char *devpath) { struct avalon2_info *info; int ackdetect; int fd; int tmp, i, modular[3]; char mm_version[AVA2_DEFAULT_MODULARS][16]; struct cgpu_info *avalon2; struct avalon2_pkg detect_pkg; struct avalon2_ret ret_pkg; applog(LOG_DEBUG, "Avalon2 Detect: Attempting to open %s", devpath); fd = avalon2_open(devpath, AVA2_IO_SPEED, true); if (unlikely(fd == -1)) { applog(LOG_ERR, "Avalon2 Detect: Failed to open %s", devpath); return false; } tcflush(fd, TCIOFLUSH); for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { modular[i] = 0; strcpy(mm_version[i], "NONE"); /* Send out detect pkg */ memset(detect_pkg.data, 0, AVA2_P_DATA_LEN); tmp = be32toh(i); memcpy(detect_pkg.data + 28, &tmp, 4); avalon2_init_pkg(&detect_pkg, AVA2_P_DETECT, 1, 1); avalon2_send_pkg(fd, &detect_pkg, NULL); ackdetect = avalon2_get_result(NULL, fd, &ret_pkg); applog(LOG_DEBUG, "Avalon2 Detect ID[%d]: %d", i, ackdetect); if (ackdetect != AVA2_P_ACKDETECT) continue; modular[i] = 1; memcpy(mm_version[i], ret_pkg.data, 15); mm_version[i][15] = '\0'; } if (!modular[0] && !modular[1] && !modular[2]) return false; /* We have a real Avalon! 
*/ avalon2 = calloc(1, sizeof(struct cgpu_info)); avalon2->drv = &avalon2_drv; avalon2->device_path = strdup(devpath); avalon2->threads = AVA2_MINER_THREADS; add_cgpu(avalon2); applog(LOG_INFO, "Avalon2 Detect: Found at %s, mark as %d", devpath, avalon2->device_id); avalon2->device_data = calloc(sizeof(struct avalon2_info), 1); if (unlikely(!(avalon2->device_data))) quit(1, "Failed to malloc avalon2_info"); info = avalon2->device_data; strcpy(info->mm_version[0], mm_version[0]); strcpy(info->mm_version[1], mm_version[1]); strcpy(info->mm_version[2], mm_version[2]); info->baud = AVA2_IO_SPEED; info->fan_pwm = AVA2_DEFAULT_FAN_PWM; info->set_voltage = AVA2_DEFAULT_VOLTAGE_MIN; info->set_frequency = AVA2_DEFAULT_FREQUENCY; info->temp_max = 0; info->temp_history_index = 0; info->temp_sum = 0; info->temp_old = 0; info->modulars[0] = modular[0]; info->modulars[1] = modular[1]; info->modulars[2] = modular[2]; /* Enable modular */ info->fd = -1; /* Set asic to idle mode after detect */ avalon2_close(fd); return true; }
int null_scanhash() { applog(LOG_WARNING,"SWERR: undefined scanhash function in algo_gate"); return 0; }
/*
 * NFSv4 RENAME: move entry 'oldname' in the saved-fh directory to
 * 'newname' in the current-fh directory, inside one Berkeley DB
 * transaction.  Writes RENAME4res (status, then source and target
 * change_info4 on success) into the RPC reply stream via the WR*
 * macros, and returns the nfsstat4 status.
 *
 * cxn    - per-connection state; supplies save_fh (source dir) and
 *          current_fh (target dir)
 * args   - decoded RENAME4args (oldname/newname as UTF-8 buffers)
 * writes - RPC output write list (consumed by WR* macros)
 * wr     - current RPC write buffer (consumed by WR* macros)
 */
nfsstat4 nfs_op_rename(struct nfs_cxn *cxn, const RENAME4args *args,
		       struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *src_dir = NULL, *target_dir = NULL;
	struct nfs_inode *old_file = NULL, *new_file = NULL;
	struct nfs_buf oldname, newname;
	/* change_info4: { atomic, before, after }; atomic=true since the
	 * whole rename happens in a single transaction */
	change_info4 src = { true, 0, 0 };
	change_info4 target = { true, 0, 0 };
	DB_TXN *txn = NULL;
	DB_ENV *dbenv = srv.fsdb.env;
	int rc;
	nfsino_t old_dirent, new_dirent;

	oldname.len = args->oldname.utf8string_len;
	oldname.val = args->oldname.utf8string_val;
	newname.len = args->newname.utf8string_len;
	newname.val = args->newname.utf8string_val;

	if (debugging)
		applog(LOG_INFO, "op RENAME (OLD:%.*s, NEW:%.*s)",
		       oldname.len, oldname.val,
		       newname.len, newname.val);

	/* validate text input */
	if ((!valid_utf8string(&oldname)) ||
	    (!valid_utf8string(&newname))) {
		status = NFS4ERR_INVAL;
		goto out;
	}
	if (has_dots(&oldname) || has_dots(&newname)) {
		status = NFS4ERR_BADNAME;
		goto out;
	}

	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* reference source, target directories.
	 * NOTE: src_dir and target_dir may point to the same object
	 */
	src_dir = inode_fhdec(txn, cxn->save_fh, DB_RMW);
	if (fh_equal(cxn->save_fh, cxn->current_fh))
		target_dir = src_dir;
	else
		target_dir = inode_fhdec(txn, cxn->current_fh, DB_RMW);
	if (!src_dir || !target_dir) {
		status = NFS4ERR_NOFILEHANDLE;
		goto out_abort;
	}
	if ((src_dir->type != NF4DIR) || (target_dir->type != NF4DIR)) {
		status = NFS4ERR_NOTDIR;
		goto out_abort;
	}

	/* lookup source, target names */
	status = dir_lookup(txn, src_dir, &oldname, 0, &old_dirent);
	if (status != NFS4_OK)
		goto out_abort;
	old_file = inode_getdec(txn, old_dirent, 0);
	if (!old_file) {
		status = NFS4ERR_NOENT;
		goto out_abort;
	}

	status = dir_lookup(txn, target_dir, &newname, 0, &new_dirent);
	if (status != NFS4_OK && status != NFS4ERR_NOENT)
		goto out_abort;

	/* if target (newname) is present, attempt to remove */
	if (status == NFS4_OK) {
		bool ok_to_remove = false;

		/* read to-be-deleted inode */
		new_file = inode_getdec(txn, new_dirent, DB_RMW);
		if (!new_file) {
			status = NFS4ERR_NOENT;
			goto out_abort;
		}

		/* do oldname and newname refer to same file?  If so the
		 * rename is a no-op: report unchanged change_info and
		 * abort the txn with status still NFS4_OK. */
		if (old_file->inum == new_file->inum) {
			src.after = src.before = src_dir->version;
			target.after = target.before = target_dir->version;
			goto out_abort;
		}

		/* a non-dir may replace a non-dir; a dir may replace an
		 * empty dir; every other combination is an error */
		if (old_file->type != NF4DIR && new_file->type != NF4DIR)
			ok_to_remove = true;
		else if (old_file->type == NF4DIR && new_file->type == NF4DIR &&
			 dir_is_empty(txn, new_file))
			ok_to_remove = true;

		if (!ok_to_remove) {
			status = NFS4ERR_EXIST;
			goto out_abort;
		}

		/* remove target inode from directory */
		rc = fsdb_dirent_del(&srv.fsdb, txn, target_dir->inum,
				     &newname, 0);
		if (rc == 0)
			rc = inode_unlink(txn, new_file);
		if (rc) {
			status = NFS4ERR_IO;
			goto out_abort;
		}
	} else
		status = NFS4_OK;	/* NFS4ERR_NOENT is fine: no target */

	new_dirent = old_dirent;

	/* delete entry from source directory; add to target directory */
	rc = fsdb_dirent_del(&srv.fsdb, txn, src_dir->inum, &oldname, 0);
	if (rc == 0)
		rc = fsdb_dirent_put(&srv.fsdb, txn, target_dir->inum,
				     &newname, 0, new_dirent);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* if renamed file is a directory, ensure its 'parent' is updated */
	if (old_file->type == NF4DIR) {
		old_file->parent = target_dir->inum;
		if (inode_touch(txn, old_file)) {
			status = NFS4ERR_IO;
			goto out_abort;
		}
	}

	/* record directory change info (before inode_touch bumps versions) */
	src.before = src_dir->version;
	target.before = target_dir->version;

	/* update last-modified stamps of directory inodes */
	rc = inode_touch(txn, src_dir);
	if (rc == 0 && src_dir != target_dir)
		rc = inode_touch(txn, target_dir);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* close the transaction */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

	src.after = src_dir->version;
	target.after = target_dir->version;

out:
	WR32(status);
	if (status == NFS4_OK) {
		WR32(src.atomic ? 1 : 0);	/* src cinfo.atomic */
		WR64(src.before);		/* src cinfo.before */
		WR64(src.after);		/* src cinfo.after */
		WR32(target.atomic ? 1 : 0);	/* target cinfo.atomic */
		WR64(target.before);		/* target cinfo.before */
		WR64(target.after);		/* target cinfo.after */
	}
	inode_free(src_dir);
	if (src_dir != target_dir)
		inode_free(target_dir);
	inode_free(old_file);
	inode_free(new_file);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
void null_hash_suw() { applog(LOG_WARNING,"SWERR: null_hash_suw unsafe null function"); };
/*
 * Per-dirent callback for READDIR: emit one entry4 into the XDR reply
 * stream, honoring the client's dircount/maxcount budgets and cookie.
 *
 * txn     - open Berkeley DB transaction
 * key     - dirent key record; key->name (not NUL-terminated) follows
 *           the fixed header, so name length = key_len - sizeof(*key)
 * key_len - total size of the key record in bytes
 * dirent  - inode number the entry refers to
 * ri      - iteration state (budgets, cookie, output stream, counters)
 *
 * Returns true to STOP iteration, false to continue.
 */
static bool readdir_iter(DB_TXN *txn, const struct fsdb_de_key *key,
			 size_t key_len, nfsino_t dirent,
			 struct readdir_info *ri)
{
	uint64_t bitmap_out = 0;
	uint32_t dirlen, maxlen;
	struct nfs_fattr_set attr;
	struct nfs_inode *ino = NULL;
	struct list_head *writes = ri->writes;
	struct rpc_write **wr = ri->wr;
	size_t name_len;
	struct nfs_buf de_name;

	if (ri->stop)
		return true;

	/* skip entries up to and including the client-supplied cookie */
	if (!ri->cookie_found) {
		if (ri->cookie && (ri->dir_pos <= ri->cookie)) {
			ri->dir_pos++;
			return false;
		}
		ri->cookie_found = true;
	}

	ino = inode_getdec(txn, dirent, 0);
	if (!ino) {
		applog(LOG_WARNING, " WARNING: inode %016llX not found",
		       (unsigned long long) dirent);
		/* FIXME: return via rdattr-error */
		ri->stop = true;
		ri->status = NFS4ERR_NOENT;
		return true;
	}

	memset(&attr, 0, sizeof(attr));
	fattr_fill(ino, &attr);

	name_len = key_len - sizeof(*key);

	/* dircount budget: cookie(8) + name length word(4) + padded name */
	dirlen = 8 + 4 + (XDR_QUADLEN(name_len) * 4);
	if (dirlen > ri->dircount) {
		ri->hit_limit = true;
		ri->stop = true;
		if (debugging > 1)
			applog(LOG_DEBUG, " iter: hit dir limit");
		goto out;
	}

	/* maxcount budget: entry fields + attr bitmap/len + encoded attrs */
	maxlen = 8 + 4 + (XDR_QUADLEN(name_len) * 4) +
		 16 + fattr_size(&attr) + 4;
	if (maxlen > ri->maxcount) {
		ri->hit_limit = true;
		ri->stop = true;
		if (debugging > 1)
			applog(LOG_DEBUG, " iter: hit max limit");
		goto out;
	}

	/* first emitted entry also writes the READDIR4resok header */
	if (ri->first_time) {
		ri->first_time = false;

		/* FIXME: server verifier isn't the best for dir verf */
		WRMEM(&srv.instance_verf, sizeof(verifier4));	/* cookieverf */

		ri->val_follows = WRSKIP(4);
	}

	ri->dircount -= dirlen;
	ri->maxcount -= maxlen;

	/* write value to previous entry4.nextentry */
	*ri->val_follows = htonl(1);

	ri->val_follows = NULL;

	WR64(ri->dir_pos);		/* entry4.cookie */

	de_name.len = name_len;
	de_name.val = (void *) key->name; /* cast is ok: RO data is copied */
	WRBUF(&de_name);		/* entry4.name */

	/* entry4.attrs */
	attr.bitmap = ri->attr_request;
	ri->status = wr_fattr(&attr, &bitmap_out, writes, wr);
	if (ri->status != NFS4_OK)
		ri->stop = true;

	if (debugging)
		applog(LOG_DEBUG, " READDIR ent: '%.*s' (INO:%016llX MAP:%Lx WRLEN:%u)",
		       (int) name_len, key->name,
		       (unsigned long long) dirent,
		       (unsigned long long) bitmap_out,
		       (*wr)->len);

	/* reserve entry4.nextentry; patched by the next entry or finale */
	ri->val_follows = WRSKIP(4);

	ri->n_results++;
	ri->dir_pos++;

out:
	inode_free(ino);
	fattr_free(&attr);
	if (ri->stop)
		return true;
	return false;
}
/*
 * (Re)initialize a HashFast device: send OP_USB_INIT with the GWQ
 * protocol, wait for and validate the reply, then read back the
 * device's base init data, config data and core bitmap into 'info'.
 *
 * Returns true on a fully successful init, false on any failure
 * (send failure, no/corrupt reply, data read failure, or a non-zero
 * operation_status from the device).
 */
static bool hfa_reset(struct cgpu_info *hashfast, struct hashfast_info *info)
{
	struct hf_usb_init_header usb_init, *hu = &usb_init;
	struct hf_usb_init_base *db;
	struct hf_usb_init_options *ho;
	char buf[1024];
	struct hf_header *h = (struct hf_header *)buf;
	uint8_t hcrc;
	bool ret;
	int i;

	// XXX Following items need to be defaults with command-line overrides
	info->hash_clock_rate = 550;		// Hash clock rate in Mhz
	info->group_ntime_roll = 1;
	info->core_ntime_roll = 1;

	// Assemble the USB_INIT request
	memset(hu, 0, sizeof(*hu));
	hu->preamble = HF_PREAMBLE;
	hu->operation_code = OP_USB_INIT;
	hu->protocol = PROTOCOL_GLOBAL_WORK_QUEUE;	// Protocol to use
	hu->hash_clock = info->hash_clock_rate;		// Hash clock rate in Mhz
	/* optional ntime-roll options ride immediately after the header */
	if (info->group_ntime_roll > 1 && info->core_ntime_roll) {
		ho = (struct hf_usb_init_options *)(hu + 1);
		memset(ho, 0, sizeof(*ho));
		ho->group_ntime_roll = info->group_ntime_roll;
		ho->core_ntime_roll = info->core_ntime_roll;
		hu->data_length = sizeof(*ho) / 4;	// length in 32-bit words
	}
	hu->crc8 = hfa_crc8((uint8_t *)hu);
	applog(LOG_INFO, "HFA%d: Sending OP_USB_INIT with GWQ protocol specified",
	       hashfast->device_id);
	if (!hfa_send_packet(hashfast, (struct hf_header *)hu, HF_USB_CMD(OP_USB_INIT)))
		return false;

	// Check for the correct response.
	// We extend the normal timeout - a complete device initialization, including
	// bringing power supplies up from standby, etc., can take over a second.
tryagain:
	for (i = 0; i < 30; i++) {
		ret = hfa_get_header(hashfast, h, &hcrc);
		if (ret)
			break;
	}
	if (!ret) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed!", hashfast->device_id);
		return false;
	}
	if (h->crc8 != hcrc) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed! CRC mismatch", hashfast->device_id);
		return false;
	}
	if (h->operation_code != OP_USB_INIT) {
		// This can happen if valid packet(s) were in transit *before* the OP_USB_INIT arrived
		// at the device, so we just toss the packets and keep looking for the response.
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT: Tossing packet, valid but unexpected type %d",
		       hashfast->device_id, h->operation_code);
		hfa_get_data(hashfast, buf, h->data_length);
		goto tryagain;
	}

	applog(LOG_DEBUG, "HFA %d: Good reply to OP_USB_INIT", hashfast->device_id);
	applog(LOG_DEBUG, "HFA %d: OP_USB_INIT: %d die in chain, %d cores, device_type %d, refclk %d Mhz",
	       hashfast->device_id, h->chip_address, h->core_address,
	       h->hdata & 0xff, (h->hdata >> 8) & 0xff);

	// Save device configuration
	info->asic_count = h->chip_address;
	info->core_count = h->core_address;
	info->device_type = (uint8_t)h->hdata;		// low byte of hdata
	info->ref_frequency = (uint8_t)(h->hdata >> 8);	// high byte of hdata
	info->hash_sequence_head = 0;
	info->hash_sequence_tail = 0;
	info->device_sequence_tail = 0;

	// Size in bytes of the core bitmap in bytes
	info->core_bitmap_size = (((info->asic_count * info->core_count) + 31) / 32) * 4;

	// Get the usb_init_base structure
	if (!hfa_get_data(hashfast, (char *)&info->usb_init_base, U32SIZE(info->usb_init_base))) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed! Failure to get usb_init_base data",
		       hashfast->device_id);
		return false;
	}
	db = &info->usb_init_base;
	applog(LOG_INFO, "HFA %d: firmware_rev: %d.%d", hashfast->device_id,
	       (db->firmware_rev >> 8) & 0xff, db->firmware_rev & 0xff);
	applog(LOG_INFO, "HFA %d: hardware_rev: %d.%d", hashfast->device_id,
	       (db->hardware_rev >> 8) & 0xff, db->hardware_rev & 0xff);
	applog(LOG_INFO, "HFA %d: serial number: %d", hashfast->device_id,
	       db->serial_number);
	applog(LOG_INFO, "HFA %d: hash clockrate: %d Mhz", hashfast->device_id,
	       db->hash_clockrate);
	applog(LOG_INFO, "HFA %d: inflight_target: %d", hashfast->device_id,
	       db->inflight_target);
	applog(LOG_INFO, "HFA %d: sequence_modulus: %d", hashfast->device_id,
	       db->sequence_modulus);
	info->num_sequence = db->sequence_modulus;

	// Now a copy of the config data used
	if (!hfa_get_data(hashfast, (char *)&info->config_data, U32SIZE(info->config_data))) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed! Failure to get config_data",
		       hashfast->device_id);
		return false;
	}

	// Now the core bitmap
	info->core_bitmap = malloc(info->core_bitmap_size);
	if (!info->core_bitmap)
		quit(1, "Failed to malloc info core bitmap in hfa_reset");
	/* hfa_get_data length is in 32-bit words, hence the / 4 */
	if (!hfa_get_data(hashfast, (char *)info->core_bitmap, info->core_bitmap_size / 4)) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed! Failure to get core_bitmap",
		       hashfast->device_id);
		return false;
	}

	// See if the initialization succeeded
	if (db->operation_status) {
		applog(LOG_WARNING, "HFA %d: OP_USB_INIT failed! Operation status %d (%s)",
		       hashfast->device_id, db->operation_status,
		       (db->operation_status < sizeof(hf_usb_init_errors)/sizeof(hf_usb_init_errors[0])) ?
		       hf_usb_init_errors[db->operation_status] : "Unknown error code");
		return false;
	}

	return true;
}
/*
 * NFSv4 READDIR: stream the entries of the current-fh directory into
 * the RPC reply, bounded by the client's dircount/maxcount budgets and
 * resuming after 'cookie' if one was supplied.  Iterates a Berkeley DB
 * cursor over the dirent table inside a transaction, delegating
 * per-entry encoding to readdir_iter().  Returns the nfsstat4 status
 * (also patched into the reply at status_p).
 */
nfsstat4 nfs_op_readdir(struct nfs_cxn *cxn, const READDIR4args *args,
			struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *ino = NULL;
	uint32_t dircount, maxcount, *status_p;
	struct readdir_info ri;
	uint64_t cookie, attr_request;
	const verifier4 *cookie_verf;
	DB_TXN *txn = NULL;
	DB *dirent = srv.fsdb.dirent;
	DB_ENV *dbenv = srv.fsdb.env;
	DBT pkey, pval;
	struct fsdb_de_key key;
	int cget_flags;
	DBC *curs = NULL;
	int rc;
	uint64_t dirent_inum, db_de;
	struct fsdb_de_key *rkey;

	cookie = args->cookie;
	cookie_verf = &args->cookieverf;
	dircount = args->dircount;
	maxcount = args->maxcount;
	attr_request = bitmap4_decode(&args->attr_request);

	/* reserve space for status; filled in at 'out' */
	status_p = WRSKIP(4);

	if (debugging) {
		applog(LOG_INFO, "op READDIR (COOKIE:%Lu DIR:%u MAX:%u MAP:%Lx)",
		       (unsigned long long) cookie,
		       dircount, maxcount,
		       (unsigned long long) attr_request);
		print_fattr_bitmap("op READDIR", attr_request);
	}

	/* traditionally "." and "..", hardcoded */
	if (cookie == 1 || cookie == 2) {
		status = NFS4ERR_BAD_COOKIE;
		goto out;
	}

	/* don't permit request of write-only attrib */
	if (attr_request & fattr_write_only_mask) {
		status = NFS4ERR_INVAL;
		goto out;
	}

	/* FIXME: very, very, very poor verifier */
	if (cookie &&
	    memcmp(cookie_verf, &srv.instance_verf, sizeof(verifier4))) {
		status = NFS4ERR_NOT_SAME;
		goto out;
	}

	/* read inode of directory being read */
	status = dir_curfh(NULL, cxn, &ino, 0);
	if (status != NFS4_OK)
		goto out;
	if (ino->mode == 0) {
		status = NFS4ERR_ACCESS;
		goto out;
	}

	/* subtract READDIR4resok header and footer size */
	if (maxcount < 16) {
		status = NFS4ERR_TOOSMALL;
		goto out;
	}
	maxcount -= (8 + 4 + 4);

	/* verify within server limits */
	if (dircount > SRV_MAX_READ || maxcount > SRV_MAX_READ) {
		status = NFS4ERR_INVAL;
		goto out;
	}

	/* open transaction */
	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* set up directory iteration */
	memset(&ri, 0, sizeof(ri));
	ri.cookie = cookie;
	ri.dircount = dircount;
	ri.maxcount = maxcount;
	ri.attr_request = attr_request;
	ri.status = NFS4_OK;
	ri.writes = writes;
	ri.wr = wr;
	ri.dir_pos = 3;		/* 1 and 2 are reserved for "." and ".." */
	ri.first_time = true;

	/* if dir is empty, skip directory interation loop completely */
	if (dir_is_empty(txn, ino)) {
		WRMEM(&srv.instance_verf, sizeof(verifier4));	/* cookieverf */
		ri.val_follows = WRSKIP(4);
		if (debugging)
			applog(LOG_DEBUG, " READDIR: empty directory");
		goto the_finale;
	}

	/* otherwise, loop through each dirent attached to ino->inum */
	rc = dirent->cursor(dirent, txn, &curs, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dirent->err(dirent, rc, "dirent->cursor");
		goto out_abort;
	}

	key.inum = inum_encode(ino->inum);

	/* key returned DB_DBT_MALLOC (variable-size name tail); value is a
	 * fixed-size inode number read into db_de via DB_DBT_USERMEM */
	memset(&pkey, 0, sizeof(pkey));
	pkey.data = &key;
	pkey.size = sizeof(key);
	pkey.flags = DB_DBT_MALLOC;
	memset(&pval, 0, sizeof(pval));
	pval.data = &db_de;
	pval.ulen = sizeof(db_de);
	pval.flags = DB_DBT_USERMEM;

	/* DB_SET_RANGE positions at the first key >= our dir prefix, then
	 * DB_NEXT walks forward until the prefix no longer matches */
	cget_flags = DB_SET_RANGE;
	while (1) {
		bool iter_rc;

		rc = curs->get(curs, &pkey, &pval, cget_flags);
		if (rc) {
			if (rc != DB_NOTFOUND)
				dirent->err(dirent, rc, "readdir curs->get");
			break;
		}
		cget_flags = DB_NEXT;

		rkey = pkey.data;
		if (inum_decode(rkey->inum) != ino->inum) {
			free(rkey);
			break;
		}

		dirent_inum = inum_decode(db_de);

		iter_rc = readdir_iter(txn, rkey, pkey.size, dirent_inum, &ri);
		free(rkey);

		if (iter_rc)
			break;
	}

	/* no entries emitted: still must write the resok header */
	if (!ri.n_results) {
		if (debugging)
			applog(LOG_INFO, " zero results, status %s",
			       ri.status <= NFS4ERR_CB_PATH_DOWN ?
			       status2str(ri.status) : "n/a");

		if (ri.status == NFS4_OK) {
			WRMEM(&srv.instance_verf, sizeof(verifier4));	/* cookieverf */
			ri.val_follows = WRSKIP(4);
		}
	}

	rc = curs->close(curs);
	if (rc) {
		status = NFS4ERR_IO;
		dirent->err(dirent, rc, "dirent->cursor close");
		goto out_abort;
	}

the_finale:
	/* terminate final entry4.nextentry and dirlist4.entries */
	if (ri.val_follows)
		*ri.val_follows = htonl(0);

	/* a cookie was found but not even one entry fit in the budgets */
	if (ri.cookie_found && !ri.n_results && ri.hit_limit) {
		status = NFS4ERR_TOOSMALL;
		goto out_abort;
	}

	/* close transaction */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

	WR32(ri.hit_limit ? 0 : 1);	/* reply eof */

out:
	*status_p = htonl(status);
	inode_free(ino);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/*
 * Main work loop for a HashFast device thread: queue as many OP_HASH
 * jobs as the device currently wants, then return the number of hashes
 * completed since the last call (accumulated by the read thread into
 * info->hash_count).  Returns -1 to disable the device on fatal errors.
 */
static int64_t hfa_scanwork(struct thr_info *thr)
{
	struct cgpu_info *hashfast = thr->cgpu;
	struct hashfast_info *info = hashfast->device_data;
	int64_t hashes;
	int jobs, ret;

	if (unlikely(hashfast->usbinfo.nodev)) {
		applog(LOG_WARNING, "HFA %d: device disappeared, disabling",
		       hashfast->device_id);
		return -1;
	}

	/* on a stratum work restart, tell the device to abandon stale work */
	if (unlikely(thr->work_restart)) {
restart:
		ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), 0, (uint8_t *)NULL, 0);
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "HFA %d: Failed to reset after write failure, disabling",
				       hashfast->device_id);
				return -1;
			}
		}
	}

	jobs = hfa_jobs(info);

	/* nothing wanted yet: sleep briefly, retrying the restart path if a
	 * work restart arrived while waiting */
	if (!jobs) {
		ret = restart_wait(thr, 100);
		if (unlikely(!ret))
			goto restart;
		jobs = hfa_jobs(info);
	}

	while (jobs-- > 0) {
		struct hf_hash_usb op_hash_data;
		struct work *work;
		uint64_t intdiff;
		int i, sequence;
		uint32_t *p;

		/* This is a blocking function if there's no work */
		work = get_work(thr, thr->id);

		/* Assemble the data frame and send the OP_HASH packet */
		memcpy(op_hash_data.midstate, work->midstate, sizeof(op_hash_data.midstate));
		memcpy(op_hash_data.merkle_residual, work->data + 64, 4);
		p = (uint32_t *)(work->data + 64 + 4);
		op_hash_data.timestamp = *p++;
		op_hash_data.bits = *p++;
		op_hash_data.nonce_loops = 0;

		/* Set the number of leading zeroes to look for based on diff.
		 * Diff 1 = 32, Diff 2 = 33, Diff 4 = 34 etc. */
		intdiff = (uint64_t)work->device_diff;
		for (i = 31; intdiff; i++, intdiff >>= 1);
		op_hash_data.search_difficulty = i;
		/* sequence numbers wrap at the device's modulus */
		if ((sequence = info->hash_sequence_head + 1) >= info->num_sequence)
			sequence = 0;
		ret = hfa_send_frame(hashfast, OP_HASH, sequence, (uint8_t *)&op_hash_data, sizeof(op_hash_data));
		if (unlikely(!ret)) {
			ret = hfa_reset(hashfast, info);
			if (unlikely(!ret)) {
				applog(LOG_ERR, "HFA %d: Failed to reset after write failure, disabling",
				       hashfast->device_id);
				return -1;
			}
		}

		/* publish the new head and remember the work for result matching */
		mutex_lock(&info->lock);
		info->hash_sequence_head = sequence;
		info->works[info->hash_sequence_head] = work;
		mutex_unlock(&info->lock);

		applog(LOG_DEBUG, "HFA %d: OP_HASH sequence %d search_difficulty %d work_difficulty %g",
		       hashfast->device_id, info->hash_sequence_head,
		       op_hash_data.search_difficulty, work->work_difficulty);
	}

	/* harvest and reset the hash counter accumulated by the read thread */
	mutex_lock(&info->lock);
	hashes = info->hash_count;
	info->hash_count = 0;
	mutex_unlock(&info->lock);

	return hashes;
}
static char *my_pwdb_lookup(const char *user) { MYSQL *db = srv.db_cxn; MYSQL_STMT *stmt; MYSQL_BIND bind_param[1], bind_res[1]; unsigned long bind_lengths[1], bind_res_lengths[1]; char password[256], *pass_ret; int pass_len; const char *step = "init"; stmt = mysql_stmt_init(db); if (!stmt) return NULL; step = "prep"; if (mysql_stmt_prepare(stmt, srv.db_stmt_pwdb, strlen(srv.db_stmt_pwdb))) goto err_out; if (mysql_stmt_param_count(stmt)) { memset(bind_param, 0, sizeof(bind_param)); memset(bind_lengths, 0, sizeof(bind_lengths)); bind_instr(bind_param, bind_lengths, 0, user); step = "bind-param"; if (mysql_stmt_bind_param(stmt, bind_param)) goto err_out; } memset(bind_res, 0, sizeof(bind_res)); memset(bind_res_lengths, 0, sizeof(bind_res_lengths)); bind_res[0].buffer_type = MYSQL_TYPE_STRING; bind_res[0].buffer = password; bind_res[0].buffer_length = sizeof(password); bind_res[0].length = &bind_res_lengths[0]; step = "execute"; if (mysql_stmt_execute(stmt)) goto err_out; step = "bind-result"; if (mysql_stmt_bind_result(stmt, bind_res)) goto err_out; step = "store-result"; if (mysql_stmt_store_result(stmt)) goto err_out; step = "fetch"; if (mysql_stmt_fetch(stmt)) goto err_out; pass_len = bind_res_lengths[0]; step = "malloc"; pass_ret = malloc(pass_len + 1); if (!pass_ret) goto err_out; memcpy(pass_ret, password, pass_len); pass_ret[pass_len] = 0; mysql_stmt_close(stmt); return pass_ret; err_out: mysql_stmt_close(stmt); applog(LOG_ERR, "mysql pwdb query failed at %s", step); return NULL; }
static void hashratio_stratum_pkgs(struct cgpu_info *hashratio, struct pool *pool) { const int merkle_offset = 36; struct hashratio_pkg pkg; int i, a, b, tmp; unsigned char target[32]; int job_id_len; unsigned short crc; /* Send out the first stratum message STATIC */ applog(LOG_DEBUG, "hashratio: Pool stratum message STATIC: %d, %d, %d, %d, %d, %d", pool->coinbase_len, pool->nonce2_offset, pool->n2size, merkle_offset, pool->merkles, pool->pool_no); memset(pkg.data, 0, HRTO_P_DATA_LEN); tmp = be32toh(pool->coinbase_len); memcpy(pkg.data, &tmp, 4); tmp = be32toh(pool->nonce2_offset); memcpy(pkg.data + 4, &tmp, 4); tmp = be32toh(pool->n2size); memcpy(pkg.data + 8, &tmp, 4); tmp = be32toh(merkle_offset); memcpy(pkg.data + 12, &tmp, 4); tmp = be32toh(pool->merkles); memcpy(pkg.data + 16, &tmp, 4); tmp = be32toh((int)pool->sdiff); memcpy(pkg.data + 20, &tmp, 4); tmp = be32toh((int)pool->pool_no); memcpy(pkg.data + 24, &tmp, 4); hashratio_init_pkg(&pkg, HRTO_P_STATIC, 1, 1); if (hashratio_send_pkgs(hashratio, &pkg)) return; set_target(target, pool->sdiff); memcpy(pkg.data, target, 32); if (opt_debug) { char *target_str; target_str = bin2hex(target, 32); applog(LOG_DEBUG, "hashratio: Pool stratum target: %s", target_str); free(target_str); } hashratio_init_pkg(&pkg, HRTO_P_TARGET, 1, 1); if (hashratio_send_pkgs(hashratio, &pkg)) return; applog(LOG_DEBUG, "hashratio: Pool stratum message JOBS_ID: %s", pool->swork.job_id); memset(pkg.data, 0, HRTO_P_DATA_LEN); job_id_len = strlen(pool->swork.job_id); crc = crc16((const unsigned char *)pool->swork.job_id, job_id_len); pkg.data[0] = (crc & 0xff00) >> 8; pkg.data[1] = crc & 0x00ff; hashratio_init_pkg(&pkg, HRTO_P_JOB_ID, 1, 1); if (hashratio_send_pkgs(hashratio, &pkg)) return; a = pool->coinbase_len / HRTO_P_DATA_LEN; b = pool->coinbase_len % HRTO_P_DATA_LEN; applog(LOG_DEBUG, "pool->coinbase_len: %d", pool->coinbase_len); applog(LOG_DEBUG, "hashratio: Pool stratum message COINBASE: %d %d", a, b); for (i = 0; i < a; i++) { 
memcpy(pkg.data, pool->coinbase + i * 32, 32); hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, a + (b ? 1 : 0)); if (hashratio_send_pkgs(hashratio, &pkg)) return; if (i % 25 == 0) { cgsleep_ms(2); } } if (b) { memset(pkg.data, 0, HRTO_P_DATA_LEN); memcpy(pkg.data, pool->coinbase + i * 32, b); hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, i + 1); if (hashratio_send_pkgs(hashratio, &pkg)) return; } b = pool->merkles; applog(LOG_DEBUG, "hashratio: Pool stratum message MERKLES: %d", b); for (i = 0; i < b; i++) { memset(pkg.data, 0, HRTO_P_DATA_LEN); memcpy(pkg.data, pool->swork.merkle_bin[i], 32); hashratio_init_pkg(&pkg, HRTO_P_MERKLES, i + 1, b); if (hashratio_send_pkgs(hashratio, &pkg)) return; } applog(LOG_DEBUG, "hashratio: Pool stratum message HEADER: 4"); for (i = 0; i < 4; i++) { memset(pkg.data, 0, HRTO_P_HEADER); memcpy(pkg.data, pool->header_bin + i * 32, 32); hashratio_init_pkg(&pkg, HRTO_P_HEADER, i + 1, 4); if (hashratio_send_pkgs(hashratio, &pkg)) return; } }
// called by each thread that uses the gate bool register_algo_gate( int algo, algo_gate_t *gate ) { if ( NULL == gate ) { applog(LOG_ERR,"FAIL: algo_gate registration failed, NULL gate\n"); return false; } init_algo_gate( gate ); switch (algo) { case ALGO_ARGON2: register_argon2_algo ( gate ); break; case ALGO_AXIOM: register_axiom_algo ( gate ); break; case ALGO_BASTION: register_bastion_algo ( gate ); break; case ALGO_BLAKE: register_blake_algo ( gate ); break; case ALGO_BLAKECOIN: register_blakecoin_algo ( gate ); break; case ALGO_BLAKE2S: register_blake2s_algo ( gate ); break; case ALGO_C11: register_c11_algo ( gate ); break; case ALGO_CRYPTOLIGHT: register_cryptolight_algo( gate ); break; case ALGO_CRYPTONIGHT: register_cryptonight_algo( gate ); break; case ALGO_DECRED: register_decred_algo ( gate ); break; case ALGO_DROP: register_drop_algo ( gate ); break; case ALGO_FRESH: register_fresh_algo ( gate ); break; case ALGO_GROESTL: register_groestl_algo ( gate ); break; case ALGO_HEAVY: register_heavy_algo ( gate ); break; case ALGO_HMQ1725: register_hmq1725_algo ( gate ); break; case ALGO_HODL: register_hodl_algo ( gate ); break; case ALGO_KECCAK: register_keccak_algo ( gate ); break; case ALGO_LBRY: register_lbry_algo ( gate ); break; case ALGO_LUFFA: register_luffa_algo ( gate ); break; case ALGO_LYRA2RE: register_lyra2re_algo ( gate ); break; case ALGO_LYRA2REV2: register_lyra2rev2_algo ( gate ); break; case ALGO_LYRA2Z: register_zcoin_algo ( gate ); break; case ALGO_LYRA2ZOIN: register_zoin_algo ( gate ); break; case ALGO_M7M: register_m7m_algo ( gate ); break; case ALGO_MYR_GR: register_myriad_algo ( gate ); break; case ALGO_NEOSCRYPT: register_neoscrypt_algo ( gate ); break; case ALGO_NIST5: register_nist5_algo ( gate ); break; case ALGO_PENTABLAKE: register_pentablake_algo ( gate ); break; case ALGO_PLUCK: register_pluck_algo ( gate ); break; case ALGO_QUARK: register_quark_algo ( gate ); break; case ALGO_QUBIT: register_qubit_algo ( gate ); break; case 
ALGO_SCRYPT: register_scrypt_algo ( gate ); break; case ALGO_SCRYPTJANE: register_scryptjane_algo ( gate ); break; case ALGO_SHA256D: register_sha256d_algo ( gate ); break; case ALGO_SHAVITE3: register_shavite_algo ( gate ); break; case ALGO_SKEIN: register_skein_algo ( gate ); break; case ALGO_SKEIN2: register_skein2_algo ( gate ); break; case ALGO_S3: register_s3_algo ( gate ); break; case ALGO_VANILLA: register_vanilla_algo ( gate ); break; case ALGO_VELTOR: register_veltor_algo ( gate ); break; case ALGO_WHIRLPOOL: register_whirlpool_algo ( gate ); break; case ALGO_WHIRLPOOLX: register_whirlpoolx_algo ( gate ); break; case ALGO_X11: register_x11_algo ( gate ); break; case ALGO_X11EVO: register_x11evo_algo ( gate ); break; case ALGO_X11GOST: register_sib_algo ( gate ); break; case ALGO_X13: register_x13_algo ( gate ); break; case ALGO_X14: register_x14_algo ( gate ); break; case ALGO_X15: register_x15_algo ( gate ); break; case ALGO_X17: register_x17_algo ( gate ); break; case ALGO_XEVAN: register_xevan_algo ( gate ); break; case ALGO_YESCRYPT: register_yescrypt_algo ( gate ); break; case ALGO_ZR5: register_zr5_algo ( gate ); break; default: applog(LOG_ERR,"FAIL: algo_gate registration failed, unknown algo %s.\n", algo_names[opt_algo] ); return false; } // switch // ensure required functions were defined. if ( gate->scanhash == (void*)&null_scanhash ) { applog(LOG_ERR, "FAIL: Required algo_gate functions undefined\n"); return false; } return true; }
static struct cgpu_info *hashratio_detect_one(struct libusb_device *dev, struct usb_find_devices *found) { struct hashratio_info *info; int err, amount; int ackdetect; char mm_version[16]; struct cgpu_info *hashratio = usb_alloc_cgpu(&hashratio_drv, 1); struct hashratio_pkg detect_pkg; struct hashratio_ret ret_pkg; if (!usb_init(hashratio, dev, found)) { applog(LOG_ERR, "Hashratio failed usb_init"); hashratio = usb_free_cgpu(hashratio); return NULL; } hashratio_initialise(hashratio); strcpy(mm_version, "NONE"); /* Send out detect pkg */ memset(detect_pkg.data, 0, HRTO_P_DATA_LEN); hashratio_init_pkg(&detect_pkg, HRTO_P_DETECT, 1, 1); hashratio_send_pkg(hashratio, &detect_pkg); err = usb_read(hashratio, (char *)&ret_pkg, HRTO_READ_SIZE, &amount, C_HRO_READ); if (err || amount != HRTO_READ_SIZE) { applog(LOG_ERR, "%s %d: Hashratio failed usb_read with err %d amount %d", hashratio->drv->name, hashratio->device_id, err, amount); usb_uninit(hashratio); usb_free_cgpu(hashratio); return NULL; } ackdetect = ret_pkg.type; applog(LOG_DEBUG, "hashratio Detect ID: %d", ackdetect); if (ackdetect != HRTO_P_ACKDETECT) { applog(LOG_DEBUG, "Not a hashratio device"); usb_uninit(hashratio); usb_free_cgpu(hashratio); return NULL; } memcpy(mm_version, ret_pkg.data, 15); mm_version[15] = '\0'; /* We have a real Hashratio! */ hashratio->threads = HRTO_MINER_THREADS; add_cgpu(hashratio); update_usb_stats(hashratio); applog(LOG_INFO, "%s%d: Found at %s", hashratio->drv->name, hashratio->device_id, hashratio->device_path); hashratio->device_data = cgcalloc(sizeof(struct hashratio_info), 1); info = hashratio->device_data; strcpy(info->mm_version, mm_version); info->fan_pwm = HRTO_DEFAULT_FAN / 100 * HRTO_PWM_MAX; info->temp_max = 0; info->temp_history_index = 0; info->temp_sum = 0; info->temp_old = 0; info->default_freq = hashratio_freq; return hashratio; }
void algo_not_tested() { applog( LOG_WARNING,"Algo %s has not been tested live. It may not work", algo_names[opt_algo] ); applog(LOG_WARNING,"and bad things may happen. Use at your own risk."); }
/*
 * Dynamically load the NVIDIA Management Library and resolve the
 * entry points used for GPU monitoring.  Tries the v2 API first and
 * falls back to the original symbols.  On any failure, sets the
 * global opt_nonvml flag and returns without error.
 *
 * NOTE(review): the non-Linux branch uses LoadLibrary but then resolves
 * symbols via dlsym — presumably dlsym is mapped to GetProcAddress by a
 * macro elsewhere in this project; confirm before touching.
 */
void nvml_init()
{
	nvmlReturn_t ret;
#ifdef __linux__
	hDLL = dlopen("libnvidia-ml.so", RTLD_LAZY | RTLD_GLOBAL);
#else
	/* Not in system path, but could be local */
	hDLL = LoadLibrary("nvml.dll");
	if(!hDLL) {
		/* %ProgramW6432% is unsupported by OS prior to year 2009 */
		char path[512];
		ExpandEnvironmentStringsA("%ProgramFiles%\\NVIDIA Corporation\\NVSMI\\nvml.dll", path, sizeof(path));
		hDLL = LoadLibrary(path);
	}
#endif
	if(!hDLL) {
		applog(LOG_INFO, "Unable to load the NVIDIA Management Library");
		opt_nonvml = true;
		return;
	}

	/* Prefer the _v2 entry points (newer drivers) */
	NVML_nvmlInit = (nvmlReturn_t (*)()) dlsym(hDLL, "nvmlInit_v2");
	if(!NVML_nvmlInit) {
		/* Try an older interface */
		NVML_nvmlInit = (nvmlReturn_t (*)()) dlsym(hDLL, "nvmlInit");
		if(!NVML_nvmlInit) {
			applog(LOG_ERR, "NVML: Unable to initialise");
			opt_nonvml = true;
			return;
		} else {
			/* old-style symbols to match the old nvmlInit */
			NVML_nvmlDeviceGetCount = (nvmlReturn_t (*)(uint *)) \
				dlsym(hDLL, "nvmlDeviceGetCount");
			NVML_nvmlDeviceGetHandleByIndex = (nvmlReturn_t (*)(uint, nvmlDevice_t *)) \
				dlsym(hDLL, "nvmlDeviceGetHandleByIndex");
			NVML_nvmlDeviceGetPciInfo = (nvmlReturn_t (*)(nvmlDevice_t, nvmlPciInfo_t *)) \
				dlsym(hDLL, "nvmlDeviceGetPciInfo");
		}
	} else {
		/* v2 symbols to match nvmlInit_v2 */
		NVML_nvmlDeviceGetCount = (nvmlReturn_t (*)(uint *)) \
			dlsym(hDLL, "nvmlDeviceGetCount_v2");
		NVML_nvmlDeviceGetHandleByIndex = (nvmlReturn_t (*)(uint, nvmlDevice_t *)) \
			dlsym(hDLL, "nvmlDeviceGetHandleByIndex_v2");
		NVML_nvmlDeviceGetPciInfo = (nvmlReturn_t (*)(nvmlDevice_t, nvmlPciInfo_t *)) \
			dlsym(hDLL, "nvmlDeviceGetPciInfo_v2");
	}

	/* version-independent symbols */
	NVML_nvmlErrorString = (char * (*)()) \
		dlsym(hDLL, "nvmlErrorString");
	NVML_nvmlDeviceGetName = (nvmlReturn_t (*)(nvmlDevice_t, char *, uint)) \
		dlsym(hDLL, "nvmlDeviceGetName");
	NVML_nvmlDeviceGetTemperature = (nvmlReturn_t (*)(nvmlDevice_t, nvmlTemperatureSensors_t, uint *)) \
		dlsym(hDLL, "nvmlDeviceGetTemperature");
	NVML_nvmlDeviceGetFanSpeed = (nvmlReturn_t (*)(nvmlDevice_t, uint *)) \
		dlsym(hDLL, "nvmlDeviceGetFanSpeed");
	NVML_nvmlShutdown = (nvmlReturn_t (*)()) \
		dlsym(hDLL, "nvmlShutdown");

	ret = NVML_nvmlInit();
	if(ret != NVML_SUCCESS) {
		applog(LOG_ERR, "NVML: Initialisation failed with code %s",
		       NVML_nvmlErrorString(ret));
	}
}
/* Log an error naming the currently selected algorithm as unimplemented. */
void algo_not_implemented()
{
	const char *algo = algo_names[opt_algo];

	applog(LOG_ERR, "Algo %s has not been Implemented.", algo);
}
/*
 * Perform a JSON-RPC request over HTTP(S) using the supplied (freshly
 * [re]initialized) curl handle.
 *
 * url       - target endpoint
 * userpass  - optional "user:pass" for HTTP basic auth (may be NULL)
 * rpc_req   - JSON request body to POST
 * longpoll_scan - if true and long polling is not yet active, look for an
 *                 X-Long-Polling header and activate it
 * longpoll  - if true, use the long-poll timeout and keepalive socket opts
 * curl_err  - if non-NULL, receives the raw curl result code
 *
 * Returns the decoded JSON response (caller owns the reference and must
 * json_decref() it), or NULL on any transport/decode/RPC error.
 *
 * Side effects: may activate stratum or long polling by pushing the
 * advertised URL/path to the corresponding thread queue; on a rejected
 * result, attaches "reject-reason" to the returned object.
 *
 * Fixes vs. previous revision:
 *  - header_info strings (lp_path/reason/stratum_url) were leaked on the
 *    success path (the err_out path already freed them, establishing that
 *    they are heap-owned by this function);
 *  - 'val' was leaked when the JSON-RPC result was invalid;
 *  - sprintf() replaced with bounded snprintf().
 */
json_t *json_rpc_call(CURL *curl, const char *url,
		      const char *userpass, const char *rpc_req,
		      bool longpoll_scan, bool longpoll, int *curl_err)
{
	json_t *val, *err_val, *res_val;
	int rc;
	struct data_buffer all_data = {0};
	struct upload_buffer upload_data;
	json_error_t err;
	struct curl_slist *headers = NULL;
	char len_hdr[64];
	char curl_err_str[CURL_ERROR_SIZE];
	long timeout = longpoll ? opt_timeout : 30;
	struct header_info hi = {0};
	bool lp_scanning = longpoll_scan && !have_longpoll;

	/* it is assumed that 'curl' is freshly [re]initialized at this pt */

	if (opt_protocol)
		curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	if (opt_cert)
		curl_easy_setopt(curl, CURLOPT_CAINFO, opt_cert);
	curl_easy_setopt(curl, CURLOPT_ENCODING, "");
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
	curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
	curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data);
	curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb);
	curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data);
#if LIBCURL_VERSION_NUM >= 0x071200
	curl_easy_setopt(curl, CURLOPT_SEEKFUNCTION, &seek_data_cb);
	curl_easy_setopt(curl, CURLOPT_SEEKDATA, &upload_data);
#endif
	curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str);
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout);
	curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb);
	curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi);
	if (opt_proxy) {
		curl_easy_setopt(curl, CURLOPT_PROXY, opt_proxy);
		curl_easy_setopt(curl, CURLOPT_PROXYTYPE, opt_proxy_type);
	}
	if (userpass) {
		curl_easy_setopt(curl, CURLOPT_USERPWD, userpass);
		curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
	}
#if LIBCURL_VERSION_NUM >= 0x070f06
	if (longpoll)
		curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, sockopt_keepalive_cb);
#endif
	curl_easy_setopt(curl, CURLOPT_POST, 1);

	if (opt_protocol)
		applog(LOG_DEBUG, "JSON protocol request:\n%s\n", rpc_req);

	upload_data.buf = rpc_req;
	upload_data.len = strlen(rpc_req);
	upload_data.pos = 0;
	/* bounded format instead of sprintf(); value always fits in 64 bytes */
	snprintf(len_hdr, sizeof(len_hdr), "Content-Length: %lu",
		 (unsigned long) upload_data.len);

	headers = curl_slist_append(headers, "Content-Type: application/json");
	headers = curl_slist_append(headers, len_hdr);
	headers = curl_slist_append(headers, "User-Agent: " USER_AGENT);
	headers = curl_slist_append(headers, "X-Mining-Extensions: midstate");
	headers = curl_slist_append(headers, "Accept:"); /* disable Accept hdr*/
	headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/

	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

	rc = curl_easy_perform(curl);
	if (curl_err != NULL)
		*curl_err = rc;
	if (rc) {
		/* long-poll timeouts are expected; don't spam the log */
		if (!(longpoll && rc == CURLE_OPERATION_TIMEDOUT))
			applog(LOG_ERR, "HTTP request failed: %s", curl_err_str);
		goto err_out;
	}

	/* If X-Stratum was found, activate Stratum */
	if (want_stratum && hi.stratum_url &&
	    !strncasecmp(hi.stratum_url, "stratum+tcp://", 14) &&
	    !(opt_proxy && opt_proxy_type == CURLPROXY_HTTP)) {
		have_stratum = true;
		tq_push(thr_info[stratum_thr_id].q, hi.stratum_url);
		hi.stratum_url = NULL;	/* ownership passed to the queue */
	}

	/* If X-Long-Polling was found, activate long polling */
	if (lp_scanning && hi.lp_path && !have_stratum) {
		have_longpoll = true;
		tq_push(thr_info[longpoll_thr_id].q, hi.lp_path);
		hi.lp_path = NULL;	/* ownership passed to the queue */
	}

	if (!all_data.buf) {
		applog(LOG_ERR, "Empty data received in json_rpc_call.");
		goto err_out;
	}

	val = JSON_LOADS(all_data.buf, &err);
	if (!val) {
		applog(LOG_ERR, "JSON decode failed(%d): %s", err.line, err.text);
		goto err_out;
	}

	if (opt_protocol) {
		char *s = json_dumps(val, JSON_INDENT(3));
		applog(LOG_DEBUG, "JSON protocol response:\n%s", s);
		free(s);
	}

	/* JSON-RPC valid response returns a non-null 'result',
	 * and a null 'error'. */
	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_null(res_val) ||
	    (err_val && !json_is_null(err_val))) {
		char *s;

		if (err_val)
			s = json_dumps(err_val, JSON_INDENT(3));
		else
			s = strdup("(unknown reason)");

		applog(LOG_ERR, "JSON-RPC call failed: %s", s);

		free(s);
		json_decref(val);	/* fix: 'val' was leaked here */
		goto err_out;
	}

	if (hi.reason)
		json_object_set_new(val, "reject-reason", json_string(hi.reason));

	/* fix: these were leaked on the success path; any consumed pointers
	 * above were NULLed, so free() here is always safe */
	free(hi.lp_path);
	free(hi.reason);
	free(hi.stratum_url);
	databuf_free(&all_data);
	curl_slist_free_all(headers);
	curl_easy_reset(curl);
	return val;

err_out:
	free(hi.lp_path);
	free(hi.reason);
	free(hi.stratum_url);
	databuf_free(&all_data);
	curl_slist_free_all(headers);
	curl_easy_reset(curl);
	return NULL;
}
static int decode_pkg(struct thr_info *thr, struct avalon2_ret *ar, uint8_t *pkg) { struct cgpu_info *avalon2; struct avalon2_info *info; struct pool *pool; unsigned int expected_crc; unsigned int actual_crc; uint32_t nonce, nonce2, miner, modular_id; int pool_no; uint8_t job_id[5]; int tmp; int type = AVA2_GETS_ERROR; if (thr) { avalon2 = thr->cgpu; info = avalon2->device_data; } memcpy((uint8_t *)ar, pkg, AVA2_READ_SIZE); if (ar->head[0] == AVA2_H1 && ar->head[1] == AVA2_H2) { expected_crc = crc16(ar->data, AVA2_P_DATA_LEN); actual_crc = (ar->crc[0] & 0xff) | ((ar->crc[1] & 0xff) << 8); type = ar->type; applog(LOG_DEBUG, "Avalon2: %d: expected crc(%04x), actural_crc(%04x)", type, expected_crc, actual_crc); if (expected_crc != actual_crc) goto out; memcpy(&modular_id, ar->data + 28, 4); modular_id = be32toh(modular_id); if (modular_id == 3) modular_id = 0; switch(type) { case AVA2_P_NONCE: memcpy(&miner, ar->data + 0, 4); memcpy(&pool_no, ar->data + 4, 4); memcpy(&nonce2, ar->data + 8, 4); /* Calc time ar->data + 12 */ memcpy(&nonce, ar->data + 16, 4); memset(job_id, 0, 5); memcpy(job_id, ar->data + 20, 4); miner = be32toh(miner); pool_no = be32toh(pool_no); if (miner >= AVA2_DEFAULT_MINERS || modular_id >= AVA2_DEFAULT_MINERS || pool_no >= total_pools || pool_no < 0) { applog(LOG_DEBUG, "Avalon2: Wrong miner/pool/id no %d,%d,%d", miner, pool_no, modular_id); break; } else info->matching_work[modular_id * AVA2_DEFAULT_MINERS + miner]++; nonce2 = bswap_32(nonce2); nonce = be32toh(nonce); nonce -= 0x180; applog(LOG_DEBUG, "Avalon2: Found! [%s] %d:(%08x) (%08x)", job_id, pool_no, nonce2, nonce); /* FIXME: * We need remember the pre_pool. 
then submit the stale work */ pool = pools[pool_no]; if (job_idcmp(job_id, pool->swork.job_id)) break; if (thr && !info->new_stratum) submit_nonce2_nonce(thr, pool_no, nonce2, nonce); break; case AVA2_P_STATUS: memcpy(&tmp, ar->data, 4); tmp = be32toh(tmp); info->temp[0 + modular_id * 2] = tmp >> 16; info->temp[1 + modular_id * 2] = tmp & 0xffff; memcpy(&tmp, ar->data + 4, 4); tmp = be32toh(tmp); info->fan[0 + modular_id * 2] = tmp >> 16; info->fan[1 + modular_id * 2] = tmp & 0xffff; memcpy(&(info->get_frequency[modular_id]), ar->data + 8, 4); memcpy(&(info->get_voltage[modular_id]), ar->data + 12, 4); memcpy(&(info->local_work[modular_id]), ar->data + 16, 4); memcpy(&(info->hw_work[modular_id]), ar->data + 20, 4); info->get_frequency[modular_id] = be32toh(info->get_frequency[modular_id]); info->get_voltage[modular_id] = be32toh(info->get_voltage[modular_id]); info->local_work[modular_id] = be32toh(info->local_work[modular_id]); info->hw_work[modular_id] = be32toh(info->hw_work[modular_id]); info->local_works[modular_id] += info->local_work[modular_id]; info->hw_works[modular_id] += info->hw_work[modular_id]; avalon2->temp = info->temp[0]; /* FIXME: */ break; case AVA2_P_ACKDETECT: break; case AVA2_P_ACK: break; case AVA2_P_NAK: break; default: type = AVA2_GETS_ERROR; break; } }