int authorize(char *username, const char *password)
{
  int authorized = 0;
  char l[256], u[65], passwd[129];
  char *newpw = NULL;
  size_t username_l;
  size_t min_len;

  debug("Checking basic for user: %s; password XXXXX", username);

  if ((username == NULL) || (password == NULL)) {
    debug("No username (%p) or password (XXXXX)", username);
    return 0;
  }
  username_l = strlen(username);

  FILE *fp = fopen(filename, "r");
  if (!fp) {
    debug("Couldn't open basic passwd file %s", filename);
    return 0;
  }

  /* Each line of the passwd file is expected to be "user:crypted-password". */
  while (fgets(l, sizeof(l), fp) != NULL) {
    if (sscanf(l, "%64[^:]:%128s", u, passwd) != 2)
      continue; /* Ignore malformed lines */

    debug("user: %s, passwd: XXXX", u);

    /* Compare no more bytes than the shorter of the two usernames. */
    min_len = strlen(u);
    if (username_l < min_len) {
      min_len = username_l;
    }

    if (!safe_cmp((unsigned char *)username, (unsigned char *)u, min_len)) {
      size_t newpw_l;

      /* Crypt the supplied password using the stored hash as the salt,
       * then compare the result against the stored hash. */
      min_len = strlen(passwd);
      newpw = crypt(password, passwd);
      newpw_l = strlen(newpw);
      if (newpw_l < min_len) {
        min_len = newpw_l;
      }

      debug("user: %s, passwd: XXXXX", u);
      authorized = (safe_cmp((unsigned char *)newpw, (unsigned char *)passwd, min_len) == 0);
      break;
    }
  }
  fclose(fp);

  return authorized;
}
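/* safe_cmp() is not part of this excerpt. Below is a minimal sketch of what a
 * three-argument helper with the shape used by authorize() above typically
 * looks like: a constant-time byte comparison returning 0 on equality. The
 * name safe_cmp_sketch, the signature, and the exact semantics are
 * assumptions for illustration, not the project's actual code. */
#include <stddef.h>

static int safe_cmp_sketch(const unsigned char *a, const unsigned char *b, size_t len)
{
  unsigned char diff = 0;
  size_t i;

  /* Accumulate differences instead of returning early, so the running time
   * does not depend on where the first mismatch occurs. */
  for (i = 0; i < len; i++)
    diff |= a[i] ^ b[i];

  return diff; /* 0 when the first len bytes match */
}
/* Because authorize() passes min_len, the helper itself never reads past the
 * shorter of the two strings. */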
static bool opencl_prepare_work(struct thr_info __maybe_unused *thr, struct work *work)
{
  if (!safe_cmp(work->pool->algorithm.name, "Lyra2RE")) {
    work->blk.work = work;
    precalc_hash_blake256(&work->blk, 0, (uint32_t *)(work->data));
  } else {
    work->blk.work = work;
  }

  thr->pool_no = work->pool->pool_no;
  return true;
}
bool cmp_algorithm(const algorithm_t* algo1, const algorithm_t* algo2)
{
  return (!safe_cmp(algo1->name, algo2->name) &&
          !safe_cmp(algo1->kernelfile, algo2->kernelfile) &&
          (algo1->nfactor == algo2->nfactor));
}
bool cmp_algorithm(algorithm_t* algo1, algorithm_t* algo2)
{
  // return (strcmp(algo1->name, algo2->name) == 0) && (algo1->nfactor == algo2->nfactor);
  return (!safe_cmp(algo1->name, algo2->name) &&
          !safe_cmp(algo1->kernelfile, algo2->kernelfile) &&
          (algo1->nfactor == algo2->nfactor));
}
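/* The two-argument safe_cmp() used by opencl_prepare_work() and
 * cmp_algorithm() has a different shape from the three-argument call in
 * authorize() and is likewise not shown in this excerpt. Judging by the
 * commented-out strcmp() line in cmp_algorithm(), it appears to behave like
 * strcmp (0 on a match). A hedged sketch under that assumption; the NULL
 * tolerance is purely a guess: */
#include <string.h>

static int safe_cmp2_sketch(const char *a, const char *b)
{
  if (a == NULL || b == NULL)
    return 1; /* treat a missing string as "not equal" (assumption) */
  return strcmp(a, b); /* 0 when the strings match, as the !safe_cmp(...) tests expect */
}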
_clState *initCl(unsigned int gpu, char *name, size_t nameSize, algorithm_t *algorithm)
{
  _clState *clState = (_clState *)calloc(1, sizeof(_clState));
  struct cgpu_info *cgpu = &gpus[gpu];
  cl_platform_id platform = NULL;
  char pbuff[256];
  build_kernel_data *build_data = (build_kernel_data *)alloca(sizeof(struct _build_kernel_data));
  cl_uint preferred_vwidth;
  cl_device_id *devices;
  cl_uint numDevices;
  cl_int status;

  if (!get_opencl_platform(opt_platform_id, &platform)) {
    return NULL;
  }

  numDevices = clDevicesNum();
  if (numDevices <= 0)
    return NULL;

  devices = (cl_device_id *)alloca(numDevices * sizeof(cl_device_id));

  /* Now, get the device list data */
  status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, NULL);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Getting Device IDs (list)", status);
    return NULL;
  }

  applog(LOG_INFO, "List of devices:");

  unsigned int i;
  for (i = 0; i < numDevices; i++) {
    status = clGetDeviceInfo(devices[i], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL);
    if (status != CL_SUCCESS) {
      applog(LOG_ERR, "Error %d: Getting Device Info", status);
      return NULL;
    }

    applog(LOG_INFO, "\t%i\t%s", i, pbuff);

    if (i == gpu) {
      applog(LOG_INFO, "Selected %i: %s", gpu, pbuff);
      strncpy(name, pbuff, nameSize);
    }
  }

  if (gpu >= numDevices) {
    applog(LOG_ERR, "Invalid GPU %i", gpu);
    return NULL;
  }

  status = create_opencl_context(&clState->context, &platform);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Creating Context. (clCreateContextFromType)", status);
    return NULL;
  }

  status = create_opencl_command_queue(&clState->commandQueue, &clState->context,
                                       &devices[gpu], cgpu->algorithm.cq_properties);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Creating Command Queue. (clCreateCommandQueue)", status);
    return NULL;
  }

  clState->hasBitAlign = get_opencl_bit_align_support(&devices[gpu]);

  status = clGetDeviceInfo(devices[gpu], CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT,
                           sizeof(cl_uint), (void *)&preferred_vwidth, NULL);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT", status);
    return NULL;
  }
  applog(LOG_DEBUG, "Preferred vector width reported %d", preferred_vwidth);

  status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_WORK_GROUP_SIZE,
                           sizeof(size_t), (void *)&clState->max_work_size, NULL);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE", status);
    return NULL;
  }
  applog(LOG_DEBUG, "Max work group size reported %d", (int)(clState->max_work_size));

  size_t compute_units = 0;
  status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_COMPUTE_UNITS,
                           sizeof(size_t), (void *)&compute_units, NULL);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_COMPUTE_UNITS", status);
    return NULL;
  }
  // AMD architecture has 64 compute shaders per compute unit.
  // Source: http://www.amd.com/us/Documents/GCN_Architecture_whitepaper.pdf
  clState->compute_shaders = compute_units * 64;
  applog(LOG_DEBUG, "Max shaders calculated %d", (int)(clState->compute_shaders));

  status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE,
                           sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_MEM_ALLOC_SIZE", status);
    return NULL;
  }
  applog(LOG_DEBUG, "Max mem alloc size is %lu", (long unsigned int)(cgpu->max_alloc));

  /* Create the binary filename based on the parameters passed to the opencl
   * compiler, to ensure we only load a binary that matches what would
   * otherwise have been created. The filename is:
   * name + g + lg + lookup_gap + tc + thread_concurrency + nf + nfactor + w + work_size + l + sizeof(long) + .bin
   */
  char filename[255];
  char strbuf[32];

  sprintf(strbuf, "%s.cl", (!empty_string(cgpu->algorithm.kernelfile) ? cgpu->algorithm.kernelfile : cgpu->algorithm.name));
  strcpy(filename, strbuf);
  applog(LOG_DEBUG, "Using source file %s", filename);

  /* For some reason 2 vectors is still better even if the card says
   * otherwise, and many cards lie about their max, so use 256 as max
   * unless explicitly set on the command line. Tahiti prefers 1. */
  if (strstr(name, "Tahiti"))
    preferred_vwidth = 1;
  else if (preferred_vwidth > 2)
    preferred_vwidth = 2;

  /* All available kernels only support vector 1 */
  cgpu->vwidth = 1;

  /* Vectors are hard-set to 1 above. */
  if (likely(cgpu->vwidth))
    clState->vwidth = cgpu->vwidth;
  else {
    clState->vwidth = preferred_vwidth;
    cgpu->vwidth = preferred_vwidth;
  }

  clState->goffset = true;

  if (cgpu->work_size && cgpu->work_size <= clState->max_work_size)
    clState->wsize = cgpu->work_size;
  else
    clState->wsize = 256;

  if (!cgpu->opt_lg) {
    applog(LOG_DEBUG, "GPU %d: selecting lookup gap of 2", gpu);
    cgpu->lookup_gap = 2;
  } else
    cgpu->lookup_gap = cgpu->opt_lg;

  if ((strcmp(cgpu->algorithm.name, "zuikkis") == 0) && (cgpu->lookup_gap != 2)) {
    applog(LOG_WARNING, "Kernel zuikkis only supports lookup-gap = 2 (currently %d), forcing.", cgpu->lookup_gap);
    cgpu->lookup_gap = 2;
  }

  if ((strcmp(cgpu->algorithm.name, "bufius") == 0) &&
      ((cgpu->lookup_gap != 2) && (cgpu->lookup_gap != 4) && (cgpu->lookup_gap != 8))) {
    applog(LOG_WARNING, "Kernel bufius only supports lookup-gap of 2, 4 or 8 (currently %d), forcing to 2", cgpu->lookup_gap);
    cgpu->lookup_gap = 2;
  }

  // neoscrypt calculates TC differently
  if (!safe_cmp(cgpu->algorithm.name, "neoscrypt")) {
    int max_int = ((cgpu->dynamic) ? MAX_INTENSITY : cgpu->intensity);
    size_t glob_thread_count = 1UL << max_int;

    // if TC is entered by user, use that value... otherwise use default
    cgpu->thread_concurrency = ((cgpu->opt_tc) ? cgpu->opt_tc : ((glob_thread_count < cgpu->work_size) ? cgpu->work_size : glob_thread_count));

    // if TC * scratchbuf size is too big for memory... reduce to max
    if (((uint64_t)cgpu->thread_concurrency * NEOSCRYPT_SCRATCHBUF_SIZE) > (uint64_t)cgpu->max_alloc) {
      /* Selected intensity will not run on this GPU. Not enough memory.
       * Adapt the memory setting. */
      glob_thread_count = cgpu->max_alloc / NEOSCRYPT_SCRATCHBUF_SIZE;

      /* Find the highest significant bit in glob_thread_count, which gives
       * the intensity. */
      while (max_int && ((1U << max_int) & glob_thread_count) == 0) {
        --max_int;
      }

      /* Clamp the computed intensity to the supported minimum. */
      if (max_int < MIN_INTENSITY) {
        applog(LOG_ERR, "GPU %d: Max intensity is below minimum.", gpu);
        max_int = MIN_INTENSITY;
      }

      cgpu->intensity = max_int;
      cgpu->thread_concurrency = 1U << max_int;
    }

    applog(LOG_DEBUG, "GPU %d: computing max. global thread count to %u", gpu, (unsigned)(cgpu->thread_concurrency));
  } else if (!cgpu->opt_tc) {
    unsigned int sixtyfours;

    sixtyfours = cgpu->max_alloc / 131072 / 64 / (algorithm->n / 1024) - 1;
    cgpu->thread_concurrency = sixtyfours * 64;
    if (cgpu->shaders && cgpu->thread_concurrency > cgpu->shaders) {
      cgpu->thread_concurrency -= cgpu->thread_concurrency % cgpu->shaders;
      if (cgpu->thread_concurrency > cgpu->shaders * 5) {
        cgpu->thread_concurrency = cgpu->shaders * 5;
      }
    }
    applog(LOG_DEBUG, "GPU %d: selecting thread concurrency of %d", gpu, (int)(cgpu->thread_concurrency));
  } else {
    cgpu->thread_concurrency = cgpu->opt_tc;
  }

  cl_uint slot, cpnd;

  slot = cpnd = 0;

  build_data->context = clState->context;
  build_data->device = &devices[gpu];

  // Build information
  strcpy(build_data->source_filename, filename);
  strcpy(build_data->platform, name);
  strcpy(build_data->sgminer_path, sgminer_path);
  if (opt_kernel_path && *opt_kernel_path) {
    build_data->kernel_path = opt_kernel_path;
  } else {
    build_data->kernel_path = NULL;
  }

  build_data->work_size = clState->wsize;
  build_data->has_bit_align = clState->hasBitAlign;

  build_data->opencl_version = get_opencl_version(devices[gpu]);
  build_data->patch_bfi = needs_bfi_patch(build_data);

  strcpy(build_data->binary_filename, (!empty_string(cgpu->algorithm.kernelfile) ? cgpu->algorithm.kernelfile : cgpu->algorithm.name));
  strcat(build_data->binary_filename, name);
  if (clState->goffset)
    strcat(build_data->binary_filename, "g");

  set_base_compiler_options(build_data);
  if (algorithm->set_compile_options)
    algorithm->set_compile_options(build_data, cgpu, algorithm);

  strcat(build_data->binary_filename, ".bin");
  applog(LOG_DEBUG, "Using binary file %s", build_data->binary_filename);

  // Load program from file or build it if it doesn't exist
  if (!(clState->program = load_opencl_binary_kernel(build_data))) {
    applog(LOG_NOTICE, "Building binary %s", build_data->binary_filename);

    if (!(clState->program = build_opencl_kernel(build_data, filename)))
      return NULL;

    if (save_opencl_kernel(build_data, clState->program)) {
      /* Program needs to be rebuilt, because the binary was patched */
      if (build_data->patch_bfi) {
        clReleaseProgram(clState->program);
        clState->program = load_opencl_binary_kernel(build_data);
      }
    } else {
      if (build_data->patch_bfi)
        quit(1, "Could not save kernel to file, but it is necessary to apply BFI patch");
    }
  }

  // Load kernels
  applog(LOG_NOTICE, "Initialising kernel %s with%s bitalign, %spatched BFI, nfactor %d, n %d",
         filename, clState->hasBitAlign ? "" : "out", build_data->patch_bfi ? "" : "un",
         algorithm->nfactor, algorithm->n);

  /* get a kernel object handle for a kernel with the given name */
  clState->kernel = clCreateKernel(clState->program, "search", &status);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: Creating Kernel from program. (clCreateKernel)", status);
    return NULL;
  }

  clState->n_extra_kernels = algorithm->n_extra_kernels;
  if (clState->n_extra_kernels > 0) {
    unsigned int i;
    char kernel_name[9]; // max: search99 + 0x0

    clState->extra_kernels = (cl_kernel *)malloc(sizeof(cl_kernel) * clState->n_extra_kernels);

    for (i = 0; i < clState->n_extra_kernels; i++) {
      snprintf(kernel_name, 9, "%s%d", "search", i + 1);
      clState->extra_kernels[i] = clCreateKernel(clState->program, kernel_name, &status);
      if (status != CL_SUCCESS) {
        applog(LOG_ERR, "Error %d: Creating ExtraKernel #%d from program. (clCreateKernel)", status, i);
        return NULL;
      }
    }
  }

  size_t bufsize;
  size_t readbufsize = 128;

  if (algorithm->rw_buffer_size < 0) {
    // calc buffer size for neoscrypt
    if (!safe_cmp(algorithm->name, "neoscrypt")) {
      /* The scratch/pad-buffer needs 32kBytes memory per thread. */
      bufsize = NEOSCRYPT_SCRATCHBUF_SIZE * cgpu->thread_concurrency;

      /* This is the input buffer. For neoscrypt this is guaranteed to be
       * 80 bytes only. */
      readbufsize = 80;

      applog(LOG_DEBUG, "Neoscrypt buffer sizes: %lu RW, %lu R", (unsigned long)bufsize, (unsigned long)readbufsize);
      // scrypt/n-scrypt
    } else {
      size_t ipt = (algorithm->n / cgpu->lookup_gap + (algorithm->n % cgpu->lookup_gap > 0));
      bufsize = 128 * ipt * cgpu->thread_concurrency;
      applog(LOG_DEBUG, "Scrypt buffer sizes: %lu RW, %lu R", (unsigned long)bufsize, (unsigned long)readbufsize);
    }
  } else {
    bufsize = (size_t)algorithm->rw_buffer_size;
    applog(LOG_DEBUG, "Buffer sizes: %lu RW, %lu R", (unsigned long)bufsize, (unsigned long)readbufsize);
  }

  clState->padbuffer8 = NULL;

  if (bufsize > 0) {
    applog(LOG_DEBUG, "Creating read/write buffer sized %lu", (unsigned long)bufsize);

    /* Use the max alloc value, which was rounded up to a power of 2 >= the
     * required amount earlier. */
    if (bufsize > cgpu->max_alloc) {
      applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu", gpu, (unsigned long)(cgpu->max_alloc));
      applog(LOG_WARNING, "Your settings come to %lu", (unsigned long)bufsize);
    }

    /* This buffer is weird and might work to some degree even if
     * the create buffer call has apparently failed, so check if we
     * get anything back before we call it a failure. */
    clState->padbuffer8 = clCreateBuffer(clState->context, CL_MEM_READ_WRITE, bufsize, NULL, &status);
    if (status != CL_SUCCESS && !clState->padbuffer8) {
      applog(LOG_ERR, "Error %d: clCreateBuffer (padbuffer8), decrease TC or increase LG", status);
      return NULL;
    }
  }

  applog(LOG_DEBUG, "Using read buffer sized %lu", (unsigned long)readbufsize);
  clState->CLbuffer0 = clCreateBuffer(clState->context, CL_MEM_READ_ONLY, readbufsize, NULL, &status);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: clCreateBuffer (CLbuffer0)", status);
    return NULL;
  }

  applog(LOG_DEBUG, "Using output buffer sized %lu", BUFFERSIZE);
  clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, BUFFERSIZE, NULL, &status);
  if (status != CL_SUCCESS) {
    applog(LOG_ERR, "Error %d: clCreateBuffer (outputBuffer)", status);
    return NULL;
  }

  return clState;
}