static void find_best_kpc(void){
    int num;
    cl_event myEvent;
    cl_ulong startTime, endTime, tmpTime;
    int kernelExecTimeNs = 6969;
    cl_int ret_code;
    int optimal_kpc=2048;
    int i = 0;
    cl_uint *tmpbuffer;

    printf("Calculating best keys per crypt, this will take a while ");
    for( num=SSHA_NUM_KEYS; num > 4096 ; num -= 16384){
	release_clobj();
	create_clobj(num);
	advance_cursor();
	queue_prof = clCreateCommandQueue( context[gpu_id], devices[gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
	for (i=0; i < num; i++){
		memcpy(&(saved_plain[i*PLAINTEXT_LENGTH]),"abacaeaf",PLAINTEXT_LENGTH);
	}
	clEnqueueWriteBuffer(queue_prof, mysalt, CL_TRUE, 0, SALT_SIZE, saved_salt, 0, NULL, NULL);
	clEnqueueWriteBuffer(queue_prof, buffer_keys, CL_TRUE, 0, (PLAINTEXT_LENGTH) * num, saved_plain, 0, NULL, NULL);
	ret_code = clEnqueueNDRangeKernel( queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &myEvent);
	if(ret_code != CL_SUCCESS){
		printf("Error %d\n",ret_code);
		continue;
	}
	clFinish(queue_prof);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
	tmpTime = endTime-startTime;
	tmpbuffer = malloc(sizeof(cl_uint) * num);
	clEnqueueReadBuffer(queue_prof, buffer_out, CL_TRUE, 0, sizeof(cl_uint) * num, tmpbuffer, 0, NULL, &myEvent);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
	tmpTime = tmpTime + (endTime-startTime);
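	/* Score = SUBMIT-to-END time of kernel plus result read, per key,
	   scaled by 10 to retain some resolution in integer math; lower wins. */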
	if( ((int)( ((float) (tmpTime) / num) * 10 )) <= kernelExecTimeNs) {
		kernelExecTimeNs = ((int) (((float) (tmpTime) / num) * 10) ) ;
		optimal_kpc = num;
	}
	free(tmpbuffer);
	clReleaseCommandQueue(queue_prof);
    }
    printf("Optimal keys per crypt %d\n(to avoid this test on next run, put \""
           KPC_CONFIG " = %d\" in john.conf, section [" SECTION_OPTIONS
           SUBSECTION_OPENCL "])\n", optimal_kpc, optimal_kpc);
    max_keys_per_crypt = optimal_kpc;
    release_clobj();
    create_clobj(optimal_kpc);
}
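/*
 * A minimal sketch, not part of the original formats: the profiling pattern
 * repeated in these tuning loops, factored into one helper.  It assumes the
 * event comes from a queue created with CL_QUEUE_PROFILING_ENABLE and that
 * the command has already completed (e.g. after clFinish()).  Each
 * "tmpTime = endTime-startTime" pair could then be written as
 * tmpTime = event_time_ns(myEvent).
 */
static cl_ulong event_time_ns(cl_event ev)
{
	cl_ulong start = 0, end = 0;

	/* Both timestamps are reported by the driver in nanoseconds. */
	clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_SUBMIT,
	                        sizeof(cl_ulong), &start, NULL);
	clGetEventProfilingInfo(ev, CL_PROFILING_COMMAND_END,
	                        sizeof(cl_ulong), &end, NULL);
	return end - start;
}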
static void find_best_kpc(void){
	int num;
	cl_event myEvent;
	cl_ulong startTime, endTime, tmpTime;
	int kernelExecTimeNs = 6969;
	cl_int ret_code;
	int optimal_kpc=2048;
	int i = 0;
	cl_uint *tmpbuffer;

	fprintf(stderr, "Calculating best keys per crypt, this will take a while ");
	for( num=MD4_NUM_KEYS; num > 4096 ; num -= 4096){
		release_clobj();
		create_clobj(num);
		advance_cursor();
		queue_prof = clCreateCommandQueue( context[ocl_gpu_id], devices[ocl_gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
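		/* Fill every key slot with a fixed 8-character test key; the 0x80
		   written just past it is the MD4 padding marker the kernel expects
		   at the end of the message. */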
		for (i=0; i < num; i++){
			memcpy(&(saved_plain[i * (PLAINTEXT_LENGTH + 1)]), "abcaaeaf", PLAINTEXT_LENGTH + 1);
			saved_plain[i * (PLAINTEXT_LENGTH + 1) + 8] = 0x80;
		}
		clEnqueueWriteBuffer(queue_prof, data_info, CL_TRUE, 0, sizeof(unsigned int)*2, datai, 0, NULL, NULL);
		clEnqueueWriteBuffer(queue_prof, buffer_keys, CL_TRUE, 0, (PLAINTEXT_LENGTH + 1) * num, saved_plain, 0, NULL, NULL);
		ret_code = clEnqueueNDRangeKernel( queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &myEvent);
		if(ret_code != CL_SUCCESS) {
			HANDLE_CLERROR(ret_code, "Error running kernel in find_best_KPC()");
			continue;
		}
		clFinish(queue_prof);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
		tmpTime = endTime-startTime;
		tmpbuffer = malloc(sizeof(cl_uint) * num);
		clEnqueueReadBuffer(queue_prof, buffer_out, CL_TRUE, 0, sizeof(cl_uint) * num, tmpbuffer, 0, NULL, &myEvent);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
		tmpTime = tmpTime + (endTime-startTime);
		if( ((int)( ((float) (tmpTime) / num) * 10 )) <= kernelExecTimeNs) {
			kernelExecTimeNs = ((int) (((float) (tmpTime) / num) * 10) ) ;
			optimal_kpc = num;
		}
		free(tmpbuffer);
		clReleaseCommandQueue(queue_prof);
	}
	fprintf(stderr, "Optimal keys per crypt %d\n(to avoid this test on next run do \"export GWS=%d\")\n",optimal_kpc,optimal_kpc);
	max_keys_per_crypt = optimal_kpc;
	release_clobj();
	create_clobj(optimal_kpc);
}
static void init(struct fmt_main *self) {
	char *kpc;

	global_work_size = MAX_KEYS_PER_CRYPT;

	opencl_init("$JOHN/md4_kernel.cl", ocl_gpu_id, platform_id);
	crypt_kernel = clCreateKernel(program[ocl_gpu_id], "md4", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
	if( ((kpc = getenv("LWS")) == NULL) || (atoi(kpc) == 0)) {
		create_clobj(MD4_NUM_KEYS);
		opencl_find_best_workgroup(self);
		release_clobj();
	}else {
		local_work_size = atoi(kpc);
	}
	if( (kpc = getenv("GWS")) == NULL){
		max_keys_per_crypt = MD4_NUM_KEYS;
		create_clobj(MD4_NUM_KEYS);
	} else {
		if (atoi(kpc) == 0){
			//user chose to die of boredom
			max_keys_per_crypt = MD4_NUM_KEYS;
			create_clobj(MD4_NUM_KEYS);
			find_best_kpc();
		} else {
			max_keys_per_crypt = atoi(kpc);
			create_clobj(max_keys_per_crypt);
		}
	}
	fprintf(stderr, "Local work size (LWS) %d, Global work size (GWS) %d\n",(int)local_work_size, max_keys_per_crypt);
	self->params.max_keys_per_crypt = max_keys_per_crypt;
}
static void fmt_ssha_init(struct fmt_main *pFmt)
{
	char *kpc;
	opencl_init("$JOHN/ssha_opencl_kernel.cl", gpu_id);

	// create kernel to execute
	crypt_kernel = clCreateKernel(program[gpu_id], "sha1_crypt_kernel", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

	if( (kpc = getenv("LWS")) == NULL){
		create_clobj(SSHA_NUM_KEYS);
		find_best_workgroup();
		release_clobj();
	}else {
		local_work_size = atoi(kpc);
	}
	if( (kpc = getenv("KPC")) == NULL){
		max_keys_per_crypt = SSHA_NUM_KEYS;
		create_clobj(SSHA_NUM_KEYS);
	} else {
		if (atoi(kpc) == 0){
			//user chose to die of boredom
			max_keys_per_crypt = SSHA_NUM_KEYS;
			create_clobj(SSHA_NUM_KEYS);
			find_best_kpc();
		} else {
			max_keys_per_crypt = atoi(kpc);
			create_clobj(max_keys_per_crypt);
		}
	}
	printf("Local work size (LWS) %d, Keys per crypt (KPC) %d\n",(int)local_work_size,max_keys_per_crypt);
	pFmt->params.max_keys_per_crypt = max_keys_per_crypt;
}
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[ocl_gpu_id]), "Release Program");
}
static void init(struct fmt_main *self)
{
	cl_ulong maxsize;
	size_t selected_gws;

	opencl_init_opt("$JOHN/kernels/pwsafe_kernel.cl", ocl_gpu_id, NULL);

	init_kernel = clCreateKernel(program[ocl_gpu_id], KERNEL_INIT_NAME, &ret_code);
	HANDLE_CLERROR(ret_code, "Error while creating init kernel");

	crypt_kernel = clCreateKernel(program[ocl_gpu_id], KERNEL_RUN_NAME, &ret_code);
	HANDLE_CLERROR(ret_code, "Error while creating crypt kernel");

	finish_kernel = clCreateKernel(program[ocl_gpu_id], KERNEL_FINISH_NAME, &ret_code);
	HANDLE_CLERROR(ret_code, "Error while creating finish kernel");

	local_work_size = cpu(device_info[ocl_gpu_id]) ? 1 : 64;
	global_work_size = 0;
	opencl_get_user_preferences(CONFIG_NAME);

	//Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(STEP, ROUNDS_DEFAULT/8, 8, split_events,
		warn, &multi_profilingEvent[3], self, create_clobj, release_clobj,
		sizeof(pwsafe_pass), 0);

	self->methods.crypt_all = crypt_all_benchmark;

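	/* Remember the GWS requested via config/user; the LWS search below may
	   change global_work_size, so it is restored afterwards. */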
	selected_gws = global_work_size;
	/* Note: we ask for the kernels' max sizes, not the device's! */
	maxsize = get_current_work_group_size(ocl_gpu_id, init_kernel);
	maxsize = MIN(get_current_work_group_size(ocl_gpu_id, crypt_kernel),
	              maxsize);
	maxsize = MIN(get_current_work_group_size(ocl_gpu_id, finish_kernel),
	              maxsize);

	while (local_work_size > maxsize)
		local_work_size >>= 1;

	self->params.max_keys_per_crypt = (global_work_size ? global_work_size: MAX_KEYS_PER_CRYPT);

	if (!local_work_size) {
		create_clobj(self->params.max_keys_per_crypt, self);
		find_best_lws(self, ocl_gpu_id);
		release_clobj();
	}
	global_work_size = selected_gws;

	if (global_work_size)
		create_clobj(global_work_size, self);
	else
		//user chose to die of boredom
		find_best_gws(self, ocl_gpu_id);

	self->params.min_keys_per_crypt = local_work_size;
	self->params.max_keys_per_crypt = global_work_size;
	self->methods.crypt_all = crypt_all;

	if (options.verbosity > 2)
		fprintf(stderr, "Local worksize (LWS) %d, Global worksize (GWS) %d\n", (int)local_work_size, (int)global_work_size);
}
static void find_best_kpc(void){
	int num;
	cl_event myEvent;
	cl_ulong startTime, endTime, tmpTime;
	int kernelExecTimeNs = INT_MAX;
	cl_int ret_code;
	int optimal_kpc=2048;
	int i = 0;
	cl_uint *tmpbuffer;

	fprintf(stderr, "Calculating best keys per crypt, this will take a while ");
	for( num=MAX_KEYS_PER_CRYPT; num >= 4096 ; num -= 4096){
		release_clobj();
		create_clobj(num);
		advance_cursor();
		queue_prof = clCreateCommandQueue( context[ocl_gpu_id], devices[ocl_gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
		for (i=0; i < num; i++){
			strcpy(&(saved_plain[i * keybuf_size]), tests[0].plaintext);
		}
		clEnqueueWriteBuffer(queue_prof, buffer_keys, CL_TRUE, 0, keybuf_size * num, saved_plain, 0, NULL, NULL);
		ret_code = clEnqueueNDRangeKernel( queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &myEvent);
		if(ret_code != CL_SUCCESS){
			fprintf(stderr, "Error %d\n",ret_code);
			continue;
		}
		clFinish(queue_prof);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
		tmpTime = endTime-startTime;
		tmpbuffer = mem_alloc(sizeof(cl_uint) * num);
		clEnqueueReadBuffer(queue_prof, buffer_out, CL_TRUE, 0, sizeof(cl_uint) * num, tmpbuffer, 0, NULL, &myEvent);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
		clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
		tmpTime = tmpTime + (endTime-startTime);
		if( ((int)( ((float) (tmpTime) / num) * 10 )) <= kernelExecTimeNs) {
			kernelExecTimeNs = ((int) (((float) (tmpTime) / num) * 10) ) ;
			optimal_kpc = num;
		}
		MEM_FREE(tmpbuffer);
		clReleaseCommandQueue(queue_prof);
	}
	fprintf(stderr, "Optimal keys per crypt %d\n(to avoid this test on next run do export GWS=%d)\n",optimal_kpc,optimal_kpc);
	global_work_size = optimal_kpc;
	release_clobj();
	create_clobj(optimal_kpc);
}
static void find_best_gws(int do_benchmark, struct fmt_main *self)
{
    cl_event myEvent;
    cl_ulong startTime, endTime, tmpTime;
    int kernelExecTimeNs = INT_MAX;
    cl_int ret_code;
    int optimal_kpc=2048;
    int gws, i = 0;
    cl_uint *tmpbuffer;

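    /*
     * Grow the global work size geometrically from 4 * LWS up to 8M
     * work-items, stop at the first size the driver rejects, and keep the
     * size with the lowest device time per key.
     */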
    for(gws = local_work_size << 2; gws <= 8*1024*1024; gws <<= 1) {
        create_clobj(gws, self);
        advance_cursor();
        queue_prof = clCreateCommandQueue( context[ocl_gpu_id], devices[ocl_gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
        for (i=0; i < gws; i++) {
            memcpy(&(saved_plain[i*PLAINTEXT_LENGTH]),"abacaeaf",PLAINTEXT_LENGTH);
        }
        clEnqueueWriteBuffer(queue_prof, mysalt, CL_TRUE, 0, SALT_SIZE, saved_salt, 0, NULL, NULL);
        clEnqueueWriteBuffer(queue_prof, buffer_keys, CL_TRUE, 0, (PLAINTEXT_LENGTH) * gws, saved_plain, 0, NULL, NULL);
        ret_code = clEnqueueNDRangeKernel( queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &myEvent);
        if(ret_code != CL_SUCCESS) {
            // We hit some resource limit so we end here.
            release_clobj();
            break;
        }
        clFinish(queue_prof);
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
        tmpTime = endTime-startTime;
        tmpbuffer = malloc(sizeof(cl_uint) * gws);
        clEnqueueReadBuffer(queue_prof, buffer_out, CL_TRUE, 0, sizeof(cl_uint) * gws, tmpbuffer, 0, NULL, &myEvent);
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
        clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END  , sizeof(cl_ulong), &endTime  , NULL);
        tmpTime = tmpTime + (endTime-startTime);
        if (do_benchmark)
            fprintf(stderr, "%10d %10llu c/s %-.2f us\n", gws, gws * 1000000000ULL / tmpTime, tmpTime / 1000.0);
        if( ((int)( ((float) (tmpTime) / gws) * 10 )) <= kernelExecTimeNs) {
            kernelExecTimeNs = ((int) (((float) (tmpTime) / gws) * 10) ) ;
            optimal_kpc = gws;
        }
        MEM_FREE(tmpbuffer);
        clReleaseCommandQueue(queue_prof);
        release_clobj();
    }
    global_work_size = optimal_kpc;
}
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(krb5pa_md5_nthash), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
static void done(void)
{
    release_clobj();
    HANDLE_CLERROR(clReleaseKernel(pbkdf1_init), "Release kernel");
    HANDLE_CLERROR(clReleaseKernel(pbkdf1_loop), "Release kernel");
    HANDLE_CLERROR(clReleaseKernel(pbkdf1_final), "Release kernel");
    HANDLE_CLERROR(clReleaseProgram(program[gpu_id]),
                   "Release Program");
}
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(oldoffice_utf16), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(oldoffice_md5), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(oldoffice_sha1), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
static void fmt_ssha_init(struct fmt_main *self)
{
    char *temp;
    cl_ulong maxsize;

    global_work_size = 0;

    opencl_init("$JOHN/ssha_kernel.cl", ocl_gpu_id, platform_id);

    // create kernel to execute
    crypt_kernel = clCreateKernel(program[ocl_gpu_id], "sha1_crypt_kernel", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

    HANDLE_CLERROR(clGetKernelWorkGroupInfo(crypt_kernel, devices[ocl_gpu_id], CL_KERNEL_WORK_GROUP_SIZE, sizeof(maxsize), &maxsize, NULL), "Query max work group size");

    if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL, LWS_CONFIG)))
        local_work_size = atoi(temp);

    if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL, GWS_CONFIG)))
        global_work_size = atoi(temp);

    if ((temp = getenv("LWS")))
        local_work_size = atoi(temp);

    if ((temp = getenv("GWS")))
        global_work_size = atoi(temp);

    if (!local_work_size) {
        int saved_gws = global_work_size;
        local_work_size = maxsize;
        global_work_size = global_work_size ? global_work_size : 4 * maxsize;
        create_clobj(global_work_size, self);
        opencl_find_best_workgroup_limit(self, maxsize);
        release_clobj();
        global_work_size = saved_gws;
    }

    if (local_work_size > maxsize) {
        fprintf(stderr, "LWS %d is too large for this GPU. Max allowed is %d, using that.\n", (int)local_work_size, (int)maxsize);
        local_work_size = maxsize;
    }

    if (!global_work_size)
        find_best_gws(getenv("GWS") == NULL ? 0 : 1, self);

    if (global_work_size < local_work_size)
        global_work_size = local_work_size;

    fprintf(stderr, "Local worksize (LWS) %d, Global worksize (GWS) %d\n", (int)local_work_size, (int)global_work_size);
    create_clobj(global_work_size, self);
    atexit(release_clobj);
}
cl_ulong gws_test(int gws)
{
	cl_ulong startTime, endTime, run_time;
	cl_command_queue queue_prof;
	cl_event myEvent;
	cl_int ret_code;
	int i;
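	/* With a vectorized kernel each work-item processes VF keys, so the
	   key, length and IV/key output buffers hold VF * gws entries. */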
	int num = VF * gws;

	create_clobj(gws);
	queue_prof = clCreateCommandQueue(context[ocl_gpu_id], devices[ocl_gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
	for (i = 0; i < num; i++)
		set_key(rar_fmt.params.tests[0].plaintext, i);
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, cl_salt, BLOCK_IF_DEBUG, 0, 8, saved_salt, 0, NULL, NULL), "Failed transferring salt");
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, cl_saved_key, BLOCK_IF_DEBUG, 0, UNICODE_LENGTH * num, saved_key, 0, NULL, NULL), "Failed transferring keys");
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, cl_saved_len, BLOCK_IF_DEBUG, 0, sizeof(int) * num, saved_len, 0, NULL, NULL), "Failed transferring lengths");
	ret_code = clEnqueueNDRangeKernel(queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &myEvent);
	if (ret_code != CL_SUCCESS) {
		fprintf(stderr, "Error: %s\n", get_error_name(ret_code));
		clReleaseCommandQueue(queue_prof);
		release_clobj();
		return 0;
	}
	HANDLE_CLERROR(clFinish(queue_prof), "Failed running kernel");
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime, NULL);
	run_time = endTime - startTime;
	HANDLE_CLERROR(clEnqueueReadBuffer(queue_prof, cl_aes_iv, BLOCK_IF_DEBUG, 0, 16 * num, aes_iv, 0, NULL, &myEvent), "Failed reading iv back");
	HANDLE_CLERROR(clEnqueueReadBuffer(queue_prof, cl_aes_key, BLOCK_IF_DEBUG, 0, 16 * num, aes_key, 0, NULL, &myEvent), "Failed reading key back");
	HANDLE_CLERROR(clFinish(queue_prof), "Failed reading results back");
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime, NULL);
	clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime, NULL);
	clReleaseCommandQueue(queue_prof);
	release_clobj();

	return (run_time + endTime - startTime);
}
static void fmt_ssha_init(struct fmt_main *pFmt)
{
	char *temp;
	opencl_init("$JOHN/ssha_kernel.cl", gpu_id, platform_id);

	// create kernel to execute
	crypt_kernel = clCreateKernel(program[gpu_id], "sha1_crypt_kernel", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

	if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL,
	                          LWS_CONFIG)))
		local_work_size = atoi(temp);

	if ((temp = getenv("LWS")))
		local_work_size = atoi(temp);

	if (!local_work_size) {
		create_clobj(SSHA_NUM_KEYS);
		find_best_workgroup();
		release_clobj();
	}

	if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL,
	                          KPC_CONFIG)))
		max_keys_per_crypt = atoi(temp);
	else
		max_keys_per_crypt = SSHA_NUM_KEYS;

	if ((temp = getenv("KPC")))
		max_keys_per_crypt = atoi(temp);

	if (max_keys_per_crypt) {
		create_clobj(max_keys_per_crypt);
	} else {
		//user chose to die of boredom
		max_keys_per_crypt = SSHA_NUM_KEYS;
		create_clobj(SSHA_NUM_KEYS);
		find_best_kpc();
	}
	printf("Local work size (LWS) %d, Keys per crypt (KPC) %d\n",(int)local_work_size,max_keys_per_crypt);
	pFmt->params.max_keys_per_crypt = max_keys_per_crypt;
}
/* ------- Initialization  ------- */
static void init(struct fmt_main * self) {
    char * tmp_value;
    char * task = "$JOHN/sha256_kernel.cl";

    opencl_init_dev(ocl_gpu_id, platform_id);
    source_in_use = device_info[ocl_gpu_id];

    if ((tmp_value = getenv("_TYPE")))
        source_in_use = atoi(tmp_value);

    opencl_build_kernel(task, ocl_gpu_id);

    // create kernel(s) to execute
    crypt_kernel = clCreateKernel(program[ocl_gpu_id], "kernel_crypt", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
    cmp_kernel = clCreateKernel(program[ocl_gpu_id], "kernel_cmp", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel_cmp. Double-check kernel name?");

    global_work_size = get_task_max_size();
    local_work_size = 0;

    if (source_in_use != device_info[ocl_gpu_id]) {
        device_info[ocl_gpu_id] = source_in_use;
        fprintf(stderr, "Selected runtime id %d, source (%s)\n", source_in_use, task);
    }

    if ((tmp_value = cfg_get_param(SECTION_OPTIONS,
                                   SUBSECTION_OPENCL, LWS_CONFIG)))
        local_work_size = atoi(tmp_value);

    if ((tmp_value = getenv("LWS")))
        local_work_size = atoi(tmp_value);

    //Check if local_work_size is a valid number.
    if (local_work_size > get_task_max_work_group_size()){
        fprintf(stderr, "Error: invalid local work size (LWS). Max value allowed is: %zd\n" ,
               get_task_max_work_group_size());
        local_work_size = 0; //Force find a valid number.
    }
    self->params.max_keys_per_crypt = global_work_size;

    if (!local_work_size) {
        local_work_size = get_task_max_work_group_size();
        create_clobj(global_work_size, self);
        find_best_workgroup(self);
        release_clobj();
    }

    if ((tmp_value = cfg_get_param(SECTION_OPTIONS,
                                   SUBSECTION_OPENCL, GWS_CONFIG)))
        global_work_size = atoi(tmp_value);

    if ((tmp_value = getenv("GWS")))
        global_work_size = atoi(tmp_value);

    //Check if a valid multiple is used.
    global_work_size = get_multiple(global_work_size, local_work_size);

    if (global_work_size)
        create_clobj(global_work_size, self);

    else {
        //user chose to die of boredom
        global_work_size = get_task_max_size();
        find_best_gws(self);
    }
    fprintf(stderr, "Local work size (LWS) %d, global work size (GWS) %zd\n",
           (int) local_work_size, global_work_size);
    self->params.max_keys_per_crypt = global_work_size;
}
//Run the timing test for one candidate size (num keys) and return its device time.
static cl_ulong gws_test(size_t num, struct fmt_main * self) {

    cl_event myEvent;
    cl_int ret_code;
    cl_uint *tmpbuffer;
    cl_ulong startTime, endTime, runtime;
    int i;

    //Prepare buffers.
    create_clobj(num, self);

    tmpbuffer = mem_alloc(sizeof(sha256_hash) * num);

    if (tmpbuffer == NULL) {
        fprintf(stderr, "Malloc failure in find_best_gws\n");
        exit(EXIT_FAILURE);
    }

    queue_prof = clCreateCommandQueue(context[ocl_gpu_id], devices[ocl_gpu_id],
            CL_QUEUE_PROFILING_ENABLE, &ret_code);
    HANDLE_CLERROR(ret_code, "Failed in clCreateCommandQueue");

    // Set keys
    for (i = 0; i < num; i++) {
        set_key("aaabaabaaa", i);
    }
    //** Time the plaintext transfer **//
    HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, pass_buffer, CL_FALSE, 0,
            sizeof(sha256_password) * num, plaintext, 0, NULL, &myEvent),
            "Failed in clEnqueueWriteBuffer");

    HANDLE_CLERROR(clFinish(queue_prof), "Failed in clFinish");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT,
            sizeof(cl_ulong), &startTime, NULL),
            "Failed in clGetEventProfilingInfo I");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,
            sizeof(cl_ulong), &endTime, NULL),
            "Failed in clGetEventProfilingInfo II");
    HANDLE_CLERROR(clReleaseEvent(myEvent), "Failed in clReleaseEvent");
    runtime = endTime - startTime;

    //** Time the crypt kernel **//
    ret_code = clEnqueueNDRangeKernel(queue_prof, crypt_kernel,
            1, NULL, &num, &local_work_size, 0, NULL, &myEvent);

    HANDLE_CLERROR(clFinish(queue_prof), "Failed in clFinish");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT,
            sizeof(cl_ulong), &startTime, NULL),
            "Failed in clGetEventProfilingInfo I");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,
            sizeof(cl_ulong), &endTime, NULL),
            "Failed in clGetEventProfilingInfo II");
    HANDLE_CLERROR(clReleaseEvent(myEvent), "Failed in clReleaseEvent");
    runtime += endTime - startTime;

    //** Time the hash read-back **//
    HANDLE_CLERROR(clEnqueueReadBuffer(queue_prof, hash_buffer, CL_FALSE, 0,
            sizeof(uint32_t) * num, tmpbuffer, 0, NULL, &myEvent),
            "Failed in clEnqueueReadBuffer");

    HANDLE_CLERROR(clFinish(queue_prof), "Failed in clFinish");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_SUBMIT,
            sizeof(cl_ulong), &startTime, NULL),
            "Failed in clGetEventProfilingInfo I");
    HANDLE_CLERROR(clGetEventProfilingInfo(myEvent, CL_PROFILING_COMMAND_END,
            sizeof(cl_ulong), &endTime, NULL),
            "Failed in clGetEventProfilingInfo II");
    HANDLE_CLERROR(clReleaseEvent(myEvent), "Failed in clReleaseEvent");
    runtime += endTime - startTime;

    MEM_FREE(tmpbuffer);
    HANDLE_CLERROR(clReleaseCommandQueue(queue_prof),
            "Failed in clReleaseCommandQueue");
    release_clobj();

    if (ret_code != CL_SUCCESS) {
        if (ret_code != CL_INVALID_WORK_GROUP_SIZE)
            fprintf(stderr, "Error %d\n", ret_code);
        return 0;
    }
    return runtime;
}
/* ------- Initialization  ------- */
static void init(struct fmt_main * self) {
    char * tmp_value;
    char * task = "$JOHN/cryptsha512_kernel_DEFAULT.cl";
    uint64_t startTime, runtime;

    opencl_init_dev(ocl_gpu_id, platform_id);
    startTime = (unsigned long) time(NULL);
    source_in_use = device_info[ocl_gpu_id];

    if ((tmp_value = getenv("_TYPE")))
        source_in_use = atoi(tmp_value);

    if ((tmp_value = getenv("_FAST")))
        fast_mode = TRUE;

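    // Pick the kernel source to build: the LOCAL or GPU variants when the
    // device qualifies, otherwise the DEFAULT kernel.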
    if (use_local(source_in_use))
            task = "$JOHN/cryptsha512_kernel_LOCAL.cl";
    else if (gpu(source_in_use)) {
        fprintf(stderr, "Building the kernel, this could take a while\n");
        task = "$JOHN/cryptsha512_kernel_GPU.cl";
    }
    fflush(stdout);
    opencl_build_kernel(task, ocl_gpu_id);

    if ((runtime = (unsigned long) (time(NULL) - startTime)) > 2UL)
        fprintf(stderr, "Elapsed time: %lu seconds\n", runtime);
    fflush(stdout);

    // create kernel(s) to execute
    crypt_kernel = clCreateKernel(program[ocl_gpu_id], "kernel_crypt", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

    if (gpu(source_in_use) || use_local(source_in_use)) {
        prepare_kernel = clCreateKernel(program[ocl_gpu_id], "kernel_prepare", &ret_code);
        HANDLE_CLERROR(ret_code, "Error creating kernel_prepare. Double-check kernel name?");
        final_kernel = clCreateKernel(program[ocl_gpu_id], "kernel_final", &ret_code);
        HANDLE_CLERROR(ret_code, "Error creating kernel_final. Double-check kernel name?");
    }
    global_work_size = get_task_max_size();
    local_work_size = get_default_workgroup();

    if (source_in_use != device_info[ocl_gpu_id])
        fprintf(stderr, "Selected runtime id %d, source (%s)\n", source_in_use, task);

    if ((tmp_value = cfg_get_param(SECTION_OPTIONS,
                                   SUBSECTION_OPENCL, LWS_CONFIG)))
        local_work_size = atoi(tmp_value);

    if ((tmp_value = getenv("LWS")))
        local_work_size = atoi(tmp_value);

    //Check if local_work_size is a valid number.
    if (local_work_size > get_task_max_work_group_size()){
        local_work_size = 0; //Force find a valid number.
    }
    self->params.max_keys_per_crypt = global_work_size;

    if (!local_work_size) {
        local_work_size = get_task_max_work_group_size();
        create_clobj(global_work_size, self);
        find_best_workgroup(self);
        release_clobj();
    }

    if ((tmp_value = cfg_get_param(SECTION_OPTIONS,
                                   SUBSECTION_OPENCL, GWS_CONFIG)))
        global_work_size = atoi(tmp_value);

    if ((tmp_value = getenv("GWS")))
        global_work_size = atoi(tmp_value);

    //Check if a valid multiple is used.
    global_work_size = get_multiple(global_work_size, local_work_size);

    if (global_work_size)
        create_clobj(global_work_size, self);

    else {
        //user chose to die of boredom
        global_work_size = get_task_max_size();
        find_best_gws(self);
    }
    fprintf(stderr, "Local work size (LWS) %d, global work size (GWS) %zd\n",
           (int) local_work_size, global_work_size);
    self->params.max_keys_per_crypt = global_work_size;
}
static void init(struct fmt_main *self)
{
    char *temp;
    cl_ulong maxsize, maxsize2;
    char build_opts[64];

    global_work_size = 0;

    snprintf(build_opts, sizeof(build_opts),
             "-DHASH_LOOPS=%u -DUNICODE_LENGTH=%u %s",
             HASH_LOOPS,
             UNICODE_LENGTH,
             (options.flags & FLG_VECTORIZE) ? "-DVECTORIZE" :
             (options.flags & FLG_SCALAR) ? "-DSCALAR" : "");
    opencl_init_opt("$JOHN/office2007_kernel.cl", ocl_gpu_id, platform_id, build_opts);

    // create kernel to execute
    GenerateSHA1pwhash = clCreateKernel(program[ocl_gpu_id], "GenerateSHA1pwhash", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
    crypt_kernel = clCreateKernel(program[ocl_gpu_id], "HashLoop", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");
    Generate2007key = clCreateKernel(program[ocl_gpu_id], "Generate2007key", &ret_code);
    HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?");

    if (options.flags & FLG_VECTORIZE) {
        /* Run vectorized code */
        VF = 4;
        self->params.algorithm_name = "OpenCL 4x";
    }

    if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL, LWS_CONFIG)))
        local_work_size = atoi(temp);

    if ((temp = cfg_get_param(SECTION_OPTIONS, SUBSECTION_OPENCL, GWS_CONFIG)))
        global_work_size = atoi(temp);

    if ((temp = getenv("LWS")))
        local_work_size = atoi(temp);

    if ((temp = getenv("GWS")))
        global_work_size = atoi(temp);

    /* Note: we ask for the kernels' max sizes, not the device's! */
    HANDLE_CLERROR(clGetKernelWorkGroupInfo(GenerateSHA1pwhash, devices[ocl_gpu_id], CL_KERNEL_WORK_GROUP_SIZE, sizeof(maxsize), &maxsize, NULL), "Query max work group size");
    HANDLE_CLERROR(clGetKernelWorkGroupInfo(crypt_kernel, devices[ocl_gpu_id], CL_KERNEL_WORK_GROUP_SIZE, sizeof(maxsize2), &maxsize2, NULL), "Query max work group size");
    if (maxsize2 < maxsize) maxsize = maxsize2;
    HANDLE_CLERROR(clGetKernelWorkGroupInfo(Generate2007key, devices[ocl_gpu_id], CL_KERNEL_WORK_GROUP_SIZE, sizeof(maxsize2), &maxsize2, NULL), "Query max work group size");
    if (maxsize2 < maxsize) maxsize = maxsize2;

#if 0
    /* Our use of local memory sets a limit for LWS */
    maxsize2 = get_local_memory_size(ocl_gpu_id) / (24 * VF);
    while (maxsize > maxsize2)
        maxsize >>= 1;
#endif

    /* maxsize is the lowest figure from the three different kernels */
    if (!local_work_size) {
        if (getenv("LWS")) {
            /* LWS was explicitly set to 0 */
            int saved_gws = global_work_size;
            local_work_size = maxsize;
            global_work_size = global_work_size ? global_work_size : 4 * maxsize;
            create_clobj(global_work_size, self);
            opencl_find_best_workgroup_limit(self, maxsize);
            release_clobj();
            global_work_size = saved_gws;
        } else {
            if (cpu(device_info[ocl_gpu_id])) {
                if (get_platform_vendor_id(platform_id) == DEV_INTEL)
                    local_work_size = MIN(maxsize, 8);
                else
                    local_work_size = 1;
            } else
                local_work_size = MIN(maxsize, 64);
        }
    }

    if (local_work_size > maxsize) {
        fprintf(stderr, "LWS %d is too large for this GPU. Max allowed is %d, using that.\n", (int)local_work_size, (int)maxsize);
        local_work_size = maxsize;
    }

    if (!global_work_size)
        find_best_gws(getenv("GWS") == NULL ? 0 : 1, self);

    if (global_work_size < local_work_size)
        global_work_size = local_work_size;

    fprintf(stderr, "Local worksize (LWS) %d, Global worksize (GWS) %d\n", (int)local_work_size, (int)global_work_size);
    create_clobj(global_work_size, self);
    atexit(release_clobj);

    if (options.utf8)
        self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
static cl_ulong gws_test(int gws, int do_benchmark, struct fmt_main *self)
{
    cl_ulong startTime, endTime;
    cl_command_queue queue_prof;
    cl_event Event[6];
    cl_int ret_code;
    int i;
    size_t scalar_gws = VF * gws;

    create_clobj(gws, self);
    queue_prof = clCreateCommandQueue(context[ocl_gpu_id], devices[ocl_gpu_id], CL_QUEUE_PROFILING_ENABLE, &ret_code);
    for (i = 0; i < scalar_gws; i++)
        set_key(tests[0].plaintext, i);
    set_salt(get_salt(tests[0].ciphertext));

    HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, cl_saved_key, CL_TRUE, 0, UNICODE_LENGTH * scalar_gws, saved_key, 0, NULL, &Event[0]), "Failed transferring keys");
    HANDLE_CLERROR(clEnqueueWriteBuffer(queue_prof, cl_saved_len, CL_TRUE, 0, sizeof(int) * scalar_gws, saved_len, 0, NULL, &Event[1]), "Failed transferring lengths");

    HANDLE_CLERROR(clEnqueueNDRangeKernel(queue_prof, GenerateSHA1pwhash, 1, NULL, &scalar_gws, &local_work_size, 0, NULL, &Event[2]), "running kernel");

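    /* The 50000-iteration key derivation is split into 50000/HASH_LOOPS
       kernel calls; only the last call is profiled (Event[3]) and the total
       is extrapolated from it below. */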
    for (i = 0; i < 50000 / HASH_LOOPS - 1; i++)
        HANDLE_CLERROR(clEnqueueNDRangeKernel(queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL), "running kernel");

    HANDLE_CLERROR(clEnqueueNDRangeKernel(queue_prof, crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &Event[3]), "running kernel");

    HANDLE_CLERROR(clEnqueueNDRangeKernel(queue_prof, Generate2007key, 1, NULL, &global_work_size, &local_work_size, 0, NULL, &Event[4]), "running kernel");

    HANDLE_CLERROR(clEnqueueReadBuffer(queue_prof, cl_key, CL_TRUE, 0, 16 * scalar_gws, key, 0, NULL, &Event[5]), "failed in reading key back");

#if 0
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[2],
                                           CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime,
                                           NULL), "Failed to get profiling info");
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[2],
                                           CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime,
                                           NULL), "Failed to get profiling info");
    fprintf(stderr, "GenerateSHA1pwhash kernel duration: %llu us, ", (endTime-startTime)/1000ULL);
#endif

    HANDLE_CLERROR(clGetEventProfilingInfo(Event[3],
                                           CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime,
                                           NULL), "Failed to get profiling info");
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[3],
                                           CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime,
                                           NULL), "Failed to get profiling info");
    if (do_benchmark)
        fprintf(stderr, "%.2f ms x %u = %.2f s\t", (float)((endTime - startTime)/1000000.), 50000/HASH_LOOPS, (float)(50000/HASH_LOOPS) * (endTime - startTime) / 1000000000.);

    /* 200 ms duration limit for GCN to avoid ASIC hangs */
    if (amd_gcn(device_info[ocl_gpu_id]) && endTime - startTime > 200000000) {
        if (do_benchmark)
            fprintf(stderr, "- exceeds 200 ms\n");
        clReleaseCommandQueue(queue_prof);
        release_clobj();
        return 0;
    }

#if 0
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[4],
                                           CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &startTime,
                                           NULL), "Failed to get profiling info");
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[4],
                                           CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime,
                                           NULL), "Failed to get profiling info");
    fprintf(stderr, "Generate2007key kernel duration: %llu us\n", (endTime-startTime)/1000ULL);
#endif

    HANDLE_CLERROR(clGetEventProfilingInfo(Event[0],
                                           CL_PROFILING_COMMAND_SUBMIT, sizeof(cl_ulong), &startTime,
                                           NULL), "Failed to get profiling info");
    HANDLE_CLERROR(clGetEventProfilingInfo(Event[5],
                                           CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &endTime,
                                           NULL), "Failed to get profiling info");
    clReleaseCommandQueue(queue_prof);
    release_clobj();

    return (endTime - startTime);
}