/* Entry point for a device worker thread: dispatches a DeviceTask to the
 * matching handler (film conversion, shader evaluation, or tile rendering). */
void thread_run(DeviceTask *task)
{
  if (task->type == DeviceTask::FILM_CONVERT) {
    film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
    return;
  }

  if (task->type == DeviceTask::SHADER) {
    shader(*task);
    return;
  }

  if (task->type == DeviceTask::RENDER) {
    RenderTile tile;
    DenoisingTask denoising(this, *task);

    /* Keep pulling tiles from the task until none remain. */
    while (task->acquire_tile(this, tile)) {
      if (tile.task == RenderTile::PATH_TRACE) {
        const int first_sample = tile.start_sample;
        const int last_sample = tile.start_sample + tile.num_samples;

        for (int sample = first_sample; sample < last_sample; sample++) {
          /* On cancel, stop early unless the task requires the queue to
           * be finished regardless. */
          if (task->get_cancel() && !task->need_finish_queue)
            break;

          path_trace(tile, sample);

          tile.sample = sample + 1;
          task->update_progress(&tile, tile.w * tile.h);
        }

        /* Complete kernel execution before release tile. */
        /* This helps in multi-device render;
         * The device that reaches the critical-section function
         * release_tile waits (stalling other devices from entering
         * release_tile) for all kernels to complete. If device1 (a
         * slow-render device) reaches release_tile first then it would
         * stall device2 (a fast-render device) from proceeding to render
         * next tile.
         */
        clFinish(cqCommandQueue);
      }
      else if (tile.task == RenderTile::DENOISE) {
        tile.sample = tile.start_sample + tile.num_samples;
        denoise(tile, denoising);
        task->update_progress(&tile, tile.w * tile.h);
      }

      task->release_tile(tile);
    }
  }
}
/* Entry point for a device worker thread (split-kernel variant).
 *
 * Flushes any pending texture updates to the device, then dispatches the
 * DeviceTask: film conversion, shader evaluation, or tile rendering via the
 * split kernel.
 *
 * Fix: dropped the tautological `assert(tile.task == RenderTile::PATH_TRACE)`
 * that sat directly inside the `if(tile.task == RenderTile::PATH_TRACE)`
 * branch — it could never fire and was dead code. */
void thread_run(DeviceTask *task)
{
  flush_texture_buffers();

  if (task->type == DeviceTask::FILM_CONVERT) {
    film_convert(*task, task->buffer, task->rgba_byte, task->rgba_half);
  }
  else if (task->type == DeviceTask::SHADER) {
    shader(*task);
  }
  else if (task->type == DeviceTask::RENDER) {
    RenderTile tile;
    DenoisingTask denoising(this);

    /* Allocate buffer for kernel globals. */
    device_only_memory<KernelGlobalsDummy> kgbuffer(this, "kernel_globals");
    kgbuffer.alloc_to_device(1);

    /* Keep rendering tiles until done. */
    while (task->acquire_tile(this, tile)) {
      if (tile.task == RenderTile::PATH_TRACE) {
        /* Time the whole tile render into the tile's buffers. */
        scoped_timer timer(&tile.buffers->render_time);

        split_kernel->path_trace(task, tile, kgbuffer, *const_mem_map["__data"]);

        /* Complete kernel execution before release tile. */
        /* This helps in multi-device render;
         * The device that reaches the critical-section function
         * release_tile waits (stalling other devices from entering
         * release_tile) for all kernels to complete. If device1 (a
         * slow-render device) reaches release_tile first then it would
         * stall device2 (a fast-render device) from proceeding to render
         * next tile.
         */
        clFinish(cqCommandQueue);
      }
      else if (tile.task == RenderTile::DENOISE) {
        tile.sample = tile.start_sample + tile.num_samples;
        denoise(tile, denoising, *task);
        task->update_progress(&tile, tile.w * tile.h);
      }

      task->release_tile(tile);
    }

    kgbuffer.free();
  }
}
/* Generate a ROW_NUM x COL_NUM grid of random digits in [0, DIM), write it
 * to raw.dat, read it back, run denoising() on it, and render the result to
 * image.dat as ASCII-art characters taken from `dim`.
 *
 * Returns 0 on success; exits with EXIT_FAILURE on any file or data error.
 *
 * Fixes: fscanf() results are now checked (previously garbage could be read
 * into `buffer` from a truncated/malformed file), values read back from the
 * file are bounds-checked before being used as an index into dim[DIM]
 * (previously a tampered raw.dat could cause an out-of-bounds read), and
 * error diagnostics go to stderr instead of stdout. */
int main(void)
{
    FILE *fp_raw;
    FILE *fp_image;
    int digit = 0;
    int i = 0;
    int j = 0;
    char dim[DIM] = {' ', '.', ',', ':', ';', '~', '=', '*', '%', '#'};
    int buffer[ROW_NUM][COL_NUM];

    if ((fp_raw = fopen("raw.dat", "w+")) == NULL) {
        fprintf(stderr, "Can't open file raw.dat!\n");
        exit(EXIT_FAILURE);
    }

    /* Encoding: generate random digits and write them out row by row. */
    srand(time(NULL));
    for (i = 0; i < ROW_NUM; i++) {
        for (j = 0; j < COL_NUM; j++) {
            digit = rand() % DIM;
            fprintf(fp_raw, "%d ", digit);
        }
        fprintf(fp_raw, "\n");
    }
    fclose(fp_raw);

    /* Denoising: read the grid back in, validating every value. */
    if ((fp_raw = fopen("raw.dat", "r")) == NULL) {
        fprintf(stderr, "Can't open file raw.dat!\n");
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < ROW_NUM; i++) {
        for (j = 0; j < COL_NUM; j++) {
            if (fscanf(fp_raw, "%d", &buffer[i][j]) != 1) {
                fprintf(stderr, "Malformed data in raw.dat at row %d, col %d!\n", i, j);
                fclose(fp_raw);
                exit(EXIT_FAILURE);
            }
            /* Each value is later used as an index into dim[DIM]. */
            if (buffer[i][j] < 0 || buffer[i][j] >= DIM) {
                fprintf(stderr, "Value out of range in raw.dat at row %d, col %d!\n", i, j);
                fclose(fp_raw);
                exit(EXIT_FAILURE);
            }
        }
    }
    fclose(fp_raw);

    denoising(buffer);

    /* Decoding: map each digit to its display character and write the image. */
    if ((fp_image = fopen("image.dat", "w+")) == NULL) {
        fprintf(stderr, "Can't open file image.dat!\n");
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < ROW_NUM; i++) {
        for (j = 0; j < COL_NUM; j++) {
            fprintf(fp_image, "%c ", dim[buffer[i][j]]);
        }
        fprintf(fp_image, "\n");
    }
    fclose(fp_image);

    return 0;
}
/* Render a DeviceTask on a CPU worker thread.
 *
 * Constructs per-thread KernelGlobals in placement storage, optionally loads
 * the CPU split kernel, then processes tiles (path trace or denoise) until
 * the task has no more tiles or is canceled.
 *
 * Fix: the split-kernel load-failure path previously freed kgbuffer without
 * calling profiler.remove_state() or the KernelGlobals destructor, unlike
 * the normal teardown at the end of the function — leaving the profiler
 * holding a pointer into freed memory and leaking anything the destructor
 * releases. The failure path now mirrors the normal teardown exactly. */
void thread_render(DeviceTask &task)
{
  /* Bail out on cancel, unless the task requires the queue to be finished. */
  if (task_pool.canceled()) {
    if (task.need_finish_queue == false)
      return;
  }

  /* Allocate backing storage and construct this thread's kernel globals
   * in place. */
  device_only_memory<KernelGlobals> kgbuffer(this, "kernel_globals");
  kgbuffer.alloc_to_device(1);
  KernelGlobals *kg = new ((void *)kgbuffer.device_pointer)
      KernelGlobals(thread_kernel_globals_init());

  profiler.add_state(&kg->profiler);

  CPUSplitKernel *split_kernel = NULL;
  if (use_split_kernel) {
    split_kernel = new CPUSplitKernel(this);
    if (!split_kernel->load_kernels(requested_features)) {
      /* Mirror the normal teardown below: unregister from the profiler and
       * run the KernelGlobals destructor before releasing its storage. */
      profiler.remove_state(&kg->profiler);
      thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
      kg->~KernelGlobals();
      kgbuffer.free();
      delete split_kernel;
      return;
    }
  }

  RenderTile tile;
  DenoisingTask denoising(this, task);
  denoising.profiler = &kg->profiler;

  /* Keep pulling tiles until the task runs out of them. */
  while (task.acquire_tile(this, tile)) {
    if (tile.task == RenderTile::PATH_TRACE) {
      if (use_split_kernel) {
        device_only_memory<uchar> void_buffer(this, "void_buffer");
        split_kernel->path_trace(&task, tile, kgbuffer, void_buffer);
      }
      else {
        path_trace(task, tile, kg);
      }
    }
    else if (tile.task == RenderTile::DENOISE) {
      denoise(denoising, tile);
      task.update_progress(&tile, tile.w * tile.h);
    }

    task.release_tile(tile);

    /* Re-check cancellation between tiles. */
    if (task_pool.canceled()) {
      if (task.need_finish_queue == false)
        break;
    }
  }

  /* Teardown: unregister from the profiler, free per-thread kernel data,
   * destroy the placement-constructed globals, then release the storage. */
  profiler.remove_state(&kg->profiler);
  thread_kernel_globals_free((KernelGlobals *)kgbuffer.device_pointer);
  kg->~KernelGlobals();
  kgbuffer.free();
  delete split_kernel;
}