/** * Compute some pixels, and store them. * * This uses a "self-dispatching" parallel algorithm. Executes until * there is no more work to be done, or is told to stop. * * In order to reduce the traffic through the res_worker critical * section, a multiple pixel block may be removed from the work queue * at once. * * For a general-purpose version, see LIBRT rt_shoot_many_rays() */ void worker(int cpu, void *UNUSED(arg)) { int pixel_start; int pixelnum; int pat_num = -1; /* The more CPUs at work, the bigger the bites we take */ if (per_processor_chunk <= 0) per_processor_chunk = npsw; if (cpu >= MAX_PSW) { bu_log("rt/worker() cpu %d > MAX_PSW %d, array overrun\n", cpu, MAX_PSW); bu_exit(EXIT_FAILURE, "rt/worker() cpu > MAX_PSW, array overrun\n"); } RT_CK_RESOURCE(&resource[cpu]); pat_num = -1; if (hypersample) { int i, ray_samples; ray_samples = hypersample + 1; for (i=0; pt_pats[i].num_samples != 0; i++) { if (pt_pats[i].num_samples == ray_samples) { pat_num = i; goto pat_found; } } } pat_found: if (transpose_grid) { int tmp; /* switch cur_pixel and last_pixel */ tmp = cur_pixel; cur_pixel = last_pixel; last_pixel = tmp; while (1) { if (stop_worker) return; bu_semaphore_acquire(RT_SEM_WORKER); pixel_start = cur_pixel; cur_pixel -= per_processor_chunk; bu_semaphore_release(RT_SEM_WORKER); for (pixelnum = pixel_start; pixelnum > pixel_start-per_processor_chunk; pixelnum--) { if (pixelnum < last_pixel) return; do_pixel(cpu, pat_num, pixelnum); } } } else if (random_mode) { while (1) { /* Generate a random pixel id between 0 and last_pixel inclusive - TODO: check if there is any issue related with multi-threaded RNG */ pixelnum = rand()*1.0/RAND_MAX*(last_pixel + 1); if (pixelnum >= last_pixel) pixelnum = last_pixel; do_pixel(cpu, pat_num, pixelnum); } } else { while (1) { if (stop_worker) return; bu_semaphore_acquire(RT_SEM_WORKER); pixel_start = cur_pixel; cur_pixel += per_processor_chunk; bu_semaphore_release(RT_SEM_WORKER); for (pixelnum = pixel_start; pixelnum < 
pixel_start+per_processor_chunk; pixelnum++) { if (pixelnum > last_pixel) return; do_pixel(cpu, pat_num, pixelnum); } } } }
/*
 * W O R K E R
 *
 * Compute some pixels, and store them.
 * A "self-dispatching" parallel algorithm.
 * Executes until there is no more work to be done, or is told to stop.
 *
 * In order to reduce the traffic through the res_worker critical section,
 * a multiple pixel block may be removed from the work queue at once.
 *
 * For a general-purpose version, see LIBRT rt_shoot_many_rays()
 */
void
worker(int cpu, genptr_t arg)
{
    int start;			/* first pixel claimed from the work queue */
    int pix;			/* pixel currently being computed */
    int pattern = -1;		/* index into pt_pats[], or -1 when unused */

    /* The more CPUs at work, the bigger the bites we take */
    if (per_processor_chunk <= 0)
	per_processor_chunk = npsw;

    /* Refuse to index past the end of resource[] */
    if (cpu >= MAX_PSW) {
	bu_log("rt/worker() cpu %d > MAX_PSW %d, array overrun\n", cpu, MAX_PSW);
	bu_exit(EXIT_FAILURE, "rt/worker() cpu > MAX_PSW, array overrun\n");
    }
    RT_CK_RESOURCE(&resource[cpu]);

    /* When hypersampling, locate the point pattern whose sample count
     * matches the requested number of rays per pixel.
     */
    if (hypersample) {
	int samples = hypersample + 1;
	int idx = 0;

	while (pt_pats[idx].num_samples != 0) {
	    if (pt_pats[idx].num_samples == samples) {
		pattern = idx;
		break;
	    }
	    idx++;
	}
    }

    if (transpose_grid) {
	/* Render in reverse order: exchange cur_pixel and last_pixel
	 * and walk the queue downward.
	 */
	int swap = cur_pixel;
	cur_pixel = last_pixel;
	last_pixel = swap;

	for (;;) {
	    if (stop_worker)
		return;

	    /* Claim a chunk of pixels, counting down */
	    bu_semaphore_acquire(RT_SEM_WORKER);
	    start = cur_pixel;
	    cur_pixel -= per_processor_chunk;
	    bu_semaphore_release(RT_SEM_WORKER);

	    for (pix = start; pix > start - per_processor_chunk; pix--) {
		if (pix < last_pixel)
		    return;
		do_pixel(cpu, pattern, pix);
	    }
	}
    } else {
	for (;;) {
	    if (stop_worker)
		return;

	    /* Claim a chunk of pixels, counting up */
	    bu_semaphore_acquire(RT_SEM_WORKER);
	    start = cur_pixel;
	    cur_pixel += per_processor_chunk;
	    bu_semaphore_release(RT_SEM_WORKER);

	    for (pix = start; pix < start + per_processor_chunk; pix++) {
		if (pix > last_pixel)
		    return;
		do_pixel(cpu, pattern, pix);
	    }
	}
    }
}