Code example #1
CCL_NAMESPACE_BEGIN

DenoisingTask::DenoisingTask(Device *device, const DeviceTask &task)
: tile_info_mem(device, "denoising tile info mem", MEM_READ_WRITE),
  storage(device),
  buffer(device),
  device(device)
{
	radius = task.denoising_radius;
	nlm_k_2 = powf(2.0f, lerp(-5.0f, 3.0f, task.denoising_strength));
	if(task.denoising_relative_pca) {
		pca_threshold = -powf(10.0f, lerp(-8.0f, 0.0f, task.denoising_feature_strength));
	}
	else {
		pca_threshold = powf(10.0f, lerp(-5.0f, 3.0f, task.denoising_feature_strength));
	}

	render_buffer.pass_stride = task.pass_stride;
	render_buffer.offset = task.pass_denoising_data;

	target_buffer.pass_stride = task.pass_stride;
	target_buffer.denoising_clean_offset = task.pass_denoising_clean;

	functions.map_neighbor_tiles = function_bind(task.map_neighbor_tiles, _1, device);
	functions.unmap_neighbor_tiles = function_bind(task.unmap_neighbor_tiles, _1, device);
}
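
Every snippet on this page builds a callback with function_bind plus the _1, _2, ... placeholders. In Cycles this is a thin wrapper around std::bind (boost::bind in older releases), so the pattern is ordinary partial application: some arguments are fixed at bind time, and the placeholders mark the ones supplied when the resulting callable is invoked. Below is a minimal sketch of that pattern, assuming function_bind maps to std::bind; the Device type and map_tile() method are illustrative placeholders, not the actual Cycles API.

#include <functional>
#include <iostream>

using std::placeholders::_1;

/* Illustrative stand-in for a device object; not the real Cycles Device. */
struct Device {
	void map_tile(int tile_index)
	{
		std::cout << "mapping tile " << tile_index << "\n";
	}
};

int main()
{
	Device device;

	/* Fix the object now and leave the tile index open via _1, the same shape
	 * as functions.map_neighbor_tiles = function_bind(..., _1, device) above. */
	std::function<void(int)> map_neighbor_tiles =
		std::bind(&Device::map_tile, &device, _1);

	map_neighbor_tiles(42); /* prints "mapping tile 42" */
	return 0;
}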
Code example #2
void BlenderSession::create_session()
{
	SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background);
	SessionParams session_params = BlenderSync::get_session_params(b_userpref, b_scene, background);

	/* reset status/progress */
	last_status = "";
	last_progress = -1.0f;

	/* create scene */
	scene = new Scene(scene_params);

	/* create sync */
	sync = new BlenderSync(b_data, b_scene, scene, !background);
	sync->sync_data(b_v3d);

	if(b_rv3d)
		sync->sync_view(b_v3d, b_rv3d, width, height);
	else
		sync->sync_camera(b_engine.camera_override(), width, height);

	/* create session */
	session = new Session(session_params);
	session->scene = scene;
	session->progress.set_update_callback(function_bind(&BlenderSession::tag_redraw, this));
	session->progress.set_cancel_callback(function_bind(&BlenderSession::test_cancel, this));
	session->set_pause(BlenderSync::get_session_pause(b_scene, background));

	/* set buffer parameters */
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_scene, b_rv3d, width, height);
	session->reset(buffer_params, session_params.samples);
}
Code example #3
File: blender_session.cpp Project: eirikhex/blender
void BlenderSession::create_session()
{
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	bool is_cpu = session_params.device.type == DEVICE_CPU;
	SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background, is_cpu);
	bool session_pause = BlenderSync::get_session_pause(b_scene, background);

	/* reset status/progress */
	last_status = "";
	last_error = "";
	last_progress = -1.0f;
	start_resize_time = 0.0;

	/* create scene */
	scene = new Scene(scene_params, session_params.device);

	/* setup callbacks for builtin image support */
	scene->image_manager->builtin_image_info_cb = function_bind(&BlenderSession::builtin_image_info, this, _1, _2, _3, _4, _5, _6, _7);
	scene->image_manager->builtin_image_pixels_cb = function_bind(&BlenderSession::builtin_image_pixels, this, _1, _2, _3);
	scene->image_manager->builtin_image_float_pixels_cb = function_bind(&BlenderSession::builtin_image_float_pixels, this, _1, _2, _3);

	/* create session */
	session = new Session(session_params);
	session->scene = scene;
	session->progress.set_update_callback(function_bind(&BlenderSession::tag_redraw, this));
	session->progress.set_cancel_callback(function_bind(&BlenderSession::test_cancel, this));
	session->set_pause(session_pause);

	/* create sync */
	sync = new BlenderSync(b_engine, b_data, b_scene, scene, !background, session->progress, is_cpu);

	if(b_v3d) {
		if(session_pause == false) {
			/* full data sync */
			sync->sync_view(b_v3d, b_rv3d, width, height);
			sync->sync_data(b_render,
			                b_v3d,
			                b_engine.camera_override(),
			                width, height,
			                &python_thread_state,
			                b_rlay_name.c_str());
		}
	}
	else {
		/* for final render we will do full data sync per render layer, only
		 * do some basic syncing here, no objects or materials for speed */
		sync->sync_render_layers(b_v3d, NULL);
		sync->sync_integrator();
		sync->sync_camera(b_render, b_engine.camera_override(), width, height);
	}

	/* set buffer parameters */
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);
	session->reset(buffer_params, session_params.samples);

	b_engine.use_highlight_tiles(session_params.progressive_refine == false);
}
Code example #4
File: device_cpu.cpp Project: sobotka/blender
  void denoise(DenoisingTask &denoising, RenderTile &tile)
  {
    ProfilingHelper profiling(denoising.profiler, PROFILING_DENOISING);

    tile.sample = tile.start_sample + tile.num_samples;

    denoising.functions.construct_transform = function_bind(
        &CPUDevice::denoising_construct_transform, this, &denoising);
    denoising.functions.accumulate = function_bind(
        &CPUDevice::denoising_accumulate, this, _1, _2, _3, _4, &denoising);
    denoising.functions.solve = function_bind(&CPUDevice::denoising_solve, this, _1, &denoising);
    denoising.functions.divide_shadow = function_bind(
        &CPUDevice::denoising_divide_shadow, this, _1, _2, _3, _4, _5, &denoising);
    denoising.functions.non_local_means = function_bind(
        &CPUDevice::denoising_non_local_means, this, _1, _2, _3, _4, &denoising);
    denoising.functions.combine_halves = function_bind(
        &CPUDevice::denoising_combine_halves, this, _1, _2, _3, _4, _5, _6, &denoising);
    denoising.functions.get_feature = function_bind(
        &CPUDevice::denoising_get_feature, this, _1, _2, _3, _4, _5, &denoising);
    denoising.functions.write_feature = function_bind(
        &CPUDevice::denoising_write_feature, this, _1, _2, _3, &denoising);
    denoising.functions.detect_outliers = function_bind(
        &CPUDevice::denoising_detect_outliers, this, _1, _2, _3, _4, &denoising);

    denoising.filter_area = make_int4(tile.x, tile.y, tile.w, tile.h);
    denoising.render_buffer.samples = tile.sample;
    denoising.buffer.gpu_temporary_mem = false;

    denoising.run_denoising(&tile);
  }
Code example #5
static void session_init()
{
	options.session = new Session(options.session_params);
	options.session->reset(session_buffer_params(), options.session_params.samples);
	options.session->scene = options.scene;
	
	if(options.session_params.background && !options.quiet)
		options.session->progress.set_update_callback(function_bind(&session_print_status));
	else
		options.session->progress.set_update_callback(function_bind(&view_redraw));

	options.session->start();

	options.scene = NULL;
}
Code example #6
File: image.cpp Project: mgschwan/blensor
void ImageManager::device_update(Device *device,
                                 Scene *scene,
                                 Progress& progress)
{
	if(!need_update) {
		return;
	}

	TaskPool pool;
	for(int type = 0; type < IMAGE_DATA_NUM_TYPES; type++) {
		for(size_t slot = 0; slot < images[type].size(); slot++) {
			if(!images[type][slot])
				continue;

			if(images[type][slot]->users == 0) {
				device_free_image(device, (ImageDataType)type, slot);
			}
			else if(images[type][slot]->need_load) {
				if(!osl_texture_system || images[type][slot]->builtin_data)
					pool.push(function_bind(&ImageManager::device_load_image,
					                        this,
					                        device,
					                        scene,
					                        (ImageDataType)type,
					                        slot,
					                        &progress));
			}
		}
	}

	pool.wait_work();

	need_update = false;
}
Code example #7
File: session.cpp Project: JimStar/OctaneBlender
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Runs the new rendering loop
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Session::start(const char* pass_name_, bool synchronous, uint32_t frame_idx_, uint32_t total_frames_) {
    pass_name       = pass_name_;
    frame_idx       = frame_idx_;
    total_frames    = total_frames_;

    if(!synchronous)
        //FIXME: kill this boost here
        session_thread = new thread(function_bind(&Session::run, this));
    else
        run();
} //start()
Code example #8
File: util_task_test.cpp Project: dfelinto/blender
TEST(util_task, basic)
{
  TaskScheduler::init(0);
  TaskPool pool;
  for (int i = 0; i < 100; ++i) {
    pool.push(function_bind(task_run));
  }
  TaskPool::Summary summary;
  pool.wait_work(&summary);
  TaskScheduler::exit();
  EXPECT_EQ(summary.num_tasks_handled, 100);
}
Code example #9
void BlenderSession::create_session()
{
	SceneParams scene_params = BlenderSync::get_scene_params(b_scene, background);
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);

	/* reset status/progress */
	last_status = "";
	last_progress = -1.0f;
	start_resize_time = 0.0;

	/* create scene */
	scene = new Scene(scene_params, session_params.device);

	/* create session */
	session = new Session(session_params);
	session->scene = scene;
	session->progress.set_update_callback(function_bind(&BlenderSession::tag_redraw, this));
	session->progress.set_cancel_callback(function_bind(&BlenderSession::test_cancel, this));
	session->set_pause(BlenderSync::get_session_pause(b_scene, background));

	/* create sync */
	sync = new BlenderSync(b_engine, b_data, b_scene, scene, !background, session->progress, session_params.device.type == DEVICE_CPU);
	sync->sync_data(b_v3d, b_engine.camera_override());

	if(b_rv3d)
		sync->sync_view(b_v3d, b_rv3d, width, height);
	else
		sync->sync_camera(b_render, b_engine.camera_override(), width, height);

	/* set buffer parameters */
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_scene, b_v3d, b_rv3d, scene->camera, width, height);
	session->reset(buffer_params, session_params.samples);

	b_engine.use_highlight_tiles(session_params.progressive_refine == false);

	/* setup callbacks for builtin image support */
	scene->image_manager->builtin_image_info_cb = function_bind(&BlenderSession::builtin_image_info, this, _1, _2, _3, _4, _5, _6);
	scene->image_manager->builtin_image_pixels_cb = function_bind(&BlenderSession::builtin_image_pixels, this, _1, _2, _3);
	scene->image_manager->builtin_image_float_pixels_cb = function_bind(&BlenderSession::builtin_image_float_pixels, this, _1, _2, _3);
}
Code example #10
File: image.cpp Project: vanangamudi/blender-main
void ImageManager::device_update(Device *device, DeviceScene *dscene, Progress& progress)
{
	if(!need_update)
		return;
	
	TaskPool pool;

	for(size_t slot = 0; slot < images.size(); slot++) {
		if(!images[slot])
			continue;

		if(images[slot]->users == 0) {
			device_free_image(device, dscene, slot);
		}
		else if(images[slot]->need_load) {
			if(!osl_texture_system) 
				pool.push(function_bind(&ImageManager::device_load_image, this, device, dscene, slot, &progress));
		}
	}

	for(size_t slot = 0; slot < float_images.size(); slot++) {
		if(!float_images[slot])
			continue;

		if(float_images[slot]->users == 0) {
			device_free_image(device, dscene, slot + TEX_IMAGE_FLOAT_START);
		}
		else if(float_images[slot]->need_load) {
			if(!osl_texture_system) 
				pool.push(function_bind(&ImageManager::device_load_image, this, device, dscene, slot + TEX_IMAGE_FLOAT_START, &progress));
		}
	}

	pool.wait_work();

	if(pack_images)
		device_pack_images(device, dscene, progress);

	need_update = false;
}
Code example #11
File: light.cpp Project: Aligorith/blender
CCL_NAMESPACE_BEGIN

static void shade_background_pixels(Device *device, DeviceScene *dscene, int res, vector<float3>& pixels, Progress& progress)
{
	/* create input */
	int width = res;
	int height = res;

	device_vector<uint4> d_input;
	device_vector<float4> d_output;

	uint4 *d_input_data = d_input.resize(width*height);

	for(int y = 0; y < height; y++) {
		for(int x = 0; x < width; x++) {
			float u = x/(float)width;
			float v = y/(float)height;

			uint4 in = make_uint4(__float_as_int(u), __float_as_int(v), 0, 0);
			d_input_data[x + y*width] = in;
		}
	}

	/* compute on device */
	d_output.resize(width*height);
	memset((void*)d_output.data_pointer, 0, d_output.memory_size());

	device->const_copy_to("__data", &dscene->data, sizeof(dscene->data));

	device->mem_alloc(d_input, MEM_READ_ONLY);
	device->mem_copy_to(d_input);
	device->mem_alloc(d_output, MEM_WRITE_ONLY);

	DeviceTask main_task(DeviceTask::SHADER);
	main_task.shader_input = d_input.device_pointer;
	main_task.shader_output = d_output.device_pointer;
	main_task.shader_eval_type = SHADER_EVAL_BACKGROUND;
	main_task.shader_x = 0;
	main_task.shader_w = width*height;
	main_task.num_samples = 1;
	main_task.get_cancel = function_bind(&Progress::get_cancel, &progress);

	/* disabled splitting for now, there's an issue with multi-GPU mem_copy_from */
	list<DeviceTask> split_tasks;
	main_task.split(split_tasks, 1, 128*128);

	foreach(DeviceTask& task, split_tasks) {
		device->task_add(task);
		device->task_wait();
		device->mem_copy_from(d_output, task.shader_x, 1, task.shader_w, sizeof(float4));
	}
Code example #12
File: bvh_build.cpp Project: jonntd/blender
	BVHBuildTask(BVHBuild *build,
	             InnerNode *node,
	             int child,
	             const BVHObjectBinning& range,
	             int level)
	: range_(range)
	{
		run = function_bind(&BVHBuild::thread_build_node,
		                    build,
		                    node,
		                    child,
		                    &range_,
		                    level);
	}
Code example #13
File: device_cpu.cpp Project: ruesp83/Blender---AMA
	CPUDevice(int threads_num)
	{
		kg = kernel_globals_create();

		/* do now to avoid thread issues */
		system_cpu_support_optimized();

		if(threads_num == 0)
			threads_num = system_cpu_thread_count();

		threads.resize(threads_num);

		for(size_t i = 0; i < threads.size(); i++)
			threads[i] = new thread(function_bind(&CPUDevice::thread_run, this, i));
	}
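
Example #13 spawns one worker thread per logical CPU and uses function_bind to package the entry point: the bound callable already carries both this and the thread index. Here is a reduced sketch of the same pattern, assuming the thread wrapper behaves like std::thread; the Worker class and thread_run() below are illustrative stand-ins, not the real CPUDevice interface.

#include <functional>
#include <iostream>
#include <thread>
#include <vector>

class Worker {
public:
	explicit Worker(int threads_num)
	{
		/* Each thread runs thread_run(i); the bound callable captures this and
		 * the index, mirroring function_bind(&CPUDevice::thread_run, this, i). */
		for(int i = 0; i < threads_num; i++)
			threads_.emplace_back(std::bind(&Worker::thread_run, this, i));
	}

	~Worker()
	{
		for(std::thread &t : threads_)
			t.join();
	}

private:
	void thread_run(int index)
	{
		std::cout << "worker thread " << index << " running\n";
	}

	std::vector<std::thread> threads_;
};

int main()
{
	Worker workers(4);
	return 0;
}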
Code example #14
File: bvh_build.cpp Project: jonntd/blender
	BVHSpatialSplitBuildTask(BVHBuild *build,
	                         InnerNode *node,
	                         int child,
	                         const BVHRange& range,
	                         const vector<BVHReference>& references,
	                         int level)
	: range_(range),
	  references_(references.begin() + range.start(),
	              references.begin() + range.end())
	{
		range_.set_start(0);
		run = function_bind(&BVHBuild::thread_build_spatial_split_node,
		                    build,
		                    node,
		                    child,
		                    &range_,
		                    &references_,
		                    level,
		                    _1);
	}
Code example #15
File: film.cpp Project: mdtrooper/goxel
static vector<float> filter_table(FilterType type, float width)
{
	vector<float> filter_table(FILTER_TABLE_SIZE);
	float (*filter_func)(float, float) = NULL;

	switch(type) {
		case FILTER_BOX:
			filter_func = filter_func_box;
			break;
		case FILTER_GAUSSIAN:
			filter_func = filter_func_gaussian;
			width *= 3.0f;
			break;
		case FILTER_BLACKMAN_HARRIS:
			filter_func = filter_func_blackman_harris;
			width *= 2.0f;
			break;
		default:
			assert(0);
	}

	/* Create importance sampling table. */

	/* TODO(sergey): With the even filter table size resolution we can not
	 * really make it nice symmetric importance map without sampling full range
	 * (meaning, we would need to sample full filter range and not use the
	 * make_symmetric argument).
	 *
	 * Current code matches exactly initial filter table code, but we should
	 * consider either making FILTER_TABLE_SIZE odd value or sample full filter.
	 */

	util_cdf_inverted(FILTER_TABLE_SIZE,
	                  0.0f,
	                  width * 0.5f,
	                  function_bind(filter_func, _1, width),
	                  true,
	                  filter_table);

	return filter_table;
}
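
Example #15 shows the other common use of function_bind on this page, partial application of a free function: the filter width is fixed at bind time and util_cdf_inverted only supplies the sample position through _1. Below is a reduced sketch of that currying step, again assuming function_bind behaves like std::bind; gaussian() is an illustrative stand-in for the real filter_func_gaussian.

#include <cmath>
#include <functional>
#include <iostream>

using std::placeholders::_1;

/* Illustrative stand-in for filter_func_gaussian(v, width). */
static float gaussian(float v, float width)
{
	return std::exp(-2.0f * v * v / (width * width));
}

int main()
{
	const float width = 1.5f;

	/* Fix the width now; callers only ever pass the sample position,
	 * matching function_bind(filter_func, _1, width) above. */
	std::function<float(float)> filter = std::bind(gaussian, _1, width);

	std::cout << filter(0.25f) << "\n";
	return 0;
}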
Code example #16
bool BlenderSession::draw(int w, int h)
{
	/* pause in redraw in case update is not being called due to final render */
	session->set_pause(BlenderSync::get_session_pause(b_scene, background));

	/* before drawing, we verify camera and viewport size changes, because
	 * we do not get update callbacks for those, we must detect them here */
	if(session->ready_to_reset()) {
		bool reset = false;

		/* if dimensions changed, reset */
		if(width != w || height != h) {
			if(start_resize_time == 0.0) {
				/* don't react immediately to resizes to avoid flickery resizing
				 * of the viewport, and some window managers changing the window
				 * size temporarily on unminimize */
				start_resize_time = time_dt();
				tag_redraw();
			}
			else if(time_dt() - start_resize_time < 0.2) {
				tag_redraw();
			}
			else {
				width = w;
				height = h;
				reset = true;
			}
		}

		/* try to acquire mutex. if we can't, come back later */
		if(!session->scene->mutex.try_lock()) {
			tag_update();
		}
		else {
			/* update camera from 3d view */

			sync->sync_view(b_v3d, b_rv3d, width, height);

			if(scene->camera->need_update)
				reset = true;

			session->scene->mutex.unlock();
		}

		/* reset if requested */
		if(reset) {
			SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
			BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);
			bool session_pause = BlenderSync::get_session_pause(b_scene, background);

			if(session_pause == false) {
				session->reset(buffer_params, session_params.samples);
				start_resize_time = 0.0;
			}
		}
	}
	else {
		tag_update();
	}

	/* update status and progress for 3d view draw */
	update_status_progress();

	/* draw */
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);
	DeviceDrawParams draw_params;

	if(session->params.display_buffer_linear) {
		draw_params.bind_display_space_shader_cb = function_bind(&BL::RenderEngine::bind_display_space_shader, &b_engine, b_scene);
		draw_params.unbind_display_space_shader_cb = function_bind(&BL::RenderEngine::unbind_display_space_shader, &b_engine);
	}

	return !session->draw(buffer_params, draw_params);
}
Code example #17
void BlenderSession::bake(BL::Object b_object, const string& pass_type, BL::BakePixel pixel_array, const size_t num_pixels, const int /*depth*/, float result[])
{
	ShaderEvalType shader_type = get_shader_type(pass_type);
	size_t object_index = OBJECT_NONE;
	int tri_offset = 0;

	/* ensure kernels are loaded before we do any scene updates */
	session->load_kernels();

	if(session->progress.get_cancel())
		return;

	if(shader_type == SHADER_EVAL_UV) {
		/* force UV to be available */
		Pass::add(PASS_UV, scene->film->passes);
	}

	if(BakeManager::is_light_pass(shader_type)) {
		/* force use_light_pass to be true */
		Pass::add(PASS_LIGHT, scene->film->passes);
	}

	/* create device and update scene */
	scene->film->tag_update(scene);
	scene->integrator->tag_update(scene);

	/* update scene */
	sync->sync_camera(b_render, b_engine.camera_override(), width, height);
	sync->sync_data(b_v3d, b_engine.camera_override(), &python_thread_state);

	/* get buffer parameters */
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);

	scene->bake_manager->set_shader_limit((size_t)b_engine.tile_x(), (size_t)b_engine.tile_y());
	scene->bake_manager->set_baking(true);

	/* set number of samples */
	session->tile_manager.set_samples(session_params.samples);
	session->reset(buffer_params, session_params.samples);
	session->update_scene();

	/* find object index. todo: is arbitrary - copied from mesh_displace.cpp */
	for(size_t i = 0; i < scene->objects.size(); i++) {
		if(strcmp(scene->objects[i]->name.c_str(), b_object.name().c_str()) == 0) {
			object_index = i;
			tri_offset = scene->objects[i]->mesh->tri_offset;
			break;
		}
	}

	/* when used, non-instanced convention: object = ~object */
	int object = ~object_index;

	BakeData *bake_data = scene->bake_manager->init(object, tri_offset, num_pixels);

	populate_bake_data(bake_data, pixel_array, num_pixels);

	/* set number of samples */
	session->tile_manager.set_samples(session_params.samples);
	session->reset(buffer_params, session_params.samples);
	session->update_scene();

	session->progress.set_update_callback(function_bind(&BlenderSession::update_bake_progress, this));

	scene->bake_manager->bake(scene->device, &scene->dscene, scene, session->progress, shader_type, bake_data, result);

	/* free all memory used (host and device), so we wouldn't leave render
	 * engine with extra memory allocated
	 */

	session->device_free();

	delete sync;
	sync = NULL;
}
Code example #18
void BlenderSession::render()
{
	/* set callback to write out render results */
	session->write_render_tile_cb = function_bind(&BlenderSession::write_render_tile, this, _1);
	session->update_render_tile_cb = function_bind(&BlenderSession::update_render_tile, this, _1);

	/* get buffer parameters */
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_render, b_v3d, b_rv3d, scene->camera, width, height);

	/* render each layer */
	BL::RenderSettings r = b_scene.render();
	BL::RenderSettings::layers_iterator b_layer_iter;
	BL::RenderResult::views_iterator b_view_iter;
	
	for(r.layers.begin(b_layer_iter); b_layer_iter != r.layers.end(); ++b_layer_iter) {
		b_rlay_name = b_layer_iter->name();

		/* temporary render result to find needed passes and views */
		BL::RenderResult b_rr = begin_render_result(b_engine, 0, 0, 1, 1, b_rlay_name.c_str(), NULL);
		BL::RenderResult::layers_iterator b_single_rlay;
		b_rr.layers.begin(b_single_rlay);

		/* layer will be missing if it was disabled in the UI */
		if(b_single_rlay == b_rr.layers.end()) {
			end_render_result(b_engine, b_rr, true, false);
			continue;
		}

		BL::RenderLayer b_rlay = *b_single_rlay;

		/* add passes */
		vector<Pass> passes;
		Pass::add(PASS_COMBINED, passes);
#ifdef WITH_CYCLES_DEBUG
		Pass::add(PASS_BVH_TRAVERSAL_STEPS, passes);
#endif

		if(session_params.device.advanced_shading) {

			/* loop over passes */
			BL::RenderLayer::passes_iterator b_pass_iter;

			for(b_rlay.passes.begin(b_pass_iter); b_pass_iter != b_rlay.passes.end(); ++b_pass_iter) {
				BL::RenderPass b_pass(*b_pass_iter);
				PassType pass_type = get_pass_type(b_pass);

				if(pass_type == PASS_MOTION && scene->integrator->motion_blur)
					continue;
				if(pass_type != PASS_NONE)
					Pass::add(pass_type, passes);
			}
		}

		buffer_params.passes = passes;
		scene->film->pass_alpha_threshold = b_layer_iter->pass_alpha_threshold();
		scene->film->tag_passes_update(scene, passes);
		scene->film->tag_update(scene);
		scene->integrator->tag_update(scene);

		for(b_rr.views.begin(b_view_iter); b_view_iter != b_rr.views.end(); ++b_view_iter) {
			b_rview_name = b_view_iter->name();

			/* set the current view */
			b_engine.active_view_set(b_rview_name.c_str());

			/* update scene */
			sync->sync_camera(b_render, b_engine.camera_override(), width, height);
			sync->sync_data(b_v3d, b_engine.camera_override(), &python_thread_state, b_rlay_name.c_str());

			/* update number of samples per layer */
			int samples = sync->get_layer_samples();
			bool bound_samples = sync->get_layer_bound_samples();

			if(samples != 0 && (!bound_samples || (samples < session_params.samples)))
				session->reset(buffer_params, samples);
			else
				session->reset(buffer_params, session_params.samples);

			/* render */
			session->start();
			session->wait();

			if(session->progress.get_cancel())
				break;
		}

		/* free result without merging */
		end_render_result(b_engine, b_rr, true, false);

		if(session->progress.get_cancel())
			break;
	}

	/* clear callback */
	session->write_render_tile_cb = function_null;
	session->update_render_tile_cb = function_null;

	/* free all memory used (host and device), so we wouldn't leave render
	 * engine with extra memory allocated
	 */

	session->device_free();

	delete sync;
	sync = NULL;
}
Code example #19
	/* note that the lock must be already acquired upon entry.
	 * This is necessary because the caller often peeks at
	 * the header and delegates control to here when it doesn't
	 * specifically handle the current RPC.
	 * The lock must be unlocked before returning */
	void process(RPCReceive& rcv, thread_scoped_lock &lock)
	{
		if(rcv.name == "mem_alloc") {
			MemoryType type;
			network_device_memory mem;
			device_ptr client_pointer;

			rcv.read(mem);
			rcv.read(type);

			lock.unlock();

			client_pointer = mem.device_pointer;

			/* create a memory buffer for the device buffer */
			size_t data_size = mem.memory_size();
			DataVector &data_v = data_vector_insert(client_pointer, data_size);

			if(data_size)
				mem.data_pointer = (device_ptr)&(data_v[0]);
			else
				mem.data_pointer = 0;

			/* perform the allocation on the actual device */
			device->mem_alloc(mem, type);

			/* store a mapping to/from client_pointer and real device pointer */
			pointer_mapping_insert(client_pointer, mem.device_pointer);
		}
		else if(rcv.name == "mem_copy_to") {
			network_device_memory mem;

			rcv.read(mem);
			lock.unlock();

			device_ptr client_pointer = mem.device_pointer;

			DataVector &data_v = data_vector_find(client_pointer);

			size_t data_size = mem.memory_size();

			/* get pointer to memory buffer for device buffer */
			mem.data_pointer = (device_ptr)&data_v[0];

			/* copy data from network into memory buffer */
			rcv.read_buffer((uint8_t*)mem.data_pointer, data_size);

			/* translate the client pointer to a real device pointer */
			mem.device_pointer = device_ptr_from_client_pointer(client_pointer);

			/* copy the data from the memory buffer to the device buffer */
			device->mem_copy_to(mem);
		}
		else if(rcv.name == "mem_copy_from") {
			network_device_memory mem;
			int y, w, h, elem;

			rcv.read(mem);
			rcv.read(y);
			rcv.read(w);
			rcv.read(h);
			rcv.read(elem);

			device_ptr client_pointer = mem.device_pointer;
			mem.device_pointer = device_ptr_from_client_pointer(client_pointer);

			DataVector &data_v = data_vector_find(client_pointer);

			mem.data_pointer = (device_ptr)&(data_v[0]);

			device->mem_copy_from(mem, y, w, h, elem);

			size_t data_size = mem.memory_size();

			RPCSend snd(socket, &error_func, "mem_copy_from");
			snd.write();
			snd.write_buffer((uint8_t*)mem.data_pointer, data_size);
			lock.unlock();
		}
		else if(rcv.name == "mem_zero") {
			network_device_memory mem;
			
			rcv.read(mem);
			lock.unlock();

			device_ptr client_pointer = mem.device_pointer;
			mem.device_pointer = device_ptr_from_client_pointer(client_pointer);

			DataVector &data_v = data_vector_find(client_pointer);

			mem.data_pointer = (device_ptr)&(data_v[0]);

			device->mem_zero(mem);
		}
		else if(rcv.name == "mem_free") {
			network_device_memory mem;
			device_ptr client_pointer;

			rcv.read(mem);
			lock.unlock();

			client_pointer = mem.device_pointer;

			mem.device_pointer = device_ptr_from_client_pointer_erase(client_pointer);

			device->mem_free(mem);
		}
		else if(rcv.name == "const_copy_to") {
			string name_string;
			size_t size;

			rcv.read(name_string);
			rcv.read(size);

			vector<char> host_vector(size);
			rcv.read_buffer(&host_vector[0], size);
			lock.unlock();

			device->const_copy_to(name_string.c_str(), &host_vector[0], size);
		}
		else if(rcv.name == "tex_alloc") {
			network_device_memory mem;
			string name;
			InterpolationType interpolation;
			bool periodic;
			device_ptr client_pointer;

			rcv.read(name);
			rcv.read(mem);
			rcv.read(interpolation);
			rcv.read(periodic);
			lock.unlock();

			client_pointer = mem.device_pointer;

			size_t data_size = mem.memory_size();

			DataVector &data_v = data_vector_insert(client_pointer, data_size);

			if(data_size)
				mem.data_pointer = (device_ptr)&(data_v[0]);
			else
				mem.data_pointer = 0;

			rcv.read_buffer((uint8_t*)mem.data_pointer, data_size);

			device->tex_alloc(name.c_str(), mem, interpolation, periodic);

			pointer_mapping_insert(client_pointer, mem.device_pointer);
		}
		else if(rcv.name == "tex_free") {
			network_device_memory mem;
			device_ptr client_pointer;

			rcv.read(mem);
			lock.unlock();

			client_pointer = mem.device_pointer;

			mem.device_pointer = device_ptr_from_client_pointer_erase(client_pointer);

			device->tex_free(mem);
		}
		else if(rcv.name == "load_kernels") {
			bool experimental;
			rcv.read(experimental);

			bool result;
			result = device->load_kernels(experimental);
			RPCSend snd(socket, &error_func, "load_kernels");
			snd.add(result);
			snd.write();
			lock.unlock();
		}
		else if(rcv.name == "task_add") {
			DeviceTask task;

			rcv.read(task);
			lock.unlock();

			if(task.buffer)
				task.buffer = device_ptr_from_client_pointer(task.buffer);

			if(task.rgba_half)
				task.rgba_half = device_ptr_from_client_pointer(task.rgba_half);

			if(task.rgba_byte)
				task.rgba_byte = device_ptr_from_client_pointer(task.rgba_byte);

			if(task.shader_input)
				task.shader_input = device_ptr_from_client_pointer(task.shader_input);

			if(task.shader_output)
				task.shader_output = device_ptr_from_client_pointer(task.shader_output);


			task.acquire_tile = function_bind(&DeviceServer::task_acquire_tile, this, _1, _2);
			task.release_tile = function_bind(&DeviceServer::task_release_tile, this, _1);
			task.update_progress_sample = function_bind(&DeviceServer::task_update_progress_sample, this);
			task.update_tile_sample = function_bind(&DeviceServer::task_update_tile_sample, this, _1);
			task.get_cancel = function_bind(&DeviceServer::task_get_cancel, this);

			device->task_add(task);
		}
		else if(rcv.name == "task_wait") {
			lock.unlock();

			blocked_waiting = true;
			device->task_wait();
			blocked_waiting = false;

			lock.lock();
			RPCSend snd(socket, &error_func, "task_wait_done");
			snd.write();
			lock.unlock();
		}
		else if(rcv.name == "task_cancel") {
			lock.unlock();
			device->task_cancel();
		}
		else if(rcv.name == "acquire_tile") {
			AcquireEntry entry;
			entry.name = rcv.name;
			rcv.read(entry.tile);
			acquire_queue.push_back(entry);
			lock.unlock();
		}
		else if(rcv.name == "acquire_tile_none") {
			AcquireEntry entry;
			entry.name = rcv.name;
			acquire_queue.push_back(entry);
			lock.unlock();
		}
		else if(rcv.name == "release_tile") {
			AcquireEntry entry;
			entry.name = rcv.name;
			acquire_queue.push_back(entry);
			lock.unlock();
		}
		else {
			cout << "Error: unexpected RPC receive call \"" + rcv.name + "\"\n";
			lock.unlock();
		}
	}
Code example #20
File: camera.cpp Project: DrangPo/blender
void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
{
	Scene::MotionType need_motion = scene->need_motion(device->info.advanced_shading);

	update();

	if(previous_need_motion != need_motion) {
		/* scene's motion model could have been changed since previous device
		 * camera update this could happen for example in case when one render
		 * layer has got motion pass and another not */
		need_device_update = true;
	}

	if(!need_device_update)
		return;
	
	KernelCamera *kcam = &dscene->data.cam;

	/* store matrices */
	kcam->screentoworld = screentoworld;
	kcam->rastertoworld = rastertoworld;
	kcam->rastertocamera = rastertocamera;
	kcam->cameratoworld = cameratoworld;
	kcam->worldtocamera = worldtocamera;
	kcam->worldtoscreen = worldtoscreen;
	kcam->worldtoraster = worldtoraster;
	kcam->worldtondc = worldtondc;

	/* camera motion */
	kcam->have_motion = 0;
	kcam->have_perspective_motion = 0;

	if(need_motion == Scene::MOTION_PASS) {
		/* TODO(sergey): Support perspective (zoom, fov) motion. */
		if(type == CAMERA_PANORAMA) {
			if(use_motion) {
				kcam->motion.pre = transform_inverse(motion.pre);
				kcam->motion.post = transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = kcam->worldtocamera;
				kcam->motion.post = kcam->worldtocamera;
			}
		}
		else {
			if(use_motion) {
				kcam->motion.pre = cameratoraster * transform_inverse(motion.pre);
				kcam->motion.post = cameratoraster * transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = worldtoraster;
				kcam->motion.post = worldtoraster;
			}
		}
	}
#ifdef __CAMERA_MOTION__
	else if(need_motion == Scene::MOTION_BLUR) {
		if(use_motion) {
			transform_motion_decompose((DecompMotionTransform*)&kcam->motion, &motion, &matrix);
			kcam->have_motion = 1;
		}
		if(use_perspective_motion) {
			kcam->perspective_motion = perspective_motion;
			kcam->have_perspective_motion = 1;
		}
	}
#endif

	/* depth of field */
	kcam->aperturesize = aperturesize;
	kcam->focaldistance = focaldistance;
	kcam->blades = (blades < 3)? 0.0f: blades;
	kcam->bladesrotation = bladesrotation;

	/* motion blur */
#ifdef __CAMERA_MOTION__
	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: -1.0f;

	if(need_motion == Scene::MOTION_BLUR) {
		vector<float> shutter_table;
		util_cdf_inverted(SHUTTER_TABLE_SIZE,
		                  0.0f,
		                  1.0f,
		                  function_bind(shutter_curve_eval, _1, shutter_curve),
		                  false,
		                  shutter_table);
		shutter_table_offset = scene->lookup_tables->add_table(dscene,
		                                                       shutter_table);
		kcam->shutter_table_offset = (int)shutter_table_offset;
	}
	else if(shutter_table_offset != TABLE_OFFSET_INVALID) {
		scene->lookup_tables->remove_table(shutter_table_offset);
		shutter_table_offset = TABLE_OFFSET_INVALID;
	}
#else
	kcam->shuttertime = -1.0f;
#endif

	/* type */
	kcam->type = type;

	/* anamorphic lens bokeh */
	kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

	/* panorama */
	kcam->panorama_type = panorama_type;
	kcam->fisheye_fov = fisheye_fov;
	kcam->fisheye_lens = fisheye_lens;
	kcam->equirectangular_range = make_float4(longitude_min - longitude_max, -longitude_min,
	                                          latitude_min -  latitude_max, -latitude_min + M_PI_2_F);

	/* sensor size */
	kcam->sensorwidth = sensorwidth;
	kcam->sensorheight = sensorheight;

	/* render size */
	kcam->width = width;
	kcam->height = height;
	kcam->resolution = resolution;

	/* store differentials */
	kcam->dx = float3_to_float4(dx);
	kcam->dy = float3_to_float4(dy);

	/* clipping */
	kcam->nearclip = nearclip;
	kcam->cliplength = (farclip == FLT_MAX)? FLT_MAX: farclip - nearclip;

	/* Camera in volume. */
	kcam->is_inside_volume = 0;

	/* Rolling shutter effect */
	kcam->rolling_shutter_type = rolling_shutter_type;
	kcam->rolling_shutter_duration = rolling_shutter_duration;

	previous_need_motion = need_motion;
}
Code example #21
void BlenderSession::render()
{
	/* set callback to write out render results */
	session->write_render_tile_cb = function_bind(&BlenderSession::write_render_tile, this, _1);
	session->update_render_tile_cb = function_bind(&BlenderSession::update_render_tile, this, _1);

	/* get buffer parameters */
	SessionParams session_params = BlenderSync::get_session_params(b_engine, b_userpref, b_scene, background);
	BufferParams buffer_params = BlenderSync::get_buffer_params(b_scene, b_v3d, b_rv3d, scene->camera, width, height);

	/* render each layer */
	BL::RenderSettings r = b_scene.render();
	BL::RenderSettings::layers_iterator b_iter;
	
	for(r.layers.begin(b_iter); b_iter != r.layers.end(); ++b_iter) {
		b_rlay_name = b_iter->name();

		/* temporary render result to find needed passes */
		BL::RenderResult b_rr = begin_render_result(b_engine, 0, 0, 1, 1, b_rlay_name.c_str());
		BL::RenderResult::layers_iterator b_single_rlay;
		b_rr.layers.begin(b_single_rlay);

		/* layer will be missing if it was disabled in the UI */
		if(b_single_rlay == b_rr.layers.end()) {
			end_render_result(b_engine, b_rr, true);
			continue;
		}

		BL::RenderLayer b_rlay = *b_single_rlay;

		/* add passes */
		vector<Pass> passes;
		Pass::add(PASS_COMBINED, passes);

		if(session_params.device.advanced_shading) {

			/* loop over passes */
			BL::RenderLayer::passes_iterator b_pass_iter;

			for(b_rlay.passes.begin(b_pass_iter); b_pass_iter != b_rlay.passes.end(); ++b_pass_iter) {
				BL::RenderPass b_pass(*b_pass_iter);
				PassType pass_type = get_pass_type(b_pass);

				if(pass_type == PASS_MOTION && scene->integrator->motion_blur)
					continue;
				if(pass_type != PASS_NONE)
					Pass::add(pass_type, passes);
			}
		}

		/* free result without merging */
		end_render_result(b_engine, b_rr, true);

		buffer_params.passes = passes;
		scene->film->tag_passes_update(scene, passes);
		scene->film->tag_update(scene);
		scene->integrator->tag_update(scene);

		/* update scene */
		sync->sync_data(b_v3d, b_engine.camera_override(), b_rlay_name.c_str());

		/* update session */
		int samples = sync->get_layer_samples();
		session->reset(buffer_params, (samples == 0)? session_params.samples: samples);

		/* render */
		session->start();
		session->wait();

		if(session->progress.get_cancel())
			break;
	}

	/* clear callback */
	session->write_render_tile_cb = NULL;
	session->update_render_tile_cb = NULL;

	/* free all memory used (host and device), so we wouldn't leave render
	 * engine with extra memory allocated
	 */

	session->device_free();

	delete sync;
	sync = NULL;
}
Code example #22
File: util_task.cpp Project: dezelin/blender
void TaskScheduler::init(int num_threads)
{
	thread_scoped_lock lock(mutex);

	/* multiple cycles instances can use this task scheduler, sharing the same
	 * threads, so we keep track of the number of users. */
	if(users == 0) {
		do_exit = false;

		const bool use_auto_threads = (num_threads == 0);
		if(use_auto_threads) {
			/* automatic number of threads */
			num_threads = system_cpu_thread_count();
		}
		VLOG(1) << "Creating pool of " << num_threads << " threads.";

		/* launch threads that will be waiting for work */
		threads.resize(num_threads);

		const int num_groups = system_cpu_group_count();
		unsigned short num_process_groups;
		vector<unsigned short> process_groups;
		int current_group_threads;
		if(num_groups > 1) {
			process_groups.resize(num_groups);
			num_process_groups = system_cpu_process_groups(num_groups, 
			                                               &process_groups[0]);
			if(num_process_groups == 1) {
				current_group_threads = system_cpu_group_thread_count(process_groups[0]);
			}
		}
		int thread_index = 0;
		for(int group = 0; group < num_groups; ++group) {
			/* NOTE: That's not really efficient from threading point of view,
			 * but it is simple to read and it doesn't make sense to use more
			 * user-specified threads than logical threads anyway.
			 */
			int num_group_threads = (group == num_groups - 1)
			        ? (threads.size() - thread_index)
			        : system_cpu_group_thread_count(group);
			for(int group_thread = 0;
				group_thread < num_group_threads && thread_index < threads.size();
				++group_thread, ++thread_index)
			{
				/* NOTE: Thread group of -1 means we would not force thread affinity. */
				int thread_group;
				if(num_groups == 1) {
					/* Use default affinity if there's only one CPU group in the system. */
					thread_group = -1;
				}
				else if(use_auto_threads &&
				        num_process_groups == 1 &&
						num_threads <= current_group_threads)
				{
					/* If we fit into curent CPU group we also don't force any affinity. */
					thread_group = -1;
				}
				else {
					thread_group = group;
				}
				threads[thread_index] = new thread(function_bind(&TaskScheduler::thread_run,
				                                                 thread_index + 1),
				                                   thread_group);
			}
		}
	}
	
	users++;
}
Code example #23
		OpenCLDeviceTask(OpenCLDevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&OpenCLDevice::thread_run, device, this);
		}
Code example #24
		CPUDeviceTask(CPUDevice *device, DeviceTask& task)
		: DeviceTask(task)
		{
			run = function_bind(&CPUDevice::thread_run, device, this);
		}
Code example #25
	void process(RPCReceive& rcv)
	{
		// fprintf(stderr, "receive process %s\n", rcv.name.c_str());

		if(rcv.name == "mem_alloc") {
			MemoryType type;
			network_device_memory mem;
			device_ptr remote_pointer;

			rcv.read(mem);
			rcv.read(type);

			/* todo: CPU needs mem.data_pointer */

			remote_pointer = mem.device_pointer;

			mem_data[remote_pointer] = vector<uint8_t>();
			mem_data[remote_pointer].resize(mem.memory_size());
			if(mem.memory_size())
				mem.data_pointer = (device_ptr)&(mem_data[remote_pointer][0]);
			else
				mem.data_pointer = 0;

			device->mem_alloc(mem, type);

			ptr_map[remote_pointer] = mem.device_pointer;
			ptr_imap[mem.device_pointer] = remote_pointer;
		}
		else if(rcv.name == "mem_copy_to") {
			network_device_memory mem;

			rcv.read(mem);

			device_ptr remote_pointer = mem.device_pointer;
			mem.data_pointer = (device_ptr)&(mem_data[remote_pointer][0]);

			rcv.read_buffer((uint8_t*)mem.data_pointer, mem.memory_size());

			mem.device_pointer = ptr_map[remote_pointer];

			device->mem_copy_to(mem);
		}
		else if(rcv.name == "mem_copy_from") {
			network_device_memory mem;
			int y, w, h, elem;

			rcv.read(mem);
			rcv.read(y);
			rcv.read(w);
			rcv.read(h);
			rcv.read(elem);

			device_ptr remote_pointer = mem.device_pointer;
			mem.device_pointer = ptr_map[remote_pointer];
			mem.data_pointer = (device_ptr)&(mem_data[remote_pointer][0]);

			device->mem_copy_from(mem, y, w, h, elem);

			RPCSend snd(socket);
			snd.write();
			snd.write_buffer((uint8_t*)mem.data_pointer, mem.memory_size());
		}
		else if(rcv.name == "mem_zero") {
			network_device_memory mem;
			
			rcv.read(mem);
			device_ptr remote_pointer = mem.device_pointer;
			mem.device_pointer = ptr_map[mem.device_pointer];
			mem.data_pointer = (device_ptr)&(mem_data[remote_pointer][0]);

			device->mem_zero(mem);
		}
		else if(rcv.name == "mem_free") {
			network_device_memory mem;
			device_ptr remote_pointer;

			rcv.read(mem);

			remote_pointer = mem.device_pointer;
			mem.device_pointer = ptr_map[mem.device_pointer];
			ptr_map.erase(remote_pointer);
			ptr_imap.erase(mem.device_pointer);
			mem_data.erase(remote_pointer);

			device->mem_free(mem);
		}
		else if(rcv.name == "const_copy_to") {
			string name_string;
			size_t size;

			rcv.read(name_string);
			rcv.read(size);

			vector<char> host_vector(size);
			rcv.read_buffer(&host_vector[0], size);

			device->const_copy_to(name_string.c_str(), &host_vector[0], size);
		}
		else if(rcv.name == "tex_alloc") {
			network_device_memory mem;
			string name;
			bool interpolation;
			bool periodic;
			device_ptr remote_pointer;

			rcv.read(name);
			rcv.read(mem);
			rcv.read(interpolation);
			rcv.read(periodic);

			remote_pointer = mem.device_pointer;

			mem_data[remote_pointer] = vector<uint8_t>();
			mem_data[remote_pointer].resize(mem.memory_size());
			if(mem.memory_size())
				mem.data_pointer = (device_ptr)&(mem_data[remote_pointer][0]);
			else
				mem.data_pointer = 0;

			rcv.read_buffer((uint8_t*)mem.data_pointer, mem.memory_size());

			device->tex_alloc(name.c_str(), mem, interpolation, periodic);

			ptr_map[remote_pointer] = mem.device_pointer;
			ptr_imap[mem.device_pointer] = remote_pointer;
		}
		else if(rcv.name == "tex_free") {
			network_device_memory mem;
			device_ptr remote_pointer;

			rcv.read(mem);

			remote_pointer = mem.device_pointer;
			mem.device_pointer = ptr_map[mem.device_pointer];
			ptr_map.erase(remote_pointer);
			ptr_map.erase(mem.device_pointer);
			mem_data.erase(remote_pointer);

			device->tex_free(mem);
		}
		else if(rcv.name == "task_add") {
			DeviceTask task;

			rcv.read(task);

			if(task.buffer) task.buffer = ptr_map[task.buffer];
			if(task.rgba_byte) task.rgba_byte = ptr_map[task.rgba_byte];
			if(task.rgba_half) task.rgba_half = ptr_map[task.rgba_half];
			if(task.shader_input) task.shader_input = ptr_map[task.shader_input];
			if(task.shader_output) task.shader_output = ptr_map[task.shader_output];

			task.acquire_tile = function_bind(&DeviceServer::task_acquire_tile, this, _1, _2);
			task.release_tile = function_bind(&DeviceServer::task_release_tile, this, _1);
			task.update_progress_sample = function_bind(&DeviceServer::task_update_progress_sample, this);
			task.update_tile_sample = function_bind(&DeviceServer::task_update_tile_sample, this, _1);
			task.get_cancel = function_bind(&DeviceServer::task_get_cancel, this);

			device->task_add(task);
		}
		else if(rcv.name == "task_wait") {
			device->task_wait();

			RPCSend snd(socket, "task_wait_done");
			snd.write();
		}
		else if(rcv.name == "task_cancel") {
			device->task_cancel();
		}
	}