Example #1
DeviceInfo Device::get_multi_device(const vector<DeviceInfo>& subdevices, int threads, bool background)
{
	assert(subdevices.size() > 1);

	DeviceInfo info;
	info.type = DEVICE_MULTI;
	info.id = "MULTI";
	info.description = "Multi Device";
	info.num = 0;

	info.has_half_images = true;
	info.has_volume_decoupled = true;
	info.bvh_layout_mask = BVH_LAYOUT_ALL;
	info.has_osl = true;

	foreach(const DeviceInfo &device, subdevices) {
		/* Ensure CPU device does not slow down GPU. */
		if(device.type == DEVICE_CPU && subdevices.size() > 1) {
			if(background) {
				int orig_cpu_threads = (threads)? threads: system_cpu_thread_count();
				int cpu_threads = max(orig_cpu_threads - (subdevices.size() - 1), 0);

				VLOG(1) << "CPU render threads reduced from "
						<< orig_cpu_threads << " to " << cpu_threads
						<< ", to dedicate to GPU.";

				if(cpu_threads >= 1) {
					DeviceInfo cpu_device = device;
					cpu_device.cpu_threads = cpu_threads;
					info.multi_devices.push_back(cpu_device);
				}
				else {
					continue;
				}
			}
			else {
				VLOG(1) << "CPU render threads disabled for interactive render.";
				continue;
			}
		}
		else {
			info.multi_devices.push_back(device);
		}

		/* Accumulate device info. */
		info.has_half_images &= device.has_half_images;
		info.has_volume_decoupled &= device.has_volume_decoupled;
		info.bvh_layout_mask = device.bvh_layout_mask & info.bvh_layout_mask;
		info.has_osl &= device.has_osl;
	}

	return info;
}
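
A minimal standalone sketch (not part of the Cycles sources) of the thread-reduction arithmetic above, assuming an 8-thread CPU combined with two GPU subdevices:

/* Sketch only: with 3 subdevices total, the CPU keeps 8 - (3 - 1) = 6 threads. */
#include <algorithm>
#include <cstdio>

int main()
{
	const int orig_cpu_threads = 8; /* assumed result of system_cpu_thread_count() */
	const int num_subdevices = 3;   /* assumed: one CPU device plus two GPUs */

	/* Same clamp as in get_multi_device(): never go below zero threads. */
	const int cpu_threads = std::max(orig_cpu_threads - (num_subdevices - 1), 0);

	printf("CPU render threads reduced from %d to %d\n", orig_cpu_threads, cpu_threads);
	return 0;
}

If the result drops below 1, the CPU subdevice is skipped entirely, which matches the continue branch in the function above.
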
Example #2
	CPUDevice(int threads_num)
	{
		kg = kernel_globals_create();

		/* Detect CPU support now, before worker threads are created, to avoid thread issues. */
		system_cpu_support_optimized();

		if(threads_num == 0)
			threads_num = system_cpu_thread_count();

		threads.resize(threads_num);

		for(size_t i = 0; i < threads.size(); i++)
			threads[i] = new thread(function_bind(&CPUDevice::thread_run, this, i));
	}
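
A rough sketch of the same one-worker-per-core construction pattern using only the standard library; the class and member names here are hypothetical and are not the Cycles thread/function_bind wrappers:

#include <thread>
#include <vector>

class WorkerPool {
public:
	explicit WorkerPool(int threads_num)
	{
		/* 0 means "use as many workers as the hardware reports", as in CPUDevice. */
		if(threads_num == 0)
			threads_num = (int)std::thread::hardware_concurrency();

		for(int i = 0; i < threads_num; i++)
			workers.emplace_back(&WorkerPool::thread_run, this, i);
	}

	~WorkerPool()
	{
		for(std::thread &t : workers)
			t.join();
	}

private:
	void thread_run(int /*thread_id*/)
	{
		/* The per-thread work loop would go here. */
	}

	std::vector<std::thread> workers;
};
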
Example #3
void TaskScheduler::init(int num_threads)
{
	thread_scoped_lock lock(mutex);

	/* Multiple Cycles instances can use this task scheduler, sharing the same
	 * threads, so we keep track of the number of users. */
	if(users == 0) {
		do_exit = false;

		const bool use_auto_threads = (num_threads == 0);
		if(use_auto_threads) {
			/* automatic number of threads */
			num_threads = system_cpu_thread_count();
		}
		VLOG(1) << "Creating pool of " << num_threads << " threads.";

		/* launch threads that will be waiting for work */
		threads.resize(num_threads);

		const int num_groups = system_cpu_group_count();
		unsigned short num_process_groups;
		vector<unsigned short> process_groups;
		int current_group_threads;
		if(num_groups > 1) {
			process_groups.resize(num_groups);
			num_process_groups = system_cpu_process_groups(num_groups, 
			                                               &process_groups[0]);
			if(num_process_groups == 1) {
				current_group_threads = system_cpu_group_thread_count(process_groups[0]);
			}
		}
		int thread_index = 0;
		for(int group = 0; group < num_groups; ++group) {
			/* NOTE: This is not the most efficient approach from a threading
			 * point of view, but it is simple to read, and it does not make
			 * sense to use more user-specified threads than logical threads
			 * anyway.
			 */
			int num_group_threads = (group == num_groups - 1)
			        ? (threads.size() - thread_index)
			        : system_cpu_group_thread_count(group);
			for(int group_thread = 0;
				group_thread < num_group_threads && thread_index < threads.size();
				++group_thread, ++thread_index)
			{
				/* NOTE: A thread group of -1 means we do not force thread affinity. */
				int thread_group;
				if(num_groups == 1) {
					/* Use default affinity if there's only one CPU group in the system. */
					thread_group = -1;
				}
				else if(use_auto_threads &&
				        num_process_groups == 1 &&
				        num_threads <= current_group_threads)
				{
					/* If we fit into the current CPU group, we also don't force any affinity. */
					thread_group = -1;
				}
				else {
					thread_group = group;
				}
				threads[thread_index] = new thread(function_bind(&TaskScheduler::thread_run,
				                                                 thread_index + 1),
				                                   thread_group);
			}
		}
	}
	
	users++;
}
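
The affinity decision inside the loop above can be read as three cases; the hypothetical helper below (not present in the Cycles sources) restates them in isolation:

/* Sketch only: returns -1 for "do not force thread affinity", else the group index. */
static int choose_thread_group(int group,
                               int num_groups,
                               bool use_auto_threads,
                               int num_process_groups,
                               int num_threads,
                               int current_group_threads)
{
	/* Only one CPU group in the system: keep the default affinity. */
	if(num_groups == 1)
		return -1;

	/* Automatic thread count that fits into the current process group:
	 * also keep the default affinity. */
	if(use_auto_threads &&
	   num_process_groups == 1 &&
	   num_threads <= current_group_threads)
	{
		return -1;
	}

	/* Otherwise pin the thread to its group. */
	return group;
}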