Example #1
/* Find an output node of the shader tree.
 *
 * NOTE: it will only return an output that is NOT inside a group, which isn't
 * how render engines work but is how the GPU shader compilation works. We can
 * change this in the future and make it a generic function, but for now it
 * stays private here.
 */
static bNode *ntree_shader_output_node(bNodeTree *ntree)
{
	/* Make sure we only have single node tagged as output. */
	ntreeSetOutput(ntree);
	for (bNode *node = ntree->nodes.first; node != NULL; node = node->next) {
		if (node->flag & NODE_DO_OUTPUT) {
			return node;
		}
	}
	return NULL;
}
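A minimal caller sketch for the function above: only ntree_shader_output_node() comes from the listing; the wrapper name and the early-return check are illustrative assumptions.

/* Hypothetical caller: bail out of GPU shader compilation when the tree has
 * no active output node. The wrapper itself is an assumption for illustration. */
static bool ntree_shader_compile_check(bNodeTree *ntree)
{
	bNode *output = ntree_shader_output_node(ntree);
	if (output == NULL) {
		/* Nothing is tagged NODE_DO_OUTPUT, so there is nothing to compile. */
		return false;
	}
	return true;
}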
Example #2
static void update(bNodeTree *ntree)
{
	ntreeSetOutput(ntree);
	
	ntree_update_reroute_nodes(ntree);
	
	if (ntree->update & NTREE_UPDATE_NODES) {
		/* clean up preview cache, in case nodes have been removed */
		BKE_node_preview_remove_unused(ntree);
	}
}
Example #3
/* XXX Group nodes must set use_tree_data to false, since their trees can be shared by multiple nodes.
 * If use_tree_data is true, the ntree->execdata pointer is checked to avoid multiple executions of top-level trees.
 */
struct bNodeTreeExec *ntreeCompositBeginExecTree(bNodeTree *ntree, int use_tree_data)
{
	bNodeTreeExec *exec;
	bNode *node;
	bNodeSocket *sock;
	
	if (use_tree_data) {
		/* XXX hack: prevent exec data from being generated twice.
		 * this should be handled by the renderer!
		 */
		if (ntree->execdata)
			return ntree->execdata;
	}
	
	/* ensures only a single output node is enabled */
	ntreeSetOutput(ntree);
	
	exec = ntree_exec_begin(ntree);
	
	for (node = exec->nodetree->nodes.first; node; node = node->next) {
		/* initialization needed for groups */
		node->exec = 0;
		
		for (sock = node->outputs.first; sock; sock = sock->next) {
			bNodeStack *ns = node_get_socket_stack(exec->stack, sock);
			if (ns && sock->cache) {
				ns->data = sock->cache;
				sock->cache = NULL;
			}
		}
		/* curve mappings cannot be initialized while the tree is executing
		 * in threads, so do it here up front */
		if (ELEM4(node->type, CMP_NODE_TIME, CMP_NODE_CURVE_VEC, CMP_NODE_CURVE_RGB, CMP_NODE_HUECORRECT)) {
			curvemapping_initialize(node->storage);
			if (node->type == CMP_NODE_CURVE_RGB)
				curvemapping_premultiply(node->storage, 0);
		}
	}
	
	if (use_tree_data) {
		/* XXX this should not be necessary, but is still used for cmp/sha/tex nodes,
		 * which only store the ntree pointer. Should be fixed at some point!
		 */
		ntree->execdata = exec;
	}
	
	return exec;
}
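The use_tree_data contract described in the comment above can be illustrated with two hedged caller sketches. Only ntreeCompositBeginExecTree() is from the listing; both wrapper names are invented for illustration.

/* Hypothetical wrappers illustrating the use_tree_data flag documented above. */
static bNodeTreeExec *composite_begin_toplevel(bNodeTree *ntree)
{
	/* use_tree_data != 0: the result is cached in ntree->execdata, so a second
	 * call returns the existing exec data instead of rebuilding it. */
	return ntreeCompositBeginExecTree(ntree, 1);
}

static bNodeTreeExec *composite_begin_group(bNodeTree *group_tree)
{
	/* use_tree_data == 0: a group tree can be shared by several group nodes,
	 * so it must not claim the tree-level execdata pointer for itself. */
	return ntreeCompositBeginExecTree(group_tree, 0);
}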
Example #4
bNodeTreeExec *ntreeShaderBeginExecTree_internal(bNodeExecContext *context, bNodeTree *ntree, bNodeInstanceKey parent_key)
{
	bNodeTreeExec *exec;
	bNode *node;
	
	/* ensures only a single output node is enabled */
	ntreeSetOutput(ntree);
	
	/* common base initialization */
	exec = ntree_exec_begin(context, ntree, parent_key);
	
	/* allocate the thread stack listbase array */
	exec->threadstack = MEM_callocN(BLENDER_MAX_THREADS * sizeof(ListBase), "thread stack array");
	
	for (node = exec->nodetree->nodes.first; node; node = node->next)
		node->need_exec = 1;
	
	return exec;
}
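As a hedged usage sketch, the exec data built above would normally be paired with a matching end call that frees the thread stacks again. The round-trip wrapper below and the exact name of the end function are assumptions based on the begin/end convention of these APIs, not code from the listing.

/* Hypothetical round trip: build the shader exec data, run the tree, release
 * it. Only ntreeShaderBeginExecTree_internal() above is from the listing. */
static void shader_exec_roundtrip(bNodeExecContext *context, bNodeTree *ntree,
                                  bNodeInstanceKey parent_key)
{
	bNodeTreeExec *exec = ntreeShaderBeginExecTree_internal(context, ntree, parent_key);
	
	/* ... execute nodes here, one thread stack per thread via exec->threadstack ... */
	
	ntreeShaderEndExecTree_internal(exec);  /* assumed matching end call */
}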
Example #5
static void update(bNodeTree *ntree)
{
	ntreeSetOutput(ntree);
	
	ntree_update_reroute_nodes(ntree);
}
Example #6
static void update(bNodeTree *ntree)
{
	ntreeSetOutput(ntree);
}