/* Move the results from the previous iteration back to the input sockets. */
static void loop_iteration_reset(bNodeTree *ngroup, bNodeStack *gstack)
{
    bNodeSocket *gin, *gout;
    bNodeStack *nsin, *nsout;
    
    gin = ngroup->inputs.first;
    gout = ngroup->outputs.first;
    
    while (gin && gout) {
        /* skip static (non-looping) sockets */
        while (gin && !(gin->flag & SOCK_DYNAMIC))
            gin = gin->next;
        while (gout && !(gout->flag & SOCK_DYNAMIC))
            gout = gout->next;
        
        if (gin && gout) {
            nsin = node_get_socket_stack(gstack, gin);
            nsout = node_get_socket_stack(gstack, gout);
            
            move_stack(nsin, nsout);
            
            gin = gin->next;
            gout = gout->next;
        }
    }
}
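/* For reference, a minimal sketch of the move_stack() helper used above.
 * It is not reproduced from this file; the field set (vec, data, datatype,
 * is_copy) follows bNodeStack, and the _sketch suffix marks the body as
 * illustrative. The key point is ownership transfer: the destination takes
 * over any buffer, and the source slot is cleared so the buffer cannot be
 * freed twice. */
static void move_stack_sketch(bNodeStack *to, bNodeStack *from)
{
    if (to != from) {
        copy_v4_v4(to->vec, from->vec);
        to->data = from->data;
        to->datatype = from->datatype;
        to->is_copy = from->is_copy;
        
        /* clear the source slot: the buffer now belongs to 'to' */
        zero_v4(from->vec);
        from->data = NULL;
        from->datatype = 0;
        from->is_copy = 0;
    }
}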
/* basic preparation of socket stacks */
static struct bNodeStack *setup_stack(bNodeStack *stack, bNodeTree *ntree, bNode *node, bNodeSocket *sock)
{
    bNodeStack *ns = node_get_socket_stack(stack, sock);
    if (!ns)
        return NULL;
    
    /* don't mess with remote socket stacks, these are initialized by other nodes! */
    if (sock->link)
        return ns;
    
    ns->sockettype = sock->type;
    
    switch (sock->type) {
        case SOCK_FLOAT:
            ns->vec[0] = node_socket_get_float(ntree, node, sock);
            break;
        case SOCK_VECTOR:
            node_socket_get_vector(ntree, node, sock, ns->vec);
            break;
        case SOCK_RGBA:
            node_socket_get_color(ntree, node, sock, ns->vec);
            break;
    }
    
    return ns;
}
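/* For orientation, a plausible sketch of node_get_socket_stack(), which every
 * function in this file relies on. It assumes each socket stores a stack_index
 * into the flat per-tree stack array (as bNodeSocket does); the _sketch suffix
 * marks this as illustrative rather than the exact implementation. */
static bNodeStack *node_get_socket_stack_sketch(bNodeStack *stack, bNodeSocket *sock)
{
    /* a negative index means the socket has no stack entry */
    if (stack && sock && sock->stack_index >= 0)
        return stack + sock->stack_index;
    return NULL;
}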
static void forloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec= (bNodeTreeExec *)nodedata;
    int totiterations= (int)in[0]->vec[0];
    bNodeSocket *sock;
    bNodeStack *ns;
    int iteration;
    
    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better. */
    {
        bNode *inode;
        for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
            inode->need_exec = 1;
    }
    
    /* "Iteration" socket */
    sock = exec->nodetree->inputs.first;
    ns = node_get_socket_stack(exec->stack, sock);
    
    group_copy_inputs(node, in, exec->stack);
    for (iteration=0; iteration < totiterations; ++iteration) {
        /* first input contains current iteration counter */
        ns->vec[0] = (float)iteration;
        
        if (iteration > 0)
            loop_iteration_reset(exec->nodetree, exec->stack);
        ntreeExecNodes(exec, data, thread);
        group_free_internal(exec);
    }
    group_move_outputs(node, out, exec->stack);
}
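/* group_free_internal(), called once per iteration above, is not shown in this
 * section. A sketch of the expected behavior, assuming bNodeTreeExec exposes
 * stack/stacksize and the buffers are compositor CompBufs: every stack entry
 * that is neither external (tagged in group_initexec) nor a shallow copy gets
 * its buffer freed, so each iteration starts from a clean internal stack. */
static void group_free_internal_sketch(bNodeTreeExec *gexec)
{
    bNodeStack *ns;
    int i;
    
    for (i=0, ns=gexec->stack; i < gexec->stacksize; ++i, ++ns) {
        /* external results and shallow copies are owned elsewhere */
        if (!ns->external && !ns->is_copy) {
            if (ns->data) {
                free_compbuf(ns->data);
                ns->data = NULL;
            }
        }
    }
}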
/* XXX Group nodes must set use_tree_data to false, since their trees can be shared by multiple nodes.
 * If use_tree_data is true, the ntree->execdata pointer is checked to avoid multiple execution of top-level trees.
 */
void ntreeCompositEndExecTree(bNodeTreeExec *exec, int use_tree_data)
{
    if (exec) {
        bNodeTree *ntree= exec->nodetree;
        bNode *node;
        bNodeStack *ns;
        
        for (node= exec->nodetree->nodes.first; node; node= node->next) {
            bNodeSocket *sock;
            
            for (sock= node->outputs.first; sock; sock= sock->next) {
                ns = node_get_socket_stack(exec->stack, sock);
                if (ns && ns->data) {
                    sock->cache= ns->data;
                    ns->data= NULL;
                }
            }
            
            if (node->type==CMP_NODE_CURVE_RGB)
                curvemapping_premultiply(node->storage, 1);
            
            node->need_exec= 0;
        }
        
        ntree_exec_end(exec);
        
        if (use_tree_data) {
            /* XXX clear nodetree backpointer to exec data, same problem as noted in ntreeBeginExecTree */
            ntree->execdata = NULL;
        }
    }
}
void node_get_stack(bNode *node, bNodeStack *stack, bNodeStack **in, bNodeStack **out)
{
    bNodeSocket *sock;
    
    /* build pointer stack */
    if (in) {
        for (sock= node->inputs.first; sock; sock= sock->next) {
            *(in++) = node_get_socket_stack(stack, sock);
        }
    }
    
    if (out) {
        for (sock= node->outputs.first; sock; sock= sock->next) {
            *(out++) = node_get_socket_stack(stack, sock);
        }
    }
}
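/* A condensed sketch of how an executor typically consumes node_get_stack():
 * fixed-size pointer arrays are filled per node, then handed to the node's
 * exec callback. MAX_SOCKET and the newexecfunc callback follow BKE_node.h,
 * but this loop is illustrative, not the exact ntreeExecNodes() body. */
static void exec_nodes_sketch(bNodeTreeExec *exec, void *callerdata, int thread)
{
    bNodeStack *nsin[MAX_SOCKET];   /* input stack pointers for one node */
    bNodeStack *nsout[MAX_SOCKET];  /* output stack pointers for one node */
    bNodeExec *nodeexec;
    bNode *node;
    int n;
    
    for (n=0, nodeexec= exec->nodeexec; n < exec->totnodes; ++n, ++nodeexec) {
        node = nodeexec->node;
        if (node->need_exec) {
            /* resolve this node's sockets to their stack slots */
            node_get_stack(node, exec->stack, nsin, nsout);
            if (node->typeinfo->newexecfunc)
                node->typeinfo->newexecfunc(callerdata, thread, node, nodeexec->data, nsin, nsout);
        }
    }
}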
static void PRINT_BUFFERS(bNodeTreeExec *exec)
{
    bNodeTree *ntree= exec->nodetree;
    bNode *node;
    bNodeSocket *sock;
    bNodeStack *ns;
    int i;
    
    printf("-------------- DEBUG --------------\n");
    for (sock=ntree->inputs.first, i=0; sock; sock=sock->next, ++i) {
        ns = node_get_socket_stack(exec->stack, sock);
        printf("%d. Tree Input %s", i, sock->name);
        if (ns->external)
            printf(" (external)");
        printf(": data=%p\n", ns->data);
    }
    for (sock=ntree->outputs.first, i=0; sock; sock=sock->next, ++i) {
        ns = node_get_socket_stack(exec->stack, sock);
        printf("%d. Tree Output %s", i, sock->name);
        if (ns->external)
            printf(" (external)");
        printf(": data=%p\n", ns->data);
    }
    for (node=ntree->nodes.first; node; node=node->next) {
        printf("Node %s:\n", node->name);
        for (sock=node->inputs.first, i=0; sock; sock=sock->next, ++i) {
            ns = node_get_socket_stack(exec->stack, sock);
            printf("\t%d. Input %s", i, sock->name);
            if (ns->external)
                printf(" (external)");
            printf(": data=%p\n", ns->data);
        }
        for (sock=node->outputs.first, i=0; sock; sock=sock->next, ++i) {
            ns = node_get_socket_stack(exec->stack, sock);
            printf("\t%d. Output %s", i, sock->name);
            if (ns->external)
                printf(" (external)");
            printf(": data=%p\n", ns->data);
        }
    }
}
/* Copy inputs to the internal stack.
 * This is a shallow copy, no buffers are duplicated here!
 */
static void group_copy_inputs(bNode *node, bNodeStack **in, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (sock=node->inputs.first, a=0; sock; sock=sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            copy_stack(ns, in[a]);
        }
    }
}
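/* The counterpart to move_stack(): a sketch of copy_stack() as used above,
 * again with a _sketch suffix since the body is illustrative. Matching the
 * comment on group_copy_inputs(), the buffer pointer is shared rather than
 * duplicated; the is_copy tag is what keeps the shared buffer from being
 * freed twice. */
static void copy_stack_sketch(bNodeStack *to, bNodeStack *from)
{
    if (to != from) {
        copy_v4_v4(to->vec, from->vec);
        to->data = from->data;
        to->datatype = from->datatype;
        
        /* tag as copy: the original stack entry keeps ownership */
        to->is_copy = 1;
    }
}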
/* Copy internal results to the external outputs. */
static void group_move_outputs(bNode *node, bNodeStack **out, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (sock=node->outputs.first, a=0; sock; sock=sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            move_stack(out[a], ns);
        }
    }
}
/* Copy internal results to the external outputs. */
static void group_gpu_move_outputs(bNode *node, GPUNodeStack *out, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (sock=node->outputs.first, a=0; sock; sock=sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            /* convert the node stack data result back to gpu stack */
            node_gpu_stack_from_data(&out[a], sock->type, ns);
        }
    }
}
static void group_gpu_copy_inputs(bNode *node, GPUNodeStack *in, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (sock=node->inputs.first, a=0; sock; sock=sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            /* convert the external gpu stack back to internal node stack data */
            node_data_from_gpu_stack(ns, &in[a]);
        }
    }
}
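/* Sketches of the two GPU conversion helpers used above, under the assumption
 * that GPUNodeStack carries a vec, a link (the GPU-side analogue of ns->data)
 * and a GPUType. Field names follow the GPU module headers, but these bodies
 * are illustrative and may differ from the real implementations. */
static void node_data_from_gpu_stack_sketch(bNodeStack *ns, GPUNodeStack *gs)
{
    /* on the CPU side, the GPU link plays the role of the data buffer */
    ns->data = gs->link;
    ns->sockettype = gs->sockettype;
}

static void node_gpu_stack_from_data_sketch(GPUNodeStack *gs, int type, bNodeStack *ns)
{
    memset(gs, 0, sizeof(*gs));
    copy_v4_v4(gs->vec, ns->vec);
    gs->link = ns->data;
    
    /* map socket types onto GPU data types */
    if (type == SOCK_FLOAT) gs->type = GPU_FLOAT;
    else if (type == SOCK_VECTOR) gs->type = GPU_VEC3;
    else if (type == SOCK_RGBA) gs->type = GPU_VEC4;
    else gs->type = GPU_NONE;
    
    gs->sockettype = ns->sockettype;
}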
/* XXX Group nodes must set use_tree_data to false, since their trees can be shared by multiple nodes.
 * If use_tree_data is true, the ntree->execdata pointer is checked to avoid multiple execution of top-level trees.
 */
struct bNodeTreeExec *ntreeCompositBeginExecTree(bNodeTree *ntree, int use_tree_data)
{
    bNodeTreeExec *exec;
    bNode *node;
    bNodeSocket *sock;
    
    if (use_tree_data) {
        /* XXX hack: prevent exec data from being generated twice.
         * this should be handled by the renderer!
         */
        if (ntree->execdata)
            return ntree->execdata;
    }
    
    /* ensures only a single output node is enabled */
    ntreeSetOutput(ntree);
    
    exec = ntree_exec_begin(ntree);
    
    for (node= exec->nodetree->nodes.first; node; node= node->next) {
        /* initialize needed for groups */
        node->exec= 0;
        
        for (sock= node->outputs.first; sock; sock= sock->next) {
            bNodeStack *ns= node_get_socket_stack(exec->stack, sock);
            if (ns && sock->cache) {
                ns->data= sock->cache;
                sock->cache= NULL;
            }
        }
        
        /* cannot initialize them while using in threads */
        if (ELEM4(node->type, CMP_NODE_TIME, CMP_NODE_CURVE_VEC, CMP_NODE_CURVE_RGB, CMP_NODE_HUECORRECT)) {
            curvemapping_initialize(node->storage);
            if (node->type==CMP_NODE_CURVE_RGB)
                curvemapping_premultiply(node->storage, 0);
        }
    }
    
    if (use_tree_data) {
        /* XXX this should not be necessary, but is still used for cmp/sha/tex nodes,
         * which only store the ntree pointer. Should be fixed at some point!
         */
        ntree->execdata = exec;
    }
    
    return exec;
}
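/* Typical call pattern for a top-level tree, pieced together from the
 * functions in this section (illustrative only; a real caller such as the
 * renderer also deals with previews and threading). Begin and End must be
 * paired so result buffers survive between runs via sock->cache. */
static void composite_exec_sketch(bNodeTree *ntree, void *callerdata, int thread)
{
    /* top-level tree: use_tree_data=1, so exec data is cached on the tree */
    bNodeTreeExec *exec = ntreeCompositBeginExecTree(ntree, 1);
    
    ntreeExecNodes(exec, callerdata, thread);
    
    /* moves result buffers back into sock->cache and clears ntree->execdata */
    ntreeCompositEndExecTree(exec, 1);
}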
/* Copy inputs to the internal stack.
 * This is a shallow copy, no buffers are duplicated here!
 */
static void group_copy_inputs(bNode *gnode, bNodeStack **in, bNodeStack *gstack)
{
    bNodeTree *ngroup = (bNodeTree *)gnode->id;
    bNode *node;
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (node = ngroup->nodes.first; node; node = node->next) {
        if (node->type == NODE_GROUP_INPUT) {
            for (sock = node->outputs.first, a = 0; sock; sock = sock->next, ++a) {
                ns = node_get_socket_stack(gstack, sock);
                if (ns)
                    copy_stack(ns, in[a]);
            }
        }
    }
}
/* Copy internal results to the external outputs. */
static void group_copy_outputs(bNode *gnode, bNodeStack **out, bNodeStack *gstack)
{
    bNodeTree *ngroup = (bNodeTree *)gnode->id;
    bNode *node;
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (node = ngroup->nodes.first; node; node = node->next) {
        if (node->type == NODE_GROUP_OUTPUT && (node->flag & NODE_DO_OUTPUT)) {
            for (sock = node->inputs.first, a = 0; sock; sock = sock->next, ++a) {
                ns = node_get_socket_stack(gstack, sock);
                if (ns)
                    copy_stack(out[a], ns);
            }
            break;  /* only one active output node */
        }
    }
}
/* while executing tree, free buffers from nodes that are not needed anymore */
static void freeExecutableNode(bNodeTreeExec *exec)
{
    /* node outputs can be freed when:
     * - not a render result or image node
     * - when node outputs go to nodes all being set NODE_FINISHED
     */
    bNodeTree *ntree = exec->nodetree;
    bNodeExec *nodeexec;
    bNode *node;
    bNodeSocket *sock;
    int n;
    
    /* set exec flag for finished nodes that might need freeing */
    for (node= ntree->nodes.first; node; node= node->next) {
        if (node->type!=CMP_NODE_R_LAYERS)
            if (node->exec & NODE_FINISHED)
                node->exec |= NODE_FREEBUFS;
    }
    
    /* clear this flag for input links that are not done yet.
     * Using the exec data for valid dependency sort.
     */
    for (n=0, nodeexec=exec->nodeexec; n < exec->totnodes; ++n, ++nodeexec) {
        node = nodeexec->node;
        if ((node->exec & NODE_FINISHED)==0) {
            for (sock= node->inputs.first; sock; sock= sock->next)
                if (sock->link)
                    sock->link->fromnode->exec &= ~NODE_FREEBUFS;
        }
    }
    
    /* now we can free buffers */
    for (node= ntree->nodes.first; node; node= node->next) {
        if (node->exec & NODE_FREEBUFS) {
            for (sock= node->outputs.first; sock; sock= sock->next) {
                bNodeStack *ns= node_get_socket_stack(exec->stack, sock);
                if (ns && ns->data) {
                    free_compbuf(ns->data);
                    ns->data= NULL;
                    // printf("freed buf node %s\n", node->name);
                }
            }
        }
    }
}
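/* Where this fits in, as an illustrative sketch: whatever schedules node
 * execution marks a node NODE_FINISHED and then gives the tree a chance to
 * reclaim output buffers that no unfinished node still depends on. */
static void node_finished_sketch(bNodeTreeExec *exec, bNode *node)
{
    node->exec |= NODE_FINISHED;
    
    /* safe to call repeatedly; it only frees buffers whose consumers
     * are all finished, and it skips render-layer outputs */
    freeExecutableNode(exec);
}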
static void group_gpu_copy_inputs(bNode *gnode, GPUNodeStack *in, bNodeStack *gstack)
{
    bNodeTree *ngroup = (bNodeTree *)gnode->id;
    bNode *node;
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (node = ngroup->nodes.first; node; node = node->next) {
        if (node->type == NODE_GROUP_INPUT) {
            for (sock = node->outputs.first, a = 0; sock; sock = sock->next, ++a) {
                ns = node_get_socket_stack(gstack, sock);
                if (ns) {
                    /* convert the external gpu stack back to internal node stack data */
                    node_data_from_gpu_stack(ns, &in[a]);
                }
            }
        }
    }
}
static void *group_initexec(bNode *node)
{
    bNodeTree *ngroup= (bNodeTree *)node->id;
    bNodeTreeExec *exec;
    bNodeSocket *sock;
    bNodeStack *ns;
    
    /* initialize the internal node tree execution */
    exec = ntreeCompositBeginExecTree(ngroup, 0);
    
    /* tag group outputs as external to prevent freeing */
    for (sock=ngroup->outputs.first; sock; sock=sock->next) {
        if (!(sock->flag & SOCK_INTERNAL)) {
            ns = node_get_socket_stack(exec->stack, sock);
            ns->external = 1;
        }
    }
    
    return exec;
}
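/* The matching free callback, sketched under the same assumptions as
 * group_initexec() above: groups pass use_tree_data=0 in both directions,
 * since a group tree can be shared by several group nodes and must not own
 * the ntree->execdata backpointer. */
static void group_freeexec_sketch(bNode *UNUSED(node), void *nodedata)
{
    bNodeTreeExec *gexec= (bNodeTreeExec *)nodedata;
    
    ntreeCompositEndExecTree(gexec, 0);
}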
/* Copy internal results to the external outputs. */
static void group_gpu_move_outputs(bNode *gnode, GPUNodeStack *out, bNodeStack *gstack)
{
    bNodeTree *ngroup = (bNodeTree *)gnode->id;
    bNode *node;
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;
    
    for (node = ngroup->nodes.first; node; node = node->next) {
        if (node->type == NODE_GROUP_OUTPUT && (node->flag & NODE_DO_OUTPUT)) {
            for (sock = node->inputs.first, a = 0; sock; sock = sock->next, ++a) {
                ns = node_get_socket_stack(gstack, sock);
                if (ns) {
                    /* convert the node stack data result back to gpu stack */
                    node_gpu_stack_from_data(&out[a], sock->type, ns);
                }
            }
            break;  /* only one active output node */
        }
    }
}
/* basic preparation of socket stacks */
static struct bNodeStack *setup_stack(bNodeStack *stack, bNodeSocket *sock)
{
    bNodeStack *ns = node_get_socket_stack(stack, sock);
    float null_value[4]= {0.0f, 0.0f, 0.0f, 0.0f};
    
    /* don't mess with remote socket stacks, these are initialized by other nodes! */
    if (sock->link)
        return ns;
    
    ns->sockettype = sock->type;
    
    if (sock->default_value) {
        switch (sock->type) {
            case SOCK_FLOAT:
                ns->vec[0] = ((bNodeSocketValueFloat *)sock->default_value)->value;
                break;
            case SOCK_VECTOR:
                copy_v3_v3(ns->vec, ((bNodeSocketValueVector *)sock->default_value)->value);
                break;
            case SOCK_RGBA:
                copy_v4_v4(ns->vec, ((bNodeSocketValueRGBA *)sock->default_value)->value);
                break;
        }
    }
    else {
        switch (sock->type) {
            case SOCK_FLOAT:
                ns->vec[0] = 0.0f;
                break;
            case SOCK_VECTOR:
                copy_v3_v3(ns->vec, null_value);
                break;
            case SOCK_RGBA:
                copy_v4_v4(ns->vec, null_value);
                break;
        }
    }
    
    return ns;
}
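/* The default_value casts above assume DNA structs along these lines.
 * Layouts are quoted from memory of DNA_node_types.h and renamed with a
 * Sketch suffix; field order and the subtype/min/max extras may differ. */
typedef struct bNodeSocketValueFloatSketch {
    int subtype;        /* PROP_NONE, PROP_FACTOR, ... */
    float value;
    float min, max;
} bNodeSocketValueFloatSketch;

typedef struct bNodeSocketValueVectorSketch {
    int subtype;
    float value[3];
    float min, max;
} bNodeSocketValueVectorSketch;

typedef struct bNodeSocketValueRGBASketch {
    float value[4];
} bNodeSocketValueRGBASketch;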
/**** FOR LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
static void forloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec= (bNodeTreeExec *)nodedata;
    bNodeThreadStack *nts;
    int iterations= (int)in[0]->vec[0];
    bNodeSocket *sock;
    bNodeStack *ns;
    int iteration;
    
    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better. */
    {
        bNode *inode;
        for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
            inode->need_exec = 1;
    }
    
    nts = ntreeGetThreadStack(exec, thread);
    
    /* "Iteration" socket */
    sock = exec->nodetree->inputs.first;
    ns = node_get_socket_stack(nts->stack, sock);
    
    // group_copy_inputs(node, in, nts->stack);
    for (iteration=0; iteration < iterations; ++iteration) {
        /* first input contains current iteration counter */
        ns->vec[0] = (float)iteration;
        ns->vec[1] = ns->vec[2] = ns->vec[3] = 0.0f;
        
        // if (iteration > 0)
        //     loop_init_iteration(exec->nodetree, nts->stack);
        // ntreeExecThreadNodes(exec, nts, data, thread);
    }
    // loop_copy_outputs(node, in, out, exec->stack);
    
    ntreeReleaseThreadStack(nts);
}
#endif
/**** WHILE LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
static void whileloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec= (bNodeTreeExec *)nodedata;
    bNodeThreadStack *nts;
    int condition= (in[0]->vec[0] > 0.0f);
    bNodeSocket *sock;
    bNodeStack *ns;
    int iteration;
    
    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better. */
    {
        bNode *inode;
        for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
            inode->need_exec = 1;
    }
    
    nts = ntreeGetThreadStack(exec, thread);
    
    /* "Condition" socket */
    sock = exec->nodetree->outputs.first;
    ns = node_get_socket_stack(nts->stack, sock);
    
    iteration = 0;
    // group_copy_inputs(node, in, nts->stack);
    while (condition && iteration < node->custom1) {
        // if (iteration > 0)
        //     loop_init_iteration(exec->nodetree, nts->stack);
        // ntreeExecThreadNodes(exec, nts, data, thread);
        
        condition = (ns->vec[0] > 0.0f);
        ++iteration;
    }
    // loop_copy_outputs(node, in, out, exec->stack);
    
    ntreeReleaseThreadStack(nts);
}
#endif
/**** WHILE LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
static void whileloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec= (bNodeTreeExec *)nodedata;
    int condition= (in[0]->vec[0] > 0.0f);
    bNodeSocket *sock;
    bNodeStack *ns;
    int iteration;
    
    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better. */
    {
        bNode *inode;
        for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
            inode->need_exec = 1;
    }
    
    /* "Condition" socket */
    sock = exec->nodetree->outputs.first;
    ns = node_get_socket_stack(exec->stack, sock);
    
    iteration = 0;
    group_copy_inputs(node, in, exec->stack);
    while (condition && iteration < node->custom1) {
        if (iteration > 0)
            loop_iteration_reset(exec->nodetree, exec->stack);
        ntreeExecNodes(exec, data, thread);
        group_free_internal(exec);
        
        // PRINT_BUFFERS(exec);
        
        condition = (ns->vec[0] > 0.0f);
        ++iteration;
    }
    group_move_outputs(node, out, exec->stack);
}
#endif