/* Execute the node tree embedded in a group node for one thread.
 * External inputs of the group node are copied onto the internal tree's
 * per-thread stack, the internal tree is executed, and the results are
 * copied back to the group node's outputs. */
static void group_execute(void *data, int thread, struct bNode *node, bNodeExecData *execdata,
                          struct bNodeStack **in, struct bNodeStack **out)
{
	bNodeTreeExec *exec = execdata->data;
	bNodeThreadStack *nts;
	bNode *inode;

	if (exec == NULL) {
		return;
	}

	/* XXX same behavior as trunk: all nodes inside group are executed.
	 * it's stupid, but just makes it work. compo redesign will do this better. */
	for (inode = exec->nodetree->nodes.first; inode; inode = inode->next) {
		inode->need_exec = 1;
	}

	nts = ntreeGetThreadStack(exec, thread);

	group_copy_inputs(node, in, nts->stack);
	ntreeExecThreadNodes(exec, nts, data, thread);
	group_copy_outputs(node, out, nts->stack);

	ntreeReleaseThreadStack(nts);
}
/* Execute a texture node tree for one shading sample.
 *
 * Packs the caller's shading state into a TexCallData, lazily initializes
 * the tree's execdata (guarded by LOCK_NODES so it happens only once),
 * runs the tree on this thread's stack, and returns the TEX_* result flags.
 * texres->nor is restored afterwards: the output node NULLs it to signal
 * "no normal socket connected", but other texture code still needs the
 * original pointer (a normal is required for material shading). */
int ntreeTexExecTree(
        bNodeTree *nodes,
        TexResult *texres,
        float co[3],
        float dxt[3], float dyt[3],
        int osatex,
        const short thread,
        Tex *UNUSED(tex),
        short which_output,
        int cfra,
        int preview,
        ShadeInput *shi,
        MTex *mtex)
{
	TexCallData data;
	bNodeTreeExec *exec = nodes->execdata;
	bNodeThreadStack *nts;
	float *nor_backup = texres->nor;
	int retval = TEX_INT;

	/* pack caller state for the node execution callbacks */
	data.co = co;
	data.dxt = dxt;
	data.dyt = dyt;
	data.osatex = osatex;
	data.target = texres;
	data.do_preview = preview;
	data.do_manage = (shi) ? shi->do_manage : true;
	data.thread = thread;
	data.which_output = which_output;
	data.cfra = cfra;
	data.mtex = mtex;
	data.shi = shi;

	/* ensure execdata is only initialized once */
	if (exec == NULL) {
		BLI_lock_thread(LOCK_NODES);
		if (!nodes->execdata)
			ntreeTexBeginExecTree(nodes);
		BLI_unlock_thread(LOCK_NODES);

		exec = nodes->execdata;
	}

	nts = ntreeGetThreadStack(exec, thread);
	ntreeExecThreadNodes(exec, nts, &data, thread);
	ntreeReleaseThreadStack(nts);

	if (texres->nor)
		retval |= TEX_NOR;
	retval |= TEX_RGB;

	/* confusing stuff; the texture output node sets this to NULL to indicate no normal socket was set
	 * however, the texture code checks this for other reasons (namely, a normal is required for material) */
	texres->nor = nor_backup;

	return retval;
}
/* only for Blender internal */
/* Execute a shader node tree for one shading sample (Blender Internal).
 *
 * Returns true when the tree contained BI-compatible nodes (the value
 * propagated from ntreeExecThreadNodes), false otherwise. */
bool ntreeShaderExecTree(bNodeTree *ntree, ShadeInput *shi, ShadeResult *shr)
{
	ShaderCallData scd;
	bNodeTreeExec *exec = ntree->execdata;
	bNodeThreadStack *nts;
	int compat;
	/**
	 * \note: preserve material from ShadeInput for material id, nodetree execs change it
	 * fix for bug "[#28012] Mat ID messy with shader nodes"
	 */
	Material *mat = shi->mat;

	/* convert caller data to struct */
	scd.shi = shi;
	scd.shr = shr;

	/* each material node has own local shaderesult, with optional copying */
	memset(shr, 0, sizeof(ShadeResult));

	/* ensure execdata is only initialized once */
	if (exec == NULL) {
		BLI_lock_thread(LOCK_NODES);
		if (!ntree->execdata)
			ntree->execdata = ntreeShaderBeginExecTree(ntree);
		BLI_unlock_thread(LOCK_NODES);

		exec = ntree->execdata;
	}

	nts = ntreeGetThreadStack(exec, shi->thread);
	compat = ntreeExecThreadNodes(exec, nts, &scd, shi->thread);
	ntreeReleaseThreadStack(nts);

	/* \note: set material back to preserved material */
	shi->mat = mat;

	/* better not allow negative for now */
	if (shr->combined[0] < 0.0f) shr->combined[0] = 0.0f;
	if (shr->combined[1] < 0.0f) shr->combined[1] = 0.0f;
	if (shr->combined[2] < 0.0f) shr->combined[2] = 0.0f;

	/* if compat is zero, it has been using non-compatible nodes
	 * (implicit conversion to bool normalizes any nonzero value to true) */
	return compat != 0;
}
/**** WHILE LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
/* Execute a "while loop" node group.
 *
 * NOTE(review): this whole definition is disabled by the #if 0 above; the
 * matching #endif lies outside this chunk. The actual per-iteration execution
 * calls below are additionally commented out, so as written the loop only
 * re-reads the condition socket each iteration. Kept for reference until the
 * loop-node redesign. */
static void whileloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
	bNodeTreeExec *exec= (bNodeTreeExec*)nodedata;
	bNodeThreadStack *nts;
	/* loop continues while the first input socket is positive */
	int condition= (in[0]->vec[0] > 0.0f);
	bNodeSocket *sock;
	bNodeStack *ns;
	int iteration;

	/* XXX same behavior as trunk: all nodes inside group are executed.
	 * it's stupid, but just makes it work. compo redesign will do this better. */
	{
		bNode *inode;
		for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
			inode->need_exec = 1;
	}

	nts = ntreeGetThreadStack(exec, thread);

	/* "Condition" socket */
	sock = exec->nodetree->outputs.first;
	ns = node_get_socket_stack(nts->stack, sock);

	iteration = 0;
	/* XXX commented-out iteration body preserved from the original */
	// group_copy_inputs(node, in, nts->stack);
	/* node->custom1 caps the number of iterations */
	while (condition && iteration < node->custom1) {
		// if (iteration > 0)
		//	loop_init_iteration(exec->nodetree, nts->stack);
		// ntreeExecThreadNodes(exec, nts, data, thread);

		/* re-sample the condition output after (would-be) execution */
		condition = (ns->vec[0] > 0.0f);

		++iteration;
	}
	// loop_copy_outputs(node, in, out, exec->stack);

	ntreeReleaseThreadStack(nts);
}
/**** FOR LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
/* Execute a "for loop" node group.
 *
 * NOTE(review): this whole definition is disabled by the #if 0 above; the
 * matching #endif lies outside this chunk. The per-iteration execution calls
 * below are additionally commented out, so as written the loop only writes
 * the iteration counter into the first input socket each pass. Kept for
 * reference until the loop-node redesign. */
static void forloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
	bNodeTreeExec *exec= (bNodeTreeExec*)nodedata;
	bNodeThreadStack *nts;
	/* iteration count comes from the first input socket, truncated to int */
	int iterations= (int)in[0]->vec[0];
	bNodeSocket *sock;
	bNodeStack *ns;
	int iteration;

	/* XXX same behavior as trunk: all nodes inside group are executed.
	 * it's stupid, but just makes it work. compo redesign will do this better. */
	{
		bNode *inode;
		for (inode=exec->nodetree->nodes.first; inode; inode=inode->next)
			inode->need_exec = 1;
	}

	nts = ntreeGetThreadStack(exec, thread);

	/* "Iteration" socket */
	sock = exec->nodetree->inputs.first;
	ns = node_get_socket_stack(nts->stack, sock);

	/* XXX commented-out iteration body preserved from the original */
	// group_copy_inputs(node, in, nts->stack);
	for (iteration=0; iteration < iterations; ++iteration) {
		/* first input contains current iteration counter */
		ns->vec[0] = (float)iteration;
		ns->vec[1]=ns->vec[2]=ns->vec[3] = 0.0f;

		// if (iteration > 0)
		//	loop_init_iteration(exec->nodetree, nts->stack);
		// ntreeExecThreadNodes(exec, nts, data, thread);
	}
	// loop_copy_outputs(node, in, out, exec->stack);

	ntreeReleaseThreadStack(nts);
}