//????????????????????????????????????????????????????????????????????????
// The submaterial with a higher index "covers" the ones "below" it.
// If the first submaterial taken from the end of the array has amount == 100,
// it is all that matters. Otherwise, all submaterials up to the second
// consecutive one with amount == 100 matter.
//
bool CompositeMat::IsOutputConst
(
	ShadeContext& sc,	// describes context of evaluation
	int stdID			// must be ID_AM, etc
)
{
	Mtl *sm = NULL;
	int numSubMatOn = 0;
	int numConsec = 0;
	bool bIsConst = true;
	Interval iv;

	// Iterate through the submaterials in reverse order because that is
	// the order of their significance
	for (int i = MAX_NUM_MTLS-1; i >= 0; i--)
	{
		BOOL enabled;
		float amount;

		// The first one is always enabled
		if ( i == 0 )
			enabled = 1;
		else
			pblock2->GetValue( compmat_map_on, sc.CurTime(), enabled, iv, i-1 );

		if ( enabled )
		{
			pblock2->GetValue( compmat_mtls, sc.CurTime(), sm, iv, i );
			if ( sm != NULL )
			{
				numSubMatOn++;

				// The first one is always fully composited
				if ( i == 0 )
					amount = 100.f;
				else
					pblock2->GetValue( compmat_amount, sc.CurTime(), amount, iv, i-1 );

				if ( numSubMatOn == 1 && amount == 100.0f )
					return sm->IsOutputConst( sc, stdID );
				else
				{
					if ( amount == 100.0f )
						numConsec++;
					else
						numConsec = 0;

					bool b = sm->IsOutputConst( sc, stdID );
					bIsConst = (bIsConst && b );
					if ( !bIsConst )
						return bIsConst;
					else if ( numConsec == 2 )
						return bIsConst;
				}
			}
		}
	}
	return bIsConst;
}
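// ------------------------------------------------------------------------
// Illustrative sketch, not part of the plug-in: the early-out rule from the
// comment above (a fully opaque submaterial hides everything beneath it, and
// two consecutive fully opaque entries end the scan) applied to a plain layer
// list. "Layer" and "coveringLayers" are hypothetical stand-ins for the
// paramblock queries used by IsOutputConst().
#include <vector>
#include <cstdio>

// Illustrative stand-in for one composite entry: enabled flag and amount (0..100).
struct Layer { bool enabled; float amount; };

// Walk layers from the top (end of the array) down and return how many of them
// can still affect the output, mirroring the loop in IsOutputConst().
static int coveringLayers(const std::vector<Layer>& layers) {
	int counted = 0, consecutiveOpaque = 0;
	for (int i = (int)layers.size() - 1; i >= 0; --i) {
		if (!layers[i].enabled) continue;
		++counted;
		if (layers[i].amount == 100.0f) {
			if (counted == 1) return 1;          // topmost layer is fully opaque
			if (++consecutiveOpaque == 2) break; // everything below is hidden
		} else {
			consecutiveOpaque = 0;
		}
	}
	return counted;
}

int main() {
	std::vector<Layer> layers = { {true, 50.f}, {true, 100.f}, {true, 100.f}, {true, 30.f} };
	std::printf("%d layers matter\n", coveringLayers(layers)); // prints "3 layers matter"
	return 0;
}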
float BerconGradient::getGradientValueDist(ShadeContext& sc) {
	switch (p_normalType) {
		case 0: { // View
			return -sc.P().z; //Length(sc.OrigView()); //(sc.PointTo(sc.P(), REF_CAMERA)).z;
		}
		case 1: { // Local X
			return (sc.PointTo(sc.P(), REF_OBJECT)).x;
		}
		case 2: { // Local Y
			return (sc.PointTo(sc.P(), REF_OBJECT)).y;
		}
		case 3: { // Local Z
			return (sc.PointTo(sc.P(), REF_OBJECT)).z;
		}
		case 4: { // World X
			return (sc.PointTo(sc.P(), REF_WORLD)).x;
		}
		case 5: { // World Y
			return (sc.PointTo(sc.P(), REF_WORLD)).y;
		}
		case 6: { // World Z
			return (sc.PointTo(sc.P(), REF_WORLD)).z;
		}
		case 7: { // Camera X
			return sc.P().x; //(sc.PointTo(sc.P(), REF_CAMERA)).x;
		}
		case 8: { // Camera Y
			return sc.P().y; //(sc.PointTo(sc.P(), REF_CAMERA)).y;
		}
		case 9: { // Camera Z
			return -sc.P().z; //-(sc.PointTo(sc.P(), REF_CAMERA)).z;
		}
		case 10: { // To Object
			if (sc.InMtlEditor() || !p_node) return -sc.P().z; //(sc.PointTo(sc.P(), REF_CAMERA)).z;
			return Length((p_node->GetNodeTM(sc.CurTime())).GetTrans() - sc.PointTo(sc.P(), REF_WORLD));
		}
		case 11: { // Object Z
			if (sc.InMtlEditor() || !p_node) return -sc.P().z; //(sc.PointTo(sc.P(), REF_CAMERA)).z;
			Matrix3 tm = p_node->GetNodeTM(sc.CurTime());
			Point3 a = tm.GetTrans() - sc.PointTo(sc.P(), REF_WORLD);
			Point3 b = FNormalize(tm.GetRow(2));
			return (-DotProd(b, a) / Length(b));
		}
	}
	return 0.f;
}
void CompositeMat::PreShade(ShadeContext& sc, IReshadeFragment* pFrag)
{
	int i(0);
	Mtl *submtl = NULL;
//	BOOL enabled(FALSE);
	char texLengths[12];

	int lengthChan = pFrag->NTextures();
	pFrag->AddIntChannel(0);
	pFrag->AddIntChannel(0);
	pFrag->AddIntChannel(0);
	int nPrevTex = 3 + lengthChan;

	// preshade any submaterials
	for (i=0; i<MAX_NUM_MTLS; i++) {
		pblock2->GetValue(compmat_mtls, sc.CurTime(), submtl, FOREVER, i);
		if (submtl){
			IReshading* pReshading = (IReshading*)(submtl->GetInterface(IID_IReshading));
			if( pReshading ){
				pReshading->PreShade(sc, pFrag);
				int nTex = pFrag->NTextures();
				texLengths[i] = char( nTex - nPrevTex);
				nPrevTex = nTex;
			}
		}
	}

	int* pI = (int*)&texLengths[0];
	pFrag->SetIntChannel( lengthChan++, pI[0] );
	pFrag->SetIntChannel( lengthChan++, pI[1] );
	pFrag->SetIntChannel( lengthChan, pI[2] );
}
RGBA Noise::EvalColor(ShadeContext& sc) {
	Point3 p,dp;
	if (!sc.doMaps) return black;
	AColor c;
	if (sc.GetCache(this,c))
		return c;
	if (gbufID) sc.SetGBufferID(gbufID);
	//IPoint2 ps = sc.ScreenCoord();
	UpdateCache(sc.CurTime());		// DS 10/3/00
	xyzGen->GetXYZ(sc,p,dp);
	p /= size;
	filter = sc.filterMaps;
	float smw;
	float limlev = LimitLevel(dp,smw);
	float d = NoiseFunction(p,limlev,smw);
	RGBA c0 = mapOn[0]&&subTex[0] ? subTex[0]->EvalColor(sc): col[0];
	RGBA c1 = mapOn[1]&&subTex[1] ? subTex[1]->EvalColor(sc): col[1];
	c = texout->Filter((1.0f-d)*c0 + d*c1);
	sc.PutCache(this,c);
	return c;
}
AColor mrTwoSidedShader::EvalColor(ShadeContext& sc) {

	// Provide a good default for this (for the material editor preview)...
	// Use the front color for the top half of the screen and the back color
	// for the bottom half.
	if(m_mainPB != NULL) {
		Point2 screenUV;
		Point2 screenDUV;
		sc.ScreenUV(screenUV, screenDUV);

		// Front map is used for the top part of the image
		bool useFront = (screenUV.y > 0.5f);
		TimeValue t = sc.CurTime();
		BOOL mapOn = m_mainPB->GetInt(useFront ? kMainPID_FrontMapOn : kMainPID_BackMapOn, t);
		if(mapOn) {
			Texmap* map = m_mainPB->GetTexmap(useFront ? kMainPID_FrontMap : kMainPID_BackMap, t);
			if(map != NULL) {
				return map->EvalColor(sc);
			}
		}

		// Return the color only
		AColor col = m_mainPB->GetAColor(useFront ? kMainPID_FrontColor : kMainPID_BackColor, t);
		return col;
	}

	return AColor(0,0,0);
}
AColor BerconNoise::EvalColor(ShadeContext& sc) {
	if (!sc.doMaps) return black;
	AColor c;
	if (sc.GetCache(this,c))
		return c;
	if (gbufID) sc.SetGBufferID(gbufID);

	// UVW and Distortion
	Point3 p, dpdx, dpdy, dp;
	if(!berconXYZ.get(sc, p, dpdx, dpdy))
		return AColor(0,0,0,0);
	if (useDistortion)
		applyDistortion(sc,p);
	float nSize = (mapOn[4] && subtex[4]) ? subtex[4]->EvalMono(sc)*size : size;
	p /= nSize; dpdx /= nSize; dpdy /= nSize;
	Noise::alterUVW(p, uvwDist);

	NoiseParams np = EvalParameters(&sc);

	// Calculate noise function
	float d = sc.filterMaps ? Noise::limitedNoise(p, dpdx, dpdy, np) : Noise::limitedNoise(p, np);
	if (useCurve)
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);

	// Get colors
	RGBA c0 = mapOn[0]&&subtex[0] ? subtex[0]->EvalColor(sc): col[0];
	RGBA c1 = mapOn[1]&&subtex[1] ? subtex[1]->EvalColor(sc): col[1];
	c = texout->Filter((1.f-d)*c0 + d*c1);

	// Cache
	sc.PutCache(this,c);
	return c;
}
float BerconGradient::getGradientValueNormal(ShadeContext& sc) {
	switch (p_normalType) {
		case 0: { // View
			return -DotProd(sc.Normal(), sc.V());
		}
		case 1: { // Local X
			return (sc.VectorTo(sc.Normal(), REF_OBJECT)).x;
		}
		case 2: { // Local Y
			return (sc.VectorTo(sc.Normal(), REF_OBJECT)).y;
		}
		case 3: { // Local Z
			return (sc.VectorTo(sc.Normal(), REF_OBJECT)).z;
		}
		case 4: { // World X
			return (sc.VectorTo(sc.Normal(), REF_WORLD)).x;
		}
		case 5: { // World Y
			return (sc.VectorTo(sc.Normal(), REF_WORLD)).y;
		}
		case 6: { // World Z
			return (sc.VectorTo(sc.Normal(), REF_WORLD)).z;
		}
		case 7: { // Camera X
			return sc.Normal().x; //(sc.VectorTo(sc.Normal(), REF_CAMERA)).x;
		}
		case 8: { // Camera Y
			return sc.Normal().y; //(sc.VectorTo(sc.Normal(), REF_CAMERA)).y;
		}
		case 9: { // Camera Z
			return sc.Normal().z; //(sc.VectorTo(sc.Normal(), REF_CAMERA)).z;
		}
		case 10: { // To Object
			if (sc.InMtlEditor() || !p_node) return -DotProd(sc.Normal(), sc.V());
			return DotProd(sc.Normal(), FNormalize(sc.PointFrom((p_node->GetNodeTM(sc.CurTime())).GetTrans(),REF_WORLD) - sc.P()));
		}
		case 11: { // Object Z
			if (sc.InMtlEditor() || !p_node) return -DotProd(sc.Normal(), sc.V());
			return DotProd(sc.Normal(), FNormalize(sc.VectorFrom(p_node->GetNodeTM(sc.CurTime()).GetRow(2),REF_WORLD)));
		}
	}
	return 0.f;
}
AColor BerconWood::EvalColor(ShadeContext& sc) {
	Point3 p,dpdx,dpdy;

	if (!sc.doMaps) return black;

	// If we've already evaluated the color at this point we'll use it and stop here
	AColor c;
	if (sc.GetCache(this,c))
		return c;
	if (gbufID) sc.SetGBufferID(gbufID);

	// Evaluate parameters
	WoodParam wp = EvalParameters(sc);
	float grainA = mapOn[19]&&subtex[19]?subtex[19]->EvalMono(sc)*grainAmount:grainAmount;
	float grainF = mapOn[20]&&subtex[20]?subtex[20]->EvalMono(sc)*grainFreq:grainFreq;

	// UVW, Distortion and size
	berconXYZ.get(sc,p,dpdx,dpdy);
	if (useDistortion)
		applyDistortion(sc,p);
	float wSize = mapOn[5]&&subtex[5]?subtex[5]->EvalMono(sc)*woodSize:woodSize;
	p /= wSize; dpdx /= (wSize / 2.f); dpdy /= (wSize / 2.f);

	// Calculate wood function and grain
	Point3 gP;
	float d = sc.filterMaps? Noise::wood(p, dpdx, dpdy, gP, wp) : Noise::wood(p, gP, wp);
	float g = (grainAmount > .001f) ? Fractal::grain(gP, grainA, grainF): 0.f;

	// Get colors
	RGBA c0 = mapOn[0]&&subtex[0] ? subtex[0]->EvalColor(sc): col[0];
	RGBA c1 = mapOn[1]&&subtex[1] ? subtex[1]->EvalColor(sc): col[1];
	RGBA c2 = lockGrain ? c1: (mapOn[2]&&subtex[2] ? subtex[2]->EvalColor(sc): col[2]);

	// Apply curves
	if (useCurve)
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);

	// Calculate color
	c = (1.0f-d)*c0 + d*c1;
	c = (1.0f-g)*c + g*c2;
	c = texout->Filter(c);

	// Cache
	sc.PutCache(this,c);
	return c;
}
void M3Mat::PreShade(ShadeContext& sc, IReshadeFragment* pFrag)
{
	int i;
	IReshading* pReshading;
	TimeValue t = sc.CurTime();
	Interval valid = FOREVER;

	// get the base material value into i
	pblockMat->GetValue(100, t, i, valid );
	Mtl *sm1 = mTex[100];

	// handle no base mat
	if(sm1 == NULL) {
		return;
	}

	if(i==0||(i==1&&inRender))
	{
		for( i=0;i<100;i++)
		{
			float u;
			pblockMat->GetValue(i,t,u,valid);
			if(mTex[i]!=NULL && u!=0 && mapOn[i])
			{
				Mtl *comb = mTex[i];
				pReshading = (IReshading*)(comb->GetInterface(IID_IReshading));
				if( pReshading )
					pReshading->PreShade(sc, pFrag);
			}
		}
		pReshading = (IReshading*)(sm1->GetInterface(IID_IReshading));
		if( pReshading )
			pReshading->PreShade(sc, pFrag);
	}
	else
	{	// i == 1 && not inRender
		pReshading = (IReshading*)(sm1->GetInterface(IID_IReshading));
		if( pReshading )
			pReshading->PreShade(sc, pFrag);
	}
}
Point3 Noise::EvalNormalPerturb(ShadeContext& sc) {
	Point3 p,dp;
	if (!sc.doMaps) return Point3(0,0,0);
	if (gbufID) sc.SetGBufferID(gbufID);
	UpdateCache(sc.CurTime());		// DS 10/3/00
	xyzGen->GetXYZ(sc,p,dp);
	p /= size;
	filter = sc.filterMaps;
	float smw;
	float limlev = LimitLevel(dp,smw);
	float del,d;
	d = NoiseFunction(p,limlev,smw);
	//del = (dp.x+dp.y+dp.z)/(size*3.0f);
	del = .1f;
	Point3 np;
	Point3 M[3];
	xyzGen->GetBumpDP(sc,M);
	np.x = (NoiseFunction(p+del*M[0],limlev,smw) - d)/del;
	np.y = (NoiseFunction(p+del*M[1],limlev,smw) - d)/del;
	np.z = (NoiseFunction(p+del*M[2],limlev,smw) - d)/del;
	np = sc.VectorFromNoScale(np, REF_OBJECT);
	Texmap *sub0 = mapOn[0]?subTex[0]:NULL;
	Texmap *sub1 = mapOn[1]?subTex[1]:NULL;
	if (sub0||sub1) {
		// d((1-k)*a + k*b ) = dk*(b-a) + k*(db-da) + da
		float a,b;
		Point3 da,db;
		if (sub0) {
			a = sub0->EvalMono(sc);
			da = sub0->EvalNormalPerturb(sc);
		}
		else {
			a = Intens(col[0]);
			da = Point3(0.0f,0.0f,0.0f);
		}
		if (sub1) {
			b = sub1->EvalMono(sc);
			db = sub1->EvalNormalPerturb(sc);
		}
		else {
			b = Intens(col[1]);
			db = Point3(0.0f,0.0f,0.0f);
		}
		np = (b-a)*np + d*(db-da) + da;
	}
	else
		np *= Intens(col[1])-Intens(col[0]);
	return texout->Filter(np);
}
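// ------------------------------------------------------------------------
// Illustrative sketch, not Max SDK code: the bump vector above is a forward
// difference, np[i] ~ (f(p + del*M[i]) - f(p)) / del, i.e. the directional
// derivative of the noise along each bump-basis vector. "Vec3" and "field"
// are hypothetical stand-ins for Point3 and NoiseFunction().
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3 add(Vec3 a, Vec3 b, float s) { return { a.x + s*b.x, a.y + s*b.y, a.z + s*b.z }; }

// Any smooth scalar field works for the illustration; this one is arbitrary.
static float field(Vec3 p) { return 0.5f * (p.x*p.x + p.y*p.y - p.z); }

int main() {
	const float del = 0.1f;                     // same step size the map uses
	Vec3 p    = { 0.3f, -0.2f, 0.7f };
	Vec3 M[3] = { {1,0,0}, {0,1,0}, {0,0,1} };  // bump basis (object axes here)

	float d = field(p);
	Vec3 np;
	// np[i] approximates the directional derivative of the field along M[i]
	np.x = (field(add(p, M[0], del)) - d) / del;
	np.y = (field(add(p, M[1], del)) - d) / del;
	np.z = (field(add(p, M[2], del)) - d) / del;

	std::printf("grad ~ (%f, %f, %f)\n", np.x, np.y, np.z);
	return 0;
}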
float Noise::EvalMono(ShadeContext& sc) {
	Point3 p,dp;
	if (!sc.doMaps) return 0.0f;
	float f;
	if (sc.GetCache(this,f))
		return f;
	if (gbufID) sc.SetGBufferID(gbufID);
	UpdateCache(sc.CurTime());		// DS 10/3/00
	xyzGen->GetXYZ(sc,p,dp);
	p /= size;
	filter = sc.filterMaps;
	float smw;
	float limlev = LimitLevel(dp, smw);
	float d = NoiseFunction(p,limlev,smw);
	float c0 = mapOn[0]&&subTex[0] ? subTex[0]->EvalMono(sc): Intens(col[0]);
	float c1 = mapOn[1]&&subTex[1] ? subTex[1]->EvalMono(sc): Intens(col[1]);
	f = texout->Filter((1.0f-d)*c0 + d*c1);
	sc.PutCache(this,f);
	return f;
}
// ####################
// Color
// ####################
AColor BerconGradient::EvalColor(ShadeContext& sc) {
	// Initialize returned color
	AColor res(0.0f,0.0f,0.0f,0.0f);
	if (!sc.doMaps) return res;

	// Use cache
	if (sc.GetCache(this,res))
		return res;
	if (gbufID) sc.SetGBufferID(gbufID);

	// Function type
	float d;
	if (p_type == 0) { // UVW
		Point3 p;
		if (!berconXYZ.get(sc, p)) return res;
		d = getGradientValueUVW(p);
	} else { // Others
		d = getGradientValue(sc);
	}

	// Distortion
	if (p_disOn && p_distex)
		d += (1.f - p_distex->EvalMono(sc) * 2.f) * p_disStr;

	// Limit range
	if (!limitRange(d)) return res;

	// Curve
	if (p_curveOn)
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);

	// Get color from gradient
	res = gradient->getColor(p_reverse?1.f-d:d, sc);

	// Output
	res = texout->Filter(res);

	// Shading ready, return results
	sc.PutCache(this,res);
	return res;
}
float M3Mat::EvalDisplacement(ShadeContext& sc)
{
	int i;
	TimeValue t = sc.CurTime();
	Interval valid = FOREVER;
	pblockMat->GetValue(100,t,i,FOREVER);
	Mtl *sm1 = mTex[100];

	int counter = 0;
	float final = 0.0f;

	// handle no base mat
	if(sm1==NULL) {
		return 0.0f;
	}

	if(i==0||(i==1&&inRender))
	{
		float u[100];
		for( i=0;i<100;i++)
		{
			pblockMat->GetValue(i,t,u[i],valid);
			if(mTex[i]!=NULL&&u[i]!=0&&mapOn[i])
			{
				Mtl *comb = mTex[i];
				float mI = u[i]/100.0f;
				final += (comb->EvalDisplacement(sc)*mI);
				counter++;
			}
void plPassMtl::ShadeWithBackground(ShadeContext &sc, Color background, bool useVtxAlpha /* = true */)
{
#if 1 // old
#if 0
	Color lightCol,rescol, diffIllum0;
	RGBA mval;
	Point3 N0,P;
	BOOL bumped = FALSE;
	int i;

	if (gbufID)
		sc.SetGBufferID(gbufID);

	if (sc.mode == SCMODE_SHADOW) {
		float opac = 0.0;
		for (i=0; i < NumSubTexmaps(); i++) {
			if (SubTexmapOn(i)) {
				hsMaxLayerBase *hsmLay = (hsMaxLayerBase *)GetSubTexmap(i);
				opac += hsmLay->GetOpacity(t);
			}
		}

		float f = 1.0f - opac;
		sc.out.t = Color(f,f,f);
		return;
	}

	N0 = sc.Normal();
	P = sc.P();
#endif

	TimeValue t = sc.CurTime();

	Color color(0, 0, 0);
	float alpha = 0.0;

	// Evaluate Base layer
	Texmap *map = fLayersPB->GetTexmap(kPassLayBase);
	if (map && ( map->ClassID() == LAYER_TEX_CLASS_ID
				|| map->ClassID() == STATIC_ENV_LAYER_CLASS_ID ) )
	{
		plLayerTex *layer = (plLayerTex*)map;
		AColor evalColor = layer->EvalColor(sc);

		color = evalColor;
		alpha = evalColor.a;
	}

	// Evaluate Top layer, if it's on
	if (fLayersPB->GetInt(kPassLayTopOn))
	{
		Texmap *map = fLayersPB->GetTexmap(kPassLayTop);
		if (map && ( map->ClassID() == LAYER_TEX_CLASS_ID
					|| map->ClassID() == STATIC_ENV_LAYER_CLASS_ID
					|| map->ClassID() == ANGLE_ATTEN_LAYER_CLASS_ID) )
		{
			plPlasmaMAXLayer *layer = (plPlasmaMAXLayer*)map;
			AColor evalColor = layer->EvalColor(sc);

			// Blend layers
			if( !layer->DiscardColor() )
			{
				int blendType = fLayersPB->GetInt(kPassLayBlend);
				switch (blendType)
				{
				case kBlendAdd:
					color += evalColor * evalColor.a;
					break;
				case kBlendAlpha:
					color = (1.0f - evalColor.a) * color + evalColor.a * evalColor;
					break;
				case kBlendMult:
					color *= evalColor;
					break;
				default:	// No blend...
					color = evalColor;
					break;
				}
			}

			if( !layer->DiscardAlpha() )
			{
				int alphaType = fLayersPB->GetInt(kPassLayOutputBlend);
				switch( alphaType )
				{
				case kAlphaMultiply:
					alpha *= evalColor.a;
					break;
				case kAlphaAdd:
					alpha += evalColor.a;
					break;
				case kAlphaDiscard:
				default:
					break;
				}
			}
		}
	}

#if 1
	AColor black;
	black.Black();
	AColor white;
	white.White();

	SIllumParams ip;
	if (fBasicPB->GetInt(kPassBasEmissive))
	{
		// Emissive objects don't get shaded
		ip.diffIllum = fBasicPB->GetColor(kPassBasColorAmb, t) * color;
		ip.diffIllum.ClampMinMax();
		ip.specIllum = black;
	}
	else
	{
		//
		// Shading setup
		//

		// Setup the parameters for the shader
		ip.amb = fBasicPB->GetColor(kPassBasColorAmb, t);
		ip.diff = fBasicPB->GetColor(kPassBasColor, t) * color;
		ip.diffIllum = black;
		ip.specIllum = black;
		ip.N = sc.Normal();
		ip.V = sc.V();

		//
		// Specularity
		//
		if (fBasicPB->GetInt(kPassBasUseSpec, t))
		{
			ip.sh_str = 1.f;
			ip.spec = fBasicPB->GetColor( kPassBasSpecColor, t );
			ip.ph_exp = (float)pow(2.0f,float(fBasicPB->GetInt(kPassBasShine, t)) / 10.0f);
			ip.shine = float(fBasicPB->GetInt(kPassBasShine, t)) / 100.0f;
		}
		else
		{
			ip.spec = black;
			ip.sh_str = 0;
			ip.ph_exp = 0;
			ip.shine = 0;
		}

		ip.softThresh = 0;

		//
		// Do the shading
		Shader *myShader = GetShader(SHADER_BLINN);
		myShader->Illum(sc, ip);

		// Override shader parameters
		if (fAdvPB->GetInt(kPBAdvNoShade))
		{
			ip.diffIllum = black;
			ip.specIllum = black;
		}
		if (fAdvPB->GetInt(kPBAdvWhite))
		{
			ip.diffIllum = white;
			ip.specIllum = black;
		}

		ip.specIllum.ClampMinMax();
		ip.diffIllum = ip.amb * sc.ambientLight + ip.diff * ip.diffIllum;
		ip.diffIllum.ClampMinMax();
	}

//	AColor returnColor = AColor(opac * ip.diffIllum + ip.specIllum, opac)
#endif

	// Get opacity and combine with alpha
	float opac = float(fBasicPB->GetInt(kPassBasOpacity, t)) / 100.0f;
	alpha *= opac;

	float vtxAlpha = 1.0f;
	if (useVtxAlpha && GetOutputBlend() == plPassMtlBase::kBlendAlpha)
	{
		Point3 p;
		GetInterpVtxValue(MAP_ALPHA, sc, p);
		vtxAlpha = p.x;
	}
	alpha *= vtxAlpha;

	// MAX will do the additive/alpha/no blending for us based on what Requirements()
	// we tell it. However, since MAX's formula is bgnd*sc.out.t + sc.out.c,
	// we have to multiply our output color by the alpha.
	// If we ever need a more complicated blending function, you can request the
	// background color via Requirements() (otherwise it's just black) and then do
	// the blending yourself; however, if the transparency isn't set, the shadows
	// will be opaque, so be careful.
	Color outC = ip.diffIllum + ip.specIllum;

	sc.out.c = ( outC * alpha );
	sc.out.t = Color( 1.f - alpha, 1.f - alpha, 1.f - alpha );

#endif
}
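// ------------------------------------------------------------------------
// The comment above states the renderer's compositing contract:
// final = background * sc.out.t + sc.out.c, so an alpha-blended material
// premultiplies its color by alpha and writes (1 - alpha) into the
// transparency channel. A minimal sketch of that arithmetic in plain C++
// ("Rgb" and "compositeOverBackground" are hypothetical names, not SDK types):
#include <cstdio>

struct Rgb { float r, g, b; };

// Composite one shaded sample over a background the way described above:
// final = background * out_t + out_c, with out_c already premultiplied by
// alpha and out_t = 1 - alpha.
static Rgb compositeOverBackground(Rgb shaded, float alpha, Rgb background) {
	Rgb out_c = { shaded.r * alpha, shaded.g * alpha, shaded.b * alpha }; // premultiplied color
	float out_t = 1.0f - alpha;                                           // uniform transparency
	return { background.r * out_t + out_c.r,
	         background.g * out_t + out_c.g,
	         background.b * out_t + out_c.b };
}

int main() {
	Rgb shaded = { 1.0f, 0.5f, 0.25f };
	Rgb bgnd   = { 0.0f, 0.0f, 1.0f };
	Rgb final  = compositeOverBackground(shaded, 0.75f, bgnd);
	std::printf("%.3f %.3f %.3f\n", final.r, final.g, final.b); // roughly 0.750 0.375 0.438
	return 0;
}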
Point3 BerconNoise::EvalNormalPerturb(ShadeContext& sc) {
	if (!sc.doMaps) return Point3(0,0,0);
	if (gbufID) sc.SetGBufferID(gbufID);

	// UVW and Distortion
	Point3 p, dpdx, dpdy;
	Point3 M[3];
	if (!berconXYZ.get(sc, p, dpdx, dpdy, M)) return Point3(0,0,0);
	if (useDistortion)
		applyDistortion(sc,p);
	float nSize = (mapOn[4] && subtex[4]) ? subtex[4]->EvalMono(sc)*size : size;
	p /= nSize;
	Noise::alterUVW(p, uvwDist);

	NoiseParams np = EvalParameters(&sc);

	// Vector
	Point3 normal;
	float d = Noise::limitedNoise(p, np);
	if (useCurve) {
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);
		normal.x = (curve->GetControlCurve(0)->GetValue(sc.CurTime(), Noise::limitedNoise(p+DELTA*M[0], np)) - d) / DELTA;
		normal.y = (curve->GetControlCurve(0)->GetValue(sc.CurTime(), Noise::limitedNoise(p+DELTA*M[1], np)) - d) / DELTA;
		normal.z = (curve->GetControlCurve(0)->GetValue(sc.CurTime(), Noise::limitedNoise(p+DELTA*M[2], np)) - d) / DELTA;
	} else {
		normal.x = (Noise::limitedNoise(p+DELTA*M[0], np) - d) / DELTA;
		normal.y = (Noise::limitedNoise(p+DELTA*M[1], np) - d) / DELTA;
		normal.z = (Noise::limitedNoise(p+DELTA*M[2], np) - d) / DELTA;
	}
	normal = -sc.VectorFromNoScale(normal, REF_OBJECT);

	// Eval sub maps
	float f1, f2;
	Point3 v1, v2;
	bool maps = false;
	if (subtex[0]) {
		f1 = subtex[0]->EvalMono(sc);
		v1 = subtex[0]->EvalNormalPerturb(sc);
		maps = true;
	} else {
		f1 = Intens(col[0]);
		v1 = Point3(0.f, 0.f, 0.f);
	}
	if (subtex[1]) {
		f2 = subtex[1]->EvalMono(sc);
		v2 = subtex[1]->EvalNormalPerturb(sc);
		maps = true;
	} else {
		f2 = Intens(col[1]);
		v2 = Point3(0.f, 0.f, 0.f);
	}

	// Calculate vector
	if (maps)
		normal = (f2-f1)*normal + d*v2 + (1.f-d)*v1;
	else
		normal *= f2 - f1;

	return texout->Filter(normal); // Does this filter actually do something?
}
void M3Mat::Shade(ShadeContext& sc)
{
	int i;
	TimeValue t = sc.CurTime();
	Interval valid = FOREVER;
	pblockMat->GetValue(100,t,i,FOREVER);
	Mtl *sm1 = mTex[100];

	float total(0.0f);
	ShadeOutput sFinal( sc.out.nElements ); // get nElements correctly
	ShadeOutput sDatabase[100];
//	for( i = 0; i < 100; ++i )
//		sDatabase[i] = sFinal;
	float u[100];

	// handle no base mat
	if(!sm1) {
		sc.ResetOutput();
		sc.out.c = black;
		sc.out.t = black;
		return;
	}

	if(i==0||(i==1&&inRender))
	{
		for( i=0;i<100;i++)
		{
			pblockMat->GetValue(i,t,u[i],valid);
			u[i] /= 100.0f;

			if(mTex[i]!=NULL&&u[i]!=0&&mapOn[i])
			{
				Mtl *comb = mTex[i];
				comb->Shade(sc);
				sDatabase[i] = sc.out;
				sc.ResetOutput();
				total += u[i];
			}
		}

		sc.ResetOutput();
		sm1->Shade(sc);

		sFinal.c = black;
		sFinal.t = black;
		sFinal.ior = 0.0f;

		for( i=0;i<100;i++)
		{
			if(mTex[i]!=NULL&&u[i]!=0&&mapOn[i])
			{
				sc.out.flags |= sDatabase[i].flags;
				if(total>1.0f){
					sFinal.c += u[i]/total * sDatabase[i].c;
					sFinal.t += u[i]/total * sDatabase[i].t;
					sFinal.ior += u[i]/total * sDatabase[i].ior;
				}
				else{
					sFinal.c += u[i] * sDatabase[i].c;
					sFinal.t += u[i] * sDatabase[i].t;
					sFinal.ior += u[i] * sDatabase[i].ior;
				}
			}
		}

		if(total) {
			sc.out.MixIn(sFinal, 1.0f-total);
		}
	}
	else
	{
		sm1->Shade(sc);
	}
}
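// ------------------------------------------------------------------------
// Sketch of the blend performed by M3Mat::Shade on a single scalar channel,
// assuming the same rules as above: targets are weighted by u[i]
// (renormalized when the weights sum past 1) and the base result is mixed in
// with the leftover weight via the MixIn formula out = (1-f)*a + f*out, with
// f = 1-total. Plain C++; "blendChannel" is a hypothetical helper, not SDK code.
#include <vector>
#include <cstdio>

static float blendChannel(float base, const std::vector<float>& targets,
                          const std::vector<float>& weights) {
	float total = 0.f, accum = 0.f;
	for (size_t i = 0; i < targets.size(); ++i) total += weights[i];
	// Weighted sum of the target values (sFinal in the code above).
	for (size_t i = 0; i < targets.size(); ++i)
		accum += (total > 1.f ? weights[i] / total : weights[i]) * targets[i];
	if (total <= 0.f) return base;
	// MixIn(a, f) computes (1-f)*a + f*out; here a = accum, out = base, f = 1-total.
	float f = 1.f - total;
	return (1.f - f) * accum + f * base;
}

int main() {
	// Base shaded value 0.2, two morph targets at weights 0.6 and 0.3.
	float result = blendChannel(0.2f, {0.8f, 0.4f}, {0.6f, 0.3f});
	std::printf("%.3f\n", result); // total = 0.9 -> 0.9*0.6 + 0.1*0.2 = 0.560
	return 0;
}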
void M3Mat::PostShade(ShadeContext& sc, IReshadeFragment* pFrag, int& nextTexIndex, IllumParams* ip)
{
	int i;
	IReshading* pReshading;
	TimeValue t = sc.CurTime();
	Interval valid = FOREVER;
	pblockMat->GetValue(100,t,i,FOREVER);
	Mtl *sm1 = mTex[100];

	float total(0.0f);
	ShadeOutput sDatabase[100];
	float u[100];
	ShadeOutput sFinal;

	// handle no base mat
	if(!sm1) {
		sc.ResetOutput();
		sc.out.c = black;
		sc.out.t = black;
		return;
	}

	if(i==0 || (i==1 && inRender) )
	{
		for( i=0; i<100; i++)
		{
			pblockMat->GetValue(i,t,u[i],valid);
			u[i] /= 100.0f;

			if( mTex[i]!=NULL && u[i]!=0 && mapOn[i] )
			{
				Mtl *comb = mTex[i];
				pReshading = (IReshading*)(comb->GetInterface(IID_IReshading));
				if( pReshading )
					pReshading->PostShade(sc, pFrag, nextTexIndex, ip );
				sDatabase[i] = sc.out;
				sc.ResetOutput();
				total += u[i];
			}
		}

		sc.ResetOutput();
		pReshading = (IReshading*)(sm1->GetInterface(IID_IReshading));
		if( pReshading )
			pReshading->PostShade(sc, pFrag, nextTexIndex, ip );

		sFinal.c = black;
		sFinal.t = black;
		sFinal.ior = 0.0f;

		for( i=0;i<100;i++)
		{
			if(mTex[i]!=NULL && u[i]!=0 && mapOn[i])
			{
				sc.out.flags |= sDatabase[i].flags;
				if(total>1.0f){
					sFinal.c += u[i]/total * sDatabase[i].c;
					sFinal.t += u[i]/total * sDatabase[i].t;
					sFinal.ior += u[i]/total * sDatabase[i].ior;
				}
				else{
					sFinal.c += u[i] * sDatabase[i].c;
					sFinal.t += u[i] * sDatabase[i].t;
					sFinal.ior += u[i] * sDatabase[i].ior;
				}
			}
		}

		if(total) {
			sc.out.MixIn(sFinal, 1.0f-total);
		}
	}
	else
	{
		pReshading = (IReshading*)(sm1->GetInterface(IID_IReshading));
		if( pReshading )
			pReshading->PostShade(sc, pFrag, nextTexIndex, ip );
	}
}
// if this function changes, please also check SupportsReShading, PreShade and PostShade
// end - ke/mjm - 03.16.00 - merge reshading code
// [attilas|29.5.2000] if this function changes, please also check EvalColorStdChannel
void CompositeMat::Shade(ShadeContext& sc)
{
	Mtl *sm1 = NULL;
	int id =0;
//	float gamount;

	if (gbufID){
		sc.SetGBufferID(gbufID);
		id = gbufID;
	}

	Interval iv;
	int first = 1;
	ShadeOutput out1;
	int nEles = sc.NRenderElements();

	for (int i = 0; i < MAX_NUM_MTLS; i++){
		BOOL enabled;
		float amount;
		if (i==0)
			enabled = 1;
		else
			pblock2->GetValue(compmat_map_on,sc.CurTime(),enabled,iv,i-1);

		if (enabled){
			pblock2->GetValue(compmat_mtls,sc.CurTime(),sm1,iv,i);
			if (sm1 != NULL){
				if (i==0)
					amount = 100.f;
				else
					pblock2->GetValue(compmat_amount,sc.CurTime(),amount,iv,i-1);
				amount = amount*0.01f;

				int type;
				if (i==0)
					type = 2;
				else
					pblock2->GetValue(compmat_type,sc.CurTime(),type,iv,i-1);

				if (first ==1){ // first material
					first = 0;
					sm1->Shade(sc);	// sc.out already reset for first
					out1 = sc.out;
					if (type == 0){
						out1.t.r += 1.0f-amount;
						out1.t.g += 1.0f-amount;
						out1.t.b += 1.0f-amount;
						out1.c *= amount;
						out1.t.ClampMinMax();
						// render elements
						for( int i = 0; i < nEles; ++i )
							out1.elementVals[i] *= amount;
					}
					out1.ior *= amount;
//					gamount = 1.0f-(out1.t.r + out1.t.g + out1.t.b)/3.0f;
				}
				else { // not first material
//					pblock2->GetValue(compmat_mtls,sc.CurTime(),sm2,iv,i);
//					sc.out.ior = s*a.ior + f*ior;
//					if (f<=0.5f) gbufId = a.gbufId;
					sc.ResetOutput();
					sm1->Shade(sc);
					ShadeOutput out2 = sc.out;

					if (type == 0){ // additive
						out2.t.r += 1.0f-amount;
						out2.t.g += 1.0f-amount;
						out2.t.b += 1.0f-amount;
						out2.c *= amount;
						out2.t.ClampMinMax();
						out2.ior *= amount;
						float f1 = 1.0f-(out1.t.r + out1.t.g + out1.t.b)/3.0f;
						float f2 = 1.0f-(out2.t.r + out2.t.g + out2.t.b)/3.0f;
						out1.c = out1.c *(1.0f-f2) + out2.c * (f1);
						out1.t = out1.t- (1.0f-out2.t);
						out1.ior = out1.ior + out2.ior;
//						if (f2 > gamount) {
//							gamount = f2;
//							out1.gbufId = out1.gbufId;	//?? ke 6.13.00
//						}
						out1.t.ClampMinMax();
						// render elements
						for( int i = 0; i < nEles; ++i )
							out1.elementVals[i] = out1.elementVals[i] * (1.0f-f2) + out2.elementVals[i] * f1;
					}
					else if (type == 1) { // subtractive
						//NB: as of 6.13.00 this is SAME as additive case
						out2.t.r += 1.0f-amount;
						out2.t.g += 1.0f-amount;
						out2.t.b += 1.0f-amount;
						out2.c *= amount;
						out2.t.ClampMinMax();
						out2.ior *= amount;
						float f1 = 1.0f-(out1.t.r + out1.t.g + out1.t.b)/3.0f;
						float f2 = 1.0f-(out2.t.r + out2.t.g + out2.t.b)/3.0f;
						out1.c = out1.c *(1.0f-f2) - out2.c * f1;
						out1.t = out1.t- (1.0f-out2.t);
						out1.ior = out1.ior + out2.ior;
//						if (f2 > gamount){
//							gamount = f2;
//							out1.gbufId = out1.gbufId;	//?? ke 6.13.00
//						}
						out1.t.ClampMinMax();
						// render elements
						for( int i = 0; i < nEles; ++i )
							out1.elementVals[i] = out1.elementVals[i] * (1.0f-f2) + out2.elementVals[i] * f1;
					}
					else { //mix
						// mixIn handles render elements
						out1.MixIn(out2,1.0f-amount);
					}
				}// end, not first mtl
			} // end, sm1 not null
		}// end, enabled
	} // end, for each material

	sc.out = out1;
}
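// ------------------------------------------------------------------------
// Sketch of the additive branch (type == 0) above on plain floats: the new
// layer is first faded toward transparent by its amount, then the two colors
// are cross-weighted by the opacities f1/f2 derived from the average
// transparency. "Rgb", "compositeAdd" and "opacityOf" are illustrative names
// only; ior and render elements are omitted.
#include <algorithm>
#include <cstdio>

struct Rgb { float r, g, b; };

static float clamp01(float v) { return std::min(1.f, std::max(0.f, v)); }
static float opacityOf(const Rgb& t) { return 1.f - (t.r + t.g + t.b) / 3.f; } // the f1/f2 above

// Additive composite of a second layer (c2, t2, amount in 0..1) over the
// accumulated result (c1, t1), mirroring the type == 0 branch of Shade().
static void compositeAdd(Rgb& c1, Rgb& t1, Rgb c2, Rgb t2, float amount) {
	// Fade layer 2 toward fully transparent by (1 - amount).
	t2.r = clamp01(t2.r + 1.f - amount);
	t2.g = clamp01(t2.g + 1.f - amount);
	t2.b = clamp01(t2.b + 1.f - amount);
	c2.r *= amount; c2.g *= amount; c2.b *= amount;

	float f1 = opacityOf(t1), f2 = opacityOf(t2);
	// Cross-weight the colors by the other layer's opacity, accumulate transparency.
	c1.r = c1.r * (1.f - f2) + c2.r * f1;
	c1.g = c1.g * (1.f - f2) + c2.g * f1;
	c1.b = c1.b * (1.f - f2) + c2.b * f1;
	t1.r = clamp01(t1.r - (1.f - t2.r));
	t1.g = clamp01(t1.g - (1.f - t2.g));
	t1.b = clamp01(t1.b - (1.f - t2.b));
}

int main() {
	Rgb c1 = {0.8f, 0.2f, 0.1f}, t1 = {0.f, 0.f, 0.f};    // opaque base
	Rgb c2 = {0.1f, 0.1f, 0.9f}, t2 = {0.5f, 0.5f, 0.5f}; // half-transparent layer at 50%
	compositeAdd(c1, t1, c2, t2, 0.5f);
	std::printf("c1 = %.3f %.3f %.3f\n", c1.r, c1.g, c1.b); // 0.850 0.250 0.550
	return 0;
}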
void CompositeMat::PostShade(ShadeContext& sc, IReshadeFragment* pFrag, int& nextTexIndex, IllumParams* )
{
	int i(0), type(2);
	Mtl* submtl = NULL;
	BOOL enabled(TRUE);
	ShadeOutput out1, out2;
	float amount(100.0f); // , gamount(0.0f);
	char texLengths[12];

	int* pI = (int*)&texLengths[0];
	pI[0] = pFrag->GetIntChannel(nextTexIndex++);
	pI[1] = pFrag->GetIntChannel(nextTexIndex++);
	pI[2] = pFrag->GetIntChannel(nextTexIndex++);

	// postshade any submaterials
	for ( i=0; i<MAX_NUM_MTLS; i++)
	{
		if( i>0 )
			pblock2->GetValue(compmat_map_on, sc.CurTime(), enabled, FOREVER, i-1);
		else
			enabled = TRUE;

		pblock2->GetValue(compmat_mtls, sc.CurTime(), submtl, FOREVER, i);

		if ( enabled && submtl ){
			if( i > 0 ){
				pblock2->GetValue(compmat_amount, sc.CurTime(), amount, FOREVER, i-1);
				pblock2->GetValue(compmat_type, sc.CurTime(), type, FOREVER, i-1);
			}
			amount = amount * 0.01f;

			sc.ResetOutput();
			IReshading* pReshading = (IReshading*)(submtl->GetInterface(IID_IReshading));
			if( pReshading )
				pReshading->PostShade(sc, pFrag, nextTexIndex);

			// first check for base material
			if( i > 0 )
			{
				// not base material, composite the next material
				out2 = sc.out;

				if (type == 0) // additive
				{
					out2.t.r += 1.0f-amount;
					out2.t.g += 1.0f-amount;
					out2.t.b += 1.0f-amount;
					out2.c *= amount;
					out2.t.ClampMinMax();
					out2.ior *= amount;
					float f1 = 1.0f - (out1.t.r + out1.t.g + out1.t.b) / 3.0f;
					float f2 = 1.0f - (out2.t.r + out2.t.g + out2.t.b) / 3.0f;
					out1.c = out1.c * (1.0f-f2) + out2.c * f1;
					out1.t = out1.t - (1.0f-out2.t);
					out1.ior = out1.ior + out2.ior;
					out1.t.ClampMinMax();
				}
				else if (type == 1) // subtractive
				{
					out2.t.r += 1.0f-amount;
					out2.t.g += 1.0f-amount;
					out2.t.b += 1.0f-amount;
					out2.c *= amount;
					out2.t.ClampMinMax();
					out2.ior *= amount;
					float f1 = 1.0f - (out1.t.r + out1.t.g + out1.t.b) / 3.0f;
					float f2 = 1.0f - (out2.t.r + out2.t.g + out2.t.b) / 3.0f;
					out1.c = out1.c * (1.0f-f2) - out2.c * f1;
					out1.t = out1.t - (1.0f-out2.t);
					out1.ior = out1.ior + out2.ior;
					out1.t.ClampMinMax();
				}
				else //mix
				{
					out1.MixIn(out2, 1.0f-amount);
				}
			}// end, not base mtl
			else
			{
				// base material.
				out1 = sc.out;
				out1.ior *= amount;
			}
		}// end, if has submtl & enabled
		else {
			nextTexIndex += texLengths[i];
		}
	}// end, for each mtl

	sc.out = out1;
}
//????????????????????????????????????????????????????????????????????????
// Evaluates the material on a single texmap channel.
//
bool CompositeMat::EvalMonoStdChannel
(
	ShadeContext& sc,	// describes context of evaluation
	int stdID,			// must be ID_AM, etc
	float& outVal		// output var
)
{
	Mtl *sm1 = NULL;
	int id =0;
	Interval iv;
	int first = 1;
	float val1 = 0.0f;

	for (int i = 0; i < MAX_NUM_MTLS; i++)
	{
		BOOL enabled;
		float amount;

		// The first one is always enabled
		if ( i == 0 )
			enabled = 1;
		else
			pblock2->GetValue( compmat_map_on, sc.CurTime(), enabled, iv, i-1 );

		if ( enabled )
		{
			pblock2->GetValue( compmat_mtls, sc.CurTime(), sm1, iv, i );
			if ( sm1 != NULL )
			{
				// The first one is always fully composited
				if ( i == 0 )
					amount = 100.f;
				else
					pblock2->GetValue( compmat_amount, sc.CurTime(), amount, iv, i-1 );
				amount = amount*0.01f;

				int type;
				// The first one is mixed in
				if ( i == 0 )
					type = 2;
				else
					pblock2->GetValue( compmat_type, sc.CurTime(), type, iv, i-1 );

				// [attilas|29.5.2000] I don't understand why the i == 0 test
				// is not used to ensure special treatment for the base material
				if ( first == 1 )
				{
					first = 0;
					if ( !sm1->EvalMonoStdChannel(sc, stdID, outVal) )
						return false;
					val1 = outVal;
					// [attilas|29.5.2000] Why is this needed?
					if (type == 0)
						val1 *= amount;
				}
				else
				{
					outVal = 0.0f;
					if ( !sm1->EvalMonoStdChannel(sc, stdID, outVal) )
						return false;
					float val2 = outVal;

					if ( type == 0 ) // additive
					{
						val2 *= amount;
						val1 += val2;
					}
					else if ( type == 1 ) // subtractive
					{
						val2 *= amount;
						val1 -= val2;
					}
					else //mix
					{
						// ShadeOutput::MixIn prototype and the way it's called in CMtl::Shade
						// s.out = (1-f)*a + f*s.out;
						// void ShadeOutput::MixIn(ShadeOutput &a, float f)
						// out1.MixIn(out2,1.0f-amount);
						val1 = amount*val2 + (1.0f - amount)*val1;
					}
				}
			}
		}
	}

	outVal = val1;
	return true;
}
void plParticleMtl::ShadeWithBackground(ShadeContext &sc, Color background)
{
#if 1
	TimeValue t = sc.CurTime();

	Color color(0, 0, 0);
	float alpha = 0.0;

	// Evaluate Base layer
	Texmap *map = fBasicPB->GetTexmap(kTexmap);
	if (map && map->ClassID() == LAYER_TEX_CLASS_ID)
	{
		plLayerTex *layer = (plLayerTex*)map;
		AColor evalColor = layer->EvalColor(sc);

		color = evalColor;
		alpha = evalColor.a;
	}

#if 1
	AColor black;
	black.Black();
	AColor white;
	white.White();

	SIllumParams ip;
	if( fBasicPB->GetInt( kNormal ) == kEmissive )
	{
		// Emissive objects don't get shaded
		ip.diffIllum = fBasicPB->GetColor(kColorAmb, t) * color;
		ip.diffIllum.ClampMinMax();
		ip.specIllum = black;
	}
	else
	{
		//
		// Shading setup
		//

		// Setup the parameters for the shader
		ip.amb = black;
		ip.diff = fBasicPB->GetColor(kColor, t) * color;
		ip.spec = white;
		ip.diffIllum = black;
		ip.specIllum = black;
		ip.N = sc.Normal();
		ip.V = sc.V();

		//
		// Specularity
		//
		ip.sh_str = 0;
		ip.ph_exp = 0;
		ip.shine = 0;
		ip.softThresh = 0;

		// Do the shading
		Shader *myShader = GetShader(SHADER_BLINN);
		myShader->Illum(sc, ip);

		ip.diffIllum.ClampMinMax();
		ip.specIllum.ClampMinMax();
		ip.diffIllum = ip.amb * sc.ambientLight + ip.diff * ip.diffIllum;
	}

//	AColor returnColor = AColor(opac * ip.diffIllum + ip.specIllum, opac)
#endif

	// Get opacity and combine with alpha
	float opac = float(fBasicPB->GetInt(kOpacity, t)) / 100.0f;
	//float opac = 1.0f;
	alpha *= opac;

	// MAX will do the additive/alpha/no blending for us based on what Requirements()
	// we tell it. However, since MAX's formula is bgnd*sc.out.t + sc.out.c,
	// we have to multiply our output color by the alpha.
	// If we ever need a more complicated blending function, you can request the
	// background color via Requirements() (otherwise it's just black) and then do
	// the blending yourself; however, if the transparency isn't set, the shadows
	// will be opaque, so be careful.
	Color outC = ip.diffIllum + ip.specIllum;

	sc.out.c = ( outC * alpha );
	sc.out.t = Color( 1.f - alpha, 1.f - alpha, 1.f - alpha );
#endif
}
//????????????????????????????????????????????????????????????????????????
// Evaluates the material on a single texmap channel.
//
// Note: Channels are added, subtracted or mixed without taking into
// account the opacity of the materials
//
bool CompositeMat::EvalColorStdChannel
(
	ShadeContext& sc,	// describes context of evaluation
	int stdID,			// must be ID_AM, etc
	Color& outClr		// output var
)
{
	Mtl *sm1 = NULL;
	int id =0;
	Interval iv;
	int first = 1;
	Color c1;
	Color t1; // transparency color (to mimic the logic of Shade())
	c1.Black();
	t1.Black();

	for (int i = 0; i < MAX_NUM_MTLS; i++){
		BOOL enabled;
		float amount;

		// The first one is always enabled
		if ( i == 0 )
			enabled = 1;
		else
			pblock2->GetValue( compmat_map_on, sc.CurTime(), enabled, iv, i-1 );

		if ( enabled ){
			pblock2->GetValue( compmat_mtls, sc.CurTime(), sm1, iv, i );
			if ( sm1 != NULL )
			{
				// The first one is always fully composited
				if ( i == 0 )
					amount = 100.f;
				else
					pblock2->GetValue( compmat_amount, sc.CurTime(), amount, iv, i-1 );
				amount *= 0.01f;

				int type;
				// The first one is mixed in
				if ( i == 0 )
					type = 2;
				else
					pblock2->GetValue( compmat_type, sc.CurTime(), type, iv, i-1 );

				// [attilas|29.5.2000] I don't understand why the i == 0 test
				// is not used to ensure special treatment for the base material
				if ( first == 1 ){
					first = 0;
					if ( !sm1->EvalColorStdChannel(sc, stdID, c1) )
						return false;
					if ( !sm1->EvalColorStdChannel(sc, ID_FI, t1) )
						t1.Black();
					// [attilas|29.5.2000] Why is this needed?
					if (type == 0){
						t1.r += 1.0f - amount;
						t1.g += 1.0f - amount;
						t1.b += 1.0f - amount;
						c1 *= amount;
						t1.ClampMinMax();
					}
				}
				else{
					Color c2(0.0f, 0.0f, 0.0f), t2(0.0f, 0.0f, 0.0f);
					if ( !sm1->EvalColorStdChannel(sc, stdID, c2) )
						return false;
					if ( !sm1->EvalColorStdChannel(sc, ID_FI, t2) )
						t2.Black();

					if ( type == 0 ){ // additive
						c2 *= 1.0f - t2;
						t2.r += 1.0f - amount;
						t2.g += 1.0f - amount;
						t2.b += 1.0f - amount;
						c2 *= amount;
						t2.ClampMinMax();
						float f1 = 1.0f - (t1.r + t1.g + t1.b) / 3.0f;
						float f2 = 1.0f - (t2.r + t2.g + t2.b) / 3.0f;
						c1 = c1 * (1.0f-f2) + c2 * f1;
						t1 = t1 - (1.0f-t2);
						t1.ClampMinMax();
					}
					else if ( type == 1 ){ // subtractive
						c2 *= 1.0f - t2;
						t2.r += 1.0f - amount;
						t2.g += 1.0f - amount;
						t2.b += 1.0f - amount;
						c2 *= amount;
						t2.ClampMinMax();
						float f1 = 1.0f - (t1.r + t1.g + t1.b) / 3.0f;
						float f2 = 1.0f - (t2.r + t2.g + t2.b) / 3.0f;
						c1 = c1 * (1.0f-f2) - c2 * f1;
						t1 = t1 - (1.0f-t2);
						t1.ClampMinMax();
					}
					else{ //mix
						// copies the behaviour of ShadeOutput::MixIn()
						float f = 1.0f - amount;
						if(f <= 0.0f)
							c1 = c2;
						else if(f < 1.0f){
							float s = 1.0f - f;
							c1 = s*c2 + f*c1;
							t1 = s*t2 + f*t1;
						}
					}
				}
			}
		}
	}

	outClr = stdID == ID_FI ? t1 : c1;
	return true;
}
float CompositeMat::EvalDisplacement(ShadeContext& sc)
{
	int first = 1;
	float disp = 0.0f;
	Interval iv;
	Mtl *sm1 = NULL;

	for (int i = 0; i < MAX_NUM_MTLS; i++)
	{
		BOOL enabled;
		float amount;
		if (i==0)
			enabled = 1;
		else
			pblock2->GetValue(compmat_map_on,sc.CurTime(),enabled,iv,i-1);

		if (enabled)
		{
			pblock2->GetValue(compmat_mtls,sc.CurTime(),sm1,iv,i);
			if (sm1 != NULL)
			{
				if (i==0)
					amount = 100.f;
				else
					pblock2->GetValue(compmat_amount,sc.CurTime(),amount,iv,i-1);
				amount = amount*0.01f;

				int type;
				if (i==0)
					type = 0;
				else
					pblock2->GetValue(compmat_type,sc.CurTime(),type,iv,i-1);

				if (first ==1)
				{
					first = 0;
					float d2 = sm1->EvalDisplacement(sc);
					disp = d2;
				}
				else
				{
					if (type == 0) // additive
					{
						float d2 = sm1->EvalDisplacement(sc);
						disp += d2;
					}
					else if (type == 1) // subtractive
					{
						float d2 = sm1->EvalDisplacement(sc);
						disp -= d2;
					}
					else //mix
					{
						float d2 = sm1->EvalDisplacement(sc);
						disp = (1.0f-amount)*disp + amount*d2;
					}
				}
			}
		}
	}

/*
	Mtl *sm1 = mapOn[0]?sub1:NULL;
	Mtl *sm2 = mapOn[1]?sub2:NULL;
	Texmap *mp = mapOn[2]?map:NULL;

	float mix = mp ? mp->EvalMono(sc) : u;
	if (mp && useCurve)
		mix = mixCurve(mix);
	if (mix<0.0001f) {
		return (sm1)?sm1->EvalDisplacement(sc):0.0f;
	}
	else if (mix>0.9999f) {
		return (sm2)?sm2->EvalDisplacement(sc):0.0f;
	}
	else {
		if (sm1) {
			float d = sm1->EvalDisplacement(sc);
			if(sm2) {
				float d2 = sm2->EvalDisplacement(sc);
				d = (1.0f-mix)*d + mix*d2;
			}
			return d;
		}
		else {
			if (sm2)
				return sm2->EvalDisplacement(sc);
		}
	}
*/
	return disp;
}
AColor CrackVisualizer::EvalColor(ShadeContext& sc)
{
	if (gbufID)
		sc.SetGBufferID(gbufID);

	float dist = pblock->GetFloat( pb_spin, sc.CurTime() )*0.1f;
	float distSquared = dist*dist;
	float minDist = 9999999999999999999.0f; // we must be sure that minDist is a value greater than dist

	// ...
	//AColor edgeColor = AColor (1.0f,0.0f,0.0f,1.0f);
	AColor edgeColor = pblock->GetAColor( pb_color, sc.CurTime() );
	edgeColor.a = 1.0f;

	int nodeID = sc.NodeID();

	if( !sc.globContext )
		return AColor (0.0f,0.0f,0.0f,0.0f);

	RenderInstance* inst = sc.globContext->GetRenderInstance(nodeID);

	if( (inst==NULL) || (inst->mesh==NULL) || NULL == inst->mesh->faces
		|| inst->mesh->getNumFaces() <= sc.FaceNumber() )
	{
		return AColor (0.0f,0.0f,0.0f,0.0f);
	}

	// if an entry for the current nodeID doesn't exist
	if( adjBoundaryEdges.find( nodeID ) == adjBoundaryEdges.end() )
		// build the table
		findAdjBoundaryEdges( nodeID, inst );

	int faceIndex = sc.FaceNumber();
	Face& f = inst->mesh->faces[faceIndex];

	// compute position of p
	Point3 bary = sc.BarycentricCoords();
	Point3 p = bary[0]*inst->mesh->getVert(f.getVert(0)) +
			   bary[1]*inst->mesh->getVert(f.getVert(1)) +
			   bary[2]*inst->mesh->getVert(f.getVert(2));

	// p is not close to any boundary edge
	// check if p is close to any vertex which neighbours a boundary edge from another triangle
	for( int i=0; i<3; ++i )
	{
		// if wireframe
		if(0)
		{
			DWORD edgeIdx = f.GetEdgeIndex( f.getVert(i), f.getVert((i+1)%3) );

			// get vertex positions
			Point3 v0 = inst->mesh->getVert(f.getVert(i));
			Point3 v1 = inst->mesh->getVert(f.getVert((i+1)%3));

			// compute distance p <-> edge v0, v1
			//float edgeDistance = distancePointLine( p, v0, v1 );
			float edgeDistance = Dist3DPtToLine( &p, &v0, &v1 );
			edgeDistance = edgeDistance*edgeDistance;

			// if distance of p is closer than 1/10 of the distance of v2 to that edge
			if( edgeDistance < minDist )
				minDist = edgeDistance;
		}

		// if there is any incident boundary edge to the current vertex, then we know that it is a
		// boundary vertex
		if( !adjBoundaryEdges[nodeID][f.getVert(i)].empty() )
		{
			// current vertex is a boundary vertex
			// compute distance of p to that vertex
			float vertexDistance = (inst->mesh->getVert( f.getVert(i) ) - p).LengthSquared();

			if( vertexDistance < minDist )
				minDist = vertexDistance;

			// check all boundary edges which are adjacent to the vertex and may
			// come from other faces
			for( int j = 0; j<adjBoundaryEdges[nodeID][f.getVert(i)].size(); ++j )
			{
				// compute distance to that edge
				Point3 v0 = inst->mesh->getVert( adjBoundaryEdges[nodeID][f.getVert(i)][j].first );
				Point3 v1 = inst->mesh->getVert( adjBoundaryEdges[nodeID][f.getVert(i)][j].second );

				// compute dot product
				Point3 vec = p - v0;
				Point3 direction = Normalize( v1 - v0 );
				float maxLength = Length( v1 - v0 );

				float dp = DotProd( vec, direction );

				if( (dp<0.0f)||(dp>maxLength) )
					continue;

				float edgeDistance = LengthSquared( vec - dp*direction );

				if( edgeDistance < minDist )
					minDist = edgeDistance;
			}
		}
	}

	if( minDist < distSquared )
		return edgeColor;

	return AColor (0.0f,0.0f,0.0f,0.0f);
}
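// ------------------------------------------------------------------------
// The inner loop above measures the squared distance from the shaded point to
// each adjacent boundary edge by projecting onto the normalized edge direction
// and skipping projections that fall outside the segment. The same test in
// isolation (plain C++; "Vec3" and "distSqToSegment" are hypothetical stand-ins
// for Point3 and the inline math):
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3  sub(Vec3 a, Vec3 b) { return { a.x-b.x, a.y-b.y, a.z-b.z }; }
static float dot(Vec3 a, Vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static float length(Vec3 a)      { return std::sqrt(dot(a, a)); }

// Squared distance from p to segment [v0,v1]; returns a negative value when
// the projection falls outside the segment (the shader just skips that edge).
static float distSqToSegment(Vec3 p, Vec3 v0, Vec3 v1) {
	Vec3 vec = sub(p, v0);
	Vec3 dir = sub(v1, v0);
	float maxLength = length(dir);
	dir = { dir.x/maxLength, dir.y/maxLength, dir.z/maxLength }; // normalize
	float dp = dot(vec, dir);                  // signed distance along the edge
	if (dp < 0.f || dp > maxLength) return -1.f;
	Vec3 perp = { vec.x - dp*dir.x, vec.y - dp*dir.y, vec.z - dp*dir.z };
	return dot(perp, perp);                    // squared perpendicular distance
}

int main() {
	Vec3 p = { 0.5f, 1.f, 0.f }, v0 = { 0.f, 0.f, 0.f }, v1 = { 2.f, 0.f, 0.f };
	std::printf("%.3f\n", distSqToSegment(p, v0, v1)); // prints 1.000
	return 0;
}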
Point3 BerconGradient::EvalNormalPerturb(ShadeContext& sc) {
	// Returned vector
	Point3 res(0.0f,0.0f,0.0f);
	if (p_type != 0) return res; // Bump only works for UVW, otherwise we don't really know the derivative of the gradient

	// Use cache
	if (sc.GetCache(this,res))
		return res;
	if (gbufID) sc.SetGBufferID(gbufID);

	// UVW
	Point3 p;
	Point3 M[3];
	if (!berconXYZ.get(sc, p, M)) return res;

	// Distortion
	float dist = 0.f;
	if (p_disOn && p_distex)
		dist = (1.f - p_distex->EvalMono(sc) * 2.f) * p_disStr;

	// Origin
	float d = getGradientValueUVW(p) + dist;
	if (!limitRange(d)) return res;
	if (p_curveOn)
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);
	d = Intens(gradient->getColor(d, sc));

	// Deltas
	Point3 normal;
	/*if (berconXYZ.req()) {
		Point3 MP[3];
		MP[0] = Point3(DELTA,0.f,0.f);
		MP[1] = Point3(0.f,DELTA,0.f);
		MP[2] = Point3(0.f,DELTA,0.f);
		for (int i=0; i<3; i++) {
			normal[i] = getGradientValueUVW(p+DELTA*M[i]) + dist;
			if (!limitRange(normal[i])) return res;
			if (p_curveOn)
				normal[i] = curve->GetControlCurve(0)->GetValue(sc.CurTime(), normal[i]);
			normal[i] = (normal[i] - d) / DELTA;
		}
		normal = M[0]*normal.x + M[1]*normal.y + M[2]*normal.z;
	} else {*/
		Point3 MP[3];
		MP[0] = Point3(DELTA,0.f,0.f);
		MP[1] = Point3(0.f,DELTA,0.f);
		MP[2] = Point3(0.f,0.f,DELTA);
		for (int i=0; i<3; i++) {
			normal[i] = getGradientValueUVW(p+MP[i]) + dist;
			if (!limitRange(normal[i])) return res;
			if (p_curveOn)
				normal[i] = curve->GetControlCurve(0)->GetValue(sc.CurTime(), normal[i]);
			normal[i] = Intens(gradient->getColor(normal[i], sc));
			normal[i] = (normal[i] - d) / DELTA;
		}
		normal = M[0]*normal.x + M[1]*normal.y + M[2]*normal.z;
		//normal = sc.VectorFromNoScale(normal, REF_OBJECT);
	//}

	// Compute maps and proper bump vector
	res = gradient->getBump(p_reverse?1.f-d:d, p_reverse?normal:-normal, sc);

	// Output
	res = texout->Filter(res);

	// Shading ready, return results
	sc.PutCache(this,res);
	return res;
}
Point3 BerconWood::EvalNormalPerturb(ShadeContext& sc) {
	Point3 p,dpdx,dpdy;

	if (!sc.doMaps) return Point3(0,0,0);
	if (gbufID) sc.SetGBufferID(gbufID);

	// Evaluate parameters
	WoodParam wp = EvalParameters(sc);
	float grainA = mapOn[19]&&subtex[19]?subtex[19]->EvalMono(sc)*grainAmount:grainAmount;
	float grainF = mapOn[20]&&subtex[20]?subtex[20]->EvalMono(sc)*grainFreq:grainFreq;

	// UVW, Distortion and size
	Point3 M[3];
	berconXYZ.get(sc, p, dpdx, dpdy, M);
	if (useDistortion)
		applyDistortion(sc,p);
	float wSize = mapOn[5]&&subtex[5]?subtex[5]->EvalMono(sc)*woodSize:woodSize;
	p /= wSize; dpdx /= (wSize / 2.f); dpdy /= (wSize / 2.f);

	// Vectors
	bool grainON = (grainAmount > .001f);
	Point3 np, nG, gP;

	float d = sc.filterMaps? Noise::wood(p, dpdx, dpdy, gP, wp) : Noise::wood(p, gP, wp);
	if (useCurve)
		d = curve->GetControlCurve(0)->GetValue(sc.CurTime(), d);
	float g = grainON ? Fractal::grain(gP, grainA, grainF): 0.f;

	for (int i=0; i<3; i++) {
		np[i] = sc.filterMaps? Noise::wood(p + DELTA * M[i], dpdx, dpdy, gP, wp) : Noise::wood(p + DELTA * M[i], gP, wp);
		if (useCurve)
			np[i] = curve->GetControlCurve(0)->GetValue(sc.CurTime(), np[i]);
		np[i] = (np[i] - d) / DELTA;
		if (grainON) nG[i] = Fractal::grain(gP, grainA, grainF); // gP is updated by wood()
	}
	np = -sc.VectorFromNoScale(np, REF_OBJECT);
	nG = -sc.VectorFromNoScale(nG, REF_OBJECT);

	// Eval sub maps
	float f1, f2, f3;
	Point3 v1, v2, v3;
	bool maps = false;
	if (subtex[0]) {
		f1 = subtex[0]->EvalMono(sc);
		v1 = subtex[0]->EvalNormalPerturb(sc);
		maps = true;
	} else {
		f1 = Intens(col[0]);
		v1 = Point3(0.f, 0.f, 0.f);
	}
	if (subtex[1]) {
		f2 = subtex[1]->EvalMono(sc);
		v2 = subtex[1]->EvalNormalPerturb(sc);
		maps = true;
	} else {
		f2 = Intens(col[1]);
		v2 = Point3(0.f, 0.f, 0.f);
	}
	if (subtex[2]) {
		f3 = subtex[2]->EvalMono(sc);
		v3 = subtex[2]->EvalNormalPerturb(sc);
		maps = true;
	} else {
		f3 = Intens(col[2]);
		v3 = Point3(0.f, 0.f, 0.f);
	}

	// Calculate vector
	if (maps) {
		np = (f2-f1)*np + d*v2 + (1.f-d)*v1;
		if (grainON) {
			float val = d*f1 + (1.f-d)*f2;
			np = (f3-val)*nG + g*v3 + (1.f-g)*np;
		}
	} else {
		np *= f2 - f1;
		if (grainON) {
			float val = d*f1 + (1.f-d)*f2;
			np = (f3-val)*nG + (1.f-g)*np;
		}
	}

	return texout->Filter(np); // Does this filter actually do something?
}
// Calculates the 0..1 value which is given to the gradient
float BerconGradient::getGradientValue(ShadeContext& sc) {
	switch (p_type) {
		case 0: { // UVW
			break; // Handled in main evaluation
		}
		case 1: { // Normal
			switch (p_normalFunction) {
				case 0: { // Perpendicular / Parallel
					return fabs(getGradientValueNormal(sc));
				}
				case 1: { // Towards / Away
					return (getGradientValueNormal(sc) + 1.f) / 2.f;
				}
				case 2: { // Fresnel
					// NOTE: Should this get IOR from sc.GetIOR()?
					// I think not, since it's just a map, not a material.
					// You get more predictable behaviour with a constant 1.f.
					static float n1 = 1.0f;
					float cti = fabs(getGradientValueNormal(sc));
					float stt = (n1 / p_ior) * sqrt(1 - cti * cti);
					float ctt = sqrt(1 - stt * stt);
					float rs = (p_ior * ctt - n1 * cti ) / (p_ior * ctt + n1 * cti);
					rs = rs * rs;
					float rp = (n1 * ctt - p_ior * cti ) / (n1 * ctt + p_ior * cti);
					rp = rp * rp;
					return 1.f - 0.5f * (rs + rp);
				}
			}
		}
		case 2: { // Distance
			return getGradientValueDist(sc);
		}
		case 3: { // Light
			return Intens(sc.DiffuseIllum());
		}
		case 4: { // Map
			return p_maptex?p_maptex->EvalMono(sc):0.f; // TODO: Evaluate submaps color, bump is tougher DELTA shift with BerconSC?
		}
		case 5: { // Random
			seedRandomGen(sc);
			return (float)sfrand();
			break;
		}
		case 6: { // Particle age
			Object *ob = sc.GetEvalObject();
			if (ob && ob->IsParticleSystem()) {
				ParticleObject *obj = (ParticleObject*)ob;
				TimeValue t = sc.CurTime();
				TimeValue age  = obj->ParticleAge(t,sc.mtlNum);
				TimeValue life = obj->ParticleLife(t,sc.mtlNum);
				if (age>=0 && life>=0)
					return float(age)/float(life);
			}
			break;
		}
		case 7: { // Particle speed
			Object *ob = sc.GetEvalObject();
			if (ob && ob->IsParticleSystem()) {
				ParticleObject *obj = (ParticleObject*)ob;
				/*IChkMtlAPI* chkMtlAPI = static_cast<IChkMtlAPI*>(obj->GetInterface(I_NEWMTLINTERFACE));
				if ((chkMtlAPI&&chkMtlAPI->SupportsParticleIDbyFace()))
					return (Length(obj->ParticleVelocity(sc.CurTime(),chkMtlAPI->GetParticleFromFace(sc.FaceNumber()))) - p_rangeMin) / (p_rangeMax - p_rangeMin);
				else*/
					return Length(obj->ParticleVelocity(sc.CurTime(),sc.mtlNum));
			}
			break;
		}
		case 8: { // Particle size
			Object *ob = sc.GetEvalObject();
			if (ob && ob->IsParticleSystem()) {
				ParticleObject *obj = (ParticleObject*)ob;
				return obj->ParticleSize(sc.CurTime(),sc.mtlNum);
			}
			break;
		}
		default:
			break;
	}
	return 0.f;
}
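// ------------------------------------------------------------------------
// Sketch of the Fresnel branch above as a standalone function: unpolarized
// reflectance is the average of the squared s- and p-polarized terms, and the
// map returns the transmitted fraction 1 - (rs + rp)/2 with the incident IOR
// pinned to 1. "fresnelTransmittance" is a hypothetical helper; the explicit
// total-internal-reflection guard is an addition not present in the original.
#include <algorithm>
#include <cmath>
#include <cstdio>

// cosI is the absolute cosine of the incidence angle, n2 the relative IOR of
// the surface (incident medium fixed at n1 = 1, as in the map above).
static float fresnelTransmittance(float cosI, float n2) {
	const float n1 = 1.0f;
	float sinT = (n1 / n2) * std::sqrt(std::max(0.f, 1.f - cosI * cosI)); // Snell's law
	if (sinT >= 1.f) return 0.f;                 // total internal reflection (only if n2 < 1)
	float cosT = std::sqrt(1.f - sinT * sinT);
	float rs = (n2 * cosT - n1 * cosI) / (n2 * cosT + n1 * cosI);
	float rp = (n1 * cosT - n2 * cosI) / (n1 * cosT + n2 * cosI);
	return 1.f - 0.5f * (rs * rs + rp * rp);
}

int main() {
	// Normal incidence on glass (IOR 1.5): reflectance is about 4%, so this prints ~0.960.
	std::printf("%.3f\n", fresnelTransmittance(1.0f, 1.5f));
	return 0;
}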