Code example #1
File: heightfield.cpp  Project: sc1991327/pbrt-v2-sc
void Heightfield::Refine(vector<Reference<Shape> > &refined) const {
    int ntris = 2*(nx-1)*(ny-1);
    refined.reserve(ntris);
    int *verts = new int[3*ntris];
    Point *P = new Point[nx*ny];
    float *uvs = new float[2*nx*ny];
    int nverts = nx*ny;

    int x, y;
    // Compute heightfield vertex positions
    int pos = 0;
    for (y = 0; y < ny; ++y) {
        for (x = 0; x < nx; ++x) {
            P[pos].x = uvs[2*pos]   = (float)x / (float)(nx-1);
            P[pos].y = uvs[2*pos+1] = (float)y / (float)(ny-1);
            P[pos].z = z[pos];
            ++pos;
        }
    }

    // Fill in heightfield vertex offset array
    int *vp = verts;
    for (y = 0; y < ny-1; ++y) {
        for (x = 0; x < nx-1; ++x) {
#define VERT(x,y) ((x)+(y)*nx)
            *vp++ = VERT(x, y);
            *vp++ = VERT(x+1, y);
            *vp++ = VERT(x+1, y+1);
    
            *vp++ = VERT(x, y);
            *vp++ = VERT(x+1, y+1);
            *vp++ = VERT(x, y+1);
        }
#undef VERT
    }
    ParamSet paramSet;
    paramSet.AddInt("indices", verts, 3*ntris);
    paramSet.AddFloat("uv", uvs, 2 * nverts);
    paramSet.AddPoint("P", P, nverts);
    refined.push_back(CreateTriangleMeshShape(ObjectToWorld, WorldToObject, ReverseOrientation, paramSet));
    delete[] P;
    delete[] uvs;
    delete[] verts;
}
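For reference, the quad-splitting pattern behind the VERT macro in example #1 can be shown on its own. The sketch below is illustrative only (plain std::vector, no pbrt types, hypothetical function name); it builds the same index list of two triangles per grid cell for an nx-by-ny vertex grid.

#include <vector>

// Build triangle indices for an nx*ny vertex grid laid out row by row,
// splitting each cell into two triangles exactly as VERT(x, y) does above.
std::vector<int> GridTriangleIndices(int nx, int ny) {
    std::vector<int> indices;
    indices.reserve(6 * (nx - 1) * (ny - 1));   // 3 indices * 2 triangles per cell
    for (int y = 0; y < ny - 1; ++y) {
        for (int x = 0; x < nx - 1; ++x) {
            int v00 = x + y * nx;               // VERT(x, y)
            int v10 = (x + 1) + y * nx;         // VERT(x+1, y)
            int v11 = (x + 1) + (y + 1) * nx;   // VERT(x+1, y+1)
            int v01 = x + (y + 1) * nx;         // VERT(x, y+1)
            // First triangle of the cell
            indices.push_back(v00);
            indices.push_back(v10);
            indices.push_back(v11);
            // Second triangle of the cell
            indices.push_back(v00);
            indices.push_back(v11);
            indices.push_back(v01);
        }
    }
    return indices;
}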
Code example #2
File: LuxGeometry.cpp  Project: MassW/OpenMaya
void LuxRenderer::defineTriangleMesh(mtlu_MayaObject *obj, bool noObjectDef = false)
{
	MObject meshObject = obj->mobject;
	MStatus stat = MStatus::kSuccess;
	MFnMesh meshFn(meshObject, &stat);
	
	CHECK_MSTATUS(stat);
	MItMeshPolygon faceIt(meshObject, &stat);
	CHECK_MSTATUS(stat);

	MPointArray points;
	meshFn.getPoints(points);
    MFloatVectorArray normals;
    meshFn.getNormals( normals, MSpace::kWorld );
	MFloatArray uArray, vArray;
	meshFn.getUVs(uArray, vArray);

	logger.debug(MString("Translating mesh object ") + meshFn.name().asChar());
	MString meshFullName = obj->fullNiceName;


	MIntArray trianglesPerFace, triVertices;
	meshFn.getTriangles(trianglesPerFace, triVertices);
	int numTriangles = 0;
	for( size_t i = 0; i < trianglesPerFace.length(); i++)
		numTriangles += trianglesPerFace[i];

	// LuxRender has no per-face-per-vertex normal definition; only one normal and one uv can be used per vertex.
	// So I create the triangles with unique vertices, normals and uvs; of course, this way vertices etc. cannot be shared.
	int numPTFloats = numTriangles * 3 * 3;
	logger.debug(MString("Num Triangles: ") + numTriangles + " num tri floats " + numPTFloats);

	float *floatPointArray = new float[numPTFloats];
	float *floatNormalArray = new float[numPTFloats];
	float *floatUvArray = new float[numTriangles * 3 * 2];
	
	logger.debug(MString("Allocated ") + numPTFloats + " floats for point and normals");

	MIntArray triangelVtxIdListA;
	MFloatArray floatPointArrayA;

	MPointArray triPoints;
	MIntArray triVtxIds;
	MIntArray faceVtxIds;
	MIntArray faceNormalIds;
	
	int *triangelVtxIdList = new int[numTriangles * 3];

	for( uint sgId = 0; sgId < obj->shadingGroups.length(); sgId++)
	{
		MString slotName = MString("slot_") + sgId;
	}
	
	int triCount = 0;
	int vtxCount = 0;

	// Walk every polygon face and, for each of its triangles, emit three unshared corners (position, normal, uv).
	for(faceIt.reset(); !faceIt.isDone(); faceIt.next())
	{
		int faceId = faceIt.index();
		int numTris;
		faceIt.numTriangles(numTris);
		faceIt.getVertices(faceVtxIds);

		MIntArray faceUVIndices;

		faceNormalIds.clear();
		for( uint vtxId = 0; vtxId < faceVtxIds.length(); vtxId++)
		{
			faceNormalIds.append(faceIt.normalIndex(vtxId));
			int uvIndex;
			faceIt.getUVIndex(vtxId, uvIndex);
			faceUVIndices.append(uvIndex);
		}

		int perFaceShadingGroup = 0;
		if( obj->perFaceAssignments.length() > 0)
			perFaceShadingGroup = obj->perFaceAssignments[faceId];
		//logger.info(MString("Face ") + faceId + " will receive SG " +  perFaceShadingGroup);

		for( int triId = 0; triId < numTris; triId++)
		{
			int faceRelIds[3];
			faceIt.getTriangle(triId, triPoints, triVtxIds);

			// Find the face-relative index of each triangle vertex so the matching normal and uv ids can be looked up.
			for( uint triVtxId = 0; triVtxId < 3; triVtxId++)
			{
				for(uint faceVtxId = 0; faceVtxId < faceVtxIds.length(); faceVtxId++)
				{
					if( faceVtxIds[faceVtxId] == triVtxIds[triVtxId])
					{
						faceRelIds[triVtxId] = faceVtxId;
					}
				}
			}

			
			uint vtxId0 = faceVtxIds[faceRelIds[0]];
			uint vtxId1 = faceVtxIds[faceRelIds[1]];
			uint vtxId2 = faceVtxIds[faceRelIds[2]];
			uint normalId0 = faceNormalIds[faceRelIds[0]];
			uint normalId1 = faceNormalIds[faceRelIds[1]];
			uint normalId2 = faceNormalIds[faceRelIds[2]];
			uint uvId0 = faceUVIndices[faceRelIds[0]];
			uint uvId1 = faceUVIndices[faceRelIds[1]];
			uint uvId2 = faceUVIndices[faceRelIds[2]];
			
			floatPointArray[vtxCount * 3] = points[vtxId0].x;
			floatPointArray[vtxCount * 3 + 1] = points[vtxId0].y;
			floatPointArray[vtxCount * 3 + 2] = points[vtxId0].z;

			floatNormalArray[vtxCount * 3] = normals[normalId0].x;
			floatNormalArray[vtxCount * 3 + 1] = normals[normalId0].y;
			floatNormalArray[vtxCount * 3 + 2] = normals[normalId0].z;

			floatUvArray[vtxCount * 2] = uArray[uvId0];
			floatUvArray[vtxCount * 2 + 1] = vArray[uvId0];

			vtxCount++;

			floatPointArray[vtxCount * 3] = points[vtxId1].x;
			floatPointArray[vtxCount * 3 + 1] = points[vtxId1].y;
			floatPointArray[vtxCount * 3 + 2] = points[vtxId1].z;

			floatNormalArray[vtxCount * 3] = normals[normalId1].x;
			floatNormalArray[vtxCount * 3 + 1] = normals[normalId1].y;
			floatNormalArray[vtxCount * 3 + 2] = normals[normalId1].z;

			floatUvArray[vtxCount * 2] = uArray[uvId1];
			floatUvArray[vtxCount * 2 + 1] = vArray[uvId1];

			vtxCount++;

			floatPointArray[vtxCount * 3] = points[vtxId2].x;
			floatPointArray[vtxCount * 3 + 1] = points[vtxId2].y;
			floatPointArray[vtxCount * 3 + 2] = points[vtxId2].z;

			floatNormalArray[vtxCount * 3] = normals[normalId2].x;
			floatNormalArray[vtxCount * 3 + 1] = normals[normalId2].y;
			floatNormalArray[vtxCount * 3 + 2] = normals[normalId2].z;

			floatUvArray[vtxCount * 2] = uArray[uvId2];
			floatUvArray[vtxCount * 2 + 1] = vArray[uvId2];

			vtxCount++;
			
			//logger.debug(MString("Vertex count: ") + vtxCount + " maxId " + ((vtxCount - 1) * 3 + 2) + " ptArrayLen " + (numTriangles * 3 * 3));

			triangelVtxIdList[triCount * 3] = triCount * 3;
			triangelVtxIdList[triCount * 3 + 1] = triCount * 3 + 1;
			triangelVtxIdList[triCount * 3 + 2] = triCount * 3 + 2;

			triCount++;
		}		
	}

// LuxRender trianglemesh parameters (name | type | description | default):
//   generatetangents  bool     Generate tangent space using miktspace; useful if the mesh has a normal map that was also baked using miktspace (e.g. Blender or xNormal).  Default: false
//   subdivscheme      string   Subdivision algorithm; options are "loop" and "microdisplacement".  Default: "loop"
//   displacementmap   string   Name of the texture used for the displacement. The subdivscheme parameter must always be provided, as load-time displacement is handled by the loop-subdivision code.  Default: none (optional; loop subdivision can be used without displacement, microdisplacement will not affect the mesh without a displacement map)
//   dmscale           float    Scale of the displacement (for an LDR map, the maximum height of the displacement in meters).  Default: 0.1
//   dmoffset          float    Offset of the displacement.  Default: 0
//   dmnormalsmooth    bool     Smooth the normals of the subdivided faces. Only valid for loop subdivision.  Default: true
//   dmnormalsplit     bool     Force the mesh to split along breaks in the normal; if a mesh has no normals (flat-shaded) it will rip open on all edges. Only valid for loop subdivision.  Default: false
//   dmsharpboundary   bool     Try to preserve mesh boundaries during subdivision. Only valid for loop subdivision.  Default: false
//   nsubdivlevels     integer  Number of subdivision levels; only recursive for loop subdivision, microdisplacement needs much larger values (such as 50).  Default: 0

	bool generatetangents = false;
	getBool(MString("mtlu_mesh_generatetangents"), meshFn, generatetangents);
	int subdivscheme = 0;
	const char *subdAlgos[] = {"loop", "microdisplacement"};
	getInt(MString("mtlu_mesh_subAlgo"), meshFn, subdivscheme);
	const char *subdalgo =  subdAlgos[subdivscheme];
	float dmscale;
	getFloat(MString("mtlu_mesh_dmscale"), meshFn, dmscale);
	float dmoffset;
	getFloat(MString("mtlu_mesh_dmoffset"), meshFn, dmoffset);
	MString displacementmap;
	getString(MString("mtlu_mesh_displacementMap"), meshFn, displacementmap);
	const char *displacemap = displacementmap.asChar();
	bool dmnormalsmooth = true;
	getBool(MString("mtlu_mesh_dmnormalsmooth"), meshFn, dmnormalsmooth);
	bool dmnormalsplit = false;
	getBool(MString("mtlu_mesh_dmnormalsplit"), meshFn, dmnormalsplit);
	bool dmsharpboundary = false;
	getBool(MString("mtlu_mesh_dmsharpboundary"), meshFn, dmsharpboundary);
	int nsubdivlevels = 0;
	getInt(MString("mtlu_mesh_subdivlevel"), meshFn, nsubdivlevels);

	// a displacement map needs its own texture definition
	MString displacementTextureName = "";
	if(displacementmap.length() > 0)
	{
		ParamSet dmParams = CreateParamSet();
		dmParams->AddString("filename", &displacemap);
		displacementTextureName = meshFn.name() + "_displacementMap";
		this->lux->texture(displacementTextureName.asChar(), "float", "imagemap", boost::get_pointer(dmParams));
	}

	ParamSet triParams = CreateParamSet();
	int numPointValues = numTriangles * 3;
	int numUvValues = numTriangles * 3 * 2;
	clock_t startTime = clock();
	logger.info(MString("Adding mesh values to params."));
	triParams->AddInt("indices", triangelVtxIdList, numTriangles * 3);
	triParams->AddPoint("P", floatPointArray, numPointValues);
	triParams->AddNormal("N", floatNormalArray, numPointValues);
	triParams->AddFloat("uv",  floatUvArray, numUvValues);
	if( nsubdivlevels > 0)
		triParams->AddInt("nsubdivlevels", &nsubdivlevels, 1);
	triParams->AddBool("generatetangents",  &generatetangents, 1);
	triParams->AddString("subdivscheme", &subdalgo , 1);
	if(displacementmap.length() > 0)
	{
		triParams->AddFloat("dmoffset",  &dmoffset, 1);
		triParams->AddFloat("dmscale",  &dmscale, 1);
		const char *dmft = displacementTextureName.asChar();
		triParams->AddString("displacementmap", &dmft);
	}
	triParams->AddBool("dmnormalsmooth",  &dmnormalsmooth, 1);
	triParams->AddBool("dmnormalsplit",  &dmnormalsplit, 1);
	triParams->AddBool("dmsharpboundary",  &dmsharpboundary, 1);


	clock_t pTime = clock();
	if(!noObjectDef)
		this->lux->objectBegin(meshFullName.asChar());
	this->lux->shape("trianglemesh", boost::get_pointer(triParams));
	if(!noObjectDef)
		this->lux->objectEnd();

	clock_t eTime = clock();
	logger.info(MString("Timing: Parameters: ") + ((pTime - startTime)/CLOCKS_PER_SEC) + " objTime " + ((eTime - pTime)/CLOCKS_PER_SEC) + " all " + ((eTime - startTime)/CLOCKS_PER_SEC));

	return;

}
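The per-corner layout described by the comment at the top of defineTriangleMesh (one position, normal and uv per triangle corner, so nothing is shared) can be summarized in a small standalone sketch. The types and names below are hypothetical and not part of the plug-in:

#include <vector>

// Flat arrays with one entry per triangle corner; the index list is simply
// 0, 1, 2, ... because no corner is ever reused.
struct UnsharedMesh {
    std::vector<float> P;     // 3 floats per corner
    std::vector<float> N;     // 3 floats per corner
    std::vector<float> uv;    // 2 floats per corner
    std::vector<int> indices;
};

// Append one corner; mirrors the vtxCount/triangelVtxIdList bookkeeping above.
void AppendCorner(UnsharedMesh &m, const float p[3], const float n[3], const float st[2]) {
    m.indices.push_back(static_cast<int>(m.indices.size()));
    m.P.insert(m.P.end(), p, p + 3);
    m.N.insert(m.N.end(), n, n + 3);
    m.uv.insert(m.uv.end(), st, st + 2);
}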
Code example #3
File: exrtotiff.cpp  Project: EiffelOberon/pbrt-v1
int main(int argc, char *argv[]) 
{
    float scale = 1.f, gamma = 2.2f;
    float bloomRadius = 0.f, bloomWeight = .2f;
    float bggray = -1.f;
    char *toneMap = NULL;
    ParamSet toneMapParams;

    int argNum = 1;
    while (argNum < argc && argv[argNum][0] == '-') {
#define ARG(name, var) \
      else if (!strcmp(argv[argNum], "-" name)) { \
	if (argNum+1 == argc) \
	  usage(); \
	var = atof(argv[argNum+1]); \
	++argNum; \
      }

	if (!strcmp(argv[argNum], "-tonemap")) {
	    if (argNum+1 == argc)
		usage();
	    toneMap = argv[argNum+1];
	    ++argNum;
	}
	else if (!strcmp(argv[argNum], "-param")) {
	    if (argNum+2 >= argc)
		usage();
	    float val = atof(argv[argNum+2]);
	    toneMapParams.AddFloat(argv[argNum+1], &val);
	    argNum += 2;
	}
	ARG("scale", scale)
	ARG("gamma", gamma)
	ARG("bg", bggray)
	ARG("bloomRadius", bloomRadius)
	ARG("bloomWeight", bloomWeight)
	else 
	    usage();
      ++argNum;

    }
    if (argNum + 2 > argc) usage();

    char *inFile = argv[argNum], *outFile = argv[argNum+1]; 	  
    float *rgba;
    int xRes, yRes;
    bool hasAlpha;
    pbrtInit();

    if (ReadEXR(inFile, rgba, xRes, yRes, hasAlpha)) {
	float *rgb = new float[xRes*yRes*3];
	for (int i = 0; i < xRes*yRes; ++i) {
	    for (int j = 0; j < 3; ++j) {
		rgb[3*i+j] = scale * rgba[4*i+j];
//CO		if (rgba[4*i+3] != 0.f)
//CO		    rgb[3*i+j] /= rgba[4*i+3];
		if (bggray > 0)
		    rgb[3*i+j] = rgba[4*i+3] * rgb[3*i+j] + (1.f - rgba[4*i+3]) * bggray;
	    }
	    if (bggray > 0)
		rgba[4*i+3] = 1.f;
	}

	ApplyImagingPipeline(rgb, xRes, yRes, NULL, bloomRadius,
			     bloomWeight, toneMap, &toneMapParams,
			     gamma, 0.f, 255);

	for (int i = 0; i < xRes*yRes; ++i) {
	    for (int j = 0; j < 3; ++j) {
		rgba[4*i+j] = rgb[3*i+j];
		if (rgba[4*i+3] != 0.f)
		    rgba[4*i+j] /= rgba[4*i+3];
	    }
	    rgba[4*i+3] *= 255.f;
	}

	WriteTIFF(outFile, rgba, xRes, yRes, hasAlpha);
    }
    pbrtCleanup();
    return 0;
}
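In the second pixel loop of example #3 each color channel is divided by a non-zero alpha before the TIFF is written, i.e. the premultiplied color is converted back to straight color. A minimal standalone version of that per-pixel step, for illustration only (hypothetical helper name):

// Convert one premultiplied-alpha pixel to straight (non-premultiplied) color,
// as the loop before WriteTIFF does; a zero alpha is left untouched.
void UnpremultiplyPixel(float rgba[4]) {
    if (rgba[3] != 0.f) {
        rgba[0] /= rgba[3];
        rgba[1] /= rgba[3];
        rgba[2] /= rgba[3];
    }
}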
Code example #4
File: LuxCamera.cpp  Project: haggi/OpenMaya
void LuxRenderer::defineCamera()
{
	std::shared_ptr<MayaScene> mayaScene = MayaTo::getWorldPtr()->worldScenePtr;
	std::shared_ptr<RenderGlobals> renderGlobals = MayaTo::getWorldPtr()->worldRenderGlobalsPtr;
	std::shared_ptr<MayaObject> mo = mayaScene->camList[0];

	MMatrix cm = mo->dagPath.inclusiveMatrix();
	MFnCamera camFn(mo->mobject);
	this->transformCamera(mo.get(), renderGlobals->doMb && (mo->transformMatrices.size() > 1));

	// lux uses the fov of the smallest image edge
	double hFov = RadToDeg(camFn.horizontalFieldOfView());
	double vFov = RadToDeg(camFn.verticalFieldOfView());
	float fov = hFov;
	int width, height;
	renderGlobals->getWidthHeight(width, height);
	if( height < width)
		fov = vFov;

	// focaldist
	float focusDist = (float)camFn.focusDistance() * renderGlobals->sceneScale;
	float focalLen = (float)camFn.focalLength();
	float fStop = (float)camFn.fStop();

	bool useDOF = false;
	getBool(MString("depthOfField"), camFn, useDOF);
	useDOF = useDOF && renderGlobals->doDof;

	// hither, yon
	float hither = (float)camFn.nearClippingPlane();
	float yon = (float)camFn.farClippingPlane();
	
	// render region
	int left, bottom, right, top;
	renderGlobals->getRenderRegion(left, bottom, right, top);
	int ybot = (height - bottom);
	int ytop = (height - top);
	int ymin = ybot <  ytop ? ybot :  ytop;
	int ymax = ybot >  ytop ? ybot :  ytop;
	float lensradius = (focalLen / 1000.0) / ( 2.0 * fStop );

	int blades = 0;
	getInt(MString("mtlu_diaphragm_blades"), camFn, blades);
	bool autofocus = false;
	getBool(MString("mtlu_autofocus"), camFn, autofocus);
	int dist = 0;
	getInt(MString("mtlu_distribution"), camFn, dist);
	logger.debug(MString("Lens distribution: ") + dist + " " + LensDistributions[dist]);
	float power = 1.0f;
	getFloat(MString("mtlu_power"), camFn, power);
	const char *lensdistribution =  LensDistributions[dist];

	float shutterOpen = 0.0f;
	float shutterClose = renderGlobals->mbLength;

	ParamSet cp = CreateParamSet();
	cp->AddFloat("fov", &fov);
	cp->AddFloat("focaldistance", &focusDist);
	cp->AddFloat("hither", &hither);
	cp->AddFloat("yon", &yon);
	cp->AddFloat("shutteropen", &shutterOpen);
	cp->AddFloat("shutterclose", &shutterClose);
	if( blades > 0)
		cp->AddInt("blades", &blades);
	if( useDOF )
	{
		cp->AddFloat("lensradius", &lensradius);
		cp->AddBool("autofocus", &autofocus);
		cp->AddString("distribution", &lensdistribution);
		cp->AddFloat("power", &power);
	}
	lux->camera("perspective", boost::get_pointer(cp));
	
	if( renderGlobals->exportSceneFile)
		this->luxFile << "Camera \"perspective\" "<< "\"float fov\" [" << fov << "]"  <<"\n";

}
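The lensradius value in example #4 follows the thin-lens relation: the aperture diameter is the focal length divided by the f-stop, so the radius in meters is (focalLength_mm / 1000) / (2 * fStop). A tiny illustrative helper (hypothetical name, not part of the plug-in):

// Aperture radius in meters for a focal length given in millimetres.
float LensRadiusMeters(float focalLengthMM, float fStop) {
    return (focalLengthMM / 1000.0f) / (2.0f * fStop);
}
// Example: an 85 mm lens at f/2.8 gives a radius of roughly 0.0152 m.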
Code example #5
File: nurbs.cpp  Project: acpa2691/cs348b
void NURBS::Refine(vector<Reference<Shape> > &refined) const {
	// Compute NURBS dicing rates
	int diceu = 30, dicev = 30;
	float *ueval = new float[diceu];
	float *veval = new float[dicev];
	Point *evalPs = new Point[diceu*dicev];
	Normal *evalNs = new Normal[diceu*dicev];
	int i;
	for (i = 0; i < diceu; ++i)
		ueval[i] = Lerp((float)i / (float)(diceu-1), umin, umax);
	for (i = 0; i < dicev; ++i)
		veval[i] = Lerp((float)i / (float)(dicev-1), vmin, vmax);
	// Evaluate NURBS over grid of points
	memset(evalPs, 0, diceu*dicev*sizeof(Point));
	memset(evalNs, 0, diceu*dicev*sizeof(Normal));
	float *uvs = new float[2*diceu*dicev];
	// Turn NURBS into triangles
	Homogeneous3 *Pw = (Homogeneous3 *)P;
	if (!isHomogeneous) {
		Pw = (Homogeneous3 *)alloca(nu*nv*sizeof(Homogeneous3));
		for (int i = 0; i < nu*nv; ++i) {
			Pw[i].x = P[3*i];
			Pw[i].y = P[3*i+1];
			Pw[i].z = P[3*i+2];
			Pw[i].w = 1.;
		}
	}
	for (int v = 0; v < dicev; ++v) {
		for (int u = 0; u < diceu; ++u) {
			uvs[2*(v*diceu+u)]   = ueval[u];
			uvs[2*(v*diceu+u)+1] = veval[v];
	
			Vector dPdu, dPdv;
			Point pt = NURBSEvaluateSurface(uorder, uknot, nu, ueval[u],
				vorder, vknot, nv, veval[v], Pw, &dPdu, &dPdv);
			evalPs[v*diceu + u].x = pt.x;
			evalPs[v*diceu + u].y = pt.y;
			evalPs[v*diceu + u].z = pt.z;
			evalNs[v*diceu + u] = Normal(Normalize(Cross(dPdu, dPdv)));
		}
	}
	// Generate points-polygons mesh
	int nTris = 2*(diceu-1)*(dicev-1);
	int *vertices = new int[3 * nTris];
	int *vertp = vertices;
	// Compute the vertex offset numbers for the triangles
	for (int v = 0; v < dicev-1; ++v) {
		for (int u = 0; u < diceu-1; ++u) {
	#define VN(u,v) ((v)*diceu+(u))
			*vertp++ = VN(u,   v);
			*vertp++ = VN(u+1, v);
			*vertp++ = VN(u+1, v+1);
	
			*vertp++ = VN(u,   v);
			*vertp++ = VN(u+1, v+1);
			*vertp++ = VN(u,   v+1);
	#undef VN
		}
	}
	int nVerts = diceu*dicev;
	ParamSet paramSet;
	paramSet.AddInt("indices", vertices, 3*nTris);
	paramSet.AddPoint("P", evalPs, nVerts);
	paramSet.AddFloat("uv", uvs, 2 * nVerts);
	paramSet.AddNormal("N", evalNs, nVerts);
	refined.push_back(MakeShape("trianglemesh", ObjectToWorld,
			reverseOrientation, paramSet));
	// Cleanup from NURBS refinement
	delete[] uvs;
	delete[] ueval;
	delete[] veval;
	delete[] evalPs;
	delete[] evalNs;
	delete[] vertices;
}
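The !isHomogeneous branch in example #5 promotes plain (x, y, z) control points to homogeneous coordinates with weight 1 so the rational evaluator can be reused for non-rational surfaces. A minimal sketch of that promotion, using a hypothetical Hom3 type instead of pbrt's Homogeneous3:

#include <vector>

struct Hom3 { float x, y, z, w; };

// Promote n interleaved (x, y, z) control points to homogeneous form (w = 1),
// mirroring the !isHomogeneous branch in NURBS::Refine above.
std::vector<Hom3> ToHomogeneous(const float *P, int n) {
    std::vector<Hom3> Pw(n);
    for (int i = 0; i < n; ++i)
        Pw[i] = Hom3{ P[3*i], P[3*i+1], P[3*i+2], 1.f };
    return Pw;
}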