// Software-transforms all vertices (skinning, lighting, texgen) into the global
// `transformed` buffer, then tries to detect full-screen clear rectangles and turn
// them into real GL clears (that path is currently disabled with `false &&`).
// NOTE(review): this chunk ends inside the function body — the remainder (the actual
// draw submission and the function's closing brace) is outside this view.
void TransformDrawEngine::SoftwareTransformAndDraw(
	int prim, u8 *decoded, LinkedShader *program, int vertexCount, u32 vertType, void *inds, int indexType, const DecVtxFormat &decVtxFormat, int maxIndex) {

	bool throughmode = (vertType & GE_VTYPE_THROUGH_MASK) != 0;
	// Secondary (specular) color is only sent separately when lighting is on.
	bool lmode = gstate.isUsingSecondaryColor() && gstate.isLightingEnabled();

	// TODO: Split up into multiple draw calls for GLES 2.0 where you can't guarantee support for more than 0x10000 verts.
#if defined(MOBILE_DEVICE)
	if (vertexCount > 0x10000/3)
		vertexCount = 0x10000/3;
#endif

	float uscale = 1.0f;
	float vscale = 1.0f;
	bool scaleUV = false;
	if (throughmode) {
		// Through-mode UVs are in texels; normalize them to [0, 1].
		uscale /= gstate_c.curTextureWidth;
		vscale /= gstate_c.curTextureHeight;
	} else {
		// If prescaling is enabled, the vertex decoder already applied the UV scale/offset.
		scaleUV = !g_Config.bPrescaleUV;
	}

	bool skinningEnabled = vertTypeIsSkinningEnabled(vertType);

	int w = gstate.getTextureWidth(0);
	int h = gstate.getTextureHeight(0);
	// Ratio of the PSP-visible texture size to the actual (possibly larger) backing texture.
	float widthFactor = (float) w / (float) gstate_c.curTextureWidth;
	float heightFactor = (float) h / (float) gstate_c.curTextureHeight;

	Lighter lighter(vertType);
	float fog_end = getFloat24(gstate.fog1);
	float fog_slope = getFloat24(gstate.fog2);

	VertexReader reader(decoded, decVtxFormat, vertType);
	for (int index = 0; index < maxIndex; index++) {
		reader.Goto(index);

		float v[3] = {0, 0, 0};
		float c0[4] = {1, 1, 1, 1};
		float c1[4] = {0, 0, 0, 0};
		float uv[3] = {0, 0, 1};
		float fogCoef = 1.0f;

		if (throughmode) {
			// Do not touch the coordinates or the colors. No lighting.
			reader.ReadPos(v);
			if (reader.hasColor0()) {
				reader.ReadColor0(c0);
				for (int j = 0; j < 4; j++) {
					c1[j] = 0.0f;
				}
			} else {
				// No vertex color: fall back to the material ambient color.
				c0[0] = gstate.getMaterialAmbientR() / 255.f;
				c0[1] = gstate.getMaterialAmbientG() / 255.f;
				c0[2] = gstate.getMaterialAmbientB() / 255.f;
				c0[3] = gstate.getMaterialAmbientA() / 255.f;
			}
			if (reader.hasUV()) {
				reader.ReadUV(uv);
				uv[0] *= uscale;
				uv[1] *= vscale;
			}
			fogCoef = 1.0f;
			// Scale UV?
		} else {
			// We do software T&L for now
			float out[3], norm[3];
			float pos[3], nrm[3];
			Vec3f normal(0, 0, 1);
			reader.ReadPos(pos);
			if (reader.hasNormal())
				reader.ReadNrm(nrm);

			if (!skinningEnabled) {
				Vec3ByMatrix43(out, pos, gstate.worldMatrix);
				if (reader.hasNormal()) {
					Norm3ByMatrix43(norm, nrm, gstate.worldMatrix);
					normal = Vec3f(norm).Normalized();
				}
			} else {
				float weights[8];
				reader.ReadWeights(weights);
				// Skinning: blend position/normal by each nonzero bone weight.
				Vec3f psum(0,0,0);
				Vec3f nsum(0,0,0);
				for (int i = 0; i < vertTypeGetNumBoneWeights(vertType); i++) {
					if (weights[i] != 0.0f) {
						Vec3ByMatrix43(out, pos, gstate.boneMatrix+i*12);
						Vec3f tpos(out);
						psum += tpos * weights[i];
						if (reader.hasNormal()) {
							Norm3ByMatrix43(norm, nrm, gstate.boneMatrix+i*12);
							Vec3f tnorm(norm);
							nsum += tnorm * weights[i];
						}
					}
				}

				// Yes, we really must multiply by the world matrix too.
				Vec3ByMatrix43(out, psum.AsArray(), gstate.worldMatrix);
				if (reader.hasNormal()) {
					Norm3ByMatrix43(norm, nsum.AsArray(), gstate.worldMatrix);
					normal = Vec3f(norm).Normalized();
				}
			}

			// Perform lighting here if enabled. don't need to check through, it's checked above.
			float unlitColor[4] = {1, 1, 1, 1};
			if (reader.hasColor0()) {
				reader.ReadColor0(unlitColor);
			} else {
				unlitColor[0] = gstate.getMaterialAmbientR() / 255.f;
				unlitColor[1] = gstate.getMaterialAmbientG() / 255.f;
				unlitColor[2] = gstate.getMaterialAmbientB() / 255.f;
				unlitColor[3] = gstate.getMaterialAmbientA() / 255.f;
			}
			float litColor0[4];
			float litColor1[4];
			lighter.Light(litColor0, litColor1, unlitColor, out, normal);

			if (gstate.isLightingEnabled()) {
				// Don't ignore gstate.lmode - we should send two colors in that case
				for (int j = 0; j < 4; j++) {
					c0[j] = litColor0[j];
				}
				if (lmode) {
					// Separate colors
					for (int j = 0; j < 4; j++) {
						c1[j] = litColor1[j];
					}
				} else {
					// Summed color into c0, clamped to 1.0 per component.
					for (int j = 0; j < 4; j++) {
						c0[j] = ((c0[j] + litColor1[j]) > 1.0f) ? 1.0f : (c0[j] + litColor1[j]);
					}
				}
			} else {
				if (reader.hasColor0()) {
					for (int j = 0; j < 4; j++) {
						c0[j] = unlitColor[j];
					}
				} else {
					c0[0] = gstate.getMaterialAmbientR() / 255.f;
					c0[1] = gstate.getMaterialAmbientG() / 255.f;
					c0[2] = gstate.getMaterialAmbientB() / 255.f;
					c0[3] = gstate.getMaterialAmbientA() / 255.f;
				}
				if (lmode) {
					for (int j = 0; j < 4; j++) {
						c1[j] = 0.0f;
					}
				}
			}

			float ruv[2] = {0.0f, 0.0f};
			if (reader.hasUV())
				reader.ReadUV(ruv);

			// Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights.
			switch (gstate.getUVGenMode()) {
			case GE_TEXMAP_TEXTURE_COORDS:	// UV mapping
			case GE_TEXMAP_UNKNOWN: // Seen in Riviera. Unsure of meaning, but this works.
				// Texture scale/offset is only performed in this mode.
				if (scaleUV) {
					uv[0] = ruv[0]*gstate_c.uv.uScale + gstate_c.uv.uOff;
					uv[1] = ruv[1]*gstate_c.uv.vScale + gstate_c.uv.vOff;
				} else {
					uv[0] = ruv[0];
					uv[1] = ruv[1];
				}
				uv[2] = 1.0f;
				break;
			case GE_TEXMAP_TEXTURE_MATRIX:
				{
					// Projection mapping
					Vec3f source;
					switch (gstate.getUVProjMode()) {
					case GE_PROJMAP_POSITION: // Use model space XYZ as source
						source = pos;
						break;
					case GE_PROJMAP_UV: // Use unscaled UV as source
						source = Vec3f(ruv[0], ruv[1], 0.0f);
						break;
					case GE_PROJMAP_NORMALIZED_NORMAL: // Use normalized normal as source
						if (reader.hasNormal()) {
							source = Vec3f(norm).Normalized();
						} else {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
							source = Vec3f(0.0f, 0.0f, 1.0f);
						}
						break;
					case GE_PROJMAP_NORMAL: // Use non-normalized normal as source!
						if (reader.hasNormal()) {
							source = Vec3f(norm);
						} else {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
							source = Vec3f(0.0f, 0.0f, 1.0f);
						}
						break;
					}
					float uvw[3];
					Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix);
					uv[0] = uvw[0];
					uv[1] = uvw[1];
					uv[2] = uvw[2];
				}
				break;
			case GE_TEXMAP_ENVIRONMENT_MAP:
				// Shade mapping - use two light sources to generate U and V.
				{
					Vec3f lightpos0 = Vec3f(gstate_c.lightpos[gstate.getUVLS0()]).Normalized();
					Vec3f lightpos1 = Vec3f(gstate_c.lightpos[gstate.getUVLS1()]).Normalized();
					uv[0] = (1.0f + Dot(lightpos0, normal))/2.0f;
					// NOTE(review): the newer SoftwareTransform() in this file uses
					// (1.0f + Dot(...)) for V as well; confirm whether the minus sign
					// here is intentional or a sign bug in this older path.
					uv[1] = (1.0f - Dot(lightpos1, normal))/2.0f;
					uv[2] = 1.0f;
				}
				break;
			default:
				// Illegal
				ERROR_LOG_REPORT(G3D, "Impossible UV gen mode? %d", gstate.getUVGenMode());
				break;
			}

			// Rescale normalized UVs to match the backing texture dimensions.
			uv[0] = uv[0] * widthFactor;
			uv[1] = uv[1] * heightFactor;

			// Transform the coord by the view matrix.
			Vec3ByMatrix43(v, out, gstate.viewMatrix);
			fogCoef = (v[2] + fog_end) * fog_slope;
		}

		// TODO: Write to a flexible buffer, we don't always need all four components.
		memcpy(&transformed[index].x, v, 3 * sizeof(float));
		transformed[index].fog = fogCoef;
		memcpy(&transformed[index].u, uv, 3 * sizeof(float));
		if (gstate_c.flipTexture) {
			transformed[index].v = 1.0f - transformed[index].v;
		}
		for (int i = 0; i < 4; i++) {
			transformed[index].color0[i] = c0[i] * 255.0f;
		}
		for (int i = 0; i < 3; i++) {
			transformed[index].color1[i] = c1[i] * 255.0f;
		}
	}

	// Here's the best opportunity to try to detect rectangles used to clear the screen, and
	// replace them with real OpenGL clears. This can provide a speedup on certain mobile chips.
	// Disabled for now - depth does not come out exactly the same.
	//
	// An alternative option is to simply ditch all the verts except the first and last to create a single
	// rectangle out of many. Quite a small optimization though.
	if (false && maxIndex > 1 && gstate.isModeClear() && prim == GE_PRIM_RECTANGLES && IsReallyAClear(maxIndex)) {
		u32 clearColor;
		// NOTE(review): color0 is written above as floats scaled by 255; copying 4 raw
		// bytes into a u32 looks layout-dependent — verify before re-enabling this path.
		memcpy(&clearColor, transformed[0].color0, 4);
		float clearDepth = transformed[0].z;
		const float col[4] = {
			((clearColor & 0xFF)) / 255.0f,
			((clearColor & 0xFF00) >> 8) / 255.0f,
			((clearColor & 0xFF0000) >> 16) / 255.0f,
			((clearColor & 0xFF000000) >> 24) / 255.0f,
		};

		bool colorMask = gstate.isClearModeColorMask();
		bool alphaMask = gstate.isClearModeAlphaMask();
		glstate.colorMask.set(colorMask, colorMask, colorMask, alphaMask);
		if (alphaMask) {
			glstate.stencilTest.set(true);
			// Clear stencil
			// TODO: extract the stencilValue properly, see below
			int stencilValue = 0;
			glstate.stencilFunc.set(GL_ALWAYS, stencilValue, 255);
		} else {
			// Don't touch stencil
			glstate.stencilTest.set(false);
		}
		glstate.scissorTest.set(false);
		bool depthMask = gstate.isClearModeDepthMask();

		int target = 0;
		if (colorMask || alphaMask) target |= GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
		if (depthMask) target |= GL_DEPTH_BUFFER_BIT;

		glClearColor(col[0], col[1], col[2], col[3]);
#ifdef USING_GLES2
		glClearDepthf(clearDepth);
#else
		glClearDepth(clearDepth);
#endif
		glClearStencil(0);	// TODO - take from alpha?
		glClear(target);
		return;
	}
// Software-transforms vertices for drawing (or detects a framebuffer clear).
// Reads decoded vertices from params->decoded, writes transformed vertices into
// params->transformed, and for rectangles expands each 2-vertex rect into 4 verts +
// 6 indices in params->transformedExpanded (updating inds/maxIndex accordingly).
// Outputs: drawBuffer/numTrans/drawIndexed describe what to submit; result->action
// is SW_CLEAR (with color/depth) or SW_DRAW_PRIMITIVES (possibly with setStencil).
void SoftwareTransform(
	int prim, int vertexCount, u32 vertType, u16 *&inds, int indexType,
	const DecVtxFormat &decVtxFormat, int &maxIndex, TransformedVertex *&drawBuffer, int &numTrans, bool &drawIndexed, const SoftwareTransformParams *params, SoftwareTransformResult *result) {
	u8 *decoded = params->decoded;
	FramebufferManagerCommon *fbman = params->fbman;
	TextureCacheCommon *texCache = params->texCache;
	TransformedVertex *transformed = params->transformed;
	TransformedVertex *transformedExpanded = params->transformedExpanded;
	float ySign = 1.0f;

	bool throughmode = (vertType & GE_VTYPE_THROUGH_MASK) != 0;
	// Secondary (specular) color is only sent separately when lighting is on.
	bool lmode = gstate.isUsingSecondaryColor() && gstate.isLightingEnabled();

	// TODO: Split up into multiple draw calls for GLES 2.0 where you can't guarantee support for more than 0x10000 verts.
#if defined(MOBILE_DEVICE)
	if (vertexCount > 0x10000/3)
		vertexCount = 0x10000/3;
#endif

	float uscale = 1.0f;
	float vscale = 1.0f;
	if (throughmode) {
		// Through-mode UVs are in texels; normalize them to [0, 1].
		uscale /= gstate_c.curTextureWidth;
		vscale /= gstate_c.curTextureHeight;
	}

	bool skinningEnabled = vertTypeIsSkinningEnabled(vertType);

	const int w = gstate.getTextureWidth(0);
	const int h = gstate.getTextureHeight(0);
	// Ratio of the PSP-visible texture size to the actual (possibly larger) backing texture.
	float widthFactor = (float) w / (float) gstate_c.curTextureWidth;
	float heightFactor = (float) h / (float) gstate_c.curTextureHeight;

	Lighter lighter(vertType);
	float fog_end = getFloat24(gstate.fog1);
	float fog_slope = getFloat24(gstate.fog2);
	// Same fixup as in ShaderManager.cpp
	if (my_isinf(fog_slope)) {
		// not really sure what a sensible value might be.
		fog_slope = fog_slope < 0.0f ? -10000.0f : 10000.0f;
	}
	if (my_isnan(fog_slope)) {
		// Workaround for https://github.com/hrydgard/ppsspp/issues/5384#issuecomment-38365988
		// Just put the fog far away at a large finite distance.
		// Infinities and NaNs are rather unpredictable in shaders on many GPUs
		// so it's best to just make it a sane calculation.
		fog_end = 100000.0f;
		fog_slope = 1.0f;
	}

	VertexReader reader(decoded, decVtxFormat, vertType);
	if (throughmode) {
		for (int index = 0; index < maxIndex; index++) {
			// Do not touch the coordinates or the colors. No lighting.
			reader.Goto(index);
			// TODO: Write to a flexible buffer, we don't always need all four components.
			TransformedVertex &vert = transformed[index];
			reader.ReadPos(vert.pos);

			if (reader.hasColor0()) {
				reader.ReadColor0_8888(vert.color0);
			} else {
				vert.color0_32 = gstate.getMaterialAmbientRGBA();
			}

			if (reader.hasUV()) {
				reader.ReadUV(vert.uv);

				vert.u *= uscale;
				vert.v *= vscale;
			} else {
				vert.u = 0.0f;
				vert.v = 0.0f;
			}

			// Ignore color1 and fog, never used in throughmode anyway.
			// The w of uv is also never used (hardcoded to 1.0.)
		}
	} else {
		// Okay, need to actually perform the full transform.
		for (int index = 0; index < maxIndex; index++) {
			reader.Goto(index);

			float v[3] = {0, 0, 0};
			Vec4f c0 = Vec4f(1, 1, 1, 1);
			Vec4f c1 = Vec4f(0, 0, 0, 0);
			float uv[3] = {0, 0, 1};
			float fogCoef = 1.0f;

			// We do software T&L for now
			float out[3];
			float pos[3];
			Vec3f normal(0, 0, 1);
			Vec3f worldnormal(0, 0, 1);
			reader.ReadPos(pos);

			if (!skinningEnabled) {
				Vec3ByMatrix43(out, pos, gstate.worldMatrix);
				if (reader.hasNormal()) {
					reader.ReadNrm(normal.AsArray());

					if (gstate.areNormalsReversed()) {
						normal = -normal;
					}

					Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix);
					worldnormal = worldnormal.Normalized();
				}
			} else {
				float weights[8];
				reader.ReadWeights(weights);
				if (reader.hasNormal())
					reader.ReadNrm(normal.AsArray());

				// Skinning: blend position/normal by each nonzero bone weight.
				Vec3f psum(0, 0, 0);
				Vec3f nsum(0, 0, 0);
				for (int i = 0; i < vertTypeGetNumBoneWeights(vertType); i++) {
					if (weights[i] != 0.0f) {
						Vec3ByMatrix43(out, pos, gstate.boneMatrix+i*12);
						Vec3f tpos(out);
						psum += tpos * weights[i];
						if (reader.hasNormal()) {
							Vec3f norm;
							Norm3ByMatrix43(norm.AsArray(), normal.AsArray(), gstate.boneMatrix+i*12);
							nsum += norm * weights[i];
						}
					}
				}

				// Yes, we really must multiply by the world matrix too.
				Vec3ByMatrix43(out, psum.AsArray(), gstate.worldMatrix);
				if (reader.hasNormal()) {
					normal = nsum;
					if (gstate.areNormalsReversed()) {
						normal = -normal;
					}

					Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix);
					worldnormal = worldnormal.Normalized();
				}
			}

			// Perform lighting here if enabled. don't need to check through, it's checked above.
			Vec4f unlitColor = Vec4f(1, 1, 1, 1);
			if (reader.hasColor0()) {
				reader.ReadColor0(&unlitColor.x);
			} else {
				// No vertex color: fall back to the material ambient color.
				unlitColor = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA());
			}

			if (gstate.isLightingEnabled()) {
				float litColor0[4];
				float litColor1[4];
				lighter.Light(litColor0, litColor1, unlitColor.AsArray(), out, worldnormal);

				// Don't ignore gstate.lmode - we should send two colors in that case
				for (int j = 0; j < 4; j++) {
					c0[j] = litColor0[j];
				}

				if (lmode) {
					// Separate colors
					for (int j = 0; j < 4; j++) {
						c1[j] = litColor1[j];
					}
				} else {
					// Summed color into c0 (will clamp in ToRGBA().)
					for (int j = 0; j < 4; j++) {
						c0[j] += litColor1[j];
					}
				}
			} else {
				if (reader.hasColor0()) {
					for (int j = 0; j < 4; j++) {
						c0[j] = unlitColor[j];
					}
				} else {
					c0 = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA());
				}
				if (lmode) {
					// c1 is already 0.
				}
			}

			float ruv[2] = {0.0f, 0.0f};
			if (reader.hasUV())
				reader.ReadUV(ruv);

			// Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights.
			switch (gstate.getUVGenMode()) {
			case GE_TEXMAP_TEXTURE_COORDS:	// UV mapping
			case GE_TEXMAP_UNKNOWN: // Seen in Riviera. Unsure of meaning, but this works.
				// We always prescale in the vertex decoder now.
				uv[0] = ruv[0];
				uv[1] = ruv[1];
				uv[2] = 1.0f;
				break;
			case GE_TEXMAP_TEXTURE_MATRIX:
				{
					// Projection mapping
					Vec3f source;
					switch (gstate.getUVProjMode()) {
					case GE_PROJMAP_POSITION: // Use model space XYZ as source
						source = pos;
						break;
					case GE_PROJMAP_UV: // Use unscaled UV as source
						source = Vec3f(ruv[0], ruv[1], 0.0f);
						break;
					case GE_PROJMAP_NORMALIZED_NORMAL: // Use normalized normal as source
						source = normal.Normalized();
						if (!reader.hasNormal()) {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
						}
						break;
					case GE_PROJMAP_NORMAL: // Use non-normalized normal as source!
						source = normal;
						if (!reader.hasNormal()) {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
						}
						break;
					}
					float uvw[3];
					Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix);
					uv[0] = uvw[0];
					uv[1] = uvw[1];
					uv[2] = uvw[2];
				}
				break;
			case GE_TEXMAP_ENVIRONMENT_MAP:
				// Shade mapping - use two light sources to generate U and V.
				{
					Vec3f lightpos0 = Vec3f(&lighter.lpos[gstate.getUVLS0() * 3]).Normalized();
					Vec3f lightpos1 = Vec3f(&lighter.lpos[gstate.getUVLS1() * 3]).Normalized();

					uv[0] = (1.0f + Dot(lightpos0, worldnormal))/2.0f;
					uv[1] = (1.0f + Dot(lightpos1, worldnormal))/2.0f;
					uv[2] = 1.0f;
				}
				break;
			default:
				// Illegal
				ERROR_LOG_REPORT(G3D, "Impossible UV gen mode? %d", gstate.getUVGenMode());
				break;
			}

			// Rescale normalized UVs to match the backing texture dimensions.
			uv[0] = uv[0] * widthFactor;
			uv[1] = uv[1] * heightFactor;

			// Transform the coord by the view matrix.
			Vec3ByMatrix43(v, out, gstate.viewMatrix);
			fogCoef = (v[2] + fog_end) * fog_slope;

			// TODO: Write to a flexible buffer, we don't always need all four components.
			memcpy(&transformed[index].x, v, 3 * sizeof(float));
			transformed[index].fog = fogCoef;
			memcpy(&transformed[index].u, uv, 3 * sizeof(float));
			transformed[index].color0_32 = c0.ToRGBA();
			transformed[index].color1_32 = c1.ToRGBA();

			// The multiplication by the projection matrix is still performed in the vertex shader.
			// So is vertex depth rounding, to simulate the 16-bit depth buffer.
		}
	}

	// Here's the best opportunity to try to detect rectangles used to clear the screen, and
	// replace them with real clears. This can provide a speedup on certain mobile chips.
	//
	// An alternative option is to simply ditch all the verts except the first and last to create a single
	// rectangle out of many. Quite a small optimization though.
	// Experiment: Disable on PowerVR (see issue #6290)
	// TODO: This bleeds outside the play area in non-buffered mode. Big deal? Probably not.
	bool reallyAClear = false;
	if (maxIndex > 1 && prim == GE_PRIM_RECTANGLES && gstate.isModeClear()) {
		int scissorX2 = gstate.getScissorX2() + 1;
		int scissorY2 = gstate.getScissorY2() + 1;
		reallyAClear = IsReallyAClear(transformed, maxIndex, scissorX2, scissorY2);
	}
	if (reallyAClear && gl_extensions.gpuVendor != GPU_VENDOR_POWERVR) {  // && g_Config.iRenderingMode != FB_NON_BUFFERED_MODE) {
		// If alpha is not allowed to be separate, it must match for both depth/stencil and color.  Vulkan requires this.
		bool alphaMatchesColor = gstate.isClearModeColorMask() == gstate.isClearModeAlphaMask();
		bool depthMatchesStencil = gstate.isClearModeAlphaMask() == gstate.isClearModeDepthMask();
		if (params->allowSeparateAlphaClear || (alphaMatchesColor && depthMatchesStencil)) {
			// The second vertex of the rect carries the clear color/depth.
			result->color = transformed[1].color0_32;
			// Need to rescale from a [0, 1] float.  This is the final transformed value.
			result->depth = ToScaledDepth((s16)(int)(transformed[1].z * 65535.0f));
			result->action = SW_CLEAR;
			return;
		}
	}

	// This means we're using a framebuffer (and one that isn't big enough.)
	if (gstate_c.curTextureHeight < (u32)h && maxIndex >= 2) {
		// Even if not rectangles, this will detect if either of the first two are outside the framebuffer.
		// HACK: Adding one pixel margin to this detection fixes issues in Assassin's Creed : Bloodlines,
		// while still keeping BOF working (see below).
		const float invTexH = 1.0f / gstate_c.curTextureHeight; // size of one texel.
		bool tlOutside;
		bool tlAlmostOutside;
		bool brOutside;
		// If we're outside heightFactor, then v must be wrapping or clamping.  Avoid this workaround.
		// If we're <= 1.0f, we're inside the framebuffer (workaround not needed.)
		// We buffer that 1.0f a little more with a texel to avoid some false positives.
		tlOutside = transformed[0].v <= heightFactor && transformed[0].v > 1.0f + invTexH;
		brOutside = transformed[1].v <= heightFactor && transformed[1].v > 1.0f + invTexH;
		// Careful: if br is outside, but tl is well inside, this workaround still doesn't make sense.
		// We go with halfway, since we overestimate framebuffer heights sometimes but not by much.
		tlAlmostOutside = transformed[0].v <= heightFactor && transformed[0].v >= 0.5f;
		if (tlOutside || (brOutside && tlAlmostOutside)) {
			// Okay, so we're texturing from outside the framebuffer, but inside the texture height.
			// Breath of Fire 3 does this to access a render surface at an offset.
			const u32 bpp = fbman->GetTargetFormat() == GE_FORMAT_8888 ? 4 : 2;
			const u32 prevH = texCache->AttachedDrawingHeight();
			const u32 fb_size = bpp * fbman->GetTargetStride() * prevH;
			const u32 prevYOffset = gstate_c.curTextureYOffset;
			if (texCache->SetOffsetTexture(fb_size)) {
				const float oldWidthFactor = widthFactor;
				const float oldHeightFactor = heightFactor;
				widthFactor = (float) w / (float) gstate_c.curTextureWidth;
				heightFactor = (float) h / (float) gstate_c.curTextureHeight;
				// We've already baked in the old gstate_c.curTextureYOffset, so correct.
				const float yDiff = (float) (prevH + prevYOffset - gstate_c.curTextureYOffset) / (float) h;
				for (int index = 0; index < maxIndex; ++index) {
					transformed[index].u *= widthFactor / oldWidthFactor;
					// Inverse it back to scale to the new FBO, and add 1.0f to account for old FBO.
					transformed[index].v = (transformed[index].v / oldHeightFactor - yDiff) * heightFactor;
				}
			}
		}
	}

	// Step 2: expand rectangles.
	drawBuffer = transformed;
	numTrans = 0;
	drawIndexed = false;

	if (prim != GE_PRIM_RECTANGLES) {
		// We can simply draw the unexpanded buffer.
		numTrans = vertexCount;
		drawIndexed = true;
	} else {
		bool useBufferedRendering = g_Config.iRenderingMode != FB_NON_BUFFERED_MODE;
		if (useBufferedRendering)
			ySign = -ySign;

		float flippedMatrix[16];
		if (!throughmode) {
			memcpy(&flippedMatrix, gstate.projMatrix, 16 * sizeof(float));

			const bool invertedY = useBufferedRendering ? (gstate_c.vpHeight < 0) : (gstate_c.vpHeight > 0);
			if (invertedY) {
				flippedMatrix[1] = -flippedMatrix[1];
				flippedMatrix[5] = -flippedMatrix[5];
				flippedMatrix[9] = -flippedMatrix[9];
				flippedMatrix[13] = -flippedMatrix[13];
			}
			const bool invertedX = gstate_c.vpWidth < 0;
			if (invertedX) {
				flippedMatrix[0] = -flippedMatrix[0];
				flippedMatrix[4] = -flippedMatrix[4];
				flippedMatrix[8] = -flippedMatrix[8];
				flippedMatrix[12] = -flippedMatrix[12];
			}
		}

		//rectangles always need 2 vertices, disregard the last one if there's an odd number
		vertexCount = vertexCount & ~1;
		numTrans = 0;
		drawBuffer = transformedExpanded;
		TransformedVertex *trans = &transformedExpanded[0];
		const u16 *indsIn = (const u16 *)inds;
		// The expanded index list is appended after the original indices.
		u16 *newInds = inds + vertexCount;
		u16 *indsOut = newInds;
		maxIndex = 4 * vertexCount;
		for (int i = 0; i < vertexCount; i += 2) {
			const TransformedVertex &transVtxTL = transformed[indsIn[i + 0]];
			const TransformedVertex &transVtxBR = transformed[indsIn[i + 1]];

			// We have to turn the rectangle into two triangles, so 6 points.
			// This is 4 verts + 6 indices.

			// bottom right
			trans[0] = transVtxBR;

			// top right
			trans[1] = transVtxBR;
			trans[1].y = transVtxTL.y;
			trans[1].v = transVtxTL.v;

			// top left
			trans[2] = transVtxBR;
			trans[2].x = transVtxTL.x;
			trans[2].y = transVtxTL.y;
			trans[2].u = transVtxTL.u;
			trans[2].v = transVtxTL.v;

			// bottom left
			trans[3] = transVtxBR;
			trans[3].x = transVtxTL.x;
			trans[3].u = transVtxTL.u;

			// That's the four corners. Now process UV rotation.
			if (throughmode)
				RotateUVThrough(trans);
			else
				RotateUV(trans, flippedMatrix, ySign);

			// Triangle: BR-TR-TL
			indsOut[0] = i * 2 + 0;
			indsOut[1] = i * 2 + 1;
			indsOut[2] = i * 2 + 2;
			// Triangle: BL-BR-TL
			indsOut[3] = i * 2 + 3;
			indsOut[4] = i * 2 + 0;
			indsOut[5] = i * 2 + 2;
			trans += 4;
			indsOut += 6;

			numTrans += 6;
		}
		inds = newInds;
		drawIndexed = true;

		// We don't know the color until here, so we have to do it now, instead of in StateMapping.
		// Might want to reconsider the order of things later...
		if (gstate.isModeClear() && gstate.isClearModeAlphaMask()) {
			result->setStencil = true;
			if (vertexCount > 1) {
				// Take the bottom right alpha value of the first rect as the stencil value.
				// Technically, each rect could individually fill its stencil, but most of the
				// time they use the same one.
				result->stencilValue = transformed[indsIn[1]].color0[3];
			} else {
				result->stencilValue = 0;
			}
		}
	}

	result->action = SW_DRAW_PRIMITIVES;
}
void TransformAndDrawPrim(void *verts, void *inds, int prim, int vertexCount, LinkedShader *program, float *customUV, int forceIndexType) { // First, decode the verts and apply morphing VertexDecoder dec; dec.SetVertexType(gstate.vertType); dec.DecodeVerts(decoded, verts, inds, prim, vertexCount); bool useTexCoord = false; // Check if anything needs updating if (gstate.textureChanged) { if (gstate.textureMapEnable && !(gstate.clearmode & 1)) { PSPSetTexture(); useTexCoord = true; } } // Then, transform and draw in one big swoop (urgh!) // need to move this to the shader. // We're gonna have to keep software transforming RECTANGLES, unless we use a geom shader which we can't on OpenGL ES 2.0. // Usually, though, these primitives don't use lighting etc so it's no biggie performance wise, but it would be nice to get rid of // this code. // Actually, if we find the camera-relative right and down vectors, it might even be possible to add the extra points in pre-transformed // space and thus make decent use of hardware transform. // Actually again, single quads could be drawn more efficiently using GL_TRIANGLE_STRIP, no need to duplicate verts as for // GL_TRIANGLES. Still need to sw transform to compute the extra two corners though. // Temporary storage for RECTANGLES emulation float v2[3] = {0}; float uv2[2] = {0}; int numTrans = 0; TransformedVertex *trans = &transformed[0]; // TODO: Could use glDrawElements in some cases, see below. // TODO: Split up into multiple draw calls for Android where you can't guarantee support for more than 0x10000 verts. 
int i = 0; #ifdef ANDROID if (vertexCount > 0x10000/3) vertexCount = 0x10000/3; #endif for (int i = 0; i < vertexCount; i++) { int indexType = (gstate.vertType & GE_VTYPE_IDX_MASK); if (forceIndexType != -1) { indexType = forceIndexType; } int index; if (indexType == GE_VTYPE_IDX_8BIT) { index = ((u8*)inds)[i]; } else if (indexType == GE_VTYPE_IDX_16BIT) { index = ((u16*)inds)[i]; } else { index = i; } float v[3] = {0,0,0}; float c[4] = {1,1,1,1}; float uv[2] = {0,0}; if (gstate.vertType & GE_VTYPE_THROUGH_MASK) { // Do not touch the coordinates or the colors. No lighting. for (int j=0; j<3; j++) v[j] = decoded[index].pos[j]; // TODO : check if has color for (int j=0; j<4; j++) c[j] = decoded[index].color[j]; // TODO : check if has uv for (int j=0; j<2; j++) uv[j] = decoded[index].uv[j]; //Rescale UV? } else { //We do software T&L for now float out[3], norm[3]; if ((gstate.vertType & GE_VTYPE_WEIGHT_MASK) == GE_VTYPE_WEIGHT_NONE) { Vec3ByMatrix43(out, decoded[index].pos, gstate.worldMatrix); Norm3ByMatrix43(norm, decoded[index].normal, gstate.worldMatrix); } else { Vec3 psum(0,0,0); Vec3 nsum(0,0,0); int nweights = (gstate.vertType & GE_VTYPE_WEIGHT_MASK) >> GE_VTYPE_WEIGHT_SHIFT; for (int i = 0; i < nweights; i++) { Vec3ByMatrix43(out, decoded[index].pos, gstate.boneMatrix+i*12); Norm3ByMatrix43(norm, decoded[index].normal, gstate.boneMatrix+i*12); Vec3 tpos(out), tnorm(norm); psum += tpos*decoded[index].weights[i]; nsum += tnorm*decoded[index].weights[i]; } nsum.Normalize(); psum.Write(out); nsum.Write(norm); } // Perform lighting here if enabled. don't need to check through, it's checked above. float dots[4] = {0,0,0,0}; if (program->a_color0 != -1) { //c[1] = norm[1]; float litColor[4] = {0,0,0,0}; Light(litColor, decoded[index].color, out, norm, dots); if (gstate.lightingEnable & 1) { memcpy(c, litColor, sizeof(litColor)); } else { // no lighting? copy the color. 
for (int j=0; j<4; j++) c[j] = decoded[index].color[j]; } } else { // no color in the fragment program??? for (int j=0; j<4; j++) c[j] = decoded[index].color[j]; } if (customUV) { uv[0] = customUV[index * 2 + 0]*gstate.uScale + gstate.uOff; uv[1] = customUV[index * 2 + 1]*gstate.vScale + gstate.vOff; } else { // Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights. switch (gstate.texmapmode & 0x3) { case 0: // UV mapping // Texture scale/offset is only performed in this mode. uv[0] = decoded[index].uv[0]*gstate.uScale + gstate.uOff; uv[1] = decoded[index].uv[1]*gstate.vScale + gstate.vOff; break; case 1: { // Projection mapping Vec3 source; switch ((gstate.texmapmode >> 8) & 0x3) { case 0: // Use model space XYZ as source source = decoded[index].pos; break; case 1: // Use unscaled UV as source source = Vec3(decoded[index].uv[0], decoded[index].uv[1], 0.0f); break; case 2: // Use normalized normal as source source = Vec3(norm).Normalized(); break; case 3: // Use non-normalized normal as source! source = Vec3(norm); break; } float uvw[3]; Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix); uv[0] = uvw[0]; uv[1] = uvw[1]; } break; case 2: // Shade mapping { int lightsource1 = gstate.texshade & 0x3; int lightsource2 = (gstate.texshade >> 8) & 0x3; uv[0] = dots[lightsource1]; uv[1] = dots[lightsource2]; } break; case 3: // Illegal break; } } // Transform the coord by the view matrix. Should this be done before or after texcoord generation? Vec3ByMatrix43(v, out, gstate.viewMatrix); } // We need to tesselate axis-aligned rectangles, as they're only specified by two coordinates. if (prim == GE_PRIM_RECTANGLES) { if ((i & 1) == 0) { // Save this vertex so we can generate when we get the next one. Color is taken from the last vertex. memcpy(v2, v, sizeof(float)*3); memcpy(uv2,uv,sizeof(float)*2); } else { // We have to turn the rectangle into two triangles, so 6 points. Sigh. 
// top left trans->x = v[0]; trans->y = v[1]; trans->z = v[2]; trans->uv[0] = uv[0]; trans->uv[1] = uv[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; // top right trans->x = v2[0]; trans->y = v[1]; trans->z = v[2]; trans->uv[0] = uv2[0]; trans->uv[1] = uv[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; // bottom right trans->x = v2[0]; trans->y = v2[1]; trans->z = v[2]; trans->uv[0] = uv2[0]; trans->uv[1] = uv2[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; // bottom left trans->x = v[0]; trans->y = v2[1]; trans->z = v[2]; trans->uv[0] = uv[0]; trans->uv[1] = uv2[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; // top left trans->x = v[0]; trans->y = v[1]; trans->z = v[2]; trans->uv[0] = uv[0]; trans->uv[1] = uv[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; // bottom right trans->x = v2[0]; trans->y = v2[1]; trans->z = v[2]; trans->uv[0] = uv2[0]; trans->uv[1] = uv2[1]; memcpy(trans->color, c, 4*sizeof(float)); trans++; numTrans += 6; } } else { memcpy(&trans->x, v, 3*sizeof(float)); memcpy(trans->color, c, 4*sizeof(float)); memcpy(trans->uv, uv, 2*sizeof(float)); trans++; numTrans++; } } glEnableVertexAttribArray(program->a_position); if (useTexCoord && program->a_texcoord != -1) glEnableVertexAttribArray(program->a_texcoord); if (program->a_color0 != -1) glEnableVertexAttribArray(program->a_color0); const int vertexSize = sizeof(*trans); glVertexAttribPointer(program->a_position, 3, GL_FLOAT, GL_FALSE, vertexSize, transformed); if (useTexCoord && program->a_texcoord != -1) glVertexAttribPointer(program->a_texcoord, 2, GL_FLOAT, GL_FALSE, vertexSize, ((uint8_t*)transformed) + 3 * 4); if (program->a_color0 != -1) glVertexAttribPointer(program->a_color0, 4, GL_FLOAT, GL_FALSE, vertexSize, ((uint8_t*)transformed) + 5 * 4); // NOTICE_LOG(G3D,"DrawPrimitive: %i", numTrans); glDrawArrays(glprim[prim], 0, numTrans); glDisableVertexAttribArray(program->a_position); if (useTexCoord && program->a_texcoord != -1) 
glDisableVertexAttribArray(program->a_texcoord); if (program->a_color0 != -1) glDisableVertexAttribArray(program->a_color0); /* if (((gstate.vertType ) & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_8BIT) { glDrawElements(glprim, vertexCount, GL_UNSIGNED_BYTE, inds); } else if (((gstate.vertType ) & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT) { glDrawElements(glprim, vertexCount, GL_UNSIGNED_SHORT, inds); } else {*/ }
// This normalizes a set of vertices in any format to SimpleVertex format, by processing away morphing AND skinning.
// The rest of the transform pipeline like lighting will go as normal, either hardware or software.
// The implementation is initially a bit inefficient but shouldn't be a big deal.
// An intermediate buffer of not-easy-to-predict size is stored at bufPtr.
//   outPtr               - receives SimpleVertex entries, indexed by the original vertex index
//   bufPtr               - scratch buffer for the decoded (morph-resolved) vertices
//   inPtr                - raw input vertex data
//   dec                  - decoder configured for the input format
//   lowerBound/upperBound - inclusive range of vertex indices to process
//   vertType             - GE vertex type flags of the input
// Returns the new vertex type describing the normalized output (index bits preserved).
u32 TransformDrawEngine::NormalizeVertices(u8 *outPtr, u8 *bufPtr, const u8 *inPtr, VertexDecoder *dec, int lowerBound, int upperBound, u32 vertType) {
	// First, decode the vertices into a GPU compatible format. This step can be eliminated but will need a separate
	// implementation of the vertex decoder.
	dec->DecodeVerts(bufPtr, inPtr, lowerBound, upperBound);

	// OK, morphing eliminated but bones still remain to be taken care of.
	// Let's do a partial software transform where we only do skinning.
	VertexReader reader(bufPtr, dec->GetDecVtxFmt(), vertType);

	SimpleVertex *sverts = (SimpleVertex *)outPtr;

	// Fallback color when the vertex format carries no color: material ambient.
	const u8 defaultColor[4] = {
		(u8)gstate.getMaterialAmbientR(),
		(u8)gstate.getMaterialAmbientG(),
		(u8)gstate.getMaterialAmbientB(),
		(u8)gstate.getMaterialAmbientA(),
	};

	// Let's have two separate loops, one for non skinning and one for skinning.
	if (!g_Config.bSoftwareSkinning && (vertType & GE_VTYPE_WEIGHT_MASK) != GE_VTYPE_WEIGHT_NONE) {
		// Skinned path: bake the bone transforms into pos/nrm so the weights disappear.
		int numBoneWeights = vertTypeGetNumBoneWeights(vertType);
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			}

			if (vertType & GE_VTYPE_COL_MASK) {
				reader.ReadColor0_8888(sv.color);
			} else {
				memcpy(sv.color, defaultColor, 4);
			}

			float nrm[3], pos[3];
			float bnrm[3], bpos[3];

			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tesselation anyway, not sure if any need to supply
				reader.ReadNrm(nrm);
			} else {
				nrm[0] = 0;
				nrm[1] = 0;
				nrm[2] = 1.0f;
			}
			reader.ReadPos(pos);

			// Apply skinning transform directly
			float weights[8];
			reader.ReadWeights(weights);
			// Skinning: blend position/normal by each nonzero bone weight.
			Vec3Packedf psum(0,0,0);
			Vec3Packedf nsum(0,0,0);
			for (int w = 0; w < numBoneWeights; w++) {
				if (weights[w] != 0.0f) {
					Vec3ByMatrix43(bpos, pos, gstate.boneMatrix+w*12);
					Vec3Packedf tpos(bpos);
					psum += tpos * weights[w];

					Norm3ByMatrix43(bnrm, nrm, gstate.boneMatrix+w*12);
					Vec3Packedf tnorm(bnrm);
					nsum += tnorm * weights[w];
				}
			}
			sv.pos = psum;
			sv.nrm = nsum;
		}
	} else {
		// Non-skinned path (or software skinning handles weights later): straight copy.
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			} else {
				sv.uv[0] = 0;  // This will get filled in during tesselation
				sv.uv[1] = 0;
			}
			if (vertType & GE_VTYPE_COL_MASK) {
				reader.ReadColor0_8888(sv.color);
			} else {
				memcpy(sv.color, defaultColor, 4);
			}
			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tesselation anyway, not sure if any need to supply
				reader.ReadNrm((float *)&sv.nrm);
			} else {
				sv.nrm.x = 0;
				sv.nrm.y = 0;
				sv.nrm.z = 1.0f;
			}
			reader.ReadPos((float *)&sv.pos);
		}
	}

	// Okay, there we are! Return the new type (but keep the index bits)
	return GE_VTYPE_TC_FLOAT | GE_VTYPE_COL_8888 | GE_VTYPE_NRM_FLOAT | GE_VTYPE_POS_FLOAT | (vertType & (GE_VTYPE_IDX_MASK | GE_VTYPE_THROUGH));
}