Example #1
bool Engine::validWalkableTile(MapChunk *tmap, int x, int y)
{
    if(tmap == NULL) return false;

    sf::Vector2i tpos(x,y);

    //check that tile is within map dimensions
    sf::Vector2i mapdim = tmap->getDimensions();
    if(x < 0 || y < 0 || x >= mapdim.x || y >= mapdim.y) return false;

    //check that tile is walkable
    MapTile *ttile = getMapTile( tmap->getTile(x,y) );
    if(ttile == NULL || !ttile->isWalkable()) return false;

    //check that there are no actors there
    std::vector<Monster*> *actors = tmap->getMapMonsters();
    for(int i = 0; i < int(actors->size()); i++)
    {
        if((*actors)[i]->getPosition() == tpos) return false;
    }

    //check if player is there
    if( m_Player->getPosition() == tpos) return false;

    return true;
}
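A minimal usage sketch, assuming an Engine method that expands 4-connected neighbours; the helper below is hypothetical and only illustrates how validWalkableTile gates candidate moves:

void Engine::scanWalkableNeighbours(MapChunk *tmap, int x, int y)
{
    //hypothetical illustration - not part of the engine code above
    static const int dx[4] = { 1, -1, 0, 0 };
    static const int dy[4] = { 0, 0, 1, -1 };

    for(int i = 0; i < 4; i++)
    {
        if( validWalkableTile(tmap, x + dx[i], y + dy[i]) )
        {
            //the adjacent tile is in bounds, walkable, and unoccupied
        }
    }
}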
Example #2
void LabelStyle::draw()
{
    Label *w = static_cast<Label*>(getWidget());
    Rect rect(Position(), w->getSize());
    Color bg = getBGColor();
    TextPosition tpos(rect, getTextHAlign(), getTextVAlign(), getTextHSpacing(), getTextVSpacing());
    //iuiGetRenderer()->drawRect(rect, bg);
    iuiGetRenderer()->drawFont(tpos, getFontColor(), w->getText().c_str(), w->getText().size());
}
Example #3
bool CollisionTester::FrustumPointTest( const Frustum& frus, const CVector3& pnt )
{
	CVector4 tpos( pnt );
	bool	bv = frus.mNearPlane.IsPointInFront( tpos );
			bv &= frus.mFarPlane.IsPointInFront( tpos );
			bv &= frus.mLeftPlane.IsPointInFront( tpos );
			bv &= frus.mRightPlane.IsPointInFront( tpos );
			bv &= frus.mTopPlane.IsPointInFront( tpos );
			bv &= frus.mBottomPlane.IsPointInFront( tpos );

	return bv;
}
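For reference, IsPointInFront in a frustum test like this is normally a signed half-space check against the plane equation. A minimal sketch under assumed names (the Plane layout and CVector3 member access below are assumptions, not taken from the example):

struct Plane
{
	CVector3 mNormal;	// assumed unit-length plane normal, pointing into the frustum
	float    mD;		// assumed plane constant: dot(mNormal, p) + mD == 0 on the plane
};

bool IsPointInFront( const Plane& pl, const CVector3& p )
{
	// A non-negative signed distance means the point lies on the inner side of the plane.
	float dist = pl.mNormal.x*p.x + pl.mNormal.y*p.y + pl.mNormal.z*p.z + pl.mD;
	return dist >= 0.0f;
}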
Example #4
//------------------------------------------------------------------------
//chr safe to remove?
bool CGunTurret::IsTargetCloaked(IActor *pActor) const
{
	// cloak check
	if(m_turretparams.find_cloaked)
		return false;

	bool cloaked = false;

	// if destinationId assigned, target can always be found
	if(m_destinationId && pActor->GetEntityId() == m_destinationId)
		return false;

	if(cloaked && m_turretparams.light_fov != 0.f)
	{
		// if cloaked, target can only be found with searchlight
		// check if target inside light cone
		const Matrix34 &weaponTM = GetEntity()->GetSlotWorldTM(eIGS_ThirdPerson);
		Vec3 wpos(weaponTM.GetTranslation());
		Vec3 wdir(weaponTM.GetColumn1());
		Vec3 tpos(GetTargetPos(pActor->GetEntity()));

		float epsilon = 0.8f;
		Quat rot = Quat::CreateRotationAA(epsilon*0.5f*DEG2RAD(m_turretparams.light_fov), weaponTM.GetColumn2());
		Vec3 a = wpos + m_turretparams.mg_range*(wdir*rot);
		Vec3 b = wpos + m_turretparams.mg_range*(wdir*rot.GetInverted());
		bool inside = Overlap::PointInTriangle(tpos, wpos, a, b, weaponTM.GetColumn2());

		if(inside)
		{
			rot = Quat::CreateRotationAA(0.5f*DEG2RAD(m_turretparams.light_fov), weaponTM.GetColumn0());
			a = wpos + m_turretparams.mg_range*(wdir*rot);
			b = wpos + m_turretparams.mg_range*(wdir*rot.GetInverted());
			inside = Overlap::PointInTriangle(tpos, wpos, a, b, weaponTM.GetColumn0());
		}

		cloaked = !inside;

		if(g_pGameCVars->i_debug_turrets == eGTD_Search)
		{
			IRenderAuxGeom *pGeom = gEnv->pRenderer->GetIRenderAuxGeom();
			pGeom->SetRenderFlags(e_Def3DPublicRenderflags);
			float color[] = {1,1,1,1};
			Vec3 points[] = {wpos, a, b};
			pGeom->DrawPolyline(points, 3, true, ColorB(0,255,0,255));

			if(inside)
				gEnv->pRenderer->Draw2dLabel(200,200,1.4f,color,false,"target inside cone");
		}
	}

	return cloaked;
}
Example #5
/**
 * @function GoalDist
 * @brief Calculate geometric distance from the goal to the End Effector Body Node
 */
double B1RRT::GoalDist( Eigen::VectorXd _nodeConfig, Eigen::VectorXd _targetPose ) {

    world->mRobots[robotId]->setDofs( _nodeConfig, links );
    Eigen::MatrixXd pose = world->mRobots[robotId]->getNode( EEId )->getWorldTransform(); 
    double dist;

    Eigen::VectorXd pos(3); pos << pose(0,3), pose(1,3), pose(2,3);
    Eigen::VectorXd tpos(3); tpos << _targetPose(0), _targetPose(1), _targetPose(2);

    dist = ( pos - tpos ).norm();

    return dist;
}
Example #6
void ToggleButtonStyle::draw()
{
    ToggleButton *w = static_cast<ToggleButton*>(getWidget());
    Rect rect(Position(), w->getSize());
    TextPosition tpos(rect, getTextHAlign(), getTextVAlign(), getTextHSpacing(), getTextVSpacing());
    Color bg = getBGColor();
    if(w->isPressing() || w->isPressed()) {
        bg += vec4(0.4f, 0.4f, 0.4f, 0.0f);
    }
    else if(w->isHovered()) {
        bg += vec4(0.2f, 0.2f, 0.2f, 0.0f);
    }
    iuiGetRenderer()->drawRect(rect, bg);
    iuiGetRenderer()->drawOutlineRect(rect, getBorderColor());
    iuiGetRenderer()->drawFont(tpos, getFontColor(), w->getText().c_str(), w->getText().size());
}
Example #7
float DetourCrowdComponent::getDistanceToGoal() const
{
	if( !m_isInCrowd )
	{
		return -1.0f;
	}

	dtCrowd* crowd = DetourCrowdManager::instance()->getCrowd();
	const dtCrowdAgent* ag = crowd->getAgent(m_crowdId);

	Vec3f tpos(ag->targetPos[0],ag->targetPos[1],ag->targetPos[2]);
	Vec3f vpos(ag->npos[0],ag->npos[1],ag->npos[2]);
	Vec3f curEntityToTargetVec =  tpos - vpos;

	return curEntityToTargetVec.length();
}
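Callers have to treat the -1.0f return value as a "not in the crowd" sentinel rather than a distance. A minimal usage sketch (the component pointer and the threshold name are assumed, not part of the source):

float d = crowdComponent->getDistanceToGoal();
if( d >= 0.0f && d < arriveThreshold )
{
	// the agent is simulated and close enough to its target
}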
Example #8
void ModelInstance::draw2(const Vec3D& ofs, const float rot)
{
	Vec3D tpos(ofs + pos);
	rotate(ofs.x,ofs.z,&tpos.x,&tpos.z,rot*PI/180.0f);
	if ( (tpos - gWorld->camera).lengthSquared() > (gWorld->doodaddrawdistance2*model->rad*sc) ) return;
	if (!gWorld->frustum.intersectsSphere(tpos, model->rad*sc)) return;

	glPushMatrix();

	glTranslatef(pos.x, pos.y, pos.z);
	Vec3D vdir(-dir.z,dir.x,dir.y);
	glQuaternionRotate(vdir,w);
	glScalef(sc,-sc,-sc);

	model->draw();
	glPopMatrix();
}
Example #9
	void EntitiesEditor::drawBoxHelpers(Renderer2D &out, const IBox &box) const {
		int3 pos = box.min, bbox = box.max - box.min;
		int3 tsize = asXZY(m_tile_map.dimensions(), 32);

		drawLine(out, int3(0, pos.y, pos.z), int3(tsize.x, pos.y, pos.z), Color(0, 255, 0, 127));
		drawLine(out, int3(0, pos.y, pos.z + bbox.z), int3(tsize.x, pos.y, pos.z + bbox.z), Color(0, 255, 0, 127));
		
		drawLine(out, int3(pos.x, pos.y, 0), int3(pos.x, pos.y, tsize.z), Color(0, 255, 0, 127));
		drawLine(out, int3(pos.x + bbox.x, pos.y, 0), int3(pos.x + bbox.x, pos.y, tsize.z), Color(0, 255, 0, 127));

		int3 tpos(pos.x, 0, pos.z);
		drawBBox(out, IBox(tpos, tpos + int3(bbox.x, pos.y, bbox.z)), Color(0, 0, 255, 127));
		
		drawLine(out, int3(0, 0, pos.z), int3(tsize.x, 0, pos.z), Color(0, 0, 255, 127));
		drawLine(out, int3(0, 0, pos.z + bbox.z), int3(tsize.x, 0, pos.z + bbox.z), Color(0, 0, 255, 127));
		
		drawLine(out, int3(pos.x, 0, 0), int3(pos.x, 0, tsize.z), Color(0, 0, 255, 127));
		drawLine(out, int3(pos.x + bbox.x, 0, 0), int3(pos.x + bbox.x, 0, tsize.z), Color(0, 0, 255, 127));
	}
Example #10
void DetourCrowdComponent::update( dtCrowd* crowd)
{
	if( m_pendingAddToSim )
	{
		addBackToSim();
		return;
	}
	const dtCrowdAgent* ag = crowd->getAgent(m_crowdId);
	if( ag->active && m_isInCrowd )
	{
		const float* pos = ag->npos;
		const float* vel = ag->vel;
		Vec3f velo( vel[0], vel[1], vel[2] );
		Vec3f veloRot = velo.toRotation();
		
		// Scenegraph expects degree, Bullet radians. This should be fixed in those components.
		// Rotate: degree (Scenegraph)
		float yrot = radToDeg(veloRot.y);
		yrot -= 180.f;
		// Rotate: radian (Bullet Physics)
		//float yrot = veloRot.y - Math::Pi;

		//const float* trans = GameEngine::getEntityTransformation(m_owner->worldId());

		// The entity travels on the navigation mesh.
		GameEngine::setEntityTranslation( m_owner->worldId(), pos[0], pos[1] + m_yOffset, pos[2] );
		GameEngine::setEntityRotation( m_owner->worldId(), 0, yrot, 0);

		Vec3f tpos(ag->targetPos[0],ag->targetPos[1],ag->targetPos[2]);
		Vec3f vpos(ag->npos[0],ag->npos[1],ag->npos[2]);
		Vec3f curEntityToTargetVec =  tpos - vpos;

		if( curEntityToTargetVec.length() < m_targetRadius )
		{
			// Target reached. Remove from sim and add obstacle.
			tempRemoveFromSim();
			GameEvent targetReachedEv( GameEvent::E_TARGET_REACHED, 0, 0 );
			GameEngine::sendEvent( m_worldId, &targetReachedEv );
		}
	}
}
Example #11
bool CollisionTester::FrustumSphereTest( const Frustum& frus, const Sphere& sph )
{
	float nrad = -sph.mRadius;
	CVector4 tpos( sph.mCenter );
	
	float nd = frus.mNearPlane.GetPointDistance( tpos );
	float fd = frus.mFarPlane.GetPointDistance( tpos );
	float ld = frus.mLeftPlane.GetPointDistance( tpos );
	float rd = frus.mRightPlane.GetPointDistance( tpos );
	float td = frus.mTopPlane.GetPointDistance( tpos );
	float bd = frus.mBottomPlane.GetPointDistance( tpos );
	
	if(nd < nrad) return false;
	if(fd < nrad) return false;
	if(ld < nrad) return false;
	if(rd < nrad) return false;
	if(td < nrad) return false;
	if(bd < nrad) return false;
	
	return true;
}
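Note how this relates to the point test in Example #3: with mRadius == 0 the comparisons against nrad collapse to plain half-space checks, so a point behaves like a degenerate sphere. A small illustration under the same assumed API (whether these testers are callable as statics is an assumption):

Sphere degenerate;
degenerate.mCenter = pnt;	// some CVector3 point of interest
degenerate.mRadius = 0.0f;

// Expected to agree with CollisionTester::FrustumPointTest( frus, pnt )
bool inside = CollisionTester::FrustumSphereTest( frus, degenerate );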
Example #12
void EditboxStyle::draw()
{
    Editbox *w = static_cast<Editbox*>(getWidget());
    Rect rect(Position(), w->getSize());
    TextPosition tpos(rect, getTextHAlign(), getTextVAlign(), getTextHSpacing(), getTextVSpacing());
    Color bg = getBGColor();
    if(w->isFocused()) {
        bg += vec4(0.4f, 0.4f, 0.4f, 0.0f);
    }
    else if(w->isHovered()) {
        bg += vec4(0.2f, 0.2f, 0.2f, 0.0f);
    }
    iuiGetRenderer()->drawRect(rect, bg);
    iuiGetRenderer()->drawOutlineRect(rect, getBorderColor());
    iuiGetRenderer()->drawFont(tpos, getFontColor(), w->getText().c_str(), w->getText().size());
    if(w->isFocused() && ist::GetTick()%1000<500) {
        vec2 tsize = iuiGetRenderer()->computeTextSize(w->getText().c_str(), w->getCursorPos());
        Line l(Position(tsize.x, 0.0f), Position(tsize.x, tsize.y));
        iuiGetRenderer()->drawLine(l, getBorderColor());
    }
}
Example #13
void TransformDrawEngine::SoftwareTransformAndDraw(
    int prim, u8 *decoded, LinkedShader *program, int vertexCount, u32 vertType, void *inds, int indexType, const DecVtxFormat &decVtxFormat, int maxIndex) {

    bool throughmode = (vertType & GE_VTYPE_THROUGH_MASK) != 0;
    bool lmode = gstate.isUsingSecondaryColor() && gstate.isLightingEnabled();

    // TODO: Split up into multiple draw calls for GLES 2.0 where you can't guarantee support for more than 0x10000 verts.

#if defined(MOBILE_DEVICE)
    if (vertexCount > 0x10000/3)
        vertexCount = 0x10000/3;
#endif

    float uscale = 1.0f;
    float vscale = 1.0f;
    bool scaleUV = false;
    if (throughmode) {
        uscale /= gstate_c.curTextureWidth;
        vscale /= gstate_c.curTextureHeight;
    } else {
        scaleUV = !g_Config.bPrescaleUV;
    }

    bool skinningEnabled = vertTypeIsSkinningEnabled(vertType);

    int w = gstate.getTextureWidth(0);
    int h = gstate.getTextureHeight(0);
    float widthFactor = (float) w / (float) gstate_c.curTextureWidth;
    float heightFactor = (float) h / (float) gstate_c.curTextureHeight;

    Lighter lighter(vertType);
    float fog_end = getFloat24(gstate.fog1);
    float fog_slope = getFloat24(gstate.fog2);

    VertexReader reader(decoded, decVtxFormat, vertType);
    for (int index = 0; index < maxIndex; index++) {
        reader.Goto(index);

        float v[3] = {0, 0, 0};
        float c0[4] = {1, 1, 1, 1};
        float c1[4] = {0, 0, 0, 0};
        float uv[3] = {0, 0, 1};
        float fogCoef = 1.0f;

        if (throughmode) {
            // Do not touch the coordinates or the colors. No lighting.
            reader.ReadPos(v);
            if (reader.hasColor0()) {
                reader.ReadColor0(c0);
                for (int j = 0; j < 4; j++) {
                    c1[j] = 0.0f;
                }
            } else {
                c0[0] = gstate.getMaterialAmbientR() / 255.f;
                c0[1] = gstate.getMaterialAmbientG() / 255.f;
                c0[2] = gstate.getMaterialAmbientB() / 255.f;
                c0[3] = gstate.getMaterialAmbientA() / 255.f;
            }

            if (reader.hasUV()) {
                reader.ReadUV(uv);

                uv[0] *= uscale;
                uv[1] *= vscale;
            }
            fogCoef = 1.0f;
            // Scale UV?
        } else {
            // We do software T&L for now
            float out[3], norm[3];
            float pos[3], nrm[3];
            Vec3f normal(0, 0, 1);
            reader.ReadPos(pos);
            if (reader.hasNormal())
                reader.ReadNrm(nrm);

            if (!skinningEnabled) {
                Vec3ByMatrix43(out, pos, gstate.worldMatrix);
                if (reader.hasNormal()) {
                    Norm3ByMatrix43(norm, nrm, gstate.worldMatrix);
                    normal = Vec3f(norm).Normalized();
                }
            } else {
                float weights[8];
                reader.ReadWeights(weights);
                // Skinning
                Vec3f psum(0,0,0);
                Vec3f nsum(0,0,0);
                for (int i = 0; i < vertTypeGetNumBoneWeights(vertType); i++) {
                    if (weights[i] != 0.0f) {
                        Vec3ByMatrix43(out, pos, gstate.boneMatrix+i*12);
                        Vec3f tpos(out);
                        psum += tpos * weights[i];
                        if (reader.hasNormal()) {
                            Norm3ByMatrix43(norm, nrm, gstate.boneMatrix+i*12);
                            Vec3f tnorm(norm);
                            nsum += tnorm * weights[i];
                        }
                    }
                }

                // Yes, we really must multiply by the world matrix too.
                Vec3ByMatrix43(out, psum.AsArray(), gstate.worldMatrix);
                if (reader.hasNormal()) {
                    Norm3ByMatrix43(norm, nsum.AsArray(), gstate.worldMatrix);
                    normal = Vec3f(norm).Normalized();
                }
            }

            // Perform lighting here if enabled. No need to check throughmode; it's handled above.
            float unlitColor[4] = {1, 1, 1, 1};
            if (reader.hasColor0()) {
                reader.ReadColor0(unlitColor);
            } else {
                unlitColor[0] = gstate.getMaterialAmbientR() / 255.f;
                unlitColor[1] = gstate.getMaterialAmbientG() / 255.f;
                unlitColor[2] = gstate.getMaterialAmbientB() / 255.f;
                unlitColor[3] = gstate.getMaterialAmbientA() / 255.f;
            }
            float litColor0[4];
            float litColor1[4];
            lighter.Light(litColor0, litColor1, unlitColor, out, normal);

            if (gstate.isLightingEnabled()) {
                // Don't ignore gstate.lmode - we should send two colors in that case
                for (int j = 0; j < 4; j++) {
                    c0[j] = litColor0[j];
                }
                if (lmode) {
                    // Separate colors
                    for (int j = 0; j < 4; j++) {
                        c1[j] = litColor1[j];
                    }
                } else {
                    // Summed color into c0
                    for (int j = 0; j < 4; j++) {
                        c0[j] = ((c0[j] + litColor1[j]) > 1.0f) ? 1.0f : (c0[j] + litColor1[j]);
                    }
                }
            } else {
                if (reader.hasColor0()) {
                    for (int j = 0; j < 4; j++) {
                        c0[j] = unlitColor[j];
                    }
                } else {
                    c0[0] = gstate.getMaterialAmbientR() / 255.f;
                    c0[1] = gstate.getMaterialAmbientG() / 255.f;
                    c0[2] = gstate.getMaterialAmbientB() / 255.f;
                    c0[3] = gstate.getMaterialAmbientA() / 255.f;
                }
                if (lmode) {
                    for (int j = 0; j < 4; j++) {
                        c1[j] = 0.0f;
                    }
                }
            }

            float ruv[2] = {0.0f, 0.0f};
            if (reader.hasUV())
                reader.ReadUV(ruv);

            // Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights.
            switch (gstate.getUVGenMode()) {
            case GE_TEXMAP_TEXTURE_COORDS:	// UV mapping
            case GE_TEXMAP_UNKNOWN: // Seen in Riviera.  Unsure of meaning, but this works.
                // Texture scale/offset is only performed in this mode.
                if (scaleUV) {
                    uv[0] = ruv[0]*gstate_c.uv.uScale + gstate_c.uv.uOff;
                    uv[1] = ruv[1]*gstate_c.uv.vScale + gstate_c.uv.vOff;
                } else {
                    uv[0] = ruv[0];
                    uv[1] = ruv[1];
                }
                uv[2] = 1.0f;
                break;

            case GE_TEXMAP_TEXTURE_MATRIX:
            {
                // Projection mapping
                Vec3f source;
                switch (gstate.getUVProjMode())	{
                case GE_PROJMAP_POSITION: // Use model space XYZ as source
                    source = pos;
                    break;

                case GE_PROJMAP_UV: // Use unscaled UV as source
                    source = Vec3f(ruv[0], ruv[1], 0.0f);
                    break;

                case GE_PROJMAP_NORMALIZED_NORMAL: // Use normalized normal as source
                    if (reader.hasNormal()) {
                        source = Vec3f(norm).Normalized();
                    } else {
                        ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
                        source = Vec3f(0.0f, 0.0f, 1.0f);
                    }
                    break;

                case GE_PROJMAP_NORMAL: // Use non-normalized normal as source!
                    if (reader.hasNormal()) {
                        source = Vec3f(norm);
                    } else {
                        ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
                        source = Vec3f(0.0f, 0.0f, 1.0f);
                    }
                    break;
                }

                float uvw[3];
                Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix);
                uv[0] = uvw[0];
                uv[1] = uvw[1];
                uv[2] = uvw[2];
            }
            break;

            case GE_TEXMAP_ENVIRONMENT_MAP:
                // Shade mapping - use two light sources to generate U and V.
            {
                Vec3f lightpos0 = Vec3f(gstate_c.lightpos[gstate.getUVLS0()]).Normalized();
                Vec3f lightpos1 = Vec3f(gstate_c.lightpos[gstate.getUVLS1()]).Normalized();

                uv[0] = (1.0f + Dot(lightpos0, normal))/2.0f;
                uv[1] = (1.0f - Dot(lightpos1, normal))/2.0f;
                uv[2] = 1.0f;
            }
            break;

            default:
                // Illegal
                ERROR_LOG_REPORT(G3D, "Impossible UV gen mode? %d", gstate.getUVGenMode());
                break;
            }

            uv[0] = uv[0] * widthFactor;
            uv[1] = uv[1] * heightFactor;

            // Transform the coord by the view matrix.
            Vec3ByMatrix43(v, out, gstate.viewMatrix);
            fogCoef = (v[2] + fog_end) * fog_slope;
        }

        // TODO: Write to a flexible buffer, we don't always need all four components.
        memcpy(&transformed[index].x, v, 3 * sizeof(float));
        transformed[index].fog = fogCoef;
        memcpy(&transformed[index].u, uv, 3 * sizeof(float));
        if (gstate_c.flipTexture) {
            transformed[index].v = 1.0f - transformed[index].v;
        }
        for (int i = 0; i < 4; i++) {
            transformed[index].color0[i] = c0[i] * 255.0f;
        }
        for (int i = 0; i < 3; i++) {
            transformed[index].color1[i] = c1[i] * 255.0f;
        }
    }

    // Here's the best opportunity to try to detect rectangles used to clear the screen, and
    // replace them with real OpenGL clears. This can provide a speedup on certain mobile chips.
    // Disabled for now - depth does not come out exactly the same.
    //
    // An alternative option is to simply ditch all the verts except the first and last to create a single
    // rectangle out of many. Quite a small optimization though.
    if (false && maxIndex > 1 && gstate.isModeClear() && prim == GE_PRIM_RECTANGLES && IsReallyAClear(maxIndex)) {
        u32 clearColor;
        memcpy(&clearColor, transformed[0].color0, 4);
        float clearDepth = transformed[0].z;
        const float col[4] = {
            ((clearColor & 0xFF)) / 255.0f,
            ((clearColor & 0xFF00) >> 8) / 255.0f,
            ((clearColor & 0xFF0000) >> 16) / 255.0f,
            ((clearColor & 0xFF000000) >> 24) / 255.0f,
        };

        bool colorMask = gstate.isClearModeColorMask();
        bool alphaMask = gstate.isClearModeAlphaMask();
        glstate.colorMask.set(colorMask, colorMask, colorMask, alphaMask);
        if (alphaMask) {
            glstate.stencilTest.set(true);
            // Clear stencil
            // TODO: extract the stencilValue properly, see below
            int stencilValue = 0;
            glstate.stencilFunc.set(GL_ALWAYS, stencilValue, 255);
        } else {
            // Don't touch stencil
            glstate.stencilTest.set(false);
        }
        glstate.scissorTest.set(false);
        bool depthMask = gstate.isClearModeDepthMask();

        int target = 0;
        if (colorMask || alphaMask) target |= GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
        if (depthMask) target |= GL_DEPTH_BUFFER_BIT;

        glClearColor(col[0], col[1], col[2], col[3]);
#ifdef USING_GLES2
        glClearDepthf(clearDepth);
#else
        glClearDepth(clearDepth);
#endif
        glClearStencil(0);  // TODO - take from alpha?
        glClear(target);
        return;
    }
Example #14
 /** \brief Project a real-world coordinate.
  *
  * \param rpos Real world coordinate position.
  * \return 2D pixel position.
  */
 math::vec2i project(const math::vec3f &rpos) const
 {
   math::vec4f tpos(rpos.x(), rpos.y(), rpos.z(), 1.0f);
   std::cout << "Projected position: " << get_matrix_stack() * tpos << std::endl;
   return this->convertTo2D(get_matrix_stack() * tpos);
 }
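A minimal usage sketch of the projection helper above; the camera object, world point, and accessor names are assumed for illustration only:

math::vec2i screen = camera.project(math::vec3f(10.0f, 0.0f, -5.0f));
// screen now holds the 2D pixel position of the world-space point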
Example #15
void TransformAndDrawPrim(void *verts, void *inds, int prim, int vertexCount, LinkedShader *program, float *customUV, int forceIndexType)
{
	// First, decode the verts and apply morphing
	VertexDecoder dec;
	dec.SetVertexType(gstate.vertType);
	dec.DecodeVerts(decoded, verts, inds, prim, vertexCount);

	bool useTexCoord = false;

	// Check if anything needs updating
	if (gstate.textureChanged)
	{
		if (gstate.textureMapEnable && !(gstate.clearmode & 1))
		{
			PSPSetTexture();
			useTexCoord = true;
		}
	}

	// Then, transform and draw in one big swoop (urgh!)
	// need to move this to the shader.
	
	// We're gonna have to keep software transforming RECTANGLES, unless we use a geom shader which we can't on OpenGL ES 2.0.
	// Usually, though, these primitives don't use lighting etc so it's no biggie performance wise, but it would be nice to get rid of
	// this code.

	// Actually, if we find the camera-relative right and down vectors, it might even be possible to add the extra points in pre-transformed
	// space and thus make decent use of hardware transform.

	// Actually again, single quads could be drawn more efficiently using GL_TRIANGLE_STRIP, no need to duplicate verts as for
	// GL_TRIANGLES. Still need to sw transform to compute the extra two corners though.
	
	// Temporary storage for RECTANGLES emulation
	float v2[3] = {0};
	float uv2[2] = {0};

	int numTrans = 0;
	TransformedVertex *trans = &transformed[0];

	// TODO: Could use glDrawElements in some cases, see below.


	// TODO: Split up into multiple draw calls for Android where you can't guarantee support for more than 0x10000 verts.
	int i = 0;

#ifdef ANDROID
	if (vertexCount > 0x10000/3)
		vertexCount = 0x10000/3;
#endif

	for (int i = 0; i < vertexCount; i++)
	{	
		int indexType = (gstate.vertType & GE_VTYPE_IDX_MASK);
		if (forceIndexType != -1) {
			indexType = forceIndexType;
		}

		int index;
		if (indexType == GE_VTYPE_IDX_8BIT)
		{
			index = ((u8*)inds)[i];
		} 
		else if (indexType == GE_VTYPE_IDX_16BIT)
		{
			index = ((u16*)inds)[i];
		}
		else
		{
			index = i;
		}

		float v[3] = {0,0,0};
		float c[4] = {1,1,1,1};
		float uv[2] = {0,0};

		if (gstate.vertType & GE_VTYPE_THROUGH_MASK)
		{
			// Do not touch the coordinates or the colors. No lighting.
			for (int j=0; j<3; j++)
				v[j] = decoded[index].pos[j];
			// TODO : check if has color
			for (int j=0; j<4; j++)
				c[j] = decoded[index].color[j];
			// TODO : check if has uv
			for (int j=0; j<2; j++)
				uv[j] = decoded[index].uv[j];

			//Rescale UV?
		}
		else
		{
			//We do software T&L for now
			float out[3], norm[3];
			if ((gstate.vertType & GE_VTYPE_WEIGHT_MASK) == GE_VTYPE_WEIGHT_NONE)
			{
				Vec3ByMatrix43(out, decoded[index].pos, gstate.worldMatrix);
				Norm3ByMatrix43(norm, decoded[index].normal, gstate.worldMatrix);
			}
			else
			{
				Vec3 psum(0,0,0);
				Vec3 nsum(0,0,0);
				int nweights = (gstate.vertType & GE_VTYPE_WEIGHT_MASK) >> GE_VTYPE_WEIGHT_SHIFT;
				for (int i = 0; i < nweights; i++)
				{
					Vec3ByMatrix43(out, decoded[index].pos, gstate.boneMatrix+i*12);
					Norm3ByMatrix43(norm, decoded[index].normal, gstate.boneMatrix+i*12);
					Vec3 tpos(out), tnorm(norm);
					psum += tpos*decoded[index].weights[i];
					nsum += tnorm*decoded[index].weights[i];
				}
				nsum.Normalize();
				psum.Write(out);
				nsum.Write(norm);
			}

			// Perform lighting here if enabled. No need to check throughmode; it's handled above.
			float dots[4] = {0,0,0,0};
			if (program->a_color0 != -1)
			{
				//c[1] = norm[1];
				float litColor[4] = {0,0,0,0};
				Light(litColor, decoded[index].color, out, norm, dots);
				if (gstate.lightingEnable & 1)
				{
					memcpy(c, litColor, sizeof(litColor));
				}
				else
				{
					// no lighting? copy the color.
					for (int j=0; j<4; j++)
						c[j] = decoded[index].color[j];
				}
			}
			else
			{
				// no color in the fragment program???
				for (int j=0; j<4; j++)
					c[j] = decoded[index].color[j];
			}

			if (customUV) {
				uv[0] = customUV[index * 2 + 0]*gstate.uScale + gstate.uOff;
				uv[1] = customUV[index * 2 + 1]*gstate.vScale + gstate.vOff;
			} else {
				// Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights.
				switch (gstate.texmapmode & 0x3)
				{
				case 0:	// UV mapping
					// Texture scale/offset is only performed in this mode.
					uv[0] = decoded[index].uv[0]*gstate.uScale + gstate.uOff;
					uv[1] = decoded[index].uv[1]*gstate.vScale + gstate.vOff;
					break;
				case 1:
					{
						// Projection mapping
						Vec3 source;
						switch ((gstate.texmapmode >> 8) & 0x3)
						{
						case 0: // Use model space XYZ as source
							source = decoded[index].pos;
							break;
						case 1: // Use unscaled UV as source
							source = Vec3(decoded[index].uv[0], decoded[index].uv[1], 0.0f);
							break;
						case 2: // Use normalized normal as source
							source = Vec3(norm).Normalized();
							break;
						case 3: // Use non-normalized normal as source!
							source = Vec3(norm);
							break;
						}
						float uvw[3];
						Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix);
						uv[0] = uvw[0];
						uv[1] = uvw[1];
					}
					break;
				case 2:
					// Shade mapping
					{
						int lightsource1 = gstate.texshade & 0x3;
						int lightsource2 = (gstate.texshade >> 8) & 0x3;
						uv[0] = dots[lightsource1];
						uv[1] = dots[lightsource2];
					}
					break;
				case 3:
					// Illegal
					break;
				}
			}
			// Transform the coord by the view matrix. Should this be done before or after texcoord generation?
			Vec3ByMatrix43(v, out, gstate.viewMatrix);
		}


		// We need to tessellate axis-aligned rectangles, as they're only specified by two coordinates.
		if (prim == GE_PRIM_RECTANGLES)
		{
			if ((i & 1) == 0)
			{
				// Save this vertex so we can generate when we get the next one. Color is taken from the last vertex.
				memcpy(v2, v, sizeof(float)*3);
				memcpy(uv2,uv,sizeof(float)*2);
			}
			else
			{
				// We have to turn the rectangle into two triangles, so 6 points. Sigh.

				// top left
				trans->x = v[0]; trans->y = v[1];
				trans->z = v[2]; 
				trans->uv[0] = uv[0]; trans->uv[1] = uv[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				// top right
				trans->x = v2[0]; trans->y = v[1];
				trans->z = v[2]; 
				trans->uv[0] = uv2[0]; trans->uv[1] = uv[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				// bottom right
				trans->x = v2[0]; trans->y = v2[1];
				trans->z = v[2]; 
				trans->uv[0] = uv2[0]; trans->uv[1] = uv2[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				// bottom left
				trans->x = v[0]; trans->y = v2[1];
				trans->z = v[2]; 
				trans->uv[0] = uv[0]; trans->uv[1] = uv2[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				// top left
				trans->x = v[0]; trans->y = v[1];
				trans->z = v[2]; 
				trans->uv[0] = uv[0]; trans->uv[1] = uv[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				// bottom right
				trans->x = v2[0]; trans->y = v2[1];
				trans->z = v[2]; 
				trans->uv[0] = uv2[0]; trans->uv[1] = uv2[1];
				memcpy(trans->color, c, 4*sizeof(float));
				trans++;

				numTrans += 6;
			}
		}
		else
		{
			memcpy(&trans->x, v, 3*sizeof(float));
			memcpy(trans->color, c, 4*sizeof(float));
			memcpy(trans->uv, uv, 2*sizeof(float));
			trans++;
			numTrans++;
		}
	}

	glEnableVertexAttribArray(program->a_position);
	if (useTexCoord && program->a_texcoord != -1) glEnableVertexAttribArray(program->a_texcoord);
	if (program->a_color0 != -1) glEnableVertexAttribArray(program->a_color0);
	const int vertexSize = sizeof(*trans);
	glVertexAttribPointer(program->a_position, 3, GL_FLOAT, GL_FALSE, vertexSize, transformed);
	if (useTexCoord && program->a_texcoord != -1) glVertexAttribPointer(program->a_texcoord, 2, GL_FLOAT, GL_FALSE, vertexSize, ((uint8_t*)transformed) + 3 * 4);	
	if (program->a_color0 != -1) glVertexAttribPointer(program->a_color0, 4, GL_FLOAT, GL_FALSE, vertexSize, ((uint8_t*)transformed) + 5 * 4);
	// NOTICE_LOG(G3D,"DrawPrimitive: %i", numTrans);
	glDrawArrays(glprim[prim], 0, numTrans);
	glDisableVertexAttribArray(program->a_position);
	if (useTexCoord && program->a_texcoord != -1) glDisableVertexAttribArray(program->a_texcoord);
	if (program->a_color0 != -1) glDisableVertexAttribArray(program->a_color0);

	/*
	if (((gstate.vertType ) & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_8BIT)
	{
		glDrawElements(glprim, vertexCount, GL_UNSIGNED_BYTE, inds);
	} 
	else if (((gstate.vertType ) & GE_VTYPE_IDX_MASK) == GE_VTYPE_IDX_16BIT)
	{
		glDrawElements(glprim, vertexCount, GL_UNSIGNED_SHORT, inds);
	}
	else
	{*/

}
Example #16
void TestTriangulationWidget::paintEvent(QPaintEvent *)
{
    QPainter paint(this);
    QPen pen;
    QBrush brush;
    brush.setColor(Qt::blue);

    for(int i=0; i < points_.size(); i++)
    {
        if(i == selected_id0_)
        {
            pen.setColor(Qt::red);
        }
        else if(i == selected_id1_)
        {
            pen.setColor(Qt::green);
        }
        else
        {
            pen.setColor(Qt::blue);
        }
        pen.setWidth(3);
        paint.setPen(pen);
        QPoint tpos((int)points_[i][0],(int)points_[i][1]);
        paint.drawEllipse(tpos,5,5);
    }

    for(int i=0; i < edges_.size(); i++)
    {
        if(boundary_markers_[i])
        {
            pen.setColor(Qt::red);
        }
        else
        {
            pen.setColor(Qt::green);
        }
        pen.setWidth(3);
        Vec3 p0 = points_[edges_[i][0]];
        Vec3 p1 = points_[edges_[i][1]];
        paint.setPen(pen);
        paint.drawLine((int)p0[0],(int)p0[1],(int)p1[0],(int)p1[1]);
    }


    for(int i=0; i < new_points_.size(); i++)
    {
        pen.setColor(Qt::black);
        pen.setWidth(2);
        paint.setPen(pen);
        QPoint tpos((int)new_points_[i][0],(int)new_points_[i][1]);
        paint.drawEllipse(tpos,2,2);
    }

    for(int i=0; i < triangles_.size(); i++)
    {
        pen.setColor(Qt::blue);
        pen.setWidth(1);
        paint.setPen(pen);
        for(int j=0; j < 3; j++)
        {
            Vec3 p0 = new_points_[triangles_[i][j]];
            Vec3 p1 = new_points_[triangles_[i][(j+1)%3]];
            paint.setPen(pen);
            paint.drawLine((int)p0[0],(int)p0[1],(int)p1[0],(int)p1[1]);
        }
    }
}
Example #17
// This normalizes a set of vertices in any format to SimpleVertex format, by processing away morphing AND skinning.
// The rest of the transform pipeline like lighting will go as normal, either hardware or software.
// The implementation is initially a bit inefficient but shouldn't be a big deal.
// An intermediate buffer of not-easy-to-predict size is stored at bufPtr.
u32 TransformDrawEngine::NormalizeVertices(u8 *outPtr, u8 *bufPtr, const u8 *inPtr, VertexDecoder *dec, int lowerBound, int upperBound, u32 vertType) {
	// First, decode the vertices into a GPU compatible format. This step can be eliminated but will need a separate
	// implementation of the vertex decoder.
	dec->DecodeVerts(bufPtr, inPtr, lowerBound, upperBound);

	// OK, morphing eliminated but bones still remain to be taken care of.
	// Let's do a partial software transform where we only do skinning.

	VertexReader reader(bufPtr, dec->GetDecVtxFmt(), vertType);

	SimpleVertex *sverts = (SimpleVertex *)outPtr;	

	const u8 defaultColor[4] = {
		(u8)gstate.getMaterialAmbientR(),
		(u8)gstate.getMaterialAmbientG(),
		(u8)gstate.getMaterialAmbientB(),
		(u8)gstate.getMaterialAmbientA(),
	};

	// Let's have two separate loops, one for non skinning and one for skinning.
	if (!g_Config.bSoftwareSkinning && (vertType & GE_VTYPE_WEIGHT_MASK) != GE_VTYPE_WEIGHT_NONE) {
		int numBoneWeights = vertTypeGetNumBoneWeights(vertType);
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			}

			if (vertType & GE_VTYPE_COL_MASK) {
				reader.ReadColor0_8888(sv.color);
			} else {
				memcpy(sv.color, defaultColor, 4);
			}

			float nrm[3], pos[3];
			float bnrm[3], bpos[3];

			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tessellation anyway, not sure there's any need to supply them
				reader.ReadNrm(nrm);
			} else {
				nrm[0] = 0;
				nrm[1] = 0;
				nrm[2] = 1.0f;
			}
			reader.ReadPos(pos);

			// Apply skinning transform directly
			float weights[8];
			reader.ReadWeights(weights);
			// Skinning
			Vec3Packedf psum(0,0,0);
			Vec3Packedf nsum(0,0,0);
			for (int w = 0; w < numBoneWeights; w++) {
				if (weights[w] != 0.0f) {
					Vec3ByMatrix43(bpos, pos, gstate.boneMatrix+w*12);
					Vec3Packedf tpos(bpos);
					psum += tpos * weights[w];

					Norm3ByMatrix43(bnrm, nrm, gstate.boneMatrix+w*12);
					Vec3Packedf tnorm(bnrm);
					nsum += tnorm * weights[w];
				}
			}
			sv.pos = psum;
			sv.nrm = nsum;
		}
	} else {
		for (int i = lowerBound; i <= upperBound; i++) {
			reader.Goto(i);
			SimpleVertex &sv = sverts[i];
			if (vertType & GE_VTYPE_TC_MASK) {
				reader.ReadUV(sv.uv);
			} else {
				sv.uv[0] = 0;  // This will get filled in during tessellation
				sv.uv[1] = 0;
			}
			if (vertType & GE_VTYPE_COL_MASK) {
				reader.ReadColor0_8888(sv.color);
			} else {
				memcpy(sv.color, defaultColor, 4);
			}
			if (vertType & GE_VTYPE_NRM_MASK) {
				// Normals are generated during tessellation anyway, not sure there's any need to supply them
				reader.ReadNrm((float *)&sv.nrm);
			} else {
				sv.nrm.x = 0;
				sv.nrm.y = 0;
				sv.nrm.z = 1.0f;
			}
			reader.ReadPos((float *)&sv.pos);
		}
	}

	// Okay, there we are! Return the new type (but keep the index bits)
	return GE_VTYPE_TC_FLOAT | GE_VTYPE_COL_8888 | GE_VTYPE_NRM_FLOAT | GE_VTYPE_POS_FLOAT | (vertType & (GE_VTYPE_IDX_MASK | GE_VTYPE_THROUGH));
}
Example #18
void SoftwareTransform(
	int prim, int vertexCount, u32 vertType, u16 *&inds, int indexType,
	const DecVtxFormat &decVtxFormat, int &maxIndex, TransformedVertex *&drawBuffer, int &numTrans, bool &drawIndexed, const SoftwareTransformParams *params, SoftwareTransformResult *result) {
	u8 *decoded = params->decoded;
	FramebufferManagerCommon *fbman = params->fbman;
	TextureCacheCommon *texCache = params->texCache;
	TransformedVertex *transformed = params->transformed;
	TransformedVertex *transformedExpanded = params->transformedExpanded;
	float ySign = 1.0f;
	bool throughmode = (vertType & GE_VTYPE_THROUGH_MASK) != 0;
	bool lmode = gstate.isUsingSecondaryColor() && gstate.isLightingEnabled();

	// TODO: Split up into multiple draw calls for GLES 2.0 where you can't guarantee support for more than 0x10000 verts.

#if defined(MOBILE_DEVICE)
	if (vertexCount > 0x10000/3)
		vertexCount = 0x10000/3;
#endif

	float uscale = 1.0f;
	float vscale = 1.0f;
	if (throughmode) {
		uscale /= gstate_c.curTextureWidth;
		vscale /= gstate_c.curTextureHeight;
	}

	bool skinningEnabled = vertTypeIsSkinningEnabled(vertType);

	const int w = gstate.getTextureWidth(0);
	const int h = gstate.getTextureHeight(0);
	float widthFactor = (float) w / (float) gstate_c.curTextureWidth;
	float heightFactor = (float) h / (float) gstate_c.curTextureHeight;

	Lighter lighter(vertType);
	float fog_end = getFloat24(gstate.fog1);
	float fog_slope = getFloat24(gstate.fog2);
	// Same fixup as in ShaderManager.cpp
	if (my_isinf(fog_slope)) {
		// not really sure what a sensible value might be.
		fog_slope = fog_slope < 0.0f ? -10000.0f : 10000.0f;
	}
	if (my_isnan(fog_slope)) {
		// Workaround for https://github.com/hrydgard/ppsspp/issues/5384#issuecomment-38365988
		// Just put the fog far away at a large finite distance.
		// Infinities and NaNs are rather unpredictable in shaders on many GPUs
		// so it's best to just make it a sane calculation.
		fog_end = 100000.0f;
		fog_slope = 1.0f;
	}

	VertexReader reader(decoded, decVtxFormat, vertType);
	if (throughmode) {
		for (int index = 0; index < maxIndex; index++) {
			// Do not touch the coordinates or the colors. No lighting.
			reader.Goto(index);
			// TODO: Write to a flexible buffer, we don't always need all four components.
			TransformedVertex &vert = transformed[index];
			reader.ReadPos(vert.pos);

			if (reader.hasColor0()) {
				reader.ReadColor0_8888(vert.color0);
			} else {
				vert.color0_32 = gstate.getMaterialAmbientRGBA();
			}

			if (reader.hasUV()) {
				reader.ReadUV(vert.uv);

				vert.u *= uscale;
				vert.v *= vscale;
			} else {
				vert.u = 0.0f;
				vert.v = 0.0f;
			}

			// Ignore color1 and fog, never used in throughmode anyway.
			// The w of uv is also never used (hardcoded to 1.0.)
		}
	} else {
		// Okay, need to actually perform the full transform.
		for (int index = 0; index < maxIndex; index++) {
			reader.Goto(index);

			float v[3] = {0, 0, 0};
			Vec4f c0 = Vec4f(1, 1, 1, 1);
			Vec4f c1 = Vec4f(0, 0, 0, 0);
			float uv[3] = {0, 0, 1};
			float fogCoef = 1.0f;

			// We do software T&L for now
			float out[3];
			float pos[3];
			Vec3f normal(0, 0, 1);
			Vec3f worldnormal(0, 0, 1);
			reader.ReadPos(pos);

			if (!skinningEnabled) {
				Vec3ByMatrix43(out, pos, gstate.worldMatrix);
				if (reader.hasNormal()) {
					reader.ReadNrm(normal.AsArray());
					if (gstate.areNormalsReversed()) {
						normal = -normal;
					}
					Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix);
					worldnormal = worldnormal.Normalized();
				}
			} else {
				float weights[8];
				reader.ReadWeights(weights);
				if (reader.hasNormal())
					reader.ReadNrm(normal.AsArray());

				// Skinning
				Vec3f psum(0, 0, 0);
				Vec3f nsum(0, 0, 0);
				for (int i = 0; i < vertTypeGetNumBoneWeights(vertType); i++) {
					if (weights[i] != 0.0f) {
						Vec3ByMatrix43(out, pos, gstate.boneMatrix+i*12);
						Vec3f tpos(out);
						psum += tpos * weights[i];
						if (reader.hasNormal()) {
							Vec3f norm;
							Norm3ByMatrix43(norm.AsArray(), normal.AsArray(), gstate.boneMatrix+i*12);
							nsum += norm * weights[i];
						}
					}
				}

				// Yes, we really must multiply by the world matrix too.
				Vec3ByMatrix43(out, psum.AsArray(), gstate.worldMatrix);
				if (reader.hasNormal()) {
					normal = nsum;
					if (gstate.areNormalsReversed()) {
						normal = -normal;
					}
					Norm3ByMatrix43(worldnormal.AsArray(), normal.AsArray(), gstate.worldMatrix);
					worldnormal = worldnormal.Normalized();
				}
			}

			// Perform lighting here if enabled. No need to check throughmode; it's handled above.
			Vec4f unlitColor = Vec4f(1, 1, 1, 1);
			if (reader.hasColor0()) {
				reader.ReadColor0(&unlitColor.x);
			} else {
				unlitColor = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA());
			}

			if (gstate.isLightingEnabled()) {
				float litColor0[4];
				float litColor1[4];
				lighter.Light(litColor0, litColor1, unlitColor.AsArray(), out, worldnormal);

				// Don't ignore gstate.lmode - we should send two colors in that case
				for (int j = 0; j < 4; j++) {
					c0[j] = litColor0[j];
				}
				if (lmode) {
					// Separate colors
					for (int j = 0; j < 4; j++) {
						c1[j] = litColor1[j];
					}
				} else {
					// Summed color into c0 (will clamp in ToRGBA().)
					for (int j = 0; j < 4; j++) {
						c0[j] += litColor1[j];
					}
				}
			} else {
				if (reader.hasColor0()) {
					for (int j = 0; j < 4; j++) {
						c0[j] = unlitColor[j];
					}
				} else {
					c0 = Vec4f::FromRGBA(gstate.getMaterialAmbientRGBA());
				}
				if (lmode) {
					// c1 is already 0.
				}
			}

			float ruv[2] = {0.0f, 0.0f};
			if (reader.hasUV())
				reader.ReadUV(ruv);

			// Perform texture coordinate generation after the transform and lighting - one style of UV depends on lights.
			switch (gstate.getUVGenMode()) {
			case GE_TEXMAP_TEXTURE_COORDS:	// UV mapping
			case GE_TEXMAP_UNKNOWN: // Seen in Riviera.  Unsure of meaning, but this works.
				// We always prescale in the vertex decoder now.
				uv[0] = ruv[0];
				uv[1] = ruv[1];
				uv[2] = 1.0f;
				break;

			case GE_TEXMAP_TEXTURE_MATRIX:
				{
					// Projection mapping
					Vec3f source;
					switch (gstate.getUVProjMode())	{
					case GE_PROJMAP_POSITION: // Use model space XYZ as source
						source = pos;
						break;

					case GE_PROJMAP_UV: // Use unscaled UV as source
						source = Vec3f(ruv[0], ruv[1], 0.0f);
						break;

					case GE_PROJMAP_NORMALIZED_NORMAL: // Use normalized normal as source
						source = normal.Normalized();
						if (!reader.hasNormal()) {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
						}
						break;

					case GE_PROJMAP_NORMAL: // Use non-normalized normal as source!
						source = normal;
						if (!reader.hasNormal()) {
							ERROR_LOG_REPORT(G3D, "Normal projection mapping without normal?");
						}
						break;
					}

					float uvw[3];
					Vec3ByMatrix43(uvw, &source.x, gstate.tgenMatrix);
					uv[0] = uvw[0];
					uv[1] = uvw[1];
					uv[2] = uvw[2];
				}
				break;

			case GE_TEXMAP_ENVIRONMENT_MAP:
				// Shade mapping - use two light sources to generate U and V.
				{
					Vec3f lightpos0 = Vec3f(&lighter.lpos[gstate.getUVLS0() * 3]).Normalized();
					Vec3f lightpos1 = Vec3f(&lighter.lpos[gstate.getUVLS1() * 3]).Normalized();

					uv[0] = (1.0f + Dot(lightpos0, worldnormal))/2.0f;
					uv[1] = (1.0f + Dot(lightpos1, worldnormal))/2.0f;
					uv[2] = 1.0f;
				}
				break;

			default:
				// Illegal
				ERROR_LOG_REPORT(G3D, "Impossible UV gen mode? %d", gstate.getUVGenMode());
				break;
			}

			uv[0] = uv[0] * widthFactor;
			uv[1] = uv[1] * heightFactor;

			// Transform the coord by the view matrix.
			Vec3ByMatrix43(v, out, gstate.viewMatrix);
			fogCoef = (v[2] + fog_end) * fog_slope;

			// TODO: Write to a flexible buffer, we don't always need all four components.
			memcpy(&transformed[index].x, v, 3 * sizeof(float));
			transformed[index].fog = fogCoef;
			memcpy(&transformed[index].u, uv, 3 * sizeof(float));
			transformed[index].color0_32 = c0.ToRGBA();
			transformed[index].color1_32 = c1.ToRGBA();

			// The multiplication by the projection matrix is still performed in the vertex shader.
			// So is vertex depth rounding, to simulate the 16-bit depth buffer.
		}
	}

	// Here's the best opportunity to try to detect rectangles used to clear the screen, and
	// replace them with real clears. This can provide a speedup on certain mobile chips.
	//
	// An alternative option is to simply ditch all the verts except the first and last to create a single
	// rectangle out of many. Quite a small optimization though.
	// Experiment: Disable on PowerVR (see issue #6290)
	// TODO: This bleeds outside the play area in non-buffered mode. Big deal? Probably not.
	bool reallyAClear = false;
	if (maxIndex > 1 && prim == GE_PRIM_RECTANGLES && gstate.isModeClear()) {
		int scissorX2 = gstate.getScissorX2() + 1;
		int scissorY2 = gstate.getScissorY2() + 1;
		reallyAClear = IsReallyAClear(transformed, maxIndex, scissorX2, scissorY2);
	}
	if (reallyAClear && gl_extensions.gpuVendor != GPU_VENDOR_POWERVR) {  // && g_Config.iRenderingMode != FB_NON_BUFFERED_MODE) {
		// If alpha is not allowed to be separate, it must match for both depth/stencil and color.  Vulkan requires this.
		bool alphaMatchesColor = gstate.isClearModeColorMask() == gstate.isClearModeAlphaMask();
		bool depthMatchesStencil = gstate.isClearModeAlphaMask() == gstate.isClearModeDepthMask();
		if (params->allowSeparateAlphaClear || (alphaMatchesColor && depthMatchesStencil)) {
			result->color = transformed[1].color0_32;
			// Need to rescale from a [0, 1] float.  This is the final transformed value.
			result->depth = ToScaledDepth((s16)(int)(transformed[1].z * 65535.0f));
			result->action = SW_CLEAR;
			return;
		}
	}

	// This means we're using a framebuffer (and one that isn't big enough.)
	if (gstate_c.curTextureHeight < (u32)h && maxIndex >= 2) {
		// Even if not rectangles, this will detect if either of the first two are outside the framebuffer.
		// HACK: Adding one pixel margin to this detection fixes issues in Assassin's Creed : Bloodlines,
		// while still keeping BOF working (see below).
		const float invTexH = 1.0f / gstate_c.curTextureHeight; // size of one texel.
		bool tlOutside;
		bool tlAlmostOutside;
		bool brOutside;
		// If we're outside heightFactor, then v must be wrapping or clamping.  Avoid this workaround.
		// If we're <= 1.0f, we're inside the framebuffer (workaround not needed.)
		// We buffer that 1.0f a little more with a texel to avoid some false positives.
		tlOutside = transformed[0].v <= heightFactor && transformed[0].v > 1.0f + invTexH;
		brOutside = transformed[1].v <= heightFactor && transformed[1].v > 1.0f + invTexH;
		// Careful: if br is outside, but tl is well inside, this workaround still doesn't make sense.
		// We go with halfway, since we overestimate framebuffer heights sometimes but not by much.
		tlAlmostOutside = transformed[0].v <= heightFactor && transformed[0].v >= 0.5f;
		if (tlOutside || (brOutside && tlAlmostOutside)) {
			// Okay, so we're texturing from outside the framebuffer, but inside the texture height.
			// Breath of Fire 3 does this to access a render surface at an offset.
			const u32 bpp = fbman->GetTargetFormat() == GE_FORMAT_8888 ? 4 : 2;
			const u32 prevH = texCache->AttachedDrawingHeight();
			const u32 fb_size = bpp * fbman->GetTargetStride() * prevH;
			const u32 prevYOffset = gstate_c.curTextureYOffset;
			if (texCache->SetOffsetTexture(fb_size)) {
				const float oldWidthFactor = widthFactor;
				const float oldHeightFactor = heightFactor;
				widthFactor = (float) w / (float) gstate_c.curTextureWidth;
				heightFactor = (float) h / (float) gstate_c.curTextureHeight;

				// We've already baked in the old gstate_c.curTextureYOffset, so correct.
				const float yDiff = (float) (prevH + prevYOffset - gstate_c.curTextureYOffset) / (float) h;
				for (int index = 0; index < maxIndex; ++index) {
					transformed[index].u *= widthFactor / oldWidthFactor;
					// Inverse it back to scale to the new FBO, and add 1.0f to account for old FBO.
					transformed[index].v = (transformed[index].v / oldHeightFactor - yDiff) * heightFactor;
				}
			}
		}
	}

	// Step 2: expand rectangles.
	drawBuffer = transformed;
	numTrans = 0;
	drawIndexed = false;

	if (prim != GE_PRIM_RECTANGLES) {
		// We can simply draw the unexpanded buffer.
		numTrans = vertexCount;
		drawIndexed = true;
	} else {
		bool useBufferedRendering = g_Config.iRenderingMode != FB_NON_BUFFERED_MODE;
		if (useBufferedRendering)
			ySign = -ySign;

		float flippedMatrix[16];
		if (!throughmode) {
			memcpy(&flippedMatrix, gstate.projMatrix, 16 * sizeof(float));

			const bool invertedY = useBufferedRendering ? (gstate_c.vpHeight < 0) : (gstate_c.vpHeight > 0);
			if (invertedY) {
				flippedMatrix[1] = -flippedMatrix[1];
				flippedMatrix[5] = -flippedMatrix[5];
				flippedMatrix[9] = -flippedMatrix[9];
				flippedMatrix[13] = -flippedMatrix[13];
			}
			const bool invertedX = gstate_c.vpWidth < 0;
			if (invertedX) {
				flippedMatrix[0] = -flippedMatrix[0];
				flippedMatrix[4] = -flippedMatrix[4];
				flippedMatrix[8] = -flippedMatrix[8];
				flippedMatrix[12] = -flippedMatrix[12];
			}
		}

		//rectangles always need 2 vertices, disregard the last one if there's an odd number
		vertexCount = vertexCount & ~1;
		numTrans = 0;
		drawBuffer = transformedExpanded;
		TransformedVertex *trans = &transformedExpanded[0];
		const u16 *indsIn = (const u16 *)inds;
		u16 *newInds = inds + vertexCount;
		u16 *indsOut = newInds;
		maxIndex = 4 * vertexCount;
		for (int i = 0; i < vertexCount; i += 2) {
			const TransformedVertex &transVtxTL = transformed[indsIn[i + 0]];
			const TransformedVertex &transVtxBR = transformed[indsIn[i + 1]];

			// We have to turn the rectangle into two triangles, so 6 points.
			// This is 4 verts + 6 indices.

			// bottom right
			trans[0] = transVtxBR;

			// top right
			trans[1] = transVtxBR;
			trans[1].y = transVtxTL.y;
			trans[1].v = transVtxTL.v;

			// top left
			trans[2] = transVtxBR;
			trans[2].x = transVtxTL.x;
			trans[2].y = transVtxTL.y;
			trans[2].u = transVtxTL.u;
			trans[2].v = transVtxTL.v;

			// bottom left
			trans[3] = transVtxBR;
			trans[3].x = transVtxTL.x;
			trans[3].u = transVtxTL.u;

			// That's the four corners. Now process UV rotation.
			if (throughmode)
				RotateUVThrough(trans);
			else
				RotateUV(trans, flippedMatrix, ySign);

			// Triangle: BR-TR-TL
			indsOut[0] = i * 2 + 0;
			indsOut[1] = i * 2 + 1;
			indsOut[2] = i * 2 + 2;
			// Triangle: BL-BR-TL
			indsOut[3] = i * 2 + 3;
			indsOut[4] = i * 2 + 0;
			indsOut[5] = i * 2 + 2;
			trans += 4;
			indsOut += 6;

			numTrans += 6;
		}
		inds = newInds;
		drawIndexed = true;

		// We don't know the color until here, so we have to do it now, instead of in StateMapping.
		// Might want to reconsider the order of things later...
		if (gstate.isModeClear() && gstate.isClearModeAlphaMask()) {
			result->setStencil = true;
			if (vertexCount > 1) {
				// Take the bottom right alpha value of the first rect as the stencil value.
				// Technically, each rect could individually fill its stencil, but most of the
				// time they use the same one.
				result->stencilValue = transformed[indsIn[1]].color0[3];
			} else {
				result->stencilValue = 0;
			}
		}
	}

	result->action = SW_DRAW_PRIMITIVES;
}
Example #19
bool SpriteModel::load_model(string filepath, vector<VertexPos>& vertexbuffer, vector<int>& indexbuffer, vector<SubSet>& sets)
{
	if (!dread.load_all_blocks(filepath))
		return false;

	dread.analysis_blocks();

	int count = dread.get_data_num_by_name("model");

	vector< vector<string> > res0 = dread.get_data_by_name("draw_vertice");
	vector< vector<string> > res1 = dread.get_data_by_name("colli_vertice");
	vector< vector<string> > res2 = dread.get_data_by_name("shapetype");
	vector< vector<string> > res3 = dread.get_data_by_name("bodytype");
	vector< vector<string> > res4 = dread.get_data_by_name("texcoord");
	vector< vector<string> > res5 = dread.get_data_by_name("index");

	//note that every model must define animation (because of my laziness)
	vector< vector<string> > res6 = dread.get_data_by_name("animation");//animation number is size/3; read each of them and pushback
	vector< vector<string> > res7 = dread.get_data_by_name("interval");
	vector< vector<string> > res8 = dread.get_data_by_name("defanime");
	vector< vector<string> > res9 = dread.get_data_by_name("distvh");

	vector< vector<string> > res10 = dread.get_data_by_name("zorder");


	if (!(res0.size() == count && res1.size() == count && res2.size() == count && res3.size() == count && res4.size() == count &&res5.size()
		== count && res6.size() == count && res7.size() == count && res8.size() == count && res9.size() == count && res10.size() == count))
	{
		MessageBox(0, "syntax error: component counts don't match!", 0, 0);
		return false;
	}



	stringstream ss;

	for (int i = 0; i < count; i++)
	{
		vector<b2Shape::Type>			shapetype;
		b2BodyType						bodytype;
		int								setnum = 0;
		vector< vector<b2Vec2> >		vertdraw;
		vector< vector<b2Vec2> >		vertcolli;

		vector<animation>				animes;
		int								interval = 0;
		int								defanime = 0;
		float							distv, disth;


		//animation 
		int flag = 1;
		animation a;
		a = { 0, 1, 1, -1, 0, 0, 0, 0 };
		bool isanime = true;
		for (int j = 0; j < res6[i].size(); j++)
		{
			ss << res6[i][j];
			switch (flag)
			{
			case 1:
				ss >> a.framecount;
				if (a.framecount == 0)
				{
					isanime = false;
				}
				break;
			case 2:
				ss >> a.column;
				break;
			case 3:
				ss >> a.startcolumn;
				break;
			case 4:
				ss >> a.origin_texco_transX;
				break;
			case 5:
				ss >> a.origin_texco_transY;
				flag = 0;
				animes.push_back(a);
				break;
			}
			ss.clear();
			flag++;
			if (isanime == false)
			{
				break;
			}
		}
		//interval
		ss << res7[i][0];
		ss >> interval;
		ss.clear();
		//defanime
		ss << res8[i][0];
		ss >> defanime;
		ss.clear();
		//distvh
		ss << res9[i][0];
		ss >> distv;
		ss.clear();
		ss << res9[i][1];
		ss >> disth;
		ss.clear();


		//shapetype
		for (int j = 0; j < res2[i].size(); j++)
		{
			if (res2[i][j] == "edge")
			{
				shapetype.push_back(b2Shape::Type::e_edge);
			}
			else if (res2[i][j] == "closeedge")
			{
				shapetype.push_back(b2Shape::Type::e_closeedge);
			}
			else if (res2[i][j] == "circle")
			{
				shapetype.push_back(b2Shape::Type::e_circle);
			}
			else if (res2[i][j] == "polygon")
			{
				shapetype.push_back(b2Shape::Type::e_polygon);
			}

		}
		//bodytype
		if (res3[i][0] == "static")
			bodytype = b2BodyType::b2_staticBody;
		else if (res3[i][0] == "dynamic")
			bodytype = b2BodyType::b2_dynamicBody;
		else if (res3[i][0] == "kinematic")
			bodytype = b2BodyType::b2_kinematicBody;
		//setnum
		setnum = i;
		//vertdraw and vertcolli
		vector<b2Vec2> vertdrawgroup;
		for (int j = 0; j < res0[i].size(); )
		{
			if (res0[i][j] == "*-*")
			{
				vertdraw.push_back(vertdrawgroup);
				vertdrawgroup.clear();
				j++;
				continue;
			}
			//else
			b2Vec2 tpos(0, 0);
			ss << res0[i][j];
			ss >> tpos.x;
			ss.clear();
			ss << res0[i][j + 1];
			ss >> tpos.y;
			ss.clear();
			vertdrawgroup.push_back(tpos);

			j += 2;
		}
		vector<b2Vec2> vertcolligroup;
		for (int j = 0; j < res1[i].size(); )
		{
			if (res1[i][j] == "*-*")
			{
				vertcolli.push_back(vertcolligroup);
				vertcolligroup.clear();
				j++;
				continue;
			}
			//else
			b2Vec2 tpos(0, 0);
			ss << res1[i][j];
			ss >> tpos.x;
			ss.clear();
			ss << res1[i][j + 1];
			ss >> tpos.y;
			ss.clear();
			vertcolligroup.push_back(tpos);

			j += 2;
		}
		//Model
		SModel sm = { bodytype, shapetype, vertdraw, vertcolli, setnum,
			animes, interval, defanime, distv, disth };
		Models.push_back(sm);


	}


	//create vertex, index buffer and subsets
	//subsets
	for (int i = 0; i < count; i++)
	{
		SubSet s;
		sets.push_back(s);
	}
	//vertex
	ss.clear();
	for (int i = 0; i < count; i++)
	{
		if (res0[i].size() != res4[i].size())
		{
			MessageBox(0, "vertdraw and texcoords don't match!", 0, 0);
			return false;
		}
		ss.clear();
		ss << res10[i][0];
		double zorder = 0.0;
		ss >> zorder;
		ss.clear();

		sets[i].basevertexlocation = vertexbuffer.size();
		for (int j = 0; j < res0[i].size(); )
		{
			if (res0[i][j] == "*-*" && res4[i][j] == "*-*")
			{
				j++;
				continue;
			}
			VertexPos tpos = { XMFLOAT3(0, 0, zorder), XMFLOAT2(0, 0) };
			//vertex
			ss << res0[i][j];
			ss >> tpos.pos.x;
			ss.clear();
			ss << res0[i][j + 1];
			ss >> tpos.pos.y;
			ss.clear();
			//texcoords
			ss << res4[i][j];
			ss >> tpos.tex0.x;
			ss.clear();
			ss << res4[i][j + 1];
			ss >> tpos.tex0.y;
			ss.clear();
			vertexbuffer.push_back(tpos);
			j += 2;
		}
	}
	//index
	for (int i = 0; i < count; i++)
	{
		sets[i].startindexlocation = indexbuffer.size();
		for (int j = 0; j < res5[i].size(); j++)
		{
			int t = 0;
			ss << res5[i][j];
			ss >> t;
			indexbuffer.push_back(t);
			sets[i].indexcount++;
			ss.clear();
		}
	}


	return true;
}
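The function above only builds CPU-side arrays (vertexbuffer, indexbuffer and the per-model SubSet ranges). As a rough sketch of how they might be turned into GPU buffers with D3D11, assuming the usual device setup and the same VertexPos layout used above (the struct definition, parameter types and helper name are assumptions, not part of the original):

// Hypothetical helper, not from the original example.
#include <d3d11.h>
#include <DirectXMath.h>
#include <vector>

struct VertexPos { DirectX::XMFLOAT3 pos; DirectX::XMFLOAT2 tex0; };

bool CreateModelBuffers(ID3D11Device *device,
	const std::vector<VertexPos> &vertexbuffer,
	const std::vector<int> &indexbuffer,
	ID3D11Buffer **vb, ID3D11Buffer **ib)
{
	// Immutable buffers: the loader fills them once and never rewrites them.
	D3D11_BUFFER_DESC vbd = {};
	vbd.Usage = D3D11_USAGE_IMMUTABLE;
	vbd.ByteWidth = UINT(sizeof(VertexPos) * vertexbuffer.size());
	vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
	D3D11_SUBRESOURCE_DATA vdata = {};
	vdata.pSysMem = vertexbuffer.data();
	if (FAILED(device->CreateBuffer(&vbd, &vdata, vb)))
		return false;

	// 32-bit indices (bind with DXGI_FORMAT_R32_UINT at draw time).
	D3D11_BUFFER_DESC ibd = {};
	ibd.Usage = D3D11_USAGE_IMMUTABLE;
	ibd.ByteWidth = UINT(sizeof(int) * indexbuffer.size());
	ibd.BindFlags = D3D11_BIND_INDEX_BUFFER;
	D3D11_SUBRESOURCE_DATA idata = {};
	idata.pSysMem = indexbuffer.data();
	return SUCCEEDED(device->CreateBuffer(&ibd, &idata, ib));
}

Each model i could then be drawn with DrawIndexed(sets[i].indexcount, sets[i].startindexlocation, sets[i].basevertexlocation), which is what the three SubSet fields recorded above exist for.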
Ejemplo n.º 20
0
void RNMarchingCubesBase<T>::RenderT(sInt start,sInt count,sInt thread)
{
  for(sInt i_=start;i_<start+count;i_++)
  {
    HashContainer *hc = ThreadHashConts[i_];
    PartContainer *con = hc->FirstPart;
    const sInt s = 1<<base;
    const sInt m = (s+1);
    const sInt mm = (s+1)*(s+1);
    sF32 S = Para.GridSize/s;
    sVector31 tpos(hc->IX*Para.GridSize,hc->IY*Para.GridSize,hc->IZ*Para.GridSize);

//    sInt size = (s+2)*(s+1)*(s+1);
    typename T::FieldType *pot = PotData[thread];

    funcinfo fi;

    // calculate potential and normal

    sClear(fi);
    fi.tresh = 1/(Para.Influence*Para.Influence);
    fi.treshf = 1.0f/fi.tresh-0.00001f;
    fi.iso = Para.IsoValue;

    // reorganize array for SIMD
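    // The per-container particle lists are repacked into groups of four as
    // structure-of-arrays (fi.parts4), so the field function can evaluate
    // four particles per SSE iteration; pn4 counts how many such groups exist.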

    sInt pn4 = 0;
    PartContainer *cp = con;
    while(cp)
    {
      pn4 += (cp->Count+3)/4;
      cp = cp->Next;
    }

    fi.tresh4 = _mm_load_ps1(&fi.tresh);
    fi.treshf4 = _mm_load_ps1(&fi.treshf);
    fi.one = _mm_set_ps1(1.0f);
    fi.epsilon = _mm_set_ps1(0.01f);
    fi.pn4 = pn4;

    fi.parts4 = SimdParts[thread];
    sInt i4 = 0;

    typename T::PartType far;
    far.x = 1024*1024;
    far.y = 0;
    far.z = 0;
    cp = con;
    while(cp)
    {
      sInt pn = cp->Count;
      typename T::PartType *p = cp->Parts;
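      // The switch below falls through intentionally: it pads the trailing
      // partial group of four with 'far' dummy particles (x = 1024*1024) so
      // the repack loop can always read complete groups of four.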

      switch(pn&3)
      {
        case 1: p[pn+2] = far;
        case 2: p[pn+1] = far;
        case 3: p[pn+0] = far;
        case 0: break;
      }

      for(sInt i=0;i<(pn+3)/4;i++)
      {
        fi.parts4[i4].x.m128_f32[0] = p[0].x;
        fi.parts4[i4].x.m128_f32[1] = p[1].x;
        fi.parts4[i4].x.m128_f32[2] = p[2].x;
        fi.parts4[i4].x.m128_f32[3] = p[3].x;

        fi.parts4[i4].y.m128_f32[0] = p[0].y;
        fi.parts4[i4].y.m128_f32[1] = p[1].y;
        fi.parts4[i4].y.m128_f32[2] = p[2].y;
        fi.parts4[i4].y.m128_f32[3] = p[3].y;

        fi.parts4[i4].z.m128_f32[0] = p[0].z;
        fi.parts4[i4].z.m128_f32[1] = p[1].z;
        fi.parts4[i4].z.m128_f32[2] = p[2].z;
        fi.parts4[i4].z.m128_f32[3] = p[3].z;

        if(T::Color)
        {
          fi.parts4[i4].cr.m128_f32[0] = ((p[0].c>>16)&255)/255.0f;
          fi.parts4[i4].cr.m128_f32[1] = ((p[1].c>>16)&255)/255.0f;
          fi.parts4[i4].cr.m128_f32[2] = ((p[2].c>>16)&255)/255.0f;
          fi.parts4[i4].cr.m128_f32[3] = ((p[3].c>>16)&255)/255.0f;

          fi.parts4[i4].cg.m128_f32[0] = ((p[0].c>> 8)&255)/255.0f;
          fi.parts4[i4].cg.m128_f32[1] = ((p[1].c>> 8)&255)/255.0f;
          fi.parts4[i4].cg.m128_f32[2] = ((p[2].c>> 8)&255)/255.0f;
          fi.parts4[i4].cg.m128_f32[3] = ((p[3].c>> 8)&255)/255.0f;

          fi.parts4[i4].cb.m128_f32[0] = ((p[0].c>> 0)&255)/255.0f;
          fi.parts4[i4].cb.m128_f32[1] = ((p[1].c>> 0)&255)/255.0f;
          fi.parts4[i4].cb.m128_f32[2] = ((p[2].c>> 0)&255)/255.0f;
          fi.parts4[i4].cb.m128_f32[3] = ((p[3].c>> 0)&255)/255.0f;
        }

        p+=4;
        i4++;
      }
      cp = cp->Next;
    }
    sVERIFY(i4==fi.pn4);

    // pass 1: skip every second vertex
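    // (only the base-grid corners are sampled here, at full cell spacing S;
    // the half-step samples in between are computed on demand in the
    // subdivision pass below)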

    for(sInt z=0;z<s+1;z++)
    {
      for(sInt y=0;y<s+1;y++)
      {
        for(sInt x=0;x<s+1;x++)
        {
          sVector31 v = sVector30(x,y,z) * S + tpos;

          func(v,pot[z*mm+y*m+x],fi);
        }
      }
    }

    // subdivision schemes

    if(subdiv==0)                 // none
    {
      // I don't understand why, but manually inlining this makes things a bit faster...
      //  MC.March(Para.BaseGrid,pot,S,tpos);

      switch(base)
      {
        case 0: MC.March_0_1(pot,S,tpos,thread); break;
        case 1: MC.March_1_1(pot,S,tpos,thread); break;
        case 2: MC.March_2_1(pot,S,tpos,thread); break;
        case 3: MC.March_3_1(pot,S,tpos,thread); break;
        case 4: MC.March_4_1(pot,S,tpos,thread); break;
        case 5: MC.March_5_1(pot,S,tpos,thread); break;
        default: sVERIFYFALSE;
      }  
    }
    else                          // subdiv once
    {
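      // pot2 holds the half-step samples of one subdivided cell. Its layout,
      // (s+2)*(s+1)*(s+1) with s=2, matches what March_1_1 expects (the same
      // layout the base-grid pot array uses); only the 3x3x3 samples the cell
      // actually needs are filled in.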
      typename T::FieldType pot2[4][3][3];
      sVector31 v;
      typename T::FieldType pot2y[s][4];
      sInt lastyz[s];
      for(sInt i=0;i<s;i++) lastyz[i] = -2;

      for(sInt z=0;z<s;z++)
      {
        sInt LastY = -2;
        for(sInt y=0;y<s;y++)
        {
          sInt LastX = -2;
          for(sInt x=0;x<s;x++)  
          {
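            // The sign bit of w marks which side of the iso surface a corner
            // lies on. ma ANDs and mo ORs the sign bits of the eight cell
            // corners: the cell is only subdivided and marched when the signs
            // are mixed, i.e. the surface actually passes through it.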
            sU32 flo,ma,mo;
            flo = *(sU32 *)&pot[(z+0)*mm+(y+0)*m+(x+0)].w; ma  = flo; mo  = flo;
            flo = *(sU32 *)&pot[(z+0)*mm+(y+0)*m+(x+1)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+0)*mm+(y+1)*m+(x+0)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+0)*mm+(y+1)*m+(x+1)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+1)*mm+(y+0)*m+(x+0)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+1)*mm+(y+0)*m+(x+1)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+1)*mm+(y+1)*m+(x+0)].w; ma &= flo; mo |= flo;
            flo = *(sU32 *)&pot[(z+1)*mm+(y+1)*m+(x+1)].w; ma &= flo; mo |= flo;
            if((ma&0x80000000)==0 && (mo&0x80000000)!=0)
            {
              
              // get the dots we already have

              pot2[0][0][0] = pot[(z+0)*mm+(y+0)*m+(x+0)];
              pot2[0][0][2] = pot[(z+0)*mm+(y+0)*m+(x+1)];
              pot2[0][2][0] = pot[(z+0)*mm+(y+1)*m+(x+0)];
              pot2[0][2][2] = pot[(z+0)*mm+(y+1)*m+(x+1)];
              pot2[2][0][0] = pot[(z+1)*mm+(y+0)*m+(x+0)];
              pot2[2][0][2] = pot[(z+1)*mm+(y+0)*m+(x+1)];
              pot2[2][2][0] = pot[(z+1)*mm+(y+1)*m+(x+0)];
              pot2[2][2][2] = pot[(z+1)*mm+(y+1)*m+(x+1)];

              // reuse last x2 for current x0

              if(LastX==x-1)
              {
                pot2[1][0][0] = pot2[1][0][2];
                pot2[0][1][0] = pot2[0][1][2];
                pot2[1][1][0] = pot2[1][1][2];
                pot2[2][1][0] = pot2[2][1][2];
                pot2[1][2][0] = pot2[1][2][2];
              }
              else
              {
                v = sVector30(x+0.0f,y+0.0f,z+0.5f) * S + tpos;  func(v,pot2[1][0][0],fi);
                v = sVector30(x+0.0f,y+0.5f,z+0.0f) * S + tpos;  func(v,pot2[0][1][0],fi);
                v = sVector30(x+0.0f,y+0.5f,z+0.5f) * S + tpos;  func(v,pot2[1][1][0],fi);
                v = sVector30(x+0.0f,y+0.5f,z+1.0f) * S + tpos;  func(v,pot2[2][1][0],fi);
                v = sVector30(x+0.0f,y+1.0f,z+0.5f) * S + tpos;  func(v,pot2[1][2][0],fi);
              }
              LastX = x;

              // reuse last y2 for current y0

              if(LastY==y-1 && lastyz[x]==z)
              {
                pot2[0][0][1] = pot2y[x][0];
                pot2[1][0][1] = pot2y[x][1];
                pot2[2][0][1] = pot2y[x][2];
                pot2[1][0][2] = pot2y[x][3];
              }
              else
              {
                v = sVector30(x+0.5f,y+0.0f,z+0.0f) * S + tpos;  func(v,pot2[0][0][1],fi);
                v = sVector30(x+0.5f,y+0.0f,z+0.5f) * S + tpos;  func(v,pot2[1][0][1],fi);
                v = sVector30(x+0.5f,y+0.0f,z+1.0f) * S + tpos;  func(v,pot2[2][0][1],fi);
                v = sVector30(x+1.0f,y+0.0f,z+0.5f) * S + tpos;  func(v,pot2[1][0][2],fi);
              }

              v = sVector30(x+0.5f,y+1.0f,z+0.0f) * S + tpos;  func(v,pot2[0][2][1],fi);  pot2y[x][0] = pot2[0][2][1];
              v = sVector30(x+0.5f,y+1.0f,z+0.5f) * S + tpos;  func(v,pot2[1][2][1],fi);  pot2y[x][1] = pot2[1][2][1];
              v = sVector30(x+0.5f,y+1.0f,z+1.0f) * S + tpos;  func(v,pot2[2][2][1],fi);  pot2y[x][2] = pot2[2][2][1];
              v = sVector30(x+1.0f,y+1.0f,z+0.5f) * S + tpos;  func(v,pot2[1][2][2],fi);  pot2y[x][3] = pot2[1][2][2];
              LastY = y;
              lastyz[x] = z;

              // do the rest, don't bother caching

              v = sVector30(x+0.5f,y+0.5f,z+0.0f) * S + tpos;  func(v,pot2[0][1][1],fi);
              v = sVector30(x+0.5f,y+0.5f,z+0.5f) * S + tpos;  func(v,pot2[1][1][1],fi);
              v = sVector30(x+0.5f,y+0.5f,z+1.0f) * S + tpos;  func(v,pot2[2][1][1],fi);

              v = sVector30(x+1.0f,y+0.5f,z+0.0f) * S + tpos;  func(v,pot2[0][1][2],fi);
              v = sVector30(x+1.0f,y+0.5f,z+0.5f) * S + tpos;  func(v,pot2[1][1][2],fi);
              v = sVector30(x+1.0f,y+0.5f,z+1.0f) * S + tpos;  func(v,pot2[2][1][2],fi);

              // render it

              MC.March_1_1(&pot2[0][0][0],S/2,tpos+sVector30(x*S,y*S,z*S),thread);
            }
          }
        }
      }
    }
  }
}