// Renders the shape into the two output bitmaps: a diffuse imposter (with
// reconstructed alpha) and a normal map (converted to DXT5nm layout).
//
// @param rotMatrix     Orientation to render the shape from.
// @param imposterOut   Receives a newly allocated diffuse bitmap; caller owns it.
// @param normalMapOut  Receives a newly allocated normal-map bitmap; caller owns it.
//
// NOTE(review): both out-bitmaps are raw-new'd here and returned through
// double pointers — ownership transfers to the caller (engine convention).
void ImposterCapture::capture( const MatrixF &rotMatrix, GBitmap **imposterOut, GBitmap **normalMapOut )
{
   // Restores the GFX transform state when this function exits.
   GFXTransformSaver saver;

   // this version of the snapshot function renders the shape to a black texture, then to white, then reads bitmaps
   // back for both renders and combines them, restoring the alpha and color values. this is based on the
   // TGE implementation. it is not fast due to the copy and software combination operations. the generated bitmaps
   // are upside-down (which is how TGE generated them...)

   (*imposterOut) = new GBitmap( mDim, mDim, false, GFXFormatR8G8B8A8 );
   (*normalMapOut) = new GBitmap( mDim, mDim, false, GFXFormatR8G8B8A8 );

   // The object to world transform.
   // Translate the shape so its bounds center sits at the origin, then rotate.
   MatrixF centerMat( true );
   centerMat.setPosition( -mCenter );
   MatrixF objMatrix( rotMatrix );
   objMatrix.mul( centerMat );
   GFX->setWorldMatrix( objMatrix );

   // The view transform.
   // Camera pulled back along the axis by 10x the shape radius so the whole
   // shape fits in frame.
   MatrixF view( EulerF( M_PI_F / 2.0f, 0, M_PI_F ), Point3F( 0, 0, -10.0f * mRadius ) );
   mRenderPass->assignSharedXform( RenderPassManager::View, view );
   mRenderPass->assignSharedXform( RenderPassManager::Projection, GFX->getProjectionMatrix() );

   // Render the diffuse pass.
   // The same geometry is rendered twice — once on a transparent black clear,
   // once on opaque white — so _separateAlpha() below can recover coverage.
   mRenderPass->clear();
   mMeshRenderBin->getMatOverrideDelegate().bind( ImposterCaptureMaterialHook::getDiffuseInst );
   _renderToTexture( mBlackTex, mBlackBmp, ColorI(0, 0, 0, 0) );
   _renderToTexture( mWhiteTex, mWhiteBmp, ColorI(255, 255, 255, 255) );

   // Now render the normals.
   mRenderPass->clear();
   mMeshRenderBin->getMatOverrideDelegate().bind( ImposterCaptureMaterialHook::getNormalsInst );
   _renderToTexture( mNormalTex, *normalMapOut, ColorI(0, 0, 0, 0) );

   // Combine the black/white renders into the final imposter with alpha, and
   // swizzle the normal map into DXT5nm form.
   _separateAlpha( *imposterOut );
   _convertDXT5nm( *normalMapOut );

   // Debug-only dump of the intermediate and final bitmaps; flip the constant
   // to 1 to enable.
   if ( 0 )
   {
      // Render out the bitmaps for debug purposes.
      FileStream fs;
      if ( fs.open( "./blackbmp.png", Torque::FS::File::Write ) )
         mBlackBmp->writeBitmap( "png", fs );
      fs.close();
      if ( fs.open( "./whitebmp.png", Torque::FS::File::Write ) )
         mWhiteBmp->writeBitmap( "png", fs );
      fs.close();
      if ( fs.open( "./normalbmp.png", Torque::FS::File::Write ) )
         (*normalMapOut)->writeBitmap( "png", fs );
      fs.close();
      if ( fs.open( "./finalimposter.png", Torque::FS::File::Write ) )
         (*imposterOut)->writeBitmap( "png", fs );
      fs.close();
   }
}
void renderer::findBest(Camera &camera,Mesh* mesh, particle camPartikel, Shader* _likelihood, Shader* _meshProgram, glm::mat4 m, glm::mat4 v, glm::mat4 p, GLuint mLoc,GLuint vLoc,GLuint pLoc, GLuint handle, glm::vec3 &newCenter, glm::vec3 &newLookAt){ max = 0.0; cv::Mat_<float> centerMat = camPartikel.getParticleCenterM(); cv::Mat_<float> lookAtMat = camPartikel.getParticleLookAtM(); //querys to count the pixels const int size = 400; GLuint queryArray[size]; glGenQueries(size, queryArray); float meshsRelValue[size] = {0.0}; //bind texture with edgepicture texLoc = glGetUniformLocation(handle,"tex"); glActiveTexture(GL_TEXTURE0); glUniform1i(texLoc,0); glBindTexture(GL_TEXTURE_2D,handle); //bind likelihoodshader _likelihood->bind(); m = mesh->computeModelMatrix(); glUniformMatrix4fv(mLoc,1,GL_FALSE,glm::value_ptr(m)); glUniformMatrix4fv(pLoc,1,GL_FALSE,glm::value_ptr(p)); //funktioniert nicht //glm::mat4 * view = new glm::mat4[size]; //view = camPartikel.getViewArray(); for (int i = 0; i < centerMat.rows; i++) { cameraPos.x = centerMat(i,0); cameraPos.y = centerMat(i,1); cameraPos.z = centerMat(i,2); camera.setCenter(cameraPos); for (int j = 0; j < centerMat.rows; j++) { cameraLookAt.x = lookAtMat(i * centerMat.rows + j, 0); cameraLookAt.y = lookAtMat(i * centerMat.rows + j, 1); cameraLookAt.z = lookAtMat(i * centerMat.rows + j, 2); camera.setLookAt(cameraLookAt); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); v = camera.getViewMatrix(); glUniformMatrix4fv(vLoc,1,GL_FALSE,glm::value_ptr(v)); glBeginQuery(GL_SAMPLES_PASSED, queryArray[i]); if(mesh != nullptr) mesh->draw(&cameraPos); glEndQuery(GL_SAMPLES_PASSED); glGetQueryObjectuiv(queryArray[i], GL_QUERY_RESULT, &PixelCountSet); meshsRelValue[i * centerMat.rows + j] = PixelCountSet; //cout << "PixelCount für " << i * centerMat.rows + j << ": " << PixelCountSet << endl; } } glBindTexture(GL_TEXTURE_2D,0); _likelihood->unbind(); //bind meshshader _meshProgram->bind(); for (int i = 0; i < centerMat.rows; i++) { 
cameraPos.x = centerMat(i,0); cameraPos.y = centerMat(i,1); cameraPos.z = centerMat(i,2); camera.setCenter(cameraPos); for (int j = 0; j < centerMat.rows; j++) { cameraLookAt.x = lookAtMat(i * centerMat.rows + j, 0); cameraLookAt.y = lookAtMat(i * centerMat.rows + j, 1); cameraLookAt.z = lookAtMat(i * centerMat.rows + j, 2); camera.setLookAt(cameraLookAt); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); v = camera.getViewMatrix(); glUniformMatrix4fv(vLoc,1,GL_FALSE,glm::value_ptr(v)); glBeginQuery(GL_SAMPLES_PASSED, queryArray[i]); if(mesh != nullptr) mesh->draw(&cameraPos); glEndQuery(GL_SAMPLES_PASSED); glGetQueryObjectuiv(queryArray[i], GL_QUERY_RESULT, &PixelCountSet); meshsRelValue[i * centerMat.rows + j] /= PixelCountSet; if(meshsRelValue[i * centerMat.rows + j] > max){ max = meshsRelValue[i * centerMat.rows + j]; newCenter.x = centerMat(i,0); newCenter.y = centerMat(i,1); newCenter.z = centerMat(i,2); newLookAt.x = lookAtMat(i * centerMat.rows + j, 0); newLookAt.y = lookAtMat(i * centerMat.rows + j, 1); newLookAt.z = lookAtMat(i * centerMat.rows + j, 2); } //cout << "PixelCount für " << i * centerMat.rows + j << ": " << meshsRelValue[i * centerMat.rows + j] << endl; } } _meshProgram->unbind(); glDeleteQueries(size, queryArray); //set camerapose of the best new position camera.setCenter(newCenter); camera.setLookAt(newLookAt); }