void QmitkNavigationDataPlayerView::OnSetDisplay()
{
    DestroyPipeline();
    if ((m_Controls->m_ChkDisplay->isChecked()) && (m_Player.IsNotNull()))
    {
        CreatePipeline();
    }
}
void VkRenderBuffers::BeginFrame(int width, int height, int sceneWidth, int sceneHeight)
{
    VkSampleCountFlagBits samples = GetBestSampleCount();

    if (width != mWidth || height != mHeight || mSamples != samples)
    {
        auto fb = GetVulkanFrameBuffer();
        fb->GetRenderPassManager()->RenderBuffersReset();
        fb->GetPostprocess()->RenderBuffersReset();
    }

    if (width != mWidth || height != mHeight)
        CreatePipeline(width, height);

    if (width != mWidth || height != mHeight || mSamples != samples)
        CreateScene(width, height, samples);

    CreateShadowmap();

    mWidth = width;
    mHeight = height;
    mSamples = samples;
    mSceneWidth = sceneWidth;
    mSceneHeight = sceneHeight;
}
VkPPRenderPassSetup::VkPPRenderPassSetup(const VkPPRenderPassKey &key)
{
    CreateDescriptorLayout(key);
    CreatePipelineLayout(key);
    CreateRenderPass(key);
    CreatePipeline(key);
}
void VulkanTexturedQuad::CreatePipelineStateObject()
{
    vertexShader_ = LoadShader(device_, BasicVertexShader, sizeof(BasicVertexShader));
    fragmentShader_ = LoadShader(device_, TexturedFragmentShader, sizeof(TexturedFragmentShader));

    pipeline_ = CreatePipeline(device_, renderPass_, pipelineLayout_, vertexShader_, fragmentShader_);
}
bool CDFKAFU050::start()
{
    CreatePipeline();
    return true;
}
void VulkanQuad::CreatePipelineStateObject()
{
    vertexShader_ = LoadShader(device_, BasicVertexShader, sizeof(BasicVertexShader));
    fragmentShader_ = LoadShader(device_, BasicFragmentShader, sizeof(BasicFragmentShader));

    pipelineLayout_ = CreatePipelineLayout(device_);

    VkExtent2D extent = {
        static_cast<uint32_t>(window_->GetWidth()),
        static_cast<uint32_t>(window_->GetHeight())
    };

    pipeline_ = CreatePipeline(device_, renderPass_, pipelineLayout_, vertexShader_, fragmentShader_, extent);
}
void FGLRenderBuffers::Setup(int width, int height, int sceneWidth, int sceneHeight)
{
    if (width <= 0 || height <= 0)
        I_FatalError("Requested invalid render buffer sizes: screen = %dx%d", width, height);

    int samples = clamp((int)gl_multisample, 0, mMaxSamples);
    bool needsSceneTextures = (gl_ssao != 0);

    // Save the current texture bindings so they can be restored after the buffers are (re)created
    GLint activeTex;
    GLint textureBinding;
    glGetIntegerv(GL_ACTIVE_TEXTURE, &activeTex);
    glActiveTexture(GL_TEXTURE0);
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &textureBinding);

    if (width != mWidth || height != mHeight)
        CreatePipeline(width, height);

    if (width != mWidth || height != mHeight || mSamples != samples || mSceneUsesTextures != needsSceneTextures)
        CreateScene(width, height, samples, needsSceneTextures);

    mWidth = width;
    mHeight = height;
    mSamples = samples;
    mSceneUsesTextures = needsSceneTextures;
    mSceneWidth = sceneWidth;
    mSceneHeight = sceneHeight;

    glBindTexture(GL_TEXTURE_2D, textureBinding);
    glActiveTexture(activeTex);
    glBindRenderbuffer(GL_RENDERBUFFER, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    if (FailedCreate)
    {
        ClearScene();
        ClearPipeline();
        ClearEyeBuffers();
        mWidth = 0;
        mHeight = 0;
        mSamples = 0;
        mSceneWidth = 0;
        mSceneHeight = 0;
        I_FatalError("Unable to create render buffers.");
    }
}
bool Tutorial03::ChildOnWindowSizeChanged()
{
    if (!CreateRenderPass())
    {
        return false;
    }
    if (!CreateFramebuffers())
    {
        return false;
    }
    if (!CreatePipeline())
    {
        return false;
    }
    if (!CreateCommandBuffers())
    {
        return false;
    }
    if (!RecordCommandBuffers())
    {
        return false;
    }
    return true;
}
bool CreateStream()
{
    if (IsLoaded())
        return true;

    _BuildPipeline();

    // Build the GStreamer pipeline from its textual description
    GError *err = 0;
    GstElement *p = gst_parse_launch(m_pipeLineString.c_str(), &err);
    if (err)
    {
        printf("GstNetworkAudioPlayer: Pipeline error: %s\n", err->message);
        g_error_free(err);
    }
    if (!p)
        return false;

    SetPipeline(p);
    _UpdatePorts();
    return CreatePipeline(false, m_ipAddr, m_clockPort);
}
bool CGigECamera::start()
{
    CreatePipeline();

    // Have the sink emit "handoff" signals so each buffer is forwarded to the callback
    g_object_set(G_OBJECT(GetElement("image_sink")), "signal-handoffs", TRUE, NULL);
    g_signal_connect(G_OBJECT(GetElement("image_sink")), "handoff", G_CALLBACK(_cbfunc), _CallbackData);

    GstStateChangeReturn sret = gst_element_set_state(_Pipeline.pipeline, GST_STATE_PLAYING);
    if (sret == GST_STATE_CHANGE_FAILURE)
    {
        g_printerr("Playing set_state failed.\n");
        return false;
    }
    else
    {
        g_print("Playing\n");
    }
    return true;
}
bool GltfPbr::OnCreate(
    ID3D12Device* pDevice,
    UploadHeapDX12* pUploadHeap,
    ResourceViewHeapsDX12 *pHeaps,
    DynamicBufferRingDX12 *pDynamicBufferRing,
    StaticBufferPoolDX12 *pStaticBufferPool,
    GLTFCommon *pGLTFData,
    SkyDome *pSkyDome,
#ifdef USE_SHADOWMAPS
    Texture *pShadowMap,
#endif
    void *pluginManager, void *msghandler)
{
    m_pGLTFData = pGLTFData;
    m_pDynamicBufferRing = pDynamicBufferRing;
    m_pResourceViewHeaps = pHeaps;
    m_pStaticBufferPool = pStaticBufferPool;

    // Load cubemap textures for IBL
    m_pCubeDiffuseTexture = pSkyDome->GetDiffuseCubeMap();
    m_pCubeSpecularTexture = pSkyDome->GetSpecularCubeMap();
    if (m_BrdfTexture.InitFromFile(pDevice, pUploadHeap, L"./plugins/media/envmap/brdf.dds", pluginManager, msghandler) != 0)
    {
        return false;
    }
    pUploadHeap->FlushAndFinish();

    json &j3 = pGLTFData->j3;

    // Load textures for the glTF file
    if (!pGLTFData->isBinFile)
    {
        auto images = j3["images"];
        m_textures.resize(images.size());
        for (unsigned int i = 0; i < images.size(); i++)
        {
            std::string filename = images[i]["uri"];
            WCHAR wcstrPath[MAX_PATH];
            MultiByteToWideChar(CP_UTF8, 0, (pGLTFData->m_path + filename).c_str(), -1, wcstrPath, MAX_PATH);
            INT32 result = m_textures[i].InitFromFile(pDevice, pUploadHeap, wcstrPath, pluginManager, msghandler);
        }
        pUploadHeap->FlushAndFinish();
    }

    // Load PBR 2.0 Materials
    //
    if (DX12_CMips)
    {
        DX12_CMips->Print("Load PBR 2.0 Materials");
    }

    std::vector<PBRMaterial *> materialsData;
    auto materials = j3["materials"];
    auto textures = j3["textures"];
    for (unsigned int i = 0; i < materials.size(); i++)
    {
        json::object_t material = materials[i];

        PBRMaterial *tfmat = new PBRMaterial();
        materialsData.push_back(tfmat);

        // Load material constants
        //
        json::array_t ones = { 1.0, 1.0, 1.0, 1.0 };
        json::array_t zeroes = { 0.0, 0.0, 0.0, 0.0 };
        tfmat->emissiveFactor = (XMVECTOR)GetVector(GetElementJsonArray(material, "emissiveFactor", zeroes));
        tfmat->baseColorFactor = (XMVECTOR)GetVector(GetElementJsonArray(material, "pbrMetallicRoughness/baseColorFactor", ones));
        try
        {
            tfmat->metallicFactor = GetElementFloat(material, "pbrMetallicRoughness/metallicFactor", 1.0);
        }
        catch (json::exception& e)
        {
            tfmat->metallicFactor = (GetElementJsonArray(material, "pbrMetallicRoughness/metallicFactor", ones))[0];
        }
        try
        {
            tfmat->roughnessFactor = GetElementFloat(material, "pbrMetallicRoughness/roughnessFactor", 1.0);
        }
        catch (json::exception& e)
        {
            tfmat->roughnessFactor = (GetElementJsonArray(material, "pbrMetallicRoughness/roughnessFactor", ones))[0];
        }

        tfmat->m_defines["DEF_alphaMode_" + GetElementString(material, "alphaMode", "OPAQUE")] = 1;

        float alphaCutOff = 0.0f;
        try
        {
            alphaCutOff = GetElementFloat(material, "alphaCutoff", 1.0);
        }
        catch (json::exception& e)
        {
            alphaCutOff = (GetElementJsonArray(material, "alphaCutoff", ones))[0];
        }
        tfmat->m_defines["DEF_alphaCutoff"] = std::to_string(alphaCutOff);

        // load glTF 2.0 material's textures (if present) and create descriptor set
        //
        std::map<std::string, TextureDX12 *> texturesBase;
        if (textures.size() > 0)
        {
            AddTextureIfExists(material, textures, texturesBase, "pbrMetallicRoughness/baseColorTexture/index", "baseColorTexture");
            AddTextureIfExists(material, textures, texturesBase, "pbrMetallicRoughness/metallicRoughnessTexture/index", "metallicRoughnessTexture");
            AddTextureIfExists(material, textures, texturesBase, "emissiveTexture/index", "emissiveTexture");
            AddTextureIfExists(material, textures, texturesBase, "normalTexture/index", "normalTexture");
            AddTextureIfExists(material, textures, texturesBase, "occlusionTexture/index", "occlusionTexture");
        }

        tfmat->m_textureCount = (int)texturesBase.size();
        if (m_pCubeDiffuseTexture)
            tfmat->m_textureCount += 1;
        if (m_pCubeSpecularTexture)
            tfmat->m_textureCount += 1;

        // + 1 brdf lookup texture, add that to the total count of textures used
        tfmat->m_textureCount += 1;

#ifdef USE_SHADOWMAPS
        // plus shadows
        if (pShadowMap != NULL)
            tfmat->m_textureCount += 1;
#endif

        if (tfmat->m_textureCount >= 0)
        {
            // allocate descriptor table for the textures
            tfmat->m_pTexturesTable = new CBV_SRV_UAV[tfmat->m_textureCount];
            pHeaps->AllocCBV_SRV_UAVDescriptor(tfmat->m_textureCount, tfmat->m_pTexturesTable);

            int cnt = 0;

            // create SRVs and #defines so the shader compiler knows what the index of each texture is
            for (auto it = texturesBase.begin(); it != texturesBase.end(); it++)
            {
                tfmat->m_defines[std::string("ID_") + it->first] = std::to_string(cnt);
                it->second->CreateSRV(cnt++, tfmat->m_pTexturesTable);
            }

            // create SRVs and #defines for the IBL resources
            if (m_pCubeDiffuseTexture)
            {
                tfmat->m_defines["ID_diffuseCube"] = std::to_string(cnt);
                m_pCubeDiffuseTexture->CreateCubeSRV(cnt++, tfmat->m_pTexturesTable);
                tfmat->m_defines["USE_IBL"] = "1";
            }
            if (m_pCubeSpecularTexture)
            {
                tfmat->m_defines["ID_specularCube"] = std::to_string(cnt);
                m_pCubeSpecularTexture->CreateCubeSRV(cnt++, tfmat->m_pTexturesTable);
                tfmat->m_defines["USE_IBL"] = "1";
            }
            tfmat->m_defines["ID_brdfTexture"] = std::to_string(cnt);
            m_BrdfTexture.CreateSRV(cnt++, tfmat->m_pTexturesTable);

#ifdef USE_SHADOWMAPS
            // add SRV for the shadowmap
            if (pShadowMap != NULL)
            {
                tfmat->m_defines["ID_shadowMap"] = std::to_string(cnt);
                pShadowMap->CreateSRV(cnt++, tfmat->m_pTexturesTable);
            }
#endif
        }
    }

    // Load Meshes
    //
    if (DX12_CMips)
    {
        DX12_CMips->Print("Load Meshes");
    }

    auto accessors = j3["accessors"];
    auto bufferViews = j3["bufferViews"];
    auto meshes = j3["meshes"];
    m_meshes.resize(meshes.size());
    for (unsigned int i = 0; i < meshes.size(); i++)
    {
        PBRMesh *tfmesh = &m_meshes[i];
        auto primitives = meshes[i]["primitives"];
        tfmesh->m_pPrimitives.resize(primitives.size());
        for (unsigned int p = 0; p < primitives.size(); p++)
        {
            PBRPrimitives *pPrimitive = &tfmesh->m_pPrimitives[p];

            // Set Material
            //
            pPrimitive->m_pMaterial = materialsData[primitives[p]["material"]];

            // Set Index buffer
            //
            tfAccessor indexBuffer;
            {
                int indicesID = primitives[p]["indices"].get<int>();
                json::object_t indicesAccessor = accessors[indicesID];
                GetBufferDetails(indicesAccessor, bufferViews, pGLTFData->buffersData, &indexBuffer);
            }

            // Get input layout
            //
            std::vector<tfAccessor> vertexBuffers;
            std::vector<std::string> semanticNames;
            std::vector<D3D12_INPUT_ELEMENT_DESC> layout;

            auto attribute = primitives[p]["attributes"];
            layout.reserve(attribute.size());
            semanticNames.reserve(attribute.size());
            vertexBuffers.resize(attribute.size());
            for (auto it = attribute.begin(); it != attribute.end(); it++)
            {
                // glTF attributes may end in a number; DX12 doesn't like this, so if that is the case we need to split the attribute name from the number
                //
                CMP_DWORD semanticIndex = 0;
                std::string semanticName;
                SplitGltfAttribute(it.key(), &semanticName, &semanticIndex);
                semanticNames.push_back(semanticName);

                auto accessor = accessors[it.value().get<int>()];

                // Get VB accessors
                //
                GetBufferDetails(accessor, bufferViews, pGLTFData->buffersData, &vertexBuffers[layout.size()]);

                // Create Input Layout
                //
                D3D12_INPUT_ELEMENT_DESC l;
                l.SemanticName = NULL; // we need to set it in the pipeline function (because of multithreading)
                l.SemanticIndex = semanticIndex;
                l.Format = GetFormatDX12(accessor["type"], accessor["componentType"]);
                l.InputSlot = (UINT)layout.size();
                l.InputSlotClass = D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
                l.InstanceDataStepRate = 0;
                l.AlignedByteOffset = D3D12_APPEND_ALIGNED_ELEMENT;

                layout.push_back(l);
            }

            if (!CreateGeometry(indexBuffer, vertexBuffers, pPrimitive))
                return false;

            // Build each primitive's pipeline state object on a worker thread
            GetThreadPool()->Add_Job([=]()
            {
                CreatePipeline(pDevice, pUploadHeap->GetNodeMask(), semanticNames, layout, pPrimitive);
            });
        }
    }

    return true;
}
void Window::Create()
{
    /* register window class */
    WNDCLASSEX wcex;
    wcex.cbSize = sizeof(WNDCLASSEX);
    wcex.style = CS_OWNDC;
    wcex.lpfnWndProc = WindowProc;
    wcex.cbClsExtra = 0;
    wcex.cbWndExtra = 0;
    wcex.hInstance = GlobalInstanceHandle;
    wcex.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wcex.hCursor = LoadCursor(NULL, IDC_ARROW);
    wcex.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
    wcex.lpszMenuName = NULL;
    wcex.lpszClassName = "Cell_Engine_Window";
    wcex.hIconSm = LoadIcon(NULL, IDI_APPLICATION);
    if (!RegisterClassEx(&wcex))
    {
        //exit(0);
    }

    /* create main window */
    ConsoleEcho("Window creation");
    WindowHandler = CreateWindowEx(0, "Cell_Engine_Window", Title, WS_TILEDWINDOW,
                                   CW_USEDEFAULT, CW_USEDEFAULT, width, height,
                                   NULL, NULL, GlobalInstanceHandle, NULL);

    PIXELFORMATDESCRIPTOR pfd;
    int iFormat;

    /* get the device context (DC) */
    DeviceContext = GetDC(WindowHandler);

    /* set the pixel format for the DC */
    ZeroMemory(&pfd, sizeof(pfd));
    pfd.nSize = sizeof(pfd);
    pfd.nVersion = 1;
    pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
    pfd.iPixelType = PFD_TYPE_RGBA;
    pfd.cColorBits = 24;
    pfd.cDepthBits = 16;
    pfd.iLayerType = PFD_MAIN_PLANE;
    iFormat = ChoosePixelFormat(DeviceContext, &pfd);
    SetPixelFormat(DeviceContext, iFormat, &pfd);

    /* create and enable the render context (RC) */
    GLRenderingContext = wglCreateContext(DeviceContext);
    SharedRenderContext = wglCreateContext(DeviceContext);
    UserSharedRenderContext = wglCreateContext(DeviceContext);
    if (SharedRenderContext == NULL)
    {
        DWORD errorCode = GetLastError();
        LPVOID lpMsgBuf;
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, errorCode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR)&lpMsgBuf, 0, NULL);
        MessageBox(NULL, (LPCTSTR)lpMsgBuf, "Error", MB_OK | MB_ICONINFORMATION);
        LocalFree(lpMsgBuf);
        exit(125);
    }

    /* share display lists between the main context and the two auxiliary contexts */
    bool Error = wglShareLists(GLRenderingContext, SharedRenderContext);
    Error = wglShareLists(GLRenderingContext, UserSharedRenderContext);
    if (Error == false)
    {
        ConsoleEcho("Error in Share List");
        exit(122);
    }

    ConsoleEcho("Context creation");
    Error = wglMakeCurrent(DeviceContext, SharedRenderContext);
    if (Error == false)
    {
        NoticeError();
    }
    wglMakeCurrent(DeviceContext, GLRenderingContext);
    Context_Initied = true;
    //wglShareLists(GLobalHGLRC,GLRenderingContext);
    glewInit();

    ConsoleEcho("Device getting");
    GetDeviceMode();
    ConsoleEcho("FrameRate Upd");
    if (FrameRate == 0)
    {
        DeviceMode.dmDisplayFrequency = FrameRateList.back();
        FrameRate = FrameRateList.back();
    }
    else
    {
        DeviceMode.dmDisplayFrequency = FrameRate;
    }
    ConsoleEcho("device upd");
    UpdateDeviceMode();

    RenderWidth = width;
    RenderHeight = height;
    ShowWindow(WindowHandler, SW_SHOW);
    Created = true;
    GetRendererInfo();

    ConsoleEcho("Buffer");
#if defined(DEBUG_MODE)
    DefineGLDebugCallback();
#endif // defined
    if (MSAA_Sample > 0)
    {
        PrepareSamplingBuffer(MSAA_Sample);
    }
    else
    {
        MSAA_Sample = 0;
        PrepareBuffer();
    }
    glViewport(0, 0, RenderWidth, RenderHeight);
    test = 5;
    //glEnable(GL_DEPTH_TEST);
    VSyncState(true);
    CreatePipeline();
    SetEvent(ThreadStart);
}
bool FGLRenderBuffers::Setup(int width, int height, int sceneWidth, int sceneHeight)
{
    if (gl_renderbuffers != BuffersActive)
    {
        if (BuffersActive)
            glBindFramebuffer(GL_FRAMEBUFFER, mOutputFB);
        BuffersActive = gl_renderbuffers;
        GLRenderer->mShaderManager->ResetFixedColormap();
    }

    if (!IsEnabled())
        return false;

    if (width <= 0 || height <= 0)
        I_FatalError("Requested invalid render buffer sizes: screen = %dx%d", width, height);

    int samples = clamp((int)gl_multisample, 0, mMaxSamples);
    bool needsSceneTextures = (gl_ssao != 0);

    GLint activeTex;
    GLint textureBinding;
    glGetIntegerv(GL_ACTIVE_TEXTURE, &activeTex);
    glActiveTexture(GL_TEXTURE0);
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &textureBinding);

    if (width != mWidth || height != mHeight)
        CreatePipeline(width, height);

    if (width != mWidth || height != mHeight || mSamples != samples || mSceneUsesTextures != needsSceneTextures)
        CreateScene(width, height, samples, needsSceneTextures);

    mWidth = width;
    mHeight = height;
    mSamples = samples;
    mSceneUsesTextures = needsSceneTextures;

    // Bloom blurring buffers need to match the scene to avoid bloom bleeding artifacts
    if (mSceneWidth != sceneWidth || mSceneHeight != sceneHeight)
    {
        CreateBloom(sceneWidth, sceneHeight);
        CreateExposureLevels(sceneWidth, sceneHeight);
        CreateAmbientOcclusion(sceneWidth, sceneHeight);
        mSceneWidth = sceneWidth;
        mSceneHeight = sceneHeight;
    }

    glBindTexture(GL_TEXTURE_2D, textureBinding);
    glActiveTexture(activeTex);
    glBindRenderbuffer(GL_RENDERBUFFER, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    if (FailedCreate)
    {
        ClearScene();
        ClearPipeline();
        ClearEyeBuffers();
        ClearBloom();
        ClearExposureLevels();
        mWidth = 0;
        mHeight = 0;
        mSamples = 0;
        mSceneWidth = 0;
        mSceneHeight = 0;
    }

    return !FailedCreate;
}
int main(int argc, char *argv[])
#endif
{
    sInputParams Params = {};   // input parameters from command line
    std::auto_ptr<CEncodingPipeline> pPipeline;

    mfxStatus sts = MFX_ERR_NONE;   // return value check

    sts = ParseInputString(argv, (mfxU8)argc, &Params);
    MSDK_CHECK_PARSE_RESULT(sts, MFX_ERR_NONE, 1);

    // Choosing which pipeline to use
    pPipeline.reset(CreatePipeline(Params));

    MSDK_CHECK_POINTER(pPipeline.get(), MFX_ERR_MEMORY_ALLOC);

    if (MVC_ENABLED & Params.MVC_flags)
    {
        pPipeline->SetMultiView();
        pPipeline->SetNumView(Params.numViews);
    }

    sts = pPipeline->Init(&Params);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    pPipeline->PrintInfo();

    msdk_printf(MSDK_STRING("Processing started\n"));

    if (pPipeline->CaptureStartV4L2Pipeline() != MFX_ERR_NONE)
    {
        msdk_printf(MSDK_STRING("V4l2 failure terminating the program\n"));
        return 0;
    }

    for (;;)
    {
        sts = pPipeline->Run();

        if (MFX_ERR_DEVICE_LOST == sts || MFX_ERR_DEVICE_FAILED == sts)
        {
            msdk_printf(MSDK_STRING("\nERROR: Hardware device was lost or returned an unexpected error. Recovering...\n"));
            sts = pPipeline->ResetDevice();
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);

            sts = pPipeline->ResetMFXComponents(&Params);
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);
            continue;
        }
        else
        {
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, 1);
            break;
        }
    }

    pPipeline->CaptureStopV4L2Pipeline();
    pPipeline->Close();

    msdk_printf(MSDK_STRING("\nProcessing finished\n"));

    return 0;
}
//----------------------------------------------------------------------------
medGizmoCrossTranslatePlane::medGizmoCrossTranslatePlane(mafVME *input, mafObserver *listener)
//----------------------------------------------------------------------------
{
    m_LastColor[0][S0] = -1;
    m_LastColor[1][S0] = -1;
    m_LastColor[2][S0] = -1;
    m_LastColor[0][S1] = -1;
    m_LastColor[1][S1] = -1;
    m_LastColor[2][S1] = -1;

    m_TranslationFeedbackGizmo = NULL;
    m_FeedbackConeSource = NULL;

    // feedback cone transform stuff
    m_LeftFeedbackConeTransform = NULL;
    m_RightFeedbackConeTransform = NULL;
    m_UpFeedbackConeTransform = NULL;
    m_DownFeedbackConeTransform = NULL;

    // feedback cone transform PDF
    m_LeftFeedbackConeTransformPDF = NULL;
    m_RightFeedbackConeTransformPDF = NULL;
    m_UpFeedbackConeTransformPDF = NULL;
    m_DownFeedbackConeTransformPDF = NULL;

    m_FeedbackCylinderSource = NULL;
    m_HorizontalFeedbackCylinderTransform = NULL;
    m_VerticalFeedbackCylinderTransform = NULL;
    m_VerticalFeedbackCylinderTransformPDF = NULL;
    m_HorizontalFeedbackCylinderTransformPDF = NULL;
    m_FeedbackStuffAppendPolydata = NULL;

    this->SetIsActive(false);

    m_IsaComp[0] = m_IsaComp[1] = NULL;

    m_Listener = listener;
    m_InputVme = input;
    m_Length = 1;

    // default plane is YZ
    m_ActivePlane = X_NORMAL;

    // TODO REFACTOR THIS: Isa Generic API cleanup
    // pivot transform stuff in isa generic probably could be deleted with a minor refactor
    //
    //-----------------
    // pivot stuff
    //-----------------
    // pivotTransform is useless for this operation but required by isa generic
    m_PivotTransform = vtkTransform::New();

    // create pipeline stuff
    CreatePipeline();

    // create isa stuff
    CreateISA();

    //-----------------
    // create vme gizmos stuff
    //-----------------
    mafString vmeName;
    int i;
    for (i = 0; i < NUM_GIZMO_PARTS; i++)
    {
        // the ith gizmo
        m_Gizmo[i] = mafVMEGizmo::New();
        vmeName = "part";
        vmeName << i;
        m_Gizmo[i]->SetName(vmeName.GetCStr());
        m_Gizmo[i]->SetData(m_RotatePDF[i]->GetOutput());
        m_Gizmo[i]->SetMediator(m_Listener);
    }

    // assign isa to S0 and S1
    m_Gizmo[S0]->SetBehavior(m_IsaComp[S0]);
    m_Gizmo[S1]->SetBehavior(m_IsaComp[S1]);

    mafMatrix *absInputMatrix = m_InputVme->GetOutput()->GetAbsMatrix();
    SetAbsPose(absInputMatrix);
    SetConstrainRefSys(absInputMatrix);

    // add the gizmo to the tree, this should increase reference count
    for (i = 0; i < NUM_GIZMO_PARTS; i++)
    {
        m_Gizmo[i]->ReparentTo(mafVME::SafeDownCast(m_InputVme->GetRoot()));
    }

    CreateFeedbackGizmoPipeline();
}