bool mitk::AffineInteractor::ExecuteAction(Action* action, mitk::StateEvent const* stateEvent)
{
  bool ok = false;

  TimeSlicedGeometry* inputtimegeometry = GetData()->GetTimeSlicedGeometry();
  if (inputtimegeometry == NULL)
    return false;

  Geometry3D* geometry = inputtimegeometry->GetGeometry3D(m_TimeStep);

  mitk::DisplayPositionEvent const *event = dynamic_cast<const mitk::DisplayPositionEvent *>(stateEvent->GetEvent());

  switch (action->GetActionId())
  {
  case AcCHECKELEMENT:
    {
      mitk::Point3D worldPoint = event->GetWorldPosition();
      /* now we have a world point. check if it is inside our object and select/deselect it accordingly */

      mitk::BoolProperty::Pointer selected;
      mitk::ColorProperty::Pointer color;
      std::auto_ptr<StateEvent> newStateEvent;

      selected = dynamic_cast<mitk::BoolProperty*>(m_DataNode->GetProperty("selected"));
      if ( selected.IsNull() )
      {
        selected = mitk::BoolProperty::New();
        m_DataNode->GetPropertyList()->SetProperty("selected", selected);
      }

      color = dynamic_cast<mitk::ColorProperty*>(m_DataNode->GetProperty("color"));
      if ( color.IsNull() )
      {
        color = mitk::ColorProperty::New();
        m_DataNode->GetPropertyList()->SetProperty("color", color);
      }

      if (this->CheckSelected(worldPoint, m_TimeStep))
      {
        newStateEvent.reset(new mitk::StateEvent(EIDYES, stateEvent->GetEvent()));
        selected->SetValue(true);
        color->SetColor(1.0, 1.0, 0.0);
      }
      else
      {
        newStateEvent.reset(new mitk::StateEvent(EIDNO, stateEvent->GetEvent()));
        selected = mitk::BoolProperty::New(false);
        color->SetColor(0.0, 0.0, 1.0);
        /*
        mitk::BoundingObject* b = dynamic_cast<mitk::BoundingObject*>(m_DataNode->GetData());
        if (b != NULL)
        {
          color = (b->GetPositive()) ? mitk::ColorProperty::New(0.0, 0.0, 1.0) : mitk::ColorProperty::New(1.0, 0.0, 0.0);
          // if deselected, a bounding object is colored according to its positive/negative state
        }
        else
          color = mitk::ColorProperty::New(1.0, 1.0, 1.0); // if deselected and no bounding object, color is white
        */
      }

      /* write the new state (selected/not selected) to the property */
      this->HandleEvent( newStateEvent.get() );

      ok = true;
      break;
    }
  case AcADD:
    {
      mitk::Point3D worldPoint = event->GetWorldPosition();
      std::auto_ptr<StateEvent> newStateEvent;

      if (this->CheckSelected(worldPoint, m_TimeStep))
      {
        newStateEvent.reset(new mitk::StateEvent(EIDYES, event));
        m_DataNode->GetPropertyList()->SetProperty("selected", mitk::BoolProperty::New(true));
        // TODO: Generate a Select Operation and send it to the undo controller?
      }
      else // if not selected, do nothing (don't deselect)
      {
        newStateEvent.reset(new mitk::StateEvent(EIDNO, event));
      }

      // call HandleEvent to leave the guard-state
      this->HandleEvent( newStateEvent.get() );

      ok = true;
      break;
    }
  case AcTRANSLATESTART:
  case AcROTATESTART:
  case AcSCALESTART:
    {
      m_LastMousePosition = event->GetWorldPosition();
      ok = true;
      break;
    }
  case AcTRANSLATE:
    {
      mitk::Point3D newPosition;
      newPosition = event->GetWorldPosition();
      newPosition -= m_LastMousePosition.GetVectorFromOrigin(); // compute difference between current and last mouse position
      m_LastMousePosition = event->GetWorldPosition();          // save current mouse position as last position

      /* create operation with position difference */
      mitk::PointOperation* doOp = new mitk::PointOperation(OpMOVE, newPosition, 0); // index is not used here

      if (m_UndoEnabled) // write to undo mechanism
      {
        mitk::Point3D oldPosition = geometry->GetCornerPoint(0);
        PointOperation* undoOp = new mitk::PointOperation(OpMOVE, oldPosition, 0);
        OperationEvent* operationEvent = new OperationEvent(geometry, doOp, undoOp);
        m_UndoController->SetOperationEvent(operationEvent);
      }

      /* execute the operation */
      geometry->ExecuteOperation(doOp);
      if (!m_UndoEnabled)
        delete doOp;

      ok = true;
      break;
    }
  case AcTRANSLATEEND:
    {
      m_UndoController->SetOperationEvent(new UndoStackItem("Move object"));
      m_DataNode->InvokeEvent(TranslateEvent());
      break;
    }
  case AcROTATE:
    {
      mitk::Point3D p = event->GetWorldPosition();

      mitk::Vector3D newPosition = p.GetVectorFromOrigin();
      mitk::Point3D dataPosition = geometry->GetCenter();

      newPosition = newPosition - dataPosition.GetVectorFromOrigin(); // vector from the center of the data object to the current mouse position
      mitk::Vector3D startPosition = m_LastMousePosition.GetVectorFromOrigin() - dataPosition.GetVectorFromOrigin(); // vector from the center of the data object to the last mouse position

      /* calculate rotation axis (cross product of the two vectors) */
      mitk::Vector3D rotationaxis;
      rotationaxis[0] = startPosition[1] * newPosition[2] - startPosition[2] * newPosition[1];
      rotationaxis[1] = startPosition[2] * newPosition[0] - startPosition[0] * newPosition[2];
      rotationaxis[2] = startPosition[0] * newPosition[1] - startPosition[1] * newPosition[0];

      /* calculate rotation angle in degrees */
      mitk::ScalarType angle = atan2((mitk::ScalarType)rotationaxis.GetNorm(), (mitk::ScalarType)(newPosition * startPosition)) * (180 / vnl_math::pi);

      m_LastMousePosition = p; // save current mouse position as last mouse position

      /* create operation with center of rotation, angle and axis and send it to the geometry and undo controller */
      mitk::RotationOperation* doOp = new mitk::RotationOperation(OpROTATE, dataPosition, rotationaxis, angle);

      if (m_UndoEnabled) // write to undo mechanism
      {
        RotationOperation* undoOp = new mitk::RotationOperation(OpROTATE, dataPosition, rotationaxis, -angle);
        OperationEvent* operationEvent = new OperationEvent(geometry, doOp, undoOp);
        m_UndoController->SetOperationEvent(operationEvent);
      }

      /* execute the operation */
      geometry->ExecuteOperation(doOp);
      if (!m_UndoEnabled)
        delete doOp;

      ok = true;
      break;
    }
  case AcROTATEEND:
    {
      m_UndoController->SetOperationEvent(new UndoStackItem("Rotate object"));
      m_DataNode->InvokeEvent(RotateEvent());
      break;
    }
  case AcSCALE:
    {
      mitk::Point3D p = event->GetWorldPosition();
      mitk::Vector3D v = p - m_LastMousePosition;

      /* calculate scale changes */
      mitk::Point3D newScale;
      newScale[0] = (geometry->GetAxisVector(0) * v) / geometry->GetExtentInMM(0); // scalar product of the normalized axis
      newScale[1] = (geometry->GetAxisVector(1) * v) / geometry->GetExtentInMM(1); // and the direction vector of the mouse movement
      newScale[2] = (geometry->GetAxisVector(2) * v) / geometry->GetExtentInMM(2); // is the length of the movement vector's projection onto the axis

      /* convert movement to the local object coordinate system and mirror it to the positive quadrant */
      Vector3D start;
      Vector3D end;
      mitk::ScalarType convert[3];

      itk2vtk(m_LastMousePosition, convert);
      geometry->GetVtkTransform()->GetInverse()->TransformPoint(convert, convert); // transform start point to local object coordinates
      start[0] = fabs(convert[0]); start[1] = fabs(convert[1]); start[2] = fabs(convert[2]); // mirror it to the positive quadrant

      itk2vtk(p, convert);
      geometry->GetVtkTransform()->GetInverse()->TransformPoint(convert, convert); // transform end point to local object coordinates
      end[0] = fabs(convert[0]); end[1] = fabs(convert[1]); end[2] = fabs(convert[2]); // mirror it to the positive quadrant

      /* check if the mouse movement is towards or away from the object's axes and adjust the scale factors accordingly */
      Vector3D vLocal = start - end;
      newScale[0] = (vLocal[0] > 0.0) ? -fabs(newScale[0]) : +fabs(newScale[0]);
      newScale[1] = (vLocal[1] > 0.0) ? -fabs(newScale[1]) : +fabs(newScale[1]);
      newScale[2] = (vLocal[2] > 0.0) ? -fabs(newScale[2]) : +fabs(newScale[2]);

      m_LastMousePosition = p; // update last position for the next mouse move

      /* generate operation and send it to the receiving geometry */
      PointOperation* doOp = new mitk::PointOperation(OpSCALE, newScale, 0); // index is not used here

      if (m_UndoEnabled) // write to undo mechanism
      {
        mitk::Point3D oldScaleData;
        oldScaleData[0] = -newScale[0];
        oldScaleData[1] = -newScale[1];
        oldScaleData[2] = -newScale[2];

        PointOperation* undoOp = new mitk::PointOperation(OpSCALE, oldScaleData, 0);
        OperationEvent* operationEvent = new OperationEvent(geometry, doOp, undoOp);
        m_UndoController->SetOperationEvent(operationEvent);
      }

      /* execute the operation */
      geometry->ExecuteOperation(doOp);
      if (!m_UndoEnabled)
        delete doOp;

      /* update the volume property with the new value */
      /*
      mitk::BoundingObject* b = dynamic_cast<mitk::BoundingObject*>(m_DataNode->GetData());
      if (b != NULL)
      {
        m_DataNode->GetPropertyList()->SetProperty("volume", FloatProperty::New(b->GetVolume()));
        //MITK_INFO << "Volume of BoundingObject is " << b->GetVolume()/1000.0 << " ml" << std::endl;
      }
      */

      ok = true;
      break;
    }
  case AcSCALEEND:
    {
      m_UndoController->SetOperationEvent(new UndoStackItem("Scale object"));
      m_DataNode->InvokeEvent(ScaleEvent());
      break;
    }
  default:
    ok = Superclass::ExecuteAction(action, stateEvent); //, objectEventId, groupEventId);
  }

  mitk::RenderingManager::GetInstance()->RequestUpdateAll();

  return ok;
}
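// A minimal, self-contained sketch of the angle computation used in AcROTATE above:
// the rotation angle between the previous and the current mouse vector (both taken
// relative to the object's center) is atan2(|a x b|, a . b), converted to degrees.
// This helper is NOT part of MITK; the function name and the use of plain double[3]
// arrays are illustrative assumptions only.
#include <cmath>

static double rotationAngleDegrees(const double a[3], const double b[3])
{
  // cross product a x b
  double cx = a[1] * b[2] - a[2] * b[1];
  double cy = a[2] * b[0] - a[0] * b[2];
  double cz = a[0] * b[1] - a[1] * b[0];

  double crossNorm = std::sqrt(cx * cx + cy * cy + cz * cz); // |a x b| = |a||b| sin(angle)
  double dot = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];      // a . b  = |a||b| cos(angle)

  // atan2(sin-term, cos-term) is numerically robust for very small and near-180-degree angles
  return std::atan2(crossNorm, dot) * 180.0 / 3.14159265358979323846;
}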
bool mitk::WiiMoteInteractor::FixedRotationAndTranslation(const mitk::WiiMoteAllDataEvent* wiiMoteEvent)
{
  Geometry3D* geometry = this->TransformCurrentDataInGeometry3D();

  m_OrientationX = wiiMoteEvent->GetOrientationX();
  m_OrientationY = wiiMoteEvent->GetOrientationY();
  m_OrientationZ = wiiMoteEvent->GetOrientationZ();

  ScalarType pitchSpeed = wiiMoteEvent->GetPitchSpeed();
  ScalarType rollSpeed = wiiMoteEvent->GetRollSpeed();
  ScalarType yawSpeed = wiiMoteEvent->GetYawSpeed();

  // angle x
  if (std::abs(pitchSpeed) < 200)
    pitchSpeed = 0;
  m_xAngle += (pitchSpeed / 1500);

  // angle y
  if (std::abs(rollSpeed) < 200)
    rollSpeed = 0;
  m_yAngle += (rollSpeed / 1500);

  // angle z
  if (std::abs(yawSpeed) < 200)
    yawSpeed = 0;
  m_zAngle += (yawSpeed / 1500);

  if (   std::abs(pitchSpeed) > 200
      || std::abs(rollSpeed) > 200
      || std::abs(yawSpeed) > 200)
  {
    m_InRotation = true;

    //// depending on a combination of the
    //// orientation values, angleX will be altered,
    //// because the roll range reported by the
    //// wiimote is limited to -90° to 90°
    //if (wiiMoteEvent->GetOrientationZ() < 0)
    //{
    //  // value is positive
    //  if (wiiMoteEvent->GetOrientationX() > 0)
    //  {
    //    // the measured degree value decreases after the
    //    // "real-world" rotation passes 90 degrees
    //    // (rotation to the right side),
    //    // therefore it needs to be artificially increased:
    //    // measured value drops -> computed angle increases
    //    angleX = 90 - angleX;

    //    // now add the "new" angle to the 90 degree threshold
    //    angleX += 90;
    //  }
    //  // value is negative
    //  else if (wiiMoteEvent->GetOrientationX() < 0)
    //  {
    //    // the measured degree value increases after the
    //    // "real-world" rotation passes -90 degrees
    //    // (rotation to the left side),
    //    // therefore it needs to be artificially decreased
    //    // (example: -90 -> -70, but -110 is needed):
    //    // measured value increases -> computed angle decreases
    //    angleX = 90 + angleX;

    //    // invert the algebraic sign, because it is the "negative"
    //    // side of the rotation
    //    angleX = -angleX;

    //    // now add the negative value to the -90 degree threshold
    //    // to decrease the value further
    //    angleX -= 90;
    //  }
    //  else if (wiiMoteEvent->GetOrientationX() == 0)
    //  {
    //    // i.e. wiimote is flipped upside down
    //    angleX = 180;
    //  }
    //}

    // rotation
    vtkTransform *vtkTransform = vtkTransform::New();

    // copy m_vtkMatrix to m_VtkIndexToWorldTransform
    geometry->TransferItkToVtkTransform();

    // m_VtkIndexToWorldTransform as vtkLinearTransform*
    vtkTransform->SetMatrix(geometry->GetVtkTransform()->GetMatrix());

    // rotation from the center is different
    // from rotation while translated,
    // hence one needs the center of the object
    Point3D center = geometry->GetOrigin();

    vtkTransform->PostMultiply();
    vtkTransform->Translate(-center[0], -center[1], -center[2]);
    //vtkTransform->RotateWXYZ(angle, rotationVector[0], rotationVector[1], rotationVector[2]);
    vtkTransform->RotateX(m_xAngle);
    vtkTransform->RotateY(m_zAngle);
    vtkTransform->RotateZ(m_yAngle);
    vtkTransform->Translate(center[0], center[1], center[2]);
    vtkTransform->PreMultiply();

    geometry->SetIndexToWorldTransformByVtkMatrix(vtkTransform->GetMatrix());
    geometry->Modified();

    // indicate modification of the data tree node
    m_DataNode->Modified();

    vtkTransform->Delete();

    // update rendering
    mitk::RenderingManager::GetInstance()->RequestUpdateAll();

    return true;
  }
  else if (!m_InRotation)
  {
    float xValue = wiiMoteEvent->GetXAcceleration();
    float yValue = wiiMoteEvent->GetYAcceleration();
    float zValue = wiiMoteEvent->GetZAcceleration();

    float pitch = wiiMoteEvent->GetPitch();
    float roll = wiiMoteEvent->GetRoll();

    // subtracts the proportionate force
    // applied by gravity, depending on the
    // orientation
    float sinP = sin(pitch / 180.0 * M_PI);
    float cosP = cos(pitch / 180.0 * M_PI);
    float sinR = sin(roll / 180.0 * M_PI);
    float cosR = cos(roll / 180.0 * M_PI);

    // x acceleration
    if (m_OrientationZ >= 0)
      xValue = xValue - sinR * cosP;
    else
      xValue = xValue + sinR * cosP;

    // against drift
    if (std::abs(xValue) < 0.2)
      xValue = 0;

    // y acceleration
    yValue = yValue + sinP;

    // against drift
    if (std::abs(yValue) < 0.2)
      yValue = 0;

    // z acceleration
    zValue = zValue - cosP * cosR;

    // against drift
    if (std::abs(zValue) < 0.3)
      zValue = 0;

    // simple integration over time,
    // resulting in velocity
    switch (m_TranslationMode)
    {
    case 1:
      m_xVelocity -= xValue;
      m_yVelocity -= yValue;
      m_zVelocity += zValue;

      // 1 = movement to the right
      //     initially starts with negative acceleration
      // 2 = movement to the left
      //     initially starts with positive acceleration
      if (   m_xVelocity > 0 && xValue > 0  // 1
          || m_xVelocity < 0 && xValue < 0) // 2
      {
        m_xVelocity += xValue;
      }
      else if (   m_xVelocity > 0 && xValue < 0  // 1
               || m_xVelocity < 0 && xValue > 0) // 2
      {
        m_xVelocity -= xValue;
      }
      break;
    case 3:
      m_yVelocity -= yValue;
      break;
    case 4:
      // 1 = movement up
      //     initially starts with positive acceleration
      // 2 = movement down
      //     initially starts with negative acceleration
      if (   m_zVelocity > 0 && zValue < 0  // 1
          || m_zVelocity < 0 && zValue > 0) // 2
      {
        m_zVelocity -= zValue;
      }
      else if (   m_zVelocity > 0 && zValue > 0  // 1
               || m_zVelocity < 0 && zValue < 0) // 2
      {
        m_zVelocity += zValue;
      }
      break;
    }

    // sets the mode of the translation
    // depending on the initial velocity
    if (   std::abs(m_xVelocity) > std::abs(m_yVelocity)
        && std::abs(m_xVelocity) > std::abs(m_zVelocity))
    {
      m_TranslationMode = 2;
      m_yVelocity = 0;
      m_zVelocity = 0;
    }
    else if (   std::abs(m_yVelocity) > std::abs(m_xVelocity)
             && std::abs(m_yVelocity) > std::abs(m_zVelocity))
    {
      m_TranslationMode = 3;
      m_xVelocity = 0;
      m_zVelocity = 0;
    }
    else if (   std::abs(m_zVelocity) > std::abs(m_xVelocity)
             && std::abs(m_zVelocity) > std::abs(m_yVelocity))
    {
      m_TranslationMode = 4;
      m_xVelocity = 0;
      m_yVelocity = 0;
    }

    // translation
    mitk::Vector3D movementVector;
    movementVector.SetElement(0, m_xVelocity);
    movementVector.SetElement(1, m_yVelocity);
    movementVector.SetElement(2, m_zVelocity);

    geometry->Translate(movementVector);

    // indicate modification of the data tree node
    m_DataNode->Modified();

    // update rendering
    mitk::RenderingManager::GetInstance()->RequestUpdateAll();

    return true;
  }

  return false;
}
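// A minimal sketch of the rotate-about-center pattern used in the rotation branch above,
// isolated from the interactor: translate the object's center to the origin, apply the
// rotations, then translate back. It uses only VTK calls already referenced above; the
// free function and variable names are illustrative assumptions, not MITK/VTK API.
#include <vtkSmartPointer.h>
#include <vtkTransform.h>
#include <vtkMatrix4x4.h>

static vtkSmartPointer<vtkMatrix4x4> RotateAboutCenter(
  vtkMatrix4x4 *indexToWorld, const double center[3],
  double angleXDeg, double angleYDeg, double angleZDeg)
{
  vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
  transform->SetMatrix(indexToWorld); // start from the current index-to-world matrix

  transform->PostMultiply();          // append the following operations in world coordinates
  transform->Translate(-center[0], -center[1], -center[2]); // move rotation center to the origin
  transform->RotateX(angleXDeg);
  transform->RotateY(angleYDeg);
  transform->RotateZ(angleZDeg);
  transform->Translate(center[0], center[1], center[2]);    // move it back

  vtkSmartPointer<vtkMatrix4x4> result = vtkSmartPointer<vtkMatrix4x4>::New();
  result->DeepCopy(transform->GetMatrix());
  return result;
}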
void mitk::Geometry2DDataToSurfaceFilter::GenerateOutputInformation()
{
  mitk::Geometry2DData::ConstPointer input = this->GetInput();
  mitk::Surface::Pointer output = this->GetOutput();

  if ( input.IsNull()
    || (input->GetGeometry2D() == NULL)
    || (input->GetGeometry2D()->IsValid() == false)
    || (m_UseBoundingBox && (m_BoundingBox.IsNull() || (m_BoundingBox->GetDiagonalLength2() < mitk::eps))) )
  {
    return;
  }

  Point3D origin;
  Point3D right, bottom;

  vtkPolyData *planeSurface = NULL;

  // Does the Geometry2DData contain a PlaneGeometry?
  if ( dynamic_cast< PlaneGeometry * >( input->GetGeometry2D() ) != NULL )
  {
    mitk::PlaneGeometry *planeGeometry = dynamic_cast< PlaneGeometry * >( input->GetGeometry2D() );

    if ( m_PlaceByGeometry )
    {
      // Let the output use the input geometry to appropriately transform the
      // coordinate system.
      mitk::Geometry3D::TransformType *affineTransform = planeGeometry->GetIndexToWorldTransform();

      TimeGeometry *timeGeometry = output->GetTimeGeometry();
      Geometry3D *geometrie3d = timeGeometry->GetGeometryForTimeStep( 0 );
      geometrie3d->SetIndexToWorldTransform( affineTransform );
    }

    if ( !m_UseBoundingBox )
    {
      // We do not have a bounding box, so no clipping is required.

      if ( m_PlaceByGeometry )
      {
        // Derive coordinate axes and origin from the input geometry extent
        origin.Fill( 0.0 );
        FillVector3D( right, planeGeometry->GetExtent(0), 0.0, 0.0 );
        FillVector3D( bottom, 0.0, planeGeometry->GetExtent(1), 0.0 );
      }
      else
      {
        // Take the coordinate axes and origin directly from the input geometry.
        origin = planeGeometry->GetOrigin();
        right = planeGeometry->GetCornerPoint( false, true );
        bottom = planeGeometry->GetCornerPoint( true, false );
      }

      // Since the plane is planar, there is no need to subdivide the grid
      // (cf. AbstractTransformGeometry case)
      m_PlaneSource->SetXResolution( 1 );
      m_PlaneSource->SetYResolution( 1 );

      m_PlaneSource->SetOrigin( origin[0], origin[1], origin[2] );
      m_PlaneSource->SetPoint1( right[0], right[1], right[2] );
      m_PlaneSource->SetPoint2( bottom[0], bottom[1], bottom[2] );

      m_PlaneSource->Update();
      planeSurface = m_PlaneSource->GetOutput();
    }
    else
    {
      // Set up a cube with the extent and origin of the bounding box. This
      // cube will be clipped by a plane later on. The intersection of the
      // cube and the plane will be the surface we are interested in. Note
      // that the bounding box needs to be explicitly specified by the user
      // of this class, since it is not necessarily clear from the data
      // available herein which bounding box to use. In most cases, this
      // would be the bounding box of the input geometry's reference
      // geometry, but this is not an inevitable requirement.
      mitk::BoundingBox::PointType boundingBoxMin = m_BoundingBox->GetMinimum();
      mitk::BoundingBox::PointType boundingBoxMax = m_BoundingBox->GetMaximum();
      mitk::BoundingBox::PointType boundingBoxCenter = m_BoundingBox->GetCenter();

      m_CubeSource->SetXLength( boundingBoxMax[0] - boundingBoxMin[0] );
      m_CubeSource->SetYLength( boundingBoxMax[1] - boundingBoxMin[1] );
      m_CubeSource->SetZLength( boundingBoxMax[2] - boundingBoxMin[2] );
      m_CubeSource->SetCenter( boundingBoxCenter[0], boundingBoxCenter[1], boundingBoxCenter[2] );

      // Now we have to transform the cube, so that it will cut our plane
      // appropriately. (As can be seen below, the plane corresponds to the
      // z-plane in the coordinate system and is *not* transformed.) Therefore,
      // we get the inverse of the plane geometry's transform and concatenate
      // it with the transform of the reference geometry, if available.
      m_Transform->Identity();
      m_Transform->Concatenate( planeGeometry->GetVtkTransform()->GetLinearInverse() );

      Geometry3D *referenceGeometry = planeGeometry->GetReferenceGeometry();
      if ( referenceGeometry )
      {
        m_Transform->Concatenate( referenceGeometry->GetVtkTransform() );
      }

      // Transform the cube accordingly (s.a.)
      m_PolyDataTransformer->SetInputConnection( m_CubeSource->GetOutputPort() );
      m_PolyDataTransformer->SetTransform( m_Transform );

      // Initialize the plane to clip the cube with, as lying on the z-plane
      m_Plane->SetOrigin( 0.0, 0.0, 0.0 );
      m_Plane->SetNormal( 0.0, 0.0, 1.0 );

      // Cut the plane with the cube.
      m_PlaneCutter->SetInputConnection( m_PolyDataTransformer->GetOutputPort() );
      m_PlaneCutter->SetCutFunction( m_Plane );

      // The output of the cutter must be converted into appropriate poly data.
      m_PlaneStripper->SetInputConnection( m_PlaneCutter->GetOutputPort() );
      m_PlaneStripper->Update();

      if ( m_PlaneStripper->GetOutput()->GetNumberOfPoints() < 3 )
      {
        return;
      }

      m_PlanePolyData->SetPoints( m_PlaneStripper->GetOutput()->GetPoints() );
      m_PlanePolyData->SetPolys( m_PlaneStripper->GetOutput()->GetLines() );

      m_PlaneTriangler->SetInputData( m_PlanePolyData );

      // Get bounds of the resulting surface and use it to generate the texture
      // mapping information
      m_PlaneTriangler->Update();
      m_PlaneTriangler->GetOutput()->ComputeBounds();
      double *surfaceBounds = m_PlaneTriangler->GetOutput()->GetBounds();

      origin[0] = surfaceBounds[0];
      origin[1] = surfaceBounds[2];
      origin[2] = surfaceBounds[4];

      right[0] = surfaceBounds[1];
      right[1] = surfaceBounds[2];
      right[2] = surfaceBounds[4];

      bottom[0] = surfaceBounds[0];
      bottom[1] = surfaceBounds[3];
      bottom[2] = surfaceBounds[4];

      // Now we tell the data how it shall be textured afterwards;
      // description see above.
      m_TextureMapToPlane->SetInputConnection( m_PlaneTriangler->GetOutputPort() );
      m_TextureMapToPlane->AutomaticPlaneGenerationOn();
      m_TextureMapToPlane->SetOrigin( origin[0], origin[1], origin[2] );
      m_TextureMapToPlane->SetPoint1( right[0], right[1], right[2] );
      m_TextureMapToPlane->SetPoint2( bottom[0], bottom[1], bottom[2] );

      // Need to call update so that output data and bounds are immediately
      // available
      m_TextureMapToPlane->Update();

      // Return the output of this generation process
      planeSurface = dynamic_cast< vtkPolyData * >( m_TextureMapToPlane->GetOutput() );
    }
  }
  // Does the Geometry2DData contain an AbstractTransformGeometry?
  else if ( mitk::AbstractTransformGeometry *abstractGeometry =
            dynamic_cast< AbstractTransformGeometry * >( input->GetGeometry2D() ) )
  {
    // In the case of an AbstractTransformGeometry (which holds a possibly
    // non-rigid transform), we proceed slightly differently: since the
    // plane can be arbitrarily deformed, we need to transform it by the
    // abstract transform before clipping it. The setup for this is partially
    // done in the constructor.
    origin = abstractGeometry->GetPlane()->GetOrigin();
    right = origin + abstractGeometry->GetPlane()->GetAxisVector( 0 );
    bottom = origin + abstractGeometry->GetPlane()->GetAxisVector( 1 );

    // Define the plane
    m_PlaneSource->SetOrigin( origin[0], origin[1], origin[2] );
    m_PlaneSource->SetPoint1( right[0], right[1], right[2] );
    m_PlaneSource->SetPoint2( bottom[0], bottom[1], bottom[2] );

    // Set the plane's resolution (unlike for non-deformable planes, the plane
    // grid needs to have a certain resolution so that the deformation has the
    // desired effect).
    if ( m_UseGeometryParametricBounds )
    {
      m_PlaneSource->SetXResolution( (int)abstractGeometry->GetParametricExtent(0) );
      m_PlaneSource->SetYResolution( (int)abstractGeometry->GetParametricExtent(1) );
    }
    else
    {
      m_PlaneSource->SetXResolution( m_XResolution );
      m_PlaneSource->SetYResolution( m_YResolution );
    }

    if ( m_PlaceByGeometry )
    {
      // Let the output use the input geometry to appropriately transform the
      // coordinate system.
      mitk::Geometry3D::TransformType *affineTransform = abstractGeometry->GetIndexToWorldTransform();

      TimeGeometry *timeGeometry = output->GetTimeGeometry();
      Geometry3D *g3d = timeGeometry->GetGeometryForTimeStep( 0 );
      g3d->SetIndexToWorldTransform( affineTransform );

      vtkGeneralTransform *composedResliceTransform = vtkGeneralTransform::New();
      composedResliceTransform->Identity();
      composedResliceTransform->Concatenate( abstractGeometry->GetVtkTransform()->GetLinearInverse() );
      composedResliceTransform->Concatenate( abstractGeometry->GetVtkAbstractTransform() );

      // Use the non-rigid transform for transforming the plane.
      m_VtkTransformPlaneFilter->SetTransform( composedResliceTransform );
    }
    else
    {
      // Use the non-rigid transform for transforming the plane.
      m_VtkTransformPlaneFilter->SetTransform( abstractGeometry->GetVtkAbstractTransform() );
    }

    if ( m_UseBoundingBox )
    {
      mitk::BoundingBox::PointType boundingBoxMin = m_BoundingBox->GetMinimum();
      mitk::BoundingBox::PointType boundingBoxMax = m_BoundingBox->GetMaximum();
      //mitk::BoundingBox::PointType boundingBoxCenter = m_BoundingBox->GetCenter();

      m_Box->SetXMin( boundingBoxMin[0], boundingBoxMin[1], boundingBoxMin[2] );
      m_Box->SetXMax( boundingBoxMax[0], boundingBoxMax[1], boundingBoxMax[2] );
    }
    else
    {
      // Plane will not be clipped
      m_Box->SetXMin( -10000.0, -10000.0, -10000.0 );
      m_Box->SetXMax( 10000.0, 10000.0, 10000.0 );
    }

    m_Transform->Identity();
    m_Transform->Concatenate( input->GetGeometry2D()->GetVtkTransform() );
    m_Transform->PreMultiply();

    m_Box->SetTransform( m_Transform );

    m_PlaneClipper->SetInputConnection( m_VtkTransformPlaneFilter->GetOutputPort() );
    m_PlaneClipper->SetClipFunction( m_Box );
    m_PlaneClipper->GenerateClippedOutputOff(); // important to NOT generate normals data for the clipped part
    m_PlaneClipper->InsideOutOn();
    m_PlaneClipper->SetValue( 0.0 );
    m_PlaneClipper->Update();

    planeSurface = m_PlaneClipper->GetOutput();
  }

  m_NormalsUpdater->SetInputData( planeSurface );
  m_NormalsUpdater->AutoOrientNormalsOn(); // that's the trick! Brings consistency between
                                           // normals direction and front/back faces direction (see bug 1440)
  m_NormalsUpdater->ComputePointNormalsOn();
  m_NormalsUpdater->Update();

  output->SetVtkPolyData( m_NormalsUpdater->GetOutput() );
  output->CalculateBoundingBox();
}
void mitk::SurfaceToImageFilter::Stencil3DImage(int time)
{
  const mitk::TimeGeometry *surfaceTimeGeometry = GetInput()->GetTimeGeometry();
  const mitk::TimeGeometry *imageTimeGeometry = GetImage()->GetTimeGeometry();

  // Convert time step from image time-frame to surface time-frame
  mitk::TimePointType matchingTimePoint = imageTimeGeometry->TimeStepToTimePoint(time);
  mitk::TimeStepType surfaceTimeStep = surfaceTimeGeometry->TimePointToTimeStep(matchingTimePoint);

  vtkPolyData *polydata = ( (mitk::Surface*)GetInput() )->GetVtkPolyData( surfaceTimeStep );

  vtkSmartPointer<vtkTransformPolyDataFilter> move = vtkTransformPolyDataFilter::New();
  move->SetInputData(polydata);
  move->ReleaseDataFlagOn();

  vtkSmartPointer<vtkTransform> transform = vtkTransform::New();
  Geometry3D* geometry = surfaceTimeGeometry->GetGeometryForTimeStep( surfaceTimeStep );
  geometry->TransferItkToVtkTransform();
  transform->PostMultiply();
  transform->Concatenate(geometry->GetVtkTransform()->GetMatrix());

  // take image geometry into account. vtk-Image information will be changed to unit spacing and zero origin below.
  Geometry3D* imageGeometry = imageTimeGeometry->GetGeometryForTimeStep(time);
  imageGeometry->TransferItkToVtkTransform();
  transform->Concatenate(imageGeometry->GetVtkTransform()->GetLinearInverse());

  move->SetTransform(transform);

  vtkSmartPointer<vtkPolyDataNormals> normalsFilter = vtkPolyDataNormals::New();
  normalsFilter->SetFeatureAngle(50);
  normalsFilter->SetConsistency(1);
  normalsFilter->SetSplitting(1);
  normalsFilter->SetFlipNormals(0);
  normalsFilter->ReleaseDataFlagOn();
  normalsFilter->SetInputConnection( move->GetOutputPort() );

  vtkSmartPointer<vtkPolyDataToImageStencil> surfaceConverter = vtkPolyDataToImageStencil::New();
  surfaceConverter->SetTolerance( 0.0 );
  surfaceConverter->ReleaseDataFlagOn();
  surfaceConverter->SetInputConnection( normalsFilter->GetOutputPort() );

  mitk::Image::Pointer binaryImage = mitk::Image::New();

  if (m_MakeOutputBinary)
  {
    binaryImage->Initialize(mitk::MakeScalarPixelType<unsigned char>(), *this->GetImage()->GetTimeGeometry());

    unsigned int size = sizeof(unsigned char);
    for (unsigned int i = 0; i < binaryImage->GetDimension(); ++i)
      size *= binaryImage->GetDimension(i);

    mitk::ImageWriteAccessor accessor( binaryImage );
    memset( accessor.GetData(), 1, size );
  }

  vtkImageData *image = m_MakeOutputBinary
    ? binaryImage->GetVtkImageData(time)
    : const_cast<mitk::Image *>(this->GetImage())->GetVtkImageData(time);

  // Create stencil and use numerical minimum of pixel type as background value
  vtkSmartPointer<vtkImageStencil> stencil = vtkImageStencil::New();
  stencil->SetInputData(image);
  stencil->ReverseStencilOff();
  stencil->ReleaseDataFlagOn();
  stencil->SetStencilConnection(surfaceConverter->GetOutputPort());

  stencil->SetBackgroundValue(m_MakeOutputBinary ? 0 : m_BackgroundValue);
  stencil->Update();

  mitk::Image::Pointer output = this->GetOutput();
  output->SetVolume( stencil->GetOutput()->GetScalarPointer(), time );

  MITK_INFO << "stencil ref count: " << stencil->GetReferenceCount() << std::endl;
}
void mitk::SurfaceToImageFilter::Stencil3DImage(int time)
{
  const mitk::TimeSlicedGeometry *surfaceTimeGeometry = GetInput()->GetTimeSlicedGeometry();
  const mitk::TimeSlicedGeometry *imageTimeGeometry = GetImage()->GetTimeSlicedGeometry();

  // Convert time step from image time-frame to surface time-frame
  int surfaceTimeStep = surfaceTimeGeometry->TimeStepToTimeStep( imageTimeGeometry, time );

  vtkPolyData *polydata = ( (mitk::Surface*)GetInput() )->GetVtkPolyData( surfaceTimeStep );

  vtkTransformPolyDataFilter *move = vtkTransformPolyDataFilter::New();
  move->SetInput(polydata);
  move->ReleaseDataFlagOn();

  vtkTransform *transform = vtkTransform::New();
  Geometry3D* geometry = surfaceTimeGeometry->GetGeometry3D( surfaceTimeStep );
  geometry->TransferItkToVtkTransform();
  transform->PostMultiply();
  transform->Concatenate(geometry->GetVtkTransform()->GetMatrix());

  // take image geometry into account. vtk-Image information will be changed to unit spacing and zero origin below.
  Geometry3D* imageGeometry = imageTimeGeometry->GetGeometry3D(time);
  imageGeometry->TransferItkToVtkTransform();
  transform->Concatenate(imageGeometry->GetVtkTransform()->GetLinearInverse());

  move->SetTransform(transform);
  transform->Delete();

  vtkPolyDataNormals *normalsFilter = vtkPolyDataNormals::New();
  normalsFilter->SetFeatureAngle(50);
  normalsFilter->SetConsistency(1);
  normalsFilter->SetSplitting(1);
  normalsFilter->SetFlipNormals(0);
  normalsFilter->ReleaseDataFlagOn();
  normalsFilter->SetInput( move->GetOutput() );
  move->Delete();

  vtkPolyDataToImageStencil *surfaceConverter = vtkPolyDataToImageStencil::New();
  surfaceConverter->SetTolerance( 0.0 );
  surfaceConverter->ReleaseDataFlagOn();
  surfaceConverter->SetInput( normalsFilter->GetOutput() );
  normalsFilter->Delete();

  vtkImageData *image = const_cast< mitk::Image * >(this->GetImage())->GetVtkImageData( time );

  // Create stencil and use numerical minimum of pixel type as background value
  vtkImageStencil *stencil = vtkImageStencil::New();
  stencil->SetInput( image );
  stencil->ReverseStencilOff();
  stencil->ReleaseDataFlagOn();
  stencil->SetStencil( surfaceConverter->GetOutput() );
  surfaceConverter->Delete();

  if (m_MakeOutputBinary)
  {
    stencil->SetBackgroundValue( image->GetScalarTypeMin() );

    vtkImageThreshold *threshold = vtkImageThreshold::New();
    threshold->SetInput( stencil->GetOutput() );
    threshold->ThresholdByLower( image->GetScalarTypeMin() );
    threshold->ReplaceInOn();
    threshold->ReplaceOutOn();
    threshold->SetInValue( 0 );
    threshold->SetOutValue( 1 );
    threshold->SetOutputScalarTypeToUnsignedChar();
    threshold->Update();

    mitk::Image::Pointer output = this->GetOutput();
    output->SetVolume( threshold->GetOutput()->GetScalarPointer(), time );
    threshold->Delete();
  }
  else
  {
    stencil->SetBackgroundValue( m_BackgroundValue );
    stencil->Update();

    mitk::Image::Pointer output = this->GetOutput();
    output->SetVolume( stencil->GetOutput()->GetScalarPointer(), time );

    MITK_INFO << "stencil ref count: " << stencil->GetReferenceCount() << std::endl;
  }

  stencil->Delete();
}