//---------------------------------------------------------------
void CameraExporter::exportCamera( ExportNode* exportNode )
{
    if ( !exportNode->getIsInVisualScene() )
        return;

    String cameraId = getCameraId(*exportNode);

    INode* iNode = exportNode->getINode();
    CameraObject* camera = (CameraObject*)iNode->GetObjectRef();

    if ( camera )
    {
        if ( mDocumentExporter->isExportedObject(ObjectIdentifier(camera)) )
            return;

        mDocumentExporter->insertExportedObject(ObjectIdentifier(camera), exportNode);

        // Only target (look-at) cameras have a separate target node.
        INode* targetNode = ( camera->ClassID().PartA() == LOOKAT_CAM_CLASS_ID ) ? iNode->GetTarget() : 0;

        // Retrieve the camera parameters block
        IParamBlock* parameters = (IParamBlock*)camera->GetReference(MaxCamera::PBLOCK_REF);

        COLLADASW::BaseOptic* optics = 0;

        if ( camera->IsOrtho() )
        {
            optics = new COLLADASW::OrthographicOptic(COLLADASW::LibraryCameras::mSW);

            // Calculate the target distance for FOV calculations
            float targetDistance;
            if ( targetNode )
            {
                Point3 targetTrans = targetNode->GetNodeTM(mDocumentExporter->getOptions().getAnimationStart()).GetTrans();
                Point3 cameraTrans = iNode->GetNodeTM(mDocumentExporter->getOptions().getAnimationStart()).GetTrans();
                targetDistance = (targetTrans - cameraTrans).Length();
            }
            else
            {
                targetDistance = camera->GetTDist(mDocumentExporter->getOptions().getAnimationStart());
            }

            ConversionInverseOrthoFOVFunctor conversionInverseOrthoFOVFunctor(targetDistance);

            if ( AnimationExporter::isAnimated(parameters, MaxCamera::FOV) )
            {
                optics->setXMag(conversionInverseOrthoFOVFunctor(parameters->GetFloat(MaxCamera::FOV)), XMAG_SID);
                mAnimationExporter->addAnimatedParameter(parameters, MaxCamera::FOV, cameraId, XMAG_SID, 0, true, &conversionInverseOrthoFOVFunctor);
            }
            else
            {
                optics->setXMag(conversionInverseOrthoFOVFunctor(parameters->GetFloat(MaxCamera::FOV)));
            }
        }
        else
        {
            optics = new COLLADASW::PerspectiveOptic(COLLADASW::LibraryCameras::mSW);

            if ( AnimationExporter::isAnimated(parameters, MaxCamera::FOV) )
            {
                optics->setXFov(COLLADASW::MathUtils::radToDegF(parameters->GetFloat(MaxCamera::FOV)), XFOV_SID);
                mAnimationExporter->addAnimatedParameter(parameters, MaxCamera::FOV, cameraId, XFOV_SID, 0, true, &ConversionFunctors::radToDeg);
            }
            else
            {
                optics->setXFov(COLLADASW::MathUtils::radToDegF(parameters->GetFloat(MaxCamera::FOV)));
            }
        }

        // Near and far clipping planes, animated if the parameter block says so.
        bool hasAnimatedZNear = mAnimationExporter->addAnimatedParameter(parameters, MaxCamera::NEAR_CLIP, cameraId, optics->getZNearDefaultSid(), 0);
        optics->setZNear(parameters->GetFloat(MaxCamera::NEAR_CLIP), hasAnimatedZNear);

        bool hasAnimatedZFar = mAnimationExporter->addAnimatedParameter(parameters, MaxCamera::FAR_CLIP, cameraId, optics->getZFarDefaultSid(), 0);
        optics->setZFar(parameters->GetFloat(MaxCamera::FAR_CLIP), hasAnimatedZFar);

#ifdef UNICODE
        String exportNodeName = COLLADABU::StringUtils::wideString2utf8String(exportNode->getINode()->GetName());
        COLLADASW::Camera colladaCamera(COLLADASW::LibraryCameras::mSW, optics, cameraId, COLLADASW::Utils::checkNCName(exportNodeName));
#else
        COLLADASW::Camera colladaCamera(COLLADASW::LibraryCameras::mSW, optics, cameraId, COLLADASW::Utils::checkNCName(exportNode->getINode()->GetName()));
#endif

        setExtraTechnique(&colladaCamera);

        // Retrieve the camera target
        if ( targetNode )
        {
            ExportNode* targetExportNode = mExportSceneGraph->getExportNode(targetNode);
            addExtraParameter(EXTRA_PARAMETER_TARGET, "#" + targetExportNode->getId());
        }

        if ( camera->GetMultiPassEffectEnabled(0, FOREVER) )
        {
            IMultiPassCameraEffect* multiPassCameraEffect = camera->GetIMultiPassCameraEffect();
            if ( multiPassCameraEffect )
            {
                Class_ID id = multiPassCameraEffect->ClassID();

                // The camera could have both effects, but not in Max.
                if ( id == FMULTI_PASS_MOTION_BLUR_CLASS_ID )
                {
                    IParamBlock2* parameters = multiPassCameraEffect->GetParamBlock(0);
                    if ( parameters )
                    {
                        addParamBlockAnimatedExtraParameters(MOTION_BLUR_ELEMENT, MOTION_BLUR_PARAMETERS, MOTION_BLUR_PARAMETER_COUNT, parameters, cameraId);
                    }
                }
                else if ( id == FMULTI_PASS_DOF_CLASS_ID )
                {
                    IParamBlock2* parameters = multiPassCameraEffect->GetParamBlock(0);
                    if ( parameters )
                    {
                        addParamBlockAnimatedExtraParameters(DEPTH_OF_FIELD_ELEMENT, DEPTH_OF_FIELD_PARAMETERS, DEPTH_OF_FIELD_PARAMETER_COUNT, parameters, cameraId);
                        addExtraParameter(TARGETDISTANCE_PARAMETER, camera->GetTDist(0));
                    }
                }
            }
        }

        addCamera(colladaCamera);

        delete optics;
    }
}
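
//---------------------------------------------------------------
// Illustrative sketch (not part of the exporter above): COLLADA orthographic
// optics store a half-width magnification (<xmag>), while Max stores an FOV
// angle, so the exporter converts using the distance to the target plane.
// ConversionInverseOrthoFOVFunctor is assumed to implement a relation along
// these lines; the struct and names below are hypothetical stand-ins, not
// the OpenCOLLADA implementation.
#include <cmath>

struct OrthoXMagFromFov
{
    float targetDistance;   // distance from the camera to its target plane

    explicit OrthoXMagFromFov( float distance ) : targetDistance(distance) {}

    // fov is the Max camera FOV in radians; the result is the orthographic
    // half-width that would be written to <xmag>.
    float operator()( float fov ) const
    {
        return targetDistance * std::tan(fov / 2.0f);
    }
};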
bool AlembicCamera::Save(double time, bool bLastFrame)
{
  TimeValue ticks = GetTimeValueFromFrame(time);
  Object *obj = mMaxNode->EvalWorldState(ticks).obj;

  if (mNumSamples == 0) {
    bForever = CheckIfObjIsValidForever(obj, ticks);
  }
  else {
    bool bNewForever = CheckIfObjIsValidForever(obj, ticks);
    if (bForever && bNewForever != bForever) {
      ESS_LOG_INFO("bForever has changed");
    }
  }
  bForever = false;

  SaveMetaData(mMaxNode, this);

  // Set the xform sample
  Matrix3 wm = mMaxNode->GetObjTMAfterWSM(ticks);
  if (mJob) {
    Point3 worldMaxPoint = wm.GetTrans();
    Abc::V3f alembicWorldPoint = ConvertMaxPointToAlembicPoint(worldMaxPoint);
    mJob->GetArchiveBBox().extendBy(alembicWorldPoint);
  }

  // Check if the camera is animated
  if (mNumSamples > 0) {
    if (bForever) {
      return true;
    }
  }

  // Get a pointer to a CameraObject from the INode, or return false if the
  // node cannot be converted to a camera.
  CameraObject *cam = NULL;
  if (obj->CanConvertToType(Class_ID(SIMPLE_CAM_CLASS_ID, 0))) {
    cam = reinterpret_cast<CameraObject *>(
        obj->ConvertToType(ticks, Class_ID(SIMPLE_CAM_CLASS_ID, 0)));
  }
  else if (obj->CanConvertToType(Class_ID(LOOKAT_CAM_CLASS_ID, 0))) {
    cam = reinterpret_cast<CameraObject *>(
        obj->ConvertToType(ticks, Class_ID(LOOKAT_CAM_CLASS_ID, 0)));
  }
  else {
    return false;
  }

  CameraState cs;
  Interval valid = FOREVER;
  cam->EvalCameraState(ticks, valid, &cs);
  float tDist = cam->GetTDist(ticks);
  float ratio = GetCOREInterface()->GetRendImageAspect();
  float aperatureWidth = GetCOREInterface()->GetRendApertureWidth();  // this may differ from the imported value, unfortunately
  float focalLength = (float)((aperatureWidth / 2.0) / tan(cs.fov / 2.0));  // Alembic wants this one in millimeters

  aperatureWidth /= 10.0f;  // convert to centimeters

  // Read the depth-of-field settings from the multi-pass camera effect.
  IMultiPassCameraEffect *pCameraEffect = cam->GetIMultiPassCameraEffect();
  Interval interval = FOREVER;

  BOOL bUseTargetDistance = FALSE;
  float fFocalDepth = 0.0f;
  if (pCameraEffect) {
    const int TARGET_DISTANCE = 0;
    pCameraEffect->GetParamBlockByID(0)->GetValue(TARGET_DISTANCE, ticks, bUseTargetDistance, interval);

    const int FOCAL_DEPTH = 1;
    pCameraEffect->GetParamBlockByID(0)->GetValue(FOCAL_DEPTH, ticks, fFocalDepth, interval);
  }

  // Store the camera data
  mCameraSample.setNearClippingPlane(cs.hither);
  mCameraSample.setFarClippingPlane(cs.yon);
  // mCameraSample.setLensSqueezeRatio(ratio);
  // Should be set to 1.0 according to the article
  // "Maya to Softimage: Camera Interoperability".
  mCameraSample.setLensSqueezeRatio(1.0);
  mCameraSample.setFocalLength(focalLength);
  mCameraSample.setHorizontalAperture(aperatureWidth);
  mCameraSample.setVerticalAperture(aperatureWidth / ratio);
  if (bUseTargetDistance) {
    mCameraSample.setFocusDistance(tDist);
  }
  else {
    mCameraSample.setFocusDistance(fFocalDepth);
  }

  // Save the sample
  mCameraSchema.set(mCameraSample);

  mNumSamples++;

  // Note that the CameraObject should only be deleted if its pointer is not
  // equal to the object pointer that called ConvertToType().
  if (cam != NULL && obj != cam) {
    delete cam;
    cam = NULL;
    return false;
  }

  return true;
}
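
// Illustrative sketch (not part of the plug-in): the pinhole relation used in
// Save() above to derive a focal length from the Max camera FOV and the render
// aperture width. Units follow the convention in Save(): the aperture is
// passed in millimeters here, and is divided by 10 separately because
// Alembic's CameraSample stores apertures in centimeters while the focal
// length stays in millimeters. The function name is hypothetical.
#include <cmath>

static float focalLengthFromFov(float fovRadians, float apertureWidthMm)
{
  // focal length (mm) = (aperture width / 2) / tan(fov / 2)
  return static_cast<float>((apertureWidthMm / 2.0) / std::tan(fovRadians / 2.0));
}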