Example #1
  double RadarGroundMap::ComputeXv(SpiceDouble X[3]) {
    // Get the spacecraft position (Xsc) and velocity (Vsc) in body fixed
    // coordinates
    SpiceRotation *bodyFrame = p_camera->BodyRotation();
    SpicePosition *spaceCraft = p_camera->InstrumentPosition();

    std::vector<double> Ssc(6);
    // Load the state into Ssc
    vequ_c ( (SpiceDouble *) &(spaceCraft->Coordinate()[0]), &Ssc[0]);
    vequ_c ( (SpiceDouble *) &(spaceCraft->Velocity()[0]), &Ssc[3]);

    // Rotate the state to body-fixed
    std::vector<double> bfSsc(6);
    bfSsc = bodyFrame->ReferenceVector(Ssc);

    // Extract the body-fixed position and velocity
    std::vector<double> Vsc(3);
    std::vector<double> Xsc(3);
    vequ_c ( &bfSsc[0], &Xsc[0] );
    vequ_c ( &bfSsc[3], &Vsc[0] );

    // Compute the slant range
    SpiceDouble lookB[3];
    vsub_c(&Xsc[0],X,lookB);
    p_slantRange = vnorm_c(lookB);

    // Compute and return xv
    double xv = -2.0 * vdot_c(lookB,&Vsc[0]) / (vnorm_c(lookB) * p_waveLength);
    return xv;
  }
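
A minimal standalone sketch of the same Doppler computation, using plain arrays in place of the NAIF routines (vsub_c, vdot_c, vnorm_c). The function and parameter names are illustrative, and the wavelength is passed in rather than read from p_waveLength:

  #include <cmath>

  // xv = -2 * (look . Vsc) / (|look| * wavelength), with look = Xsc - X.
  // xv goes to zero when the look vector is perpendicular to the velocity.
  double computeXvSketch(const double Xsc[3], const double Vsc[3],
                         const double X[3], double waveLength) {
    double look[3] = { Xsc[0] - X[0], Xsc[1] - X[1], Xsc[2] - X[2] };
    double dot  = look[0]*Vsc[0] + look[1]*Vsc[1] + look[2]*Vsc[2];
    double norm = std::sqrt(look[0]*look[0] + look[1]*look[1] + look[2]*look[2]);
    return -2.0 * dot / (norm * waveLength);
  }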
Example #2
  /** Compute undistorted focal plane coordinate from ground position using current Spice from SetImage call
   * 
   * This method will compute the undistorted focal plane coordinate for 
   * a ground position, using the current Spice settings (time and kernels) 
   * without resetting the current point values for lat/lon/radius/x/y.
   *  
   * @param lat planetocentric latitude in degrees
   * @param lon planetocentric longitude in degrees 
   * @param radius local radius in meters
   * @param lookJ  output look vector from the spacecraft to the ground point, rotated into J2000
   *
   * @return conversion was successful
   */
  bool CameraGroundMap::GetXY(const double lat, const double lon, const double radius,
                              std::vector<double> &lookJ) {

    // Check for Sky images
    if ( p_camera->IsSky() ) {
      return false;
    }

    // Should a check be added to make sure SetImage has been called???
    
    // Compute the look vector in body-fixed coordinates
    double pB[3]; // Point on surface
    latrec_c( radius/1000.0, lon*Isis::PI/180.0, lat*Isis::PI/180.0, pB);

    // Get spacecraft vector in body-fixed coordinates
    SpiceRotation *bodyRot = p_camera->BodyRotation();
    std::vector<double> sB = bodyRot->ReferenceVector(p_camera->InstrumentPosition()->Coordinate());
    std::vector<double> lookB(3);
    for (int ic=0; ic<3; ic++)   lookB[ic] = pB[ic] - sB[ic];

    // Check for point on back of planet by checking to see if surface point is viewable (test emission angle)
    // During iterations, we may not want to do the back of planet test???
    double upsB[3],upB[3],dist;
    vminus_c ( (SpiceDouble *) &lookB[0], upsB);
    unorm_c (upsB, upsB, &dist);
    unorm_c (pB, upB, &dist);
    double angle = vdot_c(upB, upsB);
    double emission;
    if (angle > 1) {
      emission = 0;
    }
    else if (angle < -1) {
      emission = 180.;
    }
    else {
      emission = acos (angle) * 180.0 / Isis::PI;
    }
    if (fabs(emission) > 90.) return false;

    // Rotate the body-fixed look vector into J2000 coordinates
    lookJ.resize(3);
    lookJ = p_camera->BodyRotation()->J2000Vector( lookB );
    return true;
  }
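
A self-contained sketch of the back-of-planet test above, assuming a spherical body so that the surface-point direction approximates the local normal; the helper name is illustrative:

  #include <cmath>

  // Returns true when the ground point pB (body-fixed) faces the spacecraft,
  // i.e. the emission angle between the local "up" direction (unit pB) and the
  // direction back toward the spacecraft (-lookB) is at most 90 degrees.
  bool surfacePointVisible(const double pB[3], const double lookB[3]) {
    const double pi = std::acos(-1.0);
    double upsB[3] = { -lookB[0], -lookB[1], -lookB[2] };
    double nUp = std::sqrt(upsB[0]*upsB[0] + upsB[1]*upsB[1] + upsB[2]*upsB[2]);
    double nPB = std::sqrt(pB[0]*pB[0] + pB[1]*pB[1] + pB[2]*pB[2]);
    double cosE = (pB[0]*upsB[0] + pB[1]*upsB[1] + pB[2]*upsB[2]) / (nUp * nPB);
    if (cosE >  1.0) cosE =  1.0;  // clamp against round-off, as in the example
    if (cosE < -1.0) cosE = -1.0;
    double emission = std::acos(cosE) * 180.0 / pi;
    return emission <= 90.0;
  }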
Example #3
  /** Compute ground position from slant range
   *
   * @param ux Slant range distance
   * @param uy Doppler shift (always 0.0)
   * @param uz Not used
   *
   * @return conversion was successful
   */
  bool RadarGroundMap::SetFocalPlane(const double ux, const double uy,
                                     double uz) {

    SpiceRotation *bodyFrame = p_camera->BodyRotation();
    SpicePosition *spaceCraft = p_camera->InstrumentPosition();

    // Get spacecraft position and velocity to create a state vector
    std::vector<double> Ssc(6);
    // Load the state into Ssc
    vequ_c ( (SpiceDouble *) &(spaceCraft->Coordinate()[0]), &Ssc[0]);
    vequ_c ( (SpiceDouble *) &(spaceCraft->Velocity()[0]), &Ssc[3]);

    // Rotate state vector to body-fixed
    std::vector<double> bfSsc(6);
    bfSsc = bodyFrame->ReferenceVector(Ssc);

    // Extract body-fixed position and velocity
    std::vector<double> Vsc(3);
    std::vector<double> Xsc(3);
    vequ_c ( &bfSsc[0], (SpiceDouble *) &(Xsc[0]) );
    vequ_c ( &bfSsc[3], (SpiceDouble *) &(Vsc[0]) );

    // Compute the in-track, cross-track, and radial unit vectors
    SpiceDouble i[3];
    vhat_c (&Vsc[0],i);

    SpiceDouble c[3];
    SpiceDouble dp;
    dp = vdot_c(&Xsc[0],i);
    SpiceDouble p[3],q[3];
    vscl_c(dp,i,p);
    vsub_c(&Xsc[0],p,q);
    vhat_c(q,c);

    SpiceDouble r[3];
    vcrss_c(i,c,r);

    // Use the body's equatorial radius as the initial guess for R
    double radii[3];
    p_camera->Radii(radii);
    SpiceDouble R = radii[0];
    SpiceDouble lastR = DBL_MAX;
    SpiceDouble rlat;
    SpiceDouble rlon;

    SpiceDouble lat = DBL_MAX;
    SpiceDouble lon = DBL_MAX;

    double slantRangeSqr = (ux * p_rangeSigma) / 1000.;
    slantRangeSqr = slantRangeSqr*slantRangeSqr;
    SpiceDouble X[3];

    int iter = 0;
    do {
      double normXsc = vnorm_c(&Xsc[0]);
      double alpha = (R*R - slantRangeSqr - normXsc*normXsc) /
                     (2.0 * vdot_c(&Xsc[0],c));

      double arg = slantRangeSqr - alpha*alpha;
      if (arg < 0.0) return false;

      double beta = sqrt(arg);
      if (p_lookDirection == Radar::Left) beta *= -1.0;

      SpiceDouble alphac[3],betar[3];
      vscl_c(alpha,c,alphac);
      vscl_c(beta,r,betar);

      vadd_c(alphac,betar,alphac);
      vadd_c(&Xsc[0],alphac,X);

      // Convert X to lat,lon
      lastR = R;
      reclat_c(X,&R,&lon,&lat);

      rlat = lat*180.0/Isis::PI;
      rlon = lon*180.0/Isis::PI;
      R = GetRadius(rlat,rlon);
      iter++;
    }
    while (fabs(R-lastR) > p_tolerance && iter < 30);

    if (fabs(R-lastR) > p_tolerance) return false;

    lat = lat*180.0/Isis::PI;
    lon = lon*180.0/Isis::PI;
    while (lon < 0.0) lon += 360.0;

    // Compute body fixed look direction
    std::vector<double> lookB;
    lookB.resize(3);
    lookB[0] = X[0] - Xsc[0];
    lookB[1] = X[1] - Xsc[1];
    lookB[2] = X[2] - Xsc[2];

    std::vector<double> lookJ = bodyFrame->J2000Vector(lookB);
    SpiceRotation *cameraFrame = p_camera->InstrumentRotation();
    std::vector<double> lookC = cameraFrame->ReferenceVector(lookJ);

    SpiceDouble unitLookC[3];
    vhat_c(&lookC[0],unitLookC);
    p_camera->SetLookDirection(unitLookC);

    return p_camera->Sensor::SetUniversalGround(lat,lon);
  }
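
One pass of the slant-range intersection in the loop above, written as a standalone sketch: given the body-fixed spacecraft position Xsc, the unit vectors c and r built above, the squared slant range, and the current radius estimate R, it produces the candidate ground point X = Xsc + alpha*c + beta*r. The helper name and the leftLook flag are illustrative:

  #include <cmath>

  // Returns false when the discriminant goes negative (no intersection),
  // mirroring the early return in SetFocalPlane above.
  bool intersectRangeSphere(const double Xsc[3], const double c[3], const double r[3],
                            double slantRangeSqr, double R, bool leftLook,
                            double X[3]) {
    double normXsc = std::sqrt(Xsc[0]*Xsc[0] + Xsc[1]*Xsc[1] + Xsc[2]*Xsc[2]);
    double XscDotC = Xsc[0]*c[0] + Xsc[1]*c[1] + Xsc[2]*c[2];
    double alpha = (R*R - slantRangeSqr - normXsc*normXsc) / (2.0 * XscDotC);
    double arg = slantRangeSqr - alpha*alpha;
    if (arg < 0.0) return false;
    double beta = std::sqrt(arg);
    if (leftLook) beta = -beta;
    for (int i = 0; i < 3; i++) X[i] = Xsc[i] + alpha*c[i] + beta*r[i];
    return true;
  }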
Example #4
  /** Compute undistorted focal plane coordinate from ground position that includes a local radius
   *
   * @param lat planetocentric latitude in degrees
   * @param lon planetocentric longitude in degrees
   * @param radius local radius in meters
   *
   * @return conversion was successful
   */
  bool RadarGroundMap::SetGround(const double lat, const double lon, const double radius) {
    // Get the ground point in rectangular coordinates (X)
    SpiceDouble X[3];
    SpiceDouble rlat = lat*Isis::PI/180.0;
    SpiceDouble rlon = lon*Isis::PI/180.0;
    latrec_c(radius,rlon,rlat,X);

    // Compute lower bound for Doppler shift 
    double et1 = p_camera->Spice::CacheStartTime();
    p_camera->Sensor::SetEphemerisTime(et1);
    double xv1 = ComputeXv(X);

    // Compute upper bound for Doppler shift
    double et2 = p_camera->Spice::CacheEndTime();
    p_camera->Sensor::SetEphemerisTime(et2);
    double xv2 = ComputeXv(X);

    // Make sure we bound root (xv = 0.0)
    if ((xv1 < 0.0) && (xv2 < 0.0)) return false;
    if ((xv1 > 0.0) && (xv2 > 0.0)) return false;

    // Order the bounds
    double fl,fh,xl,xh;
    if (xv1 < xv2) {
      fl = xv1;
      fh = xv2;
      xl = et1;
      xh = et2;
    }
    else {
      fl = xv2;
      fh = xv1;
      xl = et2;
      xh = et1;
    }

    // Iterate a max of 30 times
    for (int j=0; j<30; j++) {
      // Use the secant method to guess the next et
      double etGuess = xl + (xh - xl) * fl / (fl - fh);

      // Compute the guessed Doppler shift.  Hopefully
      // this guess converges to zero at some point
      p_camera->Sensor::SetEphemerisTime(etGuess);
      double fGuess = ComputeXv(X);

      // Update the bounds
      double delTime;
      if (fGuess < 0.0) {
        delTime = xl - etGuess;
        xl = etGuess;
        fl = fGuess;
      }
      else {
        delTime = xh - etGuess;
        xh = etGuess;
        fh = fGuess;
      }

      // See if we are done
      if ((fabs(delTime) <= p_timeTolerance) || (fGuess == 0.0)) {
        SpiceRotation *bodyFrame = p_camera->BodyRotation();
        SpicePosition *spaceCraft = p_camera->InstrumentPosition();

        // Get body fixed spacecraft velocity and position
        std::vector<double> Ssc(6);

        // Load the state into Ssc and rotate to body-fixed
        vequ_c ( (SpiceDouble *) &(spaceCraft->Coordinate()[0]), &Ssc[0]);
        vequ_c ( (SpiceDouble *) &(spaceCraft->Velocity()[0]), &Ssc[3]);
        std::vector<double> bfSsc(6);
        bfSsc = bodyFrame->ReferenceVector(Ssc);

        // Extract the body-fixed position and velocity from the state
        std::vector<double> Vsc(3);
        std::vector<double> Xsc(3);
        vequ_c ( &bfSsc[0], (SpiceDouble *) &(Xsc[0]) );
        vequ_c ( &bfSsc[3], (SpiceDouble *) &(Vsc[0]) );

        // Determine if focal plane coordinate falls on the correct side of the
        // spacecraft. Radar has both left and right look directions. Make sure
        // the coordinate is on the same side as the look direction. This is done
        // by (X - S) . (V x S) where X=ground point vector, S=spacecraft position
        // vector, and V=velocity vector. If the dot product is greater than 0, then
        // the point is on the right side. If the dot product is less than 0, then
        // the point is on the left side. If the dot product is 0, then the point is
        // directly under the spacecraft (neither left nor right) and is invalid.
        SpiceDouble vout1[3];
        SpiceDouble vout2[3];
        SpiceDouble dp;
        vsub_c(X,&Xsc[0],vout1);
        vcrss_c(&Vsc[0],&Xsc[0],vout2);
        dp = vdot_c(vout1,vout2);
        if (dp > 0.0 && p_lookDirection == Radar::Left) return false;
        if (dp < 0.0 && p_lookDirection == Radar::Right) return false;
        if (dp == 0.0) return false;

        // Compute body fixed look direction
        std::vector<double> lookB;
        lookB.resize(3);
        lookB[0] = X[0] - Xsc[0];
        lookB[1] = X[1] - Xsc[1];
        lookB[2] = X[2] - Xsc[2];

        std::vector<double> lookJ = bodyFrame->J2000Vector(lookB);
        SpiceRotation *cameraFrame = p_camera->InstrumentRotation();
        std::vector<double> lookC = cameraFrame->ReferenceVector(lookJ);

        SpiceDouble unitLookC[3];
        vhat_c(&lookC[0],unitLookC);
        p_camera->SetLookDirection(unitLookC);

        p_camera->SetFocalLength(p_slantRange*1000.0);
        p_focalPlaneX = p_slantRange / p_rangeSigma;
        p_focalPlaneY = 0.0;
        return true;
      }
    }

    return false;
  }
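
The time search in SetGround above is a bracketed false-position iteration on the Doppler value xv(et). A stripped-down sketch of the same update rule, with a generic function f standing in for ComputeXv and the camera state handling omitted; it assumes fl = f(xl) < 0 < fh = f(xh):

  #include <cmath>
  #include <functional>

  // Finds t with f(t) ~ 0 inside the bracket, using the update from the loop
  // above. Returns false if no convergence within maxIter iterations.
  bool falsePositionRoot(std::function<double(double)> f,
                         double xl, double xh, double fl, double fh,
                         double timeTolerance, double &root, int maxIter = 30) {
    for (int j = 0; j < maxIter; j++) {
      double guess = xl + (xh - xl) * fl / (fl - fh);
      double fGuess = f(guess);
      double delTime;
      if (fGuess < 0.0) { delTime = xl - guess; xl = guess; fl = fGuess; }
      else              { delTime = xh - guess; xh = guess; fh = fGuess; }
      if (std::fabs(delTime) <= timeTolerance || fGuess == 0.0) {
        root = guess;
        return true;
      }
    }
    return false;
  }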
  /** Cache J2000 rotation quaternion over a time range.
   *
   * This method will load an internal cache with frames over a time
   * range.  This prevents the NAIF kernels from being read over-and-over
   * again and slowing an application down due to I/O performance.  Once the
   * cache has been loaded then the kernels can be unloaded from the NAIF
   * system.
   *
   * @internal
   * @history 2010-12-23  Debbie A. Cook Added set of full cache time
   *                       parameters
   */
  void LineScanCameraRotation::LoadCache() {
    NaifStatus::CheckErrors();

    double startTime = p_cacheTime[0];
    int size = p_cacheTime.size();
    double endTime = p_cacheTime[size-1];
    SetFullCacheParameters(startTime, endTime, size);

    // TODO  Add a label value to indicate pointing is already decomposed to line scan angles
    // and set p_pointingDecomposition=none,framing angles, or line scan angles.
    // Also add a label value to indicate jitterOffsets=jitterFileName
    // Then we can decide whether to simply grab the crot angles or do new decomposition and whether
    // to apply jitter or throw an error because jitter has already been applied.

    // *** May need to do a frame trace and load the frames (at least the constant ones) ***

    // Loop and load the cache
    double state[6];
    double lt;
    NaifStatus::CheckErrors();

    double R[3];  // Direction of radial axis of line scan camera
    double C[3];  // Direction of cross-track axis
    double I[3];  // Direction of in-track axis
    double *velocity;
    std::vector<double> IB(9);
    std::vector<double> CI(9);
    SpiceRotation *prot = p_spi->bodyRotation();
    SpiceRotation *crot = p_spi->instrumentRotation();

    for(std::vector<double>::iterator i = p_cacheTime.begin(); i < p_cacheTime.end(); i++) {
      double et = *i;

      prot->SetEphemerisTime(et);
      crot->SetEphemerisTime(et);

      // The following code will be put into method LoadIBcache()
      spkezr_c("MRO", et, "IAU_MARS", "NONE", "MARS", state, &lt);
      NaifStatus::CheckErrors();

      // Compute the direction of the radial axis (3) of the line scan camera
      vscl_c(1. / vnorm_c(state), state, R); // vscl and vnorm only operate on first 3 members of state

      // Compute the direction of the cross-track axis (2) of the line scan camera
      velocity  =  state + 3;
      vscl_c(1. / vnorm_c(velocity), velocity, C);
      vcrss_c(R, C, C);

      // Compute the direction of the in-track axis (1) of the line scan camera
      vcrss_c(C, R, I);

      // Load the matrix IB and enter it into the cache
      vequ_c(I, (SpiceDouble( *)) &IB[0]);
      vequ_c(C, (SpiceDouble( *)) &IB[3]);
      vequ_c(R, (SpiceDouble( *)) &IB[6]);
      p_cacheIB.push_back(IB);
      // end IB code

      // Compute the CIcr matrix - in-track, cross-track, radial frame to constant frame
      mxmt_c((SpiceDouble( *)[3]) & (crot->TimeBasedMatrix())[0], (SpiceDouble( *)[3]) & (prot->Matrix())[0],
             (SpiceDouble( *)[3]) &CI[0]);

      // Put CI into parent cache to use the parent class methods on it
      mxmt_c((SpiceDouble( *)[3]) &CI[0], (SpiceDouble( *)[3]) &IB[0], (SpiceDouble( *)[3]) &CI[0]);
      p_cache.push_back(CI);
    }
    p_cachesLoaded = true;
    SetSource(Memcache);

    NaifStatus::CheckErrors();
  }
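
A plain-array sketch of how the loop above builds the radial, cross-track, and in-track axes from a body-fixed state vector, without the NAIF calls (vscl_c, vnorm_c, vcrss_c); the helper names are illustrative:

  #include <cmath>

  static void unitVec(const double v[3], double u[3]) {
    double n = std::sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
    for (int i = 0; i < 3; i++) u[i] = v[i] / n;
  }

  static void crossVec(const double a[3], const double b[3], double c[3]) {
    c[0] = a[1]*b[2] - a[2]*b[1];
    c[1] = a[2]*b[0] - a[0]*b[2];
    c[2] = a[0]*b[1] - a[1]*b[0];
  }

  // state = {x, y, z, vx, vy, vz} in body-fixed coordinates.
  // R = radial axis, C = cross-track axis, I = in-track axis (the rows of IB).
  void radialCrossInTrack(const double state[6], double R[3], double C[3], double I[3]) {
    double V[3];
    unitVec(state, R);       // radial: unit position vector
    unitVec(state + 3, V);   // unit velocity
    crossVec(R, V, C);       // cross-track: R x V
    crossVec(C, R, I);       // in-track: completes the right-handed triad
  }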
  /** Cache J2000 rotation over existing cached time range using polynomials
   *
   * This method will reload an internal cache with matrices
   * formed from rotation angles fit to polynomials over a time
   * range.
   *
   * The three polynomial functions used to find the rotation
   * angles are built internally from the coefficients returned
   * by GetPolynomial().
   */
  void LineScanCameraRotation::ReloadCache() {
    NaifStatus::CheckErrors();

    // Make sure caches are already loaded
    if(!p_cachesLoaded) {
      QString msg = "A LineScanCameraRotation cache has not been loaded yet";
      throw IException(IException::Programmer, msg, _FILEINFO_);
    }

    // Clear existing matrices from cache
    p_cache.clear();

    // Create polynomials fit to angles & use to reload cache
    Isis::PolynomialUnivariate function1(p_degree);
    Isis::PolynomialUnivariate function2(p_degree);
    Isis::PolynomialUnivariate function3(p_degree);

    // Get the coefficients of the polynomials already fit to the angles of rotation defining [CI]
    std::vector<double> coeffAng1;
    std::vector<double> coeffAng2;
    std::vector<double> coeffAng3;
    GetPolynomial(coeffAng1, coeffAng2, coeffAng3);

    // Reset linear term to center around zero -- what works best is either roll-avg & pitchavg+ or pitchavg+ & yawavg-
//    coeffAng1[1] -= 0.0000158661225;
//    coeffAng2[1] = 0.0000308433;
//    coeffAng3[0] = -0.001517547;
    if(p_pitchRate)  coeffAng2[1] = p_pitchRate;
    if(p_yaw)  coeffAng3[0] = p_yaw;

    // Load the functions with the coefficients
    function1.SetCoefficients(coeffAng1);
    function2.SetCoefficients(coeffAng2);
    function3.SetCoefficients(coeffAng3);

    double CI[3][3];
    double IJ[3][3];
    std::vector<double> rtime;
    SpiceRotation *prot = p_spi->bodyRotation();
    std::vector<double> CJ;
    CJ.resize(9);

    for(std::vector<double>::size_type pos = 0; pos < p_cacheTime.size(); pos++) {
      double et = p_cacheTime.at(pos);
      rtime.push_back((et - GetBaseTime()) / GetTimeScale());
      double angle1 = function1.Evaluate(rtime);
      double angle2 = function2.Evaluate(rtime);
      double angle3 = function3.Evaluate(rtime);
      rtime.clear();

      // Get the first angle back into the range [-pi, pi] that NAIF expects
      if(angle1 < -1 * pi_c()) {
        angle1 += twopi_c();
      }
      else if(angle1 > pi_c()) {
        angle1 -= twopi_c();
      }

      eul2m_c((SpiceDouble) angle3, (SpiceDouble) angle2, (SpiceDouble) angle1,
              p_axis3,                    p_axis2,                    p_axis1,
              CI);
      mxm_c((SpiceDouble( *)[3]) & (p_jitter->SetEphemerisTimeHPF(et))[0], CI, CI);

      prot->SetEphemerisTime(et);
      mxm_c((SpiceDouble( *)[3]) & (p_cacheIB.at(pos))[0], (SpiceDouble( *)[3]) & (prot->Matrix())[0], IJ);
      mxm_c(CI, IJ, (SpiceDouble( *)[3]) &CJ[0]);

      p_cache.push_back(CJ);   // J2000 to constant frame
    }

    // Set source to cache to get updated values
    SetSource(SpiceRotation::Memcache);

    // Make sure SetEphemerisTime updates the matrix by resetting it twice (in case the first one
    // matches the current et).  p_et is private and not available from the child class.
    NaifStatus::CheckErrors();
    SetEphemerisTime(p_cacheTime[0]);
    SetEphemerisTime(p_cacheTime[1]);

    NaifStatus::CheckErrors();
  }
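
A small sketch of the angle wrapping done just before eul2m_c above, generalized to handle inputs more than one full turn outside the range; purely illustrative:

  #include <cmath>

  // Wrap an angle in radians into the [-pi, pi] range expected by NAIF.
  double wrapToPi(double angle) {
    const double pi = std::acos(-1.0);
    while (angle < -pi) angle += 2.0 * pi;
    while (angle >  pi) angle -= 2.0 * pi;
    return angle;
  }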
void IsisMain() {

    // Use a regular Process
    Process p;

    // Get user parameters and error check
    UserInterface &ui = Application::GetUserInterface();
    QString from = ui.GetFileName("FROM");
    QString to = FileName(ui.GetFileName("TO")).expanded();
//TO DO: UNCOMMENT THIS LINE ONCE HRSC IS WORKING IN SS
//  double HRSCNadirCenterTime = ui.GetDouble("HRSC_NADIRCENTERTIME");

    // Open input cube and make sure this is a level 1 image (i.e., not map projected)
    Cube cube;
    cube.open(from);

    if (cube.isProjected()) {
        QString msg = "Input images is a map projected cube ... not a level 1 image";
        throw IException(IException::User, msg, _FILEINFO_);
    }

    // Initialize the camera
    Cube *input = p.SetInputCube("FROM");
    Pvl *cubeHeader = input->label();
    Camera *cam = input->camera();
    CameraDetectorMap *detectorMap = cam->DetectorMap();
    CameraFocalPlaneMap *focalMap = cam->FocalPlaneMap();
    CameraDistortionMap *distortionMap = cam->DistortionMap();
    CameraGroundMap *groundMap = cam->GroundMap();

    // Make sure the image contains the InstrumentPointing (aka CK) blob/table
    PvlGroup test = cube.label()->findGroup("Kernels", Pvl::Traverse);
    QString InstrumentPointing = (QString) test["InstrumentPointing"];
    if (InstrumentPointing != "Table") {
        QString msg = "Input image does not contain needed SPICE blobs...run spiceinit with attach=yes.";
        throw IException(IException::User, msg, _FILEINFO_);
    }

    // Open output line scanner keyword file
    ofstream toStrm;
    toStrm.open(to.toAscii().data(), ios::trunc);
    if (toStrm.bad()) {
        QString msg = "Unable to open output TO file";
        throw IException(IException::User, msg, _FILEINFO_);
    }

    // Get required keywords from instrument and band groups
    PvlGroup inst = cube.label()->findGroup("Instrument", Pvl::Traverse);
    QString instrumentId = (QString) inst["InstrumentId"];

    bool     isMocNA = false;
//TO DO: UNCOMMENT THIS LINE ONCE MOC IS WORKING IN SS
//  bool     isMocWARed = false;
    bool     isHiRise = false;
    bool     isCTX = false;
    bool     isLroNACL = false;
    bool     isLroNACR = false;
    bool     isHRSC = false;
//TO DO: UNCOMMENT THESE LINES ONCE MOC IS WORKING IN SS
//  if (instrumentId == "MOC") {
//    PvlGroup band = cube.label()->findGroup("BandBin", Pvl::Traverse);
//    QString filter = (QString) band["FilterName"];
//
//    if (strcmp(filter.toAscii().data(), "BROAD_BAND") == 0)
//      isMocNA = true;
//    else if (strcmp(filter.toAscii().data(), "RED") == 0)
//      isMocWARed = true;
//    else if (strcmp(filter.toAscii().data(), "BLUE") == 0) {
//      QString msg = "MOC WA Blue filter images not supported for Socet Set mapping";
//      throw IException(IException::User, msg, _FILEINFO_);
//    }
//  }
//  else if (instrumentId == "IdealCamera") {
//TO DO: DELETE THIS LINE ONCE MOC IS WORKING IN SS
    if (instrumentId == "IdealCamera") {
        PvlGroup orig = cube.label()->findGroup("OriginalInstrument",  Pvl::Traverse);
        QString origInstrumentId = (QString) orig["InstrumentId"];
        if (origInstrumentId == "HIRISE") {
            isHiRise = true;
        }
        else {
            QString msg = "Unsupported instrument: " + origInstrumentId;
            throw IException(IException::User, msg, _FILEINFO_);
        }
    }
    else if (instrumentId == "HIRISE") {
        isHiRise = true;
    }
    else if (instrumentId == "CTX") {
        isCTX = true;
    }
    else if (instrumentId == "NACL") {
        isLroNACL = true;
    }
    else if (instrumentId == "NACR") {
        isLroNACR = true;
    }
//TO DO: UNCOMMENT THIS LINE ONCE HRSC IS WORKING IN SS
//  else if (instrumentId == "HRSC") isHRSC = true;
    else {
        QString msg = "Unsupported instrument: " + instrumentId;
        throw IException(IException::User, msg, _FILEINFO_);
    }

    int ikCode = cam->naifIkCode();

    // Get Focal Length.
    // NOTE:
    //   For MOC Wide Angle, cam->focal_length returns the focal length
    //      in pixels, so we must convert from pixels to mm using the PIXEL_SIZE
    //      of 0.007 mm gotten from $ISIS3DATA/mgs/kernels/ik/moc20.ti.  (The
    //      PIXEL_PITCH value gotten from cam->PixelPitch is 1.0 since the
    //      focal length used by ISIS in this case is in pixels)
    //      For reference: the MOC WA blue filter pixel size needs an adjustment
    //      of 1.000452 (see p_scale in MocWideAngleDistortionMap.cpp), so that
    //      the final blue filter pixel size = (0.007 / 1.000452)
    //
    //   For all other cameras, cam->focal_length returns the focal
    //      length in mm, as needed by Socet Set

    double focal = cam->FocalLength();  // focal length returned in mm

//TO DO: UNCOMMENT THESE LINES ONCE HRSC and MOC IS WORKING IN SS
//  if (isMocWARed)
//    focal = focal * 0.007;  // pixel to mm conversion
//  else if (isHRSC)
//  {
//    switch (ikCode) {
//      case -41219:                   //S1: fwd stereo
//        focal = 184.88;
//        break;
//      case -41218:                   //IR: infra-red
//        focal = 181.57;
//        break;
//      case -41217:                   //P1: fwd photo
//        focal = 179.16;
//        break;
//      case -41216:                   // GREEN
//        focal = 175.31;
//        break;
//      case -41215:                   // NADIR
//        focal = 175.01;
//        break;
//      case -41214:                   // BLUE
//        focal = 175.53;
//        break;
//      case -41213:                   // P2: aft photo
//        focal = 179.19;
//        break;
//      case -41212:                   // RED
//        focal = 181.77;
//        break;
//      case -41211:                   // S2: aft stereo
//        focal = 184.88;
//        break;
//      default:
//        break;
//    }
//  }

    // Get instrument summing modes
    int csum = (int) detectorMap->SampleScaleFactor();
    int dsum = (int) detectorMap->LineScaleFactor();

    if (isLroNACL || isLroNACR || isHRSC)
        dsum = csum;

    // Calculate location of boresight in image space; these are zero-based values
    //
    // Note: For MOC NA, the boresight is at the image center
    //       For MOC WA, MRO HiRISE, MRO CTX, LRO_NACL, LRO_NACR and HRSC the
    //       boresight is not at the detector center, but the boresight is at the
    //       center of a NOPROJ'ED MRO HIRISE image

    // Get line/samp of boresight pixel in detector space (summing == 1)
    focalMap->SetFocalPlane(0.0, 0.0);
    double detectorBoresightSample = focalMap->DetectorSample();
    double detectorBoresightLine = focalMap->DetectorLine();

    // Convert sample of boresight pixel in detector into image space
    // (summing, etc., is accounted for.)
    detectorMap->SetDetector(detectorBoresightSample, detectorBoresightLine);
    double boresightSample = detectorMap->ParentSample();

    // Set Atmospheric correction coefficients to 0
    double atmco[4] = {0.0, 0.0, 0.0, 0.0};

    // Get totalLines, totalSamples and account for summed images
    int totalLines = cube.lineCount();
    int totalSamples = cube.sampleCount();

    // Get the Interval Time in seconds and calculate
    // scan duration in seconds
    double scanDuration = 0.0;
    double intTime = 0.0;

//TO DO: UNCOMMENT THESE LINES ONCE HRSC IS WORKING IN SS
//  int numIntTimes = 0.0;
//  vector<LineRateChange> lineRates;
//  if (isHRSC) {
//    numIntTimes = GetHRSCLineRates(&cube, lineRates, totalLines, HRSCNadirCenterTime);
//    if (numIntTimes == 1) {
//      LineRateChange lrc = lineRates.at(0);
//      intTime = lrc.GetLineScanRate();
//    }
//    if (numIntTimes <= 0) {
//      QString msg = "HRSC: Invalid number of scan times";
//      throw IException(IException::Programmer, msg, _FILEINFO_);
//    }
//    else
//      scanDuration = GetHRSCScanDuration(lineRates, totalLines);
//  }
//  else {
//
//  TO DO: indent the following two lines when HRSC is working in SS
    intTime = detectorMap->LineRate();  //LineRate is in seconds
    scanDuration = intTime * totalLines;
//TO DO: UNCOMMENT THIS LINE ONCE HRSC IS WORKING IN SS
//  }

    // For reference, this is the code if calculating interval time
    // via LineExposureDuration keyword off image labels:
    //
    // if (isMocNA || isMocWARed)
    //   intTime = exposureDuration * (double) dsum / 1000.0;
    // else if (isHiRise)
    //   intTime = exposureDuration * (double) dsum / 1000000.0;

    // Get along and cross scan pixel size for NA and WA sensors.
    // NOTE:
    //     1) The MOC WA pixel size is gotten from moc20.ti and is 7 microns
    //         HRSC pixel size is from the Instrument Addendum file
    //     2) For others, cam->PixelPitch() returns the pixel pitch (size) in mm.
    double alongScanPxSize = 0.0;
    double crossScanPxSize = 0.0;
//TO DO: UNCOMMENT THESE LINES ONCE MOC IS WORKING IN SS
//  if (isMocWARed || isHRSC) {
//    alongScanPxSize = csum * 0.007;
//    crossScanPxSize = dsum * 0.007;
//  }
//  else {
//
//  TO DO: indent the following 24 lines when HRSC is working in SS
    crossScanPxSize = dsum * cam->PixelPitch();

    // Get the ephemeris time, ground position and undistorted focal plane X
    // coordinate at the center line/samp of image
    cam->SetImage(cube.sampleCount() / 2.0, cube.lineCount() / 2.0);

    double tMid = cam->time().Et();

    const double latCenter = cam->UniversalLatitude();
    const double lonCenter = cam->UniversalLongitude();
    const double radiusCenter = cam->LocalRadius().meters();

    double uXCenter = distortionMap->UndistortedFocalPlaneX();

    // from the ground position at the image center, increment the ephemeris
    // time by the line rate and map the ground position into the sensor in
    // undistorted focal plane coordinates

    cam->setTime(iTime(tMid + intTime));
    double uX, uY;
    groundMap->GetXY(latCenter, lonCenter, radiusCenter, &uX, &uY);

    // the along scan pixel size is the difference in focal plane X coordinates
    alongScanPxSize = abs(uXCenter - uX);

//TO DO: UNCOMMENT THIS LINE ONCE MOC and HRSC IS WORKING IN SS
//  }

    // Now that we have totalLines, totalSamples, alongScanPxSize and
    // crossScanPxSize, fill the Interior Orientation Coefficient arrays
    double ioCoefLine[10];
    double ioCoefSample[10];
    for (int i = 0; i <= 9; i++) {
        ioCoefLine[i] = 0.0;
        ioCoefSample[i] = 0.0;
    }

    ioCoefLine[0] = totalLines / 2.0;
    ioCoefLine[1] = 1.0 / alongScanPxSize;

    ioCoefSample[0] = totalSamples / 2.0;
    ioCoefSample[2] = 1.0 / crossScanPxSize;

    // Update the Rectification Terms found in the base sensor class
    double rectificationTerms[6];
    rectificationTerms[0] = totalLines / 2.0;
    rectificationTerms[1] = 0.0;
    rectificationTerms[2] = 1.0;
    rectificationTerms[3] = totalSamples / 2.0;
    rectificationTerms[4] = 1.0;
    rectificationTerms[5] = 0.0;

    // Fill the triangulation parameters array
    double triParams[18];
    for (int i = 0; i <= 17; i++)
        triParams[i] = 0.0;

    triParams[15] = focal;

    // Set the Center Ground Point of the SOCET Set image, in radians
    double centerGp[3];
    double radii[3] = {0.0, 0.0, 0.0};
    Distance Dradii[3];

    cam->radii(Dradii);
    radii[0] = Dradii[0].kilometers();
    radii[1] = Dradii[1].kilometers();
    radii[2] = Dradii[2].kilometers();

    cam->SetImage(boresightSample, totalLines / 2.0);

    centerGp[0] = DEG2RAD *
                  TProjection::ToPlanetographic(cam->UniversalLatitude(), radii[0], radii[2]);
    centerGp[1] = DEG2RAD * TProjection::To180Domain(cam->UniversalLongitude());
    centerGp[2] = 0.0;
    //**** NOTE: in the import_pushbroom SOCET SET program, centerGp[2] will be set to the SS
    //**** project's gp_origin_z

    // Now get keyword values that depend on ephemeris data.

    // First get the ephemeris time and camera Lat Lon at image center line, boresight sample.
    double centerLine = double(totalLines) / 2.0;

    cam->SetImage(boresightSample, centerLine); //set to boresight of image
    double etCenter = cam->time().Et();

    // Get the sensor position at the image center in ographic lat,
    // +E lon domain 180 coordinates, radians, height in meters
    double sensorPosition[3] = {0.0, 0.0, 0.0};
    double ocentricLat, e360Lon;
    cam->subSpacecraftPoint(ocentricLat, e360Lon);
    sensorPosition[0] = DEG2RAD * TProjection::ToPlanetographic(ocentricLat, radii[0], radii[2]);
    sensorPosition[1] = DEG2RAD * TProjection::To180Domain(e360Lon);
    sensorPosition[2] = cam->SpacecraftAltitude() * 1000.0;

    // Build the ephem data.  If the image label contains the InstrumentPosition
    // table, use it as a guide for number and spacing of Ephem points.
    // Otherwise (i.e., for dejittered HiRISE images), the number and spacing of
    // ephem points are based on a hardcoded dtEphem value.

    // Using the InstrumentPosition table as a guide build the ephem data
    QList< QList<double> > ephemPts;
    QList< QList<double> > ephemRates;

    PvlGroup kernels = cube.label()->findGroup("Kernels", Pvl::Traverse);
    QString InstrumentPosition = (QString) kernels["InstrumentPosition"];

    int numEphem = 0;      // number of ephemeris points
    double dtEphem = 0.0;  // delta time of ephemeris points, seconds
    if (InstrumentPosition == "Table") {
        // Labels contain SPK blob
        // set up Ephem pts/rates number and spacing
        Table tablePosition("InstrumentPosition", cubeHeader->fileName());
        numEphem = tablePosition.Records();

        // increase the number of ephem nodes by 20%.  This is somewhat random but
        // generally intended to compensate for having equally time spaced nodes
        // instead of the potentially more efficient placement used by spiceinit
        numEphem = int(double(numEphem) * 1.2);

        // if numEphem calculated from SPICE blobs is too sparse for SOCET Set,
        // multiply it by a factor of 30
        // (30X was settled upon empirically.  In the future, make this an input parameter)
        if (numEphem <= 10) numEphem = tablePosition.Records() * 30;

        // make the number of nodes odd
        numEphem  = (numEphem % 2) == 1 ? numEphem : numEphem + 1;

        // SOCET has a max number of ephem pts of 10000, and we're going to add twenty...
        if (numEphem > 10000 - 20) numEphem = 9979;

        dtEphem = scanDuration / double(numEphem);

        //build the tables of values
        double et = etCenter - (((numEphem - 1) / 2) * dtEphem);
        for (int i = 0; i < numEphem; i++) {
            cam->setTime(iTime(et));
            SpiceRotation *bodyRot = cam->bodyRotation();
            vector<double> pos = bodyRot->ReferenceVector(cam->instrumentPosition()->Coordinate());
//TO DO: UNCOMMENT THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //vector<double> vel = bodyRot->ReferenceVector(cam->instrumentPosition()->Velocity());

            //Add the ephemeris position and velocity to their respective lists, in meters and meters/sec
            QList<double> ephemPt;
            QList<double> ephemRate;
            ephemPts.append(ephemPt << pos[0] * 1000 << pos[1] * 1000 << pos[2] * 1000);
//TO DO: UNCOMMENT THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //ephemRates.append(ephemRate << vel[0] * 1000 << vel[1] * 1000 << vel[2] * 1000);

            et += dtEphem;
        }

//TO DO: WHEN VELOCITY BLOBS ARE CORRECT IN ISIS, linearly interpolate 10 nodes rather than 11
//       (need 11 now for computation of velocity at first and last ephemeris point)
        // linearly interpolate 11 additional nodes before line 1 (SOCET requires this)
        for (int i = 0; i < 11; i++) {
            double vec[3] = {0.0, 0.0, 0.0};
            vec[0] = ephemPts[0][0] + (ephemPts[0][0] - ephemPts[1][0]);
            vec[1] = ephemPts[0][1] + (ephemPts[0][1] - ephemPts[1][1]);
            vec[2] = ephemPts[0][2] + (ephemPts[0][2] - ephemPts[1][2]);
            QList<double> ephemPt;
            ephemPts.prepend (ephemPt << vec[0] << vec[1] << vec[2]);

//TO DO: UNCOMMENT THE FOLLOWING LINES WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //vec[0] = ephemRates[0][0] + (ephemRates[0][0] - ephemRates[1][0]);
            //vec[1] = ephemRates[0][1] + (ephemRates[0][1] - ephemRates[1][1]);
            //vec[2] = ephemRates[0][2] + (ephemRates[0][2] - ephemRates[1][2]);
            //QList<double> ephemRate;
            //ephemRates.prepend (ephemRate << vec[0] << vec[1] << vec[2]);
        }

//TO DO: WHEN VELOCITY BLOBS ARE CORRECT IN ISIS, linearly interpolate 10 nodes rather than 11
//       (need 11 now for computation of velocity at first and last ephemeris point)
        // linearly interpolate 11 additional nodes after the last line (SOCET requires this)
        for (int i = 0; i < 11; i++) {
            double vec[3] = {0.0, 0.0, 0.0};
            int index = ephemPts.size() - 1;
            vec[0] = ephemPts[index][0] + (ephemPts[index][0] - ephemPts[index - 1][0]);
            vec[1] = ephemPts[index][1] + (ephemPts[index][1] - ephemPts[index - 1][1]);
            vec[2] = ephemPts[index][2] + (ephemPts[index][2] - ephemPts[index - 1][2]);
            QList<double> ephemPt;
            ephemPts.append(ephemPt << vec[0] << vec[1] << vec[2]);

//TO DO: UNCOMMENT THE FOLLOWING LINES WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //vec[0] = ephemRates[index][0] + (ephemRates[index][0] - ephemRates[index - 1][0]);
            //vec[1] = ephemRates[index][1] + (ephemRates[index][1] - ephemRates[index - 1][1]);
            //vec[2] = ephemRates[index][2] + (ephemRates[index][2] - ephemRates[index - 1][2]);
            //QList<double> ephemRate;
            //ephemRates.append(ephemRate << vec[0] << vec[1] << vec[2]);
        }

        numEphem += 20;

//TO DO: DELETE THE FOLLOWING LINES WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
        // Compute the spacecraft velocity at each ephemeris point
        double deltaTime = 2.0 * dtEphem;
        for (int i = 0; i < numEphem; i++) {
            double vec[3] = {0.0, 0.0, 0.0};
            vec[0] = (ephemPts[i+2][0] - ephemPts[i][0]) / deltaTime;
            vec[1] = (ephemPts[i+2][1] - ephemPts[i][1]) / deltaTime;
            vec[2] = (ephemPts[i+2][2] - ephemPts[i][2]) / deltaTime;
            QList<double> ephemRate;
            ephemRates.append(ephemRate << vec[0] << vec[1] << vec[2]);
        }

    }
    else {
        // Calculate the number of ephemeris points that are needed, based on the
        // value of dtEphem (Delta-Time-Ephemeris).  SOCET SET needs the ephemeris
        // points to exceed the image range for interpolation.  For now, attempt a
        // padding of 10 ephemeris points on either side of the image.

        if (isMocNA || isHiRise || isCTX || isLroNACL || isLroNACR || isHRSC)
            // Try increment of every 300 image lines
            dtEphem = 300 * intTime;  // Make this a user definable increment?
        else // Set increment for WA images to one second
            dtEphem = 1.0;

        // Pad by 10 ephem pts on each side of the image
        numEphem = (int)(scanDuration / dtEphem) + 20;

        // if numEphem is even, make it odd so that the number of ephemeris points
        // is equal on either side of T_CENTER
        if ((numEphem % 2) == 0)
            numEphem++;

//TO DO: DELETE THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
        numEphem = numEphem + 2; // Add two for calculation of velocity vectors...

        // Find the ephemeris time for the first ephemeris point, and from that, get
        // to_ephem needed by SOCET (to_ephem is relative to etCenter)
        double et = etCenter - (((numEphem - 1) / 2) * dtEphem);
        for (int i = 0; i < numEphem; i++) {
            cam->setTime(iTime(et));
            SpiceRotation *bodyRot = cam->bodyRotation();
            vector<double> pos = bodyRot->ReferenceVector(cam->instrumentPosition()->Coordinate());
//TO DO: UNCOMMENT THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //vector<double> vel = bodyRot->ReferenceVector(cam->instrumentPosition()->Velocity());

            //Add the ephemeris position and velocity to their respective lists, in meters and meters/sec
            QList<double> ephemPt;
            QList<double> ephemRate;
            ephemPts.append(ephemPt << pos[0] * 1000 << pos[1] * 1000 << pos[2] * 1000);
//TO DO: UNCOMMENT THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
            //ephemRates.append(ephemRate << vel[0] * 1000 << vel[1] * 1000 << vel[2] * 1000);

            et += dtEphem;
        }
//TO DO: DELETE THE FOLLOWING LINES WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
        // Compute the spacecraft velocity at each ephemeris point
        // (We must do this when blobs are not attached because the Spice Class
        // stores in memory the same data that would be in a blob...even when reading NAIF kernels)
        double deltaTime = 2.0 * dtEphem;
        numEphem = numEphem - 2; // set numEphem back to the number we need output
        for (int i = 0; i < numEphem; i++) {
            double vec[3] = {0.0, 0.0, 0.0};
            vec[0] = (ephemPts[i+2][0] - ephemPts[i][0]) / deltaTime;
            vec[1] = (ephemPts[i+2][1] - ephemPts[i][1]) / deltaTime;
            vec[2] = (ephemPts[i+2][2] - ephemPts[i][2]) / deltaTime;
            QList<double> ephemRate;
            ephemRates.append(ephemRate << vec[0] << vec[1] << vec[2]);
        }
    }

    //update ephem stats
    double etFirstEphem = etCenter - (((numEphem - 1) / 2) * dtEphem);
    double t0Ephem = etFirstEphem - etCenter;

    // Using the InstrumentPointing table as a guide, build the quaternions.
    // For simplicity's sake we'll leave the mountingAngles as identity
    // and store the complete rotation from body-fixed to camera in the
    // quaternions.

    //set up quaternions number and spacing
    Table tablePointing("InstrumentPointing", cubeHeader->fileName());

    //number of quaternions
    int numQuaternions = tablePointing.Records();

    // increase the number of quaternion nodes by 20%. This is somewhat random but
    // generally intended to compensate for having equally time spaced nodes
    // instead of the potentially more efficient placement used by spiceinit
    numQuaternions = (int)(numQuaternions * 1.2);

    // if numQuaternions calculated from SPICE blobs is too sparse for SOCET Set,
    // multiply it by a factor of 30
    // (30X was settled upon empirically.  In the future, make this an input parameter)
    if (numQuaternions <= 10) numQuaternions = tablePointing.Records() * 30;

    //make the number of nodes odd
    numQuaternions = (numQuaternions % 2) == 1 ? numQuaternions : numQuaternions + 1;

    // SOCET has a max number of quaternions of 20000, and we're going to add twenty...
    if (numQuaternions > 20000 - 20) numQuaternions = 19179;

    double dtQuat = scanDuration / double(numQuaternions);

    // build the tables of values
    QList< QList<double> > quaternions;
    double et = etCenter - (((numQuaternions - 1) / 2) * dtQuat);

    for (int i = 0; i < numQuaternions; i++) {
        cam->setTime(iTime(et));
        vector<double> j2000ToBodyFixedMatrixVector = cam->bodyRotation()->Matrix();
        vector<double> j2000ToCameraMatrixVector = cam->instrumentRotation()->Matrix();
        double quaternion[4] = {0.0, 0.0, 0.0, 0.0};

        double j2000ToBodyFixedRotationMatrix[3][3], //rotation from J2000 to target (aka body, planet)
               j2000ToCameraRotationMatrix[3][3], //rotation from J2000 to spacecraft
               cameraToBodyFixedRotationMatrix[3][3]; //rotation from camera to target

        // reformat vectors to 3x3 rotation matrices
        for (int j = 0; j < 3; j++) {
            for (int k = 0; k < 3; k++) {
                j2000ToBodyFixedRotationMatrix[j][k] = j2000ToBodyFixedMatrixVector[3 * j + k];
                j2000ToCameraRotationMatrix[j][k] = j2000ToCameraMatrixVector[3 * j + k];
            }
        }

        // get the quaternion
        mxmt_c(j2000ToBodyFixedRotationMatrix, j2000ToCameraRotationMatrix,
               cameraToBodyFixedRotationMatrix);
        m2q_c(cameraToBodyFixedRotationMatrix, quaternion);

        // add the quaternion to the list of quaternions
        QList<double> quat;
        quaternions.append(quat << quaternion[1] << quaternion[2] << quaternion[3] <<
                           quaternion[0]);
        //note also that the order is changed to match SOCET

        et += dtQuat;
    }

    // linearly interpolate 10 additional nodes before the first quaternion (SOCET requires this)
    for (int i = 0; i < 10; i++) {
        double vec[4] = {0.0, 0.0, 0.0, 0.0};
        vec[0] = quaternions[0][0] + (quaternions[0][0] - quaternions[1][0]);
        vec[1] = quaternions[0][1] + (quaternions[0][1] - quaternions[1][1]);
        vec[2] = quaternions[0][2] + (quaternions[0][2] - quaternions[1][2]);
        vec[3] = quaternions[0][3] + (quaternions[0][3] - quaternions[1][3]);
        QList<double> quat;
        quaternions.prepend (quat << vec[0] << vec[1] << vec[2] << vec[3]);
    }

    // linearly interpolate 10 additional nodes after the last quaternion (SOCET requires this)
    for (int i = 0; i < 10; i++) {
        double vec[4] = {0.0, 0.0, 0.0, 0.0};
        int index = quaternions.size() - 1;
        vec[0] = quaternions[index][0] + (quaternions[index][0] - quaternions[index - 1][0]);
        vec[1] = quaternions[index][1] + (quaternions[index][1] - quaternions[index - 1][1]);
        vec[2] = quaternions[index][2] + (quaternions[index][2] - quaternions[index - 1][2]);
        vec[3] = quaternions[index][3] + (quaternions[index][3] - quaternions[index - 1][3]);
        QList<double> quat;
        quaternions.append(quat << vec[0] << vec[1] << vec[2] << vec[3]);
    }

    //update quaternions stats
    numQuaternions += 20;

    //ephemeris time of the first quaternion
    double et0Quat = etCenter - (((numQuaternions - 1) / 2) * dtQuat);

    //time of the first quaternion relative to the center time
    double qt0Quat = et0Quat - etCenter;

    //query remaining transformation parameters from Camera Classes
    //transformation to distortionless focal plane
    double zDirection = distortionMap->ZDirection();

    //transformation from DistortionlessFocalPlane to FocalPlane
    vector<double> opticalDistCoefs = distortionMap->OpticalDistortionCoefficients();

    // For instruments with fewer than 3 distortion coefficients, set the
    // unused ones to 0.0
    opticalDistCoefs.resize(3, 0);

    //transformation from focal plane to detector
    const double *iTransS = focalMap->TransS();
    const double *iTransL = focalMap->TransL();
    double detectorSampleOrigin = focalMap->DetectorSampleOrigin();
    double detectorLineOrigin = focalMap->DetectorLineOrigin();

    //transformation from detector to cube
    double startingSample = detectorMap->AdjustedStartingSample();
    double startingLine = detectorMap->AdjustedStartingLine();
    double sampleSumming = detectorMap->SampleScaleFactor();
    double etStart = ((LineScanCameraDetectorMap *)detectorMap)->StartTime();
    double lineOffset = focalMap->DetectorLineOffset();

    // We are done with computing keyword values, so output the Line Scanner
    // Keyword file.

    // This is the SOCET SET base sensor class keywords portion of support file:
    toStrm.setf(ios::scientific);
    toStrm << "RECTIFICATION_TERMS" << endl;
    toStrm << "        " << setprecision(14) << rectificationTerms[0] << " " <<
           rectificationTerms[1] << " " << rectificationTerms[2] << endl;
    toStrm << "        " << rectificationTerms[3] << " " << rectificationTerms[4] <<
           " " << rectificationTerms[5] << endl;

    toStrm << "GROUND_ZERO ";
    toStrm << centerGp[0] << " " << centerGp[1] << " " << centerGp[2] << endl;

    toStrm << "LOAD_PT ";
    toStrm << centerGp[0] << " " << centerGp[1] << " " << centerGp[2] << endl;

    toStrm << "COORD_SYSTEM 1" << endl;

    toStrm << "IMAGE_MOTION 0" << endl;

    // This is the line scanner sensor model portion of support file:
    toStrm << "SENSOR_TYPE USGSAstroLineScanner" << endl;
    toStrm << "SENSOR_MODE UNKNOWN" << endl;

    toStrm << "FOCAL " << focal << endl;

    toStrm << "ATMCO";
    for (int i = 0; i < 4; i++) toStrm << " " << atmco[i];
    toStrm << endl;

    toStrm << "IOCOEF_LINE";
    for (int i = 0; i < 10; i++) toStrm << " " << ioCoefLine[i];
    toStrm << endl;

    toStrm << "IOCOEF_SAMPLE";
    for (int i = 0; i < 10; i++) toStrm << " " << ioCoefSample[i];
    toStrm << endl;

    toStrm << "ABERR    0" << endl;
    toStrm << "ATMREF   0" << endl;
    toStrm << "PLATFORM   1" << endl;
    toStrm << "SOURCE_FLAG  1" << endl;
    toStrm << "SINGLE_EPHEMERIDE  0" << endl;

    //Note, for TRI_PARAMETERS, we print the first element separate from the rest so that the array
    //starts in the first column.  Otherwise, SOCET Set will treat the array as a comment
    toStrm << "TRI_PARAMETERS" << endl;
    toStrm << triParams[0];
    for (int i = 1; i < 18; i++) toStrm << " " << triParams[i];
    toStrm << endl;

    toStrm << setprecision(25) << "T_CENTER  ";
    double tCenter = 0.0;
//TO DO: UNCOMMENT THESE LINES ONCE HRSC IS WORKING IN SS
//  if (isHRSC) {
//    tCenter = etCenter - HRSCNadirCenterTime;
//    toStrm << tCenter << endl;
//  }
//  else
    toStrm << tCenter << endl;

    toStrm << "DT_EPHEM  " << dtEphem << endl;

    toStrm << "T0_EPHEM  ";
//TO DO: UNCOMMENT THESE LINES ONCE HRSC IS WORKING IN SS
//  if (isHRSC) {
//    double t = tCenter + t0Ephem;
//    toStrm << t << endl;
//  }
//  else
    toStrm << t0Ephem << endl;

    toStrm << "NUMBER_OF_EPHEM   " << numEphem << endl;

    toStrm << "EPHEM_PTS" << endl;
//TO DO: DELETE THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
    for (int i = 1; i <= numEphem; i++) {
//TO DO: UNCOMMENT THE FOLLOWING LINE WHEN VELOCITY BLOBS ARE CORRECT IN ISIS
        //for (int i = 0; i < numEphem; i++) {
        toStrm << " " << ephemPts[i][0];
        toStrm << " " << ephemPts[i][1];
        toStrm << " " << ephemPts[i][2] << endl;
    }

    toStrm  << "\n\nEPHEM_RATES" << endl;
    for (int i = 0; i < numEphem; i++) {
        toStrm << " " << ephemRates[i][0];
        toStrm << " " << ephemRates[i][1];
        toStrm << " " << ephemRates[i][2] << endl;
    }

    toStrm << "\n\nDT_QUAT " << dtQuat << endl;
    toStrm << "T0_QUAT " << qt0Quat << endl;
    toStrm << "NUMBER_OF_QUATERNIONS  " << numQuaternions << endl;
    toStrm << "QUATERNIONS" << endl;
    for (int i = 0; i < numQuaternions; i++) {
        toStrm << " " << quaternions[i][0];
        toStrm << " " << quaternions[i][1];
        toStrm << " " << quaternions[i][2];
        toStrm << " " << quaternions[i][3] << endl;
    }

    toStrm << "\n\nSCAN_DURATION " << scanDuration << endl;

    //  UNCOMMENT toStrm << "\nNUMBER_OF_INT_TIMES " << numIntTimes << endl;
    //
    //  if (isHRSC) {
    //    toStrm  << "INT_TIMES" << endl;
    //    for (int i = 0; i < numIntTimes; i++) {
    //      LineRateChange lr = lineRates.at(i);
    //      toStrm << " " << lr.GetStartEt();
    //      toStrm << " " << lr.GetLineScanRate();
    //      toStrm << " " << lr.GetStartLine() << endl;
    //    }
    //  }
    //  else
    toStrm << "INT_TIME " << intTime << endl;

    toStrm << "\nALONG_SCAN_PIXEL_SIZE  " << alongScanPxSize << endl;
    toStrm << "CROSS_SCAN_PIXEL_SIZE  " << crossScanPxSize << endl;

    toStrm << "\nCENTER_GP";
    for (int i = 0; i < 3; i++) toStrm << " " << centerGp[i];
    toStrm << endl;

    toStrm << "SENSOR_POSITION";
    for (int i = 0; i < 3; i++) toStrm << " " << sensorPosition[i];
    toStrm << endl;

    toStrm << "MOUNTING_ANGLES";
    double mountingAngles[3] = {0.0, 0.0, 0.0};
    for (int i = 0; i < 3; i++) toStrm << " " << mountingAngles[i];
    toStrm << endl;

    toStrm << "\nTOTAL_LINES " << totalLines << endl;
    toStrm << "TOTAL_SAMPLES " << totalSamples << endl;
    toStrm << "\n\n\n" << endl;

    toStrm << "IKCODE  " << ikCode << endl;
    toStrm << "ISIS_Z_DIRECTION  " << zDirection << endl;

    toStrm << "OPTICAL_DIST_COEF";
    for (int i = 0; i < 3; i++) toStrm << " " << opticalDistCoefs[i];
    toStrm << endl;

    toStrm << "ITRANSS";
    for (int i = 0; i < 3; i++) toStrm << " " << iTransS[i];
    toStrm << endl;

    toStrm << "ITRANSL";
    for (int i = 0; i < 3; i++) toStrm << " " << iTransL[i];
    toStrm << endl;

    toStrm << "DETECTOR_SAMPLE_ORIGIN " << detectorSampleOrigin << endl;
    toStrm << "DETECTOR_LINE_ORIGIN " << detectorLineOrigin << endl;
    toStrm << "DETECTOR_LINE_OFFSET  " << lineOffset << endl;
    toStrm << "DETECTOR_SAMPLE_SUMMING  " << sampleSumming << endl;

    toStrm << "STARTING_SAMPLE " << startingSample << endl;
    toStrm << "STARTING_LINE " << startingLine << endl;
    toStrm << "STARTING_EPHEMERIS_TIME " << setprecision(25) << etStart << endl;
    toStrm << "CENTER_EPHEMERIS_TIME " << etCenter << endl;

} // end main
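
The velocity workaround above (used while velocity blobs are unavailable) is a central difference over the padded ephemeris list: rate[i] = (pt[i+2] - pt[i]) / (2 * dtEphem). A compact sketch of that step, using std::vector in place of the QList containers; the function name is illustrative:

  #include <array>
  #include <vector>

  // Central-difference velocities from evenly spaced ephemeris points.
  // pts must contain at least numEphem + 2 entries (one extra on each side).
  std::vector<std::array<double, 3> >
  centralDifferenceRates(const std::vector<std::array<double, 3> > &pts,
                         int numEphem, double dtEphem) {
    std::vector<std::array<double, 3> > rates;
    double deltaTime = 2.0 * dtEphem;
    for (int i = 0; i < numEphem; i++) {
      std::array<double, 3> rate = { (pts[i + 2][0] - pts[i][0]) / deltaTime,
                                     (pts[i + 2][1] - pts[i][1]) / deltaTime,
                                     (pts[i + 2][2] - pts[i][2]) / deltaTime };
      rates.push_back(rate);
    }
    return rates;
  }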
Example #8
void IsisMain() {
  UserInterface &ui = Application::GetUserInterface();
  double  time0,//start time
          time1,//end time
          alti,  //altitude of the spacecraft
          fmc,  //forward motion compensation rad/sec
          horV,  //horizontal velocity km/sec
          radV,  //radial velocity km/sec
          rollV,//roll speed in rad/sec
          led;  //line exposure duration in seconds

  Cube  panCube;
  iTime  isisTime;
  QString iStrTEMP;

  int i,j,k,scFrameCode,insCode;

  QString mission;

  SpicePosition *spPos;
  SpiceRotation *spRot;

  //int nlines,nsamples,nbands;

  double deg2rad = acos(-1.0)/180.0;

  ProcessImport jp;
  FileName transFile("$apollo15/translations/apollopantranstable.trn");
  PvlTranslationTable transTable(transFile);
  PvlGroup kernels_pvlG;

  //scFrameCode and insCode from user input
  mission = ui.GetString("MISSION");
  if (mission == "APOLLO12") scFrameCode = -912000;
  if (mission == "APOLLO14") scFrameCode = -914000;
  if (mission == "APOLLO15") scFrameCode = -915000;
  if (mission == "APOLLO16") scFrameCode = -916000;
  if (mission == "APOLLO17") scFrameCode = -917000;

  insCode = scFrameCode - 230;

  try {
    panCube.open(ui.GetFileName("FROM"),"rw");
  }
  catch (IException &e) {
    throw IException(IException::User,
                     "Unable to open the file [" + ui.GetFileName("FROM") + "] as a cube.",
                     _FILEINFO_);
  }

  ////////////////////////////////////////////build the cube header instrument group
  PvlGroup inst_pvlG("Instrument");

  PvlKeyword keyword;

  //four that are the same for every panoramic mission
  keyword.setName("SpacecraftName");
  keyword.setValue(mission);
  inst_pvlG.addKeyword(keyword);

  keyword.setName("InstrumentName");
  keyword.setValue(transTable.Translate("InstrumentName","whatever"));
  inst_pvlG.addKeyword(keyword);

  keyword.setName("InstrumentId");
  keyword.setValue(transTable.Translate("InstrumentId","whatever"));
  inst_pvlG.addKeyword(keyword);

  keyword.setName("TargetName");
  keyword.setValue(transTable.Translate("TargetName","whatever"));
  inst_pvlG.addKeyword(keyword);

  //three that need to be calculated from input values
  horV = ui.GetDouble("VEL_HORIZ");
  radV = ui.GetDouble("VEL_RADIAL");
  alti = ui.GetDouble("CRAFT_ALTITUDE");

  //calculate the LineExposureDuration (led)
  if( ui.WasEntered("V/H_OVERRIDE") )
    fmc = ui.GetDouble("V/H_OVERRIDE")/1000.0;
  else
    //forward motion compensation is directly equivalent to V/H
    fmc = sqrt(horV*horV + radV*radV)/alti;  
  rollV = fmc*ROLLC;  //roll angular velocity is equal to V/H * constant    (units rad/sec)
  //led = rad/mm * sec/rad = radians(2.5)/FIDL / rollV    (final units: sec/mm)
  led = (2.5*acos(-1.0)/180.0)/rollV/FIDL;  

  //use led and the number of mm to determine the start and stop times
  isisTime = ui.GetString("GMT");

  //calculate starting and stopping times
  time0 = isisTime.Et() - led*FIDL*21.5;
  time1 = time0 + led*FIDL*43;

  isisTime = time0;
  keyword.setName("StartTime");
  keyword.setValue(iStrTEMP=isisTime.UTC());
  inst_pvlG.addKeyword(keyword);

  isisTime = time1;
  keyword.setName("StopTime");
  keyword.setValue(iStrTEMP=isisTime.UTC());
  inst_pvlG.addKeyword(keyword);

  keyword.setName("LineExposureDuration");
  //negate led to account for the anti-parallel time and line axes (units are sec/mm)
  keyword.setValue(iStrTEMP=toString(-led),"sec/mm");  
  inst_pvlG.addKeyword(keyword);

  panCube.putGroup(inst_pvlG);

  ///////////////////////////////////The Kernels group
  kernels_pvlG.setName("Kernels");
  kernels_pvlG.clear();

  keyword.setName("NaifFrameCode");
  keyword.setValue(toString(insCode));
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("LeapSecond");
  keyword.setValue( transTable.Translate("LeapSecond","File1") );
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("TargetAttitudeShape");
  keyword.setValue( transTable.Translate("TargetAttitudeShape", "File1") );
  keyword.addValue( transTable.Translate("TargetAttitudeShape", "File2") );
  keyword.addValue( transTable.Translate("TargetAttitudeShape", "File3") );
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("TargetPosition");
  keyword.setValue("Table");
  keyword.addValue( transTable.Translate("TargetPosition", "File1") );
  keyword.addValue( transTable.Translate("TargetPosition", "File2") );
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("ShapeModel");
  keyword.setValue( transTable.Translate("ShapeModel", "File1") );
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("InstrumentPointing");
  keyword.setValue("Table");
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("InstrumentPosition");
  keyword.setValue("Table");
  kernels_pvlG.addKeyword(keyword);

  keyword.setName("InstrumentAddendum");
  keyword.setValue( transTable.Translate("InstrumentAddendum",mission));
  kernels_pvlG.addKeyword(keyword);

  panCube.putGroup(kernels_pvlG);

  //Load all the kernels
  Load_Kernel(kernels_pvlG["TargetPosition"]);
  Load_Kernel(kernels_pvlG["TargetAttitudeShape"]);
  Load_Kernel(kernels_pvlG["LeapSecond"]);

  //////////////////////////////////////////attach a target rotation table
  char frameName[32];
  SpiceInt frameCode;
  SpiceBoolean found;
  //get the framecode from the body code (301=MOON)
  cidfrm_c(301, sizeof(frameName), &frameCode, frameName, &found);  
  if(!found) {
    QString naifTarget = QString("IAU_MOOM");
    namfrm_c(naifTarget.toAscii().data(), &frameCode);
    if(frameCode == 0) {
      QString msg = "Can not find NAIF code for [" + naifTarget + "]";
      throw IException(IException::Io, msg, _FILEINFO_);
    }
  }
  spRot = new SpiceRotation(frameCode);
  //create a table from starttime to endtime (stretched by 3%) with NODES entries
  spRot->LoadCache(time0-0.015*(time1-time0), time1+0.015*(time1-time0), NODES);  
  Table tableTargetRot = spRot->Cache("BodyRotation");
  tableTargetRot.Label() += PvlKeyword("Description", "Created by apollopaninit");
  panCube.write(tableTargetRot);


  //////////////////////////////////////////////////attach a sun position table
  spPos = new SpicePosition(10,301);  //position of the Sun (10) with respect to the Moon (301)
  //create a table from starttime to endtime (stretched by 3%) with NODES entries
  spPos->LoadCache(time0-0.015*(time1-time0), time1+0.015*(time1-time0), NODES);  
  Table tableSunPos = spPos->Cache("SunPosition");
  tableSunPos.Label() += PvlKeyword("SpkTableStartTime", toString(time0-0.015*(time1-time0)));
  tableSunPos.Label() += PvlKeyword("SpkTablleEndTime", toString(time1+0.015*(time1-time0)));
  tableSunPos.Label() += PvlKeyword("Description", "Created by apollopaninit");
  panCube.write(tableSunPos);  //attach the table to the cube


  /////////////Finding the principal scan line position and orientation
  //get the radii of the MOON
  SpiceInt tempRadii = 0;
  bodvcd_c(301,"RADII",3,&tempRadii,R_MOON);  //lunar radii in km; tempRadii receives the number of values returned
  double  omega,phi,kappa;

  std::vector<double> posSel;  //selenocentric position
  std::vector<double> sunPos;  //sunPosition used to transform to J2000
  std::vector<double> posJ20;  //camera position in J2000
  posSel.resize(3);
  sunPos.resize(3);
  posJ20.resize(3);

  double  temp,
          vel[3] = { 0.0, 0.0, 0.0 },  //the total velocity vector (combined horizontal and normal components) 
                   //  in km/sec
          M[3][3] = { { 0.0, 0.0, 0.0 },
                      { 0.0, 0.0, 0.0 },
                      { 0.0, 0.0, 0.0 } },    //rotation matrix
          zDir[] = { 0.0, 0.0, 1.0 },  //selenographic Z axis
          northPN[3]  = { 0.0, 0.0, 0.0 }, //normal to the plane containing all the north/south directions, 
                      //  that is plane containing 
                      //  the origin, the z axis, and the primary point of intersection
          northL[3] = { 0.0, 0.0, 0.0 },    //north direction vector in local horizontal plane
          azm[3] = { 0.0, 0.0, 0.0 },   //azimuth direction of the velocity vector in selenographic coordinates
          azmP[3] = { 0.0, 0.0, 0.0 },  //azm rotated (partially) and projected into the image plane
          norm[3] = { 0.0, 0.0, 0.0 },  //normal to the local horizontal plane
          look[3] = { 0.0, 0.0, 0.0 };  //unit direction vector in the principal camera look direction, 
                    //  parallel to the vector from the center of the moon through the spacecraft

  double  pos0[3] = { 0.0, 0.0, 0.0 },  //coordinate of the camera position
          pInt[3] = { 0.0, 0.0, 0.0 };  //coordinate of the principal intersection point

  /////////////////calculating the camera position for the center (principal scan line)
  pos0[1] = ui.GetDouble("LON_NADIR")*deg2rad;
  pos0[0] = ui.GetDouble("LAT_NADIR")*deg2rad;
  pos0[2] = ui.GetDouble("CRAFT_ALTITUDE");  //units are km
  Geographic2GeocentricLunar(pos0,pos0);    //function is written so the input can also be the 
                                            //  output

  /////////////////////calculating the camera orientation for the center (principal) scan line
  pInt[1] = ui.GetDouble("LON_INT")*deg2rad;
  pInt[0] = ui.GetDouble("LAT_INT")*deg2rad;
  pInt[2] = 0.0;
  Geographic2GeocentricLunar(pInt,pInt); //function is written so the input can also be the output
  //calculate the unit look direction vector in object space
  look[0] = -pos0[0] + pInt[0];
  look[1] = -pos0[1] + pInt[1];
  look[2] = -pos0[2] + pInt[2];
  temp = sqrt(look[0]*look[0] + look[1]*look[1] + look[2]*look[2]);
  look[0] /= temp;
  look[1] /= temp;
  look[2] /= temp;
  //the local normal vector is equal to pInt/|pInt|
  temp = sqrt(pInt[0]*pInt[0] + pInt[1]*pInt[1] + pInt[2]*pInt[2]);
  norm[0] = pInt[0]/temp;
  norm[1] = pInt[1]/temp;
  norm[2] = pInt[2]/temp;
  //omega and phi are defined so that M(phi)M(omega)look = [0 0 -1], leaving only the rotation 
  //  around the z axis to be found
  omega = -atan2(look[1], look[2]);  //omega rotation to zero look[1]
  phi   = atan2(-look[0], sin(omega)*look[1] - cos(omega)*look[2]);  //phi rotation to zero look[0]
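  //  (assuming the left-handed convention of MfromLeftEulers, the rotation about x by omega sends 
  //  the y component of look to cos(omega)*look[1] + sin(omega)*look[2] = 0, and the subsequent 
  //  rotation about y by phi then zeroes the x component while driving the z component to -1)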
  //use the horizontal velocity vector direction to solve for the last rotation; we will make the 
  //  image x axis parallel to the in-image-plane projection of the horizontal direction of flight.
  //  The local normal crossed with the selenographic z axis gives northPN, the normal to the plane 
  //  containing all the north/south directions, that is, the plane containing the origin, the 
  //  z axis, and the primary point of intersection.
  crossp(norm, zDir, northPN);
  //northPN crossed with the local normal gives the local north/south direction in the local 
  //  horizontal plane
  crossp(northPN, norm, northL);
  if (northL[2] < 0) {  //if by chance we got the south direction, change the signs
    northL[0] = -northL[0];
    northL[1] = -northL[1];
    northL[2] = -northL[2];
  }
  //define the rotation matrix to convert northL to the azimuth of flight.
  //  A left handed rotation of "VEL_AZM" around the positive normal direction will convert northL 
  //  to azm
  MfromVecLeftAngle(M,norm,ui.GetDouble("VEL_AZM")*deg2rad);    
  azm[0] = M[0][0]*northL[0] + M[0][1]*northL[1] + M[0][2]*northL[2];
  azm[1] = M[1][0]*northL[0] + M[1][1]*northL[1] + M[1][2]*northL[2];
  azm[2] = M[2][0]*northL[0] + M[2][1]*northL[1] + M[2][2]*northL[2];
  //apply the two rotations we already know
  MfromLeftEulers(M,omega,phi,0.0);
  azmP[0] = M[0][0]*azm[0] + M[0][1]*azm[1] + M[0][2]*azm[2];
  azmP[1] = M[1][0]*azm[0] + M[1][1]*azm[1] + M[1][2]*azm[2];
  azmP[2] = M[2][0]*azm[0] + M[2][1]*azm[1] + M[2][2]*azm[2];
  //subtract the portion of azm that is perpendicular to the image plane (also the portion 
  //  which is parallel to look), making azm a vector parallel to the image plane.
  //  Further, since we're now rotated into a coordinate system that differs from 
  //  the image coordinate system by only a kappa rotation, making the vector parallel to the 
  //  image plane is as simple as zeroing the z component (and as pointless to further calculations 
  //  as a gnat's fart in a hurricane); nevertheless it completes the logical transition
  azmP[2] = 0.0;  

  //finally the kappa rotation that will make azmP parallel (including sign) to the camera x axis                  
  kappa = -atan2(-azmP[1], azmP[0]);  
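  //  (a left-handed rotation about the z axis by kappa then carries azmP onto the positive image x axis)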


  ////////////////////Add an instrument position table
  //Define the table records
  TableRecord recordPos;  //record to be added to the table
  // add x,y,z position labels and ephemeris time et to record
  TableField x("J2000X", TableField::Double);  
  TableField y("J2000Y", TableField::Double);
  TableField z("J2000Z", TableField::Double);
  TableField t("ET", TableField::Double);
  recordPos += x;
  recordPos += y;
  recordPos += z;
  recordPos += t;
  Table tablePos("InstrumentPosition", recordPos);
  //now that the azm and norm vectors are defined 
  //  the total velocity vector can be calculated (km/sec)
  vel[0] = horV*azm[0] + radV * norm[0];
  vel[1] = horV*azm[1] + radV * norm[1];
  vel[2] = horV*azm[2] + radV * norm[2];
  //we'll provide a two element table (more is redundant because the motion is modeled as linear 
  //  at this point); we'll extend the nodes 1.5% beyond each edge of the image (3% total) to be 
  //  sure rounding errors don't cause problems
  temp = 0.515*(time1-time0);  //half the image duration plus the 1.5% extension
  posSel[0] = pos0[0] - temp*vel[0];    //selenocentric coordinate calculation
  posSel[1] = pos0[1] - temp*vel[1];
  posSel[2] = pos0[2] - temp*vel[2];
  //converting to J2000
  temp = time0 - 0.015*(time1-time0);  //et just before the first scan line
  spPos->SetEphemerisTime(temp);
  spRot->SetEphemerisTime(temp);
  //Despite being labeled as J2000, the coordinates for the instrument position are in fact 
  //  target centric coordinates rotated to a system centered at the target with axes parallel 
  //  to J2000
  posJ20 = spRot->J2000Vector(posSel); //J2000Vector rotates the position vector into J2000,
                                       //  completing the transformation
  recordPos[0] = posJ20[0];
  recordPos[1] = posJ20[1];
  recordPos[2] = posJ20[2];
  recordPos[3] = temp;  //temp = et (right now anyway)
  tablePos += recordPos;
  tablePos.Label() += PvlKeyword("SpkTableStartTime",toString(temp));
  //now the other node
  temp = 0.515*(time1-time0);  //half the image duration plus the 1.5% extension
  posSel[0] = pos0[0] + temp*vel[0];    //selenocentric coordinate calculation
  posSel[1] = pos0[1] + temp*vel[1];
  posSel[2] = pos0[2] + temp*vel[2];
  //converting to J2000
  temp = time1 + 0.015*(time1-time0);  //et just after the last scan line
  spPos->SetEphemerisTime(temp);
  spRot->SetEphemerisTime(temp);
  //Despite being labeled as J2000, the coordinates for the instrument position are in fact 
  //  target centric coordinates rotated to a system centered at the target with axes 
  //  parallel to J2000
  posJ20 = spRot->J2000Vector(posSel); //J2000Vector rotates the position vector into J2000,
                                       //  completing the transformation
  recordPos[0] = posJ20[0];
  recordPos[1] = posJ20[1];
  recordPos[2] = posJ20[2];
  recordPos[3] = temp;  //temp = et (right now anyway)
  tablePos += recordPos;
  tablePos.Label() += PvlKeyword("SpkTableEndTime",toString(temp));
  tablePos.Label() += PvlKeyword("CacheType","Linear");
  tablePos.Label() += PvlKeyword("Description","Created by apollopaninit");
  panCube.write(tablePos);  //attach the table to the cube

  /////////////////////////////attach a camera pointing table
  double  cacheSlope,  //time between epochs in the table
          rollComb,  //magnitude of roll relative to the center in the middle of the epoch
          relT,  //relative time at the center of each epoch
          Q[NODES][5],  //NODES four-element unit quaternions and et (to be calculated)
          gimVec[3],  //the direction of the gimbal rotation vector (from the camera's perspective 
                      //  this is always changing because the camera is mounted to the roll frame 
                      //  assembly which is mounted to the gimbal)
          M0[3][3],  //rotation matrix of the previous epoch
          Mtemp1[3][3],  //intermediate step in the multiplication of rotation matrices
          Mtemp2[3][3],  //intermediate step in the multiplication of rotation matrices
          Mdg[3][3],  //incremental rotation due to the gimbal motion in the camera frame
          Mdr[3][3];  //the contribution of the roll motion in the camera frame during time 
                      //  cacheSlope
  std::vector <double> M_J2toT;  //rotation matrix from J2000 to the target frame
  M_J2toT.resize(9);
  //Table Definition
  TableField q0("J2000Q0", TableField::Double);
  TableField q1("J2000Q1", TableField::Double);
  TableField q2("J2000Q2", TableField::Double);
  TableField q3("J2000Q3", TableField::Double);
  TableField et("ET", TableField::Double);
  TableRecord recordRot;
  recordRot += q0;
  recordRot += q1;
  recordRot += q2;
  recordRot += q3;
  recordRot += et;
  Table tableRot("InstrumentPointing",recordRot);
  //From the camera's perspective the gimbal motion is around a constantly changing axis; 
  //  this is handled by combining a series of incremental rotations
  MfromLeftEulers(M0, omega, phi, kappa);  //rotation matrix of the center scan line, Q[(NODES-1)/2]
  spRot->SetEphemerisTime(isisTime.Et());
  M_J2toT = spRot->Matrix();   //this actually gives the rotation from J2000 to target centric
  for(j=0; j<3; j++)    //reformatting M_J2toT to a 3x3
    for(k=0; k<3; k++)
      Mtemp1[j][k] = M_J2toT[3*j+k];
  mxm_c(M0, Mtemp1, Mtemp2);
  M2Q(Mtemp2, Q[(NODES-1)/2]);  //save the middle scan line quaternion

  Q[(NODES-1)/2][4] = (time1 + time0)/2.0;  //time in the center of the image
  //the total time is scaled up slightly so that nodes will extend just beyond the edge of the image
  cacheSlope = 1.03*(time1 - time0)/(NODES-1);    
  //Mdr is constant for all the forward time computations
  MfromLeftEulers(Mdr,cacheSlope*rollV,0.0,0.0);  
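  //  (the roll advances by the same angle, rollV*cacheSlope, during every epoch, so a single 
  //  incremental roll rotation can be reused at each step of the loop below)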
  for (i=(NODES-1)/2+1; i<NODES; i++) {    //moving forward in time first
    Q[i][4] = Q[i-1][4] + cacheSlope;    //new time epoch
    //epoch center time relative to the center line
    relT = double(i - (NODES-1)/2 - 0.5)*cacheSlope;  
    rollComb = relT*rollV;
    gimVec[0] = 0.0;      //gimbal rotation vector direction in the middle of the epoch
    gimVec[1] =  cos(rollComb);
    gimVec[2] = -sin(rollComb);
    //incremental rotation due to the gimbal (forward motion compensation)
    MfromVecLeftAngle(Mdg, gimVec, fmc*cacheSlope);    
    //the new rotation matrix is Transpose(Mdr)*Transpose(Mdg)*M0--NOTE the order swap and 
    //  transposes are needed because both Mdr and Mdg were calculated in image space and need to be 
    //  transposed to apply to object space
    mtxm_c(Mdg, M0, Mtemp1);  
    //M0 is now what would typically be considered the rotation matrix of an image.  It rotates a 
    //  vector from the target centric space into camera space.  However, what is standard to 
    //  include in the cube labels is a rotation from camera space to J2000.  M0 is therefore the 
    //  transpose of the first part of this rotation.  Transpose(M0) is the rotation from camera 
    //  space to target centric space
    mtxm_c(Mdr, Mtemp1, M0);  
    //now adding the rotation from the target frame to J2000
    spRot->SetEphemerisTime(Q[i][4]);
    //this actually gives the rotation from J2000 to target centric, so it can be combined with M0 
    //  directly by the mxm_c call below
    M_J2toT = spRot->Matrix();   
    for(j=0; j<3; j++)  //reformatting M_J2toT to a 3x3
      for(k=0; k<3; k++)
        Mtemp1[j][k] = M_J2toT[3*j+k];
    mxm_c(M0, Mtemp1, Mtemp2);
    M2Q(Mtemp2, Q[i]);    //convert to a quaternion
  }

  MfromLeftEulers(M0, omega, phi, kappa);  //rotation matrix of the center scan line, Q[(NODES-1)/2]
  //Mdr is constant for all the backward time computations
  MfromLeftEulers(Mdr, -cacheSlope*rollV, 0.0, 0.0);    
  for (i=(NODES-1)/2-1; i>=0; i--) {  //moving backward in time
    Q[i][4] = Q[i+1][4] - cacheSlope;  //new time epoch
    //epoch center time relative to the center line
    relT = double(i  - (NODES-1)/2 + 0.5)*cacheSlope;  
    rollComb = relT*rollV;
    gimVec[0] = 0.0;      //gimbal rotation vector direction in the middle of the epoch
    gimVec[1] =  cos(rollComb);
    gimVec[2] = -sin(rollComb);
    //incremental rotation due to the gimbal (forward motion compensation)
    MfromVecLeftAngle(Mdg, gimVec, -fmc*cacheSlope);    
    //the new rotation matrix is Transpose(Mdr)*Transpose(Mdg)*M0--NOTE the order swap and 
    //  transposes are needed because both Mdr and Mdg were calculated in image space and need to be
    //  transposed to apply to object space
    mtxm_c(Mdg, M0, Mtemp1);  
    //M0 is now what would typically be considered the rotation matrix of an image.  It rotates a 
    //  vector from the target centric space into camera space.  However, what is standard to 
    //  include in the cube labels is a rotation from camera space to J2000.  M0 is therefore the 
    //  transpose of the first part of this rotation.  Transpose(M0) is the rotation from camera 
    //  space to target centric space
    mtxm_c(Mdr, Mtemp1, M0);  
    //now adding the rotation from the target frame to J2000
    spRot->SetEphemerisTime(Q[i][4]);
    M_J2toT = spRot->Matrix();
    for(j=0; j<3; j++)  //reformatting M_J2toT to a 3x3
      for(k=0; k<3; k++)
        Mtemp1[j][k] = M_J2toT[3*j+k];
    mxm_c(M0, Mtemp1, Mtemp2);
    M2Q(Mtemp2, Q[i]);    //convert to a quaternion
  }
  //fill in the table
  for (i=0; i<NODES; i++) {
    recordRot[0] = Q[i][0];
    recordRot[1] = Q[i][1];
    recordRot[2] = Q[i][2];
    recordRot[3] = Q[i][3];
    recordRot[4] = Q[i][4];
    tableRot += recordRot;
  }
  tableRot.Label() += PvlKeyword("CkTableStartTime", toString(Q[0][4]));
  tableRot.Label() += PvlKeyword("CkTableEndTime", toString(Q[NODES-1][4]));
  tableRot.Label() += PvlKeyword("Description", "Created by appollopan2isis");

  keyword.setName("TimeDependentFrames");
  keyword.setValue(toString(scFrameCode));
  keyword.addValue("1");
  tableRot.Label() += keyword;

  keyword.setName("ConstantFrames");
  keyword.setValue(toString(insCode));
  keyword.addValue(toString(scFrameCode));
  tableRot.Label() += keyword;

  keyword.setName("ConstantRotation");
  keyword.setValue("1");
  for (i=1;i<9;i++)
    if (i%4 == 0) keyword.addValue("1");
    else keyword.addValue("0");
  tableRot.Label() += keyword;
  panCube.write(tableRot);


  /////////////////////////Attach a table with all the measurements of the fiducial mark locations.
  Chip patternS,searchS;   //scaled pattern and search chips
  Cube  fidC;  //Fiducial image

  //line and sample coordinates for looping through the panCube
  double l=1,s=1,sample,line,sampleInitial=1,lineInitial=1,play;  

  int  regStatus,
       fidn,
       panS,
       refL,  //number of lines in the patternS
       refS;  //number of samples in the patternS
  Pvl pvl;

  bool foundFirst=false;

  QString fileName;

  panS = panCube.sampleCount();

  //Table definition
  TableRecord recordFid;
  TableField indexFid("FID_INEX",TableField::Integer);
  TableField xFid("X_COORD",TableField::Double);
  TableField yFid("Y_COORD",TableField::Double);
  recordFid += indexFid;
  recordFid += xFid;
  recordFid += yFid;
  Table tableFid("Fiducial Measurement",recordFid);

  //read the image resolution and scale the constants accordingly
  double  resolution = ui.GetDouble("MICRONS"),    //pixel size in microns
          scale            = SCALE  *5.0/resolution,  //reduction scale for fast autoregistrations
          searchHeight     = SEARCHh*5.0/resolution,  //number of lines (in 5-micron-pixels) in 
                                                      //  search space for the first fiducial
          searchCellSize   = SEARCHc*5.0/resolution,  //height/width of search chips block
          averageSamples   = AVERs  *5.0/resolution,  //scaled sample spacing between fiducials
          averageLines     = AVERl  *5.0/resolution;  //scaled average distance between the top and 
                                                      //bottom fiducials

  if( 15.0/resolution < 1.5) play=1.5;
  else play = 15.0/resolution; 

  //copy the patternS chip (the entire ApolloPanFiducialMark.cub)
  FileName fiducialFileName("$apollo15/calibration/ApolloPanFiducialMark.cub");
  fidC.open(fiducialFileName.expanded(),"r");
  if( !fidC.isOpen() ) {
    QString msg = "Unable to open the fiducial patternS cube: ApolloPanFiducialMark.cub\n";
    throw IException(IException::User, msg, _FILEINFO_);
  }
  refL = fidC.lineCount();
  refS = fidC.sampleCount();
  //scaled pattern chip for fast matching
  patternS.SetSize(int((refS-2)/SCALE), int((refL-2)/SCALE));  
  patternS.TackCube((refS-1)/2, (refL-1)/2);
  patternS.Load(fidC, 0, SCALE);

  //parameters for maximum correlation autoregistration  
  // see:  file:///usgs/pkgs/isis3nightly2011-09-21/isis/doc/documents/patternMatch/patternMatch.html#DistanceTolerance
  FileName fiducialPvl("$apollo15/templates/apolloPanFiducialFinder.pvl");
  pvl.read(fiducialPvl.expanded());  //read in the autoreg parameters
  AutoReg *arS = AutoRegFactory::Create(pvl);

  *arS->PatternChip()   = patternS;  //patternS chip is constant

  //set up a centroid measurer
  CentroidApolloPan centroid(resolution);
  Chip inputChip,selectionChip;
  inputChip.SetSize(int(ceil(200*5.0/resolution)), int(ceil(200*5.0/resolution)));
  fileName = ui.GetFileName("FROM");
  if( panCube.pixelType() == 1)  //UnsignedByte
    centroid.setDNRange(12, 1e99);  //8 bit bright target
  else
    centroid.setDNRange(3500, 1e99);  //16 bit bright target

  Progress progress;
  progress.SetText("Locating Fiducials");
  progress.SetMaximumSteps(91);

  //Search for the first fiducial; search sizes are constant
  searchS.SetSize(int(searchCellSize/scale),int(searchCellSize/scale));  
  //now start searching along a horizontal line for the first fiducial mark
  for(l = searchCellSize/2;
      l<searchHeight+searchCellSize/2.0 && !foundFirst;
      l+=searchCellSize-125*5.0/resolution) {
    for (s = searchCellSize/2;
         s < averageSamples + searchCellSize/2.0 && !foundFirst;
         s += searchCellSize-125*5.0/resolution) {
      searchS.TackCube(s, l);
      searchS.Load(panCube, 0, scale);
      *arS->SearchChip() = searchS;
      regStatus = arS->Register();
      if (regStatus == AutoReg::SuccessPixel) {
        inputChip.TackCube(arS->CubeSample(), arS->CubeLine());
        inputChip.Load(panCube, 0, 1);
        inputChip.SetCubePosition(arS->CubeSample(), arS->CubeLine());
        //continuous dynamic range selection
        centroid.selectAdaptive(&inputChip, &selectionChip);    
        //elliptical trimming/smoothing
        if (centroid.elipticalReduction(&selectionChip, 95, play, 2000)) {  
          //center of mass to reduce selection to a single measure
          centroid.centerOfMass(&selectionChip, &sample, &line);    
          inputChip.SetChipPosition(sample, line);
          sampleInitial = inputChip.CubeSample();
          lineInitial   = inputChip.CubeLine();
          foundFirst = true;  //once the first fiducial is found stop
        }
      }
    }
  }
  if (!foundFirst) {
     QString msg = "Unable to locate a fiducial mark in the input cube [" + fileName + 
                  "].  Check the FROM and MICRONS parameters.";
     throw IException(IException::Io, msg, _FILEINFO_);
  }
  progress.CheckStatus();

  //record first fiducial measurement in the table
  recordFid[0] = 0;
  recordFid[1] = sampleInitial;
  recordFid[2] = lineInitial;
  tableFid += recordFid;
  for (s= sampleInitial, l=lineInitial, fidn=0;  s<panS;  s+=averageSamples, fidn++) {
     //corrections for half spacing of center fiducials
     if (fidn == 22) s -= averageSamples/2.0;
     if (fidn == 23) s -= averageSamples/2.0;

     //look for the bottom fiducial
     searchS.TackCube(s,l+averageLines);
     searchS.Load(panCube, 0, scale);
     *arS->SearchChip()   = searchS;
     regStatus = arS->Register();
     if (regStatus == AutoReg::SuccessPixel) {
       inputChip.TackCube(arS->CubeSample(), arS->CubeLine());
       inputChip.Load(panCube,0,1);
       inputChip.SetCubePosition(arS->CubeSample(), arS->CubeLine());
     }
     else {  //if autoreg is unsuccessful, a larger window will be used
       inputChip.TackCube(s, l+averageLines);
       inputChip.Load(panCube, 0, 1);
       inputChip.SetCubePosition(s, l+averageLines);
     }
     centroid.selectAdaptive(&inputChip, &selectionChip);  //continuous dynamic range selection
     //elliptical trimming/smoothing... if this fails move on
     if (centroid.elipticalReduction(&selectionChip, 95, play, 2000) != 0 ) {      
       //center of mass to reduce selection to a single measure
       centroid.centerOfMass(&selectionChip, &sample, &line);      
       inputChip.SetChipPosition(sample, line);
       sample = inputChip.CubeSample();
       line   = inputChip.CubeLine();
       recordFid[0] = fidn*2+1;
       recordFid[1] = sample;
       recordFid[2] = line;
       tableFid += recordFid;
     }
     progress.CheckStatus();

     //look for the top fiducial
     if (s == sampleInitial) //first time through the loop?
       continue;  //then the top fiducial was already found
     searchS.TackCube(s, l);
     searchS.Load(panCube, 0, scale);
     *arS->SearchChip()   = searchS;
     regStatus = arS->Register();
     if (regStatus == AutoReg::SuccessPixel) {
       inputChip.TackCube(arS->CubeSample(), arS->CubeLine());
       inputChip.Load(panCube, 0, 1);
       inputChip.SetCubePosition(arS->CubeSample(), arS->CubeLine());
     }
     else {  //if autoreg is unsuccessful, a larger window will be used
       inputChip.TackCube(s, l);
       inputChip.Load(panCube, 0, 1);
       inputChip.SetCubePosition(s, l);
     }
     centroid.selectAdaptive(&inputChip, &selectionChip);//continuous dynamic range selection
     //inputChip.Write("inputTemp.cub");//debug
     //selectionChip.Write("selectionTemp.cub");//debug
     //elliptical trimming/smoothing... if this fails move on
     if (centroid.elipticalReduction(&selectionChip, 95, play, 2000) !=0) {    
       //center of mass to reduce selection to a single measure
       centroid.centerOfMass(&selectionChip, &sample, &line);  
       inputChip.SetChipPosition(sample, line);
       //when finding the top fiducial both s and l are refined for a successful measurement; 
       //  this will help follow trends in the scanned image
       s = inputChip.CubeSample(); 
       l = inputChip.CubeLine();
       recordFid[0] = fidn*2;
       recordFid[1] = s;
       recordFid[2] = l;
       tableFid += recordFid;
     }
     progress.CheckStatus();
  }

  panCube.write(tableFid);
  //close the new cube
  panCube.close(false);
  panCube.open(ui.GetFileName("FROM"),"rw");
 
  delete spPos;
  delete spRot;

  //now instantiate a camera to make sure all of this is working
  ApolloPanoramicCamera* cam = (ApolloPanoramicCamera*)(panCube.camera());
  //log the residual report from interior orientation 
  PvlGroup residualStats("InteriorOrientationStats");
  residualStats += PvlKeyword("FiducialsFound",  toString(tableFid.Records()));
  residualStats += PvlKeyword("ResidualMax",  toString(cam->intOriResidualMax()),"pixels");
  residualStats += PvlKeyword("ResidualMean", toString(cam->intOriResidualMean()),"pixels");
  residualStats += PvlKeyword("ResidualStdev", toString(cam->intOriResidualStdev()),"pixels");

  Application::Log( residualStats ); 


  return;
}
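
The omega/phi decomposition used above can be checked in isolation. Below is a minimal standalone sketch (not part of apollopaninit, independent of ISIS and SPICE, and using made-up input values) that applies the same two formulas to a unit look vector and verifies that left-handed rotations about the x and then the y axis carry it onto [0 0 -1], leaving only the kappa rotation about the z axis. The left-handed rotation helpers here are assumptions chosen to be consistent with those formulas; they are not the MfromLeftEulers helper itself.

#include <cmath>
#include <cstdio>

//left-handed (clockwise) rotation about the x axis applied to a 3-vector
static void leftRotX(double a, const double v[3], double out[3]) {
  out[0] = v[0];
  out[1] =  cos(a)*v[1] + sin(a)*v[2];
  out[2] = -sin(a)*v[1] + cos(a)*v[2];
}

//left-handed (clockwise) rotation about the y axis applied to a 3-vector
static void leftRotY(double a, const double v[3], double out[3]) {
  out[0] = cos(a)*v[0] - sin(a)*v[2];
  out[1] = v[1];
  out[2] = sin(a)*v[0] + cos(a)*v[2];
}

int main() {
  //hypothetical camera position and ground intersection point (body-fixed, km)
  double pos0[3] = { 1700.0, 300.0, 600.0 };
  double pInt[3] = { 1600.0, 280.0, 560.0 };

  //unit look vector from the camera toward the intersection point
  double look[3] = { pInt[0]-pos0[0], pInt[1]-pos0[1], pInt[2]-pos0[2] };
  double norm = sqrt(look[0]*look[0] + look[1]*look[1] + look[2]*look[2]);
  for (int i = 0; i < 3; i++) look[i] /= norm;

  //same formulas as in the routine above
  double omega = -atan2(look[1], look[2]);
  double phi   =  atan2(-look[0], sin(omega)*look[1] - cos(omega)*look[2]);

  //apply the two rotations; the result should be approximately [0 0 -1]
  double tmp[3], res[3];
  leftRotX(omega, look, tmp);
  leftRotY(phi, tmp, res);
  printf("rotated look = [%.6f %.6f %.6f]\n", res[0], res[1], res[2]);
  return 0;
}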