/**
eigentrain

Computes the eigenspace for a matrix of training images using the snapshot
method. This function is used in the training procedure of the PCA face
recognition algorithm.

INPUT:  images     - the data matrix, one training image per column
OUTPUT: mean       - the mean image of the training set
        eigen_vals - the eigenvalues, in descending order
        eigen_base - the corresponding eigenvectors (the eigenbasis)

Side effect: the data matrix is mean centered in place.
*/
void eigentrain(Matrix *mean, Matrix *eigen_vals,
                Matrix *eigen_base, Matrix images) 
{
    double p = 0.0;
    Matrix M, eigenvectors;

    DEBUG(1, "Calculating mean image.");
    *mean = get_mean_image(images);

    DEBUG(1, "Calculating the mean centered images for the training set.");
    mean_subtract_images(images, *mean);

    MESSAGE2ARG("Calculating Covariance Matrix: M = images' * images. M is a %d by %d Matrix.", images->col_dim, images->col_dim);
    M = transposeMultiplyMatrixL(images, images);
    DEBUG_INT(3, "Covariance Matrix Rows", M->row_dim);
    DEBUG_INT(3, "Covariance Matrix Cols", M->col_dim);

    DEBUG(2, "Allocating memory for eigenvectors and eigenvalues.");
    eigenvectors = makeMatrix(M->row_dim, M->col_dim);
    *eigen_vals = makeMatrix(M->row_dim, 1);


    MESSAGE("Computing snap shot eigen vectors using the double precision cv eigensolver.");

    cvJacobiEigens_64d(M->data, eigenvectors->data, (*eigen_vals)->data, images->col_dim, p, 1);

    freeMatrix(M);

    DEBUG(1, "Verifying the eigen vectors");
    if (debuglevel >= 3) {
        /* Reconstruct M, because cvJacobiEigens_64d destroys its input,
           but only when the verification will actually run. */
        M = transposeMultiplyMatrixL(images, images);
        eigen_verify(M, *eigen_vals, eigenvectors);
        freeMatrix(M);
    }

    *eigen_base = multiplyMatrix(images, eigenvectors);
    MESSAGE2ARG("Recovered the %d by %d high resolution eigen basis.", (*eigen_base)->row_dim, (*eigen_base)->col_dim);

    DEBUG(1, "Normalizing eigen basis");
    basis_normalize(*eigen_base);

    /* Remove the last element because it is not needed. Mean centering the
    images guarantees that the k data points sum to zero, so they are
    linearly dependent and lie in a hyperplane through the origin. They
    therefore span at most a k - 1 dimensional subspace, and the final
    eigenvalue is numerically zero.
    */
    (*eigen_base)->col_dim -= 1;
    (*eigen_vals)->row_dim -= 1;
    eigenvectors->col_dim -= 1;

    DEBUG(1, "Verifying eigenbasis");
    if (debuglevel >= 3)
        basis_verify(images, *eigen_base);

    /* The eigenvectors of the smaller (snapshot) covariance matrix are no longer needed */
    freeMatrix(eigenvectors);
}
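
/*
 A minimal calling sketch for eigentrain (a hypothetical driver, not part of
 the original API). It assumes images is a pixels by N data matrix with one
 training image per column, as eigentrain expects, and it uses only helpers
 that appear elsewhere in this file.
*/
static void eigentrainExample(Matrix images)
{
    Matrix mean, vals, basis, coeffs;

    /* Train; note that images is mean centered in place as a side effect. */
    eigentrain(&mean, &vals, &basis, images);

    /* Project the (now centered) training images into the subspace:
       coeffs = basis' * images is a (k-1) by N matrix of PCA coefficients. */
    coeffs = transposeMultiplyMatrixL(basis, images);

    freeMatrix(coeffs);
    freeMatrix(basis);
    freeMatrix(vals);
    freeMatrix(mean);
}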
void
subspaceTrain (Subspace *s, Matrix images, ImageList *srt, int numSubjects, int dropNVectors, CutOffMode cutOffMode, double cutOff, int useLDA, int writeTextInterm
               /*START Changed by Zeeshan: For LPP*/
               , int useLPP, int neighbourCount, int useAdaptiveK, int lppKeepNVectors, char* lppDistance
               /*END Changed by Zeeshan: For LPP*/
               /*START Changed by Zeeshan: For ICA*/
               , int useICA, int arch, double learningRate, int blockSize, int iterations
               /*END Changed by Zeeshan: For ICA*/
               )
{
    int i;
    Matrix m;
    int n = 0;    /* The number of eigen vectors to keep */
    double total_energy, energy;
    Matrix tmp;

    /* Initialize structure */

    s->useLDA         = useLDA;
    s->cutOffMode     = cutOffMode;
    s->cutOff         = cutOff;
    s->dropNVectors   = dropNVectors;

    s->numSubjects    = numSubjects;
    s->numPixels      = images->row_dim;

    /*START Changed by Zeeshan: For LPP*/
    s->useLPP          = useLPP;
    s->neighbourCount  = neighbourCount;
    s->lppDistance     = strdup(lppDistance);
    s->lppKeepNVectors = lppKeepNVectors;
    s->useAdaptiveK    = useAdaptiveK;
    /*END Changed by Zeeshan: For LPP*/

    /*START Changed by Zeeshan: For ICA*/
    s->useICA       = useICA;
    s->arch         = arch;
    s->ica2Basis    = NULL;
    s->learningRate = learningRate;
    s->blockSize    = blockSize;
    s->iterations   = iterations;
    /*END Changed by Zeeshan: For ICA*/

    /*START Changed by Zeeshan: For ICA & LPP*/
    /*********************************************************************
    * STEP ZERO: Make sure at most one of LDA, LPP and ICA is enabled
    ********************************************************************/
    DEBUG_CHECK (!(s->useLDA && s->useLPP) && !(s->useLDA && s->useICA) && !(s->useICA && s->useLPP), 
        "LDA, LPP and ICA are mutually exclusive; enable at most one.");
    /*END Changed by Zeeshan: For ICA & LPP*/


    /*********************************************************************
    * STEP ONE: Calculate the eigenbasis
    ********************************************************************/

    /* Compute the Eigenvalues and Eigenvectors for the covariance matrix
    derived from the images data matrix. The image data is "centered", meaning
    the mean image is subtracted from all images before PCA is performed.
    This centering is done in place, so after this call images are centered. */

    MESSAGE("Computing the PCA eigenspace.");

    eigentrain (&s->mean, &s->values, &s->basis, images);

    MESSAGE("Finished computing eigenspace.");

    /* Numerical roundoff errors may lead to small negative values.
    Strip those before saving the matrix. */

    m = s->values;
    for (i = 0; i < m->row_dim; i++)
    {
        if (ME(m,i,0) < 0) {
            if (ME(m,i,0) < -1e-10)
                printf("WARNING: Large negative eigenvalue found %f. Truncating to zero.\n", ME(m,i,0));
            ME(m,i,0) = 0;
        }
    }

    /*********************************************************************
    * STEP TWO: Drop eigenvectors from the front or truncate them from
    * the back
    ********************************************************************/

    /* 
    The following filters the vectors that are retained after PCA training.
    The function first optionally removes vectors from the front of the
    matrix based on the argument -dropNVectors. This argument is always
    interpreted as an absolute number, i.e. a value of 1 means drop the
    first vector, a value of 3 means drop the first three. The function
    then drops vectors from the tail based on the -cutOffMode and -cutOff
    arguments, where the mode controls how the cutoff is performed. The
    possible modes are:

    NONE:    Keep all remaining eigenvectors.
    SIMPLE:  Keep a percentage of the vectors, where the percentage is
             specified by cutOff.
    ENERGY:  Keep the fewest vectors such that the sum of their energy just
             exceeds cutOff percent of the total energy (see the worked
             example after this comment).
    STRETCH: Keep all eigenvectors whose eigenvalues are greater than a
             percentage of the largest, where the percentage is specified
             by cutOff.
    CLASSES: Keep as many eigenvectors as there are LDA classes.
    DROPVEC: Keep all but cutOff vectors, i.e. col_dim - cutOff.

    For both ENERGY and STRETCH, eigenvalues/vectors dropped from the front
    are not included in the determination of the total energy or the
    maximum eigenvalue.
    */
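
    /* Worked ENERGY example (illustrative numbers): suppose 10 eigenvalues
       summing to 100.0 and -cutOff 90. Vectors are kept while the running
       eigenvalue sum is below 90.0; if the first four values are 50, 25,
       10 and 6 (running sum 91), the loop stops after the fourth, so n = 4. */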

    /* Drop the first vectors  */

    DEBUG_CHECK (s->dropNVectors < (s->basis)->col_dim, "The number of vectors to drop must be less than the number of eigenvectors");

    /* transpose eigenValues for use in this function */

    tmp = transposeMatrix (s->values);
    freeMatrix (s->values);
    s->values = tmp;

    if (s->dropNVectors && (s->dropNVectors < (s->values)->col_dim))
    {
        tmp = matrixCols (s->basis, s->dropNVectors, (s->basis)->col_dim-1);
        freeMatrix (s->basis);
        s->basis = tmp;

        tmp = matrixCols (s->values, s->dropNVectors, (s->values)->col_dim-1);
        freeMatrix (s->values);
        s->values = tmp;
    }

    /* transpose the eigenValues back to the original order. */

    tmp = transposeMatrix (s->values);
    freeMatrix (s->values);
    s->values = tmp;

    DEBUG_CHECK((s->values)->row_dim - s->dropNVectors > 0, "Too many eigenvectors dropped from the front. Cannot proceed.");

    switch (s->cutOffMode)
    {
    case CUTOFF_NONE:
        n = (s->basis)->col_dim;
        break;

    case CUTOFF_SIMPLE:
        n = (int)((s->basis)->col_dim * s->cutOff / 100.0);
        break;

    case CUTOFF_ENERGY:
        /* compute total energy - this will not include vectors/values dropped from front. */
        total_energy = 0;
        for (i = 0; i < (s->values)->row_dim; i++) {
            total_energy += ME(s->values, i, 0);
        }

        /* compute cutoff point */
        i = 0;
        energy = 0;
        while ((i < (s->values)->row_dim) && (energy < total_energy * s->cutOff / 100.0)) {
            energy += ME(s->values, i, 0);
            i++;
        }
        n = i;
        break;

    case CUTOFF_STRETCH:
        i = 1;
        while ((i < (s->values)->row_dim) &&
            (100.0*(ME(s->values, i, 0) / ME(s->values, s->dropNVectors, 0)) > cutOff )) {
                i++;
        }
        n = i;
        break;

    case CUTOFF_CLASSES:
        n = s->numSubjects;
        break;

    case CUTOFF_DROPVEC:
        n = (int)((s->basis)->col_dim - s->cutOff);
        break;

    default:
        n = 0;
        DEBUG_CHECK (0, "ERROR: Unkown cutoff type");
        break;
    };

    /* Never set the dimensionality of the PCA subspace below the number of
    LDA classes when LDA is being used. Doing so leaves LDA with too few
    dimensions to separate the classes. */

    if (s->useLDA && (n < s->numSubjects))
        n = s->numSubjects;

    DEBUG_CHECK (n <= (s->basis)->col_dim, "Tried to expand, not contract, PCA space.");

    MESSAGE1ARG ("Retaining %d eigen vectors.",n);

    tmp = matrixCols ( s->basis, 0 , n-1);
    freeMatrix (s->basis);
    s->basis = tmp;

    DEBUG_INT (1, "Number of eigen vectors kept.", n);

    DEBUG_CHECK ((s->basis)->col_dim > 0, "All basis vectors deleted after cutoff "
        "and vector drop was processed.");

    MESSAGE2ARG("Truncating PCA Space. Subspace projection expressed "
        "as %d by %d matrix.", s->basis->row_dim, s->basis->col_dim);

    /*********************************************************************
    * STEP THREE: Do the LDA if specified
    ********************************************************************/

    if (s->useLDA)
    {
        /* Need to project original images into PCA space */

        Matrix fisherBasis, fisherValues, combinedBasis;
        Matrix imspca = transposeMultiplyMatrixL (s->basis, images);
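        /* imspca holds the PCA coefficients: s->basis is pixels by k and
           images is pixels by N, so imspca = basis' * images is k by N,
           one coefficient column per training image. */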

        MESSAGE("Computing Fisher Linear Discriminants for "
            "training images projected into PCA subspace.");

        fisherTrain (imspca, srt, &fisherBasis, &fisherValues, writeTextInterm);

        combinedBasis = multiplyMatrix (s->basis, fisherBasis);
        basis_normalize (combinedBasis);

        MESSAGE2ARG ("PCA and LDA Combined. Combined projection expressed as %d by "
            "%d matrix.", combinedBasis->row_dim, combinedBasis->col_dim);

        /* Release the superseded PCA values/basis and the intermediates
           before installing the LDA results, so they are not leaked. */
        freeMatrix (s->values);
        freeMatrix (s->basis);
        freeMatrix (fisherBasis);
        freeMatrix (imspca);

        s->values = fisherValues;
        s->basis  = combinedBasis;
    }

    /*START Changed by Zeeshan: For LPP*/
    /*********************************************************************
    * STEP FOUR: Do the LPP if specified
    ********************************************************************/
    if (s->useLPP)
    {
        /* Need to project original images into PCA space */

        Matrix laplacianBasis, laplacianValues, combinedBasis;
        Matrix imspca = transposeMultiplyMatrixL (s->basis, images);           

        MESSAGE("Computing Locality Preservation Projections for "
            "training images projected into PCA subspace.");

        laplacianTrain (imspca, srt, &laplacianBasis, &laplacianValues, neighbourCount, 
            useAdaptiveK, lppKeepNVectors, &s->values, lppDistance, writeTextInterm);

        combinedBasis = multiplyMatrix (s->basis, laplacianBasis);
        basis_normalize (combinedBasis);

        MESSAGE2ARG ("PCA and LPP Combined. Combined projection expressed as %d by "
            "%d matrix.", combinedBasis->row_dim, combinedBasis->col_dim);

        s->values = calculateStandardDeviation (laplacianBasis); /* OPTION4 */
        /* s->values = laplacianValues; */                       /* OPTION2 */
        s->basis  = combinedBasis;

        /* Clamp small and negative values to a floor of 0.001. */
        for (i = 0; i < s->values->row_dim; i++)
        {
            if (ME(s->values, i, 0) <= 0.001)
            {
                if (ME(s->values, i, 0) < -1e-10)
                    printf("WARNING: Large negative value found %f. Clamping to 0.001.\n", ME(s->values, i, 0));
                ME(s->values, i, 0) = 0.001;
            }
        }


    }
    /*END Changed by Zeeshan: For LPP*/


    /*START Changed by Zeeshan: For ICA*/
    /*********************************************************************
    * STEP FIVE: Do the ICA if specified
    ********************************************************************/
    if (s->useICA)
    {
        /* Need to project original images into PCA space */
        Matrix independentBasis;
        Matrix imspca = transposeMultiplyMatrixL (s->basis, images);           

        MESSAGE("Computing independent components from the principle components.");

        independentTrain(s->basis, imspca, &independentBasis, s->arch, s->blockSize, s->learningRate, s->iterations);

        if (s->arch == 1)
        {
            Matrix combinedBasis;
            combinedBasis = multiplyMatrix (s->basis, independentBasis);
            s->basis  = combinedBasis;
            s->ica2Basis = NULL;

            MESSAGE2ARG ("PCA and ICA Combined. Combined projection expressed as %d by "
                "%d matrix.", combinedBasis->row_dim, combinedBasis->col_dim);	
        }
        else if (s->arch == 2)
        {
            s->ica2Basis = independentBasis;
            MESSAGE2ARG ("PCA and ICA kept separate. ICA projection expressed as %d by "
                "%d matrix.", independentBasis->row_dim, independentBasis->col_dim);	
        }
    }
    /*END Changed by Zeeshan: For ICA*/
}
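
/*
 A minimal calling sketch for subspaceTrain (hypothetical; every parameter
 value below is illustrative only). It runs plain PCA with an ENERGY cutoff
 and leaves LDA, LPP and ICA disabled. lppDistance is passed as "" rather
 than NULL because subspaceTrain calls strdup on it unconditionally.
*/
static void subspaceTrainExample(Matrix images, ImageList *srt, int numSubjects)
{
    Subspace s;

    subspaceTrain(&s, images, srt, numSubjects,
                  0,                  /* dropNVectors: drop nothing          */
                  CUTOFF_ENERGY,      /* cutOffMode                          */
                  90.0,               /* cutOff: keep 90% of the energy      */
                  0,                  /* useLDA disabled                     */
                  0,                  /* writeTextInterm disabled            */
                  0, 0, 0, 0, "",     /* LPP disabled; "" for lppDistance    */
                  0, 0, 0.0, 0, 0);   /* ICA disabled                        */
}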
void fisherTrain(Matrix imspca, ImageList *srt, Matrix *fisherBasis, Matrix *fisherValues, int writeTextInterm) {
    int i;
    int numberOfClasses;
    Matrix G, N, Tmp;
    Matrix Rw = makeIdentityMatrix(imspca->row_dim);
    Matrix Siw = makeIdentityMatrix(imspca->row_dim);
    Matrix Ev = makeMatrix(imspca->row_dim, 1);
    Matrix Evecs = makeMatrix(imspca->row_dim, imspca->row_dim);
    Matrix Mw = findWCSMatrix(imspca, srt, &numberOfClasses, writeTextInterm);
    Matrix Mb = findBCSMatrix(imspca, Mw);
    *fisherValues = makeMatrix(imspca->row_dim, 1);

    MESSAGE2ARG("LDA Training started with %d classes and %d total training images.", numberOfClasses, imspca->col_dim);

    if (writeTextInterm) { SAVE_MATRIX(Mw); SAVE_MATRIX(Mb); } /* output textfiles of intermediate matrices */


    MESSAGE("Computing eigenspace decomposition of within class scatter matrix.");
    cvJacobiEigens_64d(Mw->data, Rw->data, Ev->data, Mw->row_dim, 0.0);

    MESSAGE("Computing the inverse scale matrix derived from eigenvalues and transformed scatter matrix.");
    for (i = 0; i < Ev->row_dim; i++)
        ME(Siw, i, i) = (ME(Ev, i, 0) <= 0.0) ? 0.0 : 1 / ( sqrt( ME(Ev, i, 0) ) );

    G = transposeMultiplyMatrixR(Siw, Rw);
    Tmp = transposeMultiplyMatrixR(Mb, G);
    N = multiplyMatrix(G, Tmp);
    freeMatrix(Tmp);
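    /* Assuming transposeMultiplyMatrixR(A, B) computes A * B', at this point
       N = Siw * Rw' * Mb * Rw * Siw': the between-class scatter expressed in
       the coordinates that whiten the within-class scatter, whose leading
       eigenvectors maximize the Fisher criterion. */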

    if (writeTextInterm) {
        SAVE_MATRIX(Rw);
        SAVE_MATRIX(Ev);
        SAVE_MATRIX(N);
        SAVE_MATRIX(G);
        SAVE_MATRIX(Siw);
    } /* output textfiles of intermediate matrices */

    MESSAGE("Computing eigenspace of transformed between class scatter matrix.");
    cvJacobiEigens_64d(N->data, Evecs->data, (*fisherValues)->data, N->row_dim, 0.0);
    DEBUG(3, "FINSISHED");

    Tmp = multiplyMatrix(Siw, Evecs);
    DEBUG(1, "Calculating fisher basis");
    *fisherBasis = multiplyMatrix(Rw, Tmp);

    if (writeTextInterm) { SAVE_MATRIX(*fisherBasis); SAVE_MATRIX(*fisherValues); SAVE_MATRIX(Evecs); } /* output textfiles of intermediate matrices */

    /* The following verification code does not seem to work so it has been commented out. */
    /* fisherVerify(*fisherBasis, *fisherValues, Mw, Mb); */

    /* Crop the basis to the proper number of vectors */
    (*fisherBasis)->col_dim = numberOfClasses - 1;
    (*fisherValues)->row_dim = numberOfClasses - 1;
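    /* The between-class scatter of c classes has rank at most c - 1, so only
       the first numberOfClasses - 1 fisher directions carry information. */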

    basis_normalize(*fisherBasis);

    MESSAGE2ARG("Completed LDA Training. Fisher basis projection matrix has dimensions %d by %d.", (*fisherBasis)->row_dim, (*fisherBasis)->col_dim);

    /* Free memory allocated during LDA training. */
    freeMatrix(Tmp);
    freeMatrix(Evecs);
    freeMatrix(Rw);
    freeMatrix(Siw);
    freeMatrix(Ev);
    freeMatrix(Mw);
    freeMatrix(Mb);
    freeMatrix(G);
    freeMatrix(N);
}