Example #1
void Eigsym::eig(const MatDoub &A, MatDoub &V, VecDoub &lambda) {
	unsigned int n = A.ncols();	/* size of the (square, symmetric) matrix */
	double *a = new double[n*n];	/* initial matrix, flattened row by row */
	double *w = new double[n];	/* eigenvalues */
	int matz = 1;		/* return both eigenvalues and eigenvectors */
	double *x = new double[n*n];	/* eigenvectors */

	for(unsigned int i=0; i<n; i++) {
		for(unsigned int j=0; j<n; j++) {
			a[i*n+j] = A[i][j];
		}
	}

	int ierr = rs ( n, a, w, matz, x );	/* nonzero ierr indicates an error in rs() */

	V.assign(n,n,0.0);
	lambda.resize(n);
	for(unsigned int i=0; i<n; i++) {
		lambda[i] = w[i];
		for(unsigned int j=0; j<n; j++) {
			V[j][i] = x[i*n+j];	/* copy eigenvector data into column i of V */
		}
	}

	delete [] a;
	delete [] w;
	delete [] x;
}
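/*
 A hypothetical usage sketch of the wrapper above; the 2x2 values are purely
 illustrative, and calling eig() through an Eigsym instance is an assumption
 about how the class is intended to be used.
*/
void eigsym_usage_sketch() {
	MatDoub A(2,2);
	A[0][0] = 2.0; A[0][1] = 1.0;
	A[1][0] = 1.0; A[1][1] = 2.0;

	MatDoub V;		// eigenvectors returned one per column
	VecDoub lambda;		// eigenvalues
	Eigsym es;
	es.eig(A, V, lambda);	// lambda[i] pairs with column i of V
}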
Example #2
void dump_nrmat( MatDoub &m ) {
	for( int r=0; r<m.nrows(); r++ ) {
		for( int c=0; c<m.ncols(); c++ ) {
			printf( "%+3.2le ", m[r][c] );
		}
		printf( "\n" );
	}	
}
void addMatrixRow(MatDoub U, int row, MatDoub &out) {

    int dummy = -1000;

    for(int i=0; i<out.nrows(); i++) {
        out[i][row] = dummy;
        out[row][i] = dummy;
    }

    double *datain  = U.getMatrixArray();
    double *datainO = out.getMatrixArray();
    double *data    = new double[out.nrows()*out.ncols()];
    int k           = 0;

    //--- Copy U into the non-dummy positions; the dummy row/column becomes zero
    for(int i=0; i < out.nrows()*out.ncols(); i++) {

        if( datainO[i] == dummy )
            data[i] = 0;
        else
            data[i] = datain[k++];

    }

    out = MatDoub( out.nrows(), out.nrows(), data );

    delete [] data;

}
Example #4
void copyNRMatToZMat( MatDoub &m, ZMat &z ) {
	// Account for NR3 being row-major while ZMat is column-major.
	int rows = m.nrows();
	int cols = m.ncols();
	if( z.rows != rows || z.cols != cols ) {
		z.alloc( rows, cols, zmatF64 );
	}
	for( int r=0; r<rows; r++ ) {
		for( int c=0; c<cols; c++ ) {
			z.putD( r, c, m[r][c] );
		}
	}
}
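/*
 For reference on the comment above: a row-major R x C matrix stores element
 (r,c) at flat offset r*C + c, while a column-major layout stores it at
 c*R + r. Copying element-by-element through putD(), as done here, works for
 either layout and avoids any index arithmetic.
*/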
/*
 Calculate the Modularity matrix when split
 into more than two communities, see [2]
 in method declarations above.
 */
void calculateB(MatDoub B, MatDoub &Bg) {

    int Ng = B.ncols();

    if( Bg.ncols() != Ng )
        Bg.resize(Ng,Ng);

    for(int i=0; i<Ng; i++) {
        for(int j=0; j<Ng; j++) {
            double sum = 0.0;
            for(int k=0; k<Ng; k++)
                sum += B[i][k];

            Bg[i][j] = B[i][j] - delta(i,j) * sum;
        }
    }

}
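/*
 For reference, calculateB() above implements the generalized Modularity matrix
 of [2] for a group g of nodes (an interpretation based on the reference cited
 in the comment):

     Bg_ij = B_ij - delta_ij * sum_k B_ik,

 where the sum runs over the columns of the group's B.
*/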
/*
 Utility method used by the Geodesic and RandomWalk algorithms
 to set up the Modularity and Laplacian matrices.
 */
void setupMatrices() {

    //--- Matrix size
    int N = R.nrows();

    //--- 2*m
    two_m = elist.size();

    //--- _norm
    _norm = 1.0/(2.0*two_m);

    for(int i=0; i<N; i++) {
        C[i] = 0;
        for(int j=0; j<N; j++) {
            R[i][j] = 0.0;
            A[i][j] = 0.0;
        }
    }

    //--- Setup Matrices
    //--- Store the current community each vertex belongs to
    for(int i=0; i<N; i++) {
        R[i][i] = n[i+1].getDegree();
        C[i]    = n[i+1].c;
    }

    //--- The Adjacency matrix, A
    for(int i=0; i<elist.size(); i++) {
        int ind_i = elist[i].so -1;
        int ind_j = elist[i].si -1;

        if( ind_i == ind_j )
            A[ind_i][ind_j] = 1.0 * elist[i].Globalwe;
        else {
            A[ind_i][ind_j] = 1.0 * elist[i].Globalwe;
            A[ind_j][ind_i] = 1.0 * elist[i].Globalwe;
        }

    }

    //--- The Graph Laplacian, R = D - A
    for(int i=0; i<N; i++) {
        for(int j=0; j<N; j++) {
            R[i][j] = R[i][j] - A[i][j];
        }
    }

    //--- The Modularity matrix, Bgi
    for(int i=1; i<n.size(); i++) {
        for(int j=1; j<n.size(); j++) {
            Bgi[i-1][j-1] = A[i-1][j-1] - (n[i].getDegree() * n[j].getDegree() * _norm);
        }
    }

}
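/*
 For reference, with k_i the degree of vertex i and m the number of edges in
 elist, the loops in setupMatrices() above build

     the Adjacency matrix      A_ij  (weighted by Globalwe),
     the Graph Laplacian       R_ij = k_i * delta_ij - A_ij,
     the Modularity matrix     Bgi_ij = A_ij - k_i * k_j / (2m),

 following the definitions in [2] cited above.
*/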
/*
 Utility method used by the RandomWalk algorithm to
 update the Adjacency and Laplacian matrices.
 */
void upDateMatrices() {

    for(int i=0; i<R.nrows(); i++) {
        C[i] = 0;
        for(int j=0; j<R.nrows(); j++) {
            R[i][j] = 0;
        }
    }

    //--- Setup Matrices
    //--- Store the current community each vertex belongs to
    for(int i=0; i<R.nrows(); i++) {
        R[i][i] = n[i+1].getDegree();
        C[i]    = n[i+1].c;
    }

    //--- Update the Adjacency matrix, A
    for(int i=0; i<elist.size(); i++) {

        if( elist[i].removed ) {

            int ind_i = elist[i].so -1;
            int ind_j = elist[i].si -1;

            //if edge has been removed, remove this entry
            //from A.
            A[ind_i][ind_j] = 0;
            A[ind_j][ind_i] = 0;
        }
    }

    //--- Update the Laplacian matrix, R
    for(int i=0; i<R.nrows(); i++) {
        for(int j=0; j<R.nrows(); j++) {
            R[i][j] = R[i][j] - A[i][j];
        }
    }

}
/*
 Utility method used by the RandomWalk algorithm to
 extract the sub-matrix of the Graph Laplacian for each
 community, com, within the network.
 */
void getSubMatrix(int com, vector<node> &Nodes) {

    int dummy = -1000;
    int rows  = 0;

    Rh.resize(R.nrows(), R.nrows());
    Rh = R;

    //--- NR style
    for( int i=0; i< C.size(); i++) {

        if( C[i] == com )
            Nodes.push_back(node(rows++,0.0,0.0));
        else {
            for( int k=0; k<Rh.nrows(); k++) {
                Rh[i][k] = dummy;
                Rh[k][i] = dummy;
            }
        }

    }

    double *datain = Rh.getMatrixArray();
    double *data   = new double[Rh.nrows()*Rh.nrows()];
    int ind = 0;

    //--- Compact the surviving (non-dummy) entries into a dense array
    for(int i=0; i < Rh.nrows()*Rh.ncols(); i++) {

        double ele = datain[i];
        if(ele != dummy)
            data[ind++] = ele;

    }

    Ri.resize(rows,rows);
    Ri = MatDoub( rows, rows, data );

    delete [] data;

}
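/*
 getSubMatrix() above, the addMatrixRow() helper earlier, and the
 removeMatrixRow() helpers below all rely on the same sentinel-and-compact
 pattern: rows and columns to be dropped are overwritten with a dummy value,
 and the surviving entries are then copied, in order, into a smaller dense
 matrix. A minimal stand-alone sketch of the idea (not the project's API; it
 assumes no genuine matrix entry equals the sentinel):
*/
int compactEntries(const double *flat, int count, double sentinel, double *kept) {
    int k = 0;
    for (int i = 0; i < count; i++)
        if (flat[i] != sentinel)    // keep only unmarked entries, preserving order
            kept[k++] = flat[i];
    return k;                       // number of surviving entries (a perfect square here)
}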
/*
 Update the index vectors, si and SI, for each node in the
 current split such that:

 si(i) =  1 if eigenvector_max(i) > 0
       = -1 if eigenvector_max(i) < 0

 SI(i,0) = 1 and SI(i,1) = 0 if eigenvector_max(i) > 0
 SI(i,0) = 0 and SI(i,1) = 1 if eigenvector_max(i) < 0
 */
void maximiseIndexVectors( int ind ) {

    int Ng = u.ncols();

    si.resize(Ng);

    SI.resize(Ng,2);

    for(int i=0; i<Ng; i++) {

        if(u[i][ind] < 0) {
            si[i]    = -1;
            SI[i][0] =  0;
            SI[i][1] =  1;
        } else {
            si[i]    =  1;
            SI[i][0] =  1;
            SI[i][1] =  0;
        }

    }

}
/*
 Calculate the eigenvalues, betai, and eigenvectors, u, for
 the current Modularity matrix Bgi.
 */
void calculateEigenVectors() {

    int Ng = Bgi.ncols();

    if(u.ncols() != Ng) {
        u.resize(Ng,Ng);
        betai.resize(Ng);
    }

    Symmeig h(Bgi, true);

    for(int i=0; i<Ng; i++) {
        betai[i] = h.d[i];
        for(int j=0; j<Ng; j++) {
            u[j][i] = h.z[j][i];
        }
    }


}
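/*
 If Symmeig here is the Numerical Recipes (3rd ed.) eigen_sym routine (an
 assumption; only its use is shown in these examples), then h.d holds the
 eigenvalues sorted into non-increasing order and column i of h.z holds the
 matching eigenvector. A minimal stand-alone sketch:
*/
void symmeig_usage_sketch() {
    MatDoub M(3, 3, 0.0);           // illustrative symmetric matrix
    M[0][0] = 2.0; M[1][1] = 1.0; M[2][2] = 3.0;

    Symmeig h(M, true);             // true => also compute eigenvectors

    double leading = h.d[0];        // most positive eigenvalue (if sorted as above)
    double v0      = h.z[0][0];     // first component of the leading eigenvector
    (void)leading; (void)v0;
}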
/*
 Find the leading eigenvector, i.e.
 the one which corresponds to the most positive
 eigenvalue.
 */
void findLeadingEigenVectors(int &ind) {


    int Ng = Bgi.ncols();

    int ind_max = 0;
    double max  = betai[ind_max];

    //--- Scan all Ng eigenvalues for the most positive one
    for(int i=1; i<Ng; i++) {
        if( betai[i] > max ) {
            max     = betai[i];
            ind_max = i;
        }
    }

    ind = ind_max;

}
void removeMatrixRow( MatDoub Unr, MatDoub &outnr ) {

    int dummy = -1000;

    double *datain = Unr.getMatrixArray();
    double *data   = new double[outnr.nrows()*outnr.nrows()];
    int k=0;

    //--- Keep only the entries not flagged with the dummy value
    for(int i=0; i < Unr.nrows()*Unr.nrows(); i++) {

        double ele = datain[i];
        if(ele != dummy)
            data[k++] = ele;

    }

    outnr = MatDoub( outnr.nrows(), outnr.nrows(), data );

    delete [] data;


}
void removeMatrixRow( MatDoub &out ) {

    int dummy = -1000;

    double *datain = Ri.getMatrixArray();
    double *data   = new double[Ri.nrows()*Ri.nrows()];
    int k=0;

    for(int i=0; i < Ri.nrows()*Ri.ncols(); i++) {

        double ele = datain[i];
        if(ele != dummy)
            data[k++] = ele;

    }

    out = MatDoub( out.nrows(), out.nrows(), data );

    delete [] data;


}
Example #14
	double interpolate( util::matrix_t<double> &data,
		util::matrix_t<double>  &par,
		double I, double T, int idx, bool quiet )
	{
		MatDoub tempirr;
		std::vector<double> parvals;
		std::vector<sp_point> pts, hull;

		double maxz = -1e99;
		double tmin = 1e99;
		double tmax = -1e99;
		double imin = 1e99;
		double imax = -1e99;
		double dist = 1e99;
		int idist = -1;
		for( size_t i=0;i<data.nrows();i++ )
		{
			double z = par(i,idx);
			if ( !std::isfinite( z ) )
				continue;

			double temp = data(i,TC);//x value
			double irr = data(i,IRR);//y value

			if ( temp < tmin ) tmin = temp;
			if ( temp > tmax ) tmax = temp;
			if ( irr < imin ) imin = irr;
			if ( irr > imax ) imax = irr;

			double d = sqrt( (irr-I)*(irr-I) + (temp-T)*(temp-T) );
			if ( d < dist )
			{
				dist = d;
				idist = (int)i;
			}
			
			std::vector<double> it(2,0.0);
			it[0] = temp; it[1] = irr;
			tempirr.push_back( it );

			parvals.push_back( z );

			if ( z > maxz ) maxz = z;

			pts.push_back( sp_point( temp, irr, z ) );
		}

		Toolbox::convex_hull( pts, hull );
		if ( Toolbox::pointInPolygon( hull, sp_point(T, I, 0.0) ) )
		{
			// scale values based on max - helps GM interp routine
			for( size_t i=0;i<parvals.size();i++)
				parvals[i] /= maxz;

			Powvargram vgram( tempirr, parvals, 1.75, 0. );
			GaussMarkov gm( tempirr, parvals, vgram );

			// test the fit against the data
			double err_fit = 0.;
			for( size_t i=0;i<parvals.size();i++ )
			{
				double zref = parvals[i];
				double zfit = gm.interp( tempirr[i] );
				double dz = zref - zfit;
				err_fit += dz*dz;
			}
			err_fit = sqrt(err_fit);
			if ( err_fit > 0.01 )
			{
				log(	util::format("interpolation function for iec61853 parameter '%s' at I=%lg T=%lg is poor: %lg RMS",
							parnames[idx], I, T, err_fit ),

					SSC_WARNING );
			}

			std::vector<double> q(2,0.0);
				q[0] = T;
				q[1] = I;
			
			// now interpolate and return the value
			return gm.interp( q ) * maxz;
		}
		else
		{
			// if we're pretty close, return the nearest known value
			if ( dist < 30. )
			{
				
				if ( !quiet )
					log( util::format("query point (%lg, %lg) is outside convex hull of data but close... returning nearest value from data table at (%lg, %lg)=%lg",
						T, I, data(idist,TC), data(idist,IRR), par(idist,idx) ),

						SSC_WARNING );

				return par(idist,idx);
			}

				

			// fall back to the 5 parameter model's auxiliary equations 
			// to estimate the parameter values outside the convex hull

			int idx_stc = -1;
			for( size_t i=0;i<data.nrows();i++)
				if ( data(i,IRR) == 1000.0
					&& data(i,TC) == 25.0 )
					idx_stc = (int)i;

			if ( idx_stc < 0 )
				throw general_error("STC conditions required to be supplied in the temperature/irradiance data");



			double value = par(idist,idx);
			if ( idx == A )
			{
				double a_nearest = par( idist, A );
				double T_nearest = data( idist, TC );
				double a_est = a_nearest * T/T_nearest;
				value = a_est;
			}
			else if ( idx == IL )
			{
				double IL_nearest = par( idist, IL );
				double I_nearest = data(idist, IRR );
				double IL_est = IL_nearest * I/I_nearest;
				value = IL_est;
			}/*
			else if ( idx == IO )
			{
#define Tc_ref 298.15
#define Eg_ref 1.12
#define KB 8.618e-5

				double IO_stc = par(idx_stc,IO);
				double TK = T+273.15;
				double EG = Eg_ref * (1-0.0002677*(TK-Tc_ref));
				double IO_oper =  IO_stc * pow(TK/Tc_ref, 3) * exp( 1/KB*(Eg_ref/Tc_ref - EG/TK) );
				value = IO_oper;	
			}*/
			else if ( idx == RSH )
			{
				double RSH_nearest = par( idist, RSH );
				double I_nearest = data(idist, IRR );
				double RSH_est = RSH_nearest * I_nearest/I;
				value = RSH_est;
			}
			
			if ( !quiet )
				log( util::format("query point (%lg, %lg) is too far out of convex hull of data (dist=%lg)... estimating value from 5 parameter modele at (%lg, %lg)=%lg",
					T, I, dist, data(idist,TC), data(idist,IRR), value ),

					SSC_WARNING );

			return value;
		}
	}
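/*
 For reference, the fit check above accumulates the squared residuals between
 the kriging fit and the (max-scaled) data and warns when

     err_fit = sqrt( sum_i (zref_i - zfit_i)^2 )

 exceeds 0.01. The same scale-by-maximum and Powvargram/GaussMarkov
 construction pattern reappears in the C_csp_gen_collector_receiver::init
 example further down.
*/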
/*
 Calculate the split of nodes belonging to the last group of nodes
 with negative eigenvector values.
 */
void splitN(MatDoub Bg, VecInt keys, int dummy, double tol) {

    cout << "> In splitN method... " << endl;

    int N = Bg.nrows();
    MatDoub Bgii(N,N);
    MatDoub Bgiii(N,N);
    VecInt  keysi_n (N);

    //--- Starting from the group Modularity matrix Bg,
    //--- resize matrices: Bgi, keysi_p, keysi_n, u and betai.
    Bgiii  = Bg;
    int Ng = 0;

    for(int i=0; i<keys.size(); i++) {
        if(keys[i] != dummy) {
            Ng++;
        } else {
            for(int k=0; k<Bgiii.nrows(); k++) {
                Bgiii[i][k] = dummy;
                Bgiii[k][i] = dummy;
            }
        }
    }

    keysi_n.resize(Ng);
    VecInt keysi_p(Ng);

    int k=0;
    for(int i=0; i<keys.size(); i++) {
        if(keys[i] != dummy)
            keysi_n[k++] = keys[i];
    }

    Bgii.resize(Ng,Ng);
    removeMatrixRow(Bgiii,Bgii);

    Bgi.resize(Bgii.nrows(),Bgii.nrows());

    //--- Calculate the Modularity matrix Bgi for the new node group
    calculateB(Bgii, Bgi);

    u.resize(Ng,Ng);
    betai.resize(Ng);

    //--- Calculate eigenvectors, and values, from Bgi...
    calculateEigenVectors();

    int ind = 0;
    findLeadingEigenVectors(ind);

    //--- Check that maximum eigenvalue is greater than the tolerance.
    cout << "> max EigenValue is " << betai[ind] << " with ind " << ind << endl;
    if(betai[ind] > tol ) {

        //--- set up the index vectors, si and SI, for the initial split
        maximiseIndexVectors(ind);

        double deltaQ_old = 0.0;
        double deltaQ_new = 0.0;

        int cp = 0;
        int cn = 0;

        //--- Calculate the Spectral Modularity
        deltaModularity(deltaQ_old);
        cout << "> Spectral Q: " << deltaQ_old << endl;

        double diff = fabs(deltaQ_old);
        int count   = 0;

        //--- Fine-tuning stage to maximise deltaModularity for the initial split
        while( diff > tol ) {

            modifySplit( tol, Ng );

            deltaModularity(deltaQ_new);
            cout << "> Modified Q: " << deltaQ_new << endl;

            diff = fabs( deltaQ_new - deltaQ_old );

            deltaQ_old = deltaQ_new;

        }

        //--- Keep a record of the maximum fine-tuned Modularity value.
        specQ += deltaQ_old;
        for(int i=0; i<Ng; i++) {
            if(si[i] > 0) cp++;
            else          cn++;
        }

        if(cp < 1 || cn < 1) {
            cout << "> Stop splitting. " << endl;
            return;
        }

        int Ncomn = maxCommunity() + 1;
        int Ncomp = Ncomn + 1;

        cout << "> node list " << endl;
        for(int i=0; i<keysi_n.size(); i++) {
            if( si[i] < 0) {
                keysi_p[i] = dummy;
                n[(int)keysi_n[i]].c = Ncomn;
                cout << "> Node: " << keysi_n[i] << " c = " << n[(int)keysi_n[i]].c << endl;
            } else {
                keysi_p[i] = keysi_n[i];
                keysi_n[i] = dummy;
                cout << "> Node: " << keysi_p[i] << " c = " << n[(int)keysi_p[i]].c << endl;
            }
        }

        //--- Recursively split the group of positive eigenvector nodes
        splitP(Bgii, keysi_p, dummy, tol);

        //--- Recursively split the group of negative eigenvector nodes
        splitN(Bgii, keysi_n, dummy, tol);

    } else {
        cout << "> Stop splitting. " << endl;
        return ;
    }


}
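/*
 For reference, the deltaModularity() calls above evaluate the contribution of
 a proposed split to the Modularity, which in [2] is written as

     deltaQ = (1 / 4m) * s^T * Bg * s,

 with s the +/-1 index vector built by maximiseIndexVectors(). This is an
 interpretation based on the cited reference; deltaModularity() itself is not
 shown among these examples.
*/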
//--- MAIN PROGRAM
//-------------------------------------------------------------------------------------
int main(int argc, char * argv[]) {

    int seed;
    int a_type;
    int w_type;
    string title;
    string if_weighted;
    string if_help;
    const char *file_network;
    const char *file_names;

    if ( argc != 5 ) {
        printHelpMessage( argv[0] );
    }

    if_help = argv[1];
    if( if_help.compare("-h") == 0 || if_help.compare("-help") == 0 ) {
        printHelpMessage( argv[0] );
    }

    seed = atoi(argv[1]);
    cout << "> seed is " << seed << endl;

    //--- Initialize random seed:
    _rand.setSeed(seed);

    a_type = atoi(argv[2]);

    if( a_type < 1 || a_type > 3 ) {
        cout << "argument 2: the type of algorithm to run needs to be either (1,2,3): " << endl;
        cout << "          : 1 = Geodesic edge Betweenness" << endl;
        cout << "          : 2 = Random edge Betweenness"   << endl;
        cout << "          : 3 = Spectral Betweenness"      << endl;
        exit(1);
    }

    switch(a_type) {

    case 1:
        cout << "> Using Geodesic edge Betweenness." << endl;
        title = "Geodesic edge Betweenness.";
        break;

    case 2:
        cout << "> Using Random edge Betweenness." << endl;
        title = "RandomWalk edge Betweenness.";
        break;

    case 3:
        cout << "> Using Spectral Betweenness." << endl;
        title = "Spectral Betweenness.";
        break;

    default:
        break;

    }

    if_weighted = argv[3];

    if( if_weighted.compare("w") == 0 ) {
        w_type = 3;
        cout << "> Using a weighted network " << endl;
    } else {
        if( if_weighted.compare("nw") == 0 ) {
            w_type = 2;
            cout << "> Using a non-weighted network " << endl;
        } else {
            cout << "argument 3: specify if network file is weighted or not: " << endl;
            cout << "          : w  = Using a weighted network file " << endl;
            cout << "          : nw = Using a non-weighted network file " << endl;
            exit(1);
        }
    }

    file_network = argv[4];
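    //--- Expected invocation, based on the argument handling above
    //--- (the executable name here is illustrative):
    //---     ./communities <seed> <algorithm: 1|2|3> <w|nw> <network_file>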

    //--- Default values for parameters which may be modified from the commandline
    ihelper = Helper();
    reader.readFile(file_network, w_type);
    Gn      = reader.getNodeSet();
    Gelist  = reader.getEdgeSet();

    vector<int> key_listi;
    vector<int> key_listj;
    vector<int> key_listk;

    cout << "> The Global node list..." << endl;
    for(int i=1; i<Gn.size(); i++) {
        key_listi.push_back(Gn[i].ID);
        key_listj.push_back(Gn[i].ID);
        key_listk.push_back(-1);
        Gn[i].print();
        Gn[i].printEdges();
    }

    //--- To use getSubSample, comment out the following two lines and
    //--- uncomment getSubSample(key_listj).
    n     = Gn;
    elist = Gelist;
    //getSubSample(key_listj);

    //cout << "The sub-node list ... " << endl;
    //for(int i=1; i<n.size(); i++){
    //n[i].print();
    //n[i].printEdges();
    //}

    cout << "> The Global edge list..." << endl;
    for(int i=0; i<elist.size(); i++) {
        elist[i].print();
    }

    forcytoscape = new fstream("OUT/communities_newman.txt",ios_base::out);
    (*forcytoscape) << "communities" << endl;

    removededges = new fstream("OUT/removededges.txt",ios_base::out);
    (*removededges) << "Removed Edges" << endl;
    (*removededges) << "so \t IDso \t si \t IDsi \t we \t Globalweight \t key" << endl;

    totallist = ihelper.cloneEdgeList(elist);

    com_max      = 0;
    specQ        = 0.0;
    double Q     = 0.0;
    double Q_SD     = 0.0;
    double Q_old = 0.0;
    double Q_SD_old = 0.0;

    int loop       = elist.size();
    int E          = loop;
    double Q_max   = 0.0;
    double Q_limit = 1.0;
    bool stopping  = false;

    int N = n.size()-1;

    R.resize(N,N);
    Ri.resize(N,N);
    A.resize(N,N);
    Ai.resize(N,N);
    Bi.resize(N,N);
    C.resize(N);

    S.resize(N,1);
    V.resize(N,1);
    T.resize(N,N);
    Ti.resize(N,N);
    Rc.resize((N-1),(N-1));
    Vi.resize(C.size(),1);

    B.resize(N,N);
    Bm.resize(N,N);
    Bgi.resize(N,N);

    keys_p.resize(N);
    keys_n.resize(N);

    u.resize(N,N);  //eigenvectors
    betai.resize(N);//eigenvalues

    SI.resize(N,2);
    si.resize(N);
    visited.resize(N);

    setupMatrices();

    cout << "> Running " << title.c_str() << endl;

    cstart = clock();

    if( a_type == 3 ) {
        //--- Calculate betweenness using the Spectral algorithm
        calculateSpectralModularity();
    } else {

        while( loop !=0 && !stopping ) {

            int old_max_com = com_max;

            //--- Calculate betweenness using Geodesic or RandomWalk algorithms
            if( a_type == 1 )
                calculateEdgeBetweennessGeodesic();
            else
                calculateEdgeBetweennessRandom();

            //--- Calculate the Modularity
            Q_old = Q;
            Q_SD_old = Q_SD;
            Q     = 0.0;
            Q_SD   = 0.0;

            Modularity(Q, Q_SD);

            //--- Store the network state if the Modularity has increased during this iteration
            if(com_max > old_max_com) {
                vec_mod.push_back(Q);
                vec_mod_err.push_back(Q_SD);
                vec_com_max.push_back(com_max);
                vec_nodes.push_back(storeNodes());
            }


            //--- Record the maximum Modularity value
            if( Q > Q_max ) {
                Q_max = Q;
            } else {
                if( Q_max > 0.0 && (Q_max - Q)/Q_max > Q_limit ) stopping = true;
            }


            //--- Find edge with maximum edge betweenness score and remove
            edge _max;
            _max = totallist[1].Clone();
            for(int i=1; i<totallist.size(); i++) {

                if( totallist[i].removed == false ) {

                    if(totallist[i].we >= _max.we) {

                        if(totallist[i].we > _max.we)
                            _max = totallist[i];
                        else {
                            int rdm = rand()%2;
                            if(rdm == 1) _max = totallist[i];
                        }
                    }
                }
                totallist[i].we = 0;
            }

            //--- Record the removed edges.
            _max.print( removededges );

            n[elist[_max.key-1].so].removeEdge(_max.key);
            n[elist[_max.key-1].si].removeEdge(_max.key);
            n[elist[_max.key-1].so].setDegree( (n[elist[_max.key-1].so].getDegree() - 1) );
            n[elist[_max.key-1].si].setDegree( (n[elist[_max.key-1].si].getDegree() - 1) );
            totallist[_max.key].removed = true;
            elist[_max.key-1].removed   = true;
            --loop;

            //--- Update the progress bar with the fraction of edges processed
            DrawProgressBar( 20, ((double)E - (double)loop)/(double)E );

        }
    }

    //--- Record the CPU-time taken
    cend = clock();
    double cpu_time_used = ((double) (cend - cstart)) / CLOCKS_PER_SEC;
    cout << "" << endl;
    cout << "> cputime: " << cpu_time_used << " seconds " << endl;
    cout << "> Network (nodes): " << N << " (edges): " << E << endl;

    if( a_type != 3 ) {

        //--- Print all stored Modularity values
        modularityscore = new fstream("OUT/modularityscore.txt",ios_base::out);
        (*modularityscore) << title.c_str() << endl;
        for(int i=0; i<vec_mod.size(); i++) {
            (*modularityscore) << vec_mod[i] << " " << vec_mod_err[i] << " " << vec_com_max[i] << endl;
        }
        modularityscore->close();

        int ind   = findMax(vec_mod);
        int com   = 1;
        int _size = 0;
        int c_max = com_max;

        //--- Print node communities for maximum Modularity value, for Geodesic or RandomWalk runs
        communityout = new fstream("OUT/communityout.txt",ios_base::out);
        (*communityout) << "Max Q: " << vec_mod[ind] << " +- " << vec_mod_err[ind] << endl;
        (*communityout) << "cputime: " << cpu_time_used << " seconds " << endl;
        (*communityout) << "Network (nodes): " << N << " (edges): " << E << endl;

        while(com<(c_max+1)) {
            _size = 0;
            for(int i=0; i<vec_nodes[ind].size(); i++) {
                if(vec_nodes[ind][i].c == com ) {
                    (*communityout) << vec_nodes[ind][i].ID << "\t" << vec_nodes[ind][i].c << endl;
                    //vec_nodes[ind][i].print( communityout );
                    _size++;
                }
            }
            if(_size != 0)
                (*communityout) << "community: " << com << " size: " << _size << endl;
            com++;
        }

        for(int i=0; i<vec_nodes[ind].size(); i++) {
            for(int j=0; j<key_listi.size(); j++) {
                if(vec_nodes[ind][i].ID == key_listi[j]) {
                    key_listk[j] = vec_nodes[ind][i].c;
                    break;
                }
            }
        }

        //--- Print node communities for maximum Modularity for the consensus matrix
        consensusout = new fstream("OUT/consensusout.txt",ios_base::out);
        (*consensusout) << "key list" << endl;
        for(int i=0; i<key_listi.size(); i++) {
            if(key_listk[i] == -1 && key_listj[i] != -1)
                key_listj[i] = -1;
            (*consensusout) << key_listi[i] << " " << key_listj[i] << " " << key_listk[i] << endl;
            //(*consensusout) << key_listi[i] << " = " << key_listk[i] << endl;
            (*forcytoscape) << key_listi[i] << " = " << key_listk[i] << endl;
            cout << key_listi[i] << " " << key_listj[i] << " " << key_listk[i] << endl;
        }
    } else {

        int com   = 1;
        int _size = 0;
        int c_max = maxCommunity();

        //--- Store node communities for maximum Modularity for the Spectral Modularity run
        communityout = new fstream("OUT/communityout.txt",ios_base::out);
        (*communityout) << "communityout" << endl;
        (*communityout) << "Max Q: " << specQ << endl;
        (*communityout) << "cputime: " << cpu_time_used << " seconds " << endl;
        (*communityout) << "Network (nodes): " << N << " (edges): " << E << endl;
        while(com<(c_max+1)) {
            _size = 0;
            for(int i=0; i<n.size(); i++) {
                if(n[i].c == com ) {
                    n[i].print( communityout );
                    _size++;
                }
            }
            if(_size != 0)
                (*communityout) << "community: " << com << " size: " << _size << endl;
            com++;
        }

        for(int i=1; i<n.size(); i++) {
            for(int j=0; j<key_listi.size(); j++) {
                if(n[i].ID == key_listi[j]) {
                    key_listk[j] = n[i].c;
                    break;
                }
            }
        }

        //--- Print node communities for maximum Modularity for the consensus matrix
        consensusout = new fstream("OUT/consensusout.txt",ios_base::out);
        (*consensusout) << "key list" << endl;
        for(int i=0; i<key_listi.size(); i++) {
            if(key_listk[i] == -1 && key_listj[i] != -1)
                key_listj[i] = -1;
            (*consensusout) << key_listi[i] << " " << key_listj[i] << " " << key_listk[i] << endl;
            //(*consensusout) << key_listi[i] << " = " << key_listk[i] << endl;
            (*forcytoscape) << key_listi[i] << " = " << key_listk[i] << endl;
            cout << key_listi[i] << " " << key_listj[i] << " " << key_listk[i] << endl;
        }


    }


    //--- Remove data structures
    communityout->close();
    forcytoscape->close();
    vec_mod.clear();
    vec_mod_err.clear();
    vec_nodes.clear();

    return 0;

}
void C_csp_gen_collector_receiver::init(const C_csp_collector_receiver::S_csp_cr_init_inputs init_inputs, 
			C_csp_collector_receiver::S_csp_cr_solved_params & solved_params)
{
	// Check that ms_params are set
	check_double_params_are_set();

	// Could sanity-check other parameters here...
	if(ms_params.m_interp_arr < 1 || ms_params.m_interp_arr > 2)
	{
		std::string msg = util::format("The interpolation code must be 1 (interpolate) or 2 (nearest neighbor)"
			"The input value was %d, so it was reset to 1", ms_params.m_interp_arr);
		mc_csp_messages.add_notice(msg);
		ms_params.m_interp_arr = 1;
	}
	if(ms_params.m_rad_type < 1 || ms_params.m_rad_type > 3)
	{	// Fairly important to know the intent of this input, so throw an exception if it's not one of the three options
		std::string msg = util::format("The solar resource radiation type must be 1 (DNI), 2 (Beam horizontal), or "
			"3 (Total horizontal). The input value was %d.");
		throw(C_csp_exception("C_csp_gen_collector_receiver::init",msg));
	}

	if(ms_params.mv_sfhlQ_coefs.size() < 1)
	{
		throw(C_csp_exception("C_csp_gen_collector_receiver::init","The model requires at least one irradiation-based "
			"thermal loss adjustment coefficient (mv_sfhlQ_coefs)"));
	}
	if(ms_params.mv_sfhlT_coefs.size() < 1)
	{
		throw(C_csp_exception("C_csp_gen_collector_receiver::init", "The model requires at least one temperature-based "
			"thermal loss adjustment coefficient (mv_sfhlT_coefs)"));
	}
	if( ms_params.mv_sfhlV_coefs.size() < 1 )
	{
		throw(C_csp_exception("C_csp_gen_collector_receiver::init", "The model requires at least one wind-based "
			"thermal loss adjustment coefficient (mv_sfhlV_coefs)"));
	}

	// Unit conversions
	ms_params.m_latitude *= CSP::pi/180.0;		//[rad], convert from deg
	ms_params.m_longitude *= CSP::pi / 180.0;	//[rad], convert from deg
	ms_params.m_theta_stow *= CSP::pi / 180.0;	//[rad], convert from deg
	ms_params.m_theta_dep *= CSP::pi / 180.0;	//[rad], convert from deg
	ms_params.m_T_sfdes += 273.15;	//[K], convert from C

	if( !ms_params.m_is_table_unsorted )
	{
		/*
		Standard azimuth-elevation table
		*/

		//does the table look right?
		if( (ms_params.m_optical_table.nrows() < 5 && ms_params.m_optical_table.ncols() > 3) || 
			(ms_params.m_optical_table.ncols() == 3 && ms_params.m_optical_table.nrows() > 4) )
		{
			mc_csp_messages.add_message(C_csp_messages::WARNING, "The optical efficiency table option flag may not match the specified table format. If running SSC, ensure \"IsTableUnsorted\""
			" =0 if regularly-spaced azimuth-zenith matrix is used and =1 if azimuth,zenith,efficiency points are specified.");
		}



		if( ms_params.m_optical_table.nrows() <= 0 || ms_params.m_optical_table.ncols() <= 0 ) // If these were not set correctly, the allocations below would crash in a way not caught by the error handling.
		{
			throw(C_csp_exception("C_csp_gen_collector_receiver::init","The optical table must have a positive number of rows and columns"));
		}

		double *xax = new double[ms_params.m_optical_table.ncols() - 1];
		double *yax = new double[ms_params.m_optical_table.nrows() - 1];
		double *data = new double[(ms_params.m_optical_table.ncols() - 1) * (ms_params.m_optical_table.nrows() - 1)];

		//get the xaxis data values
		for( size_t i = 1; i<ms_params.m_optical_table.ncols(); i++ ){
			xax[i - 1] = ms_params.m_optical_table(0,i)*CSP::pi/180.0;
		}
		//get the yaxis data values
		for( size_t j = 1; j<ms_params.m_optical_table.nrows(); j++ ){
			yax[j - 1] = ms_params.m_optical_table(j,0)*CSP::pi / 180.0;
		}
		//Get the data values
		for( size_t j = 1; j<ms_params.m_optical_table.nrows(); j++ ){
			for( size_t i = 1; i<ms_params.m_optical_table.ncols(); i++ ){
				data[i - 1 + (ms_params.m_optical_table.ncols() - 1)*(j - 1)] = ms_params.m_optical_table(j, i);
			}
		}

		mc_optical_table.AddXAxis(xax, (int)ms_params.m_optical_table.ncols() - 1);
		mc_optical_table.AddYAxis(yax, (int)ms_params.m_optical_table.nrows() - 1);
		mc_optical_table.AddData(data);
		delete[] xax;
		delete[] yax;
		delete[] data;
	}
	else
	{
		/*
		Use the unstructured data table
		*/

		/*
		------------------------------------------------------------------------------
		Create the regression fit on the efficiency map
		------------------------------------------------------------------------------
		*/

		if( ms_params.m_optical_table.ncols() != 3 )
		{
			std::string msg = util::format("The heliostat field efficiency file is not formatted correctly. Type expects 3 columns"
				" (zenith angle, azimuth angle, efficiency value) and instead has %d cols.", ms_params.m_optical_table.ncols());
			throw(C_csp_exception("C_csp_gen_collector_receiver::init", msg));
		}

		MatDoub sunpos;
		vector<double> effs;

		int nrows = (int)ms_params.m_optical_table.nrows();

		//read the data from the array into the local storage arrays
		sunpos.resize(nrows, VectDoub(2));
		effs.resize(nrows);
		double eff_maxval = -9.e9;
		for( int i = 0; i<nrows; i++ )
		{
			sunpos.at(i).at(0) = ms_params.m_optical_table(i,0) / az_scale * CSP::pi / 180.;
			sunpos.at(i).at(1) = ms_params.m_optical_table(i,1) / zen_scale * CSP::pi / 180.;
			double eff = ms_params.m_optical_table(i,2);

			effs.at(i) = eff;
			if( eff > eff_maxval ) eff_maxval = eff;
		}

		//scale values based on maximum. This helps the GM interpolation routine
		m_eff_scale = eff_maxval;
		for( int i = 0; i<nrows; i++ )
			effs.at(i) /= m_eff_scale;

		//Create the field efficiency table
		Powvargram vgram(sunpos, effs, 1.99, 0.);
		mpc_optical_table_uns = new GaussMarkov(sunpos, effs, vgram);

		//test how well the fit matches the data
		double err_fit = 0.;
		int npoints = (int)sunpos.size();
		for( int i = 0; i<npoints; i++ ){
			double zref = effs.at(i);
			double zfit = mpc_optical_table_uns->interp(sunpos.at(i));
			double dz = zref - zfit;
			err_fit += dz * dz;
		}
		err_fit = sqrt(err_fit);
		if( err_fit > 0.01 )
		{
			std::string msg = util::format("The heliostat field interpolation function fit is poor! (err_fit=%f RMS)", err_fit);
			mc_csp_messages.add_message(C_csp_messages::WARNING, msg);
		}

	}	// end unstructured data table

	init_sf();

	m_mode = C_csp_collector_receiver::OFF;					//[-] 0 = requires startup, 1 = starting up, 2 = running
	m_mode_prev = m_mode;
	
	return;
}
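/*
 For reference, the sorted-table branch of init() above assumes an optical
 efficiency table with the x-axis angles across row 0, the y-axis angles down
 column 0, and efficiency values in the remaining cells; cell (0,0) is never
 read. A purely illustrative layout:

     ----    x_1    x_2    x_3
     y_1     e_11   e_12   e_13
     y_2     e_21   e_22   e_23
*/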
void removeMatrixRow(int row, MatDoub &out) {

    int dummy = -1000;

    Rh.resize(Ri.nrows(),Ri.nrows());
    Rh = Ri;

    for(int i=0; i<Rh.nrows(); i++) {
        Rh[row][i] = dummy;
        Rh[i][row] = dummy;
    }

    double *datain = Rh.getMatrixArray();
    double *data   = new double[Rh.nrows()*Rh.nrows()];
    int k=0;

    for(int i=0; i < Rh.nrows()*Rh.nrows(); i++) {

        double ele = datain[i];
        if(ele != dummy)
            data[k++] = ele;

    }

    out = MatDoub( out.nrows(), out.nrows(), data );

    delete [] data;


}
/*
 Utility method used by the RandomWalk algorithm to
 invert the Graph Laplacian and accumulate the random-path
 contributions from each source-sink node pair, in
 accordance with [2] (see method declarations above).
 */
void calculateRandomWalk(int c, vector<node> Nodes) {

    queue<node> termNodes;
    for(int i=0; i<Nodes.size(); i++)
        termNodes.push(Nodes[i]);

    int N = Nodes.size();
    S.resize(N,1);
    V.resize(N,1);
    T.resize(N,N);
    Rc.resize((N-1),(N-1));
    Vi.resize(C.size(),1);

    //--- Remove arbitrary termination ('sink') state '0'.
    removeMatrixRow(0,Rc);

    //--- Invert Matrix.
    LUdcmp lu( Rc );

    Ti.resize(Rc.nrows(),Rc.nrows());

    lu.inverse( Ti );

    for(int i=0; i< T.nrows(); i++) {
        for(int j=0; j< T.nrows(); j++) {
            T[i][j] = 0;
        }
    }

    //--- Add back arbitrary termination ('sink') state '0'.
    addMatrixRow(Ti,0,T);

    while ( !termNodes.empty() ) {

        node termNode  = termNodes.front();
        termNodes.pop();

        for(int t=0; t< Nodes.size(); t++) {

            //--- Take the next start ('source') state.
            node startNode = Nodes[t];

            if( startNode.k != termNode.k ) {

                for(int i=0; i<S.nrows(); i++) {
                    S[i][0]  = 0;
                    V[i][0]  = 0;
                    Vi[i][0] = 0;
                }

                S[startNode.k][0] =  1;
                S[termNode.k][0]  = -1;

                //--- V = T * S
                for(int i=0; i<T.nrows(); i++) {
                    double sum = 0.0;
                    for(int j=0; j<T.nrows(); j++) {
                        sum += T[i][j] * S[j][0];
                    }
                    V[i][0] = sum;
                }

                addMatrixRows(V, c, Vi);

                //--- Edge Betweenness, i.e.
                //--- the currents (potential differences) along each edge.
                for(int i=0; i<elist.size(); i++) {

                    if( !elist[i].removed ) {
                        int Ni = elist[i].so-1;
                        int Nj = elist[i].si-1;

                        totallist[i+1].we += fabs(Vi[Ni][0] - Vi[Nj][0]);

                    }
                }//elist
            }//if startNode != termNode
        }//startNodes

    }

}
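/*
 For reference, the structure above follows the random-walk (current-flow)
 edge betweenness of [2]: with Rc the community's Graph Laplacian (one row and
 column removed to fix the sink), T its inverse with that row and column added
 back, and S a vector holding +1 at the source node and -1 at the sink, the
 node potentials are V = T * S and each surviving edge (i,j) accumulates the
 current |V_i - V_j| over all source/sink pairs.
*/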