Example 1
int main (int argc, char* argv[])
{
//// These are input parameters for the simulation
int
	m=16,	///number of grid points in axial direction
	n=4,	///number of grid points in circumferential direction
	e=2,	///number of ECs per node
	s=3;	///number of SMCs per node

///Time variables
double  tfinal  	= 100.00;
double 	interval	= 1e-2;
//File written every 1 second
int file_write_per_unit_time=int(1/interval);
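//With interval = 1e-2 s this gives file_write_per_unit_time = 100, i.e. results are written
//once every 100 solver iterations, which corresponds to once per simulated second.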


///Global variables that are to be read by each processor
int nbrs[4], dims[2], periods[2], reorder, coords[2];

///Global declaration of request and status placeholders.
///Request and status handles for the nonblocking send and receive operations used to communicate with each of the four neighbours.
MPI_Request reqs[8];
MPI_Status stats[8];


///Initialize MPI
MPI_Init(&argc, &argv);

int rank,numtasks;

int outbuf, inbuf[4]={MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL,MPI_PROC_NULL},
	source,dest;
int
	tag=1;			///tag for messages exchanged with the nearest neighbours.

//Open an instance of a log file on each processor


//Query my rank and the size of MPI_COMM_WORLD
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);


	dims[0]		=m;
	dims[1]		=n;
	periods[0]	=0;
	periods[1]	=1;
	reorder		=0;
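	//The process grid is m x n: dims[0] (axial) is non-periodic, dims[1] (circumferential) is periodic,
	//so the Cartesian topology wraps around in the circumferential direction only.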

int err;
char filename[20];
//open an output stream for logfile
ofstream logptr;

	err=sprintf(filename,"logfile%d.txt",rank);

	logptr.open(filename);

	if (!logptr.good()){logptr.open(filename,fstream::trunc);}


	bool a = logptr.is_open();

  err=MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &grid.comm);
  if(err!=MPI_SUCCESS){logptr<<rank<<": "<<"failed at cart create"<<endl;}

  err=MPI_Comm_rank(grid.comm, &grid.rank);
  if(err!=MPI_SUCCESS){logptr<<rank<<": "<<"failed at comm rank"<<endl;}



  err=MPI_Cart_coords(grid.comm, grid.rank, 2, coords);
  if(err!=MPI_SUCCESS){logptr<<rank<<": "<<"failed at cart coords"<<endl;}
  err=MPI_Cart_shift(grid.comm, 0, 1, &nbrs[UP], &nbrs[DOWN]);
  if(err!=MPI_SUCCESS){logptr<<rank<<": "<<"failed at cart shift up down"<<endl;}
  err=MPI_Cart_shift(grid.comm, 1, 1, &nbrs[LEFT], &nbrs[RIGHT]);
  if(err!=MPI_SUCCESS){logptr<<rank<<": "<<"failed at cart left right"<<endl;}


  //outbuf now contains this process's rank in the Cartesian communicator (grid.comm)
  outbuf = grid.rank;

  for (int i=0; i<4; i++) {
     dest = nbrs[i];
     source = nbrs[i];
     MPI_Isend(&outbuf, 1, MPI_INT, dest, tag,
               grid.comm, &reqs[i]);
     MPI_Irecv(&inbuf[i], 1, MPI_INT, source, tag,
               grid.comm, &reqs[i+4]);
     }

  MPI_Waitall(8, reqs, stats);
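  //After MPI_Waitall, inbuf[UP..RIGHT] holds the rank of each existing neighbour; entries stay
  //MPI_PROC_NULL where there is no neighbour (i.e. at the non-periodic axial ends), since a
  //receive from MPI_PROC_NULL completes without modifying the buffer.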


logptr<<"rank="<<grid.rank<<"\t"<<"coords= "<<coords[0]<<","<<coords[1]<<"\t"<<"nbrs(u,d,l,r)="<<nbrs[UP]<<","<<nbrs[DOWN]<<","<<nbrs[LEFT]<<","<<nbrs[RIGHT]<<endl;

///Each task now calculates the number of ECs per node.
if (m!=(numtasks/n))
	e	=	m/numtasks;

grid.nbrs[UP] 	= nbrs[UP];
grid.nbrs[DOWN]	= nbrs[DOWN];
grid.nbrs[LEFT] = nbrs[LEFT];
grid.nbrs[RIGHT]= nbrs[RIGHT];

///Model dimensions used by each task: counts of fluxes and coupling species per cell.
grid.num_fluxes_smc						=	12;			///number of SMC ionic currents to be evaluated when computing the d/dt terms of the ODEs.
grid.num_fluxes_ec						=	12;			///number of EC ionic currents to be evaluated when computing the d/dt terms of the ODEs.

grid.num_coupling_species_smc			=	3;			///number of SMC coupling species (homocellular/heterocellular)
grid.num_coupling_species_ec			=	3;			///number of EC coupling species (homocellular/heterocellular)


grid.neq_smc	                		=	5;			/// number of SMC ODEs for a single cell
grid.neq_ec			                    =	4;			/// number of EC ODEs for a single cell

grid.num_ec_axially		        		=	e;
grid.num_smc_axially	            	=	e*13;
grid.num_ec_circumferentially       	=	s*5;
grid.num_smc_circumferentially	        =	s;
grid.neq_ec_axially		        		=	grid.num_ec_axially * grid.neq_ec;
grid.neq_smc_axially	            	=	grid.num_smc_axially * grid.neq_smc;
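//From the factors above: each EC spans 13 SMCs axially, and each SMC spans 5 ECs circumferentially.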

grid.m				       				 =	m;
grid.n				       				 =	n;

///Local and global MPI information.
grid.numtasks			        =	numtasks;
//this is my rank in MPI_COMM_WORLD
err = MPI_Comm_rank(MPI_COMM_WORLD,&grid.universal_rank);
grid.universal_rank                     =   	rank;


///Now allocate memory space for the structures representing the cells and the various members of those structures.

//Each of the two cell grids has two additional rows and two additional columns acting as ghost cells.
//The following shows an example of a 5x7 grid with ghost cells added on all four sides: the 0s are the
//actual members of the grid, whereas the +s are the ghost cells.

// + + + + + + + + +
// + 0 0 0 0 0 0 0 +
// + 0 0 0 0 0 0 0 +
// + 0 0 0 0 0 0 0 +
// + 0 0 0 0 0 0 0 +
// + 0 0 0 0 0 0 0 +
// + + + + + + + + +
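//With this convention the interior cells occupy indices 1..N in each dimension,
//while indices 0 and N+1 hold the ghost cells.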


	smc 	= (celltype1**) checked_malloc((grid.num_smc_circumferentially+2)* sizeof(celltype1*), stdout, "smc");
	for (int i=0; i<(grid.num_smc_circumferentially+2); i++){
		smc[i]	= (celltype1*) checked_malloc((grid.num_smc_axially+2)* sizeof(celltype1), stdout, "smc column dimension");
	}
	ec 	= (celltype2**) checked_malloc((grid.num_ec_circumferentially+2)* sizeof(celltype2*), stdout, "ec");
	for (int i=0; i<(grid.num_ec_circumferentially+2); i++){
		ec[i]	= (celltype2*) checked_malloc((grid.num_ec_axially+2)* sizeof(celltype2), stdout, "ec column dimension");
	}

///Memory for the state vector, the single-cell evaluation placeholders (the RHS of the ODEs for each cell) and the coupling fluxes is allocated in this section.
///Ghost cells carry only the state vector array of the corresponding cell type; the remaining members exist for interior cells only.
///Memory is therefore allocated for all cells except the ghost cells, hence the ranges 1 to grid.num_smc_circumferentially / grid.num_ec_circumferentially (inclusive).
	///SMC domain
	/*for (int i = 0; i <= grid.num_smc_circumferentially + 1; i++) {  		///commented as this is probably not required
		for (int j = 0; j <= grid.num_smc_axially + 1; j++) {
			smc[i][j].p = (double*) checked_malloc(
					grid.num_neq_smc * sizeof(double*), stdout,
					"state vector in smc");
		}
	}*/
	for (int i = 1; i <= grid.num_smc_circumferentially; i++) {
		for (int j = 1; j <= grid.num_smc_axially; j++) {
			smc[i][j].A = (double*) checked_malloc(
					grid.num_fluxes_smc * sizeof(double), stdout,
					"matrix A in smc");
			smc[i][j].B = (double*) checked_malloc(
					grid.num_coupling_species_smc * sizeof(double), stdout,
					"matrix B in smc");
			smc[i][j].C = (double*) checked_malloc(
					grid.num_coupling_species_smc * sizeof(double), stdout,
					"matrix C in smc");
		}
	}

	///EC domain
/*	for (int i = 0; i <= grid.num_ec_circumferentially + 1; i++) {			///commented as this is probably not required
		for (int j = 0; j <= grid.num_ec_axially + 1; j++) {
			ec[i][j].q = (double*) checked_malloc(
					grid.num_neq_ec * sizeof(double*), stdout,
					"state vector in ec");
		}
	}*/
	for (int i = 1; i <= grid.num_ec_circumferentially; i++) {
		for (int j = 1; j <= grid.num_ec_axially; j++) {
			ec[i][j].A = (double*) checked_malloc(
					grid.num_fluxes_ec * sizeof(double), stdout,
					"matrix A in ec");
			ec[i][j].B = (double*) checked_malloc(
					grid.num_coupling_species_ec * sizeof(double), stdout,
					"matrix B in ec");
			ec[i][j].C = (double*) checked_malloc(
					grid.num_coupling_species_ec * sizeof(double), stdout,
					"matrix C in ec");
		}
	}


	///Allocate memory for the coupling data to be sent and received by the MPI communication routines.

	///sendbuf and recvbuf are 2D arrays whose first dimension indexes the up, down, left and right directions.
	///Each direction is broken into two segments, e.g. up1 & up2, down1 & down2, etc.
	///The length of the second dimension equals half the number of cells for which information is to be sent or received,
	///so each communicating pair exchanges data twice to cover the full length.

		sendbuf = (double**) checked_malloc(8 * sizeof(double*), stdout,
				"sendbuf dimension 1");
		recvbuf = (double**) checked_malloc(8 * sizeof(double*), stdout,
				"recvbuf dimension 1");

		///Each processor now allocates the send and receive buffers that will hold the coupling information.
		///Since sendbuf must also carry the extent of the SMC and EC segments being sent in a given direction,
		///the first two elements hold the start and end indices of the SMC segment (in the relevant dimension,
		///circumferential or axial) for which information is being sent, and the next two elements hold the
		///same information for the ECs.

		int extent_s, extent_e;	///Lengths of the prospective buffers, based on the number of cells in either orientation (circumferential or axial).

		grid.added_info_in_send_buf = 4;	///Number of elements containing additional information at the beginning of the send buffer.
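		///For example, sendbuf[UP1] is laid out as (inferred from the header written below and the size expressions):
		///  [0..3]  header: start/end SMC index and start/end EC index of the segment being sent;
		///  [4.. ]  grid.num_coupling_species_smc values per SMC of the segment, followed by
		///          grid.num_coupling_species_ec values per EC, filled in by the communication routine.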

		/// data to send to the neighbour in the UP direction
		extent_s = (int) (ceil((double) (grid.num_smc_circumferentially) / 2));
		extent_e = (int) (ceil((double) (grid.num_ec_circumferentially) / 2));
		sendbuf[UP1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[UP1] dimension 2");
		sendbuf[UP1][0] = (double) (1); //Start of the 1st segment of the SMC array (circumferential direction) sent to the UP neighbour
		sendbuf[UP1][1] = (double) (extent_s); //End of the 1st segment of the SMC array sent to the UP neighbour
		sendbuf[UP1][2] = (double) (1); //Start of the 1st segment of the EC array sent to the UP neighbour
		sendbuf[UP1][3] = (double) (extent_e); //End of the 1st segment of the EC array sent to the UP neighbour

		sendbuf[UP2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[UP2] dimension 2");
		sendbuf[UP2][0] = (double) (extent_s -1); //Start of the 2nd segment of the SMC array sent to the UP neighbour
		sendbuf[UP2][1] = (double) (grid.num_smc_circumferentially); //End of the 2nd segment of the SMC array sent to the UP neighbour
		sendbuf[UP2][2] = (double) (extent_e -1); //Start of the 2nd segment of the EC array sent to the UP neighbour
		sendbuf[UP2][3] = (double) (grid.num_ec_circumferentially); //End of the 2nd segment of the EC array sent to the UP neighbour

		/// data to receive from the neighbour in the UP direction
		recvbuf[UP1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[UP1] dimension 2");
		recvbuf[UP2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[UP2] dimension 2");

		/// data to send to the neighbour in the DOWN direction
		sendbuf[DOWN1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[DOWN1] dimension 2");
		sendbuf[DOWN1][0] = (double) (1); //Start of the 1st segment of the SMC array (circumferential direction) sent to the DOWN neighbour
		sendbuf[DOWN1][1] = (double) (extent_s); //End of the 1st segment of the SMC array sent to the DOWN neighbour
		sendbuf[DOWN1][2] = (double) (1); //Start of the 1st segment of the EC array sent to the DOWN neighbour
		sendbuf[DOWN1][3] = (double) (extent_e); //End of the 1st segment of the EC array sent to the DOWN neighbour

		sendbuf[DOWN2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[DOWN2] dimension 2");
		sendbuf[DOWN2][0] = (double) (extent_s -1); //Start of the 2nd segment of the SMC array sent to the DOWN neighbour
		sendbuf[DOWN2][1] = (double) (grid.num_smc_circumferentially); //End of the 2nd segment of the SMC array sent to the DOWN neighbour
		sendbuf[DOWN2][2] = (double) (extent_e -1); //Start of the 2nd segment of the EC array sent to the DOWN neighbour
		sendbuf[DOWN2][3] = (double) (grid.num_ec_circumferentially); //End of the 2nd segment of the EC array sent to the DOWN neighbour

		/// data to receive from the neighbour in the DOWN direction
		recvbuf[DOWN1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[DOWN1] dimension 2");
		recvbuf[DOWN2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[DOWN2] dimension 2");

		/// data to send to the neighbour in the LEFT direction
		extent_s = (int) (ceil((double) (grid.num_smc_axially) / 2));
		extent_e = (int) (ceil((double) (grid.num_ec_axially) / 2));
		sendbuf[LEFT1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[LEFT1] dimension 2");
		sendbuf[LEFT1][0] = (double) (1); //Start of the 1st segment of the SMC array (axial direction) sent to the LEFT neighbour
		sendbuf[LEFT1][1] = (double) (extent_s); //End of the 1st segment of the SMC array sent to the LEFT neighbour
		sendbuf[LEFT1][2] = (double) (1); //Start of the 1st segment of the EC array sent to the LEFT neighbour
		sendbuf[LEFT1][3] = (double) (extent_e); //End of the 1st segment of the EC array sent to the LEFT neighbour

		sendbuf[LEFT2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[LEFT2] dimension 2");
		sendbuf[LEFT2][0] = (double) (extent_s -1); //Start of the 2nd segment of the SMC array sent to the LEFT neighbour
		sendbuf[LEFT2][1] = (double) (grid.num_smc_axially); //End of the 2nd segment of the SMC array sent to the LEFT neighbour
		sendbuf[LEFT2][2] = (double) (extent_e -1); //Start of the 2nd segment of the EC array sent to the LEFT neighbour
		sendbuf[LEFT2][3] = (double) (grid.num_ec_axially); //End of the 2nd segment of the EC array sent to the LEFT neighbour

		/// data to receive from the neighbour in the LEFT direction
		recvbuf[LEFT1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[LEFT1] dimension 2");
		recvbuf[LEFT2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[LEFT2] dimension 2");

		/// data to send to the neighbour in the RIGHT direction
		extent_s = (int) (ceil((double) (grid.num_smc_axially) / 2));
		extent_e = (int) (ceil((double) (grid.num_ec_axially) / 2));
		sendbuf[RIGHT1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[RIGHT1] dimension 2");
		sendbuf[RIGHT1][0] = (double) (1); //Start of the 1st segment of the SMC array (axial direction) sent to the RIGHT neighbour
		sendbuf[RIGHT1][1] = (double) (extent_s); //End of the 1st segment of the SMC array sent to the RIGHT neighbour
		sendbuf[RIGHT1][2] = (double) (1); //Start of the 1st segment of the EC array sent to the RIGHT neighbour
		sendbuf[RIGHT1][3] = (double) (extent_e); //End of the 1st segment of the EC array sent to the RIGHT neighbour

		sendbuf[RIGHT2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"sendbuf[RIGHT2] dimension 2");
		sendbuf[RIGHT2][0] = (double) (extent_s -1); //Start of the 2nd segment of the SMC array sent to the RIGHT neighbour
		sendbuf[RIGHT2][1] = (double) (grid.num_smc_axially); //End of the 2nd segment of the SMC array sent to the RIGHT neighbour
		sendbuf[RIGHT2][2] = (double) (extent_e -1); //Start of the 2nd segment of the EC array sent to the RIGHT neighbour
		sendbuf[RIGHT2][3] = (double) (grid.num_ec_axially); //End of the 2nd segment of the EC array sent to the RIGHT neighbour

		/// data to receive from the neighbour in the RIGHT direction
		recvbuf[RIGHT1] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[RIGHT1] dimension 2");
		recvbuf[RIGHT2] = (double*) checked_malloc(
				(grid.added_info_in_send_buf
						+ grid.num_coupling_species_smc * extent_s
						+ grid.num_coupling_species_ec * extent_e)
						* sizeof(double), stdout,
				"recvbuf[RIGHT2] dimension 2");

	int NEQ =	grid.neq_smc*(grid.num_smc_axially*grid.num_smc_circumferentially) + grid.neq_ec*(grid.num_ec_axially*grid.num_ec_circumferentially);
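	//For example, with e = 2 and s = 3 (the defaults, assuming e is not overridden by the m/numtasks
	//recomputation above), this node holds 26x3 SMCs and 2x15 ECs, so NEQ = 5*78 + 4*30 = 510 equations.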


    ///Set up output streams to write data to files. Each node opens an independent set of files and writes various state variables into them.
	checkpoint_handle *check = initialise_checkpoint(grid.universal_rank);

    ofstream    Time,
                ci,si,vi,wi,Ii,cpCi,cpVi,cpIi,
                cj,sj,vj,Ij,cpCj,cpVj,cpIj;
	err = sprintf(filename, "time%d.txt", grid.universal_rank);
	Time.open(filename);
	if (!Time.good()){
		Time.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_c%d.txt", grid.universal_rank);
	ci.open(filename);
	if (!ci.good()){ci.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_s%d.txt", grid.universal_rank);
	si.open(filename);
	if (!si.good()){si.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_v%d.txt", grid.universal_rank);
	vi.open(filename);
	if (!vi.good()){vi.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_w%d.txt", grid.universal_rank);
	wi.open(filename);
	if (!wi.good()){wi.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_I%d.txt", grid.universal_rank);
	Ii.open(filename);
	if (!Ii.good()){Ii.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_c%d.txt", grid.universal_rank);
	cj.open(filename);
	if (!cj.good()){cj.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_s%d.txt", grid.universal_rank);
	sj.open(filename);
	if (!sj.good()){sj.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_v%d.txt", grid.universal_rank);
	vj.open(filename);
	if (!vj.good()){vj.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_I%d.txt", grid.universal_rank);
	Ij.open(filename);
	if (!Ij.good()){Ij.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_cpC%d.txt", grid.universal_rank);
		cpCi.open(filename);
	if (!cpCi.good()){cpCi.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_cpC%d.txt", grid.universal_rank);
		cpCj.open(filename);
	if (!cpCj.good()){cpCj.open(filename, fstream::trunc);}

	err = sprintf(filename, "smc_cpV%d.txt", grid.universal_rank);
		cpVi.open(filename);
	if (!cpVi.good()){cpVi.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_cpV%d.txt", grid.universal_rank);
	cpVj.open(filename);
	if (!cpVj.good()){cpVj.open(filename, fstream::trunc);}


	err = sprintf(filename, "smc_cpI%d.txt", grid.universal_rank);
	cpIi.open(filename);
	if (!cpIi.good()){cpIi.open(filename, fstream::trunc);}

	err = sprintf(filename, "ec_cpI%d.txt", grid.universal_rank);
	cpIj.open(filename);
	if (!cpIj.good()){cpIj.open(filename, fstream::trunc);}

	///Setting up the solver

	RKSUITE		rksuite;

	double 	tnow	= 0.0;

	//Error control variables
	double 	TOL	= 1e-6;

	double * thres = (double*)checked_malloc(NEQ*sizeof(double),stdout,"Threshold array for RKSUITE");

	for (int i=0; i<NEQ; i++)
		thres[i]	=	1e-6;

	//Variables holding new and old values
	double* y =  (double*)checked_malloc(NEQ*sizeof(double),stdout,"Solver array y for RKSUITE");
	double* yp=  (double*)checked_malloc(NEQ*sizeof(double),stdout,"Solver array yp for RKSUITE");
	double* ymax=  (double*)checked_malloc(NEQ*sizeof(double),stdout,"Solver array ymax for RKSUITE");
	double* derivative=  (double*)checked_malloc(NEQ*sizeof(double),stdout,"Solver array derivative for RKSUITE");
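	//Per the standard RKSUITE interface, y holds the solution, yp the returned derivative approximation,
	//and ymax the largest magnitude of each solution component seen so far (all filled by the ut() calls below).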


///Mapping state vectors of each cell (all except the ghost cells) to the corresponding locations on the solver's solution array y[].

	int k=0, offset;
	for (int i=1; i<=grid.num_smc_circumferentially; i++){
		for (int j=1; j<=grid.num_smc_axially; j++){
			if (i>1)
				k = ((i-1)*grid.neq_smc_axially);
			else if (i==1)
				k = 0;
			smc[i][j].p[smc_Ca]	=	&y[k+((j-1)*grid.neq_smc)+0];
			smc[i][j].p[smc_Vm]	=	&y[k+((j-1)*grid.neq_smc)+1];
			smc[i][j].p[smc_SR]	=	&y[k+((j-1)*grid.neq_smc)+2];
			smc[i][j].p[smc_w]	=	&y[k+((j-1)*grid.neq_smc)+3];
			smc[i][j].p[smc_IP3]=	&y[k+((j-1)*grid.neq_smc)+4];
		}
	}
	offset = (grid.neq_smc*grid.num_smc_circumferentially*grid.num_smc_axially);

	for (int i=1; i<=grid.num_ec_circumferentially; i++){
		for (int j=1; j<=grid.num_ec_axially; j++){
			if (i>1)
				k = offset+((i-1)*grid.neq_ec_axially);
			else if (i==1)
				k = offset;
			ec[i][j].q[ec_Ca]	=	&y[k+((j-1)*grid.neq_ec)+0];
			ec[i][j].q[ec_Vm]	=	&y[k+((j-1)*grid.neq_ec)+1];
			ec[i][j].q[ec_SR]	=	&y[k+((j-1)*grid.neq_ec)+2];
			ec[i][j].q[ec_IP3]	=	&y[k+((j-1)*grid.neq_ec)+3];
		}
	}
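	//Resulting layout of y[]: all SMC state variables first (grid.neq_smc consecutive entries per SMC,
	//ordered axially within each circumferential row), followed by all EC state variables starting at 'offset'.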

	///Initialize the state variables and coupling data values of each cell.
	Initialize_koeingsberger_smc(grid);
	Initialize_koeingsberger_ec(grid);


 	//Solver method
	int method	=	2;		//RK(4,5)
	//Error Flag
	int uflag = 0;

	int state 	=  couplingParms(CASE);
	int itteration=0;
	double tend;

    ///Write my local information in my rank's logfile
	logptr<<"COUPLING COEFFICIENTS"<<endl
	<<"g_hm_smc=\t"<<cpl_cef.g_hm_smc<<endl
	<<"g_hm_ec=\t"<<cpl_cef.g_hm_ec<<endl
	<<"p_hm_smc=\t"<<cpl_cef.p_hm_smc<<endl
	<<"p_hm_ec=\t"<<cpl_cef.p_hm_ec<<endl
	<<"pIP_hm_smc=\t"<<cpl_cef.pIP_hm_smc<<endl
	<<"pIP_hm_ec=\t"<<cpl_cef.pIP_hm_ec<<endl
	<<"g_ht_smc=\t"<<cpl_cef.g_ht_smc<<endl
	<<"g_ht_ec=\t"<<cpl_cef.g_ht_ec<<endl
	<<"p_ht_smc=\t"<<cpl_cef.p_ht_smc<<endl
	<<"p_ht_ec=\t"<<cpl_cef.p_ht_ec<<endl
	<<"pIP_ht_smc=\t"<<cpl_cef.pIP_ht_smc<<endl
	<<"pIP_ht_ec=\t"<<cpl_cef.pIP_ht_ec<<endl;

	logptr<<"Spatial Gradient info:"<<endl;
	//logptr<<"Minimum JPLC\t="<<min_jplc<<endl;
	//logptr<<"Maximum JPLC\t"<<max_jplc<<endl;
	//logptr<<"Gradient\t"<<gradient<<endl;

	logptr<<"Total Tasks="<<numtasks<<endl;
	logptr<<"Number of grid points in axial direction = "<<m<<endl;
	logptr<<"Number of grid points in circumferential direction = "<<n<<endl;
	logptr<<"Number of ECs per node (axially) = "<<grid.num_ec_axially<<endl;
	logptr<<"Number of SMCs per node (circumferentially) = "<<grid.num_smc_circumferentially<<endl;
	logptr<<"Total ECs on this node = "<< (grid.num_ec_axially * grid.num_ec_circumferentially)<<endl;
	logptr<<"Total SMCs on this node = "<< (grid.num_smc_axially * grid.num_smc_circumferentially)<<endl<<endl;
	logptr<<"Total number of cells on this node ="<<(grid.num_ec_axially * grid.num_ec_circumferentially)+(grid.num_smc_axially *
	grid.num_smc_circumferentially)<<endl;
	logptr<<"Total number of cells in the full computational domain = "<<((grid.num_ec_axially * grid.num_ec_circumferentially)+(grid.num_smc_axially *
	grid.num_smc_circumferentially))*numtasks<<endl;
	logptr<<"Total number of equations in the full computational domain = "<<NEQ*numtasks<<endl;

	///Solver section
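	//RKSUITE setup below: integrate NEQ equations from tnow to tfinal with tolerance TOL and per-component
	//thresholds thres; method 2 selects the RK(4,5) pair and "UT" the usual-task interface. The trailing
	//arguments (no global error assessment, automatic initial step size, no diagnostic messages) follow the
	//standard RKSUITE setup signature.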

	rksuite.setup(NEQ, tnow, y, tfinal, TOL, thres, method, "UT", false, 0.00, false );

	MPI_Barrier(MPI_COMM_WORLD);
	communication_async_send_recv(logptr);


	///Iterative  calls to the solver start here.
	for (tend = interval; tend < tfinal; tend+=interval)
	{

	/// RKSUITE UT call
	/// prototype: ut(f, twant, tgot, ygot, ypgot, ymax, uflag)
	rksuite.ut(computeDerivatives, tend, tnow, y, yp, ymax, uflag);

	if (uflag >= 5) {
		logptr<<rank<<": RKSUITE error "<<uflag<<" occurred at t = "<<tnow<<endl;
		MPI_Abort(MPI_COMM_WORLD, grid.universal_rank);
	}


	///Increment the iteration counter, as rksuite has finished solving over tnow <= t <= tend.
	itteration++;


	/// Call for interprocessor communication
	MPI_Barrier(MPI_COMM_WORLD);
	communication_async_send_recv(logptr);




	if (itteration==5){
		logptr<<"At t= "<<tend;
		for (int j=1; j<=e; j++)
			logptr<<"JPLC["<<j<<"]= "<<ec[0][j-1].JPLC<<"\t";
		logptr<<endl;
	}

	if (itteration==1e5){
		logptr<<"At t= "<<tend;
		for (int j=1; j<=e; j++)
			logptr<<"JPLC["<<j<<"]= "<<ec[0][j-1].JPLC<<"\t";
		logptr<<endl;
	}


	if ((itteration % file_write_per_unit_time)==0){

		Time<<tend<<endl;

		for (int i=1; i<=grid.num_smc_circumferentially; i++){
			for (int j=1; j<=grid.num_smc_axially; j++){
				ci<<smc[i-1][j-1].c<<"\t";
				si<<smc[i-1][j-1].s<<"\t";
				vi<<smc[i-1][j-1].v<<"\t";
				wi<<smc[i-1][j-1].w<<"\t";
				Ii<<smc[i-1][j-1].I<<"\t";
				cpCi<<smc[i-1][j-1].B[0]<<"\t";
				cpVi<<smc[i-1][j-1].B[1]<<"\t";
				cpIi<<smc[i-1][j-1].B[2]<<"\t";
			}//end j
		}//end i
		ci<<endl;si<<endl;vi<<endl;wi<<endl;Ii<<endl;
		cpCi<<endl;cpVi<<endl;cpIi<<endl;

		for (int i=1; i<=grid.num_ec_circumferentially; i++){
			for (int j=1; j<=grid.num_ec_axially; j++){
				cj<<ec[i-1][j-1].c<<"\t";
				sj<<ec[i-1][j-1].s<<"\t";
				vj<<ec[i-1][j-1].v<<"\t";
				Ij<<ec[i-1][j-1].I<<"\t";
				cpCj<<ec[i-1][j-1].B[0]<<"\t";
				cpVj<<ec[i-1][j-1].B[1]<<"\t";
				cpIj<<ec[i-1][j-1].B[2]<<"\t";
			}//end j
		}//end i
		cj<<endl;sj<<endl;vj<<endl;Ij<<endl;
		cpCj<<endl;cpVj<<endl;cpIj<<endl;

	}//end of file-write block


	}//end of for loop on TEND


cout<<"["<<grid.rank<<"]"<<"End of run at time "<<tend<<endl;
logptr.close();
Time.close();
ci.close();si.close();vi.close();wi.close();Ii.close();
cj.close();sj.close();vj.close();Ij.close();
cpCi.close();cpCj.close();cpVi.close();cpVj.close();cpIi.close();cpIj.close();
free(y); free(yp); free(ymax); free(derivative); free(thres);
MPI_Finalize();
}// end main()