bool ParDiSO::makeSchurComplement(CSRdouble& A, CSRdouble& S)
{
    double ddum;

    shiftIndices_(A, 1);
    //shiftIndices_(S, 1);

    // Check if this matrix is OK
    PARDISOCHECK_D(&mtype, &A.nrows, A.pData, A.pRows, A.pCols, &error);
    error_();

    phase     = 12;
    iparm[38] = S.nrows;

    // Perform symbolic analysis and numerical factorization
    PARDISOCALL_D(pt, &maxfct, &mnum, &mtype, &phase, &A.nrows, A.pData,
                  A.pRows, A.pCols, perm, &nrhs, &iparm[1], &msglvl,
                  &ddum, &ddum, &error, &dparm[1]);

    S.nonzeros = int(iparm[39]);
    S.allocate(S.nrows, S.ncols, S.nonzeros);

    // Calculate and store the Schur complement
    PARDISOSCHUR_D(pt, &maxfct, &mnum, &mtype, S.pData, S.pRows, S.pCols);

    shiftIndices_(S, -1);
    shiftIndices_(A, -1);
    error_();

    int  nonzeros   = S.pRows[S.nrows];
    bool is_it_full = (nonzeros == S.nrows * S.ncols);

    return is_it_full;
}
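// --------------------------------------------------------------------------
// Illustrative only: a minimal sketch of how makeSchurComplement() might be
// driven, using the CSRdouble and ParDiSO interfaces of this code base. The
// file name, dimensions and mtype below are hypothetical, not taken from the
// program itself.
// --------------------------------------------------------------------------
void exampleSchurComplement()
{
    CSRdouble A, S;
    A.loadFromFile("A_sparse.csr");   // hypothetical symmetric CSR matrix

    // The caller fixes the Schur dimension (the trailing rows/columns of A);
    // makeSchurComplement() allocates S itself once PARDISO has reported the
    // number of nonzeros.
    S.nrows = 100;                    // hypothetical size of the (2,2) block
    S.ncols = 100;

    ParDiSO pardiso(-2, 1);           // mtype -2: real symmetric indefinite
    bool full = pardiso.makeSchurComplement(A, S);
    if (full)
        printf("Schur complement of dimension %d is completely dense\n", S.nrows);
}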
int set_up_BD(int* DESCD, double* Dmat, CSRdouble& BT_i, CSRdouble& B_j, CSRdouble& Btsparse)
{
    // Read-in of matrices X, Z and T from file (filename[X,Z,T])
    // X and Z are read in entirely by every process
    // T is read in strip by strip (the number of rows per process is at most blocksize)
    // D is constructed directly in a distributed way
    // B is first assembled sparse in the root process; afterwards the parts needed
    // to construct the distributed Schur complement are sent to each process

    FILE* fT;
    int ni, i, j, info;
    int* DESCT;
    double *Tblock, *temp;
    int nTblocks, nstrips, pTblocks, stripcols, lld_T, pcol, colcur, rowcur;

    CSRdouble Xtsparse, Ztsparse, XtT_sparse, ZtT_sparse, XtT_temp, ZtT_temp;

    Xtsparse.loadFromFile(filenameX);
    Ztsparse.loadFromFile(filenameZ);

    Xtsparse.transposeIt(1);
    Ztsparse.transposeIt(1);

    XtT_sparse.allocate(m, k, 0);
    ZtT_sparse.allocate(l, k, 0);

    pcol = *(position + 1);

    // Matrix T is read in by strips of size (blocksize * *(dims+1), k)
    // Strips of T are read in row-wise, so effectively strips of T' (transpose) are
    // stored column-wise with dimensions (k, blocksize * *(dims+1))
    // However, we must then also transpose the process grid to distribute T' correctly

    // number of strips in which we divide matrix T'
    nstrips = n % (blocksize * *(dims + 1)) == 0 ? n / (blocksize * *(dims + 1))
                                                 : n / (blocksize * *(dims + 1)) + 1;

    // the number of columns of T' included in each strip
    stripcols = blocksize * *(dims + 1);

    // number of blocks necessary to store a complete column of T'
    nTblocks = k % blocksize == 0 ? k / blocksize : k / blocksize + 1;

    // number of blocks necessary in this process to store a complete column of T'
    pTblocks = (nTblocks - *position) % *dims == 0 ? (nTblocks - *position) / *dims
                                                   : (nTblocks - *position) / *dims + 1;
    pTblocks = pTblocks < 1 ? 1 : pTblocks;
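    // Worked example (illustrative, not from the original source): with
    // n = 1000, blocksize = 64 and *(dims+1) = 2 process columns, each strip
    // holds stripcols = 128 columns of T', so nstrips = ceil(1000/128) = 8,
    // the last strip being only partially filled.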
    // local leading dimension of the strip of T' (differs from process to process)
    lld_T = pTblocks * blocksize;

    // Initialisation of descriptor of strips of matrix T'
    DESCT = (int*)malloc(DLEN_ * sizeof(int));
    if (DESCT == NULL) {
        printf("unable to allocate memory for descriptor for T\n");
        return -1;
    }
    // The strip of T (k, stripcols) is distributed across ICTXT2D starting in process (0,0)
    // in blocks of size (blocksize, blocksize);
    // the local leading dimension in this process is lld_T
    descinit_(DESCT, &k, &stripcols, &blocksize, &blocksize, &i_zero, &i_zero,
              &ICTXT2D, &lld_T, &info);
    if (info != 0) {
        printf("Descriptor of matrix T returns info: %d\n", info);
        return info;
    }

    // Allocation of memory for the strip of T' in all processes
    Tblock = (double*)calloc(pTblocks * blocksize * blocksize, sizeof(double));
    if (Tblock == NULL) {
        printf("Error in allocating memory for a strip of T in processor (%d,%d)\n",
               *position, *(position + 1));
        return -1;
    }

    // Initialisation of matrix D (all diagonal elements of D equal to lambda)
    temp = Dmat;
    for (i = 0, rowcur = 0, colcur = 0; i < Dblocks; ++i, ++colcur, ++rowcur) {
        if (rowcur == *dims) {
            rowcur = 0;
            temp += blocksize;
        }
        if (colcur == *(dims + 1)) {
            colcur = 0;
            temp += blocksize * lld_D;
        }
        if (*position == rowcur && *(position + 1) == colcur) {
            for (j = 0; j < blocksize; ++j) {
                *(temp + j * lld_D + j) = lambda;
            }
            if (i == Dblocks - 1 && Ddim % blocksize != 0) {
                for (j = blocksize - 1; j >= Ddim % blocksize; --j) {
                    *(temp + j * lld_D + j) = 0.0;
                }
            }
        }
    }

    fT = fopen(filenameT, "rb");
    if (fT == NULL) {
        printf("Error opening file\n");
        return -1;
    }

    // Set-up of matrix D and B per strip of T'
    for (ni = 0; ni < nstrips; ++ni) {
        if (ni == nstrips - 1) {
            if (Tblock != NULL)
                free(Tblock);
            Tblock = NULL;
            Tblock = (double*)calloc(pTblocks * blocksize * blocksize, sizeof(double));
            if (Tblock == NULL) {
                printf("Error in allocating memory for a strip of T in processor (%d,%d)\n",
                       *position, *(position + 1));
                return -1;
            }
        }
        // Each process only reads in a part of the strip of T'
        // When k is not a multiple of blocksize, read-in of the last elements
        // of the rows of T is tricky
        if ((nTblocks - 1) % *dims == *position && k % blocksize != 0) {
            if (ni == 0) {
                info = fseek(fT, (long)(pcol * blocksize * k * sizeof(double)), SEEK_SET);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
            } else {
                info = fseek(fT, (long)(blocksize * (*(dims + 1) - 1) * k * sizeof(double)), SEEK_CUR);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
            }
            for (i = 0; i < blocksize; ++i) {
                info = fseek(fT, (long)(blocksize * *position * sizeof(double)), SEEK_CUR);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
                for (j = 0; j < pTblocks - 1; ++j) {
                    fread(Tblock + i * pTblocks * blocksize + j * blocksize,
                          sizeof(double), blocksize, fT);
                    info = fseek(fT, (long)((*dims - 1) * blocksize * sizeof(double)), SEEK_CUR);
                    if (info != 0) {
                        printf("Error in setting correct begin position for reading T file\n"
                               "processor (%d,%d), error: %d\n", *position, pcol, info);
                        return -1;
                    }
                }
                fread(Tblock + i * pTblocks * blocksize + j * blocksize,
                      sizeof(double), k % blocksize, fT);
            }
            // Normal read-in of the strips of T from a binary file
            // (each time blocksize elements are read in)
        } else {
            if (ni == 0) {
                info = fseek(fT, (long)(pcol * blocksize * k * sizeof(double)), SEEK_SET);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
            } else {
                info = fseek(fT, (long)(blocksize * (*(dims + 1) - 1) * k * sizeof(double)), SEEK_CUR);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
            }
            for (i = 0; i < blocksize; ++i) {
                info = fseek(fT, (long)(blocksize * *position * sizeof(double)), SEEK_CUR);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
                for (j = 0; j < pTblocks - 1; ++j) {
                    fread(Tblock + i * pTblocks * blocksize + j * blocksize,
                          sizeof(double), blocksize, fT);
                    info = fseek(fT, (long)((*dims - 1) * blocksize * sizeof(double)), SEEK_CUR);
                    if (info != 0) {
                        printf("Error in setting correct begin position for reading T file\n"
                               "processor (%d,%d), error: %d\n", *position, pcol, info);
                        return -1;
                    }
                }
                fread(Tblock + i * pTblocks * blocksize + j * blocksize,
                      sizeof(double), blocksize, fT);
                info = fseek(fT, (long)((k - blocksize * ((pTblocks - 1) * *dims + *position + 1)) * sizeof(double)), SEEK_CUR);
                if (info != 0) {
                    printf("Error in setting correct begin position for reading T file\n"
                           "processor (%d,%d), error: %d\n", *position, pcol, info);
                    return -1;
                }
            }
        }
        blacs_barrier_(&ICTXT2D, "A");
        // End of read-in

        // Matrix D is the sum of the products of all strips of T' with their transpose
        // Up until now the entire matrix is stored, not only the upper/lower triangular
        // part, which would be possible since D is symmetric
        // Be aware that you always have to allocate memory for the entire matrix,
        // even when only dealing with the upper/lower triangular part
        pdgemm_("N", "T", &k, &k, &stripcols, &d_one, Tblock, &i_one, &i_one, DESCT,
                Tblock, &i_one, &i_one, DESCT, &d_one, Dmat, &i_one, &i_one, DESCD); //Z'Z
        //pdsyrk_ ( "U","N",&k,&stripcols,&d_one, Tblock,&i_one, &i_one,DESCT, &d_one, Dmat, &t_plus, &t_plus, DESCD );

        // Matrix B consists of X'T and Z'T; since each process only has parts of T at
        // its disposal, we must make sure the correct columns of Z and X are multiplied
        // with the correct columns of T.
        for (i = 0; i < pTblocks; ++i) {
            XtT_temp.ncols = k;
            // This function multiplies the correct columns of X' with the blocks of T
            // at the disposal of the process.
            // The result is also stored immediately at the correct positions of X'T.
            // (see src/tools.cpp)
            XtT_temp.clear();
            mult_colsA_colsC(Xtsparse, Tblock + i * blocksize, lld_T,
                             (*(dims + 1) * ni + pcol) * blocksize, blocksize,
                             (*dims * i + *position) * blocksize, blocksize,
                             XtT_temp, 0);
            if (XtT_temp.nonzeros > 0) {
                if (XtT_sparse.nonzeros == 0) {
                    XtT_sparse.clear();
                    XtT_sparse.make2(XtT_temp.nrows, XtT_temp.ncols, XtT_temp.nonzeros,
                                     XtT_temp.pRows, XtT_temp.pCols, XtT_temp.pData);
                } else {
                    XtT_sparse.addBCSR(XtT_temp);
                }
            }
        }
        // Same as above for calculating Z'T
        for (i = 0; i < pTblocks; ++i) {
            ZtT_temp.ncols = k;
            ZtT_temp.clear();
            mult_colsA_colsC(Ztsparse, Tblock + i * blocksize, lld_T,
                             (*(dims + 1) * ni + pcol) * blocksize, blocksize,
                             blocksize * (*dims * i + *position), blocksize,
                             ZtT_temp, 0);
            if (ZtT_temp.nonzeros > 0) {
                if (ZtT_sparse.nonzeros == 0) {
                    ZtT_sparse.clear();
                    ZtT_sparse.make2(ZtT_temp.nrows, ZtT_temp.ncols, ZtT_temp.nonzeros,
                                     ZtT_temp.pRows, ZtT_temp.pCols, ZtT_temp.pData);
                } else
                    ZtT_sparse.addBCSR(ZtT_temp);
            }
        }
        blacs_barrier_(&ICTXT2D, "A");
    }

    XtT_temp.clear();
    ZtT_temp.clear();
    Xtsparse.clear();
    Ztsparse.clear();
    if (DESCT != NULL)
        free(DESCT);
    DESCT = NULL;
    if (Tblock != NULL)
        free(Tblock);
    Tblock = NULL;
    //printf("T read in\n");

    info = fclose(fT);
    if (info != 0) {
        printf("Error in closing open streams\n");
        return -1;
    }
    if (filenameT != NULL)
        free(filenameT);
    filenameT = NULL;

    // Each process has only calculated some parts of B
    // All parts are collected by the root process (iam==0), which assembles B
    // Each process then receives the BT_i and B_j corresponding to the D_ij
    // available to that process
    if (iam != 0) {
        // Each process other than root sends its X' * T and Z' * T to the root process.
        MPI_Send(&(XtT_sparse.nonzeros), 1, MPI_INT, 0, iam, MPI_COMM_WORLD);
        MPI_Send(&(XtT_sparse.pRows[0]), XtT_sparse.nrows + 1, MPI_INT, 0, iam + size, MPI_COMM_WORLD);
        MPI_Send(&(XtT_sparse.pCols[0]), XtT_sparse.nonzeros, MPI_INT, 0, iam + 2 * size, MPI_COMM_WORLD);
        MPI_Send(&(XtT_sparse.pData[0]), XtT_sparse.nonzeros, MPI_DOUBLE, 0, iam + 3 * size, MPI_COMM_WORLD);
        XtT_sparse.clear();
        MPI_Send(&(ZtT_sparse.nonzeros), 1, MPI_INT, 0, iam, MPI_COMM_WORLD);
        MPI_Send(&(ZtT_sparse.pRows[0]), ZtT_sparse.nrows + 1, MPI_INT, 0, 4 * size + iam, MPI_COMM_WORLD);
        MPI_Send(&(ZtT_sparse.pCols[0]), ZtT_sparse.nonzeros, MPI_INT, 0, iam + 5 * size, MPI_COMM_WORLD);
        MPI_Send(&(ZtT_sparse.pData[0]), ZtT_sparse.nonzeros, MPI_DOUBLE, 0, iam + 6 * size, MPI_COMM_WORLD);
        ZtT_sparse.clear();
        //printf("Process %d sent ZtT and XtT\n",iam);

        // ... and eventually receives the necessary BT_i and B_j
        // Blocking sends are used, which is why the order of the receives is
        // critical, depending on the coordinates of the process
        int nonzeroes;
        if (*position >= pcol) {
            MPI_Recv(&nonzeroes, 1, MPI_INT, 0, iam, MPI_COMM_WORLD, &status);
            BT_i.allocate(blocksize * Drows, m + l, nonzeroes);
            MPI_Recv(&(BT_i.pRows[0]), blocksize * Drows + 1, MPI_INT, 0, iam + size, MPI_COMM_WORLD, &status);
            int count;
            MPI_Get_count(&status, MPI_INT, &count);
            BT_i.nrows = count - 1;
            MPI_Recv(&(BT_i.pCols[0]), nonzeroes, MPI_INT, 0, iam + 2 * size, MPI_COMM_WORLD, &status);
            MPI_Recv(&(BT_i.pData[0]), nonzeroes, MPI_DOUBLE, 0, iam + 3 * size, MPI_COMM_WORLD, &status);
            MPI_Recv(&nonzeroes, 1, MPI_INT, 0, iam + 4 * size, MPI_COMM_WORLD, &status);
            B_j.allocate(blocksize * Dcols, m + l, nonzeroes);
            MPI_Recv(&(B_j.pRows[0]), blocksize * Dcols + 1, MPI_INT, 0, iam + 5 * size, MPI_COMM_WORLD, &status);
            MPI_Get_count(&status, MPI_INT, &count);
            B_j.nrows = count - 1;
            MPI_Recv(&(B_j.pCols[0]), nonzeroes, MPI_INT, 0, iam + 6 * size, MPI_COMM_WORLD, &status);
            MPI_Recv(&(B_j.pData[0]), nonzeroes, MPI_DOUBLE, 0, iam + 7 * size, MPI_COMM_WORLD, &status);
            // Actually BT_j is sent, so it still needs to be transposed
            B_j.transposeIt(1);
        } else {
            MPI_Recv(&nonzeroes, 1, MPI_INT, 0, iam + 4 * size, MPI_COMM_WORLD, &status);
            B_j.allocate(blocksize * Dcols, m + l, nonzeroes);
            MPI_Recv(&(B_j.pRows[0]), blocksize * Dcols + 1, MPI_INT, 0, iam + 5 * size, MPI_COMM_WORLD, &status);
            int count;
            MPI_Get_count(&status, MPI_INT, &count);
            B_j.nrows = count - 1;
            MPI_Recv(&(B_j.pCols[0]), nonzeroes, MPI_INT, 0, iam + 6 * size, MPI_COMM_WORLD, &status);
            MPI_Recv(&(B_j.pData[0]), nonzeroes, MPI_DOUBLE, 0, iam + 7 * size, MPI_COMM_WORLD, &status);
            B_j.transposeIt(1);
            MPI_Recv(&nonzeroes, 1, MPI_INT, 0, iam, MPI_COMM_WORLD, &status);
            BT_i.allocate(blocksize * Drows, m + l, nonzeroes);
            MPI_Recv(&(BT_i.pRows[0]), blocksize * Drows + 1, MPI_INT, 0, iam + size, MPI_COMM_WORLD, &status);
            MPI_Get_count(&status, MPI_INT, &count);
            BT_i.nrows = count - 1;
            MPI_Recv(&(BT_i.pCols[0]), nonzeroes, MPI_INT, 0, iam + 2 * size, MPI_COMM_WORLD, &status);
            MPI_Recv(&(BT_i.pData[0]), nonzeroes, MPI_DOUBLE, 0, iam + 3 * size, MPI_COMM_WORLD, &status);
        }
    } else {
        for (i = 1; i < size; ++i) {
            // The root process receives parts of X' * T and Z' * T sequentially from
            // all processes and directly adds them together.
            int nonzeroes;
            MPI_Recv(&nonzeroes, 1, MPI_INT, i, i, MPI_COMM_WORLD, &status);
            if (nonzeroes > 0) {
                XtT_temp.allocate(m, k, nonzeroes);
                MPI_Recv(&(XtT_temp.pRows[0]), m + 1, MPI_INT, i, i + size, MPI_COMM_WORLD, &status);
                MPI_Recv(&(XtT_temp.pCols[0]), nonzeroes, MPI_INT, i, i + 2 * size, MPI_COMM_WORLD, &status);
                MPI_Recv(&(XtT_temp.pData[0]), nonzeroes, MPI_DOUBLE, i, i + 3 * size, MPI_COMM_WORLD, &status);
                XtT_sparse.addBCSR(XtT_temp);
                XtT_temp.clear();
            }
            MPI_Recv(&nonzeroes, 1, MPI_INT, i, i, MPI_COMM_WORLD, &status);
            if (nonzeroes > 0) {
                ZtT_temp.allocate(l, k, nonzeroes);
                MPI_Recv(&(ZtT_temp.pRows[0]), l + 1, MPI_INT, i, 4 * size + i, MPI_COMM_WORLD, &status);
                MPI_Recv(&(ZtT_temp.pCols[0]), nonzeroes, MPI_INT, i, i + 5 * size, MPI_COMM_WORLD, &status);
                MPI_Recv(&(ZtT_temp.pData[0]), nonzeroes, MPI_DOUBLE, i, i + 6 * size, MPI_COMM_WORLD, &status);
                ZtT_sparse.addBCSR(ZtT_temp);
                ZtT_temp.clear();
            }
        }
        XtT_sparse.transposeIt(1);
        ZtT_sparse.transposeIt(1);

        // B' is created by concatenating the blocks X'T and Z'T
        create1x2BlockMatrix(XtT_sparse, ZtT_sparse, Btsparse);
        XtT_sparse.clear();
        ZtT_sparse.clear();
        /*Btsparse.transposeIt(1);
        Btsparse.writeToFile("B_sparse.csr");
        Btsparse.transposeIt(1);*/

        // For each process row i, BT_i is created; it is also sent to the processes
        // in process column i to become B_j.
        for (int rowproc = *dims - 1; rowproc >= 0; --rowproc) {
            BT_i.ncols    = Btsparse.ncols;
            BT_i.nrows    = 0;
            BT_i.nonzeros = 0;
            int Drows_rowproc;
            if (rowproc != 0) {
                Drows_rowproc = (Dblocks - rowproc) % *dims == 0 ? (Dblocks - rowproc) / *dims
                                                                 : (Dblocks - rowproc) / *dims + 1;
                Drows_rowproc = Drows_rowproc < 1 ? 1 : Drows_rowproc;
            } else
                Drows_rowproc = Drows;
            for (i = 0; i < Drows_rowproc; ++i) {
                // Each process in row i can hold several blocks of contiguous rows of D,
                // for which we need the corresponding rows of BT.
                // Therefore we use the function extendrows to create BT_i (see src/tools.cpp)
                BT_i.extendrows(Btsparse, (i * *dims + rowproc) * blocksize, blocksize);
            }
            for (int colproc = (rowproc == 0 ? 1 : 0); colproc < *(dims + 1); ++colproc) {
                int rankproc;
                rankproc = blacs_pnum_(&ICTXT2D, &rowproc, &colproc);
                MPI_Send(&(BT_i.nonzeros), 1, MPI_INT, rankproc, rankproc, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pRows[0]), BT_i.nrows + 1, MPI_INT, rankproc, rankproc + size, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pCols[0]), BT_i.nonzeros, MPI_INT, rankproc, rankproc + 2 * size, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pData[0]), BT_i.nonzeros, MPI_DOUBLE, rankproc, rankproc + 3 * size, MPI_COMM_WORLD);
                //printf("BT_i's sent to processor %d\n",rankproc);

                rankproc = blacs_pnum_(&ICTXT2D, &colproc, &rowproc);
                MPI_Send(&(BT_i.nonzeros), 1, MPI_INT, rankproc, rankproc + 4 * size, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pRows[0]), BT_i.nrows + 1, MPI_INT, rankproc, rankproc + 5 * size, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pCols[0]), BT_i.nonzeros, MPI_INT, rankproc, rankproc + 6 * size, MPI_COMM_WORLD);
                MPI_Send(&(BT_i.pData[0]), BT_i.nonzeros, MPI_DOUBLE, rankproc, rankproc + 7 * size, MPI_COMM_WORLD);
                //printf("B_j's sent to processor %d\n",rankproc);
            }
        }
        B_j.make2(BT_i.nrows, BT_i.ncols, BT_i.nonzeros, BT_i.pRows, BT_i.pCols, BT_i.pData);
        B_j.transposeIt(1);
    }
    return 0;
}
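// --------------------------------------------------------------------------
// Note on the MPI tag scheme used above and reused in main() below: every
// field of a CSR matrix (nonzeros, pRows, pCols, pData, ...) travels with
// tag = rank + q * size, where q numbers the fields of the transfer. Since
// 0 <= rank < size, distinct (rank, q) pairs never collide. A hypothetical
// helper (illustrative only, not called anywhere in this code) making that
// convention explicit:
static inline int msg_tag(int rank, int field)
{
    return rank + field * size;   // 'size' is the global process count
}
// --------------------------------------------------------------------------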
int main(int argc, char** argv)
{
    int info, i, j, pcol, Adim;
    double* D;
    int* DESCD;
    CSRdouble BT_i, B_j, Xsparse, Zsparse, Btsparse;
    /*BT_i.allocate(0,0,0);
    B_j.allocate(0,0,0);
    Xsparse.allocate(0,0,0);
    Zsparse.allocate(0,0,0);
    Btsparse.allocate(0,0,0);*/

    // Initialise MPI and some MPI-variables
    info = MPI_Init(&argc, &argv);
    if (info != 0) {
        printf("Error in MPI initialisation: %d\n", info);
        return info;
    }

    position = (int*)calloc(2, sizeof(int));
    if (position == NULL) {
        printf("unable to allocate memory for processor position coordinate\n");
        return EXIT_FAILURE;
    }

    dims = (int*)calloc(2, sizeof(int));
    if (dims == NULL) {
        printf("unable to allocate memory for grid dimensions coordinate\n");
        return EXIT_FAILURE;
    }

    // BLACS is the interface used by PBLAS and ScaLAPACK on top of MPI
    blacs_pinfo_(&iam, &size);              // determine the number of processes involved
    info = MPI_Dims_create(size, 2, dims);  // determine the best 2D cartesian grid for that number of processes
    if (info != 0) {
        printf("Error in MPI creation of dimensions: %d\n", info);
        return info;
    }

    // So far the code can only work with square process grids,
    // so we try to get the biggest square grid possible with the number of processes involved
    if (*dims != *(dims + 1)) {
        while (*dims * *dims > size)
            *dims -= 1;
        *(dims + 1) = *dims;
        if (iam == 0)
            printf("WARNING: %d processor(s) unused due to reformatting to a square process grid\n",
                   size - (*dims * *dims));
        size = *dims * *dims;
        //cout << "New size of process grid: " << size << endl;
    }

    blacs_get_(&i_negone, &i_zero, &ICTXT2D);
    // Initialisation of the BLACS process grid, which is referenced as ICTXT2D
    blacs_gridinit_(&ICTXT2D, "R", dims, dims + 1);

    if (iam < size) {
        // The rank (iam) of the process is mapped to a 2D grid:
        // position = (process row, process column)
        blacs_pcoord_(&ICTXT2D, &iam, position, position + 1);
        if (*position == -1) {
            printf("Error in process grid\n");
            return -1;
        }

        // Filenames, dimensions of all matrices and other important variables
        // are read in as global variables (see src/readinput.cpp)
        info = read_input(*++argv);
        if (info != 0) {
            printf("Something went wrong when reading input file for processor %d\n", iam);
            return -1;
        }

        // blacs_barrier is used to stop any process from going beyond this point
        // before all processes have made it up to this point.
        blacs_barrier_(&ICTXT2D, "ALL");
        if (*(position + 1) == 0 && *position == 0)
            printf("Reading of input-file successful\n");

        if (*(position + 1) == 0 && *position == 0) {
            printf("\nA linear mixed model with %d observations, %d genotypes, %d random effects and %d fixed effects\n",
                   n, k, m, l);
            printf("was analyzed using %d (%d x %d) processors\n", size, *dims, *(dims + 1));
        }

        // Dimension of A (sparse matrix) is the number of fixed effects (m)
        // plus the sparse random effects (l)
        Adim = m + l;
        // Dimension of D (dense matrix) is the number of dense effects (k)
        Ddim = k;

        pcol = *(position + 1);

        // Define the number of blocks needed to store a complete column/row of D
        Dblocks = Ddim % blocksize == 0 ? Ddim / blocksize : Ddim / blocksize + 1;

        // Define the number of row blocks needed by the current process
        // to store its part of the dense matrix D
        Drows = (Dblocks - *position) % *dims == 0 ? (Dblocks - *position) / *dims
                                                   : (Dblocks - *position) / *dims + 1;
        Drows = Drows < 1 ? 1 : Drows;

        // Define the number of column blocks needed by the current process
        // to store its part of the dense matrix D
        Dcols = (Dblocks - pcol) % *(dims + 1) == 0 ? (Dblocks - pcol) / *(dims + 1)
                                                    : (Dblocks - pcol) / *(dims + 1) + 1;
        Dcols = Dcols < 1 ? 1 : Dcols;
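        // Worked example (illustrative, not from the original source): with
        // Dblocks = 7 and *dims = 3 process rows, Drows = ceil((7 - myrow)/3)
        // gives 3, 2 and 2 row blocks for process rows 0, 1 and 2 under the
        // block-cyclic distribution; the analogous formula with pcol and
        // *(dims+1) yields Dcols.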
        // Define the local leading dimension of D
        // (keeping in mind that matrices are always stored column-wise)
        lld_D = Drows * blocksize;

        // Initialise the descriptor of the dense distributed matrix
        DESCD = (int*)malloc(DLEN_ * sizeof(int));
        if (DESCD == NULL) {
            printf("unable to allocate memory for descriptor for D\n");
            return -1;
        }
        // D with dimensions (Ddim,Ddim) is distributed over all processes in ICTXT2D,
        // with the first element in process (0,0)
        // D is distributed into blocks of size (blocksize,blocksize),
        // having a local leading dimension lld_D in this specific process
        descinit_(DESCD, &Ddim, &Ddim, &blocksize, &blocksize, &i_zero, &i_zero,
                  &ICTXT2D, &lld_D, &info);
        if (info != 0) {
            printf("Descriptor of matrix D returns info: %d\n", info);
            return info;
        }

        // Allocate the space necessary to store the part of D held in memory by this process.
        D = (double*)calloc(Drows * blocksize * Dcols * blocksize, sizeof(double));
        if (D == NULL) {
            printf("unable to allocate memory for Matrix D (required: %ld bytes)\n",
                   Drows * blocksize * Dcols * blocksize * sizeof(double));
            return EXIT_FAILURE;
        }

        blacs_barrier_(&ICTXT2D, "ALL");
        if (iam == 0)
            printf("Start set up of B & D\n");
        blacs_barrier_(&ICTXT2D, "ALL");

        // set_up_BD is declared in readdist.cpp and constructs the parts of matrices
        // B & D in each processor which are necessary to create the distributed
        // Schur complement of D
        info = set_up_BD(DESCD, D, BT_i, B_j, Btsparse);
        //printdense(Drows*blocksize, Dcols * blocksize,D,"matrix_D.txt");

        blacs_barrier_(&ICTXT2D, "ALL");
        if (iam == 0)
            printf("Matrices B & D set up\n");

        if (printD_bool) {
            int array_of_gsizes[2], array_of_distribs[2], array_of_dargs[2], array_of_psize[2];
            int buffersize;
            MPI_Datatype file_type;
            MPI_File fh;
            MPI_Status status;

            array_of_gsizes[0]   = Dblocks * blocksize;
            array_of_gsizes[1]   = Dblocks * blocksize;
            array_of_distribs[0] = MPI_DISTRIBUTE_CYCLIC;
            array_of_distribs[1] = MPI_DISTRIBUTE_CYCLIC;
            array_of_dargs[0]    = blocksize;
            array_of_dargs[1]    = blocksize;
            array_of_psize[0]    = *dims;
            array_of_psize[1]    = *(dims + 1);

            MPI_Type_create_darray(size, iam, 2, array_of_gsizes, array_of_distribs,
                                   array_of_dargs, array_of_psize, MPI_ORDER_FORTRAN,
                                   MPI_DOUBLE, &file_type);
            MPI_Type_commit(&file_type);
            info = MPI_File_open(MPI_COMM_WORLD, filenameD,
                                 MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
            /*if ( ( Drows-1 ) % *(dims+1) == *position && ( Dcols-1 ) % *(dims) == pcol && Ddim%blocksize !=0 )
                buffersize=((Drows-1) * blocksize + Ddim % blocksize) * ((Dcols-1) * blocksize + Ddim % blocksize);
            else if ( ( Drows-1 ) % *(dims+1) == *position && Ddim%blocksize !=0 )
                buffersize=((Drows-1) * blocksize + Ddim % blocksize) * Dcols * blocksize;
            else if ( ( Dcols-1 ) % *(dims) == *position && Ddim%blocksize !=0 )
                buffersize=((Dcols-1) * blocksize + Ddim % blocksize) * Drows * blocksize;
            else*/
            buffersize = Dcols * Drows * blocksize * blocksize;

            MPI_File_set_view(fh, 0, MPI_DOUBLE, file_type, "native", MPI_INFO_NULL);
            info = MPI_File_write_all(fh, D, buffersize, MPI_DOUBLE, &status);
            MPI_File_close(&fh);
            if (iam == 0) {
                printf("Matrix D (dimension %d) is printed in file %s\n",
                       Dblocks * blocksize, filenameD);
            }
            if (filenameD != NULL)
                free(filenameD);
            filenameD = NULL;
            //delete[] array_of_gsizes, delete[] array_of_distribs, delete[] array_of_dargs, delete[] array_of_psize;
        }

        // Now every process has to set up the sparse matrix A,
        // consisting of X'X, X'Z, Z'X and Z'Z + lambda*I
        Xsparse.loadFromFile(filenameX);
        Zsparse.loadFromFile(filenameZ);
        if (filenameX != NULL)
            free(filenameX);
        filenameX = NULL;
        if (filenameZ != NULL)
            free(filenameZ);
        filenameZ = NULL;

        // The smat_* routines allocate their results themselves, so no
        // pre-allocation is needed here (the original callocs were leaked).
        smat_t *X_smat, *Z_smat;
        X_smat = smat_new_from(Xsparse.nrows, Xsparse.ncols, Xsparse.pRows,
                               Xsparse.pCols, Xsparse.pData, 0, 0);
        Z_smat = smat_new_from(Zsparse.nrows, Zsparse.ncols, Zsparse.pRows,
                               Zsparse.pCols, Zsparse.pData, 0, 0);

        smat_t *Xt_smat, *Zt_smat;
        Xt_smat = smat_copy_trans(X_smat);
        Zt_smat = smat_copy_trans(Z_smat);

        CSRdouble Asparse;
        smat_t *XtX_smat, *XtZ_smat, *ZtZ_smat, *lambda_smat, *ZtZlambda_smat;
        XtX_smat = smat_matmul(Xt_smat, X_smat);
        XtZ_smat = smat_matmul(Xt_smat, Z_smat);
        ZtZ_smat = smat_matmul(Zt_smat, Z_smat);

        Xsparse.clear();
        Zsparse.clear();
        smat_free(Xt_smat);
        smat_free(Zt_smat);
        /*smat_free(X_smat);
        smat_free(Z_smat);*/

        CSRdouble Imat;
        makeIdentity(l, Imat);
        lambda_smat = smat_new_from(Imat.nrows, Imat.ncols, Imat.pRows,
                                    Imat.pCols, Imat.pData, 0, 0);
        smat_scale_diag(lambda_smat, -lambda);
        ZtZlambda_smat = smat_add(lambda_smat, ZtZ_smat);
        smat_free(ZtZ_smat);
        //smat_free(lambda_smat);

        smat_to_symmetric_structure(XtX_smat);
        smat_to_symmetric_structure(ZtZlambda_smat);

        CSRdouble XtX_sparse, XtZ_sparse, ZtZ_sparse;
        XtX_sparse.make2(XtX_smat->m, XtX_smat->n, XtX_smat->nnz,
                         XtX_smat->ia, XtX_smat->ja, XtX_smat->a);
        XtZ_sparse.make2(XtZ_smat->m, XtZ_smat->n, XtZ_smat->nnz,
                         XtZ_smat->ia, XtZ_smat->ja, XtZ_smat->a);
        ZtZ_sparse.make2(ZtZlambda_smat->m, ZtZlambda_smat->n, ZtZlambda_smat->nnz,
                         ZtZlambda_smat->ia, ZtZlambda_smat->ja, ZtZlambda_smat->a);
        /*smat_free(XtX_smat);
        smat_free(XtZ_smat);
        smat_free(ZtZlambda_smat);*/
        Imat.clear();

        if (iam == 0) {
            cout << "***                                            [  t      t  ] ***" << endl;
            cout << "***                                            [ X X    X Z ] ***" << endl;
            cout << "***                                            [            ] ***" << endl;
            cout << "***   G e n e r a t i n g   m a t r i x   A =  [            ] ***" << endl;
            cout << "***                                            [  t      t  ] ***" << endl;
            cout << "***                                            [ Z X    Z Z ] ***" << endl;
        }

        // Sparse matrix A only contains the upper triangular part of A
        create2x2SymBlockMatrix(XtX_sparse, XtZ_sparse, ZtZ_sparse, Asparse);
        //Asparse.writeToFile("A_sparse.csr");
        smat_free(XtX_smat);
        smat_free(XtZ_smat);
        smat_free(ZtZlambda_smat);
        XtX_sparse.clear();
        XtZ_sparse.clear();
        ZtZ_sparse.clear();

        blacs_barrier_(&ICTXT2D, "ALL");

        if (printsparseC_bool) {
            CSRdouble Dmat, Dblock, Csparse;
            Dblock.nrows = Dblocks * blocksize;
            Dblock.ncols = Dblocks * blocksize;
            Dblock.allocate(Dblocks * blocksize, Dblocks * blocksize, 0);
            Dmat.allocate(0, 0, 0);
            for (i = 0; i < Drows; ++i) {
                for (j = 0; j < Dcols; ++j) {
                    dense2CSR_sub(D + i * blocksize + j * lld_D * blocksize,
                                  blocksize, blocksize, lld_D, Dblock,
                                  (*(dims) * i + *position) * blocksize,
                                  (*(dims + 1) * j + pcol) * blocksize);
                    if (Dblock.nonzeros > 0) {
                        if (Dmat.nonzeros == 0) {
                            Dmat.make2(Dblock.nrows, Dblock.ncols, Dblock.nonzeros,
                                       Dblock.pRows, Dblock.pCols, Dblock.pData);
                        } else {
                            Dmat.addBCSR(Dblock);
                        }
                    }
                    Dblock.clear();
                }
            }
            blacs_barrier_(&ICTXT2D, "A");
            if (iam != 0) {
                // Each process other than root sends its Dmat to the root process.
                MPI_Send(&(Dmat.nonzeros), 1, MPI_INT, 0, iam, MPI_COMM_WORLD);
                MPI_Send(&(Dmat.pRows[0]), Dmat.nrows + 1, MPI_INT, 0, iam + size, MPI_COMM_WORLD);
                MPI_Send(&(Dmat.pCols[0]), Dmat.nonzeros, MPI_INT, 0, iam + 2 * size, MPI_COMM_WORLD);
                MPI_Send(&(Dmat.pData[0]), Dmat.nonzeros, MPI_DOUBLE, 0, iam + 3 * size, MPI_COMM_WORLD);
                Dmat.clear();
            } else {
                for (i = 1; i < size; ++i) {
                    // The root process receives parts of Dmat sequentially from
                    // all processes and directly adds them together.
                    int nonzeroes, count;
                    MPI_Recv(&nonzeroes, 1, MPI_INT, i, i, MPI_COMM_WORLD, &status);
                    /*MPI_Get_count(&status, MPI_INT, &count);
                    printf("Process 0 received %d elements of process %d\n",count,i);*/
                    if (nonzeroes > 0) {
                        printf("Nonzeroes : %d\n ", nonzeroes);
                        Dblock.allocate(Dblocks * blocksize, Dblocks * blocksize, nonzeroes);
                        MPI_Recv(&(Dblock.pRows[0]), Dblocks * blocksize + 1, MPI_INT, i,
                                 i + size, MPI_COMM_WORLD, &status);
                        /*MPI_Get_count(&status, MPI_INT, &count);
                        printf("Process 0 received %d elements of process %d\n",count,i);*/
                        MPI_Recv(&(Dblock.pCols[0]), nonzeroes, MPI_INT, i,
                                 i + 2 * size, MPI_COMM_WORLD, &status);
                        /*MPI_Get_count(&status, MPI_INT, &count);
                        printf("Process 0 received %d elements of process %d\n",count,i);*/
                        MPI_Recv(&(Dblock.pData[0]), nonzeroes, MPI_DOUBLE, i,
                                 i + 3 * size, MPI_COMM_WORLD, &status);
                        /*MPI_Get_count(&status, MPI_DOUBLE, &count);
                        printf("Process 0 received %d elements of process %d\n",count,i);*/
                        Dmat.addBCSR(Dblock);
                    }
                }
                //Dmat.writeToFile("D_sparse.csr");
                Dmat.reduceSymmetric();
                Btsparse.transposeIt(1);
                create2x2SymBlockMatrix(Asparse, Btsparse, Dmat, Csparse);
                Btsparse.clear();
                Dmat.clear();
                Csparse.writeToFile(filenameC);
                Csparse.clear();
                if (filenameC != NULL)
                    free(filenameC);
                filenameC = NULL;
            }
        }
        Btsparse.clear();
        blacs_barrier_(&ICTXT2D, "A");

        // AB_sol will contain the solution of A*X=B, distributed across the process rows.
        // Processes in the same process row possess the same part of AB_sol
        double* AB_sol;
        int* DESCAB_sol;
        DESCAB_sol = (int*)malloc(DLEN_ * sizeof(int));
        if (DESCAB_sol == NULL) {
            printf("unable to allocate memory for descriptor for AB_sol\n");
            return -1;
        }
        // AB_sol (Adim, Ddim) is distributed across all processes in ICTXT2D
        // starting from process (0,0) into blocks of size (Adim, blocksize)
        descinit_(DESCAB_sol, &Adim, &Ddim, &Adim, &blocksize, &i_zero, &i_zero,
                  &ICTXT2D, &Adim, &info);
        if (info != 0) {
            printf("Descriptor of matrix AB_sol returns info: %d\n", info);
            return info;
        }

        AB_sol = (double*)calloc(Adim * Dcols * blocksize, sizeof(double));

        // Each process calculates the Schur complement of the part of D
        // at its disposal. (see src/schur.cpp)
        // The solution of A * Y = B_j is stored in AB_sol (= A^-1 * B_j)
        blacs_barrier_(&ICTXT2D, "A");
        make_Sij_parallel_denseB(Asparse, BT_i, B_j, D, lld_D, AB_sol);
        BT_i.clear();
        B_j.clear();

        // From here on the Schur complement S of D is stored in D
        blacs_barrier_(&ICTXT2D, "ALL");

        // The Schur complement is factorised (by ScaLAPACK)
        pdpotrf_("U", &k, D, &i_one, &i_one, DESCD, &info);
        if (info != 0) {
            printf("Cholesky decomposition of D was unsuccessful, error returned: %d\n", info);
            return -1;
        }
        // From here on the factorization of the Schur complement S is stored in D
        blacs_barrier_(&ICTXT2D, "ALL");

        // The Schur complement is inverted (by ScaLAPACK)
        pdpotri_("U", &k, D, &i_one, &i_one, DESCD, &info);
        if (info != 0) {
            printf("Inverse of D was unsuccessful, error returned: %d\n", info);
            return -1;
        }
        // From here on the inverse of the Schur complement S is stored in D
        blacs_barrier_(&ICTXT2D, "A");

        double* InvD_T_Block = (double*)calloc(Dblocks * blocksize + Adim, sizeof(double));

        // Diagonal elements of the (1,1) block of C^-1 are still distributed;
        // here they are gathered in InvD_T_Block in the root process.
        if (*position == pcol) {
            for (i = 0; i < Ddim; ++i) {
                if (pcol == (i / blocksize) % *dims) {
                    int Dpos = i % blocksize + ((i / blocksize) / *dims) * blocksize;
                    *(InvD_T_Block + Adim + i) = *(D + Dpos + lld_D * Dpos);
                }
            }
            for (i = 0, j = 0; i < Dblocks; ++i, ++j) {
                if (j == *dims)
                    j = 0;
                if (*position == j) {
                    dgesd2d_(&ICTXT2D, &blocksize, &i_one, InvD_T_Block + Adim + i * blocksize,
                             &blocksize, &i_zero, &i_zero);
                }
                if (*position == 0) {
                    dgerv2d_(&ICTXT2D, &blocksize, &i_one, InvD_T_Block + Adim + blocksize * i,
                             &blocksize, &j, &j);
                }
            }
        }
        blacs_barrier_(&ICTXT2D, "A");

        // Only the root process performs a selected inversion of A.
        if (iam == 0) {
            int pardiso_message_level = 1;
            int pardiso_mtype = -2;
            ParDiSO pardiso(pardiso_mtype, pardiso_message_level);

            int number_of_processors = 1;
            char* var = getenv("OMP_NUM_THREADS");
            if (var != NULL) {
                sscanf(var, "%d", &number_of_processors);
            } else {
                printf("Set the environment variable OMP_NUM_THREADS\n");
                exit(1);
            }

            pardiso.iparm[2]  = 2;
            pardiso.iparm[3]  = number_of_processors;
            pardiso.iparm[8]  = 0;
            pardiso.iparm[11] = 1;
            pardiso.iparm[13] = 0;
            pardiso.iparm[28] = 0;

            // This function calculates the factorisation of A once again,
            // so this might be optimized.
            pardiso.findInverseOfA(Asparse);

            printf("Processor %d inverted matrix A\n", iam);
        }
        blacs_barrier_(&ICTXT2D, "A");

        // To minimize memory usage, and because only the diagonal elements of the
        // inverse are needed, Y' * S is calculated row by row.
        // The diagonal element is calculated as the dot product of this row and
        // the corresponding column of Y. (Y is the solution of AY=B)
        double* YSrow = (double*)calloc(Dcols * blocksize, sizeof(double));
        int* DESCYSROW;
        DESCYSROW = (int*)malloc(DLEN_ * sizeof(int));
        if (DESCYSROW == NULL) {
            printf("unable to allocate memory for descriptor for YSrow\n");
            return -1;
        }
        // YSrow (1,Ddim) is distributed across processes of ICTXT2D starting from
        // process (0,0) into blocks of size (1,blocksize)
        descinit_(DESCYSROW, &i_one, &Ddim, &i_one, &blocksize, &i_zero, &i_zero,
                  &ICTXT2D, &i_one, &info);
        if (info != 0) {
            printf("Descriptor of matrix YSrow returns info: %d\n", info);
            return info;
        }
        blacs_barrier_(&ICTXT2D, "A");

        // Calculating diagonal elements 1 by 1 of the (0,0)-block of C^-1.
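        // Illustrative note (not in the original comments): with Y = A^-1 * B
        // in AB_sol and S^-1 in D, each pass of the loop below forms
        //     YSrow            = (row i of Y) * S^-1        (pdsymm_)
        //     InvD_T_Block[i-1] = YSrow . (row i of Y)      (pddot_)
        // i.e. the i-th diagonal element of Y * S^-1 * Y', so only one row
        // of Y * S^-1 is ever held in memory at a time.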
        for (i = 1; i <= Adim; ++i) {
            pdsymm_("R", "U", &i_one, &Ddim, &d_one, D, &i_one, &i_one, DESCD,
                    AB_sol, &i, &i_one, DESCAB_sol, &d_zero,
                    YSrow, &i_one, &i_one, DESCYSROW);
            pddot_(&Ddim, InvD_T_Block + i - 1, AB_sol, &i, &i_one, DESCAB_sol, &Adim,
                   YSrow, &i_one, &i_one, DESCYSROW, &i_one);
            /*if(*position==1 && pcol==1)
                printf("Dot product in process (1,1) is: %g\n", *(InvD_T_Block+i-1));
            if(*position==0 && pcol==1)
                printf("Dot product in process (0,1) is: %g\n",*(InvD_T_Block+i-1));*/
        }
        blacs_barrier_(&ICTXT2D, "A");

        if (YSrow != NULL)
            free(YSrow);
        YSrow = NULL;
        if (DESCYSROW != NULL)
            free(DESCYSROW);
        DESCYSROW = NULL;
        if (AB_sol != NULL)
            free(AB_sol);
        AB_sol = NULL;
        if (DESCAB_sol != NULL)
            free(DESCAB_sol);
        DESCAB_sol = NULL;
        if (D != NULL)
            free(D);
        D = NULL;
        if (DESCD != NULL)
            free(DESCD);
        DESCD = NULL;

        // Only in the root process do we add the diagonal elements of A^-1
        if (iam == 0) {
            for (i = 0; i < Adim; ++i) {
                j = Asparse.pRows[i];
                *(InvD_T_Block + i) += Asparse.pData[j];
            }
            Asparse.clear();
            printdense(Adim + k, 1, InvD_T_Block, "diag_inverse_C_parallel.txt");
        }

        if (InvD_T_Block != NULL)
            free(InvD_T_Block);
        InvD_T_Block = NULL;

        blacs_gridexit_(&ICTXT2D);
    }
    //cout << iam << " reached end before MPI_Barrier" << endl;
    MPI_Barrier(MPI_COMM_WORLD);
    //MPI_Finalize();
    return 0;
}