/*
 * Searches every split pair (k, l) of the query alignment for the best
 * three-parent ("trimera") explanation: prefix parent from anBestD/anD,
 * middle parent chosen here by scanning all nK references, suffix parent
 * from anBestR/anR.
 *
 * Params:
 *   nK          number of candidate reference sequences
 *   ptRefData   reference data; anFreq[j] is the abundance of reference j
 *               (used only to break distance ties in favour of the more
 *               abundant reference)
 *   pnT1/2/3    out: indices of the best prefix / middle / suffix parents
 *   pnSplit1/2  out: the two split positions of the best trimera
 *   anRestrict  anRestrict[j] != FALSE excludes reference j from the scan
 *   nLenI       number of alignment columns considered
 *   atAlign     per-reference cumulative distances; atAlign[j].anD[l] is the
 *               mismatch count of reference j up to column l
 *   anD, anR    best prefix / suffix distances per split position
 *   anBestD/R   best prefix / suffix parent indices per split position
 *
 * Returns: the total distance of the best trimera found (BIG_INT if none).
 *
 * Ownership: the four scratch matrices are allocated via allocateMatrices()
 * and freed here before returning.
 */
int getBestTrimera(int nK, t_Data *ptRefData, int* pnT1, int* pnT2, int* pnT3,
                   int *pnSplit1, int *pnSplit2, int* anRestrict, int nLenI,
                   t_Align* atAlign, int* anD, int* anR, int* anBestD, int *anBestR)
{
    int j = 0, k = 0, l = 0;
    int **aanT = NULL, **aanBestT = NULL;
    int **aanT2 = NULL, **aanBestT2 = NULL;
    int nBestTri = BIG_INT;
    int nT1 = -1, nT2 = -1, nT3 = -1, nSplit1 = -1, nSplit2 = -1;

    allocateMatrices(nLenI, &aanT, &aanBestT, &aanT2, &aanBestT2);

    for (k = 0; k < nLenI; k++) {
        for (l = k; l < nLenI - 1; l++) {
            /* Best middle-segment distance over columns (k, l] and its parent. */
            aanT[k][l] = BIG_INT;
            aanBestT[k][l] = -1;

            for (j = 0; j < nK; j++) {
                if (anRestrict[j] == FALSE) {
                    int nX = atAlign[j].anD[l] - atAlign[j].anD[k];

                    /* Take j if strictly better, or on a distance tie if it is
                     * more abundant than the current best.
                     *
                     * BUG FIX: guard aanBestT[k][l] >= 0 before indexing anFreq
                     * with it — the original read anFreq[-1] (out-of-bounds, UB)
                     * whenever the first candidate tied with the BIG_INT
                     * sentinel. Parentheses added to make the &&/|| precedence
                     * explicit. */
                    if (nX < aanT[k][l] ||
                        (nX == aanT[k][l] && aanBestT[k][l] >= 0 &&
                         ptRefData->anFreq[j] > ptRefData->anFreq[aanBestT[k][l]])) {
                        aanT[k][l] = nX;
                        aanBestT[k][l] = j;
                    }
                }
            }

            /* Total = best prefix up to k + best middle + best suffix after l. */
            aanT[k][l] += anD[k] + anR[nLenI - l - 2];

            if (aanT[k][l] < nBestTri) {
                nBestTri = aanT[k][l];
                nSplit1 = k;
                nSplit2 = l;
                nT1 = anBestD[k];
                nT2 = aanBestT[k][l];
                nT3 = anBestR[nLenI - l - 2];
            }
        }
    }

    (*pnT1) = nT1;
    (*pnT2) = nT2;
    (*pnT3) = nT3;
    (*pnSplit1) = nSplit1;
    (*pnSplit2) = nSplit2;

    /* Release the scratch matrices allocated above. */
    for (j = 0; j < nLenI; j++) {
        free(aanT[j]);
        free(aanBestT[j]);
        free(aanT2[j]);
        free(aanBestT2[j]);
    }
    free(aanT);
    free(aanBestT);
    free(aanT2);
    free(aanBestT2);

    return nBestTri;
}
/* ************************************************************************ */ int main (int argc, char** argv) { int rank; int size; int rest; int from, to; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); struct options options; struct calculation_arguments arguments; struct calculation_results results; /* Parameter nur einmal abfragen */ if(rank == 0) { AskParams(&options, argc, argv); } MPI_Bcast(&options, (sizeof(options)), MPI_BYTE, MASTER, MPI_COMM_WORLD); initVariables(&arguments, &results, &options); /* Damit allocation + initialization richtig läuft, wird für GS size = 1 gesetzt */ if(options.method == METH_GAUSS_SEIDEL) { size = 1; } /* Aufteilen bis auf rest */ int N_part = arguments.N; int lines = N_part - 1; rest = lines % size; N_part = (lines - rest) / size; /* globale zeilennummer berechnen, hier wird der rest beachtet */ /* offset ist (rank + 1) für rank < rest, steigt also linear mit steigendem rang */ if(rank < rest) { from = N_part * rank + rank + 1; to = N_part * (rank + 1) + (rank + 1); } /* offset hier ist rest also die der maximale offset von oben */ else { from = N_part * rank + rest + 1; to = N_part * (rank + 1) + rest ; } arguments.to = to; arguments.from = from; /* at least we only need N - 1 processes for calculation */ if((unsigned int)size > (arguments.N -1)) { size = (arguments.N - 1); if(rank == MASTER ) { printf("\nWarning, you are using more processes than rows.\n This can slow down the calculation process! \n\n"); } } //calculate Number of Rows arguments.numberOfRows = ((to - from + 1) > 0 ) ? (to - from + 1) : 0; allocateMatrices(&arguments); initMatrices(&arguments, &options, rank, size); gettimeofday(&start_time, NULL); /* start timer */ if (options.method == METH_JACOBI ) { calculateJacobi(&arguments, &results, &options, rank, size); } else { /* GS berechnet nur MASTER */ if(rank == MASTER) { printf("\nGS wird nur sequentiell berechnet! 
\n"); calculate(&arguments, &results, &options); } } gettimeofday(&comp_time, NULL); /* stop timer */ /* only once */ if(rank == MASTER) { displayStatistics(&arguments, &results, &options, size); } /* GS macht alte ausgabe */ if((options.method == METH_GAUSS_SEIDEL) && (rank == MASTER)) { DisplayMatrix(&arguments, &results, &options); } else { DisplayMatrixMPI(&arguments, &results, &options, rank, size, from, to); } freeMatrices(&arguments); /* free memory */ MPI_Finalize(); return 0; }