/*
 * Builds a random permutation of the integers 0..n-1.
 *
 * Returns a heap-allocated array of n ints (ownership passes to the
 * caller, who must free it), or NULL on a non-positive n or on
 * allocation failure. Relies on the external helpers swap() and
 * aleat_num(lo, hi).
 */
int* genera_perm(int n) {
    int *perm = NULL;
    int idx;

    /* Parameter check: a permutation needs at least one element */
    if (n <= 0)
        return NULL;

    /* Reserve and verify the permutation storage */
    perm = malloc(n * sizeof(int));
    if (perm == NULL)
        return NULL;

    /* Start from the identity permutation... */
    for (idx = 0; idx < n; idx++)
        perm[idx] = idx;

    /* ...then shuffle: each slot swaps with a random slot in [idx, n-1].
     * The upper bound is n-1 because the array holds n elements. */
    for (idx = 0; idx < n; idx++)
        swap(idx, aleat_num(idx, n - 1), perm);

    return perm;
}
/*
 * Refills the global next_pipes table: every slot receives a pipe
 * chosen uniformly at random (via the external aleat_num helper)
 * from the global pipes array. Always returns 0.
 */
int calculate_next() {
    int slot;

    for (slot = 0; slot < NUM_PIPES; slot++)
        next_pipes[slot] = pipes[aleat_num(0, NUM_PIPES - 1)];

    return 0;
}
int main(int argc, char *argv[]){ int i, j, k, rows_index, last_rows_index, sum = 0; int **first, **second; int **result; int *row_first, *row_result = NULL; int size; int rank, n_process, n_process_working; int rows_per_process, remaining_rows; MPI_Status status; int no_need = 0; srand(time(NULL)); /* Start up MPI */ MPI_Init(&argc, &argv); /* Get some info about MPI */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &n_process); if(argc!=NUM_ARG){ if(rank == RANK_MASTER) printf("USAGE:\n %s <n_size>\n", argv[0]); MPI_Finalize(); return -1; } if((size =atoi(argv[1]))<=0){ if(rank == RANK_MASTER) printf("(ERROR\t) Invalid size\n"); MPI_Finalize(); return -1; } if (rank == RANK_MASTER) { /* Allocation and setting of the matrices */ first = (int **)malloc(sizeof(int)*size); second = (int **)malloc(sizeof(int)*size); result = (int **)malloc(sizeof(int)*size); for(i=0;i<size;i++){ first[i]= (int *)malloc(sizeof(int)*size); second[i] = (int *)malloc(sizeof(int)*size); result[i] = (int *)malloc(sizeof(int)*size); } /* Setting the contents of the matrices and broadcast of the second matrix */ for(i=0;i<size;i++){ for(j=0;j<size;j++){ first[i][j]=aleat_num(0,size); second[i][j]=aleat_num(0,size); result[i][j]=aleat_num(0,size); } MPI_Bcast(&(second[i][0]), size, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); } /* Send to each slave (rank-1)*(size/n_process)+1 to i*(size/n_process) * rows of the first column */ if (size < (n_process-1)) { rows_per_process = 1; remaining_rows = 0; n_process_working = size; } else { rows_per_process = size / (n_process-1); remaining_rows = size % (n_process-1); n_process_working = n_process-1; } printf(" %s ROWS PER PROCESS = %d\n",LABEL_MASTER, rows_per_process); printf(" %s REMAINING ROWS = %d\n",LABEL_MASTER, remaining_rows); printf(" %s PROCESSES WORKING = %d\n",LABEL_MASTER, n_process_working); for (i = 1; i <= n_process_working; i++) { for (rows_index = (i-1)*(rows_per_process); rows_index < i*rows_per_process 
;rows_index++){ MPI_Send(&(first[rows_index][0]),size,MPI_INT,i,TAG_ROWS,MPI_COMM_WORLD); printf(" %s SENT ROW [%d] to processor %d\n",LABEL_MASTER, rows_index, i); } } /* If there are remaining rows (never more than N-1), master sends to each * processos like a Round Robin */ last_rows_index = rows_index; /* The first remaining row to send */ for (i = 1; i <= remaining_rows ; i++, last_rows_index++){ MPI_Send(&(first[last_rows_index][0]),size,MPI_INT,i,TAG_REMAINING_ROWS,MPI_COMM_WORLD); printf(" %s SENT REMAINING ROW [%d] to processor %d\n",LABEL_MASTER, last_rows_index, i); } /* Receives result */ for (i = 1; i <= n_process_working; i++) { for (rows_index = (i-1)*(rows_per_process); rows_index < i*rows_per_process ;rows_index++){ MPI_Recv(&(result[rows_index][0]),size,MPI_INT,i,TAG_RESULT,MPI_COMM_WORLD,&status); } } /* Receives remaining rows of the result */ last_rows_index = rows_index; /* The first remaining row to receive */ for (i = 1; i <= remaining_rows ; i++, last_rows_index++){ MPI_Recv(&(result[last_rows_index][0]),size,MPI_INT,i,TAG_RESULT,MPI_COMM_WORLD,&status); } printf(" %s RESULTS\n", LABEL_MASTER); /* PRINT */ for(i=0;i<size;i++){ printf("\t"); for(j=0;j<size;j++){ printf("%d ",first[i][j]); } printf("x "); for(j=0;j<size;j++){ printf("%d ",second[i][j]); } printf("= "); for(j=0;j<size;j++){ printf("%d ",result[i][j]); } printf("\n"); } free(first); free(second); free(result); MPI_Barrier(MPI_COMM_WORLD); } else { /* Cannot scatter the workload between a high number of processors with a matrix * size relative small */ if (rank-1 >= size) { printf(" # %s %d # NOT NECESSARY\n",LABEL_SLAVE, rank); no_need = 1; MPI_Barrier(MPI_COMM_WORLD); } if (no_need != 1){ /* Receives the second matrix */ second = (int **)malloc(sizeof(int)*size); for (i = 0; i < size; i++) { second[i] = (int *)malloc(sizeof(int)*size); MPI_Bcast(&(second[i][0]), size, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); } /* Each slave receives (rank-1)*(size/n_process)+1 to 
i*(size/n_process) * rows of the first matrix to calculate that many rows of the product matrix */ row_first = (int *)malloc(sizeof(int)*size); result = (int **)malloc(sizeof(int)*size); rows_per_process = (size < (n_process-1)) ? 1 : (size / (n_process-1)); for (rows_index = 0; rows_index < rows_per_process ;rows_index++){ MPI_Recv(row_first,size,MPI_INT,RANK_MASTER,TAG_ROWS,MPI_COMM_WORLD,&status); /* Calculation of the result row */ result[rows_index] = (int *)malloc(sizeof(int)*size); multiply_row(result[rows_index], row_first, second, size); MPI_Send(&(result[rows_index][0]),size,MPI_INT,RANK_MASTER,TAG_RESULT,MPI_COMM_WORLD); } /* If there are remaining rows to finish (never more than N-1) each slave * calculates the remaining row according to their rank */ remaining_rows = (size < (n_process-1)) ? 0 : size % (n_process-1); last_rows_index = rows_index; /* This is the corresponding index of the remaining row */ /* Only the first processes ought to deal with the remaining rows */ if ((remaining_rows >= rank) && (remaining_rows != 0)) { MPI_Recv(row_first,size,MPI_INT,RANK_MASTER,TAG_REMAINING_ROWS,MPI_COMM_WORLD,&status); result[last_rows_index] = (int *)malloc(sizeof(int)*size); multiply_row(result[last_rows_index], row_first, second, size); MPI_Send(&(result[last_rows_index][0]),size,MPI_INT,RANK_MASTER,TAG_RESULT,MPI_COMM_WORLD); } free(second); free(row_first); free(result); MPI_Barrier(MPI_COMM_WORLD); } } /* All done */ MPI_Finalize(); return 0; }
int main(int argc, char **argv) { int rank, n_process; int *vector=NULL; int *subvector=NULL; int *subvectorSlave=NULL; int i, n, j; int interval; int l,m; MPI_Status status; int size_vector, n_process_working, size_subvector; srand(time(NULL)); if(argc!=2){ printf("USAGE:\n %s log2(size_vector)\n", argv[0]); return -1; } if ((n = atoi(argv[1])) < 2){ printf("(ERROR\t) Vector size must be greater than 2^2\n"); return -1; } size_vector = pow(2,n); /* Start up MPI */ MPI_Init(&argc, &argv); /* Get some info about MPI */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &n_process); /*If the number of slave process is smaller than MIN_SLAVE_PROCESS(4)*/ if(n_process-1 < MIN_SLAVE_PROCESS){ printf("(ERROR\t) Needed at least four slave processes\n"); MPI_Finalize(); return -1; } n_process--; if(rank == RANK_MASTER){ if ((vector = (int *)calloc(size_vector+VIRTUAL_ELEMENTS, sizeof(int))) == NULL) { perror("calloc vector"); MPI_Finalize(); return -1; } printf("# MASTER # VECTOR = "); for(i=FIRST_INDEX; i < size_vector+2 ;i++){ vector[i] = aleat_num(1,size_vector); printf("%d ",vector[i]); } if((size_vector % n_process)!=0){ m = log2(n_process); while(m > n) m = log2(m); n_process_working = pow(2,m); size_subvector = pow(2,n-m); } else{ n_process_working = n_process; size_subvector = size_vector/n_process; } /* Broadcast size of the subvector */ MPI_Bcast(&size_subvector, 1, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); /* Broadcasts the number of processes working */ MPI_Bcast(&n_process_working, 1, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); /* Calculation and sending of the subvectors to each process */ l=0; m=size_subvector-1; if ((subvector = (int *)malloc((size_subvector+VIRTUAL_ELEMENTS)*sizeof(int))) == NULL){ perror("malloc subvector"); MPI_Finalize(); return -1; } for(i=1; i <= n_process_working ;i++){ memcpy( subvector, &vector[l], (size_subvector+VIRTUAL_ELEMENTS)*sizeof(int)); printf("\n# MASTER # SLAVE %d SUB_VECTOR = ", i); for(j=0; 
j<size_subvector+VIRTUAL_ELEMENTS; j++){ printf("%d ",subvector[j]); } l = m+1; m += size_subvector; /* And then the subvector */ MPI_Send( subvector, size_subvector+VIRTUAL_ELEMENTS, MPI_INT, i, TAG_SUB_VECTOR, MPI_COMM_WORLD); } /* Master receives all the subvectors and compounds then into the resultant vector */ for (i = 1; i <= n_process_working; i++) { MPI_Recv( &(vector[FIRST_INDEX + ((i-1) * size_subvector)]), size_subvector, MPI_INT, i, TAG_SMOOTHED, MPI_COMM_WORLD, &status); } printf("\n# MASTER # SMOOTHED_SUB_VECTOR = "); for(i=FIRST_INDEX; i < size_vector+2; i++){ printf("%d ",vector[i]); } printf("\n"); free(vector); free(subvector); MPI_Barrier(MPI_COMM_WORLD); } else{ MPI_Bcast(&size_subvector, 1, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); MPI_Bcast(&n_process_working, 1, MPI_INT, RANK_MASTER, MPI_COMM_WORLD); if (rank <= n_process_working) { subvectorSlave = (int *)malloc((size_subvector+VIRTUAL_ELEMENTS)*sizeof(int)); MPI_Recv( subvectorSlave, size_subvector+VIRTUAL_ELEMENTS, MPI_INT, RANK_MASTER, TAG_SUB_VECTOR, MPI_COMM_WORLD, &status); /* printf("\n# SLAVE %d # SUB_VECTOR = ", rank); for(j=0; j<size_subvector+VIRTUAL_ELEMENTS; j++){ printf("%d ",subvectorSlave[j]); } */ smooth_vector(subvectorSlave,size_subvector+VIRTUAL_ELEMENTS); /* printf("\n# SLAVE %d # SMOOTHED = ", rank); for(j=0; j<size_subvector+VIRTUAL_ELEMENTS; j++){ printf("%d ",subvectorSlave[j]); } */ MPI_Send( &(subvectorSlave[FIRST_INDEX]), size_subvector, MPI_INT, RANK_MASTER, TAG_SMOOTHED, MPI_COMM_WORLD); free(subvector); } MPI_Barrier(MPI_COMM_WORLD); } /* All done */ MPI_Finalize(); return 0; }
/*
 * Refreshes a single slot of the global next_pipes table with a pipe
 * chosen uniformly at random from the global pipes array — the
 * one-index counterpart of calculate_next().
 *
 * Fix applied: the function is declared to return int but had no
 * return statement (undefined behavior if the caller uses the value).
 * It now returns 0 on success, consistent with calculate_next().
 */
int calculate_next_u(int i) {
    next_pipes[i] = pipes[aleat_num(0, NUM_PIPES - 1)];
    return 0;
}
/*
 * Serial matrix multiplication demo.
 *
 * Builds two random tamanio x tamanio matrices (values in [0, tamanio]
 * via the external aleat_num helper), prints them, computes their
 * product with the classic triple loop, and prints the result.
 *
 * Usage: <prog> <n_size>   (size must be a positive integer)
 * Returns 0 on success, -1 on bad arguments.
 *
 * Fixes applied:
 *  - The row-pointer tables were allocated with sizeof(int) per entry
 *    instead of sizeof(int *); on LP64 platforms that under-allocates
 *    by half and corrupts the heap.
 *  - Row buffers are now freed before the tables (the original freed
 *    only the tables, leaking every row).
 */
int main(int argc, char *argv[]){
    int i, j, k, sum = 0;
    int **first, **second, **multiply = NULL;
    int tamanio;

    if (argc != NUM_ARG){
        printf("USAGE:\n %s <n_size>\n", argv[0]);
        return -1;
    }
    if ((tamanio = atoi(argv[1])) <= 0){
        printf("(ERROR\t) Invalid size\n");
        return -1;
    }
    srand(time(NULL));

    /* Tables of row pointers (sizeof(int *) each) plus one buffer per row */
    first    = (int **)malloc(sizeof(int *) * tamanio);
    second   = (int **)malloc(sizeof(int *) * tamanio);
    multiply = (int **)malloc(sizeof(int *) * tamanio);
    for (i = 0; i < tamanio; i++){
        first[i]    = (int *)malloc(sizeof(int) * tamanio);
        second[i]   = (int *)malloc(sizeof(int) * tamanio);
        multiply[i] = (int *)malloc(sizeof(int) * tamanio);
    }

    /* Random contents for both operand matrices */
    for (i = 0; i < tamanio; i++){
        for (j = 0; j < tamanio; j++){
            first[i][j]  = aleat_num(0, tamanio);
            second[i][j] = aleat_num(0, tamanio);
        }
    }

    printf("First matrix:\n");
    for (i = 0; i < tamanio; i++){
        for (j = 0; j < tamanio; j++){
            printf("%d ", first[i][j]);
            if (j == tamanio - 1)
                printf("\n");
        }
    }
    printf("Second matrix:\n");
    for (i = 0; i < tamanio; i++){
        for (j = 0; j < tamanio; j++){
            printf("%d ", second[i][j]);
            if (j == tamanio - 1)
                printf("\n");
        }
    }

    /* Classic O(n^3) product: multiply[i][j] = sum_k first[i][k]*second[k][j] */
    for (i = 0; i < tamanio; i++) {
        for (j = 0; j < tamanio; j++) {
            for (k = 0; k < tamanio; k++) {
                sum = sum + first[i][k] * second[k][j];
            }
            multiply[i][j] = sum;
            sum = 0;
        }
    }

    printf("Product of entered matrices:\n");
    for (i = 0; i < tamanio; i++) {
        for (j = 0; j < tamanio; j++){
            printf("%d ", multiply[i][j]);
            if (j == tamanio - 1)
                printf("\n");
        }
    }

    /* Free row buffers first, then the pointer tables */
    for (i = 0; i < tamanio; i++){
        free(first[i]);
        free(second[i]);
        free(multiply[i]);
    }
    free(first);
    free(second);
    free(multiply);
    return 0;
}