void run_airline(Airline* self, ipc_t conn) {
    mprintf("Entering run_airline\n");
    ipcConn = conn;
    me = self;
    register_exit_function(NULL);
    //redirect_signals();
    Vector* threads = bootstrap_planes(self, conn);
    planeThreads = threads;
    listen(threads);
    // If we got here, it means we received an end message
    struct Message msg;
    msg.type = MessageTypeEnd;
    broadcast(threads, msg);
    for (size_t i = 0; i < self->numberOfPlanes; i++) {
        struct PlaneThread* t = getFromVector(threads, i);
        pthread_join(t->thread, NULL);
        message_queue_destroy(t->queue);
        free(t);
    }
    destroyVector(threads);
    mprintf("Out of run_airline\n");
}
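/*
 * Not part of the original listing: a rough sketch of the PlaneThread fields
 * that run_airline above and the thread helpers later in this file
 * (broadcast, start_phase, redirect_*) appear to rely on, reconstructed from
 * usage only. The real definition and the exact queue type live elsewhere in
 * the project, so treat every field name here as an assumption.
 *
 *   struct PlaneThread {
 *       pthread_t thread;     // joined by run_airline after the end message
 *       MessageQueue* queue;  // per-plane mailbox used by message_queue_push/destroy
 *       struct Plane* plane;  // plane data; redirect_stock_message logs plane->id
 *       int done;             // nonzero once the plane has finished its run
 *   };
 */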
void printVectorOnStream(pvector_t vector, FILE *stream, const char *unit_t_format) {
    index_t i;
    fprintf(stream, "%d\n", vector->length);
    for (i = 0; i < vector->length; i++)
        fprintf(stream, unit_t_format, getFromVector(vector, i));
    fprintf(stream, "\n");
}
int dataExchange(unit_t tmin, unit_t tmax, pvector_t *v, pvector_t tmp) {
    index_t i, x, pos = 0;
    unit_t t;
    pvector_t data;
    if (initVector(&data, ELEMENTS_PER_PROCESS << 1) == NULL)
        return ERRORCODE_CANT_MALLOC;
    for (i = 0; i < PROCESS_NUMBER; i++) {
        if (ID == i)
            copyVector(*v, tmp, (*v)->length);
        if (MPI_Bcast(tmp->vector, listLength(i), MPI_UNIT, i, MPI_COMM_WORLD) != MPI_SUCCESS)
            return ERRORCODE_MPI_ERROR;
        /* Keep only the elements of this block that fall in (tmin, tmax] */
        for (x = 0; x < listLength(i); x++) {
            t = getFromVector(tmp, x);
            if (t <= tmax) {
                if (t > tmin)
                    setInVector(data, pos++, t);
            } else
                x += ELEMENTS_NUMBER; /* blocks arrive sorted: force the inner loop to end */
        }
    }
    freeVector(v);
    if (initVector(v, pos) == NULL)
        return ERRORCODE_CANT_MALLOC;
    copyVector(data, *v, pos);
    freeVector(&data);
    return ERRORCODE_NOERRORS;
}
Product* getProduct(char* name, Vector* products) {
    int i = getProductId(name, products);
    if (i != -1) {
        return (Product*) getFromVector(products, i);
    }
    // this Product doesn't exist, let's add it
    Product* res;
    if ((res = malloc(sizeof(Product))) == NULL) {
        return NULL;
    }
    res->name = malloc((strlen(name) + 1) * sizeof(char));
    if (res->name == NULL) {
        free(res);
        return NULL;
    }
    strcpy(res->name, name);
    if ((i = addToVector(products, res)) == -1) {
        free(res->name);
        free(res);
        return NULL;
    }
    res->id = i;
    return res;
}
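/*
 * Hypothetical usage sketch (not taken from the original source): how a
 * caller might combine getProduct and getProductId. The vector constructor
 * name (createVector) is an assumption for illustration only.
 *
 *   Vector* products = createVector();
 *   Product* flour = getProduct("flour", products);  // created and added on first use
 *   int id = getProductId("flour", products);        // now resolves to flour->id
 *   Product* same = getProduct("flour", products);   // second call returns the stored entry
 */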
void broadcast(Vector* threads, struct Message msg) {
    mprintf("Broadcasting message with type %d\n", msg.type);
    size_t len = getVectorSize(threads);
    for (size_t i = 0; i < len; i++) {
        struct PlaneThread* plane = (struct PlaneThread*) getFromVector(threads, i);
        if (!plane->done) {
            message_queue_push(plane->queue, msg);
        }
    }
}
void redirect_destinations_message(Vector* threads, union MapMessage* in) {
    struct Message msg;
    struct PlaneThread* thread = (struct PlaneThread*) getFromVector(threads, in->destinations.planeId);
    msg.type = MessageTypeDestinations;
    memcpy(msg.payload.destinations.destinations, in->destinations.destinations, MAX_DESTINATIONS * sizeof(int));
    memcpy(msg.payload.destinations.distances, in->destinations.distance, MAX_DESTINATIONS * sizeof(int));
    msg.payload.destinations.count = in->destinations.count;
    message_queue_push(thread->queue, msg);
}
void redirect_stock_message(Vector* threads, union MapMessage* in) {
    struct Message msg;
    struct PlaneThread* thread = (struct PlaneThread*) getFromVector(threads, in->stock.header.id);
    struct StockMessagePart* stock = &in->stock.stocks;
    msg.type = MessageTypeStock;
    msg.payload.stock.count = stock->count;
    memcpy(msg.payload.stock.delta, stock->quantities, sizeof(int) * MAX_STOCKS);
    mprintf("Redirect stock message to %d with stock count %d\n", thread->plane->id, stock->count);
    message_queue_push(thread->queue, msg);
}
int getProductId(char* productName, Vector* products) {
    size_t len = getVectorSize(products);
    for (size_t i = 0; i < len; i++) {
        Product* product = (Product*) getFromVector(products, i);
        if (strcmp(productName, product->name) == 0) {
            return product->id;
        }
    }
    return -1;
}
void send_ready_message(void) {
    if (stage == 1) {
        int flying = 0;
        for (size_t i = 0; i < me->numberOfPlanes; i++) {
            struct PlaneThread* thread = getFromVector(planeThreads, i);
            if (!thread->done) {
                flying++;
            }
        }
        comm_airline_status(ipcConn, me->id, flying, me->numberOfPlanes);
    } else {
        comm_airline_ready(ipcConn);
    }
}
void start_phase(Vector* threads, struct Message msg) {
    pthread_mutex_lock(&planesLeftLock);
    size_t len = getVectorSize(threads);
    planesLeftInStage = 0;
    for (size_t i = 0; i < len; i++) {
        struct PlaneThread* thread = (struct PlaneThread*) getFromVector(threads, i);
        if (!thread->done) {
            planesLeftInStage++;
        }
    }
    mprintf("Setting planes left to %d\n", planesLeftInStage);
    if (planesLeftInStage == 0) {
        send_ready_message();
    } else {
        broadcast(threads, msg);
    }
    pthread_mutex_unlock(&planesLeftLock);
}
void copySampleToVector(pvector_t source, pvector_t destiny, index_t by, index_t number) {
    int step, i;
    for (i = 0, step = by; i < number; i++, step += by)
        setInVector(destiny, i, getFromVector(source, step));
}
void copyVector(pvector_t source, pvector_t destiny, index_t length) {
    index_t i;
    for (i = 0; i < length; i++)
        setInVector(destiny, i, getFromVector(source, i));
}
int main(int argc, char** argv) {
    pvector_t v, tmp = NULL, samples = NULL;
    index_t i, length, step;
    unit_t min, max;
    MPI_Status status;
    MPI_Datatype sampleDatatype;

    if (initMPI(&argc, &argv) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "Cannot initialize MPI.");
    if (argc < 3) {
        fprintf(stderr, "MPI Parallel Sorting by Regular Sampling implementation.\nUsage:\n\t%s <data set (to read)> <result file (to write)>\n", argv[0]);
        MPI_Finalize();
        return 1;
    }
    if (ID == ROOT_ID) {
        tmp = openVectorFile(ARGV_FILE_NAME);
        printf("Data set size: %d, process number: %d\n", tmp->length, PROCESS_NUMBER);
        if ((tmp->length / PROCESS_NUMBER) <= PROCESS_NUMBER)
            AbortAndExit(ERRORCODE_SIZE_DONT_MATCH, "Processor number is too big or size of data set is too small for correct calculation.\n");
        ELEMENTS_NUMBER = tmp->length;
    }
    if (MPI_Bcast(tableOfConstants, TABLE_OF_CONSTANTS_SIZE, MPI_INT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");
    ELEMENTS_PER_PROCESS = listLength(ID);
    initVector(&v, ELEMENTS_PER_PROCESS);
    if (ID == ROOT_ID) {
        /* Distribute data set blocks to the other processes */
        copyVector(tmp, v, v->length);
        for (i = 1, step = ELEMENTS_PER_PROCESS; i < PROCESS_NUMBER; i++) {
            if (MPI_Send(&(tmp->vector[step]), listLength(i), MPI_UNIT, i, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
            step += listLength(i);
        }
    } else if (MPI_Recv(v->vector, ELEMENTS_PER_PROCESS, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
    quicksortVector(v);
    if (initVector(&samples, PROCESS_NUMBER - 1) == NULL)
        return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for samples vector.");
    MPI_Type_vector(PROCESS_NUMBER, 1, ELEMENTS_NUMBER / SQR_PROCESS_NUMBER, MPI_UNIT, &sampleDatatype);
    MPI_Type_commit(&sampleDatatype);
    if (ID != ROOT_ID) {
        /* Sending samples to root process */
        if (MPI_Send(v->vector, 1, sampleDatatype, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
        if (initVector(&tmp, listLength(PROCESS_NUMBER - 1)) == NULL)
            return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for temporary vector.");
    } else {
        /* Receiving samples */
        copySampleToVector(v, tmp, (v->length) / PROCESS_NUMBER, PROCESS_NUMBER);
        for (step = PROCESS_NUMBER, i = 1; i < PROCESS_NUMBER; i++, step += PROCESS_NUMBER)
            if (MPI_Recv(&(tmp->vector[step]), PROCESS_NUMBER, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
        quicksort(tmp->vector, 0, SQR_PROCESS_NUMBER);
        copySampleToVector(tmp, samples, SQR_PROCESS_NUMBER / (PROCESS_NUMBER - 1), PROCESS_NUMBER - 1);
    }
    /* Broadcast selected samples to all processes */
    if (MPI_Bcast(samples->vector, PROCESS_NUMBER - 1, MPI_UNIT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
        return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");
    if ((i = dataExchange((ID == 0) ? UNITT_MIN : getFromVector(samples, ID - 1),
                          (ID == (PROCESS_NUMBER - 1)) ? UNITT_MAX : getFromVector(samples, ID),
                          &v, tmp)) != ERRORCODE_NOERRORS)
        return AbortAndExit(i, "Error during data exchange.");
    /* Sorting new data */
    quicksortVector(v);
    if (ID != ROOT_ID) {
        /* Sending sorted data */
        if (MPI_Send(&(v->length), 1, MPI_INT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send (sending size of data) error.");
        if (MPI_Send(v->vector, v->length, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
            return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
    } else {
        /* Receiving sorted data */
        copyVector(v, tmp, v->length);
        for (step = v->length, i = 1; i < PROCESS_NUMBER; i++) {
            if (MPI_Recv(&length, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv (receiving size of data) error.");
            if (MPI_Recv(&(tmp->vector[step]), length, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
                return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
            step += length;
        }
        writeVectorToFile(tmp, ARGV_RESULT_NAME);
        freeVector(&tmp);
    }
    freeVector(&v);
    MPI_Finalize();
    return 0;
}
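/*
 * Hypothetical build/run sketch (file and binary names are assumptions, not
 * taken from the project): the program expects an input data set and an
 * output file, as the usage message in main states.
 *
 *   mpicc -o psrs main.c vector.c quicksort.c
 *   mpirun -np 4 ./psrs dataset.txt result.txt
 */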
Product* getProductByID(int id, Vector* product) {
    return (Product*) getFromVector(product, id);
}
void getProductName(int id, Vector* products, char* name) {
    Product* product = (Product*) getFromVector(products, id);
    strcpy(name, product->name);
}