Example 1
/* Headers needed for memset, getaddrinfo, socket, connect and close;
   printLog, exitErr and dataExchange are project helpers declared elsewhere. */
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <unistd.h>

void client(char *addr, char *port) {
	printLog(0, "Connecting to %s on port %s...\n", addr, port);
	
	struct addrinfo *ais, hints;
	memset(&hints, 0, sizeof(struct addrinfo));
	hints.ai_family=AF_UNSPEC;
	hints.ai_socktype=SOCK_STREAM;

	if (getaddrinfo(addr, port, &hints, &ais) != 0) {
		exitErr(2, "Wrong address/port or network error.");
	}

	int sock=-1;

	for (struct addrinfo *ai = ais; ai; ai=ai->ai_next) {
		sock=socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
		if (sock==-1) continue;
		if (connect(sock, ai->ai_addr, ai->ai_addrlen) == -1) {
			close(sock); /* avoid leaking the descriptor on a failed attempt */
			sock=-1;
			continue;
		}
		break;
	}
	freeaddrinfo(ais);

	if (sock == -1) exitErr(2, "Cannot connect to the host.");

	dataExchange(sock);

	close(sock);
}
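For reference, a minimal caller for the function above. This is not part of the original example: it assumes the project's printLog, exitErr and dataExchange helpers are defined and linked elsewhere, and simply passes the host and port from the command line.

#include <stdio.h>

int main(int argc, char **argv) {
	if (argc < 3) {
		fprintf(stderr, "Usage: %s <host> <port>\n", argv[0]);
		return 1;
	}
	client(argv[1], argv[2]);
	return 0;
}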
Example 2
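/* Parallel Sorting by Regular Sampling (PSRS) with MPI: the root reads and
   distributes the data set, every process sorts its chunk and takes regular
   samples, the root sorts the samples and broadcasts PROCESS_NUMBER-1 pivots,
   data is exchanged by pivot range, re-sorted locally and gathered at the
   root. Project-specific types and macros (pvector_t, MPI_UNIT, ID,
   PROCESS_NUMBER, ...) come from the program's own headers. */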
int main(int argc, char** argv) {
	pvector_t v, tmp = NULL, samples = NULL;
	index_t i, length, step;
	unit_t min, max;
	MPI_Status status;
	MPI_Datatype sampleDatatype;

	if (initMPI(&argc, &argv) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "Cannot initialize MPI.");

	if (argc < 3) {
		fprintf(stderr, "MPI Parallel Sorting by Regular Sampling implementation.\nUsage:\n\t%s <data set (to read)> <result file (to write)>\n", argv[0]);
		MPI_Finalize(); return 1;
	}

	if (ID == ROOT_ID) {
		tmp = openVectorFile(ARGV_FILE_NAME);
		printf("Data set size: %d, process number: %d\n", tmp->length, PROCESS_NUMBER);
		if ((tmp->length/PROCESS_NUMBER) <= PROCESS_NUMBER)
			AbortAndExit(ERRORCODE_SIZE_DONT_MATCH, "Process count is too large or the data set is too small for correct computation.\n");
		ELEMENTS_NUMBER = tmp->length;
	}

	if (MPI_Bcast(tableOfConstants, TABLE_OF_CONSTANTS_SIZE, MPI_INT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	ELEMENTS_PER_PROCESS = listLength(ID);
	if (initVector(&v, ELEMENTS_PER_PROCESS) == NULL)
		return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for data vector.");

	if (ID == ROOT_ID) { /* Distribute the data set among the processes */
		copyVector(tmp, v, v->length);
		for(i = 1, step = ELEMENTS_PER_PROCESS; i < PROCESS_NUMBER; i++) {
			if (MPI_Send(&(tmp->vector[step]), listLength(i), MPI_UNIT, i, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
			step += listLength(i);
		}
	} else if (MPI_Recv(v->vector, ELEMENTS_PER_PROCESS, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");

	/* Local sort of this process's chunk */
	quicksortVector(v);

	if (initVector(&samples, PROCESS_NUMBER -1) == NULL)
		return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for samples vector.");

	/* Strided datatype that picks PROCESS_NUMBER regular samples, one every
	   ELEMENTS_NUMBER/SQR_PROCESS_NUMBER elements of the local vector */
	MPI_Type_vector(PROCESS_NUMBER, 1, ELEMENTS_NUMBER / SQR_PROCESS_NUMBER, MPI_UNIT, &sampleDatatype);
	MPI_Type_commit(&sampleDatatype);

	if (ID != ROOT_ID) { /* Sending samples to the root process */
		if (MPI_Send(v->vector, 1, sampleDatatype, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
		if (initVector(&tmp, listLength(PROCESS_NUMBER -1)) == NULL)
			return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for temporary vector.");
	} else { /* Receiving samples */
		copySampleToVector(v, tmp, (v->length)/PROCESS_NUMBER, PROCESS_NUMBER);
		for(step = PROCESS_NUMBER, i = 1; i < PROCESS_NUMBER; i++, step += PROCESS_NUMBER)
			if (MPI_Recv(&(tmp->vector[step]), PROCESS_NUMBER, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
		quicksort(tmp->vector, 0, SQR_PROCESS_NUMBER);
		copySampleToVector(tmp, samples, SQR_PROCESS_NUMBER / (PROCESS_NUMBER - 1), PROCESS_NUMBER - 1);
	}

	/* Broadcast selected samples to processors */
	if (MPI_Bcast(samples->vector, PROCESS_NUMBER-1, MPI_UNIT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	/* Exchange data so that each process keeps the values between its two
	   pivots (UNITT_MIN and UNITT_MAX bound the first and last process) */
	if ((i = dataExchange((ID == 0) ? UNITT_MIN : getFromVector(samples, ID -1), (ID == (PROCESS_NUMBER - 1)) ? UNITT_MAX : getFromVector(samples, ID), &v, tmp)) != ERRORCODE_NOERRORS)
		return AbortAndExit(i, "Error during data exchange.");

	/* Sorting new data */
	quicksortVector(v);

	if (ID != ROOT_ID) { /* Sending sorted data */
		if (MPI_Send(&(v->length), 1, MPI_INT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send (sending size of data) error.");
		if (MPI_Send(v->vector, v->length, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
	} else { /* Receiving sorted data */
		copyVector(v, tmp, v->length);
		for(step = v->length, i = 1; i < PROCESS_NUMBER; i++) {
			if (MPI_Recv(&length, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv (sending size of data) error.");
			if (MPI_Recv(&(tmp->vector[step]), length, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
			step += length;
		}
		writeVectorToFile(tmp, ARGV_RESULT_NAME);
		freeVector(&tmp);
	}
	freeVector(&v);
	MPI_Finalize();
	return 0;
}