Example #1
/* ************************************************************************ */
int
main (int argc, char** argv)
{
  struct options options;
  struct calculation_arguments arguments;
  struct calculation_results results; 
  int rc;
  
  rc = MPI_Init(&argc,&argv);
  if (rc != MPI_SUCCESS) 
  {
    printf("Error initializing MPI. Terminating.\n");
    MPI_Abort(MPI_COMM_WORLD, rc);
  }
  AskParams(&options, argc, argv);                    /* get parameters */   
  initVariables(&arguments, &results, &options);           /* ******************************************* */
  initMPI(&mpis, &arguments);	                     /* initalize MPI */
  allocateMatrices(&arguments);                            /*  get and initialize variables and matrices  */
  initMatrices(&arguments, &options);                      /* ******************************************* */
  
  gettimeofday(&start_time, NULL);                   /*  start timer         */
  calculate(&arguments, &results, &options);         /*  solve the equation  */
  gettimeofday(&comp_time, NULL);                    /*  stop timer          */
  if (0 == mpis.rank)
  {
    displayStatistics(&arguments, &results, &options);               /* **************** */
    DisplayMatrix("Matrix:",                                         /*  display some    */
		  arguments.Matrix[results.m][0], options.interlines);       /*  statistics and  */
  }
  freeMatrices(&arguments);
  freeMPI(&mpis);                                                      /*  free memory     */
  /* **************** */
  //MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
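
This excerpt leans on several definitions that are not shown: the mpis, start_time, and comp_time globals and the initMPI/freeMPI helpers. Below is a minimal hypothetical sketch of what they might look like; the struct name and helper bodies are assumptions, not the program's actual code (the real initMPI also receives the arguments struct, which this sketch ignores).

/* Hypothetical sketch of the globals and helpers Example #1 assumes. */
#include <mpi.h>
#include <sys/time.h>

struct mpi_state
{
  int rank;   /* rank of this process in MPI_COMM_WORLD */
  int size;   /* total number of processes              */
};

static struct mpi_state mpis;
static struct timeval start_time, comp_time;

/* Query rank and communicator size once, after MPI_Init has run. */
static void
initMPI (struct mpi_state* m)
{
  MPI_Comm_rank(MPI_COMM_WORLD, &m->rank);
  MPI_Comm_size(MPI_COMM_WORLD, &m->size);
}

/* Counterpart of initMPI; nothing to release in this sketch. */
static void
freeMPI (struct mpi_state* m)
{
  (void) m;
}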
Example #2
	extern "C" LUAMBEDTLS_DLL_EXPORTED int luaopen_luambedtls(lua_State * L){
		State * state = new State(L);
		Stack * stack = state->stack;
		Module luambedtls_module;

		stack->newTable();

		initMPI(state, luambedtls_module);
		initASN1buf(state, luambedtls_module);
		initASN1named(state, luambedtls_module);
		initASN1sequence(state, luambedtls_module);

		//key-pairs
		initPKContext(state, luambedtls_module);
		initPKinfo(state, luambedtls_module);

		initCTRDRBGContext(state, luambedtls_module);
		initDHMContext(state, luambedtls_module);
		initEntropyContext(state, luambedtls_module);
		initSSLConfig(state, luambedtls_module);
		initSSLContext(state, luambedtls_module);
		initSSLCookieContext(state, luambedtls_module);
		initSSLSession(state, luambedtls_module);
		initx509crt(state, luambedtls_module);
		initx509crl(state, luambedtls_module);
		initx509crlEntry(state, luambedtls_module);
		initx509crtProfile(state, luambedtls_module);
		initx509csr(state, luambedtls_module);
		initx509writeCert(state, luambedtls_module);
		initx509writeCSR(state, luambedtls_module);
		initTimingDelayContext(state, luambedtls_module);
		initAESContext(state, luambedtls_module);

		//symmetric-encryption
		initARC4Context(state, luambedtls_module);
		initBlowfishContext(state, luambedtls_module);
		initCamelliaContext(state, luambedtls_module);
		initDESContext(state, luambedtls_module);
		initDES3Context(state, luambedtls_module);
		initGCMContext(state, luambedtls_module);
		initXTEAContext(state, luambedtls_module);

		//asymmetric-encryption
		initDHMContext(state, luambedtls_module); //note: DHM context was already registered above
		initRSAContext(state, luambedtls_module);

		//EC
		initECPCurveInfo(state, luambedtls_module);
		initECPPoint(state, luambedtls_module);
		initECPGroup(state, luambedtls_module);
		initECPKeyPair(state, luambedtls_module);
		initECDHContext(state, luambedtls_module);
		initECSDAContext(state, luambedtls_module);

		//message-digest
		initMDContext(state, luambedtls_module);
		initMDinfo(state, luambedtls_module);

		//cipher
		initCipherContext(state, luambedtls_module);
		initCipherInfo(state, luambedtls_module);

		//utils
		initUtils(state, luambedtls_module);

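		//module-level functions and constants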
		luambedtls_module["init"] = init;
		initConstants(state, luambedtls_module);
		luambedtls_module["strError"] = strError;
		luambedtls_module["debugTreshhold"] = debugTreshhold;
		luambedtls_module["MPIlen"] = MPIlen;
		luambedtls_module["pushOIDAttrShortName"] = pushOIDAttrShortName;
		luambedtls_module["pushOIDNumericString"] = pushOIDNumericString;
		luambedtls_module["pushOIDExtType"] = pushOIDExtType;
		luambedtls_module["pushOIDPkAlg"] = pushOIDPkAlg;

		state->registerLib(luambedtls_module);
		return 1;
	}
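
All of the init* calls above share one job: put C functions and constants into the module table that require() returns. luambedtls does this through its own State/Stack/Module wrappers; as a point of comparison, here is a minimal self-contained sketch of the same registration pattern in plain C using the stock Lua 5.2+ API (luaL_newlib). The module name demo and its single function are made up for illustration.

#include <lua.h>
#include <lauxlib.h>

/* A trivial module function: leaves one string on the Lua stack. */
static int demo_hello(lua_State * L){
	lua_pushstring(L, "hello from C");
	return 1; /* number of return values */
}

static const luaL_Reg demo_funcs[] = {
	{"hello", demo_hello},
	{NULL, NULL} /* sentinel */
};

/* require("demo") resolves this symbol: luaopen_<module name>. */
int luaopen_demo(lua_State * L){
	luaL_newlib(L, demo_funcs); /* create and push the module table */
	return 1;                   /* that table is require()'s result */
}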
Example #3
int main(int argc, char** argv) {
	pvector_t v, tmp = NULL, samples = NULL;
	index_t i, length, step;
	unit_t min, max;
	MPI_Status status;
	MPI_Datatype sampleDatatype;

	if (initMPI(&argc, &argv) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "Cannot initialize MPI.");

	if (argc < 3) {
		fprintf(stderr, "MPI Parallel Sorting by Regular Sampling implementation.\nUsage:\n\t%s <data set (to read)> <result  file (to write)>\n", argv[0]);
		MPI_Finalize(); return 1;
	}

	if (ID == ROOT_ID) {
		tmp = openVectorFile(ARGV_FILE_NAME);
		printf("Data set size: %d, process number: %d\n", tmp->length, PROCESS_NUMBER);
		if ((tmp->length/PROCESS_NUMBER) <= PROCESS_NUMBER)
			AbortAndExit(ERRORCODE_SIZE_DONT_MATCH, "Processor number is too big or size of data set is too small for correct calculation.\n");
		ELEMENTS_NUMBER = tmp->length;
	}

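	/* Broadcast the run-wide constants from the root to every process;
	   ELEMENTS_NUMBER set above is presumably one entry of this table. */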
	if (MPI_Bcast(tableOfConstants, TABLE_OF_CONSTANTS_SIZE, MPI_INT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	ELEMENTS_PER_PROCESS = listLength(ID);
	initVector(&v, ELEMENTS_PER_PROCESS);

	if (ID == ROOT_ID) { /* Bcast data set */
		copyVector(tmp, v, v->length);
		for(i = 1, step = ELEMENTS_PER_PROCESS; i < PROCESS_NUMBER; i++) {
			if (MPI_Send(&(tmp->vector[step]), listLength(i), MPI_UNIT, i, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
			step += listLength(i);
		}
	} else if (MPI_Recv(v->vector, ELEMENTS_PER_PROCESS, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");

	quicksortVector(v);

	if (initVector(&samples, PROCESS_NUMBER -1) == NULL)
		return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for samples vector.");

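	/* Derived datatype that picks PROCESS_NUMBER regularly spaced samples
	   (stride n/p^2) out of a locally sorted vector. */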
	MPI_Type_vector(PROCESS_NUMBER, 1, ELEMENTS_NUMBER / SQR_PROCESS_NUMBER, MPI_UNIT, &sampleDatatype);
	MPI_Type_commit(&sampleDatatype);

	if (ID != ROOT_ID) { /* Sending samples to root process */
		if (MPI_Send(v->vector, 1, sampleDatatype, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
		if (initVector(&tmp, listLength(PROCESS_NUMBER -1)) == NULL)
			return AbortAndExit(ERRORCODE_CANT_MALLOC, "Cannot allocate memory for temporary vector.");
	} else { /* Receiving samples */
		copySampleToVector(v, tmp, (v->length)/PROCESS_NUMBER, PROCESS_NUMBER);
		for(step = PROCESS_NUMBER, i = 1; i < PROCESS_NUMBER; i++, step += PROCESS_NUMBER)
			if (MPI_Recv(&(tmp->vector[step]), PROCESS_NUMBER, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
		quicksort(tmp->vector, 0, SQR_PROCESS_NUMBER);
		copySampleToVector(tmp, samples, SQR_PROCESS_NUMBER / (PROCESS_NUMBER - 1), PROCESS_NUMBER - 1);
	}

	/* Broadcast selected samples to processors */
	if (MPI_Bcast(samples->vector, PROCESS_NUMBER-1, MPI_UNIT, ROOT_ID, MPI_COMM_WORLD) != MPI_SUCCESS)
		return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Bcast error.");

	if ((i = dataExchange((ID == 0) ? UNITT_MIN : getFromVector(samples, ID - 1),
	                      (ID == (PROCESS_NUMBER - 1)) ? UNITT_MAX : getFromVector(samples, ID),
	                      &v, tmp)) != ERRORCODE_NOERRORS)
		return AbortAndExit(i, "Error during data exchange.");

	/* Sorting new data */
	quicksortVector(v);

	if (ID != ROOT_ID) { /* Sending sorted data */
		if (MPI_Send(&(v->length), 1, MPI_INT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send (sending size of data) error.");
		if (MPI_Send(v->vector, v->length, MPI_UNIT, ROOT_ID, 0, MPI_COMM_WORLD) != MPI_SUCCESS)
			return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Send error.");
	} else { /* Receiving sorted data */
		copyVector(v, tmp, v->length);
		for(step = v->length, i = 1; i < PROCESS_NUMBER; i++) {
			if (MPI_Recv(&length, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv (receiving size of data) error.");
			if (MPI_Recv(&(tmp->vector[step]), length, MPI_UNIT, i, 0, MPI_COMM_WORLD, &status) != MPI_SUCCESS)
				return AbortAndExit(ERRORCODE_MPI_ERROR, "MPI_Recv error.");
			step += length;
		}
		writeVectorToFile(tmp, ARGV_RESULT_NAME);
		freeVector(&tmp);
	}
	freeVector(&v);
	MPI_Finalize();
	return 0;
}
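
The least obvious step above is sampleDatatype: MPI_Type_vector describes PROCESS_NUMBER single elements a fixed stride apart, so one MPI_Send with count 1 ships all regular samples of the locally sorted vector without first copying them into a contiguous buffer. Below is a minimal self-contained sketch of that technique; the 12-element array and stride of 4 are made up for illustration.

/* Pick every 4th element of a 12-element array with a strided datatype. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
	int data[12], samples[3], i;
	MPI_Datatype sample_t;

	MPI_Init(&argc, &argv);
	for (i = 0; i < 12; i++)
		data[i] = i * 10;

	/* 3 blocks of 1 element, stride 4: selects data[0], data[4], data[8]. */
	MPI_Type_vector(3, 1, 4, MPI_INT, &sample_t);
	MPI_Type_commit(&sample_t);

	/* Send the strided view to ourselves; it arrives as 3 contiguous ints. */
	MPI_Sendrecv(data, 1, sample_t, 0, 0, samples, 3, MPI_INT, 0, 0,
	             MPI_COMM_SELF, MPI_STATUS_IGNORE);

	printf("samples: %d %d %d\n", samples[0], samples[1], samples[2]);

	MPI_Type_free(&sample_t);
	MPI_Finalize();
	return 0;
}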