Code Example #1
static MPI_Offset
get_filesize(const char *filename)
{
    int		mpierr;
    MPI_File	fd;
    MPI_Offset	filesize;
#ifndef H5_HAVE_MPI_GET_SIZE
    struct stat stat_buf;
#endif

#ifdef H5_HAVE_MPI_GET_SIZE
    mpierr = MPI_File_open(MPI_COMM_SELF, (char*)filename, MPI_MODE_RDONLY,
	MPI_INFO_NULL, &fd);
    VRFY((mpierr == MPI_SUCCESS), "");

    mpierr = MPI_File_get_size(fd, &filesize);
    VRFY((mpierr == MPI_SUCCESS), "");

    mpierr = MPI_File_close(&fd);
    VRFY((mpierr == MPI_SUCCESS), "");
#else
    /* Some systems (only SGI Altix Propack 4 so far) don't return the correct
     * file size from MPI_File_get_size.  Use stat instead.
     */
    if ((mpierr = stat(filename, &stat_buf)) < 0)
        VRFY((mpierr == MPI_SUCCESS), "");

    /* Hopefully this casting is safe */
    filesize = (MPI_Offset)(stat_buf.st_size);
#endif

    return(filesize);
}
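
For reference, here is a minimal, self-contained sketch of the same query without the HDF5 test harness (VRFY above is an HDF5 test-suite macro); the "data.bin" path is a placeholder and error handling is reduced to a single check:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_File   fd;
    MPI_Offset filesize = 0;

    MPI_Init(&argc, &argv);

    /* Open read-only on this process only, query the size, then close.
     * "data.bin" is a placeholder path. */
    if (MPI_File_open(MPI_COMM_SELF, "data.bin", MPI_MODE_RDONLY,
                      MPI_INFO_NULL, &fd) == MPI_SUCCESS) {
        MPI_File_get_size(fd, &filesize);
        printf("file size: %lld bytes\n", (long long)filesize);
        MPI_File_close(&fd);
    }

    MPI_Finalize();
    return 0;
}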
Code Example #2
FORTRAN_API void FORT_CALL mpi_file_get_size_(MPI_Fint *fh, MPI_Offset *size, MPI_Fint *ierr )
{
    MPI_File fh_c;
    
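    /* Convert the Fortran integer file handle to its C handle. */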
    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_get_size(fh_c, size);
}
Code Example #3
File: mpi_File.c Project: abouteiller/ompi-aurelien
JNIEXPORT jlong JNICALL Java_mpi_File_getSize(
        JNIEnv *env, jobject jthis, jlong fh)
{
    MPI_Offset size;
    int rc = MPI_File_get_size((MPI_File)fh, &size);
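    // ompi_java_exceptionCheck raises a Java MPIException if rc is an MPI error code.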
    ompi_java_exceptionCheck(env, rc);
    return (jlong)size;
}
Code Example #4
File: chimeracheckrdp.cpp Project: azerxu/mothur
//***************************************************************************************************************
void ChimeraCheckRDP::readName(string namefile) {
	try{
	
		string name;

	#ifdef USE_MPI
		
		MPI_File inMPI;
		MPI_Offset size;
		MPI_Status status;

		//char* inFileName = new char[namefile.length()];
		//memcpy(inFileName, namefile.c_str(), namefile.length());
		
		char inFileName[1024];
		strcpy(inFileName, namefile.c_str());

		MPI_File_open(MPI_COMM_WORLD, inFileName, MPI_MODE_RDONLY, MPI_INFO_NULL, &inMPI);  
		MPI_File_get_size(inMPI, &size);

		//delete inFileName;

		char* buffer = new char[size+1];
		MPI_File_read(inMPI, buffer, size, MPI_CHAR, &status);
		buffer[size] = '\0';	//terminate: MPI_File_read does not null-terminate

		string tempBuf = buffer;
		if (tempBuf.length() > size) { tempBuf = tempBuf.substr(0, size);  }
		istringstream iss (tempBuf,istringstream::in);
		delete [] buffer;	//array form to match new[]
		
		while(!iss.eof()) {
			iss >> name; m->gobble(iss);
			names[name] = name;
		}
	
		MPI_File_close(&inMPI);
		
	#else	
	
		ifstream in;
		m->openInputFile(namefile, in);
				
		while (!in.eof()) {
			in >> name; m->gobble(in);
			names[name] = name;
		}
		in.close();
	
	#endif
	
	}
	catch(exception& e) {
		m->errorOut(e, "ChimeraCheckRDP", "readName");
		exit(1);
	}
}
Code Example #5
File: file_get_size_f.c Project: bringhurst/ompi
void mpi_file_get_size_f(MPI_Fint *fh, MPI_Offset *size, MPI_Fint *ierr)
{
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Offset c_size;

    *ierr = OMPI_INT_2_FINT(MPI_File_get_size(c_fh, 
					      &c_size));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
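        /* Note: the (MPI_Fint) cast narrows MPI_Offset; sizes wider than a
         * Fortran INTEGER would be truncated here. */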
        *size = (MPI_Fint) c_size;
    }
}
Code Example #6
File: mpi_ram_raf.c Project: hinike/MyDocuments
raf_t MPI_Load_raf(char *name,MPI_Comm comm){
	raf_t raf=(raf_t)RTmalloc(sizeof(struct raf_struct_s));
	raf_init(raf,name);
	raf->blocksize=65536;
	MPI_File f;
	MPI_Comm_size(comm,&(raf->workers));
	MPI_Comm_rank(comm,&(raf->rank));
	int e=MPI_File_open(comm,name,MPI_MODE_RDONLY,MPI_INFO_NULL,&f);
	if(e){
		int i=1024;
		char msg[1024];
		MPI_Error_string(e,msg,&i);
		Fatal(0,error,"err is %s\n",msg);
	}
	MPI_File_set_errhandler(f,MPI_ERRORS_ARE_FATAL);
	MPI_File_get_size(f,&(raf->size));
	if ((raf->size)%(raf->blocksize)) Fatal(0,error,"file not multiple of block size");
	if (((raf->size)/(raf->blocksize))%(raf->workers)) Fatal(0,error,"block count not multiple of worker count");
	//Warning(info,"my share is %d",(raf->size)/(raf->workers));
	raf->data=RTmalloc((raf->size)/(raf->workers));
	if (1) {
		Warning(info,"using MPI_File_read_all");
		MPI_Datatype ftype;
		MPI_Type_vector((raf->size)/(raf->blocksize),(raf->blocksize),(raf->blocksize)*(raf->workers),MPI_CHAR,&ftype);
		MPI_Type_commit(&ftype);
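		/* Block-cyclic view: this rank reads every workers-th block of the file,
		 * starting at block number rank. */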
		MPI_File_set_view(f,(raf->blocksize)*(raf->rank),MPI_CHAR,ftype,"native",MPI_INFO_NULL);
		MPI_File_read_all(f,raf->data,(raf->size)/(raf->workers),MPI_CHAR,MPI_STATUS_IGNORE);
		MPI_File_close(&f);
		MPI_Type_free(&ftype);
	} else {
		Warning(info,"using MPI_File_read_at");
		int blockcount=((raf->size)/(raf->blocksize))/(raf->workers);
		for(int i=0;i<blockcount;i++){
			MPI_File_read_at(f,((i*(raf->workers)+(raf->rank))*(raf->blocksize)),
				(raf->data)+(i*(raf->blocksize)),(raf->blocksize),MPI_CHAR,MPI_STATUS_IGNORE);
		}
		MPI_File_close(&f);
	}
	raf->rq_tag=core_add(raf,request_service);
	raf->ack_tag=core_add(raf,receive_service);
	raf->shared.read=read_at;
	raf->shared.size=mpi_size;
	raf->shared.close=mpi_close;
	//Warning(info,"file loaded");
	return raf;
}
Code Example #7
File: egfilebuf.cpp Project: vpike/LomonosovTB
bool read_file_bufferizer::begin_read(const char *filename, file_offset start_pos, file_offset length) {
	assert(opened_ == false); // do not call twice
#ifdef FILE_VIA_MPI
	if (MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh) != MPI_SUCCESS)
		return false;
#else
#ifdef WIN32_FILE
	fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, NULL, NULL);
	if (fh == INVALID_HANDLE_VALUE) return false;
	unsigned long size_low, size_high;
	size_low = GetFileSize(fh, &size_high);
	total_file_length = ((file_offset)size_high) << 32 | ((file_offset)size_low);
#else
	fh = open(filename, O_RDONLY | O_BINARY);
	if (fh < 0) return false;
#endif
#endif
	opened_ = true;
	buf_size = TB_FILE_BUF_SIZE;
	buffer = (char *)malloc(buf_size);
	buf_pos = bytes_in_buffer = 0;
	cur_file_pos_read = start_pos;
#ifdef FILE_VIA_MPI
	MPI_File_get_size(fh, &total_file_length);
#else

	#if !defined(WIN32) && !defined(__ANDROID__)
	BUILD_BUG_ON(sizeof(off_t) < 8); // lseek MUST use 64-bit file offsets. check _LARGEFILE64_SOURCE and _FILE_OFFSET_BITS=64 defines
	#endif

#ifndef WIN32_FILE
	total_file_length = os_lseek64(fh, 0, SEEK_END);
#endif
#endif
	if (length != (file_offset)FB_TO_THE_END) end_file_pos = start_pos+length;
	else end_file_pos = total_file_length;
	write_mode = false;
	no_file = false;
	read_file_name = (char *)malloc(strlen(filename)+1);
	strcpy(read_file_name, filename);
	return true;
}
Code Example #8
File: dinfermodel.c Project: Unidata/netcdf-c
/**
\internal
\ingroup datasets
Provide open, read and close for use when searching for magic numbers
*/
static int
openmagic(struct MagicFile* file)
{
    int status = NC_NOERR;

    switch (file->model->iosp) {
    case NC_IOSP_MEMORY: {
	/* Get its length */
	NC_memio* meminfo = (NC_memio*)file->parameters;
        assert(meminfo != NULL);
	file->filelen = (long long)meminfo->size;
	} break;
    case NC_IOSP_FILE: {
#ifdef USE_PARALLEL
        if (file->use_parallel) {
	    int retval;
	    MPI_Offset size;
            assert(file->parameters != NULL);
	    if((retval = MPI_File_open(((NC_MPI_INFO*)file->parameters)->comm,
                                   (char*)file->path,MPI_MODE_RDONLY,
                                   ((NC_MPI_INFO*)file->parameters)->info,
                                   &file->fh)) != MPI_SUCCESS) {
#ifdef MPI_ERR_NO_SUCH_FILE
		int errorclass;
		MPI_Error_class(retval, &errorclass);
		if (errorclass == MPI_ERR_NO_SUCH_FILE)
#ifdef NC_ENOENT
		    status = NC_ENOENT;
#else
		    status = errno;
#endif
		else
#endif
		    status = NC_EPARINIT;
		goto done;
	    }
	    /* Get its length */
	    if((retval=MPI_File_get_size(file->fh, &size)) != MPI_SUCCESS)
	        {status = NC_EPARINIT; goto done;}
	    file->filelen = (long long)size;
	} else
Code Example #9
File: aiori-MPIIO.c Project: gcongiu/E10
IOR_offset_t
IOR_GetFileSize_MPIIO(IOR_param_t * test,
                      MPI_Comm      testComm,
                      char        * testFileName)
{
    IOR_offset_t aggFileSizeFromStat,
                 tmpMin, tmpMax, tmpSum;
    MPI_File     fd;

    MPI_CHECK(MPI_File_open(testComm, testFileName, MPI_MODE_RDONLY,
                            MPI_INFO_NULL, &fd),
              "cannot open file to get file size");
    MPI_CHECK(MPI_File_get_size(fd, &aggFileSizeFromStat),
              "cannot get file size");
    MPI_CHECK(MPI_File_close(&fd), "cannot close file");

    if (test->filePerProc == TRUE) {
        MPI_CHECK(MPI_Allreduce(&aggFileSizeFromStat, &tmpSum, 1,
                                MPI_LONG_LONG_INT, MPI_SUM, testComm),
                  "cannot total data moved");
        aggFileSizeFromStat = tmpSum;
    } else {
        MPI_CHECK(MPI_Allreduce(&aggFileSizeFromStat, &tmpMin, 1,
                                MPI_LONG_LONG_INT, MPI_MIN, testComm),
                  "cannot total data moved");
        MPI_CHECK(MPI_Allreduce(&aggFileSizeFromStat, &tmpMax, 1,
                  MPI_LONG_LONG_INT, MPI_MAX, testComm),
                  "cannot total data moved");
        if (tmpMin != tmpMax) {
            if (rank == 0) {
                WARN("inconsistent file size by different tasks");
            }
            /* incorrect, but now consistent across tasks */
            aggFileSizeFromStat = tmpMin;
        }
    }

    return(aggFileSizeFromStat);

} /* IOR_GetFileSize_MPIIO() */
Code Example #10
File: avtParallel.C Project: burlen/visit_vtk_7_src
void
PullInMPI_IOSymbols()
{
#ifdef PARALLEL
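    //This function merely references MPI-IO symbols so the linker pulls them in;
    //it is never meant to run.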

    //Don't call this!
    EXCEPTION1(ImproperUseException, "Do not call PullInMPI_IOSymbols");

    MPI_Info info;
    MPI_File fh;
    MPI_Offset sz;
    char *nm;
    int whence;
    void *buf;
    int count;
    MPI_Datatype datatype;
    MPI_Status status;

    MPI_File_open(VISIT_MPI_COMM, nm, 0, info, &fh);
    MPI_File_get_size(fh, &sz);
    MPI_File_seek(fh, sz, whence);
    MPI_File_read(fh, buf, count, datatype, &status);
#endif
}
Code Example #11
File: mpiSort.c Project: fredjarlier/mpiSORT
int main (int argc, char *argv[]){

	char *x, *y, *z, *xbuf, *hbuf, *chrNames[MAXNBCHR];
	int fd;
	off_t hsiz;
	struct stat st;

	MPI_File mpi_filed;
	MPI_File mpi_file_split_comm;

	MPI_Offset fileSize, unmapped_start, discordant_start;
	int num_proc, rank;
	int res, nbchr, i, paired, write_sam;
	int ierr, errorcode = MPI_ERR_OTHER;
	char *file_name, *output_dir;

	char *header;

	unsigned int headerSize;
	unsigned char threshold;

	size_t input_file_size;
	size_t unmappedSize = 0;
	size_t discordantSize = 0;
	size_t *readNumberByChr = NULL, *localReadNumberByChr = NULL;
	Read **reads;

	double time_count;
	double time_count1;
	int g_rank, g_size;
	MPI_Comm split_comm; //used to split communication when jobs have no reads to sort
	int split_rank, split_size; //after split communication we update the rank and the size
	double tic, toc;
	int compression_level;
	size_t fsiz, lsiz, loff;
	const char *sort_name;
	MPI_Info finfo;

	/* Set default values */
	compression_level = 3;
	parse_mode = MODE_OFFSET;
	sort_name = "coordinate";
	paired = 0;
	threshold = 0;
	write_sam = 0;
	/* Check command line */
	while ((i = getopt(argc, argv, "c:hnpq:")) != -1) {
		switch(i) {
			case 'c': /* Compression level */
				compression_level = atoi(optarg);
				break;
			case 'h': /* Usage display */
				usage(basename(*argv));
				return 0;
			case 'n':
				parse_mode = MODE_NAME;
				sort_name = "queryname";
				break;
			case 'p': /* Paired reads */
				paired = 1;
				break;
			case 'q': /* Quality threshold */
				threshold = atoi(optarg);
				break;
			default:
				usage(basename(*argv));
				return 1;
		}
	}
	if (argc - optind != 2) {
		usage(basename(*argv));
		return 1;
	}
	file_name = argv[optind];
	output_dir = argv[optind+1];

	/* Check arguments */
	res = access(file_name, F_OK|R_OK);
	if (res == -1)
		err(1, "%s", file_name);
	res = access(output_dir, F_OK|W_OK);
	if (res == -1)
		err(1, "%s", output_dir);

	/* MPI inits */
	res = MPI_Init(&argc, &argv);
	assert(res == MPI_SUCCESS);
	res = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	assert(res == MPI_SUCCESS);
	res = MPI_Comm_size(MPI_COMM_WORLD, &num_proc);
	assert(res == MPI_SUCCESS);

	g_rank = rank;
	g_size = num_proc;

	/* Small summary */
	if (rank == 0) {
		fprintf(stderr, "Number of processes : %d\n", num_proc);
		fprintf(stderr, "Reads' quality threshold : %d\n", threshold);
		fprintf(stderr, "Compression Level is : %d\n", compression_level);
		fprintf(stderr, "SAM file to read : %s\n", file_name);
		fprintf(stderr, "Output directory : %s\n", output_dir);
	}

	/* Process input file */
	fd = open(file_name, O_RDONLY, 0666);
	assert(fd != -1);
	assert(fstat(fd, &st) != -1);
	xbuf = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_FILE|MAP_PRIVATE, fd, 0);
	assert(xbuf != MAP_FAILED);

	/* Parse SAM header */
	memset(chrNames, 0, sizeof(chrNames));
	x = xbuf; nbchr = 0;
	while (*x == '@') {
		y = strchr(x, '\n');
		z = x; x = y + 1;
		if (strncmp(z, "@SQ", 3) != 0) continue;
		/* Save reference names */
		y = strstr(z, "SN:");
		assert(y != NULL);
		z = y + 3;
		while (*z && !isspace((unsigned char)*z)) z++;
		chrNames[nbchr++] = strndup(y + 3, z - y - 3);
		assert(nbchr < MAXNBCHR - 2);
	}
	chrNames[nbchr++] = strdup(UNMAPPED);
	chrNames[nbchr++] = strdup(DISCORDANT);

	hsiz = x - xbuf;
	hbuf = strndup(xbuf, hsiz);

	if (rank == 0) {
		fprintf(stderr, "The size of the file is %zu bytes\n", (size_t)st.st_size);
		fprintf(stderr, "Header has %d+2 references\n", nbchr - 2);
	}
	asprintf(&header, "@HD\tVN:1.0\tSO:%s\n%s", sort_name, hbuf);

	free(hbuf);

	assert(munmap(xbuf, (size_t)st.st_size) != -1);
	assert(close(fd) != -1);

	//task FIRST FINE TUNING FINFO FOR READING OPERATIONS


	MPI_Info_create(&finfo);
	/*
	 * In this part you should adjust the striping factor and unit according
	 * to the underlying filesystem.
	 * Harmless for other file systems.
	 *
	 */
	MPI_Info_set(finfo,"striping_factor", STRIPING_FACTOR);
	MPI_Info_set(finfo,"striping_unit", STRIPING_UNIT); //2G striping
	MPI_Info_set(finfo,"ind_rd_buffer_size", STRIPING_UNIT); //2gb buffer
	MPI_Info_set(finfo,"romio_ds_read",DATA_SIEVING_READ);

	/*
	 * The hints for collective reading and writing
	 * should be adapted and tested according to the file system too.
	 * Harmless for other file systems.
	 */
	MPI_Info_set(finfo,"nb_proc", NB_PROC);
	MPI_Info_set(finfo,"cb_nodes", CB_NODES);
	MPI_Info_set(finfo,"cb_block_size", CB_BLOCK_SIZE);
	MPI_Info_set(finfo,"cb_buffer_size", CB_BUFFER_SIZE);


	//we open the input file
	ierr = MPI_File_open(MPI_COMM_WORLD, file_name,  MPI_MODE_RDONLY , finfo, &mpi_filed);
	//assert(in != -1);
	if (ierr){
		if (rank == 0) fprintf(stderr, "%s: Failed to open file in process 0 %s\n", argv[0], argv[1]);
		MPI_Abort(MPI_COMM_WORLD, errorcode);
		exit(2);
	}
	ierr = MPI_File_get_size(mpi_filed, &fileSize);
	assert(ierr == MPI_SUCCESS);
	input_file_size = (long long)fileSize;

	/* Get chunk offset and size */
	fsiz = input_file_size;
	lsiz = fsiz / num_proc;
	loff = rank * lsiz;

	tic = MPI_Wtime();

	headerSize = unmappedSize = discordantSize = strlen(header);

	//We place the file offset of each process at the beginning of a read's line
	size_t *goff =(size_t*)calloc((size_t)(num_proc+1), sizeof(size_t));
	init_goff(mpi_filed,hsiz,input_file_size,num_proc,rank,goff);

	//We calculate the size to read for each process
	lsiz = goff[rank+1]-goff[rank];
	//NOW WE WILL PARSE
	size_t j=0;
	size_t poffset = goff[rank]; //Current offset in the SAM file

	//nbchr because we add the discordant reads in the structure
	reads = (Read**)malloc((nbchr)*sizeof(Read));//We allocate a linked list of struct for each Chromosome (last chr = unmapped reads)
	readNumberByChr = (size_t*)malloc((nbchr)*sizeof(size_t));//Array with the number of reads found in each chromosome
	localReadNumberByChr = (size_t*)malloc((nbchr)*sizeof(size_t));//Array with the number of reads found in each chromosome
	Read ** anchor = (Read**)malloc((nbchr)*sizeof(Read));//Pointer on the first read of each chromosome

	//Init first read
	for(i = 0; i < (nbchr); i++){
		reads[i] = malloc(sizeof(Read));
		reads[i]->coord = 0;
		anchor[i] = reads[i];
		readNumberByChr[i]=0;
	}

	toc = MPI_Wtime();

	char *local_data_tmp = malloc(1024*1024);
	char *local_data =(char*)malloc(((goff[rank+1]-poffset)+1)*sizeof(char));
	size_t size_tmp= goff[rank+1]-poffset;
	local_data[goff[rank+1]-poffset] = 0;
	char *q=local_data;

	//We read the file sam and parse
	while(poffset < goff[rank+1]){

		size_t size_to_read = 0;

		if( (goff[rank+1]-poffset) < DEFAULT_INBUF_SIZE ){
			size_to_read = goff[rank+1]-poffset;
		}
		else{
			size_to_read = DEFAULT_INBUF_SIZE;
		}

		// we load the buffer
		//hold temporary size of SAM
		//due to limitation in MPI_File_read_at
		local_data_tmp =(char*)realloc(local_data_tmp, (size_to_read+1)*sizeof(char));
		local_data_tmp[size_to_read]=0;

		// Original reading part is before 18/09/2015
		MPI_File_read_at(mpi_filed, (MPI_Offset)poffset, local_data_tmp, size_to_read, MPI_CHAR, MPI_STATUS_IGNORE);
		size_t local_offset=0;
		assert(strlen(local_data_tmp) == size_to_read);

		//we find where the last complete line read ends, for updating the next poffset
		size_t offset_last_line = size_to_read-1;

		size_t extra_char=0;
		while(local_data_tmp[offset_last_line] != '\n'){
			offset_last_line -- ;
			extra_char++;
		}

		local_data_tmp[size_to_read - extra_char]=0;
		size_t local_data_tmp_sz = strlen(local_data_tmp);

		//If it is the last line of the file, we place a final '\n' for the tokenizer function
		if(rank == num_proc-1 && ((poffset+size_to_read) == goff[num_proc])){
			local_data_tmp[offset_last_line]='\n';
		}

		//Now we parse Read in local_data
		parser_paired(local_data_tmp, rank, poffset, threshold, nbchr, &readNumberByChr, chrNames, &reads);

		//now we copy local_data_tmp in local_data
		char *p = local_data_tmp;
		int pos =0;
		while (*p && (pos < local_data_tmp_sz)) {*q=*p;p++;q++;pos++;}

		//we go to the next line
		poffset+=(offset_last_line+1);
		local_offset+=(offset_last_line+1);

	}

	assert(size_tmp == strlen(local_data));

	fprintf(stderr, "%d (%.2lf)::::: *** FINISH PARSING FILE ***\n", rank, MPI_Wtime()-toc);

	if (local_data_tmp) free(local_data_tmp);
	malloc_trim(0);

	MPI_Barrier(MPI_COMM_WORLD);

	//We set attribute next of the last read and go back to first read of each chromosome
	for(i = 0; i < nbchr; i++){
		reads[i]->next = NULL;
		reads[i] = anchor[i];
	}
	free(anchor);

	//We count how many reads we found
	size_t nb_reads_total =0,nb_reads_global =0;
	for(j=0;j<nbchr;j++){
		nb_reads_total+=readNumberByChr[j];
	}

	MPI_Allreduce(&nb_reads_total, &nb_reads_global, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);

	/*
	 * We handle unmapped and discordant reads
	 */

	int s = 0;
	for (s = 1; s < 3; s++){

		MPI_File mpi_file_split_comm2;
		double time_count;

		size_t total_reads = 0;
		MPI_Allreduce(&readNumberByChr[nbchr-s], &total_reads , 1, MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);

		if ((rank == 0) && (s == 1))
			fprintf(stderr, "rank %d :::: total read to sort for unmapped = %zu \n", rank, total_reads);

		if ((rank == 0) && (s == 2))
			fprintf(stderr, "rank %d :::: total read to sort for discordant = %zu \n", rank, total_reads);

		MPI_Barrier(MPI_COMM_WORLD);

		if (total_reads == 0){
			// nothing to sort for unmapped
			// maybe write an empty bam file
		}
		else{
			int i1,i2;
			size_t *localReadsNum_rank0 = (size_t *)malloc(num_proc*sizeof(size_t));
			localReadsNum_rank0[0] = 0;
			int file_pointer_to_free = 0;
			int split_comm_to_free = 0;
			//we build a vector with rank job
			int val_tmp1 = 0;
			int val_tmp2 = 0;
			int chosen_rank = 0;
			// the color tells which communicator the rank belongs to
			// color = 0 will be the new communicator color
			// otherwise the color is 1
			int *color_vec_to_send =  (int *)malloc(num_proc*sizeof(int));
			// the key value tells the order in the new communicator
			int *key_vec_to_send =  (int *)malloc(num_proc*sizeof(int));

			//all ranks gather the vector
			MPI_Allgather(&readNumberByChr[nbchr-s] , 1, MPI_LONG_LONG_INT, localReadsNum_rank0 , 1, MPI_LONG_LONG_INT, MPI_COMM_WORLD);
			MPI_Barrier(MPI_COMM_WORLD);

			if (rank == 0){
				//we must choose the first rank with reads to sort
				i1=0;
				while (localReadsNum_rank0[i1] == 0){
					chosen_rank++;
					i1++;
				}
			}

			//we broadcast the chosen rank
			//task: replace the broadcast with a send/receive
			MPI_Bcast( &chosen_rank, 1, MPI_INT, 0, MPI_COMM_WORLD);
			MPI_Barrier(MPI_COMM_WORLD);

			//we must choose which rank is going to split the communication
			if (((rank == chosen_rank) || rank == 0) && (chosen_rank != 0)){
				//rank 0 sends the header to the chosen rank
				if (rank == chosen_rank){
					header=(char *)malloc((headerSize + 1)*sizeof(char));
					MPI_Recv(header, headerSize + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
				}
				if (rank == 0){
					MPI_Send(header, headerSize + 1, MPI_CHAR, chosen_rank,  0, MPI_COMM_WORLD);
				}
			}
			else {
				//we do nothing here
			}

			if (rank == chosen_rank) {

				int counter = 0;
				//we compute the number of 0 in the localReadsNum_vec
				for(i1 = 0; i1 < num_proc; i1++){
					if (localReadsNum_rank0[i1] == 0) {
						counter++;
					}
				}
				// if no job is without reads we do nothing
				if ( counter == 0 ){
					// nothing to do: we associate split_comm with MPI_COMM_WORLD
					split_comm = MPI_COMM_WORLD;
					for (i2 = 0; i2 < num_proc; i2++) {

						if (localReadsNum_rank0[i2] == 0) {
							color_vec_to_send[i2] = 1;
							key_vec_to_send[i2] = val_tmp2;
							val_tmp2++;
						} else {
							color_vec_to_send[i2] = 0;
							key_vec_to_send[i2] = val_tmp1;
							val_tmp1++;
						}
					}
				}
				else{
					// now we compute the color according to
					// the number of reads to sort
					for(i2 = 0; i2 < num_proc; i2++){
						if (localReadsNum_rank0[i2] == 0){
							color_vec_to_send[i2] = 1;
							key_vec_to_send[i2] = val_tmp2;
							val_tmp2++;
						} else{
							color_vec_to_send[i2] = 0;
							key_vec_to_send[i2] = val_tmp1;
							val_tmp1++;
						}
					} // end for loop
				}// end if
			}// end if (rank == chosen_rank)

			MPI_Barrier(MPI_COMM_WORLD);
			// we scatter the key and color vector
			// we create key and color variable for each job
			int local_color = 0;
			int local_key = 0;
			// we scatter the color and key
			MPI_Scatter( color_vec_to_send, 1, MPI_INT, &local_color, 1, MPI_INT, chosen_rank, MPI_COMM_WORLD);
			MPI_Scatter( key_vec_to_send, 1, MPI_INT, &local_key, 1, MPI_INT, chosen_rank, MPI_COMM_WORLD);
			// we create a communicator
			// we group all communicator
			// with color of zero
			if (local_color == 0){

				MPI_Comm_split( MPI_COMM_WORLD, local_color, local_key, &split_comm);
				ierr = MPI_File_open(split_comm, file_name,  MPI_MODE_RDONLY , finfo, &mpi_file_split_comm2);
				//we ask to liberate file pointer
				file_pointer_to_free = 1;
				//we ask to liberate the split_comm
				split_comm_to_free = 1;
			}
			else{
				MPI_Comm_split( MPI_COMM_WORLD, MPI_UNDEFINED, local_key, &split_comm);
				mpi_file_split_comm2 = mpi_filed;
			}

			//now we change the rank in the reads structure
			if (local_color == 0){
				MPI_Comm_rank(split_comm, &split_rank);
				MPI_Comm_size(split_comm, &split_size);

				g_rank = split_rank;
				g_size = split_size;

				reads[nbchr-s] = reads[nbchr-s]->next;
				localReadNumberByChr[nbchr-s] = readNumberByChr[nbchr-s];
				if (s == 2){
					unmapped_start = startOffset(g_rank,
												 g_size,
												 unmappedSize,
												 headerSize,
												 nbchr-s,
												 localReadNumberByChr[nbchr-s],
												 split_comm
												 );

					if(!unmapped_start){
						fprintf(stderr, "No header was defined for unmapped. \n Shutting down.\n");
						MPI_Finalize();
						return 0;
					}

					time_count = MPI_Wtime();
					writeSam_discordant_and_unmapped(
							split_rank,
							output_dir,
							header,
							localReadNumberByChr[nbchr-s],
							chrNames[nbchr-s],
							reads[nbchr-s],
							split_size,
							split_comm,
							file_name,
							mpi_file_split_comm2,
							finfo,
							compression_level,
							local_data,
							goff[rank],
							write_sam);

					if (split_rank == chosen_rank){
							fprintf(stderr,	"rank %d :::::[MPISORT] Time to write chromosom %s ,  %f seconds \n\n\n", split_rank,
									chrNames[nbchr-s], MPI_Wtime() - time_count);
					}
				}
				else{
					discordant_start = startOffset(g_rank,
												   g_size,
												   discordantSize,
												   headerSize,
												   nbchr-s,
												   localReadNumberByChr[nbchr-s],
												   split_comm);

					if(!discordant_start){
						fprintf(stderr, "No header was defined for discordant.\n Shutting down.\n");
						MPI_Finalize();
						return 0;
					}
					time_count = MPI_Wtime();

					writeSam_discordant_and_unmapped(
							g_rank,
							output_dir,
							header,
							localReadNumberByChr[nbchr-s],
							chrNames[nbchr-s],
							reads[nbchr-s],
							g_size,
							split_comm,
							file_name,
							mpi_file_split_comm2,
							finfo,
							compression_level,
							local_data,
							goff[rank],
							write_sam
							);


					if (split_rank == chosen_rank){
							fprintf(stderr,	"rank %d :::::[MPISORT] Time to write chromosom %s ,  %f seconds \n\n\n", split_rank,
								chrNames[nbchr-s], MPI_Wtime() - time_count);
					}

				}
				while( reads[nbchr-s]->next != NULL){
						Read *tmp_chr = reads[nbchr-s];
						reads[nbchr-s] = reads[nbchr-s]->next;
						free(tmp_chr);
				}
				free(localReadsNum_rank0);
			}
			else{
				// we do nothing
			}

			//we put a barrier before freeing pointers
			MPI_Barrier(MPI_COMM_WORLD);
			//we free the file pointer

			if  (file_pointer_to_free)
				MPI_File_close(&mpi_file_split_comm2);

			//we free the split_comm
			if (split_comm_to_free)
				MPI_Comm_free(&split_comm);

			split_comm_to_free = 0;
			file_pointer_to_free = 0;

			free(color_vec_to_send);
			free(key_vec_to_send);

		}
	} //end for (s=1; s < 3; s++){

	/*
	 *  We write the mapped reads in a file named chrX.bam
	 *	We loop over chromosomes.
	 */

	MPI_Barrier(MPI_COMM_WORLD);
	for(i = 0; i < (nbchr-2); i++){

		/*
		 * First part of the algorithm.
		 *
		 * In this part we elect a rank, which is the first rank
		 * to have reads to sort.
		 *
		 * Once a rank is elected, we split the communicator according to
		 * whether the rank has reads to sort for this chromosome.
		 *
		 * The new communicator is COMM_WORLD.
		 *
		 * If all jobs have reads to sort there is no need to split the communicator, and then
		 * COMM_WORLD = MPI_COMM_WORLD.
		 *
		 */

		int i1,i2;
		size_t localReadsNum_rank0[num_proc];
		localReadsNum_rank0[0]=0;
		int file_pointer_to_free = 0;
		int split_comm_to_free = 0;
		//we build a vector with rank job
		int val_tmp1 = 0;
		int val_tmp2 = 0;
		int chosen_rank = 0; //needed to tell what rank is going to compute the color and key
		int chosen_split_rank= 0; //the rank that collects data once the communicator is split; normally this rank is 0

		// the color tells which communicator the rank belongs to
		// color = 0 will be the new communicator color
		// otherwise the color is 1
		// the key value tells the order in the new communicator
		int *color_vec_to_send 	=  malloc(num_proc * sizeof(int));
		int *key_vec_to_send 	=  malloc(num_proc * sizeof(int));

		// first we test whether there are reads to sort
		// every rank receives the sum of all the read counts
		size_t total_reads_by_chr = 0;
		MPI_Allreduce(&readNumberByChr[i], &total_reads_by_chr, 1, MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);

		//fprintf(stderr, "rank %d :::: readNumberByChr[i] = %zu \n", rank, readNumberByChr[i]);
		//fprintf(stderr, "rank %d :::: total_reads_by_chr = %zu \n", rank, total_reads_by_chr);

		if (total_reads_by_chr == 0)
			continue; //pass to next chromosome

		//all ranks gather the vector
		MPI_Allgather(&readNumberByChr[i] , 1, MPI_LONG_LONG_INT, localReadsNum_rank0 , 1, MPI_LONG_LONG_INT, MPI_COMM_WORLD);


		if (rank == 0){
			//rank 0 chooses the first rank with reads to sort
			i1=0;
			while ((i1 < num_proc) && (localReadsNum_rank0[i1] == 0)){
				chosen_rank++;
				i1++;
			}
			fprintf(stderr, "rank %d :::: Elected rank = %d \n", rank, chosen_rank);
		}

		//we broadcast the chosen rank
		//task: replace the broadcast with a send/receive
		MPI_Bcast( &chosen_rank, 1, MPI_INT, 0, MPI_COMM_WORLD);
		MPI_Barrier(MPI_COMM_WORLD);

		if (((rank == chosen_rank) || rank == 0) && (chosen_rank != 0)){

			//first we exchange the size o
			if (rank == chosen_rank){
				header = malloc((headerSize + 1)*sizeof(char));
				header[headerSize] = '\0';
				MPI_Recv(header, headerSize + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
			}
			if (rank == 0){
				MPI_Send(header, headerSize + 1, MPI_CHAR, chosen_rank,  0, MPI_COMM_WORLD);
			}
		}
		else {
			//we do nothing here
		}

		MPI_Barrier(MPI_COMM_WORLD);

		if (rank == chosen_rank) {
			int counter = 0;
			//we compute the number of 0 in the localReadsNum_vec
			for(i1 = 0; i1 < num_proc; i1++){
				if (localReadsNum_rank0[i1] == 0) {
						counter++;
					}
			}
			// if no job is without reads we do nothing
			if ( counter == 0 ){
				// nothing to do: we associate split_comm with MPI_COMM_WORLD
				fprintf(stderr, "rank %d ::::[MPISORT] we don't split the rank \n", rank);
				split_comm = MPI_COMM_WORLD;
				for (i2 = 0; i2 < num_proc; i2++) {
					if (localReadsNum_rank0[i2] == 0) {
						color_vec_to_send[i2] = 1;
						key_vec_to_send[i2] = val_tmp2;
						val_tmp2++;
					} else {
						color_vec_to_send[i2] = 0;
						key_vec_to_send[i2] = val_tmp1;
						val_tmp1++;
					}
				}
			}
			else{
				// now we compute the color according to
				// the number of reads to sort
				fprintf(stderr, "rank %d ::::[MPISORT] we split the rank \n", rank);
				for(i2 = 0; i2 < num_proc; i2++){
					if (localReadsNum_rank0[i2] == 0){
						color_vec_to_send[i2] = 1;
						key_vec_to_send[i2] = val_tmp2;
						val_tmp2++;
					} else{
						color_vec_to_send[i2] = 0;
						key_vec_to_send[i2] = val_tmp1;
						val_tmp1++;
					}
				} // end for loop
			}// end if
		}// end if (rank == chosen_rank)

		MPI_Barrier(MPI_COMM_WORLD);
		//we create key and color variable for each job
		int local_color = 0;
		int local_key = 0;
		// the chosen rank scatters the color and the key vectors
		MPI_Scatter( color_vec_to_send, 1, MPI_INT, &local_color, 1, MPI_INT, chosen_rank, MPI_COMM_WORLD);
		MPI_Scatter( key_vec_to_send, 1, MPI_INT, &local_key, 1, MPI_INT, chosen_rank, MPI_COMM_WORLD);
		MPI_Barrier(MPI_COMM_WORLD);
		// now we create a communicator
		// we group all communicator
		// with color of zero
		if (local_color == 0){
			MPI_Comm_split( MPI_COMM_WORLD, local_color, local_key, &split_comm);
			ierr = MPI_File_open(split_comm, file_name,  MPI_MODE_RDONLY, finfo, &mpi_file_split_comm);
			//we ask to liberate file pointer
			file_pointer_to_free = 1;
			//we ask to liberate the split_comm
			split_comm_to_free = 1;
		}
		else{
			MPI_Comm_split( MPI_COMM_WORLD, MPI_UNDEFINED, local_key, &split_comm);
			mpi_file_split_comm = mpi_filed;
		}

		//now we change the rank in the reads structure
		if (local_color == 0){

			MPI_Comm_rank(split_comm, &split_rank);
			MPI_Comm_size(split_comm, &split_size);
			
			//we update g_rank
			g_rank = split_rank;
			g_size = split_size;
		}
		else{
			g_rank = split_rank;			
			g_size = split_size = num_proc;
		}

		localReadNumberByChr[i] = readNumberByChr[i];
		MPI_Barrier(MPI_COMM_WORLD);

		if ((local_color == 0) && (i < (nbchr - 2))) {

			/*
			 * Second part of the algorithm
			 *
			 * First we load coordinates, offset sources, and read size in vector
			 *
			 * Then we sort the coordinates of the reads
			 * with a bitonic sorter
			 *
			 * Then, according to the read coordinates, we reorder the offset sources and sizes;
			 * this is done thanks to the index of the sort.
			 *
			 * Afterward we compute the offsets of the reads in
			 * the destination file.
			 *
			 * Finally we dispatch the information to all ranks
			 * in the communicator for the next step.
			 */

			//we do a local merge sort
			if(reads[i] && reads[i]->next && reads[i]->next->next){
				mergeSort(reads[i], readNumberByChr[i]);
			}

			size_t local_readNum = localReadNumberByChr[i];

			reads[i] = reads[i]->next;

			//first we compute the dimension of the parabitonic sort
			// dimension is the number of processors where we
			// perform the bitonic sort
			// int dimensions = (int)(log2(num_processes));
			// find the next power of two (must be greater), then go one back
			int dimensions = 1;
			while (dimensions <= split_size)
				dimensions <<= 1;

			dimensions >>= 1;

			// we get the maximum number of reads among
			// all the workers

			/*
			 * Here we split the program into 2 cases
			 *
			 * 1) The split_size is a power of 2 (the best case):
			 * 		this case is the simplest; we need no extra communication to dispatch
			 * 		the reads evenly between the jobs
			 *
			 * 2) The split_size is not a power of 2 (the worst case):
			 * 		in this case we must dispatch the reads between the jobs evenly.
			 *
			 */

			if (split_rank == chosen_split_rank){

				fprintf(stderr,	"Rank %d :::::[MPISORT] Dimensions for bitonic = %d \n", split_rank, dimensions);
				fprintf(stderr,	"Rank %d :::::[MPISORT] Split size 			   = %d \n", split_rank, split_size);

			}
			//we test the computed dimension
			if (dimensions == split_size ){

				size_t max_num_read = 0;
				MPI_Allreduce(&localReadNumberByChr[i], &max_num_read, 1, MPI_LONG_LONG_INT, MPI_MAX, split_comm);

				// if the dimension == split_size
				MPI_Barrier(split_comm);

				size_t first_local_readNum = local_readNum;

				/*
				 * Vector creation and allocation
				 */
				local_readNum = max_num_read;

				time_count = MPI_Wtime();

				size_t *local_reads_coordinates_unsorted 	= calloc(local_readNum, sizeof(size_t));
				size_t *local_reads_coordinates_sorted 		= calloc(local_readNum, sizeof(size_t));
				size_t *local_offset_source_unsorted 		= calloc(local_readNum, sizeof(size_t));
				size_t *local_offset_source_sorted 			= calloc(local_readNum, sizeof(size_t));
				int *local_dest_rank_sorted 				= calloc(local_readNum, sizeof(int));
				int *local_reads_sizes_unsorted 			= calloc(local_readNum, sizeof(int));
				int *local_reads_sizes_sorted 				= calloc(local_readNum, sizeof(int));
				int *local_source_rank_unsorted 			= calloc(local_readNum, sizeof(int));
				int *local_source_rank_sorted 				= calloc(local_readNum, sizeof(int));

				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT][MALLOC 1] time spent = %f s\n", split_rank, MPI_Wtime() - time_count);

				local_reads_coordinates_unsorted[0] = 0;
				local_reads_coordinates_sorted[0] 	= 0;
				local_dest_rank_sorted[0] 			= 0;
				local_reads_sizes_unsorted[0] 		= 0;
				local_reads_sizes_sorted[0] 		= 0;
				local_source_rank_unsorted[0] 		= 0;
				local_source_rank_sorted[0] 		= 0;
				local_offset_source_unsorted[0] 	= 0;
				local_offset_source_sorted[0] 		= 0;

				//these vectors are the same as the local_..._sorted ones but without zero padding
				size_t *local_reads_coordinates_sorted_trimmed = NULL;
				int *local_dest_rank_sorted_trimmed = NULL;
				int *local_reads_sizes_sorted_trimmed = NULL;
				size_t *local_offset_source_sorted_trimmed = NULL;
				size_t *local_offset_dest_sorted_trimmed = NULL;
				int *local_source_rank_sorted_trimmed = NULL;

				//vectors used in the bruck just after the parabitonic sort
				size_t *local_reads_coordinates_sorted_trimmed_for_bruck = NULL;
				int *local_dest_rank_sorted_trimmed_for_bruck = NULL;
				int *local_reads_sizes_sorted_trimmed_for_bruck = NULL;
				size_t *local_offset_source_sorted_trimmed_for_bruck = NULL;
				size_t *local_offset_dest_sorted_trimmed_for_bruck = NULL;
				int *local_source_rank_sorted_trimmed_for_bruck = NULL;


				//task Init offset and size for source - free chr
				// from mpiSort_utils.c
				get_coordinates_and_offset_source_and_size_and_free_reads(
						split_rank,
						local_source_rank_unsorted,
						local_reads_coordinates_unsorted,
						local_offset_source_unsorted,
						local_reads_sizes_unsorted,
						reads[i],
						first_local_readNum
				);

				//init indices for qksort
				size_t *coord_index = (size_t*)malloc(local_readNum*sizeof(size_t));

				for(j = 0; j < local_readNum; j++){
					coord_index[j] = j;
				}

				//To start we sort locally the reads coordinates.
				//this is to facilitate the bitonic sorting
				//if the local coordinates to sort are too big we could get rid of
				//this step.
				time_count = MPI_Wtime();

				base_arr2 = local_reads_coordinates_unsorted;
				qksort(coord_index, local_readNum, sizeof(size_t), 0, local_readNum - 1, compare_size_t);

				if (split_rank == chosen_split_rank)
						fprintf(stderr,	"rank %d :::::[MPISORT][LOCAL SORT] time spent = %f s\n", split_rank, MPI_Wtime() - time_count);

				//We index data
				for(j = 0; j < local_readNum; j++){
					local_reads_coordinates_sorted[j] 			= local_reads_coordinates_unsorted[coord_index[j]];
					local_source_rank_sorted[j] 				= local_source_rank_unsorted[coord_index[j]];
					local_reads_sizes_sorted[j] 				= local_reads_sizes_unsorted[coord_index[j]];
					local_offset_source_sorted[j] 				= local_offset_source_unsorted[coord_index[j]];
					local_dest_rank_sorted[j] 					= rank; //will be updated after sorting the coordinates
				}

				/*
				*   FOR DEBUG
				*  
					

				for(j = 0; j < local_readNum - 1; j++){
					assert( local_reads_coordinates_sorted[j] < local_reads_coordinates_sorted[j+1]);
				}
				*/

				free(coord_index); 				 		//ok
				free(local_source_rank_unsorted); 	    //ok
				free(local_reads_coordinates_unsorted); //ok
				free(local_reads_sizes_unsorted); 		//ok
				free(local_offset_source_unsorted); 	//ok

				// we need the total number of reads.
				size_t total_num_read = 0;
				MPI_Allreduce(&localReadNumberByChr[i], &total_num_read, 1, MPI_LONG_LONG_INT, MPI_SUM, split_comm);

				/*
				 *
				 * In this section the number of bitonic dimension
				 * is equal to the split size.
				 *
				 * In this case there is less communication in preparation
				 * for the sorting.
				 *
				 * We use the parabitonic version 2.
				 */

				//we call the bitonic sort

				time_count = MPI_Wtime();

				ParallelBitonicSort2(
					split_comm,
					split_rank,
					dimensions,
					local_reads_coordinates_sorted,
					local_reads_sizes_sorted,
					local_source_rank_sorted,
					local_offset_source_sorted,
					local_dest_rank_sorted,
					max_num_read
					);

				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2] time spent = %f s\n",
											split_rank, MPI_Wtime() - time_count);
				size_t k1;
				size_t tmp2 = 0;
				for (k1 = 1; k1 < max_num_read; k1++){
					assert(local_reads_coordinates_sorted[k1-1] <= local_reads_coordinates_sorted[k1]);
					local_dest_rank_sorted[k1]= split_rank;
				}
				/*
				for (k1 = 0; k1 < max_num_read; k1++){
					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2]  local_reads_coordinates_sorted[%zu]= %zu s\n",
											split_rank, k1, local_reads_coordinates_sorted[k1]);

					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2]  local_source_rank_sorted[%zu]= %d s\n",
											split_rank, k1, local_source_rank_sorted[k1]);							
				}
				*/
				size_t *local_offset_dest_sorted = malloc(max_num_read*sizeof(size_t));
				size_t last_local_offset = 0;


				// We compute the local_dest_offsets_sorted
				size_t local_total_offset = 0;

				for (k1 = 0; k1 <  max_num_read; k1++){
					local_offset_dest_sorted[k1] = local_reads_sizes_sorted[k1];
					local_total_offset += local_reads_sizes_sorted[k1];
				}

				//we make the cumulative sum of all offsets
				for (k1 = 1; k1 < max_num_read; k1++){
					local_offset_dest_sorted[k1] = local_offset_dest_sorted[k1 - 1] + local_offset_dest_sorted[k1];
				}

				//we exchange the last destination offset
				last_local_offset = local_offset_dest_sorted[max_num_read-1];


				//number of block to send
				int blocksize = 1;

				MPI_Offset *y  = calloc(split_size, sizeof(MPI_Offset));
				MPI_Offset *y2 = calloc(split_size + 1, sizeof(MPI_Offset));

				//we gather the last local offsets on rank 0

				MPI_Gather(&last_local_offset, 1, MPI_LONG_LONG_INT, y, 1, MPI_LONG_LONG_INT, 0, split_comm);

				if (split_rank ==0){
					for (k1 = 1; k1 < (split_size + 1); k1++) {
						y2[k1] = y[k1-1];
					}
				}

				if (split_rank ==0){
					for (k1 = 1; k1 < (split_size +1); k1++) {
						y2[k1] = y2[k1-1] + y2[k1];
					}
				}

				size_t offset_to_add = 0;
				MPI_Scatter(y2, 1, MPI_LONG_LONG_INT, &offset_to_add, 1, MPI_LONG_LONG_INT, 0, split_comm);

				free(y);
				free(y2);

				//we add offset of the previous rank
				for (k1 = 0; k1 < max_num_read; k1++){
					if (local_reads_sizes_sorted[k1] != 0)
						local_offset_dest_sorted[k1] += offset_to_add;
					else
						local_offset_dest_sorted[k1] = 0;
				}


				/*
				for (k1 = 0; k1 < max_num_read; k1++){

					fprintf(stderr, "\n");

					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2]  local_reads_coordinates_sorted[%zu]= %zu s\n",
											split_rank, k1, local_reads_coordinates_sorted[k1]);

					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2]  local_source_rank_sorted[%zu]= %d s\n",
											split_rank, k1, local_source_rank_sorted[k1]);							
				

					fprintf(stderr,	"rank %d :::::[MPISORT][BITONIC 2]  local_offset_dest_sorted[%zu]= %d s\n",
											split_rank, k1, local_offset_dest_sorted[k1]);							
				
					fprintf(stderr, "\n");
				}
				*/

				/*
				 * we update destination rank according to
				 * original number of reads read.
				 *
				 */

				//we compute the new rank dest according to max_num_read
				size_t previous_num_reads_per_job[dimensions];
				//we create a vector of size split_size with previous reads per job
				MPI_Allgather(&first_local_readNum , 1, MPI_LONG_LONG_INT, previous_num_reads_per_job , 1, MPI_LONG_LONG_INT, split_comm);

				// we compute the position of the read in the first
				// reference without the zero padding of bitonic
				long long pos_ref0 = 0; // signed: it can be negative inside the zero padding

				//we need the number of zeros we add for the padding
				size_t N0 = max_num_read*dimensions - total_num_read;

				int new_rank = 0;
				int previous_rank = 0;
				// we compute the new rank for
				// the reads sorted by offset destination
				size_t h = 0;


				pos_ref0 = (long long)(max_num_read*split_rank) - (long long)N0;
				for(j = 0; j < max_num_read; j++) {
					if ( local_reads_sizes_sorted[j] != 0){
						int new_rank = chosen_split_rank;
						pos_ref0 = (long long)(max_num_read*split_rank + j) - (long long)N0;
						if (pos_ref0 >= 0) {
							size_t tmp2 = 0;
							for (h = 0; h < dimensions; h++){
								tmp2 += previous_num_reads_per_job[h];
								if ( (size_t)pos_ref0 < tmp2)  {
									new_rank = h;
									break;
									}
								}
							previous_rank = local_dest_rank_sorted[j];
							local_dest_rank_sorted[j] = new_rank;
						}
					}
				}

				MPI_Barrier(split_comm);

				size_t offset  = 0;
				size_t numItems = 0;
				size_t num_read_for_bruck = 0;
				int *p = local_reads_sizes_sorted;
				if (p[0] != 0) {offset = 0;};
				if (p[max_num_read -1] == 0){offset = max_num_read;}
				else {while ((*p == 0) && (offset < max_num_read )){ offset++; p++;}}

				/*
				 * REMOVE ZERO PADDING BEFORE BRUCK
				 *
				 */

				time_count = MPI_Wtime();

				if (offset > 0){

					// we remove zeros from the vector; we have 2 cases:
					// either the first offset < max_num_read,
					// or the entire vector is null
					if ( offset < max_num_read ){

						numItems = max_num_read - offset;

						local_reads_coordinates_sorted_trimmed_for_bruck    = malloc(numItems * sizeof(size_t));
						local_offset_source_sorted_trimmed_for_bruck        = malloc(numItems * sizeof(size_t));
						local_offset_dest_sorted_trimmed_for_bruck			= malloc(numItems * sizeof(size_t));
						local_reads_sizes_sorted_trimmed_for_bruck          = malloc(numItems * sizeof(int));
						local_dest_rank_sorted_trimmed_for_bruck            = malloc(numItems * sizeof(int));
						local_source_rank_sorted_trimmed_for_bruck 		    = malloc(numItems * sizeof(int));
						size_t y=0;

						for (y = 0; y < numItems; y++){

							local_reads_coordinates_sorted_trimmed_for_bruck[y]    = local_reads_coordinates_sorted[y+offset];
							local_offset_source_sorted_trimmed_for_bruck[y]        = local_offset_source_sorted[y+offset];
							local_offset_dest_sorted_trimmed_for_bruck[y]		   = local_offset_dest_sorted[y+offset];
							local_reads_sizes_sorted_trimmed_for_bruck[y]          = local_reads_sizes_sorted[y+offset];
							local_dest_rank_sorted_trimmed_for_bruck[y]            = local_dest_rank_sorted[y+offset];
							local_source_rank_sorted_trimmed_for_bruck[y] 		   = local_source_rank_sorted[y+offset];
						}

						num_read_for_bruck = numItems;

						/*
						 *
						 * FOR DEBUG
						 *

						for(y = 0; y < num_read_for_bruck; y++){
							assert( local_reads_sizes_sorted_trimmed_for_bruck[y] 		!= 0 );
							assert( local_source_rank_sorted_trimmed_for_bruck[y] 		< dimensions);
							assert( local_dest_rank_sorted_trimmed_for_bruck[y]   		< dimensions);
							assert( local_offset_source_sorted_trimmed_for_bruck[y] 	!= 0);
							assert( local_offset_dest_sorted_trimmed_for_bruck[y] 	    != 0);
							assert( local_reads_coordinates_sorted_trimmed_for_bruck[y] != 0);
						}
						*/

					}
					else{

						numItems = 0;
						local_reads_coordinates_sorted_trimmed_for_bruck    = malloc(numItems * sizeof(size_t));
						local_offset_source_sorted_trimmed_for_bruck        = malloc(numItems * sizeof(size_t));
						local_offset_dest_sorted_trimmed_for_bruck          = malloc(numItems * sizeof(size_t));
						local_reads_sizes_sorted_trimmed_for_bruck          = malloc(numItems * sizeof(int));
						local_dest_rank_sorted_trimmed_for_bruck            = malloc(numItems * sizeof(int));
						local_source_rank_sorted_trimmed_for_bruck 		    = malloc(numItems * sizeof(int));
						num_read_for_bruck = 0;
					}
				}
				else {

					numItems = local_readNum;
					local_reads_coordinates_sorted_trimmed_for_bruck    = malloc(local_readNum * sizeof(size_t));
					local_offset_source_sorted_trimmed_for_bruck        = malloc(local_readNum * sizeof(size_t));
					local_offset_dest_sorted_trimmed_for_bruck          = malloc(local_readNum * sizeof(size_t));
					local_reads_sizes_sorted_trimmed_for_bruck          = malloc(local_readNum * sizeof(int));
					local_dest_rank_sorted_trimmed_for_bruck            = malloc(local_readNum * sizeof(int));
					local_source_rank_sorted_trimmed_for_bruck 		    = malloc(local_readNum * sizeof(int));

					size_t y=0;
					for (y = 0; y < local_readNum; y++){

						local_reads_coordinates_sorted_trimmed_for_bruck[y]    = local_reads_coordinates_sorted[y];
						local_offset_source_sorted_trimmed_for_bruck[y]        = local_offset_source_sorted[y];
						local_offset_dest_sorted_trimmed_for_bruck[y]          = local_offset_dest_sorted[y];
						local_reads_sizes_sorted_trimmed_for_bruck[y]          = local_reads_sizes_sorted[y];
						local_dest_rank_sorted_trimmed_for_bruck[y]            = local_dest_rank_sorted[y];
						local_source_rank_sorted_trimmed_for_bruck[y] 		   = local_source_rank_sorted[y];
					}

					num_read_for_bruck = numItems;

					/*
					 *
					 * FOR DEBUG
					 *
					for(y = 0; y < num_read_for_bruck; y++){
						assert( local_reads_sizes_sorted_trimmed_for_bruck[y] 		!= 0 );
						assert( local_source_rank_sorted_trimmed_for_bruck[y] 		< dimensions);
						assert( local_dest_rank_sorted_trimmed_for_bruck[y]   		< dimensions);
						assert( local_offset_source_sorted_trimmed_for_bruck[y] 	!= 0);
						assert( local_offset_dest_sorted_trimmed_for_bruck[y] 	    != 0);
						assert( local_reads_coordinates_sorted_trimmed_for_bruck[y] != 0);
					}
					*/
				}

				free(local_reads_coordinates_sorted);
				free(local_offset_source_sorted);
				free(local_offset_dest_sorted);
				free(local_reads_sizes_sorted);
				free(local_dest_rank_sorted);
				free(local_source_rank_sorted);


				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT][TRIMMING] time spent = %f s\n", split_rank, MPI_Wtime() - time_count);

				/*
				 * We do a Bruck on rank of origin reading
				 */

				size_t m=0;
				int num_proc = dimensions;
				size_t *number_of_reads_by_procs = calloc( dimensions, sizeof(size_t));

				//fprintf(stderr,	"rank %d :::::[MPISORT] num_read_for_bruck = %zu \n", split_rank, num_read_for_bruck);

				for(m = 0; m < num_read_for_bruck; m++){
					 //assert(new_pbs_orig_rank_off_phase1[m] < dimensions);
					 //assert(new_pbs_dest_rank_phase1[m] < dimensions);
					 number_of_reads_by_procs[local_source_rank_sorted_trimmed_for_bruck[m]]++;
				}

				int *local_source_rank_sorted_trimmed_for_bruckv2 = malloc( num_read_for_bruck * sizeof(int));

				for(m = 0; m < num_read_for_bruck; m++){
					local_source_rank_sorted_trimmed_for_bruckv2[m] = local_source_rank_sorted_trimmed_for_bruck[m];
				}

				size_t count6 = 0;
				for(m = 0; m < dimensions; m++){
					count6 += number_of_reads_by_procs[m];
				}

				assert( count6 == num_read_for_bruck );
				MPI_Barrier(split_comm);

				size_t **reads_coordinates 		= malloc(sizeof(size_t *) * dimensions);
				size_t **local_source_offsets 	= malloc(sizeof(size_t *) * dimensions);
				size_t **dest_offsets 			= malloc(sizeof(size_t *) * dimensions);
				int **read_size 				= malloc(sizeof(int *) * dimensions);
				int **dest_rank 				= malloc(sizeof(int *) * dimensions);
				int **source_rank				= malloc(sizeof(int *) * dimensions);

				/*
				 * We send in order
				 *
				 * local_offset_source_sorted_trimmed_for_bruck
				 * local_dest_rank_sorted_trimmed_for_bruck
				 * local_reads_coordinates_sorted_trimmed_for_bruck
				 * local_reads_sizes_sorted_trimmed_for_bruck
				 *
				 */

				COMM_WORLD = split_comm;
				time_count = MPI_Wtime();

				bruckWrite3(split_rank,
							dimensions,
							count6,
							number_of_reads_by_procs,
							local_source_rank_sorted_trimmed_for_bruckv2,
							local_offset_source_sorted_trimmed_for_bruck,     //offset sources
							&local_source_offsets,
							local_dest_rank_sorted_trimmed_for_bruck,     	  //destination rank
							&dest_rank,
							local_reads_coordinates_sorted_trimmed_for_bruck, //reads coordinates
							&reads_coordinates,
							local_reads_sizes_sorted_trimmed_for_bruck,       //read size
							&read_size,
							local_source_rank_sorted_trimmed_for_bruck,		  //source rank
							&source_rank,
							local_offset_dest_sorted_trimmed_for_bruck,
							&dest_offsets
				);

				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT][BRUCK 3] time spent = %f s\n",
							split_rank, MPI_Wtime() - time_count);


				time_count = MPI_Wtime();

				free(local_reads_coordinates_sorted_trimmed_for_bruck);
				free(local_dest_rank_sorted_trimmed_for_bruck);
				free(local_reads_sizes_sorted_trimmed_for_bruck);
				free(local_offset_source_sorted_trimmed_for_bruck);
				free(local_offset_dest_sorted_trimmed_for_bruck);
				free(local_source_rank_sorted_trimmed_for_bruck);
				free(local_source_rank_sorted_trimmed_for_bruckv2);

				local_reads_coordinates_sorted_trimmed 	  = malloc(first_local_readNum * sizeof(size_t));
				local_offset_source_sorted_trimmed   	  = malloc(first_local_readNum * sizeof(size_t));
				local_offset_dest_sorted_trimmed   	  	  = malloc(first_local_readNum * sizeof(size_t));
				local_dest_rank_sorted_trimmed   		  = malloc(first_local_readNum * sizeof(int));
				local_source_rank_sorted_trimmed		  = malloc(first_local_readNum * sizeof(int));
				local_reads_sizes_sorted_trimmed		  = malloc(first_local_readNum * sizeof(int));

				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT][FREE + MALLOC] time spent = %f s\n",
											split_rank, MPI_Wtime() - time_count);
				/*
				 * GET DATA AFTER BRUCK
				 *
				 */

				j=0;
				size_t k = 0;

				for(m = 0; m < num_proc; m++)
				{
					for(k = 0; k < number_of_reads_by_procs[m]; k++)
					{
						
						local_offset_dest_sorted_trimmed[k + j] 		= dest_offsets[m][k];
						local_dest_rank_sorted_trimmed[k + j] 			= dest_rank[m][k];
						local_reads_sizes_sorted_trimmed[k + j] 		= read_size[m][k];
						local_offset_source_sorted_trimmed[k + j] 		= local_source_offsets[m][k];
						local_reads_coordinates_sorted_trimmed[k + j] 	= reads_coordinates[m][k];
						local_source_rank_sorted_trimmed[k + j] 		= source_rank[m][k];

					}
					free(dest_offsets[m]);
					free(dest_rank[m]);
					free(read_size[m]);
					free(local_source_offsets[m]);
					free(reads_coordinates[m]);
					free(source_rank[m]);
					j += number_of_reads_by_procs[m];
				}


				free(number_of_reads_by_procs);
				if (dest_rank != NULL)
					free(dest_rank);
				if (read_size != NULL)
					free(read_size);
				if (local_source_offsets != NULL)
					free(local_source_offsets);
				if (reads_coordinates != NULL)
					free(reads_coordinates);
				if (source_rank != NULL)
					free(source_rank);
				if (dest_offsets != NULL)
					free(dest_offsets);

				local_readNum = first_local_readNum;


				/*
				 *
				 * FOR DEBUG
				 *
				for ( j = 0; j < local_readNum; j++){
					assert ( local_reads_coordinates_sorted_trimmed[j]    != 0 );
					assert ( local_offset_source_sorted_trimmed[j]        != 0 );
					assert ( local_offset_dest_sorted_trimmed[j]   		  != 0 );
					assert ( local_reads_sizes_sorted_trimmed 			  != 0 );
					assert ( local_dest_rank_sorted_trimmed[j]            < split_size );
					assert ( local_source_rank_sorted_trimmed[j] 		  < split_size );
				}
				*/

				free(local_reads_coordinates_sorted_trimmed);

				if (split_rank == chosen_split_rank)
					fprintf(stderr,	"rank %d :::::[MPISORT] we call write SAM \n", split_rank);

				malloc_trim(0);

				time_count = MPI_Wtime();

				writeSam(
					split_rank,
					output_dir,
					header,
					local_readNum,
					total_reads_by_chr,
					chrNames[i],
					reads[i],
					split_size,
					split_comm,
					chosen_split_rank,
					file_name,
					mpi_file_split_comm,
					finfo,
					compression_level,
					local_offset_dest_sorted_trimmed,
					local_offset_source_sorted_trimmed,
					local_reads_sizes_sorted_trimmed,
					local_dest_rank_sorted_trimmed,
					local_source_rank_sorted_trimmed,
					local_data,
					goff[rank],
					first_local_readNum
				);

				if (split_rank == chosen_split_rank){
					fprintf(stderr,	"rank %d :::::[MPISORT][WRITESAM] chromosom %s :::  %f seconds\n\n\n",
							split_rank, chrNames[i], MPI_Wtime() - time_count);

				}
			}
			else{

				/*
				 * We are in the case where the number of CPUs is
				 * not a power of 2
				 *
				 *
				 */

				parallel_sort_any_dim(
						dimensions, 				//dimension for parabitonic
						local_readNum,
						split_rank,
						split_size,
						reads,
						i, 							//chromosom number
						chosen_split_rank,
						split_comm,
						localReadNumberByChr,
						local_data,
						file_name,
						output_dir,
						finfo,
						compression_level,
						total_reads_by_chr,
						goff[rank],
						headerSize,
						header,
						chrNames[i],
						mpi_file_split_comm
					);

			} //end if dimensions != split_size

		} //if ((local_color == 0) && (i < (nbchr - 2))) //in the split communicator
		else{
			//we do nothing here
		}

		//we put a barrier before freeing pointers
		MPI_Barrier(MPI_COMM_WORLD);
		//we free the file pointer
		if  (file_pointer_to_free)
			MPI_File_close(&mpi_file_split_comm);
		//we free the split_comm
		if (split_comm_to_free){
			MPI_Comm_free(&split_comm);
		}

		free(color_vec_to_send);
		free(key_vec_to_send);

	}// end loop over chromosomes (line 665)
Code Example #12
File: sm-rijndael.c Project: tyagodm/AES-PARALELO
int main( int argc, char *argv[] )
{
    unsigned int itr;

    int operacao;
    int verbose;
    int juntar;
    char * chave_file;
    char * entrada_file;
    char * saida_file;

    octeto Nb,Nk,Nr;
    octeto bloco[4*8];
    octeto chave[4*8*15];

    int worldsize, rank;
    MPI_Status status;
    MPI_File chave_handle;
    MPI_File entrada_handle;
    MPI_File saida_handle;

    MPI_Offset entrada_bytes;
    unsigned int numero_blocos;
    unsigned int blocos_processo;
    MPI_Offset bloco_byte_inicio;
    MPI_Offset bloco_byte_fim;
    MPI_Offset iterador;

    Tabela * tabela;
    octeto * tabelaEmpacotada;
    unsigned int proc;
    unsigned int tamanho_tabela;
    Tabela * tabela2;
    unsigned int no_proc;
    unsigned int no_resto;
    unsigned int i;
    BTreeNode * node;
    Indice * indice;


    MPI_Init(&argc,&argv);

    MPI_Comm_size(MPI_COMM_WORLD,&worldsize);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    operacao = INDEFINIDA;
    verbose = 0;
    juntar = 0;
    chave_file = NULL;
    entrada_file = NULL;
    saida_file = NULL;
    for(itr = 1;itr < (unsigned int)argc;itr++)
    {
/* Usage instructions */
        if( strcmp(argv[itr],"-a") == 0 || strcmp(argv[itr],"--ajuda") == 0 || 
            strcmp(argv[itr],"-h") == 0 || strcmp(argv[itr],"--help") == 0 )
        {
            if(rank == 0)
            {
                printf(" Uso: mpiexec -n [PROCESSOS] ./sm-rijndael [ARGUMENTO VALOR].\n");
                printf(" Encripta/Decripta um arquivo usando o algoritmo Rijndael(AES) extendido,\n");
                printf(" realizando um pre-processamento de blocos repetidos.\n");
                printf("  Argumentos opcionais:\n");
                printf("   -v,--verbose: Exibe mensagens de conclusao da operacao.\n");
                printf("   -j,--juntar: Concatena as tabelas de cada processo em um mestre.\n");
                printf("  Argumentos obrigatorios:\n");
                printf("   -op,--operacao: Informa se o objetivo da execucao eh encriptar ou decriptar.\n");
                printf("                    * Os valores possiveis sao: \'encriptar\' e \'decriptar\'.\n");
                printf("   -e,-i,--entrada,--input: Caminho e nome do arquivo a ser criptografado.\n");
                printf("   -s,-o,--saida,--output: Caminho e nome do arquivo resultante do processo de criptografia da entrada.\n");
                printf("   -c,-k,--chave,--key: Caminho e nome do arquivo contendo a chave.\n");
                printf("  O arquivo contendo a chave eh em formato binario de acordo com a seguinte especificacao:\n");
                printf("   - O primeiro byte deve conter o tamanho do bloco (em palavras de 4 bytes).\n");
                printf("      * O bloco pode possuir tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - O segundo byte deve conter o tamanho da chave (em palavras de 4 bytes).\n");
                printf("      * Esta aplicacao aceita chaves com tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - Os proximos 4*[tamanho da chave] bytes do arquivo sao os bytes componentes da chave, que\n");
                printf("     devem estar (obrigatoriamente) escritos no formato hexadecimal da linguagem C (0xff).\n");
                printf("   * Eh recomendavel o uso de um editor hexadecimal na construcao do arquivo chave.\n");
            }
            goto finalizando;
        }

/* Juntar: concatenates each process's table into a master one */
        else
        if( strcmp(argv[itr],"-j") == 0 || strcmp(argv[itr],"--juntar") == 0)
        {
            juntar = 1;
        }

/* Verbose: print completion messages */
        else
        if( strcmp(argv[itr],"-v") == 0 || strcmp(argv[itr],"--verbose") == 0)
        {
            verbose = 1;
        }

/* Operation to be performed */
        else
        if( strcmp(argv[itr],"-op") == 0 || strcmp(argv[itr],"--operacao") == 0 )
        {
            if( itr+1 < argc )
            {
                if( strcmp(argv[itr+1],"encriptar") == 0 )
                {
                    operacao = ENCRIPTAR;
                }
                else
                if( strcmp(argv[itr+1],"decriptar") == 0 )
                {
                    operacao = DECRIPTAR;
                }
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Key file */
        else
        if( strcmp(argv[itr],"-c") == 0 || strcmp(argv[itr],"--chave") == 0 || 
            strcmp(argv[itr],"-k") == 0 || strcmp(argv[itr],"--key") == 0 )
        {
            if(itr+1 < argc)
            {
                chave_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Input file */
        else
        if( strcmp(argv[itr],"-e") == 0 || strcmp(argv[itr],"--entrada") == 0 || 
            strcmp(argv[itr],"-i") == 0 || strcmp(argv[itr],"--input") == 0 )
        {
            if(itr+1 < argc)
            {
                entrada_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Output file */
        else 
        if( strcmp(argv[itr],"-s") == 0 || strcmp(argv[itr],"--saida") == 0 || 
            strcmp(argv[itr],"-o") == 0 || strcmp(argv[itr],"--output") == 0 )
        {
            if(itr+1 < argc)
            {
                saida_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }
/* Unknown error */
        else
        {
            if(rank == 0)
            {
                printf("Erro nos argumentos passados.\n");
            }
            goto help;
        }
    }
/* End of argument reading */

    if( operacao == INDEFINIDA || chave_file == NULL || entrada_file == NULL || saida_file == NULL )
    {
        if(rank == 0)
        {
            if( operacao == INDEFINIDA )
                printf("A operacao a ser realizada eh invalida ou nao foi especificada.\n");
            if( chave_file == NULL )
                printf("Esta faltando especificar o arquivo com a chave.\n");
            if( entrada_file == NULL )
                printf("Esta faltando especificar o arquivo de entrada.\n");
            if( saida_file == NULL )
                printf("Esta faltando especificar o arquivo de saida.\n");
        }
        goto help;
    }
/* End of argument handling */

    if( MPI_File_open( MPI_COMM_WORLD, chave_file, MPI_MODE_RDONLY, MPI_INFO_NULL, &chave_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nb,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho de um bloco no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nb< 4 || Nb > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de bloco invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nk,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nk< 4 || Nk > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de chave invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,chave,4*Nk,MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    MPI_File_close( &chave_handle );
    Nr = numero_rodadas(Nb,Nk);
    KeyExpansion(chave,Nb,Nk);

    if( MPI_File_open( MPI_COMM_WORLD, entrada_file, 
            MPI_MODE_RDONLY, 
            MPI_INFO_NULL, &entrada_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo de entrada (%s).\n",entrada_file);
        }
        goto help;
    }

    MPI_File_get_size(entrada_handle,&entrada_bytes);


    if( MPI_File_open( MPI_COMM_WORLD, saida_file, 
            MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL, 
            MPI_INFO_NULL, &saida_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na criacao do arquivo de saida (%s).\n",saida_file);
            printf("Uma possivel causa eh que o arquivo ja exista.\n");
        }
        goto help;
    }

    numero_blocos = ( entrada_bytes / (Nb*4) );
    blocos_processo = numero_blocos / worldsize;

    if( operacao == ENCRIPTAR || operacao == DECRIPTAR )
    {
        bloco_byte_inicio = 4*Nb*blocos_processo*rank;
        bloco_byte_fim = 4*Nb*blocos_processo*(rank+1);

        tabela = novaTabela(Nb*4);

        for( iterador = bloco_byte_inicio ; iterador < bloco_byte_fim ; iterador += (4*Nb) )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            novaOcorrenciaTabela(tabela,bloco,iterador);
        }
        
        iterador = 4*Nb*blocos_processo*worldsize + 4*Nb*rank;
        if( iterador < numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            novaOcorrenciaTabela(tabela,bloco,iterador);
        }
        else if( operacao == ENCRIPTAR  &&  iterador == numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }
            bloco[ 4*Nb - 1 ] = (octeto)(entrada_bytes - numero_blocos*4*Nb);
            novaOcorrenciaTabela(tabela,bloco,iterador);
        }


        if( juntar == 1 )
        {
            tabelaEmpacotada = (octeto*)malloc( entrada_bytes );
            if( rank == 0 ) /* Master that will concatenate all the trees */
            {
                for(proc=1;proc<worldsize;proc++)
                {
                    MPI_Recv( tabelaEmpacotada, entrada_bytes, MPI_BYTE, MPI_ANY_SOURCE, TAG_TABELA_EMPACOTADA, MPI_COMM_WORLD, &status );
                    desempacotarInserindo(tabelaEmpacotada,tabela);
                }
                
                tamanho_tabela = numeroBlocosTabela(tabela);

                no_proc = (tamanho_tabela / worldsize);
                no_resto = (tamanho_tabela % worldsize);
                
                tabela2 = novaTabela(Nb*4);
                for(proc=1;proc<worldsize;proc++)
                {
                    for(i=0;i<no_proc;i++)
                    {
                        soInsiraTabela(tabela2, popLastTabelaNode(tabela) );
                    }
                    if( no_resto > 1 )
                    {
                        soInsiraTabela(tabela2, popLastTabelaNode(tabela) );
                        no_resto--;
                    }
                    empacotarTabela(tabela2,tabelaEmpacotada);

                    MPI_Send(tabelaEmpacotada,numeroBytesTabela(tabela2), MPI_BYTE, proc, TAG_TABELA_EMPACOTADA_2, MPI_COMM_WORLD );

                    destruirArvore(tabela2->root);
                    tabela2->root = NULL;
                }
                destruirTabela(tabela2);
            }
            else
            {
                empacotarTabela(tabela,tabelaEmpacotada);
                MPI_Send(tabelaEmpacotada,numeroBytesTabela(tabela), MPI_BYTE, 0, TAG_TABELA_EMPACOTADA, MPI_COMM_WORLD );
                destruirArvore(tabela->root);
                tabela->root = NULL;

                MPI_Recv( tabelaEmpacotada, entrada_bytes, MPI_BYTE, 0, TAG_TABELA_EMPACOTADA_2, MPI_COMM_WORLD, &status );
                desempacotarInserindo(tabelaEmpacotada,tabela);
            }
            free(tabelaEmpacotada);
        }

        if( operacao == ENCRIPTAR )
            MPI_File_set_size(saida_handle,(MPI_Offset)( (numero_blocos+1)*(Nb*4) ) );
        else if( operacao == DECRIPTAR )
            MPI_File_set_size(saida_handle,entrada_bytes);

        tamanho_tabela = numeroBlocosTabela(tabela);
        for( i=0 ; i<tamanho_tabela ; i++ )
        {
            node = popLastTabelaNode(tabela);
//          memcpy (bloco,node->bloco,4*Nb);

            if( operacao == ENCRIPTAR )
                AES_encriptar_bloco(node->bloco,Nb,chave,Nr);
            else if( operacao == DECRIPTAR )
                AES_decriptar_bloco(node->bloco,Nb,chave,Nr);

            indice = node->ocorrencias;
            while( indice != NULL )
            {
                if( MPI_File_write_at(saida_handle,indice->indice,node->bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
                {
                    if( rank == 0 )
                    {
                        printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                    }
                    goto help;
                }
                indice = indice->next;
            }
            destruirArvore(node);
        }
        destruirTabela(tabela);

        if( operacao == DECRIPTAR )
        {
            MPI_Barrier( MPI_COMM_WORLD ); /* Barrier that keeps anyone from reading before the decrypted value is written */

            if( MPI_File_read_at(saida_handle,entrada_bytes-1,bloco,1,MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao realizar leitura no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }

            MPI_Barrier( MPI_COMM_WORLD ); /* Barrier that keeps any process from truncating the file before another process has read */

            MPI_File_set_size(saida_handle,entrada_bytes - 4*Nb + bloco[0]);
        }

        if( rank == 0 && verbose==1)
        {
            if( operacao == ENCRIPTAR )
                printf("A encriptacao do arquivo foi realizada com sucesso.\n");
            else if( operacao == DECRIPTAR )
                printf("A decriptacao do arquivo foi realizada com sucesso.\n");
        }
    }

    goto finalizando;

sempar:
    if( rank == 0 )
    {
        printf("Sem par correspondente para a opcao %s.\n",argv[itr]);
    }

help:
    if( rank == 0 )
    {
        printf("Use a opcao --help para melhor entendimento do uso da aplicacao.\n");
    }

finalizando:
    MPI_Finalize( );
    return 0;
}
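
For reference, the help text above describes the key-file layout: one byte with the block size Nb, one byte with the key size Nk (both counted in 4-byte words), then 4*Nk raw key bytes, which is exactly what the three MPI_File_read calls consume. A minimal sketch that produces such a file with plain stdio (the file name and key bytes are made-up values):

#include <stdio.h>

int main(void)
{
    /* AES-128-style layout: 4-word block, 4-word key; key bytes are made up. */
    unsigned char Nb = 4, Nk = 4;
    unsigned char key[16] = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
                              0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
    FILE *f = fopen("chave.bin", "wb");   /* hypothetical key-file name */

    if (!f)
        return 1;
    fwrite(&Nb, 1, 1, f);
    fwrite(&Nk, 1, 1, f);
    fwrite(key, 1, sizeof key, f);
    fclose(f);
    return 0;
}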
コード例 #13
0
ファイル: 2Dmapsearch-MPIIO.c プロジェクト: pmem/mpi-pmem-ext
int main(int argc,char **argv) {
  
  MPI_File file;
  long long mapxsize,mapysize;
  
  long long myxmin,myxmax,myymin,myymax;
  int processes_in_x_dim,processes_in_y_dim;
  int my_proc_id_in_x_dim,my_proc_id_in_y_dim;

  long long boxxsize,boxysize; // sizes of a map fragment handled by each process

  int myrank,proccount;

  MPI_Offset filesize;

  long long x,y; // counters to go through a map fragment

  double max_similarity,my_similarity,my_temp_similarity;

  double cell_val;

  int provided_thread_support;
  MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE, &provided_thread_support);
  
  // first read the file name from command line
  
  if (argc<7) {
    printf("\nSyntax: 2Dmapsearch-MPIIO <map_filename> mapxsize mapysize processes_in_x_dim processes_in_y_dim pmem_path\n");
    MPI_Finalize();
    exit(-1);
  }
  
  mapxsize=atol(argv[2]);
  mapysize=atol(argv[3]);
  
  processes_in_x_dim=atoi(argv[4]);
  processes_in_y_dim=atoi(argv[5]);

  if (mapxsize*mapysize<=0) {
    printf("\nWrong map size given.\n");
    MPI_Finalize();
    exit(-1);
  }

  // find out my rank and the number of processes 

  MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
  MPI_Comm_size(MPI_COMM_WORLD,&proccount);

  // now check if the number of processes matches the specified processes in dims
  if (proccount!=(processes_in_x_dim*processes_in_y_dim)) {
    printf("\nThe number of processes started does not match processes_in_x_dim*processes_in_y_dim.\n");
    MPI_Finalize();
    exit(-1);
  }

  MPI_Info info;
  MPI_Info_create(&info);
  MPI_Info_set(info,"pmem_path",argv[6]);
  MPI_Info_set(info,"pmem_io_mode","0");
  MPI_File_open(MPI_COMM_WORLD,argv[1],MPI_MODE_RDWR,info,&file) ;
  
  // now check the size of the file vs the given map size
  MPI_File_get_size(file,&filesize);
  if (filesize<mapxsize*mapysize) {
    printf("\nFile too small for the specified map size.\n");
    MPI_File_close(&file);
    MPI_Finalize();
    exit(-1);
  }


  // now each process should determine its bounding box for the map

  // length of each box will be (mapxsize/processes_in_x_dim) and similarly for the y dimension

  boxxsize=(mapxsize/processes_in_x_dim);
  boxysize=(mapysize/processes_in_y_dim);

  my_proc_id_in_x_dim=myrank%processes_in_x_dim;
  my_proc_id_in_y_dim=myrank/processes_in_x_dim;

  myxmin=my_proc_id_in_x_dim*boxxsize;
  myymin=my_proc_id_in_y_dim*boxysize;
  myxmax=myxmin+boxxsize;
  myymax=myymin+boxysize;

  // now each process should scan its fragment

  // if a certain element is detected then the application scans its immediate surroundings for elements of some other types

  my_similarity=0;
  my_temp_similarity=0; /* must be initialized: the loop below only assigns it when the threshold test passes */

  for(x=myxmin;x<myxmax;x++)
    for(y=myymin;y<myymax;y++) {
      
      cell_val=get_xy_cell(x,y,file,mapxsize,mapysize);
      if ((cell_val>=CELL_VAL_LOW_THRESHOLD) && (cell_val<=CELL_VAL_HIGH_THRESHOLD))
	my_temp_similarity=eval_surrounding(x,y,file,mapxsize,mapysize);
      
      if (my_temp_similarity>=my_similarity)
	my_similarity=my_temp_similarity;
      
    }


  // now all processes should select the highest similarity

  MPI_Reduce(&my_similarity,&max_similarity,1,MPI_DOUBLE,MPI_MAX,0,MPI_COMM_WORLD);

  if (!myrank) {
    printf("\nThe final similarity is %f\n",max_similarity);

  }


 MPI_File_close(&file);

 MPI_Finalize();

}
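
The rank-to-box mapping above is row-major: the x index is rank % processes_in_x_dim and the y index is rank / processes_in_x_dim. A standalone sketch of just that arithmetic (grid and map sizes are made up):

#include <stdio.h>

int main(void)
{
    int processes_in_x_dim = 4, processes_in_y_dim = 2;   /* 4x2 grid, 8 ranks */
    long long mapxsize = 1000, mapysize = 500;
    long long boxxsize = mapxsize / processes_in_x_dim;
    long long boxysize = mapysize / processes_in_y_dim;
    int rank;

    for (rank = 0; rank < processes_in_x_dim * processes_in_y_dim; rank++) {
        int px = rank % processes_in_x_dim;
        int py = rank / processes_in_x_dim;
        printf("rank %d: x=[%lld,%lld) y=[%lld,%lld)\n", rank,
               (long long)px * boxxsize, (long long)(px + 1) * boxxsize,
               (long long)py * boxysize, (long long)(py + 1) * boxysize);
    }
    return 0;
}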
コード例 #14
0
ファイル: h5test.c プロジェクト: zlongshen/hdf5
/*-------------------------------------------------------------------------
 * Function:  h5_get_file_size
 *
 * Purpose:  Get the current size of a file (in bytes)
 *
 * Return:  Success:  Size of file in bytes
 *    Failure:  -1
 *
 * Programmer:  Quincey Koziol
 *              Saturday, March 22, 2003
 *
 *-------------------------------------------------------------------------
 */
h5_stat_size_t
h5_get_file_size(const char *filename, hid_t fapl)
{
    char temp[2048];    /* Temporary buffer for file names */
    h5_stat_t  sb;     /* Structure for querying file info */
    int j = 0;

    if(fapl == H5P_DEFAULT) {
        /* Get the file's statistics */
        if(0 == HDstat(filename, &sb))
            return((h5_stat_size_t)sb.st_size);
    } /* end if */
    else {
        hid_t  driver;         /* VFD used for file */

        /* Get the driver used when creating the file */
        if((driver = H5Pget_driver(fapl)) < 0)
            return(-1);

        /* Check for simple cases */
        if(driver == H5FD_SEC2 || driver == H5FD_STDIO || driver == H5FD_CORE ||
#ifdef H5_HAVE_WINDOWS
                driver == H5FD_WINDOWS ||
#endif /* H5_HAVE_WINDOWS */
#ifdef H5_HAVE_DIRECT
                driver == H5FD_DIRECT ||
#endif /* H5_HAVE_DIRECT */
                driver == H5FD_LOG) {
            /* Get the file's statistics */
            if(0 == HDstat(filename, &sb))
                return((h5_stat_size_t)sb.st_size);
        } /* end if */
        else if(driver == H5FD_MULTI) {
            H5FD_mem_t mt;
            h5_stat_size_t tot_size = 0;

            HDassert(HDstrlen(multi_letters) == H5FD_MEM_NTYPES);
            for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) {
                /* Create the filename to query */
                HDsnprintf(temp, sizeof temp, "%s-%c.h5", filename, multi_letters[mt]);

                /* Check for existence of file */
                if(0 == HDaccess(temp, F_OK)) {
                    /* Get the file's statistics */
                    if(0 != HDstat(temp, &sb))
                        return(-1);

                    /* Add to total size */
                    tot_size += (h5_stat_size_t)sb.st_size;
                } /* end if */
            } /* end for */

            /* Return total size */
            return(tot_size);
        } /* end if */
#ifdef H5_HAVE_PARALLEL
        else if(driver == H5FD_MPIO) {
            MPI_File fh;         /* MPI file handle used to open the file and verify its size */
            int mpi_ret;
            MPI_Offset file_size;

            mpi_ret = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
            if (mpi_ret != MPI_SUCCESS) return -1;
            mpi_ret = MPI_File_get_size(fh, &file_size);
            if (mpi_ret != MPI_SUCCESS) return -1;
            mpi_ret = MPI_File_close(&fh);
            if (mpi_ret != MPI_SUCCESS) return -1;

            return file_size;
        }
#endif /* H5_HAVE_PARALLEL */
        else if(driver == H5FD_FAMILY) {
            h5_stat_size_t tot_size = 0;

            /* Try all filenames possible, until we find one that's missing */
            for(j = 0; /*void*/; j++) {
                /* Create the filename to query */
                HDsnprintf(temp, sizeof temp, filename, j);

                /* Check for existence of file */
                if(HDaccess(temp, F_OK) < 0)
                    break;

                /* Get the file's statistics */
                if(0 != HDstat(temp, &sb))
                    return(-1);

                /* Add to total size */
                tot_size += (h5_stat_size_t)sb.st_size;
            } /* end for */

            /* Return total size */
            return(tot_size);
        } /* end if */
        else {
            HDassert(0 && "Unknown VFD!");
        } /* end else */
    } /* end else */

    return(-1);
} /* end h5_get_file_size() */
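
The H5FD_MPIO branch above is the standard MPI_File_get_size idiom. Stripped of the HDF5 wrappers, a minimal self-contained version might look like this:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_File fh;
    MPI_Offset size;

    MPI_Init(&argc, &argv);
    if (argc > 1 &&
        MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY,
                      MPI_INFO_NULL, &fh) == MPI_SUCCESS) {
        MPI_File_get_size(fh, &size);
        printf("%s: %lld bytes\n", argv[1], (long long)size);
        MPI_File_close(&fh);
    }
    MPI_Finalize();
    return 0;
}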
コード例 #15
0
ファイル: HW1_101062141_basic.c プロジェクト: Q-Max/pp2015hw1
int main (int argc, char *argv[]) {
	int rank, size;

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Status status;
	if (argc < 4) {
		if (rank == ROOT) {
			fprintf(stderr, "Insufficient args\n");
			fprintf(stderr, "Usage: %s N input_file output_file", argv[0]);
		}
		MPI_Barrier(MPI_COMM_WORLD);
		MPI_Finalize();
		return 0;
	}

	int N = atoi(argv[1]), alloc_num, former_alloc_num=0, last_alloc_num=0;
	const char *inName = argv[2];
	const char *outName = argv[3];
	double start, finish, iotime = 0, commtime = 0, io_all, comm_all, cpu_all, cputime = 0, cpustart, cpufinish;
	int *root_ptr; // for root process (which rank == 0) only
  
	// Part 1: Read file
	/* Note: You should deal with cases where (N < size) in Homework 1 */
	int rc, i, trend = NOTSORTED;
	int *array;
	MPI_File fp;
	MPI_File fh;
	MPI_Offset my_offset;
	MPI_Request req;
	rc = MPI_File_open(MPI_COMM_WORLD, inName, MPI_MODE_RDONLY, MPI_INFO_NULL, &fp); 
	if(rc != MPI_SUCCESS){
		MPI_Abort(MPI_COMM_WORLD, rc);
	}
	MPI_Offset total_number_of_bytes;
	MPI_File_get_size(fp, &total_number_of_bytes);
	if(total_number_of_bytes/sizeof(int)<N){
		if(rank==ROOT)
			puts("N is bigger than testcase in input file, read to the end");
		N = total_number_of_bytes/sizeof(int);
	}
	// sheu
    // todo
    // detect whether the file is sorted
    //--------------------------------------------------------------------------------
	if(rank!=ROOT){
		MPI_File_open(MPI_COMM_WORLD, outName, MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);		
	}
	else{
		alloc_num = N;
		MPI_File_seek(fp,(MPI_Offset)0, MPI_SEEK_SET);
		array = (int*)malloc(sizeof(int)*alloc_num);
		start = MPI_Wtime();
		MPI_File_read(fp, array, alloc_num, MPI_INT, &status);
		finish = MPI_Wtime();
		iotime += finish-start;
		MPI_File_open(MPI_COMM_WORLD, outName, MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);		
		if(N>=2){
#ifdef DEBUG
			printall(array, alloc_num);
#endif
			if(array[0]>array[1])
				trend = -1;
			else if(array[0]==array[1])
				trend = 0;
			else
				trend = 1;
			for(i=2;i<alloc_num;i++){
				if(array[i-1]>array[i]&&(trend==-1||trend==0)){
					trend = -1;
					continue;
				}
				else if(array[i-1]==array[i]&&trend==0)
					continue;
				else if(array[i-1]<array[i]&&(trend==1||trend==0)){
					trend = 1;
					continue;
				}
				else{
					trend = NOTSORTED;
					break;
				}
			}
#ifdef DEBUG
			printf("%d\n", trend);
#endif
			if(trend==1||trend==0){
				printf("sorted file\n");
				my_offset = 0;
				printall(array, alloc_num);
				start = MPI_Wtime();
				MPI_File_write_at(fh, my_offset, array, alloc_num, MPI_INT, &status);
				finish = MPI_Wtime();
				iotime += finish - start;
				printf("iotime   : %8.5lf\ncommtime : %8.5lf\n",iotime,commtime);			
			}
			else if(trend==-1){
				printf("descending sorted file\n");
				root_ptr = (int*)malloc(sizeof(int)*alloc_num);
				for(i=0;i<alloc_num;i++){
					root_ptr[i] = array[alloc_num-i-1];
				}
				my_offset = 0;
				start = MPI_Wtime();
				MPI_File_write_at(fh, my_offset, root_ptr, alloc_num, MPI_INT, &status);
				finish = MPI_Wtime();
				iotime += finish - start;
				printf("iotime   : %8.5lf\ncommtime : %8.5lf\n",iotime,commtime);
			}
		}
		else{
			// N==1
			trend = 0;
			my_offset = 0;
			start = MPI_Wtime();
			MPI_File_write_at(fh, my_offset, array, alloc_num, MPI_INT, &status);
			finish = MPI_Wtime();
			iotime += finish - start;
			printf("iotime   : %8.5lf\ncommtime : %8.5lf\n",iotime,commtime);
			
		}
	}
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Bcast(&trend, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
	if(trend==-1||trend==0||trend==1){
		MPI_Finalize();
		exit(0);
	}
	// read data in memory
	// sheu if N < 2 x  # of processes, root take over all computation
	
	alloc_num = N/size;
	if(2*size>N){
		// if N < 2 x size, root take over
		if(rank!=ROOT){			
			MPI_Finalize();
			exit(0);
		}
		else{
			alloc_num = N;
			MPI_File_seek(fp,(MPI_Offset)0, MPI_SEEK_SET);
			array = (int*)malloc(sizeof(int)*alloc_num);
			start = MPI_Wtime();
			MPI_File_read(fp, array, alloc_num, MPI_INT, &status);
			finish = MPI_Wtime();
			iotime = finish-start;
			cpustart = MPI_Wtime();
			singleOESort(array, alloc_num);
			cpufinish = MPI_Wtime();
			cputime = cpufinish - cpustart;
#ifdef DEBUG
			printall(array, alloc_num);      
#endif
			my_offset = 0;
			start = MPI_Wtime();
			MPI_File_write_at(fh, my_offset, array, alloc_num, MPI_INT, &status);
			finish = MPI_Wtime();
			iotime += finish - start;
			printf("iotime   : %8.5lfs\ncommtime : %8.5lfs\ncputime  : %8.5fs\n",iotime,commtime,cputime);
			MPI_Finalize();
			exit(0);
		}
	}
	else if((alloc_num)%2){
		// if alloc_num is odd number
		// every process alloc_num-- to guarantee every process has even elements
		// except last node
		alloc_num--;
		former_alloc_num = alloc_num;
		MPI_File_seek(fp,(MPI_Offset)alloc_num*rank*sizeof(int), MPI_SEEK_SET);
		if(rank==size-1){
			alloc_num = N - alloc_num * rank;
		}
		last_alloc_num = N - former_alloc_num * (size-1);
		array = (int*)malloc(sizeof(int)*alloc_num);
		start = MPI_Wtime();
		MPI_File_read(fp, array, alloc_num, MPI_INT, &status);
		finish = MPI_Wtime();
		iotime += finish-start;
	}
	else{
		// last process need deal with more inputs
		former_alloc_num = alloc_num;
		MPI_File_seek(fp, (MPI_Offset)rank*alloc_num*sizeof(int), MPI_SEEK_SET);
		if(rank==size-1){			
			alloc_num = N - alloc_num * rank;
		}	
		last_alloc_num = N - former_alloc_num * (size-1);
		array = (int*)malloc(sizeof(int)*alloc_num);
		start = MPI_Wtime();
		MPI_File_read(fp, array, alloc_num, MPI_INT, &status);
		finish = MPI_Wtime();
		iotime += finish-start;
	}
	
	// sheu
	// todo
	// sort and communicate with other
	//--------------------------------------------------------------------------------
	int tmp1,tmp2,sorted_temp,count=0;
	int sorted=0;
#ifdef DEBUG
	int *num_ptr, *pos_ptr; 
	if(rank==ROOT){
		root_ptr = (int*)calloc(N,sizeof(int));
		num_ptr = (int*)malloc(sizeof(int)*size);
		pos_ptr = (int*)malloc(sizeof(int)*size);
		for(i=0;i<size;i++){
			num_ptr[i] = former_alloc_num;
			pos_ptr[i] = i * former_alloc_num;
		}
		num_ptr[size-1] = last_alloc_num;
	}
#endif
	cpustart = MPI_Wtime();
	while(!sorted){
		sorted=1;
		for(i=0;i+1<alloc_num;i+=2){
			if(array[i]>array[i+1]){
				swap(&array[i],&array[i+1]);
				sorted = 0;
			}
		}
		for(i=1;i<alloc_num;i+=2){
			if(i==alloc_num-1){
				start = MPI_Wtime();
				// has even elements, which is guaranteed for all but the last process
			
				// send to rank+1, last node do nothing
				if(rank!=size-1){
					MPI_Isend(&array[i],1,MPI_INT,rank+1,0,MPI_COMM_WORLD,&req);
				}
				// receive from rank-1, root node do nothing
				if(rank!=ROOT){
					MPI_Recv(&tmp1,1,MPI_INT,rank-1,MPI_ANY_TAG,MPI_COMM_WORLD,&status);  
				}
				if(array[0]<tmp1&&rank!=ROOT){
					swap(&array[0],&tmp1);
					sorted = 0;
				}
				// tmp1 will always be smaller
				if(rank!=size-1)
					MPI_Wait(&req, MPI_STATUS_IGNORE);
				// send to rank-1, root node do nothing
				if(rank!=ROOT){
					MPI_Isend(&tmp1,1,MPI_INT,rank-1,0,MPI_COMM_WORLD, &req);
				}
				// receive from rank+1, last node do nothing
				if(rank!=size-1){
					MPI_Recv(&tmp2,1,MPI_INT,rank+1,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
				}
				if(array[i]>tmp2&&rank!=size-1){
					swap(&array[i],&tmp2);
					sorted = 0;
				}
				if(rank!=ROOT)
					MPI_Wait(&req, MPI_STATUS_IGNORE);
				finish = MPI_Wtime();
				commtime += finish - start;
				continue;
			}
			else if(array[i]>array[i+1]){
				swap(&array[i],&array[i+1]);
				sorted = 0;
			}
			if(rank==size-1&&alloc_num%2&&i==alloc_num-2){
				start = MPI_Wtime();
				// has odd elements, only in last process
				MPI_Recv(&tmp1,1,MPI_INT,rank-1,MPI_ANY_TAG,MPI_COMM_WORLD,&status);        
				if(array[0]<tmp1){
					swap(&array[0],&tmp1);
					sorted = 0;
				}
				MPI_Isend(&tmp1,1,MPI_INT,rank-1,0,MPI_COMM_WORLD,&req);
				finish = MPI_Wtime();
				commtime += finish - start;
			}
		}
		start = MPI_Wtime();
		MPI_Allreduce(&sorted,&sorted_temp,1,MPI_INT,MPI_LAND,MPI_COMM_WORLD);
		finish = MPI_Wtime();
		commtime += finish - start;
		sorted = sorted_temp;
		count++;
		/*if(count>2*N){
			printf("quite weird, I can't finish");
			break;
		}*/
	}
	cpufinish = MPI_Wtime();
	cputime = cpufinish - cpustart - commtime;
#ifdef DEBUG
	MPI_Barrier(MPI_COMM_WORLD);
	printall(array,alloc_num);
	MPI_Gatherv(array, alloc_num, MPI_INT, root_ptr, num_ptr, pos_ptr, MPI_INT, ROOT, MPI_COMM_WORLD);
	if(rank==ROOT){
		printall(root_ptr, N);
	}
#endif
	my_offset = rank*former_alloc_num*sizeof(int);
	start = MPI_Wtime();
	MPI_File_write_at(fh, my_offset, array, alloc_num, MPI_INT, &status);
	finish = MPI_Wtime();
	iotime += finish - start;
	MPI_Allreduce(&iotime,&io_all,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
	MPI_Allreduce(&commtime,&comm_all,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
	MPI_Allreduce(&cputime,&cpu_all,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
	io_all /= size;
	comm_all /= size;
	if(rank==ROOT)
		printf("iotime   : %8.5lfs\ncommtime : %8.5lfs\ncputime  : %8.5lfs(sum)\n",io_all ,comm_all, cpu_all);
	MPI_Finalize();
	return 0;
}
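
singleOESort() is not shown in this snippet; a plausible single-process odd-even transposition sort matching the call sites above would be (a sketch, not necessarily the submission's actual implementation):

#include <stdio.h>

/* Odd-even transposition sort: alternate passes over (even,odd) and
 * (odd,even) index pairs until a full sweep makes no swap. */
static void singleOESort(int *a, int n)
{
    int sorted = 0;
    while (!sorted) {
        int i, t;
        sorted = 1;
        for (i = 0; i + 1 < n; i += 2)        /* even phase */
            if (a[i] > a[i + 1]) {
                t = a[i]; a[i] = a[i + 1]; a[i + 1] = t;
                sorted = 0;
            }
        for (i = 1; i + 1 < n; i += 2)        /* odd phase */
            if (a[i] > a[i + 1]) {
                t = a[i]; a[i] = a[i + 1]; a[i + 1] = t;
                sorted = 0;
            }
    }
}

int main(void)
{
    int v[] = { 5, 1, 4, 2, 3 };
    int i;

    singleOESort(v, 5);
    for (i = 0; i < 5; i++)
        printf("%d ", v[i]);   /* prints: 1 2 3 4 5 */
    printf("\n");
    return 0;
}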
コード例 #16
0
ファイル: mpi_dsrc.cpp プロジェクト: PCDS/paraDSRC
void readlines(MPI_File *in, const int rank, const int size, const int overlap,
               /*char ***lines,*/ int *nlines) {
    MPI_Offset filesize;
    MPI_Offset localsize;
    MPI_Offset start;
    MPI_Offset end;
    char *chunk;
    MPI_Offset bytesRead=0;
    MPI_Offset myBytesRead = 4000000, myActualRead = 0;
    MPI_File_get_size(*in, &filesize);
    int control = 0;

    printf("Rank %d started\n", rank);

    while(bytesRead < filesize){
   // while (0){
        /* figure out who reads what */
        start = (rank * myBytesRead) + bytesRead;
        end   = start + myBytesRead - 1;
        
        /* add overlap to the end of everyone's chunk... */
        if (end > filesize || (end + overlap) > filesize)
            end = filesize;
        else
            end += overlap;

        if(start < filesize){
            localsize =  end - start + 1;
      
        }else{
            start = filesize - 1;
            localsize = 0;
        }
        /* allocate memory */
        chunk = (char *)malloc( (localsize + 1)*sizeof(char));

        /* everyone reads in their part */
        MPI_File_read_at(*in, start, chunk, localsize, MPI_CHAR, MPI_STATUS_IGNORE);
        chunk[localsize] = '\0';

        /*
         *  everyone calculate what their start and end *really* are by going 
         *  from the first newline after start to the first newline after the
         *  overlap region starts (eg, after end - overlap + 1)
         */
        int locstart=0, locend=localsize;
        if (localsize != 0)
        {
            if (rank != 0) {
                while(chunk[locstart] != '\n' || chunk[locstart+1] != '+' || chunk[locstart+2] != '\n'){
                    locstart++;
                }
                locstart += 3;

                while(chunk[locstart] != '\n'){
                    locstart++;
                }
                locstart++;
            }

           if (end != filesize) {
                locend -= overlap;
            
                while(chunk[locend] != '\n' || chunk[locend+1] != '+' || chunk[locend+2] != '\n'){
                    locend++;
                }

                locend += 3;

                while(chunk[locend] != '\n'){
                    locend++;
                }
                locend++; 
            }
        }

        // what was actually read by process i
        myActualRead = locend-locstart;
        if(rank == 0)
            myActualRead += bytesRead;

        /* Now we'll count the number of lines */
        /************************/
        // This part represents the processing:
        // while (fastq_file.ReadNextRecord(rec))

        std::string s = std::string(&chunk[locstart], &chunk[locend]);
        size_t n = std::count(s.begin(), s.end(), '\n');

        uint32 varSuperblockSize = (unsigned int) n/4;

        //printf("Rank %d's superblock has %ld records. #%d\n", rank, varSuperblockSize, control);

       // int varBlockSize = 32;
      //  for (;;)
       // {
         //   if(n % varBlockSize != 0)
           //     varBlockSize++;
         //   else
           //     break;
        //}
        //printf("  ==>Rank %d's block has %d records. #%d\n", rank, varBlockSize, control);

        FastqRecord rec;
       // DsrcFile dsrc_file(varSuperblockSize);

        //dsrc_file.StartCompress("test");

        // who variable decides if processing title or DNA or plus or QS
        int who = 0;
        bool errorFree[4] = {false, false, false, false};
        int64 rec_no = 0;
        // ** READING A RECORD (TITLE, DNA SEQ, PLUS, QUALITY SCORE)
        int j = locstart; 
        while (j < locend){

            switch (who){
              // Read title  
              case 0: {
                uint32 i = 0;
                for (;;){
                    int32 c = chunk[j++];

                    if (c != '\n' && c != '\r'){
                        if (i >= rec.title_size){
                            rec.Extend(rec.title, rec.title_size);
                        }
                        rec.title[i++] = (uchar) c;
                    } else if (i > 0){
                        break;
                    }
                }
                rec.title[i] = 0;
                rec.title_len = i;      
                errorFree[who++] = i > 0 && rec.title[0] == '@';
                break;
              }
              // Read DNA sequence
              case 1:{
                uint32 i = 0;
                int32 c;

                if (rec.sequence_breaks){
                    delete rec.sequence_breaks;
                    rec.sequence_breaks = NULL;
                }
                uint32 last_eol_pos = 0;
                uint32 sequence_break = 0;

                for (;;){
                    c = chunk[j++];

                    if (c == '+'){
                        j--;
                        break;
                    }

                    //if (c == FILE_EOF)
                     //   break;

                    if (c != '\n' && c != '\r'){
                        if (i >= rec.sequence_size){
                            rec.Extend(rec.sequence, rec.sequence_size);
                        }
                        rec.sequence[i++] = (uchar) c;
                    } else{
                        if (last_eol_pos != i){
                            if (sequence_break){
                                if (!rec.sequence_breaks){
                                    rec.sequence_breaks = new std::vector<int>;
                                }
                                rec.sequence_breaks->push_back(sequence_break);
                            } else{
                                sequence_break = i - last_eol_pos;
                            }
                            last_eol_pos = i;
                        }
                    }
                }
                rec.sequence[i] = 0;
                rec.sequence_len = i;
                errorFree[who++] = true;
                break;
              }
              // Read "+"  
              case 2:{
                uint32 i = 0;
                int32 c;
                for (;;){
                    c = chunk[j++];
                    //if (c == FILE_EOF)
                      //  break;

                    if (c != '\n' && c != '\r'){
                        if (i >= rec.plus_size){
                            rec.Extend(rec.plus, rec.plus_size);
                        }
                        rec.plus[i++] = (uchar) c;
                    }
                    else if (i > 0){
                        break;
                    }
                }
                rec.plus[i] = 0;
                rec.plus_len = i;
                errorFree[who++] = i > 0;            
                break;
              }
              // Read quality score
              case 3:{
                uint32 i;
                uint32 last_eol_pos = 0;

                if (rec.quality_breaks){
                    delete rec.quality_breaks;
                    rec.quality_breaks = NULL;
                }

                if (rec.sequence_size > rec.quality_size)
                    rec.ExtendTo(rec.quality, rec.quality_size, rec.sequence_size);

                for (i = 0; i < rec.sequence_len;){
                    int32 c = chunk[j++];
                    //if (c == FILE_EOF)
                    //    break;

                    if (c != '\n' && c != '\r'){
                        rec.quality[i++] = (uchar)c;
                    } else{
                        if (last_eol_pos != i){
                            if (!rec.quality_breaks){
                                rec.quality_breaks = new std::vector<int>;
                            }
                            rec.quality_breaks->push_back(i - last_eol_pos);
                            last_eol_pos = i;
                        }
                    }
                }
                j++; // get the newline

                rec.quality[i] = 0;
                rec.quality_len = i;
                errorFree[who++] = (i == rec.sequence_len);
                break;
              }  
            }
           // If a full record has been read
            if(who == 4){
                if(errorFree[0] && errorFree[1] && errorFree[2] && errorFree[3]){
                   // dsrc_file.WriteRecord(rec);
                    //printf("Rank %d has %ld processed\n", rank, rec_no);
                    ++rec_no;
                    who = 0;
                } else{
                    printf("Rank %d has an error\n", rank);
                    break;
                } 
            }
            //if (chunk[i] == '\n'){
              //  (*nlines)++;
           // }
        }

        //dsrc_file.FinishCompress();
        free(chunk);

        //printf("Rank %d's superblock has %ld records. #%d\n", rank, (*nlines)/4, control);
        //*nlines = 0;
        
        MPI_Reduce(&myActualRead, &bytesRead, 1, MPI_LONG_LONG_INT, MPI_SUM, 0, MPI_COMM_WORLD);
        MPI_Bcast(&bytesRead, 1, MPI_LONG_LONG_INT, 0, MPI_COMM_WORLD);

       // if(rank == 0)
            //printf("%d.>> bytesRead: %ld\n", control, bytesRead);

        control++;


    } // ReadNextRecord
    if(rank == 0)
        printf("%d.***    bytesRead: %ld | filesize: %ld     ***\n", control, bytesRead, filesize);

    return;
}
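
Each pass above gives every rank a window of myBytesRead bytes plus an overlap, so the '\n+\n' FASTQ record boundary can be located inside the overlap. A sketch of just the offset arithmetic, with the collective bookkeeping simplified to a fixed stride (all sizes are made up):

#include <stdio.h>

int main(void)
{
    long long filesize = 10000000;          /* made-up 10 MB input */
    long long myBytesRead = 4000000, overlap = 1024, bytesRead = 0;
    int size = 2, rank;

    while (bytesRead < filesize) {
        for (rank = 0; rank < size; rank++) {
            long long start = rank * myBytesRead + bytesRead;
            long long end = start + myBytesRead - 1;
            if (end + overlap > filesize)
                end = filesize;
            else
                end += overlap;
            if (start < filesize)
                printf("pass@%lld: rank %d reads [%lld, %lld]\n",
                       bytesRead, rank, start, end);
        }
        /* the real code advances by the reduce-summed count actually consumed */
        bytesRead += (long long)size * myBytesRead;
    }
    return 0;
}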
コード例 #17
0
void readlines(MPI_File *in, const int rank, const int size, const int overlap, char ***lines, int *nlines) {
	/*@see: http://stackoverflow.com/a/13328819/2521647 */
	MPI_Offset filesize;
	MPI_Offset localsize;
	MPI_Offset start;
	MPI_Offset end;
	char *chunk;

	/* figure out who reads what */

	MPI_File_get_size(*in, &filesize);
	localsize = filesize / size;
	start = rank * localsize;
	end = start + localsize - 1;

	/* add overlap to the end of everyone's chunk... */
	end += overlap;

	/* except the last processor, of course */
	if (rank == size - 1)
		end = filesize;

	localsize = end - start + 1;

	/* allocate memory */
	chunk = malloc((localsize + 1) * sizeof(char));

	/* everyone reads in their part */
	MPI_File_read_at_all(*in, start, chunk, localsize, MPI_CHAR,
			MPI_STATUS_IGNORE);
	chunk[localsize] = '\0';

	/*
	 * everyone calculate what their start and end *really* are by going
	 * from the first newline after start to the first newline after the
	 * overlap region starts (eg, after end - overlap + 1)
	 */

	int locstart = 0, locend = localsize;
	if (rank != 0) {
		while (chunk[locstart] != '\n')
			locstart++;
		locstart++;
	}
	if (rank != size - 1) {
		locend -= overlap;
		while (chunk[locend] != '\n')
			locend++;
	}
	localsize = locend - locstart + 1;

	/* Now let's copy our actual data over into a new array, with no overlaps */
	char *data = (char *) malloc((localsize + 1) * sizeof(char));
	memcpy(data, &(chunk[locstart]), localsize);
	data[localsize] = '\0';
	free(chunk);

	/* Now we'll count the number of lines */
	*nlines = 0;
	for (int i = 0; i < localsize; i++)
		if (data[i] == '\n')
			(*nlines)++;

	/* Now the array lines will point into the data array at the start of each line */
	/* assuming nlines > 1 */
	*lines = (char **) malloc((*nlines) * sizeof(char *));
	(*lines)[0] = strtok(data, "\n");
	for (int i = 1; i < (*nlines); i++)
		(*lines)[i] = strtok(NULL, "\n");

	return;
}
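
A minimal driver for this variant might look like the following (a sketch assuming the readlines() above is in scope; the overlap must exceed the longest expected line):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_File in;
    char **lines;
    int nlines, rank, size;
    const int overlap = 100;    /* must exceed the longest expected line */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (argc > 1 &&
        MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY,
                      MPI_INFO_NULL, &in) == MPI_SUCCESS) {
        readlines(&in, rank, size, overlap, &lines, &nlines);
        printf("rank %d of %d parsed %d lines\n", rank, size, nlines);
        MPI_File_close(&in);
    }
    MPI_Finalize();
    return 0;
}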
コード例 #18
0
void _FileParticleLayout_SetInitialCounts( void* particleLayout, void* _swarm ) {
	FileParticleLayout*        self         = (FileParticleLayout*)particleLayout;
	Swarm*                     swarm        = (Swarm*)_swarm;
	Name                       filename     = self->filename;
	MPI_File                   mpiFile;
	int                        openResult;
	MPI_Offset                 bytesCount;
	SizeT                      particleSize = swarm->particleExtensionMgr->finalSize;
	div_t                      division;

	Journal_DPrintf( self->debug, "In %s(): for ParticleLayout \"%s\", of type %s\n",
		__func__, self->name, self->type );
	Stream_IndentBranch( Swarm_Debug );	

	Journal_DPrintf( self->debug, "Finding number of bytes in checkpoint file \"%s\":\n",
		self->filename );

	openResult = MPI_File_open( swarm->comm, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &mpiFile );

	Journal_Firewall( 
		openResult == 0, 
		self->errorStream,
		"Error in %s for %s '%s' - Cannot open file %s.\n", 
		__func__, 
		self->type, 
		self->name, 
		filename );
	
	MPI_File_get_size( mpiFile, &bytesCount );
	MPI_File_close( &mpiFile );

	Journal_DPrintf( self->debug, "...calculated bytes total of %u.\n", bytesCount );
	
	/* Divide by particle size to get number of particles */
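	/* NOTE: div() takes plain int arguments, so bytesCount is truncated
	   here; checkpoint files larger than INT_MAX bytes would need lldiv(). */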
	division = div( bytesCount, particleSize );
	self->totalInitialParticles = division.quot;

	Journal_DPrintf( self->debug, "given bytes total %u / particle size %u ->\n"
		"\ttotalInitialParticles = %u.\n", bytesCount, (unsigned int)particleSize,
		self->totalInitialParticles );

	Journal_Firewall( 
		division.rem == 0,
		self->errorStream,
		"Error in func %s for %s '%s' - Trying to read particle information from %s which stores %u bytes.\n"
		"This doesn't produce an integer number of particles of size %u - It gives remainder = %u\n", 
		__func__, 
		self->type, 
		self->name, 
		filename, 
		(unsigned int)bytesCount, 
		(unsigned int)particleSize, 
		division.rem ); 

	Journal_DPrintf( self->debug, "calling parent func to set cell counts:\n", bytesCount );
	_GlobalParticleLayout_SetInitialCounts( self, swarm );

	Stream_UnIndentBranch( Swarm_Debug );	
	Journal_DPrintf( self->debug, "...finished %s() for ParticleLayout \"%s\".\n",
		__func__, self->name );
}
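
As noted above, div() truncates the 64-bit byte count. A 64-bit-safe version of the same count-and-remainder check using lldiv() (a sketch; the Journal machinery and SizeT types are left out, and the sizes are made up):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    long long bytesCount = 6442450944LL;  /* e.g. a 6 GB checkpoint file */
    long long particleSize = 96;          /* hypothetical bytes per particle */
    lldiv_t division = lldiv(bytesCount, particleSize);

    if (division.rem != 0) {
        fprintf(stderr, "file is not a whole number of particles (rem=%lld)\n",
                division.rem);
        return 1;
    }
    printf("totalInitialParticles = %lld\n", division.quot);
    return 0;
}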
コード例 #19
0
ファイル: t_mpi.c プロジェクト: ArielleBassanelli/gempak
/*
 * Verify that MPI_Offset exceeding 2**31 can be computed correctly.
 * Print any failure as information only, not as an error so that this
 * won't abort the remaining test or other separated tests.
 *
 * Test if MPIO can write file from under 2GB to over 2GB and then
 * from under 4GB to over 4GB.
 * Each process writes 1MB in round robin fashion.
 * Then reads the file back in by reverse order, that is process 0
 * reads the data of process n-1 and vice versa.
 */
static int
test_mpio_gb_file(char *filename)
{
    int mpi_size, mpi_rank;
    MPI_Info info = MPI_INFO_NULL;
    int mrc;
    MPI_File  fh;
    int i, j, n;
    int vrfyerrs;
    int writerrs;    /* write errors */
    int nerrs;
    int ntimes;      /* how many times */
    char  *buf = NULL;
    char  expected;
    MPI_Offset  size;
    MPI_Offset  mpi_off;
    MPI_Offset  mpi_off_old;
    MPI_Status  mpi_stat;
    h5_stat_t stat_buf;
    int is_signed, sizeof_mpi_offset;

    nerrs = 0;
    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);

    if (VERBOSE_MED)
        printf("MPI_Offset range test\n");

    /* figure out the signness and sizeof MPI_Offset */
    mpi_off = 0;
    is_signed = ((MPI_Offset)(mpi_off - 1)) < 0;
    sizeof_mpi_offset = (int)(sizeof(MPI_Offset));

    /*
     * Verify the sizeof MPI_Offset and correctness of handling multiple GB
     * sizes.
     */
    if (MAINPROCESS){      /* only process 0 needs to check it*/
  printf("MPI_Offset is %s %d bytes integeral type\n",
      is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset));
  if (sizeof_mpi_offset <= 4 && is_signed){
      printf("Skipped 2GB range test "
        "because MPI_Offset cannot support it\n");
  }else {
      /* verify correctness of assigning 2GB sizes */
      mpi_off = 2 * 1024 * (MPI_Offset)MB;
      INFO((mpi_off>0), "2GB OFFSET assignment no overflow");
      INFO((mpi_off-1)==TWO_GB_LESS1, "2GB OFFSET assignment succeed");

      /* verify correctness of increasing from below 2 GB to above 2GB */
      mpi_off = TWO_GB_LESS1;
      for (i=0; i < 3; i++){
    mpi_off_old = mpi_off;
    mpi_off = mpi_off + 1;
    /* no overflow */
    INFO((mpi_off>0), "2GB OFFSET increment no overflow");
    /* correct inc. */
    INFO((mpi_off-1)==mpi_off_old, "2GB OFFSET increment succeed");
      }
  }

  if (sizeof_mpi_offset <= 4){
      printf("Skipped 4GB range test "
        "because MPI_Offset cannot support it\n");
  }else {
      /* verify correctness of assigning 4GB sizes */
      mpi_off = 4 * 1024 * (MPI_Offset)MB;
      INFO((mpi_off>0), "4GB OFFSET assignment no overflow");
      INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");

      /* verify correctness of increasing from below 4 GB to above 4 GB */
      mpi_off = FOUR_GB_LESS1;
      for (i=0; i < 3; i++){
    mpi_off_old = mpi_off;
    mpi_off = mpi_off + 1;
    /* no overflow */
    INFO((mpi_off>0), "4GB OFFSET increment no overflow");
    /* correct inc. */
    INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed");
      }
  }
    }

    /*
     * Verify if we can write to a file of multiple GB sizes.
     */
    if (VERBOSE_MED)
  printf("MPIO GB file test %s\n", filename);

    if (sizeof_mpi_offset <= 4){
  printf("Skipped GB file range test "
    "because MPI_Offset cannot support it\n");
    }else{
  buf = malloc(MB);
  VRFY((buf!=NULL), "malloc succeed");

  /* open a new file. Remove it first in case it exists. */
  /* Must delete because MPI_File_open does not have a Truncate mode. */
  /* Don't care if it has error. */
  MPI_File_delete(filename, MPI_INFO_NULL);
  MPI_Barrier(MPI_COMM_WORLD);  /* prevent race condition */

  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
        info, &fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN");

  printf("MPIO GB file write test %s\n", filename);

  /* instead of writing every bytes of the file, we will just write
   * some data around the 2 and 4 GB boundaries.  That should cover
   * potential integer overflow and filesystem size limits.
   */
  writerrs = 0;
  for (n=2; n <= 4; n+=2){
      ntimes = GB/MB*n/mpi_size + 1;
      for (i=ntimes-2; i <= ntimes; i++){
    mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n",
      mpi_rank, mpi_off, mpi_off);
    /* set data to some trivial pattern for easy verification */
    for (j=0; j<MB; j++)
        *(buf+j) = i*mpi_size + mpi_rank;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n",
      mpi_rank, MB, mpi_off);
    mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
    INFO((mrc==MPI_SUCCESS), "GB size file write");
    if (mrc!=MPI_SUCCESS)
        writerrs++;
      }
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync after writes");

  /*
   * Verify if we can read the multiple GB file just created.
   */
  /* open it again to verify the data written */
  /* but only if there was no write errors */
  printf("MPIO GB file read test %s\n", filename);
  if (errors_sum(writerrs)>0){
      printf("proc %d: Skip read test due to previous write errors\n",
    mpi_rank);
      goto finish;
  }
  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
  VRFY((mrc==MPI_SUCCESS), "");

  /* Only read back parts of the file that have been written. */
  for (n=2; n <= 4; n+=2){
      ntimes = GB/MB*n/mpi_size + 1;
      for (i=ntimes-2; i <= ntimes; i++){
    mpi_off = (i*mpi_size + (mpi_size - mpi_rank - 1))*(MPI_Offset)MB;
    if (VERBOSE_MED)
        HDfprintf(stdout,"proc %d: read from mpi_off=%016llx, %lld\n",
      mpi_rank, mpi_off, mpi_off);
    mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
    INFO((mrc==MPI_SUCCESS), "GB size file read");
    expected = i*mpi_size + (mpi_size - mpi_rank - 1);
    vrfyerrs=0;
    for (j=0; j<MB; j++){
        if ((*(buf+j) != expected) &&
      (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)){
          printf("proc %d: found data error at [%ld+%d], expect %d, got %d\n",
        mpi_rank, (long)mpi_off, j, expected, *(buf+j));
        }
    }
    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
        printf("proc %d: [more errors ...]\n", mpi_rank);

    nerrs += vrfyerrs;
      }
  }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  /*
   * one more sync to ensure all processes have done reading
   * before ending this test.
   */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");

        /*
         * Check if MPI_File_get_size works correctly.  Some systems (only SGI Altix
         * Propack 4 so far) return wrong file size.  It can be avoided by reconfiguring
         * with "--disable-mpi-size".
         */
#ifdef H5_HAVE_MPI_GET_SIZE
  printf("Test if MPI_File_get_size works correctly with %s\n", filename);

  mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
        VRFY((mrc==MPI_SUCCESS), "");

        if (MAINPROCESS){      /* only process 0 needs to check it*/
            mrc = MPI_File_get_size(fh, &size);
      VRFY((mrc==MPI_SUCCESS), "");

            mrc=HDstat(filename, &stat_buf);
      VRFY((mrc==0), "");

            /* Hopefully this casting is safe */
            if(size != (MPI_Offset)(stat_buf.st_size)) {
                printf("Warning: MPI_File_get_size doesn't return correct file size.  To avoid using it in the library, reconfigure and rebuild the library with --disable-mpi-size.\n");
            }
        }

  /* close file and free the communicator */
  mrc = MPI_File_close(&fh);
  VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");

  /*
   * one more sync to ensure all processes have done reading
   * before ending this test.
   */
  mrc = MPI_Barrier(MPI_COMM_WORLD);
  VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
#else
        printf("Skipped testing MPI_File_get_size because it's disabled\n");
#endif
    }

finish:
    if (buf)
  HDfree(buf);
    return (nerrs);
}
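
TWO_GB_LESS1, FOUR_GB_LESS1 and MB come from the test harness headers; assuming the usual definitions, the boundary checks above reduce to the following self-contained sketch:

#include <stdio.h>
#include <mpi.h>

#define MB            1048576LL        /* assumed: 2^20, as in the harness */
#define TWO_GB_LESS1  2147483647LL     /* 2^31 - 1 */
#define FOUR_GB_LESS1 4294967295LL     /* 2^32 - 1 */

int main(void)
{
    MPI_Offset mpi_off;

    mpi_off = 2 * 1024 * (MPI_Offset)MB;            /* exactly 2 GB */
    printf("2GB assignment: no overflow=%d, exact=%d\n",
           mpi_off > 0, (mpi_off - 1) == TWO_GB_LESS1);

    mpi_off = 4 * 1024 * (MPI_Offset)MB;            /* exactly 4 GB */
    printf("4GB assignment: no overflow=%d, exact=%d\n",
           mpi_off > 0, (mpi_off - 1) == FOUR_GB_LESS1);
    return 0;
}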
コード例 #20
0
ファイル: rijndael.c プロジェクト: tyagodm/AES-PARALELO
int main( int argc, char *argv[] )
{
    int itr;
    int operacao;
    char * chave_file;
    char * entrada_file;
    char * saida_file;

    octeto Nb,Nk,Nr;
    octeto bloco[4*8];
    octeto chave[4*8*15];

    int worldsize, rank;
    MPI_Status status;
    MPI_File chave_handle;
    MPI_File entrada_handle;
    MPI_File saida_handle;

    MPI_Offset entrada_bytes;
    unsigned int numero_blocos;
    unsigned int blocos_processo;
    MPI_Offset bloco_byte_inicio;
    MPI_Offset bloco_byte_fim;
    MPI_Offset iterador;


    MPI_Init(&argc,&argv);

    MPI_Comm_size(MPI_COMM_WORLD,&worldsize);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    operacao = INDEFINIDA;
    chave_file = NULL;
    entrada_file = NULL;
    saida_file = NULL;
    for(itr = 1;itr < argc;itr++)
    {
/* Usage instructions */
        if( strcmp(argv[itr],"-a") == 0 || strcmp(argv[itr],"--ajuda") == 0 || 
            strcmp(argv[itr],"-h") == 0 || strcmp(argv[itr],"--help") == 0 )
        {
            if(rank == 0)
            {
                printf(" Uso: mpiexec -n [PROCESSOS] ./rijndael [ARGUMENTO VALOR].\n");
                printf(" Encripta/Decripta um arquivo usando o algoritmo Rijndael(AES) extendido.\n");
                printf("  Argumentos obrigatorios:\n");
                printf("   -op,--operacao: Informa se o objetivo da execucao eh encriptar ou decriptar.\n");
                printf("                    * Os valores possiveis sao: \'encriptar\' e \'decriptar\'.\n");
                printf("   -e,-i,--entrada,--input: Caminho e nome do arquivo a ser criptografado.\n");
                printf("   -s,-o,--saida,--output: Caminho e nome do arquivo resultante do processo de criptografia da entrada.\n");
                printf("   -c,-k,--chave,--key: Caminho e nome do arquivo contendo a chave.\n");
                printf("  O arquivo contendo a chave eh em formato binario de acordo com a seguinte especificacao:\n");
                printf("   - O primeiro byte deve conter o tamanho do bloco (em palavras de 4 bytes).\n");
                printf("      * O bloco pode possuir tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - O segundo byte deve conter o tamanho da chave (em palavras de 4 bytes).\n");
                printf("      * Esta aplicacao aceita chaves com tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - Os proximos 4*[tamanho da chave] bytes do arquivo sao os bytes componentes da chave, que\n");
                printf("     devem estar (obrigatoriamente) escritos no formato hexadecimal da linguagem C (0xff).\n");
                printf("   * Eh recomendavel o uso de um editor hexadecimal na construcao do arquivo chave.\n");
            }
            goto finalizando;
        }

/* Operation to be performed */
        else
        if( strcmp(argv[itr],"-op") == 0 || strcmp(argv[itr],"--operacao") == 0 )
        {
            if( itr+1 < argc )
            {
                if( strcmp(argv[itr+1],"encriptar") == 0 )
                {
                    operacao = ENCRIPTAR;
                }
                else
                if( strcmp(argv[itr+1],"decriptar") == 0 )
                {
                    operacao = DECRIPTAR;
                }
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Key file */
        else
        if( strcmp(argv[itr],"-c") == 0 || strcmp(argv[itr],"--chave") == 0 || 
            strcmp(argv[itr],"-k") == 0 || strcmp(argv[itr],"--key") == 0 )
        {
            if(itr+1 < argc)
            {
                chave_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Input file */
        else
        if( strcmp(argv[itr],"-e") == 0 || strcmp(argv[itr],"--entrada") == 0 || 
            strcmp(argv[itr],"-i") == 0 || strcmp(argv[itr],"--input") == 0 )
        {
            if(itr+1 < argc)
            {
                entrada_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Output file */
        else 
        if( strcmp(argv[itr],"-s") == 0 || strcmp(argv[itr],"--saida") == 0 || 
            strcmp(argv[itr],"-o") == 0 || strcmp(argv[itr],"--output") == 0 )
        {
            if(itr+1 < argc)
            {
                saida_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }
/* Unrecognized argument */
        else
        {
            if(rank == 0)
            {
                printf("Erro nos argumentos passados.\n");
            }
            goto help;
        }
    }
/* End of argument parsing */

    if( operacao == INDEFINIDA || chave_file == NULL || entrada_file == NULL || saida_file == NULL )
    {
        if(rank == 0)
        {
            if( operacao == INDEFINIDA )
                printf("A operacao a ser realizada eh invalida ou nao foi especificada.\n");
            if( chave_file == NULL )
                printf("Esta faltando especificar o arquivo com a chave.\n");
            if( entrada_file == NULL )
                printf("Esta faltando especificar o arquivo de entrada.\n");
            if( saida_file == NULL )
                printf("Esta faltando especificar o arquivo de saida.\n");
        }
        goto help;
    }
/* End of argument validation */

    if( MPI_File_open( MPI_COMM_WORLD, chave_file, MPI_MODE_RDONLY, MPI_INFO_NULL, &chave_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nb,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho de um bloco no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nb < 4 || Nb > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de bloco invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nk,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nk < 4 || Nk > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de chave invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,chave,4*Nk,MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    MPI_File_close( &chave_handle );
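    /* Derive the round count from the block and key sizes, then expand the
       key schedule (numero_rodadas and KeyExpansion are defined elsewhere
       in this program). */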
    Nr = numero_rodadas(Nb,Nk);
    KeyExpansion(chave,Nb,Nk);

    if( MPI_File_open( MPI_COMM_WORLD, entrada_file, 
            MPI_MODE_RDONLY, 
            MPI_INFO_NULL, &entrada_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo de entrada (%s).\n",entrada_file);
        }
        goto help;
    }

    MPI_File_get_size(entrada_handle,&entrada_bytes);


    if( MPI_File_open( MPI_COMM_WORLD, saida_file, 
            MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL, 
            MPI_INFO_NULL, &saida_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na criacao do arquivo de saida (%s).\n",saida_file);
            printf("Uma possivel causa eh que o arquivo ja exista.\n");
        }
        goto help;
    }

    numero_blocos = ( entrada_bytes / (Nb*4) );
    blocos_processo = numero_blocos / worldsize;
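    /* Each rank handles an equal share of the full blocks; the leftover
       blocks (numero_blocos % worldsize) and the padding block are covered
       by the per-rank tail sections below. */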

    if( operacao == ENCRIPTAR )
    {
        MPI_File_set_size(saida_handle,(MPI_Offset)( (numero_blocos+1)*(Nb*4) ) );

        bloco_byte_inicio = 4*Nb*blocos_processo*rank;
        bloco_byte_fim = 4*Nb*blocos_processo*(rank+1);


        for( iterador = bloco_byte_inicio ; iterador < bloco_byte_fim ; iterador += (4*Nb) )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            AES_encriptar_bloco(bloco,Nb,chave,Nr);

            if( MPI_File_write_at(saida_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }
        }
        
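        /* Tail: leftover blocks that did not divide evenly among the ranks,
           plus one padding block at the end.  The last byte of the padding
           block records how many input bytes spill past the last full block,
           so decryption can restore the exact original size. */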
        iterador = 4*Nb*blocos_processo*worldsize + 4*Nb*rank;
        if( iterador <= numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }
            if( iterador == numero_blocos*4*Nb )
                bloco[ 4*Nb - 1 ] = (octeto)(entrada_bytes - numero_blocos*4*Nb);

            AES_encriptar_bloco(bloco,Nb,chave,Nr);

            if( MPI_File_write_at(saida_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }
        }
        if( rank == 0 )
        {
           // printf("A encriptacao do arquivo foi realizada com sucesso.\n");
        }
    }
    else 
    if( operacao == DECRIPTAR )
    {
        MPI_File_set_size(saida_handle,entrada_bytes);

        bloco_byte_inicio = 4*Nb*blocos_processo*rank;
        bloco_byte_fim = 4*Nb*blocos_processo*(rank+1);


        for( iterador = bloco_byte_inicio ; iterador < bloco_byte_fim ; iterador += (4*Nb) )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            AES_decriptar_bloco(bloco,Nb,chave,Nr);

            if( MPI_File_write_at(saida_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }
        }

        iterador = 4*Nb*blocos_processo*worldsize + 4*Nb*rank;
        if( iterador < numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            AES_decriptar_bloco(bloco,Nb,chave,Nr);

            if( MPI_File_write_at(saida_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }
        }

        MPI_Barrier( MPI_COMM_WORLD ); /* barrier: no process may read before the decrypted value has been written */

        if( MPI_File_read_at(saida_handle,entrada_bytes-1,bloco,1,MPI_BYTE,&status) != MPI_SUCCESS )
        {
            if( rank == 0 )
            {
                printf("Erro ao realizar leitura no arquivo de saida (%s).\n",saida_file);
            }
            goto help;
        }

        MPI_Barrier( MPI_COMM_WORLD ); /* barrier: no process may truncate the file before every process has read */

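        /* Shrink the output: drop the padding block, keeping only the number
           of leftover bytes recorded in its final byte (read into bloco[0]
           above). */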
        MPI_File_set_size(saida_handle,entrada_bytes - 4*Nb + bloco[0]);

        if( rank == 0 )
        {
           // printf("A decriptacao do arquivo foi realizada com sucesso.\n");
        }
    }

    goto finalizando;

sempar:
    if( rank == 0 )
    {
        printf("Sem par correspondente para a opcao %s.\n",argv[itr]);
    }

help:
    if( rank == 0 )
    {
        printf("Use a opcao --help para melhor entendimento do uso da aplicacao.\n");
    }

finalizando:
    MPI_Finalize( );
    return 0;
}
Code Example #21
0
int main(int argc, char **argv) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if(argc != 4 && rank == 0) {
        fprintf(stderr, "Usage: %s str1_file str2_file out_file\n", argv[0]);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    if(size == 1) {
        fprintf(stderr, "At least 2 processes expected\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    char *alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    int alphabet_len = strlen(alphabet);
    #ifdef DEBUG_TIME
    double start_t, end_t;
    start_t = end_t = 0; // suppress warnings about uninitialized variables
    if(rank == 0)
        start_t = MPI_Wtime();
    #endif
    // open the files holding str1 and str2 and read their contents
    char *filename_str1 = argv[1];
    char *filename_str2 = argv[2];
    char *filename_out = argv[3];
    MPI_File F_str1, F_str2;
    MPI_File_open(MPI_COMM_WORLD, filename_str1, MPI_MODE_RDONLY, MPI_INFO_NULL, &F_str1);
    MPI_File_open(MPI_COMM_WORLD, filename_str2, MPI_MODE_RDONLY, MPI_INFO_NULL, &F_str2);
    // get lengths of str1 and str2
    MPI_Offset str1_len, str2_len;
    MPI_File_get_size(F_str1, &str1_len);
    MPI_File_get_size(F_str2, &str2_len);
    char *str1 = (char*)malloc(sizeof(char) * (str1_len + 1));
    char *str2 = (char*)malloc(sizeof(char) * (str2_len + 1));
    // now read str1 and str2 from files
    MPI_File_read_at(F_str1, (MPI_Offset)0, str1, str1_len, MPI_CHAR, MPI_STATUS_IGNORE);
    str1[str1_len] = '\0';
    MPI_File_read_at(F_str2, (MPI_Offset)0, str2, str2_len, MPI_CHAR, MPI_STATUS_IGNORE);
    str2[str2_len] = '\0';
    MPI_File_close(&F_str1);
    MPI_File_close(&F_str2);
    #ifdef DEBUG_TIME
    if(rank == 0) {
        end_t = MPI_Wtime();
        printf("%f seconds for reading from files\n", end_t - start_t);
    }
    #endif

    int i;
    int start_index, end_index;
    int chunk_size = str2_len / (size - 1);
    if(rank > 0) {
        // every rank>0 processes part of str2
        // then rank=0 reduces results
        start_index = (rank - 1) * chunk_size;
        end_index = rank * chunk_size - 1;
        if(rank == size - 1) // the last rank also processes whatever is left over
            end_index = str2_len - 1;
        int *appearances = (int*)malloc(sizeof(int) * alphabet_len);
        // appearances[0] corresponds to 'A'
        // appearances[25] - 'Z'
        memset(appearances, 0, sizeof(int) * alphabet_len);
        int letter;
        for(i = start_index; i <= end_index; i++) {
            letter = str2[i] - 'A';
            appearances[letter] += 1;
        }
        MPI_Gatherv(appearances, alphabet_len, MPI_INT, NULL, NULL, NULL, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(appearances, alphabet_len, MPI_INT, 0, MPI_COMM_WORLD);
        // now every rank>0 contains _global_ count of how many times every letter from alphabet appears in str2
        int *ds[alphabet_len], ds_length[alphabet_len]; // ds=decreasing sequence
        for(i = 0; i < alphabet_len; i++) {
            ds[i] = (int*)malloc(sizeof(int) * appearances[i]);
            // ds[0] corresponds to letter 'A'
            // if str2="BACDEAFA", then ds[0]={7, 5, 1} (0-based positions of 'A', decreasing)
            memset(ds[i], 0, sizeof(int) * appearances[i]);
            ds_length[i] = 0;
        }
        // every rank>0 processes it's part of str2
        // but now rank=1 processes _last_ part of str2
        // for example, if str2='abcdefghj', size=4 (including rank=0)
        // then rank=1 processes 'ghj', rank=2 - 'def', rank=3 - 'abc'
        start_index = (size - (rank + 1)) * chunk_size;
        end_index = (size - rank) * chunk_size - 1;
        if(rank == 1) {
            end_index = str2_len - 1;
        }
        for(i = end_index; i >= start_index; i--) {
           letter = str2[i] - 'A';
           ds[letter][ds_length[letter]] = i;
           ds_length[letter] += 1;
        }
        // decreasing sequence is done, send results to rank=0
        MPI_Gather(ds_length, alphabet_len, MPI_INT, NULL, 0, MPI_INT, 0, MPI_COMM_WORLD);
        // now send all ds's
        for(i = 0; i < alphabet_len; i++) {
            MPI_Gatherv(ds[i], ds_length[i], MPI_INT, NULL, NULL, NULL, MPI_INT, 0, MPI_COMM_WORLD);
        }
        // cleanup
        for(i = 0; i < alphabet_len; i++) {
            free(ds[i]);
        }
        free(appearances);
    }
    if(rank == 0) {
        #ifdef DEBUG_TIME
        start_t = MPI_Wtime();
        #endif
        int *appearances = (int*)malloc(sizeof(int) * ((size - 1) * alphabet_len + 1)); // '+ 1' to receive value from rank=0
                                                                                        // but it is unused
        int *recvcounts = (int*)malloc(sizeof(int) * size);
        int *displs = (int*)malloc(sizeof(int) * size);
        recvcounts[0] = 1;
        displs[0] = 0;
        for(i = 1; i < size; i++) {
            start_index = (i - 1) * chunk_size;
            //end_index = i * chunk_size - 1;
            //if(i == size - 1)
            //    end_index = str2_len - 1;
            recvcounts[i] = alphabet_len;
            displs[i] = alphabet_len * (i - 1) + 1;
        }
        MPI_Gatherv(MPI_IN_PLACE, 1, MPI_INT, appearances, recvcounts, displs, MPI_INT, 0, MPI_COMM_WORLD);
        // reduce data from all processes
        int k;
        int letter_displacement;
        for(i = 1; i <= alphabet_len; i++) {
            letter_displacement = i - 1;
            for(k = 2; k < size; k++) {
                appearances[i] += appearances[displs[k] + letter_displacement];
            }
        }
        #ifdef DEBUG_TIME
        end_t = MPI_Wtime();
        printf("%f seconds for calculation of how many times every letter appears in str2\n", end_t - start_t);
        start_t = MPI_Wtime();
        #endif
        // now appearances[1] through appearances[26] contain _global_ count of how many times every letter appears in str2
        // broadcast it to all processes
        MPI_Bcast(appearances + 1, alphabet_len, MPI_INT, 0, MPI_COMM_WORLD);
        // receive decreasing sequences' lengths
        int ds_lengths[alphabet_len * size];
    // with MPI_IN_PLACE at the root, ds_lengths[0] through ds_lengths[25] (rank 0's own slot) are not significant
        MPI_Gather(MPI_IN_PLACE, alphabet_len, MPI_INT, ds_lengths, alphabet_len, MPI_INT, 0, MPI_COMM_WORLD);
        int *ds[alphabet_len];
        recvcounts[0] = 1;
        displs[0] = 0;
        for(i = 0; i < alphabet_len; i++) {
            // only appearances[1] through appearances[26] contain _global_ count
            ds[i] = (int*)malloc(sizeof(int) * (appearances[i + 1] + 1)); // '+1' to receive value from rank=0
            for(k = 1; k < size; k++) {
                recvcounts[k] = ds_lengths[alphabet_len * k + i];
                //k=1 - rank=1: ds_lengths[26] - 'A', ds_lengths[27] - 'B'
                //k=2 - rank=2: ds_lengths[52] - 'A', ds_lengths[53] - 'B'
                if(k == 1) {
                    displs[k] = 1;
                } else {
                    displs[k] = ds_lengths[alphabet_len * (k - 1) + i] + displs[k - 1];
                }
            }
            MPI_Gatherv(MPI_IN_PLACE, 1, MPI_INT, ds[i], recvcounts, displs, MPI_INT, 0, MPI_COMM_WORLD);
        }
        #ifdef DEBUG_TIME
        end_t = MPI_Wtime();
        printf("%f seconds for creation of decreasing sequences\n", end_t - start_t);
        start_t = MPI_Wtime();
        #endif
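        // Patience-sorting-style LCS: reduce LCS to a longest increasing
        // subsequence over match positions.  For each character of str1, its
        // str2 positions are visited in decreasing order; each position
        // extends the leftmost decreasing sequence whose tail is still
        // larger, or starts a new sequence.  The number of sequences is the
        // LCS length, and the sequence tails spell out one LCS.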
        int letter_appearances;
        int dec_sequences_count = 1;
        int **dec_sequences = (int**)malloc(sizeof(int*) * 1);
        dec_sequences[0] = (int*)malloc(sizeof(int) * appearances[str1[0] - 'A' + 1]);
        int *dec_sequences_lengths = (int*)malloc(sizeof(int) * 1);
        dec_sequences_lengths[0] = appearances[str1[0] - 'A' + 1];
        for(i = 0; i < dec_sequences_lengths[0]; i++) {
            dec_sequences[0][i] = ds[(int)(str1[0] - 'A')][i + 1];
        }
        int seq;
        int letter;
        for(i = 1; i < str1_len; i++) {
            letter = str1[i] - 'A';
            letter_appearances = appearances[letter + 1]; // '+1' because appearances[0] is not valid
            for(k = 1; k <= letter_appearances; k++) {
                seq = lower_bound(dec_sequences, dec_sequences_lengths, dec_sequences_count, ds[letter][k]);
                if(seq >= 0) {
                    dec_sequences_lengths[seq] += 1;
                    dec_sequences[seq] = (int*)realloc(dec_sequences[seq], sizeof(int) * dec_sequences_lengths[seq]); // realloc per element is bad, I know,
                    dec_sequences[seq][dec_sequences_lengths[seq] - 1] = ds[letter][k]; // but not worth rewriting: it would still be slower
                } else {                                                                // than lcs_sequential
                    dec_sequences_count += 1;
                    dec_sequences_lengths = (int*)realloc(dec_sequences_lengths, sizeof(int) * dec_sequences_count);
                    dec_sequences_lengths[dec_sequences_count - 1] = 1;
                    dec_sequences = (int**)realloc(dec_sequences, sizeof(int*) * dec_sequences_count);
                    dec_sequences[dec_sequences_count - 1] = (int*)malloc(sizeof(int) * 1);
                    dec_sequences[dec_sequences_count - 1][0] = ds[letter][k];
                }
            }
        }
        #ifdef DEBUG_TIME
        end_t = MPI_Wtime();
        printf("%f seconds for LCS creation\n", end_t - start_t);
        #endif
        FILE *F_out = fopen(filename_out, "w");
        if(!F_out) {
            fprintf(stderr, "Unable to open file '%s' for writing\nOutput to console...\n", filename_out);
            F_out = stdout;
        }
        fprintf(F_out, "lcs length = %d\n", dec_sequences_count);
        fprintf(F_out, "lcs sequence\n");
        for(i = 0; i < dec_sequences_count; i++) {
            fprintf(F_out, "%c", str2[dec_sequences[i][dec_sequences_lengths[i] - 1]]);
        }
        fprintf(F_out, "\n");
        fclose(F_out);
        // cleanup
        free(dec_sequences_lengths);
        for(i = 0; i < dec_sequences_count; i++) {
            free(dec_sequences[i]);
        }
        free(dec_sequences);
        for(i = 0; i < alphabet_len; i++) {
            free(ds[i]);
        }
        free(appearances);
        free(recvcounts);
        free(displs);
    }

    free(str1);
    free(str2);
    MPI_Finalize();
    return 0;
}
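Note: the lower_bound helper called above is not part of this listing. A minimal sketch consistent with its use might look like the following; it assumes the sequence tails remain sorted in increasing order (which the insertion logic maintains) and returns -1 when no existing sequence can be extended, so the exact signature in the original project may differ.

/* Hypothetical helper: binary-search for the leftmost decreasing sequence
 * whose tail is still greater than val; return its index, or -1 if val
 * cannot extend any existing sequence. */
static int lower_bound(int **seqs, int *lens, int count, int val)
{
    int lo = 0, hi = count - 1, found = -1;
    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (seqs[mid][lens[mid] - 1] > val) {
            found = mid;    /* candidate; keep looking further left */
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return found;
}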
Code Example #22
0
File: misc.cpp Project: carsten-clauss/MP-MPICH
int main(int argc, char **argv)
{
    int buf[1024], amode, flag, mynod, len, i;
    MPI_File fh;
    MPI_Status status;
    MPI_Datatype newtype;
    MPI_Offset disp, offset;
    MPI_Group group;
    MPI_Datatype etype, filetype;
    char datarep[25], *filename;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!mynod) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    printf("\n*#  Usage: misc  <mpiparameter> -- -fname filename\n\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+1);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+1);
	MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }


    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    MPI_File_write(fh, buf, 1024, MPI_INT, &status);

    MPI_File_sync(fh);

    MPI_File_get_amode(fh, &amode);
    if (!mynod) printf("testing MPI_File_get_amode\n");
    if (amode != (MPI_MODE_CREATE | MPI_MODE_RDWR))
	printf("amode is %d, should be %d\n\n", amode, MPI_MODE_CREATE |
                      MPI_MODE_RDWR);

    MPI_File_get_atomicity(fh, &flag);
    if (flag) printf("atomicity is %d, should be 0\n", flag);
    if (!mynod) printf("setting atomic mode\n");
    MPI_File_set_atomicity(fh, 1);
    MPI_File_get_atomicity(fh, &flag);
    if (!flag) printf("atomicity is %d, should be 1\n", flag);
    MPI_File_set_atomicity(fh, 0);
    if (!mynod) printf("reverting back to nonatomic mode\n");

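    /* File view: 10 blocks of 10 ints with a stride of 20 ints, starting
       1000 bytes into the file. */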
    MPI_Type_vector(10, 10, 20, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);

    MPI_File_set_view(fh, 1000, MPI_INT, newtype, "native", MPI_INFO_NULL);
    if (!mynod) printf("testing MPI_File_get_view\n");
    MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
    if ((disp != 1000) || strcmp(datarep, "native"))
	printf("disp = %I64, datarep = %s, should be 1000, native\n\n", disp, datarep);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, 10, &disp);
    if (disp != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) disp, (int) (1000+20*sizeof(int)));

    MPI_File_get_group(fh, &group);

    if (!mynod) printf("testing MPI_File_set_size\n");
    MPI_File_set_size(fh, 1000+15*sizeof(int));
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_sync(fh);
    MPI_File_get_size(fh, &disp);
    if (disp != 1000+15*sizeof(int)) printf("file size = %lld, should be %d\n\n", (long long) disp, (int) (1000+15*sizeof(int)));
 
    if (!mynod) printf("seeking to eof and testing MPI_File_get_position\n");
    MPI_File_seek(fh, 0, MPI_SEEK_END);
    MPI_File_get_position(fh, &disp);
    if (disp != 10) printf("file pointer posn = %lld, should be 10\n\n", (long long) disp);

    if (!mynod) printf("testing MPI_File_get_byte_offset\n");
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != (1000+20*sizeof(int))) printf("byte offset = %lld, should be %d\n\n", (long long) offset, (int) (1000+20*sizeof(int)));
    MPI_Barrier(MPI_COMM_WORLD);

    if (!mynod) printf("testing MPI_File_seek with MPI_SEEK_CUR\n");
    MPI_File_seek(fh, -10, MPI_SEEK_CUR);
    MPI_File_get_position(fh, &disp);
    MPI_File_get_byte_offset(fh, disp, &offset);
    if (offset != 1000)
	printf("file pointer posn in bytes = %I64, should be 1000\n\n", offset);

    if (!mynod) printf("preallocating disk space up to 8192 bytes\n");
    MPI_File_preallocate(fh, 8192);

    if (!mynod) printf("closing the file and deleting it\n");
    MPI_File_close(&fh);
    
    MPI_Barrier(MPI_COMM_WORLD);
    if (!mynod) MPI_File_delete(filename, MPI_INFO_NULL);

    MPI_Type_free(&newtype);
    MPI_Type_free(&filetype);
    MPI_Group_free(&group);
    free(filename);
    MPI_Finalize(); 
    return 0;
}
Code Example #23
0
File: large_file.c Project: meghnave/SpherePacking
int main(int argc, char **argv)
{
    MPI_File fh;
    MPI_Status status;
    MPI_Offset size;
    long long *buf, i;
    char *filename;
    int j, mynod, nprocs, len, flag, err;

    MPI_Init(&argc,&argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (nprocs != 1) {
	fprintf(stderr, "Run this program on one process only\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

    i = 1;
    while ((i < argc) && strcmp("-fname", *argv)) {
	i++;
	argv++;
    }
    if (i >= argc) {
	fprintf(stderr, "\n*#  Usage: large -fname filename\n\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
    argv++;
    len = strlen(*argv);
    filename = (char *) malloc(len+1);
    strcpy(filename, *argv);
    fprintf(stderr, "This program creates an 4 Gbyte file. Don't run it if you don't have that much disk space!\n");

    buf = (long long *) malloc(SIZE * sizeof(long long));
    if (!buf) {
	fprintf(stderr, "not enough memory to allocate buffer\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    for (i=0; i<NTIMES; i++) {
	for (j=0; j<SIZE; j++)
	    buf[j] = i*SIZE + j;
	
	err = MPI_File_write(fh, buf, SIZE, MPI_DOUBLE, &status);
        /* MPI_DOUBLE because not all MPI implementations define
           MPI_LONG_LONG_INT, even though the C compiler supports long long. */
        if (err != MPI_SUCCESS) {
	    fprintf(stderr, "MPI_File_write returned error\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
    }

    MPI_File_get_size(fh, &size);
    fprintf(stderr, "file size = %lld bytes\n", size);

    MPI_File_seek(fh, 0, MPI_SEEK_SET);

    for (j=0; j<SIZE; j++) buf[j] = -1;

    flag = 0;
    for (i=0; i<NTIMES; i++) {
	err = MPI_File_read(fh, buf, SIZE, MPI_DOUBLE, &status);
        /* MPI_DOUBLE because not all MPI implementations define
           MPI_LONG_LONG_INT, even though the C compiler supports long long. */
        if (err != MPI_SUCCESS) {
	    fprintf(stderr, "MPI_File_write returned error\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	for (j=0; j<SIZE; j++) 
	    if (buf[j] != i*SIZE + j) {
		fprintf(stderr, "error: buf %d is %lld, should be %lld \n", j, buf[j], 
                                 i*SIZE + j);
		flag = 1;
	    }
    }

    if (!flag) fprintf(stderr, "Data read back is correct\n");
    MPI_File_close(&fh);

    free(buf);
    free(filename);
    MPI_Finalize(); 
    return 0;
}
Code Example #24
0
File: ssio.c Project: srs51/SRS-3000
int ssioOpen(const char *filename, SSIO *ssio, const u_int mode)
{
	assert(filename != NULL && ssio != NULL);
	assert(mode == SSIO_READ || mode == SSIO_WRITE || mode == SSIO_UPDATE);

#ifdef SSIO_USE_MPI

    // need this check for ss2bt, ssa, etc
    if (!ssio_initd)
    {
        ssioInitialise();
    }

    int open_mode;
    if (SSIO_READ == mode)
    {
        open_mode = MPI_MODE_RDONLY;
    }
    else if (SSIO_WRITE == mode)
    {
        open_mode = MPI_MODE_WRONLY | MPI_MODE_CREATE;
    }
    else //if (SSIO_UPDATE == mode)
    {
        open_mode = MPI_MODE_RDWR;
    }

    // save for ssioData later use
    ssio->fmode = mode;

    int err;
    err = MPI_File_open(process_comm,
                        (char *)filename,
                        open_mode,
                        MPI_INFO_NULL,
                        &ssio->mfile);

    // if anything went wrong
    if (err)
    {
        return 1;
    }

    if (SSIO_READ == mode)
    {
        size_t particle_div = 1;

        // get file size and correct for the header being there
        MPI_Offset filesize;
        err |= MPI_File_get_size(ssio->mfile,
                                 &filesize);

        int n_ranks, rank;
        err |= MPI_Comm_rank(process_comm,
                             &rank);
        err |= MPI_Comm_size(process_comm,
                             &n_ranks);

        // how many particles each process will use (nLocal in pkd.c?)
        const size_t total_particles = ((filesize - SSHEAD_SIZE)/SSDATA_SIZE);
        const size_t local_particles = total_particles/n_ranks;

        ssio->total_to_buffer = local_particles;
        ssio->total_read = 0;
        ssio->extra = total_particles % n_ranks;

        // check if there are less particles per process than max buffer size
        if (MAX_BUF_PARTICLES > local_particles)
        {
            // just load the max amount in each process
            ssio->max_buf_particles = local_particles;
        }
        // check if it can be nicely divided up
        else
        {
            // grow the divisor until each chunk fits in the buffer and
            // divides the local particle count evenly, giving up once the
            // divisor cannot usefully grow any further
            while ((local_particles/particle_div > MAX_BUF_PARTICLES
                    || local_particles % particle_div)
                   && particle_div < local_particles/2)
            {
                particle_div++;
            }

            ssio->max_buf_particles = local_particles/particle_div;
        }

        // didn't divide evenly
        if (local_particles % particle_div)
        {
            if (!rank)
            {
                fprintf(stderr, "%d processes\n", n_ranks);
                fprintf(stderr, "%zu total particles\n", total_particles);
                //fprintf(stderr, "%lld local particles\n", local_particles);
                fprintf(stderr, "%f per process\n", total_particles/(float)n_ranks);

                // FIXME do something here instead of erroring out
                fprintf(stderr, "Couldn't divide particles nicely between processes\n");
                fprintf(stderr, "Change max buf particles\n");
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
            else
            {
                MPI_Barrier(MPI_COMM_WORLD);
            }
        }
        //fprintf(stderr, "%lld particles per process\n", local_particles);
        //fprintf(stderr, "%lld particles in one read\n", ssio->max_buf_particles);

        if (ssio->extra && !rank)
        {
            fprintf(stdout, "Prepared to read an extra %lu in a process\n", ssio->extra);
        }
    }
    else
    {
        ssio->max_buf_particles = MAX_BUF_PARTICLES;
    }

    ssio->particles_written = 0;
    ssio->particles_read = ssio->max_buf_particles;
    ssio->file_buf = (char *) malloc(ssio->max_buf_particles*SSDATA_SIZE);
	assert(ssio->file_buf != NULL);
    ssio->cur_buf_ptr = ssio->file_buf;

#else

    // original xdr
	const char type[][3] = {"r","w","r+"};
	const enum xdr_op op[] = {XDR_DECODE,XDR_ENCODE,XDR_ENCODE};
	if (!(ssio->fp = fopen(filename,type[mode])))
		return 1;
	xdrstdio_create(&ssio->xdrs,ssio->fp,op[mode]);

#endif /* SSIO_USE_MPI */

	return 0;
}
Code Example #25
0
File: FileIO.cpp Project: HPCProjectsTry/grappa
void read_unordered_shared( const char * filename, void * local_ptr, size_t size ) {
  MPI_Status status;
  MPI_File infile;
  MPI_Datatype datatype;
  MPI_Info info;
  char * local_char_ptr = static_cast< char * >( local_ptr );

  // Stupid MPI uses a signed integer for the count of data elements
  // to write. On all the machines we use, this means the max count
  // is 2^31-1. To work around this, we create a datatype large
  // enough that we never need a count value larger than 2^30.

  // move this many gigabyte chunks
  const size_t gigabyte = 1L << 30;
  size_t round_count = size / gigabyte;

  // if there will be any bytes left over, move those too
  if( size & (gigabyte - 1) ) round_count++;

  MPI_CHECK( MPI_Info_create( &info ) );

  if( FLAGS_optimize_for_lustre ) {
    std::map< const std::string, const std::string > info_map = {{
        // disable independent file operations, since we know this
        // routine is the only one touching the file
        { "romio_no_indep_rw", "true" }

        // // disable collective io on lustre for "small" files <= 1GB
        // , { "romio_lustre_ds_in_coll", "1073741825" ) );
        
        // set collective buffering block size to something reasonable
        , { "cb_buffer_size", "33554432" }
        
        // // disable collective buffering for writing
        // , { "romio_cb_write", "disable" }
        // // disable collective buffering for writing
        // , { "romio_cb_read", "disable" }
        
        // disable data sieving for writing
        , { "romio_ds_write", "disable" }
        // disable data sieving for writing
        , { "romio_ds_read", "disable" }
        
        
        // enable direct IO
        , { "direct_read", "true" }
        , { "direct_write", "true" }
        
        // ???
        // , { "romio_lustre_co_ratio", "1" }
        
        // maybe 
        // , { "access_style", "read_once" }
      }};

    set_mpi_info( info, info_map );
  }

  // open file for reading
  int mode = MPI_MODE_RDONLY;
  MPI_CHECK( MPI_File_open( global_communicator.grappa_comm, const_cast< char * >( filename ), mode, info, &infile ) );

  // make sure we will read exactly the whole file
  int64_t total_size = 0;
  MPI_CHECK( MPI_Reduce( &size, &total_size, 1, MPI_INT64_T, MPI_SUM, 0, global_communicator.grappa_comm ) );
  if( 0 == Grappa::mycore() ) {
    MPI_Offset file_size = 0;
    MPI_CHECK( MPI_File_get_size( infile, &file_size ) );
    CHECK_EQ( file_size, total_size ) << "Sizes don't line up to read the entire file?";
  }

  // // dump file info for debugging
  // if( Grappa::mycore == 0 ) {
  //   dump_mpi_file_info( infile );
  // }

  // compute number of rounds required to read entire file
  MPI_CHECK( MPI_Allreduce( MPI_IN_PLACE, &round_count, 1, MPI_INT64_T, MPI_MAX, global_communicator.grappa_comm ) );

  // read file in gigabyte-sized rounds
  for( int i = 0; i < round_count; ++i ) {
    if( size > gigabyte ) {
      MPI_CHECK( MPI_File_read_shared( infile, local_char_ptr, gigabyte, MPI_BYTE, &status ) );
      size -= gigabyte;
      local_char_ptr += gigabyte;
    } else {
      MPI_CHECK( MPI_File_read_shared( infile, local_char_ptr, size, MPI_BYTE, &status ) );
    }
  }

  MPI_CHECK( MPI_Info_free( &info ) );
  MPI_CHECK( MPI_File_close( &infile ) );
}
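Note: set_mpi_info is not part of this listing. A plausible sketch, assuming it simply copies each key/value pair into the MPI_Info object (the real Grappa helper may differ in signature and error handling):

// Hypothetical helper: copy each key/value pair into an MPI_Info object.
void set_mpi_info( MPI_Info info,
                   const std::map< const std::string, const std::string > & info_map ) {
  for( auto & kv : info_map ) {
    // const_cast keeps this compatible with pre-MPI-3 prototypes
    MPI_CHECK( MPI_Info_set( info,
                             const_cast< char * >( kv.first.c_str() ),
                             const_cast< char * >( kv.second.c_str() ) ) );
  }
}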
Code Example #26
0
int main(int argc, char **argv) {
	if(argc < 2) {
		printf("Usage: %s infile\n", argv[0]);
		exit(1);
	}

	MPI_Comm comm = MPI_COMM_WORLD;
	MPI_Info mpi_info = MPI_INFO_NULL;
	MPI_File fh, fw;
	MPI_Offset file_size, frag_size, read_size;
	MPI_Offset offset;
	MPI_Status status;
	int retval;
	double start, end;

	unsigned char *buf, *outbuf, *outProps;
	size_t destlen;
	size_t propsize = 5;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(comm, &mpi_rank);
	MPI_Comm_size(comm, &mpi_size);

	MPI_Barrier(comm);
	start = MPI_Wtime();
	/*
	 * read
	 */
	MPI_File_open(comm, argv[1], MPI_MODE_RDONLY, mpi_info, &fh);
	MPI_File_get_size(fh, &file_size);
	//printf("file size:%d\n", file_size);

	frag_size = file_size / mpi_size;
	offset = frag_size * mpi_rank;
	/* the last rank also reads the remainder left by the integer division */
	read_size = (mpi_rank == mpi_size - 1) ? (file_size - offset) : frag_size;
	//printf("rank %d offset %d\n", mpi_rank, offset);

	buf = malloc(read_size + 2);
	assert(buf != NULL);
	/* the file is already open from the size query above */
	MPI_File_read_at(fh, offset, buf, read_size, MPI_CHAR, &status);
	MPI_File_close(&fh);

	/*
	 * compress
	 */
	destlen = 1.2 * read_size + 1024 * 1024;
	outbuf = (unsigned char *)malloc(destlen);
	assert(outbuf != NULL);
	destlen = destlen - DATA_OFFSET -propsize;
	outProps = outbuf + DATA_OFFSET;
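	/* Output layout: [DATA_OFFSET bytes: original fragment length]
	 * [propsize bytes: LZMA properties] [compressed data] */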
	retval = LzmaCompress(outbuf + DATA_OFFSET + propsize, &destlen, buf, read_size, outProps, &propsize, -1, 0, -1, -1, -1, -1, 1);
	if(retval != SZ_OK) {
		error_print(retval);
		free(buf);
		free(outbuf);
		exit(1);
	}

	/*
	 * write
	 */
	char *fwname;
	unsigned long long *len;
	fwname = get_fwname(argv[1]);
	len = (unsigned long long *)outbuf;
	*len = read_size;
	//printf("%s %d\n", fwname, destlen);
	MPI_File_open(MPI_COMM_SELF, fwname, MPI_MODE_WRONLY | MPI_MODE_CREATE, mpi_info, &fw);
	MPI_File_set_size(fw, destlen + DATA_OFFSET + propsize);
	MPI_File_write(fw, outbuf, destlen + DATA_OFFSET + propsize, MPI_CHAR, &status);
	MPI_File_close(&fw);

	MPI_Barrier(comm);
	end = MPI_Wtime();

	size_t cmprs_len;
	double cmprs_ratio;
	MPI_Reduce(&destlen, &cmprs_len, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, comm);
	if(0 == mpi_rank) {
		cmprs_ratio = (double)cmprs_len / file_size;
		printf("file size: %lu\n", file_size);
		printf("after compressed: %lu\n", cmprs_len);
		printf("compress ratio: %f\n", cmprs_ratio);
		printf("number of processes: %d\n", mpi_size);
		printf("time used: %fs\n", end - start);
	}
	MPI_Finalize();
	free(fwname);
	free(buf);
	free(outbuf);
	return 0;
}
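Note: get_fwname is not included in this listing. A minimal sketch consistent with its use above (the caller frees the result) might look like this; the ".lzma" suffix is an illustrative assumption, not necessarily the original project's convention.

/* Hypothetical helper: build a heap-allocated output file name from the
 * input name.  The ".lzma" suffix is an assumption for illustration. */
static char *get_fwname(const char *fname)
{
	size_t len = strlen(fname);
	char *out = (char *) malloc(len + sizeof(".lzma"));
	assert(out != NULL);
	strcpy(out, fname);
	strcat(out, ".lzma");
	return out;
}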