Example #1
void _SolutionVector_ShareValuesNotStoredLocally(
	SolutionVector*		self,
	Index*			reqFromOthersCounts,
	RequestInfo**		reqFromOthersInfos,
	Dof_EquationNumber**	reqFromOthers,
	double*			localSolnVecValues )
{

	FeVariable*		feVar = self->feVariable;
	FeMesh*			feMesh = feVar->feMesh;
	FeEquationNumber*	eqNum = self->eqNum;
	Comm*			comm;
	MPI_Comm		mpiComm;
	Partition_Index		nProc;
	Partition_Index		myRank;
	Partition_Index		proc_I;
	Index			req_I;
	Index			indexIntoLocalSolnVecValues;
	MPI_Status		status;
	Index*			reqFromMeCounts;
	Dof_EquationNumber**	reqFromMe;
	double**		reqValuesFromMe;
	MPI_Request**		reqValuesFromMeHandles;
	MPI_Request**		reqFromOthersHandles;
	double**		reqValuesFromOthers;
	MPI_Request**		reqValuesFromOthersHandles;
	Bool*			reqValuesFromOthersReceived;
	Partition_Index		reqValueSetsFromOthersNotYetReceivedCount;
	Dof_EquationNumber	totalRequestedFromOthers = 0;
	Dof_EquationNumber	totalRequestedFromMe = 0;
	int			ierr;

	Journal_DPrintf( self->debug, "In %s - for \"%s\"\n", __func__, self->name );
	Stream_IndentBranch( StgFEM_Debug );

	comm = Mesh_GetCommTopology( feMesh, MT_VERTEX );
	mpiComm = Comm_GetMPIComm( comm );
	MPI_Comm_size( mpiComm, (int*)&nProc );
	MPI_Comm_rank( mpiComm, (int*)&myRank );

	reqFromMeCounts = Memory_Alloc_Array( Index, nProc, "reqFromMeCounts" );
	reqFromOthersHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromOthersHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromMeHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromOthers = Memory_Alloc_2DComplex( double, nProc, reqFromOthersCounts, "reqValuesFromOthers" );
	reqValuesFromOthersReceived = Memory_Alloc_Array_Unnamed( Bool, nProc );

	#if DEBUG
	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
		Journal_DPrintf( self->debug, "Final list of vec values I need from other procs:\n" );
		for ( proc_I=0; proc_I < nProc; proc_I++ ) {
			if ( proc_I == myRank ) continue;
			Journal_DPrintf( self->debug, "\t%d[0-%d]: ", proc_I, reqFromOthersCounts[proc_I] );
			for ( req_I=0; req_I < reqFromOthersCounts[proc_I]; req_I++ ) {
				RequestInfo* reqInfo = &reqFromOthersInfos[proc_I][req_I];
				Journal_DPrintf( self->debug, "(lnode %d, dof %d -> %d ), ",
					reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
					reqFromOthers[proc_I][req_I] );
			}
			Journal_DPrintf( self->debug, "\n" );
		}
	}
	#endif

	/* send out my request counts, receive the req. counts others want from me */
	MPI_Alltoall( reqFromOthersCounts, 1, MPI_UNSIGNED,
		      reqFromMeCounts, 1, MPI_UNSIGNED, mpiComm );

	Journal_DPrintf( self->debug, "After MPI_Alltoall- counts are:\n" );
	totalRequestedFromOthers = 0;
	totalRequestedFromMe = 0;
	Stream_Indent( self->debug );
	Journal_DPrintf( self->debug, "reqFromOthersCounts: " );
	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		if ( proc_I == myRank ) continue;
		Journal_DPrintf( self->debug, "\tp%d:%d, ", proc_I, reqFromOthersCounts[proc_I] );
		totalRequestedFromOthers += reqFromOthersCounts[proc_I];
	}
	Journal_DPrintf( self->debug, "\n" );
	Journal_DPrintf( self->debug, "reqFromMeCounts: " );
	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		if ( proc_I == myRank ) continue;
		Journal_DPrintf( self->debug, "\tp%d:%d, ", proc_I, reqFromMeCounts[proc_I] );
		totalRequestedFromMe += reqFromMeCounts[proc_I];
	}
	Journal_DPrintf( self->debug, "\n" );
	Stream_UnIndent( self->debug );

	if ( ( totalRequestedFromOthers == 0) && (totalRequestedFromMe == 0) )
	{
		Journal_DPrintf( self->debug, "No vector values either required from others or "
			"required by others from me, therefore cleaning up memory and returning.\n" );
		Memory_Free( reqFromMeCounts );
		Memory_Free( reqFromOthersHandles );
		Memory_Free( reqValuesFromOthersHandles );
		Memory_Free( reqValuesFromMeHandles );
		Memory_Free( reqValuesFromOthers );
		Memory_Free( reqValuesFromOthersReceived );
		Stream_UnIndentBranch( StgFEM_Debug );
		return;
	}

	Journal_DPrintfL( self->debug, 2, "Starting non-blocking sends of my lists of vector entry indices I want from others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
/* Journal_Printf( Journal_Register( Info_Type, (Name)"mpi"  ),  "!!! line %d, proc_I %d: count = %u\n", __LINE__, proc_I, reqFromOthersCounts[proc_I] ); */
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "Sending to proc %d the list of %d vector entry indices I want from it:\n"
				"\t(tracking via reqFromOthersHandles[%d], tag %d)\n", proc_I,
				reqFromOthersCounts[proc_I], proc_I, VALUE_REQUEST_TAG );

			reqFromOthersHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			ierr=MPI_Isend( reqFromOthers[proc_I], reqFromOthersCounts[proc_I], MPI_UNSIGNED,
				proc_I, VALUE_REQUEST_TAG, mpiComm, reqFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );


	Journal_DPrintfL( self->debug, 2, "Starting non-blocking receive of the vector entries I want from others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "Posting recv reqst from proc %d for the %d vector entries I want from it:\n"
				"\t(tracking via reqValuesFromOthersHandles[%d], tag %d)\n", proc_I,
				reqFromOthersCounts[proc_I], proc_I, VALUE_TAG );
			reqValuesFromOthersHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			ierr=MPI_Irecv( reqValuesFromOthers[proc_I], reqFromOthersCounts[proc_I], MPI_DOUBLE,
				proc_I, VALUE_TAG, mpiComm, reqValuesFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );

	Journal_DPrintfL( self->debug, 2, "Starting blocking receive of the lists of vector entry indices "
		"others want from me:\n" );
	Stream_Indent( self->debug );
	reqFromMe = Memory_Alloc_2DComplex( Dof_EquationNumber, nProc, reqFromMeCounts, "reqFromMe" );
	reqValuesFromMe = Memory_Alloc_2DComplex( double, nProc, reqFromMeCounts, "reqValuesFromMe" );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
/* Journal_Printf( Journal_Register( Info_Type, (Name)"mpi"  ),  "!!! line %d, proc_I %d: count = %u\n", __LINE__, proc_I, reqFromMeCounts[proc_I] ); */
		if ( reqFromMeCounts[proc_I] > 0 ) {
			ierr=MPI_Recv( reqFromMe[proc_I], reqFromMeCounts[proc_I], MPI_UNSIGNED,
				proc_I, VALUE_REQUEST_TAG, mpiComm, &status );
			Journal_DPrintfL( self->debug, 3, "Received a list of %u requested vector entry indices from proc %u, "
				"with tag %d\n", reqFromMeCounts[proc_I], proc_I, status.MPI_TAG );
		}
	}
	Stream_UnIndent( self->debug );

	#if DEBUG
	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
		Journal_DPrintf( self->debug, "Final lists of vector entry indices other procs want from me are:\n" );
		Stream_Indent( self->debug );
		for ( proc_I=0; proc_I < nProc; proc_I++ ) {
			if ( proc_I == myRank ) continue;
			if ( reqFromMeCounts[proc_I] > 0 ) {
				Journal_DPrintf( self->debug, "%d[0-%d]: ", proc_I, reqFromMeCounts[proc_I] );
				for ( req_I=0; req_I < reqFromMeCounts[proc_I]; req_I++ ) {
					Journal_DPrintf( self->debug, "(eqNum %d), ", reqFromMe[proc_I][req_I] );
				}
				Journal_DPrintf( self->debug, "\n" );
			}
		}
		Stream_UnIndent( self->debug );
	}
	#endif

	/* for all those requested from me, non-blocking send out values */
	Journal_DPrintfL( self->debug, 2, "Beginning non-blocking send out of vector entry lists requested by others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromMeCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 3, "list to proc %d is: ", proc_I );
			for ( req_I=0; req_I < reqFromMeCounts[proc_I]; req_I++ ) {
				/* look up and fill in correct value in array */
				indexIntoLocalSolnVecValues = *(int*)STreeMap_Map( eqNum->ownedMap,
										   reqFromMe[proc_I] + req_I );
				reqValuesFromMe[proc_I][req_I] = localSolnVecValues[indexIntoLocalSolnVecValues];
				Journal_DPrintfL( self->debug, 3, "%d=%f, ", reqFromMe[proc_I][req_I],
					reqValuesFromMe[proc_I][req_I] );
			}
			Journal_DPrintfL( self->debug, 3, "\n" );
			/* Non-blocking send out the now-complete list to this processor */
			reqValuesFromMeHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			Journal_DPrintfL( self->debug, 2, "Sending to proc %d the list of %d vector entries they want:\n"
				"\t(tracking via reqValuesFromMe[%d], tag %d)\n", proc_I,
				reqFromMeCounts[proc_I], proc_I, VALUE_TAG );
			ierr=MPI_Isend( reqValuesFromMe[proc_I], reqFromMeCounts[proc_I], MPI_DOUBLE,
				proc_I, VALUE_TAG, mpiComm, reqValuesFromMeHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );

	Journal_DPrintfL( self->debug, 1, "Starting iterative-test receive of the vector entries I "
		"requested from others:\n" );
	/* Set up an array for keeping track of who we've received things from
	 * already */
	reqValueSetsFromOthersNotYetReceivedCount = nProc-1;
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		reqValuesFromOthersReceived[proc_I] = False;
		if ( reqFromOthersCounts[proc_I] == 0 ) {
			reqValueSetsFromOthersNotYetReceivedCount--;
		}
	}

	#if DEBUG
	Journal_DPrintfL( self->debug, 2, "(Expecting %d receives from procs: ",
		reqValueSetsFromOthersNotYetReceivedCount );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "%d, ", proc_I );
		}
	}
	Journal_DPrintfL( self->debug, 2, ")\n" );
	#endif

	Stream_Indent( self->debug );
	/* now update the values at nodes that I requested from others, as they come in */
	while ( reqValueSetsFromOthersNotYetReceivedCount ) {
		int flag = 0;

		Journal_DPrintfL( self->debug, 3, "%d sets still to go...\n", reqValueSetsFromOthersNotYetReceivedCount );
		for( proc_I=0; proc_I < nProc; proc_I++) {
			if ( proc_I == myRank ) continue;

			if ( (reqFromOthersCounts[proc_I] > 0) && (False == reqValuesFromOthersReceived[proc_I]) ) {
				MPI_Test( reqValuesFromOthersHandles[proc_I], &flag, &status );
				if ( !flag ) {
					/* No results yet from this proc -> continue to next. */
					continue;
				}
				else {
					RequestInfo* reqInfo;
					Journal_DPrintfL( self->debug, 2, "received some requested "
						"values (using reqValuesFromOthersHandles) from proc %d "
						"(with tag %d, exp %d):", proc_I, status.MPI_TAG, VALUE_TAG );
					/* go through each value received from that proc & update onto node */
					for ( req_I=0; req_I < reqFromOthersCounts[proc_I]; req_I++ ) {
						reqInfo = &reqFromOthersInfos[proc_I][req_I];
						Journal_DPrintfL( self->debug, 3, "(lnode %d, dof %d -> %d )=%f, ",
							reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
							reqFromOthers[proc_I][req_I], reqValuesFromOthers[proc_I][req_I] );
						DofLayout_SetValueDouble( feVar->dofLayout, reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
							reqValuesFromOthers[proc_I][req_I] );
					}
					Journal_DPrintfL( self->debug, 2, "\n" );
					reqValuesFromOthersReceived[proc_I] = True;
					reqValueSetsFromOthersNotYetReceivedCount--;
					Memory_Free( reqValuesFromOthersHandles[proc_I] );
				}
			}
		}
	}
	Stream_UnIndent( self->debug );

	/* MPI_Wait to be sure all sends to others have completed */
	Journal_DPrintfL( self->debug, 2, "Making sure all comms of this function finished:...\n" );
	Stream_Indent( self->debug );

	Journal_DPrintfL( self->debug, 2, "Confirming completion of my sends of "
		"vector entry index lists I wanted from others were received:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			ierr=MPI_Wait( reqFromOthersHandles[proc_I], MPI_STATUS_IGNORE );
			Journal_DPrintfL( self->debug, 2, "Confirmed wait on reqFromOthersHandles[%u]"
				"\n", proc_I );
			Memory_Free( reqFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Journal_DPrintfL( self->debug, 2, "Confirming completion of my sends of "
		"vector entry values requested by others were received:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromMeCounts[proc_I] > 0 ) {
			ierr=MPI_Wait( reqValuesFromMeHandles[proc_I], MPI_STATUS_IGNORE );
			Journal_DPrintfL( self->debug, 2, "Confirmed wait on reqValuesFromMeHandles[%u]"
				"\n", proc_I );
			Memory_Free( reqValuesFromMeHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Memory_Free( reqFromMeCounts );
	Memory_Free( reqFromMe );
	Memory_Free( reqValuesFromMe );
	Memory_Free( reqValuesFromOthers );
	Memory_Free( reqValuesFromOthersReceived );
	Memory_Free( reqFromOthersHandles );
	Memory_Free( reqValuesFromOthersHandles );
	Memory_Free( reqValuesFromMeHandles );

	Stream_UnIndentBranch( StgFEM_Debug );
	return;
}
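
The function above follows a common non-blocking request/reply pattern: exchange request counts with MPI_Alltoall, send the lists of wanted indices with MPI_Isend, post MPI_Irecv for the answers, serve the requests that arrive, and poll with MPI_Test until every expected value set is in. Below is a minimal, self-contained sketch of that pattern. All names (exchange_values, wantCount, wantIdx, ...) are hypothetical, not the StgFEM API; it assumes owned values can be indexed directly rather than through eqNum->ownedMap, and for brevity it answers requests with a blocking MPI_Send instead of the Isend/Wait pair used above.

/* Minimal sketch of the request/reply exchange pattern (hypothetical names, not StgFEM API). */
#include <mpi.h>
#include <stdlib.h>

#define REQ_TAG 1
#define VAL_TAG 2

void exchange_values( MPI_Comm comm,
                      int*       wantCount,  /* [nProc] how many values I want from each rank */
                      unsigned** wantIdx,    /* [nProc][wantCount[p]] indices I want from rank p */
                      double**   gotVal,     /* [nProc][wantCount[p]] values received (output) */
                      double*    myVal )     /* my locally owned values, indexed directly */
{
    int nProc, myRank, p, i, flag;
    MPI_Comm_size( comm, &nProc );
    MPI_Comm_rank( comm, &myRank );

    /* how many values each rank wants from me */
    int* askCount = malloc( nProc * sizeof(int) );
    MPI_Alltoall( wantCount, 1, MPI_INT, askCount, 1, MPI_INT, comm );

    MPI_Request* sendReq = malloc( nProc * sizeof(MPI_Request) );
    MPI_Request* recvReq = malloc( nProc * sizeof(MPI_Request) );
    int remaining = 0;

    for ( p = 0; p < nProc; p++ ) {
        sendReq[p] = recvReq[p] = MPI_REQUEST_NULL;
        if ( p == myRank || wantCount[p] == 0 ) continue;
        /* tell rank p which entries I want, and post the receive for its answer */
        MPI_Isend( wantIdx[p], wantCount[p], MPI_UNSIGNED, p, REQ_TAG, comm, &sendReq[p] );
        MPI_Irecv( gotVal[p],  wantCount[p], MPI_DOUBLE,   p, VAL_TAG, comm, &recvReq[p] );
        remaining++;
    }

    /* answer every incoming request: blocking recv of the index list, then send the values */
    for ( p = 0; p < nProc; p++ ) {
        if ( p == myRank || askCount[p] == 0 ) continue;
        unsigned* idx = malloc( askCount[p] * sizeof(unsigned) );
        double*   val = malloc( askCount[p] * sizeof(double) );
        MPI_Recv( idx, askCount[p], MPI_UNSIGNED, p, REQ_TAG, comm, MPI_STATUS_IGNORE );
        for ( i = 0; i < askCount[p]; i++ )
            val[i] = myVal[ idx[i] ];
        MPI_Send( val, askCount[p], MPI_DOUBLE, p, VAL_TAG, comm );
        free( idx ); free( val );
    }

    /* poll until every value set I asked for has arrived */
    while ( remaining > 0 ) {
        for ( p = 0; p < nProc; p++ ) {
            if ( recvReq[p] == MPI_REQUEST_NULL ) continue;
            MPI_Test( &recvReq[p], &flag, MPI_STATUS_IGNORE );
            if ( flag ) remaining--;   /* MPI_Test nulls the request on completion */
        }
    }
    MPI_Waitall( nProc, sendReq, MPI_STATUSES_IGNORE );

    free( askCount ); free( sendReq ); free( recvReq );
}
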
Example #2

/* TODO: look at using MPI_Indexed instead */
void ParticleMovementHandler_FinishReceiveAndUpdateShadowParticlesEnteringMyDomain( ParticleCommHandler* self ) {
	MPI_Status	status;
	Cell_ShadowTransferIndex	stCell_I;
	Cell_LocalIndex			lCell_I;
	Neighbour_Index			nbr_I;
	Cell_ShadowTransferIndex	shadowCellsFromProcCount;
	ShadowInfo*			cellShadowInfo = CellLayout_GetShadowInfo( self->swarm->cellLayout );
	ProcNbrInfo*			procNbrInfo = cellShadowInfo->procNbrInfo;
	Neighbour_Index			nbrCount = procNbrInfo->procNbrCnt;
	Particle_InCellIndex		incomingCellParticleCount;
	Particle_InCellIndex		cParticle_I;
	Particle_Index			lParticle_I;
	Index				incomingParticle_I=0; /* Index into the array of particles arriving from a given neighbour */
	Index				incomingParticleSetsNotYetReceivedCount;
	Bool*				incomingParticlesReceived;
	#if DEBUG
	GlobalParticle*                 currParticle;
	#endif

	Journal_DPrintf( self->debug, "In %s():\n", __func__ );
	Stream_IndentBranch( Swarm_Debug );
	
	incomingParticlesReceived = Memory_Alloc_Array_Unnamed( Bool, nbrCount );

	/* Calculate how many particle sets we have to receive */
	incomingParticleSetsNotYetReceivedCount = 0;
	for ( nbr_I=0; nbr_I < nbrCount; nbr_I++ ) {
		incomingParticlesReceived[nbr_I] = False;
		if (self->particlesArrivingFromNbrShadowCellsTotalCounts[nbr_I] > 0) {
			incomingParticleSetsNotYetReceivedCount++;
		}
	}

	while ( incomingParticleSetsNotYetReceivedCount > 0 ) {
		int flag = 0;
		Journal_DPrintfL( self->debug, 3, "%d particle sets still to go...\n", incomingParticleSetsNotYetReceivedCount );
		for ( nbr_I=0; nbr_I < nbrCount; nbr_I++ ) {
			if ( (self->particlesArrivingFromNbrShadowCellsTotalCounts[nbr_I] > 0) &&
				(False == incomingParticlesReceived[nbr_I]) )
			{
				MPI_Test( self->particlesArrivingFromNbrShadowCellsHandles[nbr_I], &flag, &status );
				if ( False == flag ) {
					/* No results yet from this proc -> continue to next. */
					continue;
				}
				else {
					Journal_DPrintfL( self->debug, 3, "Received particles from nbr %d (proc %d):\n",
						nbr_I, procNbrInfo->procNbrTbl[nbr_I] );
					Stream_Indent( self->debug );

					incomingParticle_I = 0;
					shadowCellsFromProcCount = cellShadowInfo->procShadowedCnt[nbr_I];

				
					for ( stCell_I=0; stCell_I < shadowCellsFromProcCount; stCell_I++ ) {

						lCell_I = cellShadowInfo->procShadowedTbl[nbr_I][stCell_I];
						Journal_DPrintfL( self->debug, 3, "Incoming cell %d (local index %d):\n",
							stCell_I, lCell_I );
						Stream_Indent( self->debug );

						incomingCellParticleCount =
							self->particlesArrivingFromNbrShadowCellCounts[nbr_I][stCell_I];

						for ( cParticle_I=0; cParticle_I < incomingCellParticleCount; cParticle_I++ ) {	

							#if DEBUG
							currParticle = (GlobalParticle*)ParticleAt(
								self->particlesArrivingFromNbrShadowCells[nbr_I],
								incomingParticle_I,
								self->swarm->particleExtensionMgr->finalSize );
							Journal_DPrintfL( self->debug, 3, "Handling its PIC %d: - at "
								"(%.2f,%.2f,%.2f)\n", cParticle_I,
								currParticle->coord[0], currParticle->coord[1],
								currParticle->coord[2] );
							#endif

							Stream_Indent( self->debug );

							lParticle_I = ParticleMovementHandler_FindFreeSlotAndPrepareForInsertion( self );

							Swarm_CopyParticleOntoSwarm(
								self->swarm,
								lParticle_I,
								self->particlesArrivingFromNbrShadowCells[nbr_I], incomingParticle_I++ ); 

							Swarm_AddParticleToCell( self->swarm, lCell_I, lParticle_I );
							
							Stream_UnIndent( self->debug );
						}	
						Stream_UnIndent( self->debug );
					}
					incomingParticlesReceived[nbr_I] = True;
					incomingParticleSetsNotYetReceivedCount--;
					Stream_UnIndent( self->debug );
				}
			}
		}
	}	

	Memory_Free( incomingParticlesReceived );

	Stream_UnIndentBranch( Swarm_Debug );
}
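
The TODO comment above the function suggests looking at MPI_Indexed. A rough sketch of that idea follows, assuming incoming particle records could be received straight into scattered free slots of the local particle array via a derived datatype instead of being copied one at a time; the Particle struct and every name here are hypothetical stand-ins, not the Swarm API.

/* Sketch: receive incoming particles directly into scattered slots via an indexed datatype. */
#include <mpi.h>

typedef struct { double coord[3]; int cellId; } Particle;   /* stand-in particle record */

/* Receive `count` particles from `srcRank` straight into the scattered slots
 * freeSlots[0..count-1] of the local particle array. */
void recv_particles_indexed( Particle* particles, const int* freeSlots, int count,
                             int srcRank, int tag, MPI_Comm comm )
{
    MPI_Datatype particleType, slotsType;

    /* one contiguous block of bytes per particle record */
    MPI_Type_contiguous( (int)sizeof(Particle), MPI_BYTE, &particleType );
    MPI_Type_commit( &particleType );

    /* one block of length 1 at each free slot, displacements in units of particleType */
    MPI_Type_create_indexed_block( count, 1, freeSlots, particleType, &slotsType );
    MPI_Type_commit( &slotsType );

    /* the sender transmits `count` contiguous particle records; the indexed type
     * scatters them into the right holes on arrival */
    MPI_Recv( particles, 1, slotsType, srcRank, tag, comm, MPI_STATUS_IGNORE );

    MPI_Type_free( &slotsType );
    MPI_Type_free( &particleType );
}
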
Example #3
void _ParallelDelaunay_Build( void* pd, void* data )
{
	float _minX, _maxX;
	float _minY, _maxY;
	int numProcs, numSites, i, j, count;
	float stride, start;
	int *alloced = NULL;
	int offset;
	ParallelDelaunay *self = (ParallelDelaunay*)pd;
	DelaunayAttributes attr;

	assert( self );
	
	numProcs = self->numProcs;
	numSites = self->numSites;
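	/* Work out this rank's left/right neighbours in the 1D strip decomposition;
	   a value of numProcs serves as the "no neighbour" sentinel. */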

	if( numProcs == 1 ){
		self->leftProc = numProcs;
		self->rightProc = numProcs;
	}
	else{
		if( self->rank == MASTER_PROC ){
			self->leftProc = numProcs;
			self->rightProc = self->rank + 1;
		}
		else if( self->rank == (numProcs-1) ){
			self->leftProc = self->rank - 1;
			self->rightProc = numProcs;
		}
		else{
			self->leftProc = self->rank - 1;
			self->rightProc = self->rank + 1;
		}
	}
	
	self->mapGlobalToLocal = Memory_Alloc_Array_Unnamed( int, numSites );
	self->processorLoad = Memory_Alloc_Array_Unnamed( int, numProcs );
	memset( self->processorLoad, 0, sizeof( int )*numProcs );

	if( self->rank == MASTER_PROC ){
		self->processor = Memory_Alloc_Array_Unnamed( int, numSites );

		alloced = Memory_Alloc_Array_Unnamed( int, numSites );
		memset( alloced, 0, sizeof( int )*numSites );
	
		self->initialOrder = Memory_Alloc_Array_Unnamed( int, numSites );
		memset( self->initialOrder, 0, sizeof( int )*numSites );
		
		Delaunay_FindMinMax( self->sites, self->numSites, &_minX, &_minY, &_maxX, &_maxY );
		Delaunay_SortSites( self->sites, self->numSites );

		for( i=0; i<numSites; i++ ){
			self->initialOrder[i] = self->sites[i].id;
		}
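		/* Partition the x-range into numProcs equal-width strips and assign each
		   (sorted) site to the processor that owns its strip. */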
		
		stride = (_maxX - _minX)/((float)numProcs);

		start = _minX;
		for( i=0; i<numProcs; i++ ){
			for( j=0; j<numSites; j++ ){
				if( ((*(self->sites[j].coord))[0] >= start-epsilon) &&
						((*(self->sites[j].coord))[0] <= (start+stride+epsilon)) &&
						(!alloced[j]) ){
					
					alloced[j] = 1;
					self->processorLoad[i]++;
					self->processor[j] = i;
				}
			}
			start+=stride;
		}
		
		/*for( i=0; i<numProcs; i++ ){
			printf( "processorLoad[%d] = %d\n", i, self->processorLoad[i] );
		}*/
		
		for( i=MASTER_PROC+1; i<numProcs; i++ ){
			MPI_Send( &(self->processorLoad[i]), 1, MPI_INT, i, LOAD_TAG, *self->comm );
		}

		self->numLocalSites = self->processorLoad[MASTER_PROC];
		self->localPoints = Memory_Alloc_Array_Unnamed( CoordF, self->numLocalSites );
		
		count = 0;
		for( i=0; i<numSites; i++ ){
			if( self->processor[i] == MASTER_PROC ){
				memcpy( &(self->localPoints[count++]), self->sites[i].coord, sizeof(CoordF) );
			}
			else{
				MPI_Send( self->sites[i].coord, sizeof( CoordF ), MPI_BYTE, self->processor[i], DATA_TAG, *self->comm );
			}			
		}

		Memory_Free( alloced );
	}