Example #1
void SROpGenerator_GenLevelEqNums( SROpGenerator* self, unsigned level ) {
	Mesh*			cMesh;
	unsigned*		nNodalDofs;
	unsigned		nDomainNodes, nLocalNodes;
	DofLayout*		dofLayout;
	unsigned**		dstArray;
	unsigned		curEqNum;
	unsigned		maxDofs;
	unsigned		topNode;
	unsigned		base, subTotal;
	MPI_Comm		comm;
	unsigned		nProcs, rank;
	MPI_Status		status;
	unsigned*		tuples;
	Sync*			sync;
	unsigned		n_i, dof_i;

	assert( self && Stg_CheckType( self, SROpGenerator ) );
	assert( self->meshes && self->topMaps && self->eqNums );
	assert( level < self->nLevels );

	cMesh = self->meshes[level];
	nDomainNodes = Mesh_GetDomainSize( cMesh, MT_VERTEX );
	nLocalNodes = Mesh_GetLocalSize( cMesh, MT_VERTEX );
	dofLayout = self->fineEqNum->dofLayout;
	comm = Comm_GetMPIComm( Mesh_GetCommTopology( cMesh, MT_VERTEX ) );
	MPI_Comm_size( comm, (int*)&nProcs );
	MPI_Comm_rank( comm, (int*)&rank );

	/* Allocate for destination array. */
	nNodalDofs = AllocArray( unsigned, nDomainNodes );
	for( n_i = 0; n_i < nDomainNodes; n_i++ )
		nNodalDofs[n_i] = dofLayout->dofCounts[self->topMaps[level][n_i]];
	dstArray = AllocComplex2D( unsigned, nDomainNodes, nNodalDofs );

	/* Build initial destination array and store max dofs. */
	curEqNum = 0;
	maxDofs = 0;
	for( n_i = 0; n_i < nLocalNodes; n_i++ ) {
		if( nNodalDofs[n_i] > maxDofs )
			maxDofs = nNodalDofs[n_i];

		topNode = self->topMaps[level][n_i];
		for( dof_i = 0; dof_i < nNodalDofs[n_i]; dof_i++ ) {
			if( self->fineEqNum->mapNodeDof2Eq[topNode][dof_i] != (unsigned)-1 )
				dstArray[n_i][dof_i] = curEqNum++;
			else
				dstArray[n_i][dof_i] = (unsigned)-1;
		}
	}

	/* Order the equation numbers based on processor rank; cascade counts forward. */
	base = 0;
	subTotal = curEqNum;
	if( rank > 0 ) {
		insist( MPI_Recv( &base, 1, MPI_UNSIGNED, rank - 1, 6669, comm, &status ), == MPI_SUCCESS );
		subTotal = base + curEqNum;
	}
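
The block above begins a rank-ordered cascade: each process receives the running equation count from the previous rank and adds its own local count. Below is a minimal, self-contained sketch of that cascade pattern, with made-up names and values; it is a standalone illustration, not the original StgFEM continuation.

/* Standalone sketch (not StgFEM code): rank-ordered cascade of counts.
 * Each rank receives the running total from rank-1, adds its own count,
 * and forwards the new total to rank+1, so local numberings can be
 * offset into a globally contiguous range. */
#include <mpi.h>
#include <stdio.h>

int main( int argc, char* argv[] ) {
	int		rank, nProcs;
	unsigned	localCount, base = 0, subTotal;
	MPI_Status	status;

	MPI_Init( &argc, &argv );
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	MPI_Comm_size( MPI_COMM_WORLD, &nProcs );

	localCount = (unsigned)(rank + 1) * 10;	/* stand-in for curEqNum */

	if( rank > 0 )
		MPI_Recv( &base, 1, MPI_UNSIGNED, rank - 1, 6669, MPI_COMM_WORLD, &status );
	subTotal = base + localCount;
	if( rank < nProcs - 1 )
		MPI_Send( &subTotal, 1, MPI_UNSIGNED, rank + 1, 6669, MPI_COMM_WORLD );

	/* Local equation numbers would now be offset by 'base' to become global. */
	printf( "rank %d: base = %u, running total = %u\n", rank, base, subTotal );

	MPI_Finalize();
	return 0;
}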
Example #2
void ElementCellLayout_BuildShadowInfo( ElementCellLayout* self ) {
	unsigned	nDims;
	Comm*		comm;
	int	nIncProcs;
	int*	incProcs;
	unsigned	n_i;

	nDims = Mesh_GetDimSize( self->mesh );
	comm = Mesh_GetCommTopology( self->mesh, nDims );
	Comm_GetNeighbours( comm, &nIncProcs, &incProcs );

	/* Extract neighbouring proc information. */
	self->cellShadowInfo.procNbrInfo = Memory_Alloc_Unnamed( ProcNbrInfo );
	self->cellShadowInfo.procNbrInfo->procNbrCnt = nIncProcs;
	self->cellShadowInfo.procNbrInfo->procNbrTbl = AllocArray( unsigned, nIncProcs );
	memcpy( self->cellShadowInfo.procNbrInfo->procNbrTbl, incProcs, nIncProcs * sizeof(unsigned) );

	/* Count shadow info. */
	if( nIncProcs ) {
		self->cellShadowInfo.procShadowedCnt = AllocArray( unsigned, nIncProcs );
		memset( self->cellShadowInfo.procShadowedCnt, 0, nIncProcs * sizeof(unsigned) );
		self->cellShadowInfo.procShadowCnt = AllocArray( unsigned, nIncProcs );
		memset( self->cellShadowInfo.procShadowCnt, 0, nIncProcs * sizeof(unsigned) );
	}
	for( n_i = 0; n_i < Mesh_GetSharedSize( self->mesh, nDims ); n_i++ ) {
		unsigned	nSharers;
		unsigned*	sharers;
		unsigned	s_i;

		Mesh_GetSharers( self->mesh, nDims, n_i, 
				 &nSharers, &sharers );
		for( s_i = 0; s_i < nSharers; s_i++ )
			self->cellShadowInfo.procShadowedCnt[sharers[s_i]]++;
	}
	for( n_i = 0; n_i < Mesh_GetRemoteSize( self->mesh, nDims ); n_i++ ) {
		unsigned	owner;

		owner = Mesh_GetOwner( self->mesh, nDims, n_i );
		self->cellShadowInfo.procShadowCnt[owner]++;
	}

	/* Build shadow info indices. */
	if( nIncProcs ) {
		self->cellShadowInfo.procShadowedTbl = Memory_Alloc_2DComplex_Unnamed( unsigned, nIncProcs, 
										       self->cellShadowInfo.procShadowedCnt );
		self->cellShadowInfo.procShadowTbl = Memory_Alloc_2DComplex_Unnamed( unsigned, nIncProcs, 
										     self->cellShadowInfo.procShadowCnt );
		/* Reset the counts so they can be reused below as insertion cursors. */
		memset( self->cellShadowInfo.procShadowedCnt, 0, nIncProcs * sizeof(unsigned) );
		memset( self->cellShadowInfo.procShadowCnt, 0, nIncProcs * sizeof(unsigned) );
	}
	for( n_i = 0; n_i < Mesh_GetSharedSize( self->mesh, nDims ); n_i++ ) {
		unsigned	local;
		unsigned	curInd;
		unsigned	nSharers;
		unsigned*	sharers;
		unsigned	s_i;

		local = Mesh_SharedToLocal( self->mesh, nDims, n_i );

		Mesh_GetSharers( self->mesh, nDims, n_i, 
				 &nSharers, &sharers );
		for( s_i = 0; s_i < nSharers; s_i++ ) {
			curInd = self->cellShadowInfo.procShadowedCnt[sharers[s_i]]++;
			self->cellShadowInfo.procShadowedTbl[sharers[s_i]][curInd] = local;
		}
	}
	for( n_i = 0; n_i < Mesh_GetRemoteSize( self->mesh, nDims ); n_i++ ) {
		unsigned	domain;
		unsigned	curInd;
		unsigned	owner;

		domain = Mesh_GetLocalSize( self->mesh, nDims ) + n_i;
		owner = Mesh_GetOwner( self->mesh, nDims, n_i );
		curInd = self->cellShadowInfo.procShadowCnt[owner]++;
		self->cellShadowInfo.procShadowTbl[owner][curInd] = domain;
	}
}
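
ElementCellLayout_BuildShadowInfo uses a two-pass, count-then-fill idiom: per-neighbour counts are tallied first, exactly-sized tables are allocated from them, and the counts are then zeroed and reused as insertion cursors. The standalone sketch below isolates that idiom with hypothetical data; it is not StgFEM code.

/* Standalone sketch (made-up data): count-then-fill bucket tables. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main( void ) {
	unsigned	items[] = { 2, 0, 1, 2, 2, 0 };	/* bucket id of each item */
	unsigned	nItems = 6, nBuckets = 3;
	unsigned*	counts;
	unsigned**	table;
	unsigned	i, b;

	/* Pass 1: count how many items land in each bucket. */
	counts = calloc( nBuckets, sizeof(unsigned) );
	for( i = 0; i < nItems; i++ )
		counts[items[i]]++;

	/* Allocate one exactly-sized row per bucket. */
	table = malloc( nBuckets * sizeof(unsigned*) );
	for( b = 0; b < nBuckets; b++ )
		table[b] = malloc( counts[b] * sizeof(unsigned) );

	/* Pass 2: reset the counts and reuse them as write cursors. */
	memset( counts, 0, nBuckets * sizeof(unsigned) );
	for( i = 0; i < nItems; i++ )
		table[items[i]][counts[items[i]]++] = i;

	for( b = 0; b < nBuckets; b++ ) {
		printf( "bucket %u:", b );
		for( i = 0; i < counts[b]; i++ )
			printf( " %u", table[b][i] );
		printf( "\n" );
		free( table[b] );
	}
	free( table );
	free( counts );
	return 0;
}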
Example #3
void _SolutionVector_ShareValuesNotStoredLocally(
	SolutionVector*		self,
	Index*			reqFromOthersCounts,
	RequestInfo**		reqFromOthersInfos,
	Dof_EquationNumber**	reqFromOthers,
	double*			localSolnVecValues )
{

	FeVariable*		feVar = self->feVariable;
	FeMesh*			feMesh = feVar->feMesh;
	FeEquationNumber*	eqNum = self->eqNum;
	Comm*			comm;
	MPI_Comm		mpiComm;
	Partition_Index		nProc;
	Partition_Index		myRank;
	Partition_Index		proc_I;
	Index			req_I;
	Index			indexIntoLocalSolnVecValues;
	MPI_Status		status;
	Index*			reqFromMeCounts;
	Dof_EquationNumber**	reqFromMe;
	double**		reqValuesFromMe;
	MPI_Request**		reqValuesFromMeHandles;
	MPI_Request**		reqFromOthersHandles;
	double**		reqValuesFromOthers;
	MPI_Request**		reqValuesFromOthersHandles;
	Bool*			reqValuesFromOthersReceived;
	Partition_Index		reqValueSetsFromOthersNotYetReceivedCount;
	Dof_EquationNumber	totalRequestedFromOthers = 0;
	Dof_EquationNumber	totalRequestedFromMe = 0;
	int			ierr;

	Journal_DPrintf( self->debug, "In %s - for \"%s\"\n", __func__, self->name );
	Stream_IndentBranch( StgFEM_Debug );

	comm = Mesh_GetCommTopology( feMesh, MT_VERTEX );
	mpiComm = Comm_GetMPIComm( comm );
	MPI_Comm_size( mpiComm, (int*)&nProc );
	MPI_Comm_rank( mpiComm, (int*)&myRank );

	reqFromMeCounts = Memory_Alloc_Array( Index, nProc, "reqFromMeCounts" );
	reqFromOthersHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromOthersHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromMeHandles = Memory_Alloc_Array_Unnamed( MPI_Request*, nProc );
	reqValuesFromOthers = Memory_Alloc_2DComplex( double, nProc, reqFromOthersCounts, "reqValuesFromOthers" );
	reqValuesFromOthersReceived = Memory_Alloc_Array_Unnamed( Bool, nProc );

	#if DEBUG
	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
		Journal_DPrintf( self->debug, "Final list of vec values I need from other procs:\n" );
		for ( proc_I=0; proc_I < nProc; proc_I++ ) {
			if ( proc_I == myRank ) continue;
			Journal_DPrintf( self->debug, "\t%d[0-%d]: ", proc_I, reqFromOthersCounts[proc_I] );
			for ( req_I=0; req_I < reqFromOthersCounts[proc_I]; req_I++ ) {
				RequestInfo* reqInfo = &reqFromOthersInfos[proc_I][req_I];
				Journal_DPrintf( self->debug, "(lnode %d, dof %d -> %d ), ",
					reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
					reqFromOthers[proc_I][req_I] );
			}
			Journal_DPrintf( self->debug, "\n" );
		}
	}
	#endif

	/* send out my request counts, receive the req. counts others want from me */
	MPI_Alltoall( reqFromOthersCounts, 1, MPI_UNSIGNED,
		      reqFromMeCounts, 1, MPI_UNSIGNED, mpiComm );

	Journal_DPrintf( self->debug, "After MPI_Alltoall- counts are:\n" );
	totalRequestedFromOthers = 0;
	totalRequestedFromMe = 0;
	Stream_Indent( self->debug );
	Journal_DPrintf( self->debug, "reqFromOthersCounts: " );
	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		if ( proc_I == myRank ) continue;
		Journal_DPrintf( self->debug, "\tp%d:%d, ", proc_I, reqFromOthersCounts[proc_I] );
		totalRequestedFromOthers += reqFromOthersCounts[proc_I];
	}
	Journal_DPrintf( self->debug, "\n" );
	Journal_DPrintf( self->debug, "reqFromMeCounts: " );
	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		if ( proc_I == myRank ) continue;
		Journal_DPrintf( self->debug, "\tp%d:%d, ", proc_I, reqFromMeCounts[proc_I] );
		totalRequestedFromMe += reqFromMeCounts[proc_I];
	}
	Journal_DPrintf( self->debug, "\n" );
	Stream_UnIndent( self->debug );

	if ( ( totalRequestedFromOthers == 0) && (totalRequestedFromMe == 0) )
	{
		Journal_DPrintf( self->debug, "No vector values either required from others or "
			"required by others from me, therefore cleaning up memory and returning.\n" );
		Memory_Free( reqFromMeCounts );
		Memory_Free( reqFromOthersHandles );
		Memory_Free( reqValuesFromOthersHandles );
		Memory_Free( reqValuesFromMeHandles );
		Memory_Free( reqValuesFromOthers );
		Memory_Free( reqValuesFromOthersReceived );
		Stream_UnIndentBranch( StgFEM_Debug );
		return;
	}

	Journal_DPrintfL( self->debug, 2, "Starting non-blocking sends of my lists of vector entry indices I want from others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "Sending to proc %d the list of %d vector entry indices I want from it:\n"
				"\t(tracking via reqFromOthersHandles[%d], tag %d)\n", proc_I,
				reqFromOthersCounts[proc_I], proc_I, VALUE_REQUEST_TAG );

			reqFromOthersHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			ierr=MPI_Isend( reqFromOthers[proc_I], reqFromOthersCounts[proc_I], MPI_UNSIGNED,
				proc_I, VALUE_REQUEST_TAG, mpiComm, reqFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );


	Journal_DPrintfL( self->debug, 2, "Starting non-blocking receive of the vector entries I want from others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "Posting recv reqst from proc %d for the %d vector entries I want from it:\n"
				"\t(tracking via reqValuesFromOthersHandles[%d], tag %d)\n", proc_I,
				reqFromOthersCounts[proc_I], proc_I, VALUE_TAG );
			reqValuesFromOthersHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			ierr=MPI_Irecv( reqValuesFromOthers[proc_I], reqFromOthersCounts[proc_I], MPI_DOUBLE,
				proc_I, VALUE_TAG, mpiComm, reqValuesFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );

	Journal_DPrintfL( self->debug, 2, "Starting blocking receive of the lists of vector entry indices "
		"others want from me:\n" );
	Stream_Indent( self->debug );
	reqFromMe = Memory_Alloc_2DComplex( Dof_EquationNumber, nProc, reqFromMeCounts, "reqFromMe" );
	reqValuesFromMe = Memory_Alloc_2DComplex( double, nProc, reqFromMeCounts, "reqValuesFromMe" );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromMeCounts[proc_I] > 0 ) {
			ierr=MPI_Recv( reqFromMe[proc_I], reqFromMeCounts[proc_I], MPI_UNSIGNED,
				proc_I, VALUE_REQUEST_TAG, mpiComm, &status );
			Journal_DPrintfL( self->debug, 3, "Received a list of %u requested vector entry indices from proc %u, "
				"with tag %d\n", reqFromMeCounts[proc_I], proc_I, status.MPI_TAG );
		}
	}
	Stream_UnIndent( self->debug );

	#if DEBUG
	if ( Stream_IsPrintableLevel( self->debug, 2 ) ) {
		Journal_DPrintf( self->debug, "Final lists of vector entry indices other procs want from me are:\n" );
		Stream_Indent( self->debug );
		for ( proc_I=0; proc_I < nProc; proc_I++ ) {
			if ( proc_I == myRank ) continue;
			if ( reqFromMeCounts[proc_I] > 0 ) {
				Journal_DPrintf( self->debug, "%d[0-%d]: ", proc_I, reqFromMeCounts[proc_I] );
				for ( req_I=0; req_I < reqFromMeCounts[proc_I]; req_I++ ) {
					Journal_DPrintf( self->debug, "(eqNum %d), ", reqFromMe[proc_I][req_I] );
				}
				Journal_DPrintf( self->debug, "\n" );
			}
		}
		Stream_UnIndent( self->debug );
	}
	#endif

	/* for all those requested from me, non-blocking send out values */
	Journal_DPrintfL( self->debug, 2, "Beginning non-blocking send out of vector entry lists requested by others:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromMeCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 3, "list to proc %d is: ", proc_I );
			for ( req_I=0; req_I < reqFromMeCounts[proc_I]; req_I++ ) {
				/* look up and fill in correct value in array */
				indexIntoLocalSolnVecValues = *(int*)STreeMap_Map( eqNum->ownedMap,
										   reqFromMe[proc_I] + req_I );
				reqValuesFromMe[proc_I][req_I] = localSolnVecValues[indexIntoLocalSolnVecValues];
				Journal_DPrintfL( self->debug, 3, "%d=%f, ", reqFromMe[proc_I][req_I],
					reqValuesFromMe[proc_I][req_I] );
			}
			Journal_DPrintfL( self->debug, 3, "\n" );
			/* Non-blocking send out the now-complete list to this processor */
			reqValuesFromMeHandles[proc_I] = Memory_Alloc_Unnamed( MPI_Request );
			Journal_DPrintfL( self->debug, 2, "Sending to proc %d the list of %d vector entries they want:\n"
				"\t(tracking via reqValuesFromMe[%d], tag %d)\n", proc_I,
				reqFromMeCounts[proc_I], proc_I, VALUE_TAG );
			ierr=MPI_Isend( reqValuesFromMe[proc_I], reqFromMeCounts[proc_I], MPI_DOUBLE,
				proc_I, VALUE_TAG, mpiComm, reqValuesFromMeHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );

	Journal_DPrintfL( self->debug, 1, "Starting iterative-test receive of the vector entries I "
		"requested from others:\n" );
	/* Set up an array for keeping track of who we've received things from
	 * already */
	reqValueSetsFromOthersNotYetReceivedCount = nProc-1;
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		reqValuesFromOthersReceived[proc_I] = False;
		if ( reqFromOthersCounts[proc_I] == 0 ) {
			reqValueSetsFromOthersNotYetReceivedCount--;
		}
	}

	#if DEBUG
	Journal_DPrintfL( self->debug, 2, "(Expecting %d receives from procs: ",
		reqValueSetsFromOthersNotYetReceivedCount );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			Journal_DPrintfL( self->debug, 2, "%d, ", proc_I );
		}
	}
	Journal_DPrintfL( self->debug, 2, ")\n" );
	#endif

	Stream_Indent( self->debug );
	/* now update the values at nodes that I requested from others, as they come in */
	while ( reqValueSetsFromOthersNotYetReceivedCount ) {
		int flag = 0;

		Journal_DPrintfL( self->debug, 3, "%d sets still to go...\n", reqValueSetsFromOthersNotYetReceivedCount );
		for( proc_I=0; proc_I < nProc; proc_I++) {
			if ( proc_I == myRank ) continue;

			if ( (reqFromOthersCounts[proc_I] > 0) && (False == reqValuesFromOthersReceived[proc_I]) ) {
				MPI_Test( reqValuesFromOthersHandles[proc_I], &flag, &status );
				if ( !flag ) {
					/* No results yet from this proc -> continue to next. */
					continue;
				}
				else {
					RequestInfo* reqInfo;
					Journal_DPrintfL( self->debug, 2, "received some requested "
						"values (using reqValuesFromOthersHandles) from proc %d "
						"(with tag %d, exp %d):", proc_I, status.MPI_TAG, VALUE_TAG );
					/* go through each value received from that proc & update onto node */
					for ( req_I=0; req_I < reqFromOthersCounts[proc_I]; req_I++ ) {
						reqInfo = &reqFromOthersInfos[proc_I][req_I];
						Journal_DPrintfL( self->debug, 3, "(lnode %d, dof %d -> %d )=%f, ",
							reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
							reqFromOthers[proc_I][req_I], reqValuesFromOthers[proc_I][req_I] );
						DofLayout_SetValueDouble( feVar->dofLayout, reqInfo->lNode_I, reqInfo->nodeLocalDof_I,
							reqValuesFromOthers[proc_I][req_I] );
					}
					Journal_DPrintfL( self->debug, 2, "\n" );
					reqValuesFromOthersReceived[proc_I] = True;
					reqValueSetsFromOthersNotYetReceivedCount--;
					Memory_Free( reqValuesFromOthersHandles[proc_I] );
				}
			}
		}
	}
	Stream_UnIndent( self->debug );

	/* MPI_Wait to be sure all sends to others have completed */
	Journal_DPrintfL( self->debug, 2, "Making sure all comms of this function finished:...\n" );
	Stream_Indent( self->debug );

	Journal_DPrintfL( self->debug, 2, "Confirming completion of my sends of "
		"vector entry index lists I wanted from others were received:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromOthersCounts[proc_I] > 0 ) {
			ierr=MPI_Wait( reqFromOthersHandles[proc_I], MPI_STATUS_IGNORE );
			Journal_DPrintfL( self->debug, 2, "Confirmed wait on reqFromOthersHandles[%u]"
				"\n", proc_I );
			Memory_Free( reqFromOthersHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Journal_DPrintfL( self->debug, 2, "Confirming completion of my sends of "
		"vector entry values requested by others were received:\n" );
	Stream_Indent( self->debug );
	for( proc_I=0; proc_I < nProc; proc_I++) {
		if ( proc_I == myRank ) continue;
		if ( reqFromMeCounts[proc_I] > 0 ) {
			ierr=MPI_Wait( reqValuesFromMeHandles[proc_I], MPI_STATUS_IGNORE );
			Journal_DPrintfL( self->debug, 2, "Confirmed wait on reqValuesFromMeHandles[%u]"
				"\n", proc_I );
			Memory_Free( reqValuesFromMeHandles[proc_I] );
		}
	}
	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Stream_UnIndent( self->debug );
	Journal_DPrintfL( self->debug, 2, "done.\n" );

	Memory_Free( reqFromMeCounts );
	Memory_Free( reqFromMe );
	Memory_Free( reqValuesFromMe );
	Memory_Free( reqValuesFromOthers );
	Memory_Free( reqValuesFromOthersReceived );
	Memory_Free( reqFromOthersHandles );
	Memory_Free( reqValuesFromOthersHandles );
	Memory_Free( reqValuesFromMeHandles );

	Stream_UnIndentBranch( StgFEM_Debug );
	return;
}
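
The function above follows a common MPI exchange shape: trade request counts with MPI_Alltoall, post non-blocking sends and receives, poll for completion with MPI_Test, and finish with MPI_Wait. The sketch below reduces that shape to a ring exchange of single values; it is a simplified, hypothetical illustration rather than the StgFEM implementation.

/* Standalone sketch (not the StgFEM implementation): each rank asks its
 * right-hand neighbour for one value, so exactly one incoming request
 * reaches every rank and a single send request handle suffices. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main( int argc, char* argv[] ) {
	int		rank, nProcs, p, flag, rightNbr;
	unsigned*	wantFrom;	/* how many values I request from each rank */
	unsigned*	wantedByMe;	/* how many values each rank requests from me */
	double		myValue, reply;
	MPI_Request	sendReq = MPI_REQUEST_NULL;
	MPI_Request	recvReq;

	MPI_Init( &argc, &argv );
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	MPI_Comm_size( MPI_COMM_WORLD, &nProcs );
	rightNbr = (rank + 1) % nProcs;

	wantFrom = calloc( nProcs, sizeof(unsigned) );
	wantedByMe = calloc( nProcs, sizeof(unsigned) );
	wantFrom[rightNbr] = 1;

	/* Everyone learns how many values each other rank wants from them. */
	MPI_Alltoall( wantFrom, 1, MPI_UNSIGNED, wantedByMe, 1, MPI_UNSIGNED, MPI_COMM_WORLD );

	/* Post the non-blocking receive for the value I asked for... */
	MPI_Irecv( &reply, 1, MPI_DOUBLE, rightNbr, 1, MPI_COMM_WORLD, &recvReq );

	/* ...and answer whoever asked me, with a non-blocking send. */
	myValue = 100.0 * rank;	/* stand-in for a locally owned solution value */
	for( p = 0; p < nProcs; p++ ) {
		if( wantedByMe[p] )
			MPI_Isend( &myValue, 1, MPI_DOUBLE, p, 1, MPI_COMM_WORLD, &sendReq );
	}

	/* Poll until my receive completes (other work could be done here). */
	flag = 0;
	while( !flag )
		MPI_Test( &recvReq, &flag, MPI_STATUS_IGNORE );
	printf( "rank %d received %g from rank %d\n", rank, reply, rightNbr );

	/* Make sure my own send has completed before freeing and finalising. */
	MPI_Wait( &sendReq, MPI_STATUS_IGNORE );
	free( wantFrom );
	free( wantedByMe );
	MPI_Finalize();
	return 0;
}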
Example #4
void SolutionVector_UpdateSolutionOntoNodes( void* solutionVector ) {
	SolutionVector*		self = (SolutionVector *)solutionVector;
	double*			localSolnVecValues;
	Node_LocalIndex 	lNode_I = 0;
	Dof_Index		currNodeNumDofs;
	Dof_Index		nodeLocalDof_I;
	Partition_Index		ownerProc;
	FeVariable*		feVar = self->feVariable;
	FeMesh*			feMesh = feVar->feMesh;
	MPI_Comm		mpiComm;
	FeEquationNumber*	eqNum = self->eqNum;
	Dof_EquationNumber	currEqNum;
	Index			indexIntoLocalSolnVecValues;
	Index*			reqFromOthersCounts;
	Index*			reqFromOthersSizes;
	RequestInfo**		reqFromOthersInfos;
	Dof_EquationNumber**	reqFromOthers;
	Comm*			comm;
	Partition_Index		nProc;
	Partition_Index		myRank;
	Partition_Index		proc_I;
	double			initialGuessAtNonLocalEqNumsRatio = 0.1;
	double			ratioToIncreaseRequestArraySize = 1.5;
	Index			newReqFromOthersSize;

	Journal_DPrintf( self->debug, "In %s - for \"%s\"\n", __func__, self->name );
	Stream_IndentBranch( StgFEM_Debug );

	#if DEBUG
	if ( Stream_IsPrintableLevel( self->debug, 3 ) ) {
		Journal_DPrintf( self->debug, "Vector data:\n" );
		_SolutionVector_VectorView( self->vector, self->debug );
	}
	#endif

	comm = Mesh_GetCommTopology( feMesh, MT_VERTEX );
	mpiComm = Comm_GetMPIComm( comm );
	MPI_Comm_size( mpiComm, (int*)&nProc );
	MPI_Comm_rank( mpiComm, (int*)&myRank );

	/* allocate arrays for nodes that I want on each processor */
	reqFromOthersCounts = Memory_Alloc_Array( Index, nProc, "reqFromOthersCounts" );
	reqFromOthersSizes = Memory_Alloc_Array( Index, nProc, "reqFromOthersSizes" );
	reqFromOthersInfos = Memory_Alloc_Array( RequestInfo*, nProc, "reqFromOthersInfos" );
	reqFromOthers = Memory_Alloc_Array( Dof_EquationNumber*, nProc, "reqFromOthers" );
	/* Allocate the arrays of req. values from others independently, as we don't know how large they'll be */
	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		reqFromOthersCounts[proc_I] = 0;

		if (proc_I == myRank) continue;

		/* Our initial guess at number of non-local eqNums is a small ratio of the number of local dofs */
		reqFromOthersSizes[proc_I] = eqNum->localEqNumsOwnedCount * initialGuessAtNonLocalEqNumsRatio;
		/* Special case for really small meshes: make sure it's at least 1 */
		if (0 == reqFromOthersSizes[proc_I] ) {
			reqFromOthersSizes[proc_I]++;
		}
		reqFromOthersInfos[proc_I] = Memory_Alloc_Array( RequestInfo, reqFromOthersSizes[proc_I],
			"reqFromOthersInfos[proc_I]" );
		reqFromOthers[proc_I] = Memory_Alloc_Array( Dof_EquationNumber, reqFromOthersSizes[proc_I],
			"reqFromOthers[proc_I]" );
	}

	/* Get the locally held part of the vector */
	VecGetArray( self->vector, &localSolnVecValues );

	for( lNode_I=0; lNode_I < Mesh_GetLocalSize( feMesh, MT_VERTEX ); lNode_I++ ) {
		currNodeNumDofs = feVar->dofLayout->dofCounts[ lNode_I ];
		Journal_DPrintfL( self->debug, 3, "getting solutions for local node %d, has %d dofs.\n", lNode_I, currNodeNumDofs );

		/* process each dof */
		for ( nodeLocalDof_I = 0; nodeLocalDof_I < currNodeNumDofs; nodeLocalDof_I++ ) {
			Journal_DPrintfL( self->debug, 3, "\tdof %d: ", nodeLocalDof_I );

			currEqNum = eqNum->mapNodeDof2Eq[lNode_I][nodeLocalDof_I];
			if( currEqNum != -1 ) {
				Journal_DPrintfL( self->debug, 3, "is unconstrained, eqNum %d:", currEqNum );

				if( STreeMap_HasKey( eqNum->ownedMap, &currEqNum ) ) {
					indexIntoLocalSolnVecValues = *(int*)STreeMap_Map( eqNum->ownedMap, &currEqNum );
					Journal_DPrintfL( self->debug, 3, "local -> just copying value %f\n",
						localSolnVecValues[indexIntoLocalSolnVecValues] );
					DofLayout_SetValueDouble( feVar->dofLayout, lNode_I, nodeLocalDof_I,
						localSolnVecValues[indexIntoLocalSolnVecValues] );
				}
				else {
					RequestInfo*	requestInfo;

					Journal_DPrintfL( self->debug, 3, "nonlocal -> add to req list " );
					ownerProc = FeEquationNumber_CalculateOwningProcessorOfEqNum( eqNum, currEqNum );
					Journal_DPrintfL( self->debug, 3, "from proc %d\n", ownerProc );
					/* first check count & realloc if necessary */
					if (reqFromOthersCounts[ownerProc] == reqFromOthersSizes[ownerProc] ) {
						newReqFromOthersSize = reqFromOthersSizes[ownerProc] * ratioToIncreaseRequestArraySize;
						if ( newReqFromOthersSize == reqFromOthersSizes[ownerProc] ) {
							/* Special case: always increase by at least 1 */
							newReqFromOthersSize++;
						}
						reqFromOthersSizes[ownerProc] = newReqFromOthersSize;

						Journal_DPrintfL( self->debug, 3, "req list from proc %d count %d now "
							"equal to size, so reallocing to size %d\n",
							ownerProc, reqFromOthersCounts[ownerProc],
							reqFromOthersSizes[ownerProc] );

						reqFromOthersInfos[ownerProc] = Memory_Realloc_Array(
							reqFromOthersInfos[ownerProc], RequestInfo, reqFromOthersSizes[ownerProc] );
						reqFromOthers[ownerProc] = Memory_Realloc_Array(
							reqFromOthers[ownerProc], Dof_EquationNumber, reqFromOthersSizes[ownerProc] );
					}
					requestInfo = &reqFromOthersInfos[ownerProc][ reqFromOthersCounts[ownerProc] ];
					requestInfo->lNode_I = lNode_I;
					requestInfo->nodeLocalDof_I = nodeLocalDof_I;
					reqFromOthers[ownerProc][reqFromOthersCounts[ownerProc]] = currEqNum;
					(reqFromOthersCounts[ownerProc])++;
				}
			}
			else {
				Journal_DPrintfL( self->debug, 3, "is a BC, so skipping...\n" );
			}
		}
	}

	if ( nProc > 1 ) {
		_SolutionVector_ShareValuesNotStoredLocally( self, reqFromOthersCounts, reqFromOthersInfos, reqFromOthers,
			localSolnVecValues );
	}

	for ( proc_I=0; proc_I < nProc; proc_I++ ) {
		if (proc_I == myRank) continue;
		Memory_Free( reqFromOthers[proc_I] );
		Memory_Free( reqFromOthersInfos[proc_I] );
	}
	Memory_Free( reqFromOthers );
	Memory_Free( reqFromOthersInfos );
	Memory_Free( reqFromOthersCounts );
	Memory_Free( reqFromOthersSizes );

	VecRestoreArray( self->vector, &localSolnVecValues );

	/*
	** Synchronise the FeVariable in question.
	*/

	FeVariable_SyncShadowValues( feVar );

	Stream_UnIndentBranch( StgFEM_Debug );
}
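
SolutionVector_UpdateSolutionOntoNodes grows its per-processor request arrays geometrically (by ratioToIncreaseRequestArraySize, with a minimum growth of one) whenever a count reaches the current size. A minimal standalone sketch of that growth policy, with made-up names, is shown below.

/* Standalone sketch (made-up names): grow-by-ratio reallocation.
 * When the element count reaches the allocated size, grow the array by
 * a fixed ratio, and always by at least one entry. */
#include <stdio.h>
#include <stdlib.h>

int main( void ) {
	double		growthRatio = 1.5;
	unsigned	size = 1, count = 0, newSize, i;
	unsigned*	array = malloc( size * sizeof(unsigned) );

	for( i = 0; i < 100; i++ ) {
		if( count == size ) {
			newSize = (unsigned)(size * growthRatio);
			if( newSize == size )
				newSize++;	/* special case: always grow by at least one */
			array = realloc( array, newSize * sizeof(unsigned) );
			printf( "grew from %u to %u entries\n", size, newSize );
			size = newSize;
		}
		array[count++] = i;
	}
	free( array );
	return 0;
}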