void TEST_DAL_scattervReceive( Data *data, dal_size_t size, int root )
{
	int i;

	SPD_ASSERT( DAL_allocArray( data, size ), "not enough memory to allocate data" );
// 	SPD_ASSERT( DAL_allocData( data, size ), "not enough space to allocate data" );

	Data globalBuf;
	DAL_acquireGlobalBuffer( &globalBuf );

	//VALID ONLY FOR THIS TEST: shrink the global buffer so that//
	//blockSize == 1 and the chunked protocol below is exercised//
	globalBuf.array.size = GET_N();
	//////////////////////////////////////////////////////////////

	int blockSize = DAL_dataSize(&globalBuf) / GET_N();

	//Retrieving the number of iterations
	dal_size_t max_count;
	MPI_Bcast( &max_count, 1, MPI_LONG_LONG, root, MPI_COMM_WORLD );
	int num_iterations = max_count / blockSize + (max_count % blockSize > 0);
	int recvCount, tmp;

	switch( data->medium ) {
		case File: {

			for ( i=0; i<num_iterations; i++ ) {
				//clamp the chunk to the elements still expected (0 once this process is done)
				tmp = MIN( blockSize, size-i*blockSize );
				recvCount = tmp > 0 ? tmp : 0;
				MPI_Scatterv( NULL, NULL, NULL, MPI_INT, globalBuf.array.data, recvCount, MPI_INT, root, MPI_COMM_WORLD );

				//move the received chunk from the global buffer into the file-backed data
				if ( recvCount )
					DAL_dataCopyOS( &globalBuf, 0, data, i*blockSize, recvCount );
			}
			break;
		}
		case Array: {
			int recvDispl = 0;

			for ( i=0; i<num_iterations; i++ ) {
				tmp = MIN( blockSize, size-i*blockSize );
				recvCount = tmp > 0 ? tmp : 0;
				//receive directly into the destination array, advancing the displacement
				MPI_Scatterv( NULL, NULL, NULL, MPI_INT, data->array.data+recvDispl, recvCount, MPI_INT, root, MPI_COMM_WORLD );
				recvDispl += recvCount;
			}
			break;
		}
		default:
			DAL_UNSUPPORTED( data );
	}

	DAL_releaseGlobalBuffer( &globalBuf );
}
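Both sides of this chunked protocol derive the same iteration count from the broadcast max_count, so every process participates in every MPI_Scatterv call even after its own share is exhausted (it simply passes recvCount == 0). The count itself is an integer ceiling division; a minimal sketch of just that idiom, assuming dal_size_t is a 64-bit signed integer as in the DAL:

#include <stdio.h>

typedef long long dal_size_t;	/* assumption: matches the DAL typedef */

static int iterations_needed( dal_size_t max_count, int blockSize )
{
	/* ceil(max_count / blockSize) without floating point: the comparison
	 * (max_count % blockSize > 0) contributes 1 iff a remainder exists */
	return (int)( max_count / blockSize + ( max_count % blockSize > 0 ) );
}

int main( void )
{
	printf( "%d\n", iterations_needed( 10, 4 ) );	/* prints 3: chunks of 4, 4, 2 */
	return 0;
}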
Example #2
int decode_SNM_ADV( n2n_SNM_ADV_t     *pkt,
                    const snm_hdr_t   *hdr,
                    const uint8_t     *base,
                    size_t *rem,
                    size_t *idx )
{
    int i, retval = 0;

    memset(pkt, 0, sizeof(n2n_SNM_ADV_t));

    retval += decode_sock(&pkt->sn, base, rem, idx);
    if (GET_N(hdr->flags))
    {
        retval += decode_uint16(&pkt->comm_num, base, rem, idx);

        if (alloc_communities(&pkt->comm_ptr, pkt->comm_num))
        {
            return -1;
        }

        for (i = 0; i < pkt->comm_num; i++)
        {
            retval += decode_SNM_comm(&pkt->comm_ptr[i], base, rem, idx);
        }
    }

    return retval;
}
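Each decode_* primitive consumes bytes from base starting at *idx, shrinks the remaining-byte budget *rem, and returns the number of bytes it consumed, which is why retval accumulates the total decoded length. A minimal sketch of one such primitive, modelled on n2n's wire format (the big-endian layout and the error convention are assumptions here):

#include <stdint.h>
#include <stddef.h>

static int decode_uint16( uint16_t *out, const uint8_t *base, size_t *rem, size_t *idx )
{
    if ( *rem < 2 )
        return 0;                           /* not enough bytes left */
    *out  = (uint16_t)( ( base[*idx] << 8 ) | base[*idx + 1] );
    *idx += 2;                              /* advance the read cursor */
    *rem -= 2;                              /* shrink the remaining budget */
    return 2;                               /* bytes consumed */
}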
Example #3
int encode_SNM_INFO( uint8_t *base,
                     size_t  *idx,
                     const snm_hdr_t      *hdr,
                     const n2n_SNM_INFO_t *info )
{
    int i, retval = 0;
    retval += encode_SNM_hdr(base, idx, hdr);
    retval += encode_uint16(base, idx, info->sn_num);
    retval += encode_uint16(base, idx, info->comm_num);

    if (GET_S(hdr->flags) || GET_A(hdr->flags)) /* SNM / ADV addresses */
    {
        for (i = 0; i < info->sn_num; i++)
        {
            retval += encode_sock(base, idx, &info->sn_ptr[i]);
        }
    }
    if (GET_C(hdr->flags) || GET_N(hdr->flags))
    {
        for (i = 0; i < info->comm_num; i++)
        {
            retval += encode_SNM_comm(base, idx, &info->comm_ptr[i]);
        }
    }
    return retval;
}
//do_i_receive	: true if the calling process (node) has to RECEIVE data FROM another process (node) in the current step
bool do_i_receive( const TestInfo *ti, int step )
{
	switch( ti->algoVar[0] ) {
		case 1: return ! do_i_send( ti, step ) && GET_ID(ti) < 2*ACTIVE_PROCS(ti,step);
	}
	return ! do_i_send( ti, step ) && ! ( GET_ID(ti) % ( GET_N(ti) / ACTIVE_PROCS(ti,step+1) ) ) ;
}
//do_i_send	: true if the calling process (node) has to SEND data TO another process (node) in the current step
bool do_i_send( const TestInfo *ti, int step )
{
	switch( ti->algoVar[0] ) {
		case 1: return GET_ID(ti) < ACTIVE_PROCS(ti,step);
	}
	return ! ( GET_ID(ti) % ( GET_N(ti) / ACTIVE_PROCS(ti,step) ) );
}
//to_who	: return the rank of the process (node) TO which I SEND data in the current step
int to_who( const TestInfo *ti, int step )
{
	switch( ti->algoVar[0] ) {
		case 1: return GET_ID(ti) + ACTIVE_PROCS(ti,step);
	}
	return GET_ID(ti) + ( GET_N(ti) / ACTIVE_PROCS(ti,step+1) );
}
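Assuming ACTIVE_PROCS(ti,step) doubles each step (1, 2, 4, ...), the default variants of do_i_send, do_i_receive and to_who implement the classic binomial scatter tree. A worked trace for GET_N() == 8 (the ACTIVE_PROCS assumption is ours, not stated in the snippet):

/* step 0: 8/1 = 8, so only rank 0 sends, to 0 + 8/2 = 4    -> 0,4 hold data
 * step 1: 8/2 = 4, ranks 0,4 send, to rank + 8/4 = rank+2  -> 0,2,4,6 hold data
 * step 2: 8/4 = 2, ranks 0,2,4,6 send, to rank + 1         -> all 8 hold data */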
void gather( const TestInfo *ti, Data *data )
{
	// node 0
	if( ! GET_ID(ti) ) {
		/*
		int actualSize = *size;
		int i;
		// receiving sequentially from other nodes
		for( i = 1; i < GET_N(ti); ++ i ) {
			MPI_Status stat;
			_MPI_Recv( a+actualSize, GET_M(ti)-actualSize, MPI_INT, nth_token_owner(ti,i), 0, MPI_COMM_WORLD, &stat );
			_MPI_Get_count( &stat, MPI_INT, size );
			actualSize += *size;
		}
		*size = actualSize;
		*/

		int i;
		// receiving sequentially from other nodes
		for( i = 1; i < GET_N(ti); ++ i ) {
			DAL_receiveAU( data, nth_token_owner(ti,i) );
		}
	}
	// other nodes
	else {
		// _MPI_Send ( a, *size, MPI_INT, 0, 0, MPI_COMM_WORLD );
		// *size = 0;
		DAL_sendU( data, 0 );
		DAL_destroy( data );
	}
}
Example #8
int encode_SNM_REQ( uint8_t *base,
                    size_t  *idx,
                    const snm_hdr_t     *hdr,
                    const n2n_SNM_REQ_t *req )
{
    int i, retval = 0;
    retval += encode_SNM_hdr(base, idx, hdr);
    if (GET_N(hdr->flags))
    {
        retval += encode_uint16(base, idx, req->comm_num);

        for (i = 0; i < req->comm_num; i++)
        {
            retval += encode_SNM_comm(base, idx, &req->comm_ptr[i]);
        }
    }
    return retval;
}
Example #9
int encode_SNM_ADV( uint8_t *base,
                    size_t  *idx,
                    const snm_hdr_t     *hdr,
                    const n2n_SNM_ADV_t *adv )
{
    int i, retval = 0;
    retval += encode_SNM_hdr(base, idx, hdr);
    retval += encode_sock(base, idx, &adv->sn);
    if (GET_N(hdr->flags))
    {
        retval += encode_uint16(base, idx, adv->comm_num);

        for (i = 0; i < adv->comm_num; i++)
        {
            retval += encode_SNM_comm(base, idx, &adv->comm_ptr[i]);
        }
    }
    return retval;
}
Example #10
int decode_SNM_INFO( n2n_SNM_INFO_t   *pkt,
                     const snm_hdr_t  *hdr,
                     const uint8_t    *base,
                     size_t * rem,
                     size_t * idx )
{
    int i, retval = 0;

    memset(pkt, 0, sizeof(n2n_SNM_INFO_t));

    retval += decode_uint16(&pkt->sn_num, base, rem, idx);
    retval += decode_uint16(&pkt->comm_num, base, rem, idx);

    if (GET_S(hdr->flags) || GET_A(hdr->flags)) /* SNM / ADV addresses */
    {
        if (alloc_supernodes(&pkt->sn_ptr, pkt->sn_num))
        {
            return -1;
        }
        for (i = 0; i < pkt->sn_num; i++)
        {
            retval += decode_sock(&pkt->sn_ptr[i], base, rem, idx);
        }
    }
    if (GET_C(hdr->flags) || GET_N(hdr->flags))
    {
        if (alloc_communities(&pkt->comm_ptr, pkt->comm_num))
        {
            free_supernodes(&pkt->sn_ptr);
            return -1;
        }

        for (i = 0; i < pkt->comm_num; i++)
        {
            retval += decode_SNM_comm(&pkt->comm_ptr[i], base, rem, idx);
        }
    }

    return retval;
}
Example #11
// return the ID of the node owning the n-th data token. Useful during gathering
int nth_token_owner( const TestInfo *ti, int n )
{
	switch( ti->algoVar[0] ) {
		case 1:
		{
			int res = 0;
			int nodes = GET_N(ti);
			int steps = GET_STEP_COUNT(ti);
			int i;

			for( i = 0; i < steps; ++ i ) {
				if( n >= nodes/2 ) {
					res += ACTIVE_PROCS(ti,i);
					n -= nodes/2;
				}
				nodes /= 2;
			}

			return res;
		}
	}
	return n;
}
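For the tree variant (algoVar[0] == 1) the loop effectively bit-reverses the token index, which matches the order in which the scatter tree above hands out blocks. A worked trace, again assuming ACTIVE_PROCS(ti,i) = 2^i and GET_STEP_COUNT(ti) = log2(N):

/* N = 8, n = 3 (binary 011):
 *   step 0: nodes = 8, 3 <  4 -> no change
 *   step 1: nodes = 4, 3 >= 2 -> res += 2, n = 1
 *   step 2: nodes = 2, 1 >= 1 -> res += 4
 * owner = 6 (binary 110), the 3-bit reversal of n */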
Example #12
void log_SNM_hdr( const snm_hdr_t *hdr )
{
    traceEvent( TRACE_DEBUG, "HEADER type=%d S=%d C=%d N=%d A=%d E=%d Seq=%d", hdr->type,
                GET_S(hdr->flags), GET_C(hdr->flags), GET_N(hdr->flags), GET_A(hdr->flags), GET_E(hdr->flags),
                hdr->seq_num );
}
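The GET_* accessors used throughout the SNM snippets test single bits of the header's flags field; note that this GET_N is unrelated to the DAL's GET_N(), which returns the number of MPI processes. The real bit positions are defined in the n2n SNM headers; a purely hypothetical sketch of their shape:

/* Illustration only: the actual bit assignments live in the n2n headers. */
#define GET_S(flags)  (((flags) >> 4) & 1)
#define GET_C(flags)  (((flags) >> 3) & 1)
#define GET_N(flags)  (((flags) >> 2) & 1)
#define GET_A(flags)  (((flags) >> 1) & 1)
#define GET_E(flags)  ( (flags)       & 1)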
int main( int argc, char **argv )
{
	int i, n;
	const int root = 0;

	DAL_initialize( &argc, &argv );

	n = GET_N();
	if( n < 2 ) {
		TESTS_ERROR( 1, "Use this with at least 2 processes!" );
	}

	dal_size_t scounts[n];
	dal_size_t sdispls[n];
	int size = n*5;	//size of data d
	int s;

	//Initializing send counts randomly
	memset( scounts, 0, n*sizeof(dal_size_t) );
	for ( s=0; s<size; s++ )
		scounts[rand()%n]++;

	//Computing displacements
	for ( s=0, i=0; i<n; i++ ) {
		sdispls[i] = s;
		s += scounts[i];
	}

	//Data
	Data d;
	DAL_init( &d );

	if ( GET_ID() == root ) {
// 		SPD_ASSERT( DAL_allocArray( &d, size ), "error allocating data..." );
		SPD_ASSERT( DAL_allocData( &d, size ), "error allocating data..." );

		//tmp buffer to init data
		Data buffer;
		DAL_init( &buffer );
		SPD_ASSERT( DAL_allocArray( &buffer, size ), "error allocating buffer..." );
		for( i=0; i<size; i++ )
			buffer.array.data[i] = i;

		//Initializing data
		DAL_dataCopy( &buffer, &d );

		//Destroying tmp buffer
		DAL_destroy( &buffer );

	}

	if( GET_ID() == root )
		DAL_PRINT_DATA( &d, "This is what I had" );

	//Scatter communication
	TEST_DAL_scatterv( &d, scounts, sdispls, root );

	DAL_PRINT_DATA( &d, "This is what I got" );

	DAL_destroy( &d );

	DAL_finalize( );
	return 0;
}
void TEST_DAL_scattervSend( Data *data, dal_size_t *counts, dal_size_t *displs )
{
	int sc[GET_N()];
	int sd[GET_N()];
	int i, j;

	Data globalBuf;
	DAL_acquireGlobalBuffer( &globalBuf );

	DAL_ASSERT( globalBuf.array.size >= GET_N(), &globalBuf, "The global-buffer is too small for a scatter communication (its size is "DST", but there are %d processes)", globalBuf.array.size, GET_N() );

	//VALID ONLY FOR THIS TEST: shrink the global buffer so that//
	//blockSize == 1 and the chunked protocol below is exercised//
	globalBuf.array.size = GET_N();
	//////////////////////////////////////////////////////////////

	int blockSize = DAL_dataSize(&globalBuf) / GET_N();

	//Retrieving the number of iterations
	dal_size_t max_count = 0;
	for ( i=0; i<GET_N(); i++ )
		if ( counts[i] > max_count )
			max_count = counts[i];
	MPI_Bcast( &max_count, 1, MPI_LONG_LONG, GET_ID(), MPI_COMM_WORLD );
	int num_iterations = max_count / blockSize + (max_count % blockSize > 0);
	int s, tmp;

	switch( data->medium ) {
		case File: {

			for ( i=0; i<num_iterations; i++ ) {

				for ( s=0, j=0; j<GET_N(); j++ ) {
					tmp = MIN( blockSize, (counts[j]-i*blockSize) );
					sc[j] =	tmp > 0 ? tmp : 0;	//Number of elements to be sent to process j by MPI_Scatterv
					sd[j] = s;
					s += sc[j];

					if( sc[j] )
						DAL_dataCopyOS( data, displs[j] + i*blockSize, &globalBuf, sd[j], sc[j] );
				}
				MPI_Scatterv( globalBuf.array.data, sc, sd, MPI_INT, MPI_IN_PLACE, sc[GET_ID()], MPI_INT, GET_ID(), MPI_COMM_WORLD );
			}

			//TODO: resize root data (maybe a DAL_reallocData function would be useful)
			data->file.size = counts[GET_ID()];
			break;
		}
		case Array: {

			for ( i=0; i<num_iterations; i++ ) {

				for ( j=0; j<GET_N(); j++ ) {
					tmp = MIN( blockSize, (counts[j]-i*blockSize) );
					sc[j] =	tmp > 0 ? tmp : 0;	//Number of elements to be sent to process j by MPI_Scatterv
					sd[j] = displs[j] + i*blockSize;
				}
				MPI_Scatterv( data->array.data, sc, sd, MPI_INT, MPI_IN_PLACE, sc[GET_ID()], MPI_INT, GET_ID(), MPI_COMM_WORLD );
			}
			SPD_ASSERT( DAL_reallocArray( data, counts[GET_ID()] ), "not enough memory to allocate data" );
			break;
		}
		default:
			DAL_UNSUPPORTED( data );
	}

	DAL_releaseGlobalBuffer( &globalBuf );
}
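At the root, MPI_Scatterv is called with MPI_IN_PLACE as the receive buffer, so the root's own block is taken from (and stays in) the send buffer without a self-copy. A minimal self-contained illustration of that pattern with plain MPI and fixed counts (no DAL; the sizes are made up for the example):

#include <mpi.h>
#include <stdio.h>

int main( int argc, char **argv )
{
	int rank, n, i, buf[4] = { 0 };
	MPI_Init( &argc, &argv );
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	MPI_Comm_size( MPI_COMM_WORLD, &n );

	int counts[n], displs[n], data[4*n];
	for ( i = 0; i < n; i++ ) {
		counts[i] = 4;		//4 elements per process
		displs[i] = 4*i;
	}

	if ( rank == 0 ) {
		for ( i = 0; i < 4*n; i++ )
			data[i] = i;
		//root keeps its own block in place: recvbuf is MPI_IN_PLACE
		MPI_Scatterv( data, counts, displs, MPI_INT, MPI_IN_PLACE, 4, MPI_INT, 0, MPI_COMM_WORLD );
	} else {
		MPI_Scatterv( NULL, NULL, NULL, MPI_INT, buf, 4, MPI_INT, 0, MPI_COMM_WORLD );
		printf( "rank %d got %d..%d\n", rank, buf[0], buf[3] );
	}

	MPI_Finalize( );
	return 0;
}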
int main( int argc, char **argv )
{
// 	int i, n;
// 	Data d;
// 	DAL_init( &d );
//
// 	DAL_initialize( &argc, &argv );
//
// 	n = GET_N();
// 	if( n < 2 ) {
// 		TESTS_ERROR( 1, "Use this with at least 2 processes!" );
// 	}
//
// 	if( GET_ID() == 0 ) {
// 		Data tmp;
// 		DAL_init( &tmp );
// 		SPD_ASSERT( DAL_allocArray( &tmp, 1 ), "error allocating array..." );
// 		tmp.array.data[0] = 666;
//
// 		SPD_ASSERT( DAL_allocData( &d, 1 ), "error allocating data..." );
//
// 		DAL_dataCopy( &tmp, &d );
//
// 		DAL_destroy( &tmp );
//
// 		DAL_send( &d, GET_ID()+1 );
// 	}
// 	else if( GET_ID() < GET_N()-1 ) {
// 		DAL_receive( &d, 1, GET_ID()-1 );
// 		DAL_send( &d, GET_ID()+1 );
// 	}
// 	else /* GET_ID() == GET_N()-1 */ {
// 		DAL_receive( &d, 1, GET_ID()-1 );
// 	}
//
// 	DAL_PRINT_DATA( &d, "This is what I got" );
//
// 	DAL_destroy( &d );
//
// 	DAL_finalize( );

	int i, n;

	DAL_initialize( &argc, &argv );

	n = GET_N();
	if( n < 2 ) {
		TESTS_ERROR( 1, "Use this with at least 2 processes!" );
	}

	int count = 10;

	Data sendData;
	DAL_init( &sendData );

	Data recvData;
	DAL_init( &recvData );

	SPD_ASSERT( DAL_allocData( &sendData, count ), "error allocating data..." );

	Data tmp;
	DAL_init( &tmp );
	SPD_ASSERT( DAL_allocArray( &tmp, count ), "error allocating array..." );

	for ( i=0; i<count; i++ )
		tmp.array.data[i] = (GET_ID()+1)*i;

	DAL_dataCopy( &tmp, &sendData );

	DAL_PRINT_DATA( &sendData, "This is what I had" );

	TEST_DAL_sendrecv( &sendData, count/2, 0, &recvData, count/2, 0, (GET_ID() + 1)%n );
	TEST_DAL_sendrecv( &sendData, count/2, count/2, &recvData, count/2, count/2, (GET_ID() + 1)%n );

	DAL_PRINT_DATA( &recvData, "This is what I got" );

	DAL_destroy( &sendData );
	DAL_destroy( &recvData );

	DAL_finalize( );

	return 0;
}