Code example #1
int app_main()
{
  int rc;
  int pool_id = 0;  /* initialized: trigger_test() below reads it before the first poll */
//  int i;
//  myprintf("app main start\n");

  //do some init work
  rc = init_system();
  if(rc)
  {
    myprintf("system init failed, program is exiting...\n");
    return rc;
  }

#ifdef __TEST__
  init_test_data();
  trigger_test(pool_id);
#endif

  //MAIN PROCESS
  while(1)
  {
    //poll S frame
    pool_id = S_select_recv();
    if((pool_id == 0) || (pool_id == 1))
    {
      s_decode(pool_id);
      if(g_s_sample_full[pool_id])
      {
        release_fpga_buffer(S_MODE, pool_id);
        g_s_sample_full[pool_id] = 0;
        gSReleaseCnt[pool_id]++;
      }
    }

    //poll C frame
    pool_id = C_select_recv();
    if((pool_id == 0) || (pool_id == 1))
    {
      c_decode(pool_id);
      if(g_c_sample_full[pool_id])
      {
        release_fpga_buffer(C_MODE, pool_id);
        g_c_sample_full[pool_id] = 0;
        gCReleaseCnt[pool_id]++;
      }
    }
  }
}
Code example #2
/*
 * Run some tests
 */
int main() {
  uint32_t i;

  init_ptr_heap(&heap, 0, (ptr_heap_cmp_fun_t) compare);
  for (i=0; i<10; i++) {
    printf("\n=== Test %"PRIu32" ===\n", i);
    init_test_data(i);
    run_test(i);
    assert(ptr_heap_is_empty(&heap));
  }

  for (i=10; i<20; i++) {
    printf("\n=== Test %"PRIu32" ===\n", i);
    init_test_data(20);
    run_test(20);
    assert(ptr_heap_is_empty(&heap));
  }


  for (i=20; i<30; i++) {
    printf("\n=== Test %"PRIu32" ===\n", i);
    init_test_data_constant(6);
    run_test(6);
    assert(ptr_heap_is_empty(&heap));
  }


  for (i=30; i<50; i++) {
    printf("\n=== Test %"PRIu32" ===\n", i);
    init_test_data(200);
    run_test(200);
    assert(ptr_heap_is_empty(&heap));
  }


  delete_ptr_heap(&heap);

  return 0;
}
Code example #3
void process_test_stream(void)
{

  int forever = TRUE;
  char *callsign;
  double lat, lon, alt;

  date_time_t wall_time;
  time_t last_store = 0;

  init_test_data();

  while (forever) {
    
    PMU_auto_register("Got data");
    
    get_test_data(&callsign, &lat, &lon, &alt);
    
    /*
     * substitute wall clock time for gps time
     */
    
    wall_time.unix_time = time(NULL);
    uconvert_from_utime(&wall_time);

    if((wall_time.unix_time - last_store) >= Glob->params.data_interval) {

      store_line(&wall_time, lat, lon, alt, callsign,
		 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0);

      if (Glob->params.use_spdb) {
	if (Glob->params.single_database) {
	  store_single_spdb(&wall_time, lat, lon, alt, callsign);
	} else {
	  store_mult_spdb(&wall_time, lat, lon, alt, callsign);
	}
      } /* if (Glob->params.use_spdb) */
      
      last_store = wall_time.unix_time;

    } /* if((wall_time.unix_time - last_store) ... */
    
    sleep(1);
    
  } /* while (forever) */

  free_test_data();

}
Code example #4
File: rdma-client.c  Project: kento/ibrdma
int main(int argc, char **argv)
{
  char* host;
  char port[128];
  char* data;
  uint64_t size;

  if (argc != 3)
    usage(argv[0]);

  host = argv[1];
  sprintf(port,"%d",TRANSFER_PORT);
  size = atoi(argv[2]);
  data = (char*)malloc(size);
  
  init_test_data(data, size);

  ibrdma_send(host, port, data, size);
  return 0;
}
Code example #5
File: rdma-client.c  Project: kento/ibrdma
int main(int argc, char **argv)
{
  char* host;
  char* port;
  char* data;
  uint64_t size;

  if (argc != 3)
    usage(argv[0]);

  host = argv[1];
  port = "10150";
  size = atoi(argv[2]);
  data = (char*)malloc(size);

  init_test_data(data, size);
  
  ibrdma_send(host, port, data, size);
  return 0;
}
Code example #6
void cmd_simple_config(int argc, char **argv){
#if CONFIG_INCLUDE_SIMPLE_CONFIG
	char *custom_pin_code = NULL;
	int ret = SC_ERROR;

	if(argc > 2){
		printf("\n\rInput Error!");
	}
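	/* Note: a bad argument count only prints a warning here; execution
	   continues with custom_pin_code left as NULL. */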

	if(argc == 2)
		custom_pin_code = (argv[1]);

	wifi_enter_promisc_mode();
	if(init_test_data(custom_pin_code) == 0){
		filter_add_enable();
		ret = simple_config_test();
		print_simple_config_result(ret);
		remove_filter();
	}
#endif	
}
Code example #7
File: allpairc.c  Project: Jiawen1991/GitHub
static void test_pair (void)
{
  int prev, next, count, tag, index, i, outcount, indices[2];
  int rank, size, flag, ierr, reqcount;
  double send_buf[TEST_SIZE], recv_buf[TEST_SIZE];
  double buffered_send_buf[TEST_SIZE * 2 + MPI_BSEND_OVERHEAD]; /* factor of two is based on guessing - only dynamic allocation would be safe */
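  /* Hedged alternative (not part of the original test): instead of guessing, the attach
   * buffer could be sized exactly with MPI_Pack_size(), e.g.
   *   int pack_size;
   *   MPI_Pack_size(TEST_SIZE, MPI_DOUBLE, MPI_COMM_WORLD, &pack_size);
   *   void *bsend_buf = malloc(pack_size + MPI_BSEND_OVERHEAD);
   *   MPI_Buffer_attach(bsend_buf, pack_size + MPI_BSEND_OVERHEAD);
   */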
  void *buffer;
  MPI_Status statuses[2];
  MPI_Status status;
  MPI_Request requests[2];
  MPI_Comm dupcom, intercom;
#ifdef V_T

  struct _VT_FuncFrameHandle {
      char *name;
      int func;
      int frame;
  };
  typedef struct _VT_FuncFrameHandle VT_FuncFrameHandle_t;

  VT_FuncFrameHandle_t normal_sends,
      buffered_sends,
      buffered_persistent_sends,
      ready_sends,
      sync_sends,
      nblock_sends,
      nblock_rsends,
      nblock_ssends,
      pers_sends,
      pers_rsends,
      pers_ssends,
      sendrecv,
      sendrecv_repl,
      intercomm;

  int classid;
  VT_classdef( "Application:test_pair", &classid );


#define VT_REGION_DEF( _name, _nameframe, _class ) \
        (_nameframe).name=_name; \
        VT_funcdef( (_nameframe).name, _class, &((_nameframe).func) );
#define VT_BEGIN_REGION( _nameframe ) \
        LOCDEF(); \
        VT_begin( (_nameframe).func )
#define VT_END_REGION( _nameframe ) \
        LOCDEF(); VT_end( (_nameframe).func )
#else
#define VT_REGION_DEF( _name, _nameframe, _class )
#define VT_BEGIN_REGION( _nameframe )
#define VT_END_REGION( _nameframe )

#endif




  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
  if ( size < 2 ) {
      if ( rank == 0 ) {
	  printf("Program needs to be run on at least 2 processes.\n");
      }
      ierr = MPI_Abort( MPI_COMM_WORLD, 66 );
  }
  ierr = MPI_Comm_dup(MPI_COMM_WORLD, &dupcom);

  if ( rank >= 2 ) {
      /*      printf( "%d Calling finalize.\n", rank ); */
      ierr = MPI_Finalize( );
      exit(0);
  }

  next = rank + 1;
  if (next >= 2)
    next = 0;

  prev = rank - 1;
  if (prev < 0)
    prev = 1;

  VT_REGION_DEF( "Normal_Sends", normal_sends, classid );
  VT_REGION_DEF( "Buffered_Sends", buffered_sends, classid );
  VT_REGION_DEF( "Buffered_Persistent_Sends", buffered_persistent_sends, classid );
  VT_REGION_DEF( "Ready_Sends", ready_sends, classid );
  VT_REGION_DEF( "Sync_Sends", sync_sends, classid );
  VT_REGION_DEF( "nblock_Sends", nblock_sends, classid );
  VT_REGION_DEF( "nblock_RSends", nblock_rsends, classid );
  VT_REGION_DEF( "nblock_SSends", nblock_ssends, classid );
  VT_REGION_DEF( "Pers_Sends", pers_sends, classid );
  VT_REGION_DEF( "Pers_RSends", pers_rsends, classid );
  VT_REGION_DEF( "Pers_SSends", pers_ssends, classid );
  VT_REGION_DEF( "SendRecv", sendrecv, classid );
  VT_REGION_DEF( "SendRevc_Repl", sendrecv_repl, classid );
  VT_REGION_DEF( "InterComm", intercomm, classid );



/*
 * Normal sends
 */

  VT_BEGIN_REGION( normal_sends );

  if (rank == 0)
    printf ("Send\n");

  tag = 0x100;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( normal_sends );


/*
 * Buffered sends
 */

  VT_BEGIN_REGION( buffered_sends );

  if (rank == 0)
    printf ("Buffered Send\n");

  tag = 138;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_sends );


/*
 * Buffered persistent sends
 */

  VT_BEGIN_REGION( buffered_persistent_sends );

  if (rank == 0)
    printf ("Buffered Persistent Send\n");

  tag = 238;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Buffer_attach(buffered_send_buf, sizeof(buffered_send_buf));
    MPI_Bsend_init(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, requests);
    MPI_Start(requests);
    MPI_Wait(requests, statuses);
    MPI_Request_free(requests);
    MPI_Buffer_detach(&buffer, &size);
    if(buffer != buffered_send_buf || size != sizeof(buffered_send_buf)) {
        printf ("[%d] Unexpected buffer returned by MPI_Buffer_detach(): %p/%d != %p/%d\n", rank, buffer, size, buffered_send_buf, (int)sizeof(buffered_send_buf));
        MPI_Abort(MPI_COMM_WORLD, 201);
    }
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check(recv_buf, prev, tag, count, &status, TEST_SIZE, "send and recv");
  }
  else {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,"send and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

  }

  VT_END_REGION( buffered_persistent_sends );


/*
 * Ready sends.  Note that we must ensure that the receive is posted
 * before the rsend; this requires using Irecv.
 */


  VT_BEGIN_REGION( ready_sends );

  if (rank == 0)
    printf ("Rsend\n");

  tag = 1456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Recv(MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD, &status);
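    /* the zero-byte receive above is the handshake: it completes only after the
       partner has posted its Irecv, so the MPI_Rsend below is legal */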
    MPI_Rsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
    MPI_Probe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &status);
    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);
    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Send( MPI_BOTTOM, 0, MPI_INT, next, tag, MPI_COMM_WORLD);
    MPI_Wait(requests, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "rsend and recv");
    init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( ready_sends );

/*
 * Synchronous sends
 */

  VT_BEGIN_REGION( sync_sends );

  if (rank == 0)
    printf ("Ssend\n");

  tag = 1789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);
    if (flag)
      printf ("Iprobe succeeded! source %d, tag %d\n",status.MPI_SOURCE,
                                                      status.MPI_TAG);

    MPI_Ssend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);

    while (!flag)
      MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);

    if (status.MPI_SOURCE != prev)
      printf ("Incorrect src, expected %d, got %d\n",prev, status.MPI_SOURCE);

    if (status.MPI_TAG != tag)
      printf ("Incorrect tag, expected %d, got %d\n",tag, status.MPI_TAG);

    MPI_Get_count(&status, MPI_DOUBLE, &i);

    if (i != count)
      printf ("Incorrect count, expected %d, got %d\n",count,i);

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "ssend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Ssend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sync_sends );

/*
 * Nonblocking normal sends
 */

  VT_BEGIN_REGION( nblock_sends );

  if (rank == 0)
    printf ("Isend\n");

  tag = 2123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Isend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
              (requests+1));
    MPI_Waitall(2, requests, statuses);
    rq_check( requests, 2, "isend and irecv" );

    msg_check(recv_buf,prev,tag,count,statuses, TEST_SIZE,"isend and irecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"isend and irecv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Isend(recv_buf, count, MPI_DOUBLE, next, tag,MPI_COMM_WORLD,
              (requests));
    MPI_Wait((requests), &status);
    rq_check(requests, 1, "isend (and recv)");
  }



  VT_END_REGION( nblock_sends );

/*
 * Nonblocking ready sends
 */


  VT_BEGIN_REGION( nblock_rsends );

  if (rank == 0)
    printf ("Irsend\n");

  tag = 2456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    MPI_Irsend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitany( 2, requests, &index, statuses);
      if( index == 0 ) {
	  memcpy( &status, statuses, sizeof(status) );
      }
      reqcount++;
    }

    rq_check( requests, 1, "irsend and irecv");
    msg_check(recv_buf,prev,tag,count,&status, TEST_SIZE,"irsend and irecv");
  }
  else {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests);
    MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, next, 0,
                  MPI_BOTTOM, 0, MPI_INT, next, 0,
                  dupcom, &status);
    flag = 0;
    while (!flag)
      MPI_Test(requests, &flag, &status);

    rq_check( requests, 1, "irsend and irecv (test)");
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "irsend and irecv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Irsend(recv_buf, count, MPI_DOUBLE, next, tag,
               MPI_COMM_WORLD, requests);
    MPI_Waitall(1, requests, statuses);
    rq_check( requests, 1, "irsend and irecv");
  }

  VT_END_REGION( nblock_rsends );

/*
 * Nonblocking synchronous sends
 */

  VT_BEGIN_REGION( nblock_ssends );

  if (rank == 0)
    printf ("Issend\n");

  tag = 2789;
  count = TEST_SIZE / 3;
  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
    MPI_Irecv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
              MPI_COMM_WORLD, requests );
    init_test_data(send_buf,TEST_SIZE,0);
    MPI_Issend(send_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,
               (requests+1));
    flag = 0;
    while (!flag)
      MPI_Testall(2, requests, &flag, statuses);

    rq_check( requests, 2, "issend and irecv (testall)");
    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, 
               "issend and recv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
             MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "issend and recv"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Issend(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD,requests);

    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    rq_check( requests, 1, "issend and recv (testany)");
  }


  VT_END_REGION( nblock_ssends );


/*
 * Persistent normal sends
 */

  VT_BEGIN_REGION( pers_sends );

  if (rank == 0)
    printf ("Send_init\n");

  tag = 3123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Send_init(send_buf, count, MPI_DOUBLE, next, tag,
                MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
                MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);
    MPI_Waitall(2, requests, statuses);
    msg_check( recv_buf, prev, tag, count, (statuses+1),
               TEST_SIZE, "persistent send/recv");
  }
  else {
    MPI_Start((requests+1));
    MPI_Wait((requests+1), &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "persistent send/recv");
    init_test_data(send_buf,TEST_SIZE,1);


    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_sends );

/*
 * Persistent ready sends
 */

  VT_BEGIN_REGION( pers_rsends );

  if (rank == 0)
    printf ("Rsend_init\n");

  tag = 3456;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Rsend_init(send_buf, count, MPI_DOUBLE, next, tag,
                  MPI_COMM_WORLD, requests);
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, (requests+1));

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
      MPI_Barrier( MPI_COMM_WORLD );
    MPI_Startall(2, requests);
    reqcount = 0;
    while (reqcount != 2) {
      MPI_Waitsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 1) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "waitsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start((requests+1));
    MPI_Barrier( MPI_COMM_WORLD );
    flag = 0;
    while (!flag)
      MPI_Test((requests+1), &flag, &status);

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE, "test");

    init_test_data(send_buf,TEST_SIZE,1);

 
    MPI_Start(requests);
    MPI_Wait(requests, &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_rsends );


/*
 * Persistent synchronous sends
 */


  VT_BEGIN_REGION( pers_ssends );

  if (rank == 0)
    printf ("Ssend_init\n");

  tag = 3789;
  count = TEST_SIZE / 3;

  clear_test_data(recv_buf,TEST_SIZE);

  MPI_Ssend_init(send_buf, count, MPI_DOUBLE, next, tag,
                 MPI_COMM_WORLD, (requests+1));
  MPI_Recv_init(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, requests);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Startall(2, requests);

    reqcount = 0;
    while (reqcount != 2) {
      MPI_Testsome(2, requests, &outcount, indices, statuses);
      for (i=0; i<outcount; i++) {
        if (indices[i] == 0) {
          msg_check( recv_buf, prev, tag, count, (statuses+i),
                     TEST_SIZE, "testsome");
        }
	reqcount++;
      }
    }
  }
  else {
    MPI_Start(requests);
    flag = 0;
    while (!flag)
      MPI_Testany(1, requests, &index, &flag, statuses);

    msg_check( recv_buf, prev, tag, count, statuses, TEST_SIZE, "testany" );

    init_test_data(send_buf,TEST_SIZE,1);


     MPI_Start((requests+1));
     MPI_Wait((requests+1), &status);
  }
  MPI_Request_free(requests);
  MPI_Request_free((requests+1));


  VT_END_REGION( pers_ssends );


/*
 * Send/receive.
 */


  VT_BEGIN_REGION( sendrecv );

  if (rank == 0)
    printf ("Sendrecv\n");

  tag = 4123;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);
    MPI_Sendrecv(send_buf, count, MPI_DOUBLE, next, tag,
                 recv_buf, count, MPI_DOUBLE, prev, tag,
                 MPI_COMM_WORLD, &status );

    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecv");
  }
  else {
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }


  VT_END_REGION( sendrecv );

#ifdef V_T
  VT_flush();
#endif


/*
 * Send/receive replace.
 */

  VT_BEGIN_REGION( sendrecv_repl );

  if (rank == 0)
    printf ("Sendrecv_replace\n");

  tag = 4456;
  count = TEST_SIZE / 3;

  if (rank == 0) {
      init_test_data(recv_buf, TEST_SIZE,0);
    for (i=count; i< TEST_SIZE; i++)
      recv_buf[i] = 0.0;

    MPI_Sendrecv_replace(recv_buf, count, MPI_DOUBLE,
                         next, tag, prev, tag, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "sendrecvreplace");
  }
  else {
    clear_test_data(recv_buf,TEST_SIZE);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
             MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    msg_check( recv_buf, prev, tag, count, &status, TEST_SIZE,
               "recv/send for replace"); init_test_data(recv_buf,TEST_SIZE,1);
    MPI_Send(recv_buf, count, MPI_DOUBLE, next, tag, MPI_COMM_WORLD);
  }

  VT_END_REGION( sendrecv_repl );


/*
 * Send/Receive via inter-communicator
 */

  VT_BEGIN_REGION( intercomm );

  MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, next, 1, &intercom);

  if (rank == 0)
    printf ("Send via inter-communicator\n");

  tag = 4018;
  count = TEST_SIZE / 5;

  clear_test_data(recv_buf,TEST_SIZE);

  if (rank == 0) {
      init_test_data(send_buf,TEST_SIZE,0);

    LOCDEF();

    MPI_Send(send_buf, count, MPI_DOUBLE, 0, tag, intercom);
    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE, MPI_ANY_SOURCE,
              MPI_ANY_TAG, intercom, &status);
    msg_check(recv_buf, 0, tag, count, &status, TEST_SIZE, "send and recv via inter-communicator");
  }
  else if (rank == 1) {

    LOCDEF();

    MPI_Recv(recv_buf, TEST_SIZE, MPI_DOUBLE,MPI_ANY_SOURCE, MPI_ANY_TAG,
             intercom, &status);
    msg_check( recv_buf, 0, tag, count, &status, TEST_SIZE,"send and recv via inter-communicator");
    init_test_data(recv_buf,TEST_SIZE,0);
    MPI_Send(recv_buf, count, MPI_DOUBLE, 0, tag, intercom);

  }

  VT_END_REGION( intercomm );



  MPI_Comm_free(&intercom);
  MPI_Comm_free(&dupcom);
} 
Code example #8
int main(int argc, char * argv[])
{
   unsigned int nxl, nyl;

   PV::Timer timer;
   
   int status = 0;
   int argid  = 0;
   int query  = 1;
   int device = DEVICE;
   
   if (argc > 1) {
      device = atoi(argv[1]);
   }
	
   PV::CLDevice * cld = new PV::CLDevice(device);
   
   // query and print information about the devices found
   //
   if (query) cld->query_device_info();
   
   PV::CLKernel * kernel;

   if (device == 1) {
      printf("running on gpu, I hope\n");
      nxl = NXL;
      nyl = NYL;
      kernel = cld->createKernel("convolve.cl", "convolve");
   }
   else {
      nxl = 1;
      nyl = 1;
      kernel = cld->createKernel("convolve_cpu.cl", "convolve_cpu");
   }
   
   size_t global;                      // global domain size for our calculation
   size_t local;                       // local domain size for our calculation
   
   //cl_mem input;                         // device memory used for the input array
   //cl_mem output;                        // device memory used for the output array
   PV::CLBuffer * input;                         // device memory used for the input array
   PV::CLBuffer * output;                        // device memory used for the output array

   //const unsigned int size_ex  = SIZE_EX;
   //const unsigned int size_img = SIZE_IMG;
   size_t size_ex  = SIZE_EX * sizeof(float);
   size_t size_img = SIZE_IMG * sizeof(float);
   
   const unsigned int nxGlobal = NXGLOBAL;
   const unsigned int nyGlobal = NYGLOBAL;
   
   const unsigned int nPad  = NPAD;
   const unsigned int nPad2 = NPAD2;
   
   const unsigned int sx = 1;
   const unsigned int sy = nxGlobal + nPad2;
   
//   float * data     = (float *) malloc(size_ex * sizeof(float));    // original data set given to device
//   float * results_d = (float *) malloc(size_img * sizeof(float));  // results returned from device
//   float * results_l = (float *) malloc(size_img * sizeof(float));  // results returned from local thread
//   unsigned char * activity = (unsigned char *) malloc(size_ex * sizeof(unsigned char));
   float * data     = (float *) malloc(size_ex);    // original data set given to device
   float * results_d = (float *) malloc(size_img);  // results returned from device
   float * results_l = (float *) malloc(size_img);  // results returned from local thread
   //unsigned char * activity = (unsigned char *) malloc(size_ex * sizeof(unsigned char));
	
   assert(data != NULL);
   assert(results_d != NULL);
   assert(results_l != NULL);
   //assert(activity != NULL);
   
   bzero(data,      size_ex);
   bzero(results_d, size_img);
   bzero(results_l, size_img);
//   bzero(data,      size_ex*sizeof(float));
//   bzero(results_d, size_img*sizeof(float));
//   bzero(results_l, size_img*sizeof(float));
   //bzero(activity,  size_ex*sizeof(unsigned char));
	
   size_t local_size_ex = (nxl + nPad2) * (nyl + nPad2) * sizeof(float); // padded image patch
   
   //init_random_data(data, nxGlobal, nyGlobal, nPad);
   init_test_data(data, nxGlobal, nyGlobal, nPad);

   // time running kernel locally
   //
   timer.start();
   convolve_c(data, results_l, nxGlobal, nyGlobal, nPad);
   timer.stop();
   printf("Executing on local:  "); timer.elapsed_time();

#ifdef USE_ACTIVITY_BYTES
   input  = cld->addReadBuffer (argid++, activity, size_ex*sizeof(unsigned char));
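   // note: 'activity' is only declared in the commented-out code above, so this
   // USE_ACTIVITY_BYTES branch will not compile as the file stands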
#else
   //input  = cld->addReadBuffer (argid++, data,     size_ex*sizeof(float));
   input  = cld->createReadBuffer (size_ex, data);
   input->copyToDevice();
   status |= kernel->setKernelArg(argid++, input);
#endif
   //output = cld->addWriteBuffer(argid++, size_img*sizeof(float));
   output = cld->createWriteBuffer(size_img, results_d);
   status |= kernel->setKernelArg(argid++, output);
//   status = cld->addKernelArg  (argid++, nxGlobal);
//   status = cld->addKernelArg  (argid++, nyGlobal);
//   status = cld->addKernelArg  (argid++, nPad);
//   status = cld->addLocalArg   (argid++, local_size_ex);
   status |= kernel->setKernelArg  (argid++, (int)nxGlobal);
   status |= kernel->setKernelArg  (argid++, (int)nyGlobal);
   status |= kernel->setKernelArg  (argid++, (int)nPad);
   status |= kernel->setLocalArg   (argid++, local_size_ex);
   
   timer.start();
#ifdef USE_ACTIVITY_BYTES
   cld->run(nxGlobal/4, nyGlobal, nxl, nyl);
#else
   //cld->run(nxGlobal, nyGlobal, nxl, nyl);
   printf("starting run...\n");
   kernel->run((size_t)nxGlobal, (size_t)nyGlobal, nxl, nyl);
#endif
   timer.stop();
   printf("Executing on device: "); timer.elapsed_time();
   printf("Elapsed time on device:            device time == %f \n", ((float)kernel->get_execution_time())/1.0e6);
   
   //cld->copyResultsBuffer(output, results_d, size_img*sizeof(float));
   output->copyFromDevice();
   
   // Check results for accuracy
   //
   check_results(results_d, results_l, nxGlobal, nyGlobal, nPad);
   //validate_results(results_d, results_l, nxGlobal, nyGlobal, nPad);

   // Shutdown and cleanup
   //
   //clReleaseMemObject(input);
   //clReleaseMemObject(output);
   delete input;
   delete output;
   delete cld;
   
   printf("Finished...\n");
   
   return status;
}
Code example #9
/*
 * Test main function
 */
int test_noise(Test_time_result_type *times, int mes_length, int num_repeats, int num_noise_repeats, int loading, int num_noise_procs )
{
	int* mode_array=NULL;
	init_test_data( &td );
	
	int proc1, proc2;
	MPI_Status status;
	MPI_Request send_request;
	MPI_Request recv_request;
	
	MPI_Request* requests_noise=NULL;
	MPI_Status*  statuses_noise=NULL; 
	
	int sync_sum;

	int i, j, k, l;
	px_my_time_type time_beg,time_end;
	px_my_time_type sum;
	px_my_time_type st_deviation;
	
	int flag;
	int work_flag=1;

	int command[2];
				
	int remote_proc;

	/*
	 * Try to get enough memory; if any allocation fails, return -1.
	 * requests_noise holds room for both the send and the receive requests.
	 */
	requests_noise=(MPI_Request *)malloc(2*num_noise_procs*sizeof(MPI_Request));
	if(requests_noise == NULL )
	{
		return -1;
	}

	statuses_noise=(MPI_Status *)malloc(2*num_noise_procs*sizeof(MPI_Status));
	if(statuses_noise == NULL )
	{
		free(requests_noise);
		return -1;
	}

	mode_array=(int *)malloc(comm_size*sizeof(int));
	if(mode_array==NULL)
	{
		free(requests_noise);
		free(statuses_noise);
		return -1;
	}

	if ( !alloc_test_data( &td, mes_length, num_repeats, loading, num_noise_procs ) )
	{
		free(requests_noise);
		free(statuses_noise);
		free(mode_array);
		return -1;
	}

	/*
	 * OK, let's begin the test part
	 */
	srand( (unsigned)time( NULL ) );
	
	for(i=0; i<comm_size; i++)
	for(j=0; j<num_repeats; j++)
	{
		td.tmp_results[i][j] = 0;
	}

	if(comm_rank==0)
	{
		/* Uncomment to debug 
		printf("HELLO! I'm 0, press any key\n");
		getchar();
		*/

		for(proc1=0;proc1<comm_size; proc1++)
		for(proc2=0;proc2<comm_size; proc2++)
		{
			flag=init_mode_array(proc1,proc2,num_noise_procs,comm_size,mode_array);
			if(flag)
			{
				free(requests_noise);
				free(statuses_noise);
				free(mode_array);
				free_test_data( &td );
				return -1;
			}
			
			for(i=0;i<num_repeats;i++)
			{

				MPI_Bcast( mode_array, comm_size, MPI_INT, 0, MPI_COMM_WORLD );
				
				command[0]=i; /* Iteration number */
				command[1]=proc2; /* Leader (the process whose message passing durations will be stored) */

				if(proc1!=0)
				{
					MPI_Send(&command,2,MPI_INT,proc1,1,MPI_COMM_WORLD);
				}
				if((proc2!=0)&&(proc2!=proc1))
				{
					MPI_Send(&command,2,MPI_INT,proc2,1,MPI_COMM_WORLD);
				}
				
				/*
				 *
				 * Goal messages in proc with number 0
				 *
				 */
				if(mode_array[0]==MODE_GOAL_MESSAGES)
				{
					if(proc1==0)
					{
						remote_proc=proc2;
					}
					else
					{
						remote_proc=proc1;
					}

					time_beg=px_my_cpu_time();

						MPI_Isend( td.send_data[remote_proc], mes_length, MPI_BYTE, remote_proc, 0, MPI_COMM_WORLD, &send_request);
						MPI_Irecv( td.recv_data[remote_proc], mes_length, MPI_BYTE, remote_proc, 0, MPI_COMM_WORLD, &recv_request);
						MPI_Wait( &send_request, &status );
						MPI_Wait( &recv_request, &status );

					time_end = px_my_cpu_time();
					
					/*
					 *
					 * command[0] -- current iteration number in the message passing repeats
					 * command[1] -- process leader in the pair
					 *
					 */
					if(proc2==0)
					{
						td.tmp_results[proc1][i] = (px_my_time_type)(time_end - time_beg);
					}					
				}
				

				/*
				 *
				 * Noise messages in proc with number 0
				 *
				 */
				if(mode_array[0]==MODE_NOISE_MESSAGES)
				{
						for( j = 0; j < num_noise_repeats; j++ )
						{
							k=0;
							for( l = 1; l < comm_size; l++ )
							{
								if( mode_array[l]==MODE_NOISE_MESSAGES )
								{
									MPI_Isend( td.send_data_noise[l], loading, MPI_BYTE, l, 0, MPI_COMM_WORLD, &requests_noise[k] );
									MPI_Irecv( td.recv_data_noise[l], loading, MPI_BYTE, l, 0, MPI_COMM_WORLD, &requests_noise[k+1]);
									k+=2;
								}
						   }

						   MPI_Waitall(k,requests_noise,statuses_noise);
						}
				
				}


				/*
				 *
				 * This reduce is used for process synchronization. All processes must send
				 * their rank to the process with number 0.
				 *
				 */
				MPI_Reduce(&comm_rank,&sync_sum,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);

			} /* end for (num_repeats) */
			
		} /* End for proc1,proc2 */
		
		/*
		 *
		 * Finishing work 
		 *
		 */
		for(i=0;i<comm_size;i++)
		{
			mode_array[i]=MODE_FINISH_WORK;
		}
			
		MPI_Bcast( mode_array, comm_size, MPI_INT, 0, MPI_COMM_WORLD );
		MPI_Reduce(&comm_rank,&sync_sum,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);



	} /* end if(comm_rank==0) */
	else
	{
		while(work_flag)
		{
		 	MPI_Bcast( mode_array, comm_size, MPI_INT, 0, MPI_COMM_WORLD );
		 	switch(mode_array[comm_rank])
		 	{
				case MODE_GOAL_MESSAGES:
					
					MPI_Recv(&command,2,MPI_INT,0,1,MPI_COMM_WORLD,&status);
					
					remote_proc=comm_rank;					
					for(i=0;i<comm_size;i++)
					{
						if((mode_array[i]==MODE_GOAL_MESSAGES)&&(i!=comm_rank))
						{
							remote_proc=i;
							break;
						}						
					}

					time_beg=px_my_cpu_time();

						MPI_Isend( td.send_data[remote_proc], mes_length, MPI_BYTE, remote_proc, 0, MPI_COMM_WORLD, &send_request);
						MPI_Irecv( td.recv_data[remote_proc], mes_length, MPI_BYTE, remote_proc, 0, MPI_COMM_WORLD, &recv_request);
						MPI_Wait( &send_request, &status );
						MPI_Wait( &recv_request, &status );

					time_end = px_my_cpu_time();
					
					/*
					 *
					 * command[0] -- current iteration number in the message passing repeats
					 * command[1] -- process leader in the pair
					 *
					 */
					if(comm_rank==command[1])
					{
						td.tmp_results[remote_proc][command[0]] = (px_my_time_type)(time_end - time_beg);
					}
					
				break;
				case MODE_NOISE_MESSAGES:
						for( i = 0; i < num_noise_repeats; i++ )
						{
							k=0;
							for( j = 0; j < comm_size; j++ )
							{
								if( (j != comm_rank) && (mode_array[j] == MODE_NOISE_MESSAGES ) )
								{
									MPI_Isend( td.send_data_noise[j], loading, MPI_BYTE, j, 0, MPI_COMM_WORLD, &requests_noise[k] );
									MPI_Irecv( td.recv_data_noise[j], loading, MPI_BYTE, j, 0, MPI_COMM_WORLD, &requests_noise[k+1]);
									k+=2;
								}
						   }

						   MPI_Waitall(k,requests_noise,statuses_noise);
						}
				
				break;
				case MODE_FINISH_WORK:
					work_flag=0;
				break;
		 	}
			
			MPI_Reduce(&comm_rank,&sync_sum,1,MPI_INT,MPI_SUM,0,MPI_COMM_WORLD);

		} /* end while work_flag */ 	
	}	/* end else if(comm_rank==0) */

	/*
	 * Averaging results
	 */
	for( i = 0; i < comm_size; i++ )
	{
		sum = 0;
		for( j = 0; j < num_repeats; j++ )
		{
			sum += td.tmp_results[i][j];
		}
		times[i].average=(sum/(double)num_repeats);
 			
 		st_deviation=0;
 		for(j=0;j<num_repeats;j++)
 		{
  		 	st_deviation+=(td.tmp_results[i][j]-times[i].average)*(td.tmp_results[i][j]-times[i].average);
		}
 		st_deviation/=(double)(num_repeats);
 		times[i].deviation=sqrt(st_deviation);
 		
		/*
		 *
		 * Function my_time_cmp is described in the file  'network_test.h' and 
		 * is implemented in the file 'network_test.cpp'.
		 *
		 */
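		/* A minimal sketch of what such a comparator could look like, assuming
		 * px_my_time_type is a floating-point type (the real my_time_cmp lives in
		 * network_test.cpp):
		 *   int my_time_cmp(const void *a, const void *b)
		 *   {
		 *       px_my_time_type x = *(const px_my_time_type *)a;
		 *       px_my_time_type y = *(const px_my_time_type *)b;
		 *       return (x > y) - (x < y);
		 *   }
		 */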
 		qsort(td.tmp_results[i], num_repeats, sizeof(px_my_time_type), my_time_cmp );
 		times[i].median=td.tmp_results[i][num_repeats/2]; 	
 		
 		times[i].min=td.tmp_results[i][0]; 	
	}

	/*
	 * Free memory
	 */
	free( requests_noise );
	free( statuses_noise );
	free( mode_array );

	free_test_data( &td );

	return 0;
}