Example No. 1
int main(int argc, char **argv) {
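	// Google Test driver: initializes MPI (when built with HAVE_MPI), opens a
	// TAU profiling phase, runs the test suite, and finalizes everything.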

//    feenableexcept(FE_DIVBYZERO);
//    feenableexcept(FE_OVERFLOW);
//    feenableexcept(FE_INVALID);
//
//    signal(SIGSEGV, bt_sighandler);
//    signal(SIGFPE, bt_sighandler);
//    signal(SIGTERM, bt_sighandler);
//    signal(SIGINT, bt_sighandler);
//    signal(SIGABRT, bt_sighandler);

#ifdef HAVE_MPI
	MPI_Init(&argc, &argv);
	int num_procs;
	sip::SIPMPIUtils::check_err(MPI_Comm_size(MPI_COMM_WORLD, &num_procs), __LINE__,__FILE__);

	if (num_procs < 2) {
		std::cerr << "Please run this test with at least 2 mpi ranks"
				<< std::endl;
		return -1;
	}
	sip::SIPMPIUtils::set_error_handler();
	sip::SIPMPIAttr &sip_mpi_attr = sip::SIPMPIAttr::get_instance();
	attr = &sip_mpi_attr;
#endif
	barrier();
#ifdef HAVE_TAU
	TAU_PROFILE_SET_NODE(0);
	TAU_STATIC_PHASE_START("SIP Main");
#endif

//	sip::check(sizeof(int) >= 4, "Size of integer should be 4 bytes or more");
//	sip::check(sizeof(double) >= 8, "Size of double should be 8 bytes or more");
//	sip::check(sizeof(long long) >= 8, "Size of long long should be 8 bytes or more");

	printf("Running main() from test_sial.cpp\n");
	testing::InitGoogleTest(&argc, argv);
	barrier();
	int result = RUN_ALL_TESTS();

	std::cout << "Rank  " << attr->global_rank() << " Finished RUN_ALL_TEST() " << std::endl << std::flush;

#ifdef HAVE_TAU
	TAU_STATIC_PHASE_STOP("SIP Main");
#endif
	barrier();
#ifdef HAVE_MPI
	MPI_Finalize();
#endif
	return result;

}
Example No. 2
int main(int argc, char * argv[])
{
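  // Orio/TAU-instrumented ViennaCL benchmark: times a scaled vector update
  // (vcl_vec2 += vcl_s1 * vcl_vec1) on an OpenCL device.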
  TAU_INIT(&argc, &argv);
  TAU_PROFILE_TIMER(orio_maintimer, "main()", "int (int, char **)", TAU_USER);
  TAU_PROFILE_START(orio_maintimer);
  TAU_PROFILE_SET_NODE(0);
  // Use the first available OpenCL device for the ViennaCL context.
  std::vector<viennacl::ocl::device> devices = viennacl::ocl::platform().devices();
  std::vector<cl_device_id> my_devices;
  my_devices.push_back(devices[0].id());
  viennacl::ocl::setup_context(0L, my_devices);

 
  // Change this to float if your GPU does not support double precision
  typedef double       ScalarType;
  
  /////////////////////////////////////////////////
  ///////////// Vector operations /////////////////
  /////////////////////////////////////////////////
  
  void * orio_profiler;
  TAU_PROFILER_CREATE(orio_profiler, "orio_generated_code", "", TAU_USER);
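  // Repeat the measured region a few times; the inclusive timer value read
  // out below is reported for Orio's tuning driver.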

  for (int rep = 0; rep < 3; ++rep) {

  //
  // Define a few vectors (from STL and plain C) and viennacl::vectors
  //
  std::vector<ScalarType>      std_vec1(1000000);
  std::vector<ScalarType>      std_vec2(1000000);

  viennacl::vector<ScalarType> vcl_vec1(1000000);
  viennacl::vector<ScalarType> vcl_vec2(1000000);
  viennacl::scalar<ScalarType> vcl_s1 = ScalarType(5.0);

  //
  // Let us fill the CPU vectors with random values:
  // (random<> is a helper function from Random.hpp)
  //
  
  for (unsigned int i = 0; i < 1000000; ++i)
  {
    std_vec1[i] = random<ScalarType>(); 
    std_vec2[i] = 0.0;
  }
  
  //
  // Copy the CPU vectors to the GPU vectors and vice versa
  //
  TAU_PROFILER_START(orio_profiler);
  viennacl::copy(std_vec1.begin(), std_vec1.end(), vcl_vec1.begin()); // copy via the STL-style iterator interface
  viennacl::copy(std_vec2.begin(), std_vec2.end(), vcl_vec2.begin());

  vcl_vec2 += vcl_s1 * vcl_vec1;

  viennacl::copy(vcl_vec2.begin(), vcl_vec2.end(), std_vec2.begin());

  TAU_PROFILER_STOP(orio_profiler);
  }
  double orio_inclusive[TAU_MAX_COUNTERS];
  TAU_PROFILER_GET_INCLUSIVE_VALUES(orio_profiler, &orio_inclusive);
  printf("{'/*@ coordinate @*/' : %g}\n", orio_inclusive[0]);
  return EXIT_SUCCESS;
}
Example No. 3
int main (int argc, char *argv[]) 
{
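    /*
     * ADIOS/flexpath pipeline stage: repeatedly reads arrays from upstream
     * sources, "computes", and writes results to downstream sinks, with TAU
     * timers around each phase.
     */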
    validate_input(argc, argv);

    /*
     * Initialize TAU and start a timer for the main function.
     */
    TAU_INIT(&argc, &argv);
    TAU_PROFILE_SET_NODE(0);
    TAU_PROFILE_TIMER(tautimer, __func__, my_name, TAU_USER);
    TAU_PROFILE_START(tautimer);

    /*
     * Initialize MPI. Threaded support (MPI_THREAD_MULTIPLE) is requested but
     * not strictly required; with threads, the TAU data can be sent over SOS
     * asynchronously.
     */
    int rc = MPI_SUCCESS;
    int provided = 0;
    rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (rc != MPI_SUCCESS) {
        char errorstring[MPI_MAX_ERROR_STRING];
        int length = 0;
        MPI_Error_string(rc, errorstring, &length);
        fprintf(stderr, "Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring);
        fflush(stderr);
        exit(99);
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    my_printf("%s %s %d Running with comm_size %d\n", argv[0], my_name, getpid(), comm_size);
    MPI_Comm adios_comm;
    MPI_Comm_dup(MPI_COMM_WORLD, &adios_comm);

    adios_init ("arrays.xml", adios_comm);

    /*
     * Main loop: read from upstream sources, compute, and write to
     * downstream sinks.
     */
    int iter = 0;
    char tmpstr[256] = {0};
    int * return_codes = (int *)(calloc(num_sources,sizeof(int)));
    while (iter < iterations) {
        int index;
        /*
         * Read upstream input
         */
        for (index = 0 ; index < num_sources ; index++) {
            if (return_codes[index] > 0) {
                my_printf("%s source is gone\n", sources[index]);
                continue; // this input is gone
            }
            my_printf ("%s reading from %s.\n", my_name, sources[index]);
            sprintf(tmpstr,"%s READING FROM %s", my_name, sources[index]);
            TAU_START(tmpstr);
            //mpi_reader(adios_comm, sources[index]);
            return_codes[index] = flexpath_reader(adios_comm, index);
            TAU_STOP(tmpstr);
        }
        /*
        * "compute"
        */
        my_printf ("%s computing.\n", my_name);
        compute(iter);
        bool time_to_go = (num_sources == 0) ? (iter == (iterations-1)) : true;
        for (index = 0 ; index < num_sources ; index++) {
            if (return_codes[index] == 0) {
                time_to_go = false;
                break; // out of this for loop
            }
        }
        /*
         * Send output downstream
         */
        for (index = 0 ; index < num_sinks ; index++) {
            my_printf ("%s writing to %s.\n", my_name, sinks[index]);
            sprintf(tmpstr,"%s WRITING TO %s", my_name, sinks[index]);
            TAU_START(tmpstr);
            //mpi_writer(adios_comm, sinks[index]);
            flexpath_writer(adios_comm, index, (iter > 0), time_to_go);
            TAU_STOP(tmpstr);
        }
        if (time_to_go) {
            break; // out of the while loop
        }
        my_printf ("%s not time to go...\n", my_name);
        iter++;
    }

    /*
     * Finalize ADIOS
     */
    const char * const dot_filename = ".finished";
    if (num_sources > 0) {
        adios_read_finalize_method(ADIOS_READ_METHOD_FLEXPATH);
    #if 0
    } else {
        while (true) {
            // assume this is the main process. It can't exit until 
            // the last process is done.
            if( access( dot_filename, F_OK ) != -1 ) {
                // file exists
                unlink(dot_filename);
                break;
            } else {
                // file doesn't exist
                sleep(1);
            }
        }
    #endif
    }
    if (num_sinks > 0) {
        adios_finalize (my_rank);
    #if 0
    } else {
        // assume this is the last process. 
        // Tell the main process we are done.
        FILE *file;
        if (file = fopen(dot_filename, "w")) {
            fprintf(file, "done.\n");
            fclose(file);
        }
    #endif
    }

    /*
     * Finalize MPI
     */
    MPI_Comm_free(&adios_comm);
    MPI_Finalize();
    my_printf ("%s Done.\n", my_name);

    TAU_PROFILE_STOP(tautimer);
    return 0;
}
Example No. 4
File: tau.cpp Project: LLNL/Caliper
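// From Caliper's TAU service binding: mark this process as TAU node 0 when
// the service is initialized.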
virtual void initialize() {
    TAU_PROFILE_SET_NODE(0);
}
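All four examples follow the same basic TAU pattern: initialize TAU, set the node id, and wrap the region of interest in a user-level timer. Below is a minimal standalone sketch of that pattern, distilled from the examples above rather than taken from any one project; it assumes a non-MPI build (hence the explicit node id 0), and the timer name is purely illustrative.

#include <TAU.h>

int main(int argc, char *argv[])
{
    /* Initialize TAU and identify this process as node 0 (no MPI). */
    TAU_INIT(&argc, &argv);
    TAU_PROFILE_SET_NODE(0);

    /* Wrap the region of interest in a user-level timer. */
    TAU_PROFILE_TIMER(maintimer, "main()", "int (int, char **)", TAU_USER);
    TAU_PROFILE_START(maintimer);

    /* ... application work ... */

    TAU_PROFILE_STOP(maintimer);
    return 0;
}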