Example #1
File: omp-4.c Project: berezhko/openmp
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[]){
    int n;

    omp_set_nested(1);
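    /* nesting enabled: the inner parallel region below creates a new team for every thread of the outer team */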
#pragma omp parallel private(n)
    {
        n=omp_get_thread_num();
#pragma omp parallel
        {
            printf("Часть 1, нить %d - %d\n", n,
            omp_get_thread_num());
        }
    }

    omp_set_nested(0);
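    /* nesting disabled: the inner parallel region is serialized, so its omp_get_thread_num() is always 0 */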
#pragma omp parallel private(n)
    {
        n=omp_get_thread_num();
#pragma omp parallel
        {
            printf("Часть 2, нить %d - %d\n", n,
            omp_get_thread_num());
        }
    }

    return 0;
}
Example #2

int
foo (int k)
{
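  /* NOTE: q is assumed to be a variable declared at file scope in the original test file (not shown here). */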
  int i = 6, n = 0;
  omp_set_dynamic (0);
  omp_set_nested (1);
#pragma omp parallel shared (i) num_threads (3)
  {
    int l;

    if (omp_get_num_threads () != 3)
    #pragma omp atomic
      n += 1;
    else
    #pragma omp for
      for (l = 0; l < 3; l++)
	if (k)
	#pragma omp atomic
	  q += i;
	else
	#pragma omp parallel shared (i) num_threads (4)
	  {
	    if (omp_get_num_threads () != 4)
	    #pragma omp atomic
	      n += 1;
	    #pragma omp critical
	      i += 1;
	  }
  }
  if (n == 0 && i != 6 + 3 * 4)
    abort ();
  return 0;
}
Example #3
#include <random>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <omp.h>

// bb, qsort1(), check() and qsortstat_debug() are assumed to be defined elsewhere in this project.
int main(int argc, char * argv[])
{
	std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(1, 100);
	std::vector<int> q(argc == 1 ? 10000000 : atoi(argv[1]));

	std::cout << "problem size is " << q.size() << std::endl;
    for(size_t i = 0; i < q.size(); i++)
    	q[i] = dis(gen);

    if(getenv("DEBUG") != 0)
    	qsortstat_debug(1);

	//std::vector<int> q0 = q;
	//std::sort(q0.begin(),q0.end());
	//std::cout << "regular sort gives " << check(q0.begin(),q0.end()) << std::endl;
 	omp_set_nested(1);
 	omp_set_dynamic(1); // default dynamic is impl specific
	double t0,t1,t00;
 	t00 = omp_get_wtime();
	bb = q.begin();
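 	// one thread enters the single construct and starts the recursive sort; qsort1() presumably opens
 	// nested parallel regions, which is why omp_set_nested(1) was called above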
 	#pragma omp parallel
 	{
 		#pragma omp single
 		{
		 	t0 = omp_get_wtime();
	 		qsort1(q.begin(),q.end());
 		}	
 	}
 	t1 = omp_get_wtime();
	std::cout << "parallel sort gives " << check(q.begin(),q.end()) << " total " << t1-t00 << " = net " << t1-t0 << " + setup " << t0-t00 << std::endl;
	return 0;
}
Example #4
/* thds, errors and check_parallel() are assumed to be declared at file scope in the original test file. */
int main ()
{
  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("should be run this program on multi threads.\n");
    exit (0);
  }
  omp_set_dynamic (0);
  omp_set_nested (1);
  omp_set_num_threads (2);


  #pragma omp parallel
  {
    /* the nested region below is not actually run in parallel */
    #pragma omp parallel if (false)
    {
      /* this nested parallel region is serialized by the if(false) clause */
      check_parallel (1);
    }
  }


  if (errors == 0) {
    printf ("parallel 011 : SUCCESS\n");
    return 0;
  } else {
    printf ("parallel 011 : FAILED\n");
    return 1;
  }
}
Example #5
VertexSubset* PhiGraphEngine::vertexUpdate(Graph<Vertex>& phigraph,VertexSubset* frontier,PhiGraphProgram& app){

  VertexSubset* nextFrontier = new VertexSubset(phigraph.vertexNum);
    //uphiLong temp ;
  //omp_set_nested(true);
  //#pragma omp parallel for num_threads(dynamicThreadNum(frontier->m,MIN_ITERATION_NUM,machine_core_num))
  threadNum = dynamicThreadNum(frontier->m);
  if(threadNum == 1){
    omp_set_num_threads(machine_core_num/2);
    for(uphiLong i = 0;i < frontier->m;i++){

      //printf("ID: %d, Max threads: %d, Num threads: %d \n",omp_get_thread_num(), omp_get_max_threads(), omp_get_num_threads());
      uphiLong curVertex = frontier->vertex[i];
      app.update(phigraph,nextFrontier,curVertex);
      //printf("hahahhhhh\n" );
    }
  }else{
    //printf("frontier:%ld\n",frontier->m );
    omp_set_num_threads(threadNum);
    omp_set_nested(true);

    #pragma omp parallel for schedule(dynamic,4)
    for(uphiLong i = 0;i < frontier->m;i++){
      #pragma prefetch frontier->vertex[i]:0:8
      //printf("ID: %d, Max threads: %d, Num threads: %d \n",omp_get_thread_num(), omp_get_max_threads(), omp_get_num_threads());
      uphiLong curVertex = frontier->vertex[i];
      app.update(phigraph,nextFrontier,curVertex);
      //printf("hahahhhhh\n" );
    }
  }

  return nextFrontier;
}
Example #6

/* TRUE and FALSE are assumed to be defined as 1 and 0 in the original source. */
int main()
{

#ifdef _OPENMP
   (void) omp_set_dynamic(FALSE);
   if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
   (void) omp_set_num_threads(3);
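   /* 3 threads in the outer region; each creates an inner team of 2 below */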

   (void) omp_set_nested(TRUE);
   if (! omp_get_nested()) {printf("Warning: nested parallelism not set\n");}
#endif

   printf("Nested parallelism is %s\n", 
           omp_get_nested() ? "supported" : "not supported");

/*
  ------------------------------------------------------------------------
  Inside the parallel region we can no longer distinguish between the 
  threads
  ------------------------------------------------------------------------
*/
#pragma omp parallel
   {
     printf("Thread %d executes the outer parallel region\n",
            omp_get_thread_num());

     #pragma omp parallel num_threads(2)
     {
        printf("  Thread %d executes the inner parallel region\n",
               omp_get_thread_num());
     }  /*-- End of inner parallel region --*/
   }  /*-- End of outer parallel region --*/

   return(0);
}
Example #7
#include <stdlib.h>
#include <omp.h>

int
main ()
{
  int n[4] = { -1, -1, -1, -1 };
  static int a = 2, b = 4;
  omp_set_num_threads (4);
  omp_set_dynamic (0);
  omp_set_nested (1);
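  /* each of the 4 outer threads keeps its id in its private b; the inner team works on a firstprivate copy of a */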
#pragma omp parallel private(b)
  {
    b = omp_get_thread_num ();
#pragma omp parallel firstprivate(a)
    {
      a = (omp_get_thread_num () + a) + 1;
      if (b == omp_get_thread_num ())
	n[omp_get_thread_num ()] = a + (b << 4);
    }
  }
  if (n[0] != 3)
    abort ();
  if (n[3] != -1
      && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36))
    abort ();
  return 0;
}
Example #8
File: nested-1.c Project: 0day-ci/gcc
#include <stdlib.h>
#include <omp.h>

int
main (void)
{
  int i = -1, j = -1;

  omp_set_nested (1);
  omp_set_dynamic (0);
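  /* i records the outer team size (4) and thread id; j records the inner team size (2) and inner thread id */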
#pragma omp parallel num_threads (4)
  {
#pragma omp single
    {
      i = omp_get_thread_num () + omp_get_num_threads () * 256;
#pragma omp parallel num_threads (2)
      {
#pragma omp single
        {
          j = omp_get_thread_num () + omp_get_num_threads () * 256;
        }
      }
    }
  }
  if (i < 4 * 256 || i >= 4 * 256 + 4)
    abort ();
  if (j < 2 * 256 || j >= 2 * 256 + 2)
    abort ();
  return 0;
}
Example #9
int main(int argc, char** argv)
{
  printf("===== test_balanced.c\n");
  double tol = 0.5;
  if (argc > 1)
    tol = atof(argv[1]);

  // set up parallel environment: need to explicitly allow "nested" threading.
  // (That is, omp threads creating their own omp parallel constructs.)
  //
  int num_threads = 2; // 1 or 2
  omp_set_num_threads(num_threads);
  omp_set_nested(1);
  omp_set_dynamic(0);
  printf("num_threads = %d\n", num_threads);

  double err;
  double integral;
  integral = quadrature_adaptive_parallel(integrand, -2, 4, tol, &err,
                                          "diagnostics_parallel.csv");

  printf("integral:  %f\n", integral);
  printf("(actual:   %f)\n", ACTUAL);
  printf("tolerance: %e\n", tol);
  printf("est error: %e\n", err);
  printf("act error: %e\n", fabs(integral-ACTUAL));
}
Example #10
void RayEngine::hybridRender() {

	Hybrid.renderTimer.start();

	if (Hybrid.enableThreaded) {

		omp_set_nested(true);
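		// two threads run concurrently: thread 0 drives the OptiX renderer, thread 1 the Embree renderer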
		#pragma omp parallel num_threads(2)
		{
			if (omp_get_thread_num() == 0)
				optixRender();
			else
				embreeRender();
		}
		embreeRenderUpdateTexture();
		optixRenderUpdateTexture();

	} else {

		optixRender();
		embreeRender();
		embreeRenderUpdateTexture();
		optixRenderUpdateTexture();

	}

	Hybrid.renderTimer.stop();

}
Example #11
int main ()
{
  int	thds, i;

  int	errors = 0;


  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("omp_get_max_threads return 1.\n");
    printf ("please, run this program on multi thread environment.\n");
    exit (0);
  }

  omp_set_dynamic (0);
  omp_set_nested (0);
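  /* nesting disabled: the inner parallel regions below are serialized, but omp_get_max_threads() should still report the value set for the outer level */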


  #pragma omp parallel
  {
    #pragma omp parallel
    {
      if (omp_get_max_threads () != thds) {
      #pragma omp critical
	errors += 1;
      }
    }
  }

#if defined(__OMNI_SCASH__) || defined(__OMNI_SHMEM__)
  /* Omni on SCASH does not support omp_set_num_threads yet,
   * so some of the tests are skipped.
   */
  printf ("skipping some tests, because Omni on SCASH/SHMEM does not support omp_set_num_threads yet.\n");
#else
  for (i=1; i<=thds; i++) {
    omp_set_num_threads (i);

    #pragma omp parallel
    {
      #pragma omp parallel
      {
	if (omp_get_max_threads () != i) {
          #pragma omp critical
	  errors += 1;
	}
      }
    }
  }
#endif


  if (errors == 0) {
    printf ("omp_get_max_threads 005 : SUCCESS\n");
    return 0;
  } else {
    printf ("omp_get_max_threads 005 : FAILED\n");
    return 1;
  }
}
Example #12
int main ()
{
  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("should be run this program on multi threads.\n");
    exit (0);
  }
  omp_set_dynamic (0);
  omp_set_nested (0);

  #pragma omp parallel
  {
    #pragma omp parallel
    {
      if (omp_get_thread_num () != 0 ||
	  omp_get_num_threads () != 1) {
        #pragma omp critical 
	errors += 1;
      }
    }
  }

  #pragma omp parallel
  func_nesting ();


  if (errors == 0) {
    printf ("nesting 001 : SUCCESS\n");
    return 0;
  } else {
    printf ("nesting 001 : FAILED\n");
    return 1;
  }
}
Example #13
File: main.cpp Project: imthexie/giraffe
// fast initialization steps that can be done in main thread
void InitializeFast()
{
	std::cout << "# Using " << omp_get_max_threads() << " OpenMP thread(s)" << std::endl;

	GetVersion();

#ifdef DEBUG
	std::cout << "# Running in debug mode" << std::endl;
#else
	std::cout << "# Running in release mode" << std::endl;
#endif

	Eigen::initParallel();

	// set Eigen to use 1 thread because we are doing OpenMP here
	Eigen::setNbThreads(1);

	// disable nested parallelism since we don't need it, and disabling it
	// makes managing number of threads easier
	omp_set_nested(0);

	// turn off IO buffering
	std::cout.setf(std::ios::unitbuf);

	initmagicmoves();
	BoardConstsInit();
	InitializeZobrist();
}
Example #14
int main(int argc, char* argv[])
{
    signal(SIGINT, sigint_handler);
#if !defined(NDEBUG)
    std::cout << "\t> Running in DEBUG mode" << std::endl;
#endif

#if defined(OPENMP_FOUND)
    omp_set_nested(true);
    std::cout << "\t> Running using OPENMP " << std::endl;
    std::cout << "\t\t> " << omp_get_max_threads() << " threads max" << std::endl;
    std::cout << "\t\t> " << omp_get_wtick()*1e9 << "ns tick" << std::endl;
    assert( omp_get_nested() );
#endif

//     test_random();
    Rng rng;
    rng.seed(rand());

    Options options = parse_options(argc, argv);

    typedef std::map<std::string, int> Wins;
    Wins wins;

    for (int kk=0; kk<options.number_of_games; kk++)
    {
        std::cout << std::endl << std::endl;
        std::cout << "****************************************" << std::endl;
        std::cout << "game " << kk << "/" << options.number_of_games << std::endl;

        const Game& game = play_game(options, rng);

        const int winner = game.state.get_winner();
        if (winner < 0) wins["draw"]++;
        else {
            std::string winner_name = "bot";
            if (game.hero_infos[winner].is_real_bot())
                winner_name = game.hero_infos[winner].name;
            wins[winner_name]++;
        }

        std::cout << std::endl;
        std::cout << "after " << options.number_of_games << " games" << std::endl;
        for (Wins::const_iterator wi=wins.begin(), wie=wins.end(); wi!=wie; wi++)
        {
            if (wi->first == "draw")
            {
                std::cout << "  " << wi->second << " draw" << std::endl;
                continue;
            }
            std::cout << "  " << wi->second << " victory for " << wi->first << std::endl;
        }

        if (sigint_already_caught) break;
    }

    return 0;
}
Example #15
int main ()
{
  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("should be run this program on multi threads.\n");
    exit (0);
  }
  omp_set_dynamic (0);
  omp_set_nested(0);


  clear ();
  #pragma omp parallel
  {
    #pragma omp barrier

    #pragma omp parallel
    {
      /* this nested parallel region is serialized. */
      #pragma omp critical
      {
	int i;

	i = read_data ();
	waittime (1);
	write_data (i+1);
      }
    }
  }
  check (thds);

  clear ();
  #pragma omp parallel
  {
    #pragma omp barrier

    #pragma omp parallel
    {
      /* this nested parallel region is serialized. */
      func_critical ();
    }
  }
  check (thds);


  clear ();
  func_critical ();
  check (1);


  if (errors == 0) {
    printf ("critical 003 : SUCCESS\n");
    return 0;
  } else {
    printf ("critical 003 : FAILED\n");
    return 1;
  }
}
Example #16
File: rtrt.cpp Project: bheads/rtrt_thesis
int main (int argc, char *argv[])
{
	srand (time (NULL));
	omp_set_nested (1);
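	// nesting is enabled so that rt.render() (called from the single section below) can presumably open its own parallel regions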
	omp_set_num_threads (16);
	// process command line
	parse_commandline (&argc, &argv);
	LOG (INFO) << "rtrt init";
	Window win (FLAGS_width, FLAGS_height, FLAGS_fullscreen, "Real-time Ray Tracer", FLAGS_interval);
	Image front (win.width(), win.height()), back (win.width(), win.height()); // create the images
	Image *front_p = &front, *back_p = &back;
	World world;
	//world.fill(75, 4);
	world.demo0();
	RayTracer rt (world);
	#pragma omp parallel
	{
		#pragma omp single nowait
		{
			while (win.is_running())
			{
				// update frame
				PerformanceMonitor::instance().RT_FPS.count();
				//win.update_frame_rate();
				win.clearXY();
				// render the new back buffer
				rt.render (back_p);
				// swap buffers
				std::swap (front_p, back_p);
			}
		}
		#pragma omp master
		{
			while (win.is_running())
			{
				// main rendering loop, keep rendering the front buffer
				PerformanceMonitor::instance().GL_FPS.count();
				win.clear(); // clear the render windows back buffer

				if (FLAGS_fps)
				{
					win.update_title_with_frame_rate();    // show the frame rate in the window title
				}

				win.render_image (*front_p); // render the image to the back buffer
				win.update(); // swap the back buffer with the front buffer
				PerformanceMonitor::instance().update();
			}
		}
	}
	LOG (INFO) << "shutting down";
	LOG (INFO) << "Average frame rate was " << win.average_framerate();
	front.destroy_image();
	back.destroy_image();
	win.destroy_window();
	LOG (INFO) << PerformanceMonitor::instance();
	return (0);
}
Example #17
int main() {

	omp_set_nested(1);
	omp_set_max_active_levels(3);
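	// allow at most three levels of nested parallel regions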

	// compute partitions
	printf("Number of partitions: %d\n", numPartitions(N));

}
Example #18
int main ()
{
    thds = omp_get_max_threads ();
    if (thds == 1) {
        printf ("should be run this program on multi threads.\n");
        exit (0);
    }
    omp_set_dynamic (0);
    omp_set_num_threads (2);
    omp_set_nested (1);
    if (omp_get_nested () == 0) {
        printf ("test skipped.\n");
        exit(0);
    }

    sum = 0;
    #pragma omp parallel
    {
        #pragma omp parallel
        {
            int	add;

            if (omp_get_num_threads () == 1) {
                add = 2;
                printf ("nested parallel is serialized.\n");
            } else {
                add = 1;
            }

            #pragma omp critical
            {
                sum += add;
            }
        }
    }
    if (sum != 2*2) {
        errors += 1;
    }


    sum = 0;
    #pragma omp parallel
    func_nesting ();
    if (sum != 2*2) {
        errors += 1;
    }


    if (errors == 0) {
        printf ("nesting 002 : SUCCESS\n");
        return 0;
    } else {
        printf ("nesting 002 : FAILED\n");
        return 1;
    }
}
Example #19
File: box.cpp Project: DavidHammen/chrono
int main(int argc, char* argv[]) {
	omp_set_nested(1);
	stepMode = true;
	GPUSystem = new System(1);
	GPUSystem->mTimeStep = .001;
	GPUSystem->mEndTime = 35;
	GPUSystem->mNumObjects = 1;
	GPUSystem->mIterations = 100;
	GPUSystem->mTolerance = 1e-5;
	GPUSystem->mOmegaContact = .9;
	GPUSystem->mOmegaBilateral = .2;
	GPUSystem->mUseOGL = 1;

	float mMu = .5;
	float mWallMu = .5;

	if (argc == 2) {
		numY = atoi(argv[1]);
	} else {
		cout << "ARGS: number of particle layers in y direction" << endl;
		exit(1);
	}
	float container_R = 10.0, container_T = .1;
	ChQuaternion<> quat(1, 0, 0, 0);
	ChVector<> lpos(0, 0, 0);
	CHBODYSHAREDPTR L = CHBODYSHAREDPTR(new CHBODY);
	CHBODYSHAREDPTR R = CHBODYSHAREDPTR(new CHBODY);
	CHBODYSHAREDPTR F = CHBODYSHAREDPTR(new CHBODY);
	CHBODYSHAREDPTR B = CHBODYSHAREDPTR(new CHBODY);
	CHBODYSHAREDPTR BTM = CHBODYSHAREDPTR(new CHBODY);
	CHBODYSHAREDPTR FREE = CHBODYSHAREDPTR(new CHBODY);
	ChQuaternion<> quat2(1, 0, 0, 0);
	quat2.Q_from_AngAxis(PI / 6.0, ChVector<> (1, 0, 0));
	//GPUSystem->InitObject(L, 100000, ChVector<> (-container_R, 0, 0), quat, mWallMu, mWallMu, 0, true, true, -20, -20);
	//GPUSystem->InitObject(R, 100000, ChVector<> (container_R, 0, 0), quat, mWallMu, mWallMu, 0, true, true, -20, -20);
	//GPUSystem->InitObject(F, 100000, ChVector<> (0, 0, -container_R), quat, mWallMu, mWallMu, 0, true, true, -20, -20);
	//GPUSystem->InitObject(B, 100000, ChVector<> (0, 0, container_R), quat, mWallMu, mWallMu, 0, true, true, -20, -20);
	GPUSystem->InitObject(BTM, 1, ChVector<> (0, -container_R, 0), quat, mWallMu, mWallMu, 0, true, true, -1000, -20000);

	//GPUSystem->AddCollisionGeometry(L, BOX, ChVector<> (container_T, container_R, container_R), lpos, quat);
	//GPUSystem->AddCollisionGeometry(R, BOX, ChVector<> (container_T, container_R, container_R), lpos, quat);
	//GPUSystem->AddCollisionGeometry(F, BOX, ChVector<> (container_R, container_R, container_T), lpos, quat);
	//GPUSystem->AddCollisionGeometry(B, BOX, ChVector<> (container_R, container_R, container_T), lpos, quat);
	GPUSystem->AddCollisionGeometry(BTM, BOX, ChVector<> (container_R, container_T, container_R), lpos, quat);

	//GPUSystem->FinalizeObject(L);
	//GPUSystem->FinalizeObject(R);
	//GPUSystem->FinalizeObject(F);
	//GPUSystem->FinalizeObject(B);
	GPUSystem->FinalizeObject(BTM);
	((ChLcpSolverGPU*) (GPUSystem->mSystem->GetLcpSolverSpeed()))->SetContactFactor(.6);
	GPUSystem->Setup();
	SimulationLoop(argc, argv);
	return 0;
}
Example #20
#include <stdio.h>
#include <omp.h>

int main(int argc, char * argv[])
{  
    omp_set_nested(10);     // any non-zero value enables nested parallelism
#pragma omp parallel num_threads(2)  
    {  
        printf("ID: %d, Max threads: %d, Num threads: %d \n",omp_get_thread_num(), omp_get_max_threads(), omp_get_num_threads());  
#pragma omp parallel num_threads(5)  
        printf("Nested, ID: %d, Max threads: %d, Num threads: %d \n",omp_get_thread_num(), omp_get_max_threads(), omp_get_num_threads());  
    }
    return 0;
}
Example #21
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[]) {
  omp_set_nested(1);
  omp_set_num_threads(2);
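  // request 2 threads for the outer region; whether the inner region also gets 2 depends on nesting being enabled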
  printf("Master: Nthr %d   Thrid %d   Nested %d\n",omp_get_num_threads(),omp_get_thread_num(),omp_get_nested());
#pragma omp parallel
  {
    printf("Parallel 1: Nthr %d   Thrid %d   Nested %d\n",omp_get_num_threads(),omp_get_thread_num(),omp_get_nested());
    omp_set_num_threads(2);
#pragma omp parallel
    {
      printf("Parallel 2: Nthr %d   Thrid %d   Nested %d\n",omp_get_num_threads(),omp_get_thread_num(),omp_get_nested());
    }
  }
  return 0;
}
Example #22
Waterline::Waterline() {
    subOp.clear();
    subOp.push_back( new BatchPushCutter() );
    subOp.push_back( new BatchPushCutter() );
    subOp[0]->setXDirection();
    subOp[1]->setYDirection();
    nthreads=1;
#ifdef _OPENMP
    nthreads = omp_get_num_procs(); 
    //omp_set_dynamic(0);
    omp_set_nested(1);
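    // nesting is enabled so that the X- and Y-direction BatchPushCutter sub-operations can presumably run their own parallel loops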
#endif

}
Example #23
File: pr26943-4.c Project: ChaosJohn/gcc
int
main (void)
{
  int i;
  omp_set_dynamic (0);
  omp_set_nested (1);
#pragma omp parallel num_threads (2) reduction (+:l) \
		     firstprivate (a, b, c, d, e, f, g, h, j)
  if (k == omp_get_thread_num ())
    {
#pragma omp parallel for shared (a, e) firstprivate (b, f) \
			 lastprivate (c, g) private (d, h) \
			 schedule (static, 1) num_threads (4) \
			 reduction (+:j)
      for (i = 0; i < 4; i++)
	{
	  if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
	    j++;
	  GOMP_barrier ();
#pragma omp atomic
	  a += i;
	  b += i;
	  c = i;
	  d = i;
#pragma omp atomic
	  e[0] += i;
	  f[0] += i;
	  g[0] = 'g' + i;
	  h[0] = 'h' + i;
	  GOMP_barrier ();
	  if (a != 8 + 6 || b != 12 + i || c != i || d != i)
	    j += 8;
	  if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
	    j += 64;
	  if (h[0] != 'h' + i)
	    j += 512;
	}
      if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
	++l;
      if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
	l += 8;
    }
  if (l)
    abort ();
  if (a != 8 || b != 12 || c != 16 || d != 20)
    abort ();
  if (e[0] != 'a' || f[0] != 'b' || g[0] != 'c' || h[0] != 'd')
    abort ();
  return 0;
}
Example #24

/* MAX and fib() are assumed to be defined elsewhere in the original source. */
int main()
{ 
  long long i,j,k;
  omp_set_nested(1);
  #pragma omp parallel 
  {
    for(i = 0; i < MAX; i++);
    #pragma omp parallel 
    {
      for(j = 0; j < MAX; j++);
      fib(40);
    }
  }
}
Example #25
File: main.c Project: J0s3l0/PAvanzada
int main(int argc, char** argv){
    
    int tid;
    int i;
    int cancelStatus = 0;
    double inicio, fin;
    double A[N], B[N], S[N], P[N];
    
    
    for(i=0; i<N; ++i){
        A[i] = i * 1.75;
        B[i] = A[i] * 0.58;
    }
    
    
    // nested parallelism
    omp_set_nested(1);
    //cancelStatus = omp_get_cancellation();
    
    inicio = omp_get_wtime();
    #pragma omp parallel private(i, tid) firstprivate(cancelStatus) num_threads(4) // firstprivate so the flag read below is initialized
    {
        
        if(cancelStatus == 1){
            printf("omp section cancelada");
        }
            
        #pragma omp sections nowait
        {
            #pragma omp section
            for(i = 0; i < N; ++i ){
                S[i] = A[i] + B[i];
            }
            
            #pragma omp section
            for(i = 0; i < N; ++i ){
                P[i] = A[i] * B[i];
            }
            
        }
        
    }
    fin = omp_get_wtime();
    
    printf("Tiempo = %f\n", fin-inicio);
    
    
    return 0;
}
Example #26
int main ()
{

  thds = omp_get_max_threads ();
  if (thds == 1) {
    printf ("should be run this program on multi threads.\n");
    exit (0);
  }
  omp_set_dynamic (0);
  omp_set_nested(0);


  #pragma omp parallel
  {
    int	id = omp_get_thread_num ();

    #pragma omp parallel
    {
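      /* i is assumed to be declared threadprivate at file scope in the original test file (not shown here) */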
      i = id;
      barrier (thds);

      if (i != id) {
	#pragma omp critical
	errors += 1;
      }
    }
  }

  #pragma omp parallel 
  {
    int	id = omp_get_thread_num ();

    #pragma omp parallel
    {
      func (id, thds);
    }
  }

  func(0,1);


  if (errors == 0) {
    printf ("threadprivate 003 : SUCCESS\n");
    return 0;
  } else {
    printf ("threadprivate 003 : FAILED\n");
    return 1;
  }
}
Example #27
File: lib.c Project: lingfeiwang/findr-R
void LIBINFONAME(lib_init)(unsigned char loglv,unsigned long rs0,size_t nthread)
{
	unsigned long	rs;
	size_t	nth;
	LOGLV(loglv);
	random_init();
	rs=rs0?rs0:(unsigned long)time(NULL);
	random_seed(rs);
	if(nthread)
		omp_set_num_threads((int)nthread);
	omp_set_nested(0);
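	/* nesting disabled: code already running inside a parallel region will not spawn additional teams */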
	nth=(size_t)omp_get_max_threads();
	gsl_set_error_handler_off();
	LOG(7,"Library started with log level %u, initial random seed %lu, and max thread count "PRINTFSIZET".",loglv,rs,nth)
}
Example #28
AdaptiveWaterline::AdaptiveWaterline() {
    subOp.clear();
    subOp.push_back( new FiberPushCutter() );
    subOp.push_back( new FiberPushCutter() );
    subOp[0]->setXDirection();
    subOp[1]->setYDirection();
    nthreads=1;
#ifdef _OPENMP
    nthreads = omp_get_num_procs(); 
    //omp_set_dynamic(0);
    omp_set_nested(1);
#endif
    sampling = 1.0;
    min_sampling = 0.1;
    cosLimit = 0.999;
}
Example #29
File: sort-1.c Project: abumaryam/gcc
int
main (int argc, char **argv)
{
  int i, count = 1000000;
  double stime;
  int *unsorted, *sorted, num_threads;
  if (argc >= 2)
    count = strtoul (argv[1], NULL, 0);

  unsorted = malloc (count * sizeof (int));
  sorted = malloc (count * sizeof (int));
  if (unsorted == NULL || sorted == NULL)
    {
      puts ("allocation failure");
      exit (1);
    }

  srand (0xdeadbeef);
  for (i = 0; i < count; i++)
    unsorted[i] = rand ();

  omp_set_nested (1);
  omp_set_dynamic (0);
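  /* sort1, sort2, sort3 and verify are defined elsewhere in this test; they are assumed to rely on the nested/non-dynamic settings above */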
  #pragma omp parallel
    #pragma omp single nowait
      num_threads = omp_get_num_threads ();
  printf ("Threads: %d\n", num_threads);

  memcpy (sorted, unsorted, count * sizeof (int));
  stime = omp_get_wtime ();
  sort1 (sorted, count);
  verify ("sort1", stime, sorted, count);

  memcpy (sorted, unsorted, count * sizeof (int));
  stime = omp_get_wtime ();
  sort2 (sorted, count);
  verify ("sort2", stime, sorted, count);

#if _OPENMP >= 200805
  memcpy (sorted, unsorted, count * sizeof (int));
  stime = omp_get_wtime ();
  sort3 (sorted, count);
  verify ("sort3", stime, sorted, count);
#endif

  return 0;
}
Example #30
void cSystem::startOpenMP(void)
{
#ifdef __GEM_USE_OPENMP__
    if (omp_get_num_procs() == 1) {
        WARNING("startOpenMP", "cannot start OpenMP since there is only 1 online processor");
    }
    else {
        omp_set_num_threads((int) std::ceil((double) omp_get_num_procs()/2));
        omp_set_nested(false);

        std::cout.precision(0);
        std::cout << "OpenMP is turned on: " << std::ceil(omp_get_num_procs()/2) << " threads\n\n";
    }
#else
    std::cout << "OpenMP is not supported\n\n";
#endif
}