/* Example 1 */
/* Entry point used by compiler-generated code to fork an OpenMP parallel
 * region: the calling (master) thread hands 'work' and its frame pointer
 * 'fp' to a team of up to 'nthreads' threads via __pmp_thread_fork, and
 * returns once the region has completed.  Memory fences bracket the fork
 * so that writes made before the region are visible to the workers, and
 * the workers' writes are visible to the master afterwards.
 */
void __ompc_fork (int nthreads, workfunc_t work, void *fp)
{
  pmp_thread_t *forker = __pmp_get_current_thread();

  __pmp_debug(PMP_DEBUG_CALLS, "__ompc_fork nthreads=%d, work=%p, fp=%p "
              " (nesting depth = %d)\n",
              nthreads, work, fp, forker->nesting_depth);
  __pmp_sample(PMP_PROFILE_OMPC_FORK);

  /* Publish the master's prior writes, run the region, then pick up the
   * workers' writes before returning to serial execution. */
  __pmp_memory_fence();
  __pmp_thread_fork(forker, nthreads, work, fp);
  __pmp_memory_fence();

  __pmp_debug(PMP_DEBUG_CALLS, "__ompc_fork completed"
              " (nesting depth = %d)\n",
              forker->nesting_depth);
}
/* Example 2 */
/* Entry point for an explicit OpenMP barrier: the calling thread joins the
 * team-wide barrier via __pmp_thread_barrier.  The preceding memory fence
 * makes this thread's writes visible before it signals arrival.
 */
void __ompc_barrier (void)
{
  pmp_thread_t *self = __pmp_get_current_thread();

  __pmp_debug(PMP_DEBUG_CALLS, "__ompc_barrier\n");
  __pmp_sample(PMP_PROFILE_OMPC_BARRIER);

  /* Flush this thread's writes before entering the barrier. */
  __pmp_memory_fence();
  __pmp_thread_barrier(self);
}
/* Example 3 */
/* Entry point for the OpenMP "ordered" directive.  Blocks the calling
 * thread until its loop iteration's turn comes up, using a ticket scheme:
 * the thread spins until the loop's now_serving counter reaches the
 * thread's own ticket_number.  The matching release is performed by
 * __ompc_end_ordered, which advances now_serving.
 */
void __ompc_ordered (int global_id)
{
  pmp_thread_t *thread = __pmp_get_thread(global_id);

  /* With a team of one there is nothing to order; skip straight to the
   * closing fence. */
  if (__pmp_get_team_size(thread->team) > 1) {
    pmp_loop_t *loop = thread->loop;
    int64_t ticket_number = thread->ticket_number;
    int64_t now_serving;

#ifdef SUPER_DEBUG
    if (Enabled_Libomp_Call_Debug)
      __pmp_debug("CALLS_DEBUG", "__ompc_ordered: global_id=%d\n", global_id);
#endif
    __pmp_sample(PMP_PROFILE_OMPC_ORDERED);

    /* An ordered directive outside an ordered-scheduled loop is a user
     * error; warn and return rather than deadlock on a ticket that will
     * never be served.  NOTE(review): this early return skips the final
     * __pmp_memory_fence() — presumably acceptable since no ordering was
     * established; confirm against callers. */
    if (loop == NULL || loop->sched <= PMP_SCHED_ORDERED_OFFSET) {
      __pmp_warning("ordered directives must be used inside ordered "
                    "OpenMP loops\n");
      return;
    }

    /* Redundant after the guard above; kept as a belt-and-braces check. */
    assert(loop != NULL);

    now_serving = loop->now_serving;

    if (now_serving != ticket_number) {
      /* If now_serving has already moved past our ticket (in the loop's
       * direction of iteration, given by the sign of loop->inc), our turn
       * can never arrive — warn about the likely deadlock, e.g. from two
       * ordered directives in one iteration. */
      if ((loop->inc >= 0) ? (now_serving > ticket_number) :
                             (now_serving < ticket_number)) {
        __pmp_warning("ordered OpenMP loop may result in program deadlock\n");
        __pmp_warning("maybe due to multiple ordered directives "
                      "in a loop iteration\n");
      }
      while (loop->now_serving != ticket_number) {
        /* USER LEVEL SPIN LOOP */
        __pmp_yield();
      }
    }

#ifdef SUPER_DEBUG
    if (Enabled_Libomp_Loop_Debug)
      __pmp_debug("LOOPS_DEBUG", "__ompc_ordered: now serving global_id=%d "
                  " ticket_number=%" PRId64 "\n", global_id, ticket_number);
#endif
  }

  /* Fence after acquiring our turn so the previous iteration's writes are
   * visible inside the ordered section. */
  __pmp_memory_fence();
}
/* Example 4 */
/* Entry point for the end of an OpenMP "ordered" section.  Releases the
 * ticket acquired by __ompc_ordered: advances the loop's now_serving
 * counter by the loop increment so the next iteration's thread can enter,
 * and advances this thread's own ticket for its next iteration.
 */
void __ompc_end_ordered (int global_id)
{
  pmp_thread_t *thread = __pmp_get_thread(global_id);

  /* Fence before releasing the ticket so writes made inside the ordered
   * section are visible to the next thread that enters. */
  __pmp_memory_fence();

  /* With a team of one no ticket was taken, so there is nothing to
   * release. */
  if (__pmp_get_team_size(thread->team) > 1) {
    pmp_loop_t *loop = thread->loop;
    int64_t ticket_number = thread->ticket_number;

#ifdef SUPER_DEBUG
    if (Enabled_Libomp_Call_Debug)
      __pmp_debug("CALLS_DEBUG", "__ompc_end_ordered: global_id=%d\n",
                  global_id);
#endif
    __pmp_sample(PMP_PROFILE_OMPC_END_ORDERED);

    /* Mirror of the user-error guard in __ompc_ordered; only thread 0
     * warns here, to avoid one warning per team member. */
    if (loop == NULL || loop->sched <= PMP_SCHED_ORDERED_OFFSET) {
      if (thread->global_id == 0)
        __pmp_warning("ordered directives must be used inside ordered "
                      "OpenMP loops\n");
      return;
    }

    assert(loop != NULL);
    /* We must currently hold the ticket being served. */
    assert(loop->now_serving == ticket_number);

#ifdef SUPER_DEBUG
    if (Enabled_Libomp_Loop_Debug)
      __pmp_debug("LOOPS_DEBUG", "__ompc_ordered: stop serving global_id=%d "
                  " ticket_number=%" PRId64 "\n", global_id, ticket_number);
#endif

    /* Hand the ticket to the next iteration (inc may be negative for
     * downward-counting loops). */
    loop->now_serving += loop->inc;
    thread->ticket_number = ticket_number + loop->inc;
  }
}
/* Example 5 */
/* Top-level routine executed by every worker pthread created by the
 * runtime.  Records the thread's identity, installs per-thread signal
 * masking, signal handling and thread-specific data, then enters the
 * worker service loop: wait for an assignment, run the work, wake the
 * team master, go idle, repeat.  Never returns in practice.
 *
 * 'arg' is the pmp_thread_t descriptor pre-allocated by the creator.
 */
static void *__pmp_thread_run (void *arg)
{
  pmp_thread_t *thread = (pmp_thread_t *) arg;
  pmp_team_t *team;

  /* Record our pthread and kernel thread identity in the descriptor. */
  thread->pthread_id = pthread_self();
#ifdef PMP_NO_NPTL
  thread->tid = getpid();
#else
  thread->tid = gettid();
#endif

  /* Seed whichever TLS caches of thread/local-id/global-id this build
   * enables, so later lookups avoid going through the descriptor. */
#ifndef PMP_NO_TLS
#ifdef PMP_TLS_THREAD
  __pmp_tls_current_thread = thread;
#endif
#ifdef PMP_TLS_LOCAL_ID
  __pmp_tls_current_local_id = thread->local_id;
#endif
#ifdef PMP_TLS_GLOBAL_ID
  __pmp_tls_current_global_id = thread->global_id;
#endif
#endif

  /* Block the runtime's signal (mask_block_sigpmp) in this thread, using
   * whichever masking API this build is configured for. */
#ifdef PMP_USE_PTHREAD_SIGNALS
  if (pthread_sigmask(SIG_BLOCK, &__pmp_manager.mask_block_sigpmp, 
                      NULL) != 0) {
    __pmp_fatal("unable to set thread-specific sigmask\n");
  }
#else
  if (sigprocmask(SIG_BLOCK, &__pmp_manager.mask_block_sigpmp, NULL) != 0) {
    __pmp_fatal("unable to set thread-specific sigmask\n");
  }
#endif

  /* Make the descriptor retrievable via pthread_getspecific. */
  if (pthread_setspecific(__pmp_manager.thread_key, (void *) thread) != 0) {
    __pmp_fatal("unable to set thread-specific data\n");
  }

  __pmp_debug(PMP_DEBUG_THREAD, "thread global_id %d is running\n",
              thread->global_id);

  /* Note that there is no synchronization between the creating thread and
   * the created thread until here. This is the point where the created
   * thread is assigned to do some work. The reason that this is sufficient
   * is because the __pmp_thread_wait/wake mechanism is "protected" by
   * the thread->sync value which has been pre-initialized. If the creator
   * gets to the wake first, then it will just swap in PMP_THREAD_UNBLOCKED
   * and its work is done. If it gets to the wake second, then the created
   * thread must have got there first and this guarantees that the other
   * thread fields will already be initialized by the created thread.
   *
   * With nested forking, there is the possibility that the creator thread
   * will be usurped by another forking thread (there is no lock between
   * creation of a thread and that thread being assigned to do work). This
   * works for the same reason as described above.
   */

  __pmp_thread_wait(thread);		/* wait for first assignment */

#ifdef PMP_NO_NPTL
  __pmp_shared_catch_segv(thread);	/* set up shared segv handler */
#else
  __pmp_catch_segv();			/* set up thread's segv handler */
#endif

  __pmp_thread_bind(thread);		/* bind to the assigned local_id */

  /* Worker service loop: each pass runs one assignment to completion. */
  while (1) {

    __pmp_debug(PMP_DEBUG_THREAD,
                "__pmp_thread_run: thread tid=%d, pthread_id=0x%08x "
                "global_id=%d, local_id=%d\n",
                (int) thread->tid, (int) thread->pthread_id,
                (int) thread->global_id, (int) thread->local_id);

    team = thread->team;
    assert(team != NULL);
    /* local_id/team may change between assignments; refresh TLS caches. */
#ifndef PMP_NO_TLS
#ifdef PMP_TLS_LOCAL_ID
    __pmp_tls_current_local_id = thread->local_id;
#endif
#ifdef PMP_TLS_TEAM
    __pmp_tls_current_team = team;
#endif
#endif
    /* Fences bracket the work so the forker's writes are visible to us
     * and our results are visible once the master is woken. */
    __pmp_memory_fence();
    __pmp_thread_work(thread);		/* do the work */
    __pmp_thread_worker_join(team);	/* wake up team master */
    __pmp_memory_fence();
    __pmp_thread_idle(thread);		/* thread is now idle */
    __pmp_thread_wait(thread);		/* wait for more work */
    __pmp_thread_bind(thread);		/* update binding */
  }

  /* Currently unreachable */

  __pmp_debug(PMP_DEBUG_THREAD, "thread global_id %d is exiting\n",
              thread->global_id);

  return NULL;
}