Example #1
void *
__deregister_frame_info (void *begin)
{
  struct object **p;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  p = &objects;
  while (*p)
    {
      if ((*p)->fde_begin == begin)
	{
	  struct object *ob = *p;
	  *p = (*p)->next;

	  /* If we've run init_frame for this object, free the FDE array.  */
	  if (ob->fde_array && ob->fde_array != begin)
	    free (ob->fde_array);

	  __gthread_mutex_unlock (&object_mutex);
	  return (void *) ob;
	}
      p = &((*p)->next);
    }

  __gthread_mutex_unlock (&object_mutex);
  abort ();
}
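The traversal above uses a pointer-to-pointer (struct object **p) so that the list head and interior nodes are unlinked by the same assignment, with no special case for the head. A minimal, self-contained sketch of the same idiom (the node type and head list here are illustrative, not part of the libgcc code):

#include <stddef.h>

struct node { int key; struct node *next; };

static struct node *head;

/* Unlink and return the first node whose key matches, or NULL.
   Because p points at the *link* (either `head' itself or some
   node's `next' field), writing through *p splices the node out
   whether or not it is the head.  */
static struct node *
unlink_node (int key)
{
  struct node **p;
  for (p = &head; *p; p = &(*p)->next)
    if ((*p)->key == key)
      {
        struct node *n = *p;
        *p = n->next;
        return n;
      }
  return NULL;
}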
Example #2
void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}
Example #3
void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}
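For the ATOMIC_FDE_FAST_PATH flag set above, the matching consumer is the unwinder's FDE lookup, which can bail out early while nothing has been registered. A minimal sketch of that reader side, assuming the same relaxed load; lookup_fde and lookup_fde_slow are illustrative stand-ins for the real mutex-protected search, not libgcc entry points:

#include <stddef.h>

static int any_objects_registered;
extern const void *lookup_fde_slow (void *pc);  /* hypothetical slow path */

const void *
lookup_fde (void *pc)
{
  /* Relaxed is sufficient: a thread that can legitimately unwind
     through a library must already synchronize with that library's
     initialization, as the comment above explains.  */
  if (!__atomic_load_n (&any_objects_registered, __ATOMIC_RELAXED))
    return NULL;
  return lookup_fde_slow (pc);
}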
Example #4
void
__register_frame_info_bases (void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
Example #5
void
PREFIX(srand) (GFC_INTEGER_4 *i)
{
  __gthread_mutex_lock (&rand_seed_lock);
  srand_internal (*i);
  __gthread_mutex_unlock (&rand_seed_lock);
}
Example #6
static void
unregister_dumper (void)
{
  struct dumper_entry *dumper;
  struct dumper_entry *prev_dumper = 0;

  init_mx_once ();
  __gthread_mutex_lock (&__gcov_dump_mx);
  dumper = __gcov_dumper_list;

  while (dumper)
    {
      if (dumper->dumper == &__gcov_dump)
        {
	  if (prev_dumper)
	    prev_dumper->next_dumper = dumper->next_dumper;
	  else
	    __gcov_dumper_list = dumper->next_dumper;
          break;
        }
      prev_dumper = dumper;
      dumper = dumper->next_dumper;
    }
  __gthread_mutex_unlock (&__gcov_dump_mx);
}
Example #7
static void
register_dumper (void)
{
  init_mx_once ();
  __gthread_mutex_lock (&__gcov_dump_mx);
  this_dumper.next_dumper = __gcov_dumper_list;
  __gcov_dumper_list = &this_dumper;
  __gthread_mutex_unlock (&__gcov_dump_mx);
}
Example #8
void
__gcov_flush (void)
{
  init_mx_once ();
  __gthread_mutex_lock (&__gcov_flush_mx);

  gcov_exit ();
  gcov_clear ();

  __gthread_mutex_unlock (&__gcov_flush_mx);
}
Example #9
void
__gcov_flush (void)
{
  init_mx_once ();
  __gthread_mutex_lock (&__gcov_flush_mx);

  __gcov_dump_int ();
  __gcov_reset_int ();

  __gthread_mutex_unlock (&__gcov_flush_mx);
}
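A common reason to call __gcov_flush by hand is to persist coverage counters before a path that bypasses normal exit, such as exec. A minimal caller sketch; the declaration is supplied by hand here, and whether the symbol is available depends on the gcc version, since newer releases replaced it with __gcov_dump/__gcov_reset:

#include <unistd.h>

/* Provided by libgcov when the program is built with --coverage
   (older gcc; newer releases expose __gcov_dump instead).  */
extern void __gcov_flush (void);

void
spawn (const char *path, char *const argv[])
{
  if (fork () == 0)
    {
      __gcov_flush ();   /* write counters out; execv replaces this image */
      execv (path, argv);
      _exit (127);
    }
}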
Example #10
intptr_t __MCFCRT_gthread_unlock_callback_recursive_mutex(intptr_t context){
	__gthread_recursive_mutex_t *const recur_mutex = (__gthread_recursive_mutex_t *)context;
	_MCFCRT_ASSERT(_MCFCRT_GetCurrentThreadId() == __atomic_load_n(&(recur_mutex->__owner), __ATOMIC_RELAXED));

	const size_t old_count = recur_mutex->__count;
	recur_mutex->__count = 0;
	__atomic_store_n(&(recur_mutex->__owner), 0, __ATOMIC_RELAXED);

	__gthread_mutex_unlock(&(recur_mutex->__mutex));
	return (intptr_t)old_count;
}
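The callback releases the recursive mutex in one step and hands the saved recursion depth back through its return value, so a paired relock callback can restore it after a wait. A sketch of that inverse operation, assuming the same MCFCRT headers as the snippet above; relock_callback is an illustrative name, not the actual MCFCRT entry point:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical inverse of the unlock callback above: reacquire the
   underlying mutex, retake ownership, and restore the recursion
   depth that the unlock callback returned.  */
static void relock_callback(intptr_t context, intptr_t old_count){
	__gthread_recursive_mutex_t *const recur_mutex = (__gthread_recursive_mutex_t *)context;

	__gthread_mutex_lock(&(recur_mutex->__mutex));
	__atomic_store_n(&(recur_mutex->__owner), _MCFCRT_GetCurrentThreadId(), __ATOMIC_RELAXED);
	recur_mutex->__count = (size_t)old_count;
}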
Example #11
void
__gcov_dump_all (void)
{
  struct dumper_entry *dumper;

  init_mx_once ();
  __gthread_mutex_lock (&__gcov_dump_mx);

  dumper = __gcov_dumper_list;
  while (dumper)
    {
      dumper->dumper ();
      dumper = dumper->next_dumper;
    }
  __gthread_mutex_unlock (&__gcov_dump_mx);
}
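Examples #6, #7, and #11 all walk the same intrusive registry. Reconstructed from the fields the code touches, the entry type is roughly the following (the field order is an assumption):

struct dumper_entry
{
  void (*dumper) (void);             /* called by __gcov_dump_all */
  struct dumper_entry *next_dumper;  /* intrusive singly-linked list */
};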
Example #12
void
__register_frame_info_table (void *begin, struct object *ob)
{
  ob->fde_begin = begin;
  ob->fde_array = begin;

  ob->pc_begin = ob->pc_end = 0;
  ob->count = 0;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = objects;
  objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
Example #13
void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
Example #14
static fde *
find_fde (void *pc)
{
  struct object *ob;
  size_t lo, hi;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (ob = objects; ob; ob = ob->next)
    {
      if (ob->pc_begin == 0)
	frame_init (ob);
      if (pc >= ob->pc_begin && pc < ob->pc_end)
	break;
    }

  __gthread_mutex_unlock (&object_mutex);

  if (ob == 0)
    return 0;

  /* Standard binary search algorithm.  */
  for (lo = 0, hi = ob->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = ob->fde_array[i];

      if (pc < f->pc_begin)
	hi = i;
      else if (pc >= f->pc_begin + f->pc_range)
	lo = i + 1;
      else
	return f;
    }

  return 0;
}
Example #15
void
dtime_sub (gfc_array_r4 *t, GFC_REAL_4 *result)
{
  GFC_REAL_4 *tp;
  long user_sec, user_usec, system_sec, system_usec;
  static long us = 0, uu = 0, ss = 0 , su = 0;
  GFC_REAL_4 tu, ts, tt;

  if (((t->dim[0].ubound + 1 - t->dim[0].lbound)) < 2)
    runtime_error ("Insufficient number of elements in TARRAY.");

  __gthread_mutex_lock (&dtime_update_lock);
  if (__time_1 (&user_sec, &user_usec, &system_sec, &system_usec) == 0)
    {
      tu = (GFC_REAL_4) ((user_sec - us) + 1.e-6 * (user_usec - uu));
      ts = (GFC_REAL_4) ((system_sec - ss) + 1.e-6 * (system_usec - su));
      tt = tu + ts;
      us = user_sec;
      uu = user_usec;
      ss = system_sec;
      su = system_usec;
    }
  else
    {
      tu = -1;
      ts = -1;
      tt = -1;
    }

  tp = t->data;

  *tp = tu;
  tp += t->dim[0].stride;
  *tp = ts;
  *result = tt;
  __gthread_mutex_unlock (&dtime_update_lock);
}
Example #16
void
dtime_sub (gfc_array_r4 *t, GFC_REAL_4 *result)
{
  GFC_REAL_4 *tp;
  long user_sec, user_usec, system_sec, system_usec;
  static long us = 0, uu = 0, ss = 0 , su = 0;
  GFC_REAL_4 tu, ts, tt;

  if (((GFC_DESCRIPTOR_EXTENT(t,0))) < 2)
    runtime_error ("Insufficient number of elements in TARRAY.");

  __gthread_mutex_lock (&dtime_update_lock);
  if (gf_cputime (&user_sec, &user_usec, &system_sec, &system_usec) == 0)
    {
      tu = (GFC_REAL_4) ((user_sec - us) + 1.e-6 * (user_usec - uu));
      ts = (GFC_REAL_4) ((system_sec - ss) + 1.e-6 * (system_usec - su));
      tt = tu + ts;
      us = user_sec;
      uu = user_usec;
      ss = system_sec;
      su = system_usec;
    }
  else
    {
      tu = -1;
      ts = -1;
      tt = -1;
    }

  tp = t->base_addr;

  *tp = tu;
  tp += GFC_DESCRIPTOR_STRIDE(t,0);
  *tp = ts;
  *result = tt;
  __gthread_mutex_unlock (&dtime_update_lock);
}
Example #17
GFC_INTEGER_4
irand (GFC_INTEGER_4 *i)
{
  GFC_INTEGER_4 j;
  if (i)
    j = *i;
  else
    j = 0;

  __gthread_mutex_lock (&rand_seed_lock);

  switch (j)
  {
    /* Return the next RN. */
    case 0:
      break;

    /* Reset the RN sequence to system-dependent sequence and return the
       first value.  */
    case 1:
      srand_internal (0);
      break;

    /* Seed the RN sequence with j and return the first value.  */
    default:
      srand_internal (j);
      break;
  }

  rand_seed = GFC_RAND_A * rand_seed % GFC_RAND_M;
  j = (GFC_INTEGER_4) rand_seed;

  __gthread_mutex_unlock (&rand_seed_lock);

  return j;
}
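Taken together with PREFIX(srand) in Example #5, the switch gives irand three calling conventions: argument 0 (or a null pointer) draws the next value, 1 resets to the default sequence, and any other value reseeds first. A minimal C-side caller sketch, assuming libgfortran's GFC_INTEGER_4 is a 32-bit integer:

#include <stdint.h>

typedef int32_t GFC_INTEGER_4;           /* as in libgfortran */
extern GFC_INTEGER_4 irand (GFC_INTEGER_4 *);

void
demo_irand (void)
{
  GFC_INTEGER_4 mode, a, b, c;

  mode = 0;      /* 0 (or a null pointer): draw the next value */
  a = irand (&mode);

  mode = 1;      /* 1: reset to the default sequence, then draw */
  b = irand (&mode);

  mode = 12345;  /* anything else: reseed with this value, then draw */
  c = irand (&mode);

  (void) a; (void) b; (void) c;
}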
Example #18
void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}
Example #19
static fde *
find_fde (void *pc)
{
  struct object *ob;
  size_t lo, hi;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the objects, to find the one containing the pc. */
  for (ob = objects; ob; ob = ob->next)
    {
      if (ob->pc_begin == 0)
	frame_init (ob);
      if (pc >= ob->pc_begin && pc < ob->pc_end)
	break;
    }

  if (ob == 0)
    {
      __gthread_mutex_unlock (&object_mutex);
      return 0;
    }

  if (!ob->fde_array || (void *)ob->fde_array == (void *)ob->fde_begin)
    frame_init (ob);

  if (ob->fde_array && (void *)ob->fde_array != (void *)ob->fde_begin)
    {
      __gthread_mutex_unlock (&object_mutex);
      
      /* Standard binary search algorithm.  */
      for (lo = 0, hi = ob->count; lo < hi; )
	{
	  size_t i = (lo + hi) / 2;
	  fde *f = ob->fde_array[i];

	  if (pc < f->pc_begin)
	    hi = i;
	  else if (pc >= f->pc_begin + f->pc_range)
	    lo = i + 1;
	  else
	    return f;
	}
    }
  else
    {
      /* Long, slow, laborious linear search, because we have no
	 sorted array to binary search.  */
      fde *f = 0;	/* Stays null if no FDE matches.  */
      
      if (ob->fde_array)
	{
	  fde **p = ob->fde_array;
	  
	  for (; *p; ++p)
	    {
	      f = search_fdes (*p, pc);
	      if (f)
		break;
	    }
	}
      else
	f = search_fdes (ob->fde_begin, pc);
      __gthread_mutex_unlock (&object_mutex);
      return f;
    }
  return 0;
}
Example #20
void *
__emutls_get_address (struct __emutls_object *obj)
{
  if (! __gthread_active_p ())
    {
      if (__builtin_expect (obj->loc.ptr == NULL, 0))
	obj->loc.ptr = emutls_alloc (obj);
      return obj->loc.ptr;
    }

#ifndef __GTHREADS
  abort ();
#else
  pointer offset = obj->loc.offset;

  if (__builtin_expect (offset == 0, 0))
    {
      static __gthread_once_t once = __GTHREAD_ONCE_INIT;
      __gthread_once (&once, emutls_init);
      __gthread_mutex_lock (&emutls_mutex);
      offset = obj->loc.offset;
      if (offset == 0)
	{
	  offset = ++emutls_size;
	  obj->loc.offset = offset;
	}
      __gthread_mutex_unlock (&emutls_mutex);
    }

  struct __emutls_array *arr = __gthread_getspecific (emutls_key);
  if (__builtin_expect (arr == NULL, 0))
    {
      pointer size = offset + 32;
      arr = calloc (size + 1, sizeof (void *));
      if (arr == NULL)
	abort ();
      arr->size = size;
      __gthread_setspecific (emutls_key, (void *) arr);
    }
  else if (__builtin_expect (offset > arr->size, 0))
    {
      pointer orig_size = arr->size;
      pointer size = orig_size * 2;
      if (offset > size)
	size = offset + 32;
      arr = realloc (arr, (size + 1) * sizeof (void *));
      if (arr == NULL)
	abort ();
      arr->size = size;
      memset (arr->data + orig_size, 0,
	      (size - orig_size) * sizeof (void *));
      __gthread_setspecific (emutls_key, (void *) arr);
    }

  void *ret = arr->data[offset - 1];
  if (__builtin_expect (ret == NULL, 0))
    {
      ret = emutls_alloc (obj);
      arr->data[offset - 1] = ret;
    }
  return ret;
#endif
}
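__emutls_get_address is what the compiler calls on targets without native TLS: each __thread variable gets a control object, and every access goes through the runtime. A rough sketch of the lowering; the real emitted symbol is named __emutls_v.x (with a dot), which is not a legal C identifier, so __emutls_v_x stands in for it here, and the word/pointer typedefs approximate the target-dependent types in emutls.c:

#include <stddef.h>

typedef size_t word;
typedef size_t pointer;

/* Layout as used by the snippet above (fields per GCC's emutls.c).  */
struct __emutls_object
{
  word size;
  word align;
  union { pointer offset; void *ptr; } loc;
  void *templ;
};

extern void *__emutls_get_address (struct __emutls_object *);

/* What the compiler emits, roughly, for:  __thread int x = 42;  */
static int x_init = 42;
struct __emutls_object __emutls_v_x =
  { sizeof (int), __alignof__ (int), { 0 }, &x_init };

/* ...and for the access  x = 7;  */
void
assign_x (void)
{
  *(int *) __emutls_get_address (&__emutls_v_x) = 7;
}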
Example #21
    void unlock()
    {
      if (__gthread_mutex_unlock(&_M_mutex) != 0)
	__throw_concurrence_unlock_error();
    }
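This unlock is one half of libstdc++'s mutex wrapper; the acquiring half follows the same check-and-throw pattern. A sketch of the matching member, assuming the same class and the __throw_concurrence_lock_error counterpart from <ext/concurrence.h>:

    void lock()
    {
      // Same pattern as unlock(): surface a gthreads failure as an exception.
      if (__gthread_mutex_lock(&_M_mutex) != 0)
	__throw_concurrence_lock_error();
    }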
Example #22
intptr_t __MCFCRT_gthread_unlock_callback_mutex(intptr_t context){
	__gthread_mutex_t *const mutex = (__gthread_mutex_t *)context;

	__gthread_mutex_unlock(mutex);
	return 1;
}