Example #1
void attribute_hidden
_IO_vtable_check (void)
{
#ifdef SHARED
  /* Honor the compatibility flag.  */
  void (*flag) (void) = atomic_load_relaxed (&IO_accept_foreign_vtables);
#ifdef PTR_DEMANGLE
  PTR_DEMANGLE (flag);
#endif
  if (flag == &_IO_vtable_check)
    return;

  /* In case this libc copy is in a non-default namespace, we always
     need to accept foreign vtables because there is always a
     possibility that FILE * objects are passed across the linking
     boundary.  */
  {
    Dl_info di;
    struct link_map *l;
    if (_dl_open_hook != NULL
        || (_dl_addr (_IO_vtable_check, &di, &l, NULL) != 0
            && l->l_ns != LM_ID_BASE))
      return;
  }

#else /* !SHARED */
  /* We cannot perform vtable validation in the static dlopen case
     because FILE * handles might be passed back and forth across the
     boundary.  Therefore, we disable checking in this case.  */
  if (__dlopen != NULL)
    return;
#endif

  __libc_fatal ("Fatal error: glibc detected an invalid stdio handle\n");
}
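The check above hinges on a function pointer that is stored mangled and compared only after demangling. A minimal standalone sketch of that pattern, using C11 atomics and a toy XOR key in place of glibc's internal atomic_load_relaxed and PTR_MANGLE/PTR_DEMANGLE (every name below is illustrative, not glibc's):

/* Sketch only: the "flag" is really a mangled pointer to a sentinel
   function.  Readers load it with relaxed ordering, demangle it with the
   (toy) guard value, and compare against the sentinel's address.  */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static const uintptr_t pointer_guard = 0x5a5a5a5a5a5a5a5aULL; /* toy key */
static _Atomic uintptr_t accept_foreign_vtables;              /* the flag */

static void sentinel (void) { }

static void
enable_foreign_vtables (void)
{
  /* Publish the mangled sentinel address; relaxed is enough for a flag
     that is only ever compared for equality.  */
  atomic_store_explicit (&accept_foreign_vtables,
                         (uintptr_t) &sentinel ^ pointer_guard,
                         memory_order_relaxed);
}

static int
foreign_vtables_allowed (void)
{
  uintptr_t flag = atomic_load_explicit (&accept_foreign_vtables,
                                         memory_order_relaxed);
  return (flag ^ pointer_guard) == (uintptr_t) &sentinel;
}

int
main (void)
{
  printf ("before: %d\n", foreign_vtables_allowed ());  /* 0 */
  enable_foreign_vtables ();
  printf ("after:  %d\n", foreign_vtables_allowed ());  /* 1 */
  return 0;
}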
Example #2
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
  int val = 0;

  /* We assume that the first try mostly will be successful, thus we use
     atomic_exchange if it is not implemented by a CAS loop (we also assume
     that atomic_exchange can be faster if it succeeds, see
     ATOMIC_EXCHANGE_USES_CAS).  Otherwise, we use a weak CAS and not an
     exchange so we bail out after the first failed attempt to change the
     state.  For the subsequent attempts we use atomic_compare_and_exchange
     after we observe that the lock is not acquired.
     See also comment in pthread_spin_trylock.
     We use acquire MO to synchronize-with the release MO store in
     pthread_spin_unlock, and thus ensure that prior critical sections
     happen-before this critical section.  */
#if ! ATOMIC_EXCHANGE_USES_CAS
  /* Try to acquire the lock with an exchange instruction as this architecture
     has such an instruction and we assume it is faster than a CAS.
     The acquisition succeeds if the lock is not in an acquired state.  */
  if (__glibc_likely (atomic_exchange_acquire (lock, 1) == 0))
    return 0;
#else
  /* Try to acquire the lock with a CAS instruction as this architecture
     has no exchange instruction.  The acquisition succeeds if the lock is not
     acquired.  */
  if (__glibc_likely (atomic_compare_exchange_weak_acquire (lock, &val, 1)))
    return 0;
#endif

  do
    {
      /* The lock is contended and we need to wait.  Going straight back
	 to cmpxchg is not a good idea on many targets as that will force
	 expensive memory synchronizations among processors and penalize other
	 running threads.
	 There is no technical reason for throwing in a CAS every now and then,
	 and so far we have no evidence that it can improve performance.
	 If that would be the case, we have to adjust other spin-waiting loops
	 elsewhere, too!
	 Thus we use relaxed MO reads until we observe the lock to not be
	 acquired anymore.  */
      do
	{
	  /* TODO Back-off.  */

	  atomic_spin_nop ();

	  val = atomic_load_relaxed (lock);
	}
      while (val != 0);

      /* We need acquire memory order here for the same reason as mentioned
	 for the first try to lock the spinlock.  */
    }
  while (!atomic_compare_exchange_weak_acquire (lock, &val, 1));

  return 0;
}
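pthread_spin_unlock is not shown above; the following is a self-contained sketch of the same acquire/release pairing using C11 atomics: a test-and-test-and-set spinlock whose release store synchronizes with the acquire CAS, as described in the comment. It is a sketch, not glibc's implementation; build with -pthread.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int lock_word;   /* 0 = free, 1 = held */
static long counter;

static void
spin_lock (atomic_int *l)
{
  int expected = 0;
  /* First attempt: weak CAS with acquire ordering.  */
  if (atomic_compare_exchange_weak_explicit (l, &expected, 1,
                                             memory_order_acquire,
                                             memory_order_relaxed))
    return;
  for (;;)
    {
      /* Spin with relaxed loads until the lock looks free.  */
      while (atomic_load_explicit (l, memory_order_relaxed) != 0)
        ;
      expected = 0;
      if (atomic_compare_exchange_weak_explicit (l, &expected, 1,
                                                 memory_order_acquire,
                                                 memory_order_relaxed))
        return;
    }
}

static void
spin_unlock (atomic_int *l)
{
  /* Release store: pairs with the acquire CAS in spin_lock, so the
     critical section happens-before the next acquirer's.  */
  atomic_store_explicit (l, 0, memory_order_release);
}

static void *
worker (void *arg)
{
  for (int i = 0; i < 100000; i++)
    {
      spin_lock (&lock_word);
      counter++;
      spin_unlock (&lock_word);
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  printf ("counter = %ld\n", counter);  /* expect 200000 */
  return 0;
}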
Example #3
static void *
test_run (void *p)
{
  while (atomic_load_relaxed (&running))
    printf ("Test running\n");
  printf ("Test finished\n");
  return NULL;
}
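The harness around test_run is not included above. A plausible minimal driver, assuming `running` is an atomic flag cleared by the main thread after a short delay (the flag's type and the one-second run time are assumptions), written with C11 atomics in place of glibc's internal wrappers:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int running = 1;

static void *
test_run (void *p)
{
  while (atomic_load_explicit (&running, memory_order_relaxed))
    printf ("Test running\n");
  printf ("Test finished\n");
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  pthread_create (&thr, NULL, test_run, NULL);
  sleep (1);
  /* Ask the worker to stop; relaxed is enough for a plain stop flag that
     only needs to be observed eventually.  */
  atomic_store_explicit (&running, 0, memory_order_relaxed);
  pthread_join (thr, NULL);
  return 0;
}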
Example #4
// Checks the validity of a chunk by verifying its checksum. It doesn't
// incur termination in the event of an invalid chunk.
static INLINE bool isValid(const void *Ptr) {
  PackedHeader NewPackedHeader =
      atomic_load_relaxed(getConstAtomicHeader(Ptr));
  UnpackedHeader NewUnpackedHeader =
      bit_cast<UnpackedHeader>(NewPackedHeader);
  return (NewUnpackedHeader.Checksum ==
          computeChecksum(Ptr, &NewUnpackedHeader));
}
Example #5
// Loads and unpacks the header, verifying the checksum in the process.
static INLINE
void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader =
      atomic_load_relaxed(getConstAtomicHeader(Ptr));
  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
  if (UNLIKELY(NewUnpackedHeader->Checksum !=
      computeChecksum(Ptr, NewUnpackedHeader)))
    dieWithMessage("corrupted chunk header at address %p\n", Ptr);
}
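Examples #4 and #5 follow the same pattern: pack the header into a single word, publish it with a relaxed atomic store, and re-derive the checksum on every load. Below is a freestanding C sketch of that pattern; the 16-bit checksum, the field layout and all names are invented for the sketch and do not match Scudo's real PackedHeader.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

// Toy stand-ins for PackedHeader/UnpackedHeader: a 32-bit size and a 16-bit
// checksum packed into one 64-bit word that is published atomically.
typedef struct { uint32_t Size; uint16_t Checksum; } ToyHeader;

static _Atomic uint64_t StoredHeader;

static uint64_t pack(ToyHeader H) {
  return (uint64_t)H.Size | ((uint64_t)H.Checksum << 32);
}

static ToyHeader unpack(uint64_t Packed) {
  ToyHeader H = { (uint32_t)Packed, (uint16_t)(Packed >> 32) };
  return H;
}

// Deliberately trivial checksum over the pointer and the non-checksum field.
static uint16_t computeToyChecksum(const void *Ptr, const ToyHeader *H) {
  return (uint16_t)((uintptr_t)Ptr ^ H->Size ^ 0xa5a5u);
}

static void storeHeader(const void *Ptr, uint32_t Size) {
  ToyHeader H = { Size, 0 };
  H.Checksum = computeToyChecksum(Ptr, &H);
  atomic_store_explicit(&StoredHeader, pack(H), memory_order_relaxed);
}

static int isValidToy(const void *Ptr) {
  ToyHeader H = unpack(atomic_load_explicit(&StoredHeader,
                                            memory_order_relaxed));
  return H.Checksum == computeToyChecksum(Ptr, &H);
}

int main(void) {
  int Object;
  storeHeader(&Object, 32);
  printf("valid: %d\n", isValidToy(&Object));                    // 1
  StoredHeader ^= 1;                                             // corrupt it
  printf("valid after corruption: %d\n", isValidToy(&Object));   // 0
  return 0;
}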
Example #6
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
    uptr tail_size = tagged_size - orig_size;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
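The tail-magic check in HwasanDeallocate compares the bytes between the requested size and the granule-rounded size against a magic pattern written at allocation time. A freestanding sketch of just that bookkeeping; the 16-byte granule and the 0xac fill byte are assumptions, not HWASan's actual constants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GRANULE 16   /* stand-in for kShadowAlignment */

static const uint8_t tail_magic[GRANULE] =
  { 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac,
    0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac };

static size_t
tagged_size (size_t orig_size)
{
  /* Round up to the granule, like TaggedSize.  */
  return (orig_size + GRANULE - 1) & ~(size_t) (GRANULE - 1);
}

int
main (void)
{
  size_t orig_size = 20;
  size_t full = tagged_size (orig_size);       /* 32 */
  uint8_t *p = malloc (full);

  /* At allocation time, fill the tail with the magic pattern.  */
  memset (p, 0, orig_size);
  memcpy (p + orig_size, tail_magic, full - orig_size);

  /* Simulate a small buffer overflow into the tail.  */
  p[orig_size + 2] = 0x42;

  /* At free time, detect that the tail was overwritten.  */
  size_t tail = full - orig_size;
  if (tail && memcmp (p + orig_size, tail_magic, tail) != 0)
    printf ("tail overwritten (%zu tail bytes)\n", tail);

  free (p);
  return 0;
}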
Example #7
int
pthread_mutex_consistent (pthread_mutex_t *mutex)
{
  /* Test whether this is a robust mutex with a dead owner.
     See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  if ((atomic_load_relaxed (&(mutex->__data.__kind))
       & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
    return EINVAL;

  mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);

  return 0;
}
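pthread_mutex_consistent only succeeds on a robust mutex whose owner died while holding it. A minimal usage sketch of that protocol with the public API (error handling trimmed; build with -pthread):

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static pthread_mutex_t m;

static void *
dying_owner (void *arg)
{
  /* Acquire the robust mutex and terminate without unlocking it.  */
  pthread_mutex_lock (&m);
  return NULL;
}

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  pthread_t t;
  pthread_create (&t, NULL, dying_owner, NULL);
  pthread_join (t, NULL);

  if (pthread_mutex_lock (&m) == EOWNERDEAD)
    {
      /* The previous owner died holding the lock: repair the protected
         state, then mark the mutex consistent again.  */
      printf ("previous owner died; recovering\n");
      pthread_mutex_consistent (&m);
    }
  pthread_mutex_unlock (&m);
  pthread_mutex_destroy (&m);
  return 0;
}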
Example #8
void
attribute_hidden
_dl_tlsdesc_resolve_rela_fixup (struct tlsdesc *td, struct link_map *l)
{
  const ElfW(Rela) *reloc = atomic_load_relaxed (&td->arg);

  /* After GL(dl_load_lock) is grabbed only one caller can see td->entry in
     initial state in _dl_tlsdesc_resolve_early_return_p, other concurrent
     callers will return and retry calling td->entry.  The updated td->entry
     synchronizes with the single writer so all read accesses here can use
     relaxed order.  */
  if (_dl_tlsdesc_resolve_early_return_p
      (td, (void*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + l->l_addr)))
    return;

  /* The code below was borrowed from _dl_fixup(),
     except for checking for STB_LOCAL.  */
  const ElfW(Sym) *const symtab
    = (const void *) D_PTR (l, l_info[DT_SYMTAB]);
  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
  const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
  lookup_t result;

   /* Look up the target symbol.  If the normal lookup rules are not
      used don't look in the global scope.  */
  if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL
      && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0)
    {
      const struct r_found_version *version = NULL;

      if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
	{
	  const ElfW(Half) *vernum =
	    (const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]);
	  ElfW(Half) ndx = vernum[ELFW(R_SYM) (reloc->r_info)] & 0x7fff;
	  version = &l->l_versions[ndx];
	  if (version->hash == 0)
	    version = NULL;
	}

      result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym,
				    l->l_scope, version, ELF_RTYPE_CLASS_PLT,
				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
    }
  else
    {
Example #9
int
__lll_unlock_elision (int *lock, short *adapt_count, int pshared)
{
  /* When the lock was free we're in a transaction.  */
  if (*lock == 0)
    __libc_tend (0);
  else
    {
      /* Update adapt_count in the critical section to prevent a
	 write-after-destroy error as mentioned in BZ 20822.  The
	 following update of adapt_count has to be contained within
	 the critical region of the fall-back lock in order to not violate
	 the mutex destruction requirements.  */
      short __tmp = atomic_load_relaxed (adapt_count);
      if (__tmp > 0)
	atomic_store_relaxed (adapt_count, __tmp - 1);

      lll_unlock ((*lock), pshared);
    }
  return 0;
}
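The mutex-destruction requirement the comment refers to (BZ 20822) is easiest to see from the user's side: POSIX allows a thread to destroy and free a mutex as soon as it has acquired and released it after the last other user, so the unlock path must not touch the mutex object once the lock has been made available. A small sketch of that usage pattern (the names and structure here are mine, not glibc's regression test; build with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct shared { pthread_mutex_t m; int done; };

static void *
last_user (void *arg)
{
  struct shared *s = arg;
  pthread_mutex_lock (&s->m);
  s->done = 1;
  pthread_mutex_unlock (&s->m);
  /* Nothing may touch *s from this thread after the unlock above.  */
  return NULL;
}

int
main (void)
{
  struct shared *s = malloc (sizeof *s);
  pthread_mutex_init (&s->m, NULL);
  s->done = 0;

  pthread_t t;
  pthread_create (&t, NULL, last_user, s);

  /* Wait for the worker by observing `done` under the lock.  */
  for (;;)
    {
      pthread_mutex_lock (&s->m);
      int done = s->done;
      pthread_mutex_unlock (&s->m);
      if (done)
        break;
      sched_yield ();
    }

  /* Legal per POSIX: the worker has unlocked and will never lock again.
     If the worker's unlock path wrote to *s after releasing the lock
     (e.g. a late adapt_count update), it would race with this free.  */
  pthread_mutex_destroy (&s->m);
  free (s);
  pthread_join (t, NULL);
  return 0;
}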
Example #10
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
  /* Reject invalid timeouts.  */
  if (__glibc_unlikely (abstime->tv_nsec < 0)
      || __glibc_unlikely (abstime->tv_nsec >= 1000000000))
    return EINVAL;

  /* Repeat until thread terminated.  */
  int tid;
  while ((tid = atomic_load_relaxed (tidp)) != 0)
    {
      /* See exit-thread.h for details.  */
      if (tid == NACL_EXITING_TID)
	/* The thread should now be in the process of exiting, so it will
	   finish quickly enough that the timeout doesn't matter.  If any
	   thread ever stays in this state for long, there is something
	   catastrophically wrong.  */
	atomic_spin_nop ();
      else
	{
	  assert (tid > 0);

	  /* If *FUTEX == TID, wait until woken or timeout.  */
	  int err = __nacl_irt_futex.futex_wait_abs ((volatile int *) tidp,
						     tid, abstime);
	  if (err != 0)
	    {
	      if (__glibc_likely (err == ETIMEDOUT))
		return err;
	      assert (err == EAGAIN);
	    }
	}
    }

  return 0;
}
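__lll_timedwait_tid above is the NaCl-specific variant. As a generic illustration of the same loop shape, here is a standalone sketch that polls a tid word with relaxed loads until it becomes zero or an absolute CLOCK_REALTIME deadline passes. Real implementations block in a futex rather than sleeping; all names and the 1 ms poll interval are assumptions (build with -pthread):

#include <stdatomic.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static atomic_int tid_word;   /* nonzero while the "thread" is alive */

static int
timedwait_tid (atomic_int *tidp, const struct timespec *abstime)
{
  /* Reject invalid timeouts, as in the glibc code above.  */
  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  while (atomic_load_explicit (tidp, memory_order_relaxed) != 0)
    {
      struct timespec now;
      clock_gettime (CLOCK_REALTIME, &now);
      if (now.tv_sec > abstime->tv_sec
          || (now.tv_sec == abstime->tv_sec
              && now.tv_nsec >= abstime->tv_nsec))
        return ETIMEDOUT;
      nanosleep (&(struct timespec) { .tv_nsec = 1000000 }, NULL); /* 1 ms */
    }
  return 0;
}

static void *
worker (void *arg)
{
  struct timespec t = { .tv_sec = 0, .tv_nsec = 100000000 };  /* 100 ms */
  nanosleep (&t, NULL);
  atomic_store_explicit (&tid_word, 0, memory_order_release);  /* "exited" */
  return NULL;
}

int
main (void)
{
  atomic_store_explicit (&tid_word, 1234, memory_order_relaxed);
  pthread_t t;
  pthread_create (&t, NULL, worker, NULL);

  struct timespec deadline;
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 1;                          /* 1 s timeout */
  printf ("wait result: %d\n", timedwait_tid (&tid_word, &deadline)); /* 0 */

  pthread_join (t, NULL);
  return 0;
}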
Example #11
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (!right_align_mode)
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size);

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
  // retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size)
                    ? (t ? t->GenerateRandomTag() : kFallbackAllocTag)
                    : 0;
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag);
  }

  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
      right_align_mode) {
    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
    if (right_align_mode == kRightAlignAlways ||
        GetTagFromPointer(as_uptr) & 1) {  // use a tag bit as a random bit.
      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
      meta->right_aligned = 1;
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
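The UntagPtr/GetTagFromPointer calls used in the two HWASan functions rely on storing a small tag in the unused top byte of a 64-bit address (AArch64 top-byte-ignore). A minimal arithmetic sketch of that tagging scheme; the helper names are stand-ins, though the top-byte placement matches the usual AArch64 layout:

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56                              /* top byte of a 64-bit VA */
#define TAG_MASK  (0xffull << TAG_SHIFT)

typedef uint8_t tag_t;

static uintptr_t
tag_pointer (uintptr_t untagged, tag_t tag)
{
  return (untagged & ~TAG_MASK) | ((uintptr_t) tag << TAG_SHIFT);
}

static uintptr_t
untag_pointer (uintptr_t tagged)
{
  return tagged & ~TAG_MASK;
}

static tag_t
tag_of (uintptr_t tagged)
{
  return (tag_t) (tagged >> TAG_SHIFT);
}

int
main (void)
{
  uintptr_t p = 0x00007f1234567890ull;   /* made-up user-space address */
  uintptr_t tp = tag_pointer (p, 0x2d);

  printf ("tagged:   %#lx\n", (unsigned long) tp);
  printf ("tag:      %#x\n", tag_of (tp));                     /* 0x2d */
  printf ("untagged: %#lx\n", (unsigned long) untag_pointer (tp));

  /* A mismatch like the one PointerAndMemoryTagsMatch guards against.  */
  tag_t memory_tag = 0x11;               /* what the shadow would say */
  printf ("match: %d\n", tag_of (tp) == memory_tag);           /* 0 */
  return 0;
}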
Example #12
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem, expected;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_acq (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_rel (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  /* Tests for C11-like atomics.  */
  mem = 11;
  if (atomic_load_relaxed (&mem) != 11 || atomic_load_acquire (&mem) != 11)
    {
      puts ("atomic_load_{relaxed,acquire} test failed");
      ret = 1;
    }

  atomic_store_relaxed (&mem, 12);
  if (mem != 12)
    {
      puts ("atomic_store_relaxed test failed");
      ret = 1;
    }
  atomic_store_release (&mem, 13);
  if (mem != 13)
    {
      puts ("atomic_store_release test failed");
      ret = 1;
    }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_relaxed (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_relaxed (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_acquire (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_acquire test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_acquire (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_acquire test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_release (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_release test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_release (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_release test 2 failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_exchange_acquire (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_exchange_release (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_release test failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_fetch_add_relaxed (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_relaxed test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acquire (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_release (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_release test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acq_rel (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acq_rel test failed");
      ret = 1;
    }

  mem = 3;
  if (atomic_fetch_and_acquire (&mem, 2) != 3 || mem != 2)
    {
      puts ("atomic_fetch_and_acquire test failed");
      ret = 1;
    }

  mem = 4;
  if (atomic_fetch_or_relaxed (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_relaxed test failed");
      ret = 1;
    }
  mem = 4;
  if (atomic_fetch_or_acquire (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_acquire test failed");
      ret = 1;
    }

  /* This is a single-threaded test, so we can't test the effects of the
     fences.  */
  atomic_thread_fence_acquire ();
  atomic_thread_fence_release ();
  atomic_thread_fence_seq_cst ();

  return ret;
}
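In the glibc tree, a do_test like the one above is normally wired into the common test driver with boilerplate along these lines (the exact include differs between older test-skeleton.c based tests and newer <support/test-driver.c> ones):

#define TEST_FUNCTION do_test ()
#include "../test-skeleton.c"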
Example #13
File: tpp.c Project: siddhesh/glibc
int
__pthread_tpp_change_priority (int previous_prio, int new_prio)
{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
  int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
  int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);

  if (tpp == NULL)
    {
      /* See __init_sched_fifo_prio.  We need both the min and max prio,
         so need to check both, and run initialization if either one is
         not initialized.  The memory model's write-read coherence rule
         makes this work.  */
      if (fifo_min_prio == -1 || fifo_max_prio == -1)
	{
	  __init_sched_fifo_prio ();
	  fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
	  fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
	}

      size_t size = sizeof *tpp;
      size += (fifo_max_prio - fifo_min_prio + 1)
	      * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
	return ENOMEM;
      tpp->priomax = fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }

  assert (new_prio == -1
	  || (new_prio >= fifo_min_prio
	      && new_prio <= fifo_max_prio));
  assert (previous_prio == -1
	  || (previous_prio >= fifo_min_prio
	      && previous_prio <= fifo_max_prio));

  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
	return EAGAIN;
      ++tpp->priomap[new_prio - fifo_min_prio];
      if (new_prio > priomax)
	newpriomax = new_prio;
    }

  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
	  && priomax == previous_prio
	  && previous_prio > new_prio)
	{
	  int i;
	  for (i = previous_prio - 1; i >= fifo_min_prio; --i)
	    if (tpp->priomap[i - fifo_min_prio])
	      break;
	  newpriomax = i;
	}
    }

  if (priomax == newpriomax)
    return 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (self->lock, LLL_PRIVATE);

  tpp->priomax = newpriomax;

  int result = 0;

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
	result = errno;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
	result = errno;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
	{
	  if (sp.sched_priority < newpriomax)
	    sp.sched_priority = newpriomax;

	  if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
	    result = errno;
	}
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
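__pthread_tpp_change_priority implements the bookkeeping behind PTHREAD_PRIO_PROTECT (priority-ceiling) mutexes. A short sketch of the public API that ends up exercising it; the ceiling value 10 is arbitrary, and observing the priority boost requires permission to run under a realtime policy (build with -pthread):

#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  /* Priority-protection protocol: while a thread holds the mutex it runs
     at least at the mutex's priority ceiling.  */
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&attr, 10);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  /* Locking boosts the caller's effective priority to the ceiling (this is
     where the priomap/priomax bookkeeping above gets updated); unlocking
     drops it back.  Under a non-realtime policy the lock attempt may fail
     (e.g. EINVAL), hence the check.  */
  if (pthread_mutex_lock (&m) == 0)
    {
      puts ("holding priority-ceiling mutex");
      pthread_mutex_unlock (&m);
    }

  pthread_mutex_destroy (&m);
  return 0;
}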