Example 1
// "Increment" the value of a semaphore atomically and
// return its old value. Note that this implements
// the special case of "incrementing" any negative
// value to +1 directly.
//
// NOTE: The value will _not_ wrap above SEM_VALUE_MAX
static int __sem_inc(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;
  unsigned int new_value;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to
  // ensure that all memory accesses made before it are visible to other
  // threads. A release fence might suffice, but it is still under
  // discussion whether POSIX semaphores should provide sequential
  // consistency.
  do {
    // Can't go higher than SEM_VALUE_MAX.
    if (SEMCOUNT_TO_VALUE(old_value) == SEM_VALUE_MAX) {
      break;
    }

    // If the counter is negative, go directly to one, otherwise just increment.
    if (SEMCOUNT_TO_VALUE(old_value) < 0) {
      new_value = SEMCOUNT_ONE | shared;
    } else {
      new_value = SEMCOUNT_INCREMENT(old_value) | shared;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           new_value));

  return SEMCOUNT_TO_VALUE(old_value);
}
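The SEMCOUNT_* helpers are defined elsewhere in Bionic's semaphore.cpp. A sketch of the assumed layout (bit 0 is the process-shared flag, the upper 31 bits hold the signed count; the real definitions may differ), which Example 8 below also relies on:

// Assumed definitions (sketch) -- bit 0: shared flag, bits 1..31: signed value.
#define SEMCOUNT_SHARED_MASK     0x00000001
#define SEMCOUNT_VALUE_MASK      0xFFFFFFFE
#define SEMCOUNT_VALUE_SHIFT     1
#define SEMCOUNT_FROM_VALUE(val) (((unsigned int)(val)) << SEMCOUNT_VALUE_SHIFT)
#define SEMCOUNT_TO_VALUE(sval)  ((int)(sval) >> SEMCOUNT_VALUE_SHIFT)
#define SEMCOUNT_ONE             SEMCOUNT_FROM_VALUE(1)
#define SEMCOUNT_INCREMENT(sval) (((sval) + (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)
#define SEMCOUNT_DECREMENT(sval) (((sval) - (1U << SEMCOUNT_VALUE_SHIFT)) & SEMCOUNT_VALUE_MASK)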
Example 2
File: ex2.c Project: artpol84/poc
void *f(void* thr_data)
{
    int my_idx = *(int*)thr_data;

    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(my_idx, &set);
    if( pthread_setaffinity_np(pthread_self(), sizeof(set), &set) ){
        abort();
    }


    // Spin until the main thread flips `start` to release all workers.
    while( !start );

    timings[my_idx][0] = GET_TS();
    
    for(int n = 0; n < niter; ++n) {
        _Bool flag = false;
        while( !flag ){
            int tmp = acnt;
            flag = atomic_compare_exchange_weak(&acnt, &tmp, tmp + 1);
        }
        ++cnt; // data race (undefined behavior); in practice some updates are missed
    }
    timings[my_idx][1] = GET_TS();
    return 0;
}
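The snippet leans on globals and a timestamp helper defined elsewhere in ex2.c. A minimal sketch that would make it compile, with assumed types and sizes (the real file may differ):

#include <stdatomic.h>
#include <time.h>

#define MAX_THREADS 128                 // assumed capacity

static volatile _Bool start = 0;        // flipped by main() to release workers
static atomic_int acnt = 0;             // counter updated via CAS
static int cnt = 0;                     // plain counter, intentionally racy
static int niter = 1000000;             // iterations per thread
static double timings[MAX_THREADS][2];  // per-thread start/stop timestamps

// Assumed helper: monotonic timestamp in seconds.
static double GET_TS(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + 1e-9 * ts.tv_nsec;
}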
Example 3
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);
}
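The iteration cap above guards against pathological spurious failures of the weak form; production code simply retries until the exchange succeeds. A minimal sketch of that canonical retry loop (a hypothetical fetch_increment helper, not part of this test):

#include <stdatomic.h>

// Atomically increment *p and return the value it held before.
static int fetch_increment(atomic_int* p) {
  int expected = atomic_load(p);
  // On failure (spurious or real), `expected` is reloaded with the
  // current value and the exchange is retried.
  while (!atomic_compare_exchange_weak(p, &expected, expected + 1)) {
  }
  return expected;
}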
Example 4
void CRYPTO_refcount_inc(CRYPTO_refcount_t *in_count) {
  _Atomic CRYPTO_refcount_t *count = (_Atomic CRYPTO_refcount_t *) in_count;
  uint32_t expected = atomic_load(count);

  while (expected != CRYPTO_REFCOUNT_MAX) {
    uint32_t new_value = expected + 1;
    if (atomic_compare_exchange_weak(count, &expected, new_value)) {
      break;
    }
  }
}
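Note that CRYPTO_refcount_inc saturates rather than wraps: once the count reaches CRYPTO_REFCOUNT_MAX it is pinned there forever, and the matching decrement in Example 9 likewise refuses to move it, so a saturated object is simply never freed. The type itself is not shown; a sketch of the definitions assumed from BoringSSL's crypto/internal.h:

#include <stdint.h>

// Assumed definitions (sketch); the real header may differ.
typedef uint32_t CRYPTO_refcount_t;
#define CRYPTO_REFCOUNT_MAX 0xffffffff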
Example 5
File: main.cpp Project: CCJY/coliru
#include <cassert>
#include <memory>

int main() {
    std::shared_ptr<int> a;
    std::shared_ptr<int> b;
    std::shared_ptr<int> c = std::make_shared<int>(10);

    // Retry until the CAS succeeds; the weak form may fail spuriously
    // even though a and b (both empty) compare equal.
    while (!atomic_compare_exchange_weak(&a, &b, c))
        ;

    assert(atomic_load(&a) == c);
    assert(c.use_count() == 2);  // a and c share ownership
}
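These free-function shared_ptr atomics are only data-race free if every access to `a` goes through atomic_load/atomic_store/atomic_compare_exchange_*; C++20 deprecates them in favor of std::atomic<std::shared_ptr<T>>.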
Example 6
int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
  for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
    uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
    while (!SeqOfKeyInUse(seq)) {
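      // Try to claim the free slot by bumping its sequence number. On CAS
      // failure `seq` is reloaded, and the while condition re-checks
      // whether another thread took the slot in the meantime.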
      if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
        atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
        *key = i | KEY_VALID_FLAG;
        return 0;
      }
    }
  }
  return EAGAIN;
}
Example 7
static unsigned int
AcquireSegmentBufferSpace(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegmentBuffer_t* Buffer,
    _In_ size_t                     Length)
{
    // Variables
    unsigned int ReadIndex;
    unsigned int WriteIndex;
    size_t BytesAvailable;

    // Make sure we write all the bytes
    while (1) {
        WriteIndex      = atomic_load(&Buffer->WritePointer);
        ReadIndex       = atomic_load(&Buffer->ReadCommitted);
        BytesAvailable  = MIN(
            CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex), Length);
        if (BytesAvailable != Length) {
            SchedulerAtomicThreadSleep((atomic_int*)&Buffer->ReadCommitted, (int*)&ReadIndex, 0);
            continue; // Start over
        }

        // Synchronize with other producers
        if (Pipe->Configuration & PIPE_MULTIPLE_PRODUCERS) {
            while (BytesAvailable == Length) {
                size_t NewWritePointer  = WriteIndex + BytesAvailable;
                if (atomic_compare_exchange_weak(&Buffer->WritePointer, &WriteIndex, NewWritePointer)) {
                    break;
                }
                ReadIndex       = atomic_load(&Buffer->ReadCommitted);
                BytesAvailable  = MIN(
                    CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex), Length);
            }

            // Did we end up overcommitting?
            if (BytesAvailable != Length) {
                continue; // Start write loop all over
            }
        }
        else {
            atomic_store_explicit(&Buffer->WritePointer, WriteIndex + BytesAvailable, memory_order_relaxed);
        }

        // Break us out here
        if (BytesAvailable == Length) {
            break;
        }
    }
    return WriteIndex;
}
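The CAS on WritePointer only reserves a span of the ring; the data itself is published later through WriteCommitted (see Example 13), which is why a producer that loses the race recomputes BytesAvailable and may have to start the outer loop over.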
Example 8
// Same as __sem_dec, but will not touch anything if the
// value is already negative *or* 0. Returns the old value.
static int __sem_trydec(atomic_uint* sem_count_ptr) {
  unsigned int old_value = atomic_load_explicit(sem_count_ptr, memory_order_relaxed);
  unsigned int shared = old_value & SEMCOUNT_SHARED_MASK;

  // Use memory_order_seq_cst in the atomic_compare_exchange operation to
  // ensure that all memory accesses made by other threads are visible to
  // the current thread. An acquire fence might suffice, but it is still
  // under discussion whether POSIX semaphores should provide sequential
  // consistency.
  do {
    if (SEMCOUNT_TO_VALUE(old_value) <= 0) {
      break;
    }
  } while (!atomic_compare_exchange_weak(sem_count_ptr, &old_value,
           SEMCOUNT_DECREMENT(old_value) | shared));

  return SEMCOUNT_TO_VALUE(old_value);
}
Example 9
int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *in_count) {
  _Atomic CRYPTO_refcount_t *count = (_Atomic CRYPTO_refcount_t *)in_count;
  uint32_t expected = atomic_load(count);

  for (;;) {
    if (expected == 0) {
      abort();
    } else if (expected == CRYPTO_REFCOUNT_MAX) {
      return 0;
    } else {
      const uint32_t new_value = expected - 1;
      if (atomic_compare_exchange_weak(count, &expected, new_value)) {
        return new_value == 0;
      }
    }
  }
}
Example 10
File: qsbr.c Project: rmind/libqsbr
/*
 * qsbr_register: register the current thread for QSBR.
 */
int
qsbr_register(qsbr_t *qs)
{
	qsbr_tls_t *t, *head;

	t = pthread_getspecific(qs->tls_key);
	if (__predict_false(t == NULL)) {
		if ((t = malloc(sizeof(qsbr_tls_t))) == NULL) {
			return ENOMEM;
		}
		pthread_setspecific(qs->tls_key, t);
	}
	memset(t, 0, sizeof(qsbr_tls_t));

	/* Lock-free push onto the head of the registration list: reload the
	 * head and retry until the CAS publishes the new entry. Note that the
	 * C11 interface takes a pointer to the expected value, hence &head. */
	do {
		head = qs->list;
		t->next = head;
	} while (!atomic_compare_exchange_weak(&qs->list, &head, t));

	return 0;
}
Example 11
/////////////////////////////////////////////////////////////////////////
// System Pipe Structured Buffer Code
static void
GetSegmentProductionSpot(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegment_t*       Segment)
{
    // Variables
    int ProductionSpots;

    atomic_fetch_add(&Segment->References, 1);
    while (1) {
        ProductionSpots = atomic_load(&Segment->ProductionSpots);
        if (!ProductionSpots) {
            SchedulerAtomicThreadSleep(&Segment->ProductionSpots, &ProductionSpots, 0);
            continue; // Start over
        }

        // Synchronize with other producers
        if (Pipe->Configuration & PIPE_MULTIPLE_PRODUCERS) {
            while (ProductionSpots) {
                if (atomic_compare_exchange_weak(&Segment->ProductionSpots, 
                    &ProductionSpots, ProductionSpots - 1)) {
                    break;
                }
            }

            // Did we end up overcommitting?
            if (!ProductionSpots) {
                continue; // Start write loop all over
            }
            break;
        }
        else {
            // No sweat
            atomic_store_explicit(&Segment->ProductionSpots, ProductionSpots - 1, memory_order_relaxed);
            break;
        }
    }
}
Example 12
static size_t
ReadRawSegmentBuffer(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegmentBuffer_t* Buffer,
    _In_ uint8_t*                   Data,
    _In_ size_t                     Length)
{
    // Variables
    unsigned int ReadIndex;
    unsigned int WriteIndex;
    size_t BytesAvailable   = 0;
    size_t BytesRead        = 0;
    size_t BytesCommitted;
    
    // Make sure there are bytes to read
    while (BytesRead < Length) {
        // Use the write-committed index as the read limit
        WriteIndex      = atomic_load(&Buffer->WriteCommitted);
        ReadIndex       = atomic_load(&Buffer->ReadPointer);
        BytesAvailable  = MIN(
            CalculateBytesAvailableForReading(Buffer, ReadIndex, WriteIndex), 
            Length - BytesRead);
        BytesCommitted  = BytesAvailable;
        if (!BytesAvailable) {
            if (Pipe->Configuration & PIPE_NOBLOCK) {
                break;
            }
            SchedulerAtomicThreadSleep((atomic_int*)&Buffer->WriteCommitted, (int*)&WriteIndex, 0);
            continue; // Start over
        }

        // Synchronize with other consumers
        if (Pipe->Configuration & PIPE_MULTIPLE_CONSUMERS) {
            while (BytesAvailable) {
                size_t NewReadPointer   = ReadIndex + BytesAvailable;
                if (atomic_compare_exchange_weak(&Buffer->ReadPointer, &ReadIndex, NewReadPointer)) {
                    break;
                }
                WriteIndex      = atomic_load(&Buffer->WriteCommitted);
                BytesAvailable  = MIN(
                    CalculateBytesAvailableForReading(Buffer, ReadIndex, WriteIndex), 
                    Length - BytesRead);
            }

            if (!BytesAvailable) {
                continue; // Start over as we ran out
            }

            // Wait for our turn
            BytesCommitted = atomic_load(&Buffer->ReadCommitted);
            while (BytesCommitted < ReadIndex) {
                BytesCommitted = atomic_load(&Buffer->ReadCommitted);
            }
            BytesCommitted = BytesAvailable;
        }
        else {
            atomic_store_explicit(&Buffer->ReadPointer, ReadIndex + BytesAvailable, memory_order_relaxed);
        }

        // Write the data to the provided buffer
        while (BytesAvailable--) {
            Data[BytesRead++] = Buffer->Pointer[(ReadIndex++ & (Buffer->Size - 1))];
        }
        atomic_fetch_add(&Buffer->ReadCommitted, BytesCommitted);
        SchedulerHandleSignal((uintptr_t*)&Buffer->ReadCommitted);

        // If any bytes were read, return. With raw bytes we allow the reader
        // to read fewer than requested, but never to read 0.
        if (BytesRead > 0) {
            break;
        }
    }
    return BytesRead;
}
Example 13
/////////////////////////////////////////////////////////////////////////
// System Pipe Raw Buffer Code
static size_t
WriteRawSegmentBuffer(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegmentBuffer_t* Buffer,
    _In_ const uint8_t*             Data,
    _In_ size_t                     Length)
{
    // Variables
    unsigned int ReadIndex;
    unsigned int WriteIndex;
    size_t BytesWritten = 0;
    size_t BytesAvailable;
    size_t BytesCommitted;

    // Make sure we write all the bytes
    while (BytesWritten < Length) {
        WriteIndex      = atomic_load(&Buffer->WritePointer);
        ReadIndex       = atomic_load(&Buffer->ReadCommitted);
        BytesAvailable  = MIN(
            CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex),
            Length - BytesWritten);
        BytesCommitted  = BytesAvailable;
        if (!BytesAvailable) {
            if (Pipe->Configuration & PIPE_NOBLOCK) {
                break;
            }
            SchedulerAtomicThreadSleep((atomic_int*)&Buffer->ReadCommitted, (int*)&ReadIndex, 0);
            continue; // Start over
        }

        // Synchronize with other producers
        if (Pipe->Configuration & PIPE_MULTIPLE_PRODUCERS) {
            while (BytesAvailable) {
                size_t NewWritePointer  = WriteIndex + BytesAvailable;
                if (atomic_compare_exchange_weak(&Buffer->WritePointer, &WriteIndex, NewWritePointer)) {
                    break;
                }
                ReadIndex       = atomic_load(&Buffer->ReadCommitted);
                BytesAvailable  = MIN(
                    CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex),
                    Length - BytesWritten);
            }

            if (!BytesAvailable) {
                continue; // Start over as we ran out
            }

            // Wait for our turn
            BytesCommitted = atomic_load(&Buffer->WriteCommitted);
            while (BytesCommitted < WriteIndex) {
                BytesCommitted = atomic_load(&Buffer->WriteCommitted);
            }
            BytesCommitted = BytesAvailable;
        }
        else {
            atomic_store_explicit(&Buffer->WritePointer, WriteIndex + BytesAvailable, memory_order_relaxed);
        }

        // Write the data to the internal buffer
        while (BytesAvailable--) {
            Buffer->Pointer[(WriteIndex++ & (Buffer->Size - 1))] = Data[BytesWritten++];
        }
        atomic_fetch_add(&Buffer->WriteCommitted, BytesCommitted);
        SchedulerHandleSignal((uintptr_t*)&Buffer->WriteCommitted);
    }
    return BytesWritten;
}
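Examples 12 and 13 are mirror halves of the same protocol: reserve a byte range with a CAS on ReadPointer/WritePointer, spin until every earlier reservation has committed (ReadCommitted/WriteCommitted catching up to the range's start index), copy through the power-of-two ring via the Size - 1 mask, then publish the range with atomic_fetch_add and wake any sleeping peer.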