Example #1
/* v and count are declared outside this excerpt; plain ints are assumed here
   so the test is self-contained.  */
extern void abort (void);

int v, count;

int
main ()
{
  v = 0;
  count = 0;

  __atomic_store_n (&v, count + 1, __ATOMIC_RELAXED);
  if (v != ++count)
    abort ();

  __atomic_store_n (&v, count + 1, __ATOMIC_RELEASE);
  if (v != ++count)
    abort ();

  __atomic_store_n (&v, count + 1, __ATOMIC_SEQ_CST);
  if (v != ++count)
    abort ();

  /* Now test the generic variant.  */
  count++;

  __atomic_store (&v, &count, __ATOMIC_RELAXED);
  if (v != count++)
    abort ();

  __atomic_store (&v, &count, __ATOMIC_RELEASE);
  if (v != count++)
    abort ();

  __atomic_store (&v, &count, __ATOMIC_SEQ_CST);
  if (v != count)
    abort ();


  return 0;
}
Example #2
static void* thread_proc(void* param) {
    /* current, thread_count and microseconds() are defined by the surrounding
       benchmark; see the harness sketch after this example. */
    int robot_n = (int)(intptr_t) param;  /* cast via intptr_t to avoid truncating the pointer */
    for (;;) {
        long long start = microseconds();
        long long count = 0;
        while (microseconds() - start < 1000000) {
            long long iterations = 10000;
            for (int i = 0; i < iterations; ++i) {
                while (__atomic_load_n(&current, __ATOMIC_SEQ_CST) != robot_n * 2) {
#ifdef USE_PAUSE
                    _mm_pause();
#endif
                }
                __atomic_store_n(&current, robot_n * 2 + 1, __ATOMIC_SEQ_CST);
                //printf("%d\n", robot_n);
                __atomic_store_n(&current, (robot_n + 1) % thread_count * 2, __ATOMIC_SEQ_CST);
            }
            count += iterations;
        }
        if (robot_n == 0) {
            long long dns = 1000ll * (microseconds() - start);
            long long ns_per_call = dns / count;
            printf("%lld ns per step\n", ns_per_call);
        }
    }
}
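The globals and the timing helper this snippet relies on are not shown. Below is a minimal harness sketch, assuming current and thread_count are plain global ints and microseconds() wraps clock_gettime; the main() here is hypothetical and only shows how the ping-pong threads would be driven.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int current;           /* turn marker: robot_n * 2 means "robot_n may run" */
static int thread_count = 2;  /* number of ping-pong threads */

static long long microseconds(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000ll + ts.tv_nsec / 1000;
}

static void* thread_proc(void* param);  /* the function shown above */

int main(void) {
    pthread_t threads[2];
    for (int i = 0; i < thread_count; ++i)
        pthread_create(&threads[i], NULL, thread_proc, (void*)(intptr_t)i);
    for (int i = 0; i < thread_count; ++i)
        pthread_join(threads[i], NULL);
    return 0;
}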
Example #3
void
SIZE(libat_store) (UTYPE *mptr, UTYPE newval, int smodel)
{
    if (maybe_specialcase_relaxed(smodel))
        __atomic_store_n (mptr, newval, __ATOMIC_RELAXED);
    else if (maybe_specialcase_acqrel(smodel))
        /* Note that ACQ and ACQ_REL are not valid for store.  */
        __atomic_store_n (mptr, newval, __ATOMIC_RELEASE);
    else
        __atomic_store_n (mptr, newval, __ATOMIC_SEQ_CST);
}
Example #4
BOOL tsumego_setup_if_closed(ThreadInfo *info, const Board *board, TreeBlock *bl) {
  const TreeHandle *s = info->s;

  Stone win_state = S_EMPTY;

  // Check whether the newly created node bl is an end state; if so, there is no need to call the DCNN.
  Stone curr_player = OPPONENT(board->_next_player);

  if (curr_player == s->params.defender) {
    // Check whether, in the given region, the current player has two eyes,
    // i.e. whether at least one of the player's groups lives.
    BOOL curr_lives = OneGroupLives(board, curr_player, &s->params.ld_region);
    if (curr_lives) win_state = curr_player;
  }

  if (s->params.defender == S_BLACK && board->_w_cap >= 4) win_state = S_WHITE;
  else if (s->params.defender == S_WHITE && board->_b_cap >= 4) win_state = S_BLACK;

  // If this is a terminal state, record the result:
  // set the prove/disprove numbers according to which side lives.
  if (win_state != S_EMPTY) {
    /*
    if (win_state == S_WHITE) {
      char buf[100];
      Coord last_move = bl->parent->data.moves[bl->parent_offset];
      printf("%s Lives! Final move: %s\n", curr_player == S_BLACK ? "B" : "W", get_move_str(last_move, curr_player, buf));
      ShowBoard(board, SHOW_ALL);
      printf("\n");
    }
    */

    int b, w;
    if (win_state == S_BLACK) {
      b = 0;
      w = INIT_PROVE_NUM;
    } else {
      w = 0;
      b = INIT_PROVE_NUM;
    }
    // Update the statistics in the parent node.
    ProveNumber *pn = &bl->parent->cnn_data.ps[bl->parent_offset];
    __atomic_store_n(&pn->w, w, __ATOMIC_RELAXED);
    __atomic_store_n(&pn->b, b, __ATOMIC_RELAXED);

    bl->n = 0;
    bl->terminal_status = win_state;
    // bl->player = mmove.player;
    return TRUE;
  }
  return FALSE;
}
Example #5
void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}
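The comment above describes only the producer side; the relaxed store pays off because the unwinder's lookup path checks the flag with an equally relaxed load before touching the mutex. A reduced, self-contained model of that pairing (names and structure are illustrative, not the real unwind-dw2-fde code):

#include <pthread.h>
#include <stddef.h>

static int any_objects_registered;
static pthread_mutex_t object_mutex = PTHREAD_MUTEX_INITIALIZER;

void
register_object (void)
{
  pthread_mutex_lock (&object_mutex);
  /* ... link the object into the registered list ... */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
  pthread_mutex_unlock (&object_mutex);
}

void *
find_fde (void *pc)
{
  /* Fast path: if no library ever registered FDEs, skip the lock.  */
  if (!__atomic_load_n (&any_objects_registered, __ATOMIC_RELAXED))
    return NULL;

  pthread_mutex_lock (&object_mutex);
  /* ... search the registered objects for pc ... */
  pthread_mutex_unlock (&object_mutex);
  (void) pc;
  return NULL;  /* placeholder result */
}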
Example #6
Threading::Semaphore::Semaphore()
{
	// other platforms explicitly make a thread-private (unshared) semaphore
	// here. But it seems Mach doesn't support that.
	MACH_CHECK(semaphore_create(mach_task_self(), (semaphore_t *)&m_sema, SYNC_POLICY_FIFO, 0));
	__atomic_store_n(&m_counter, 0, __ATOMIC_SEQ_CST);
}
Example #7
File: task.c Project: jtramm/gcc
static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
  child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (taskgroup->children != child_task)
    return;
  if (child_task->next_taskgroup != child_task)
    taskgroup->children = child_task->next_taskgroup;
  else
    {
      taskgroup->children = NULL;
      if (taskgroup->in_taskgroup_wait)
	{
	  taskgroup->in_taskgroup_wait = false;
	  gomp_sem_post (&taskgroup->taskgroup_sem);
	}
    }
}
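MEMMODEL_RELEASE is libgomp's alias for __ATOMIC_RELEASE. The release store only helps because the reader (GOMP_taskgroup_end, which checks num_children outside the task lock) uses an acquire load. A reduced sketch of that pairing, with hypothetical names standing in for the libgomp structures:

#include <stddef.h>

struct taskgroup {
  size_t num_children;
  int result;   /* stands in for memory written by child_task->fn */
};

/* Last child: publish its work, then clear the counter with release.  */
void
child_finishes (struct taskgroup *tg, int value)
{
  tg->result = value;
  __atomic_store_n (&tg->num_children, 0, __ATOMIC_RELEASE);
}

/* Waiter: an acquire load of the counter makes the child's writes visible.  */
int
waiter_check (struct taskgroup *tg)
{
  if (__atomic_load_n (&tg->num_children, __ATOMIC_ACQUIRE) == 0)
    return tg->result;
  return -1;  /* children still running */
}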
Example #8
void
core_run(void *arg_admin, void *arg_worker)
{
    pthread_t worker, admin;
    int ret;

    if (!core_init) {
        log_crit("core cannot run because it hasn't been initialized");
        return;
    }

    ret = pthread_create(&worker, NULL, core_worker_evloop, arg_worker);
    if (ret != 0) {
        log_crit("pthread create failed for worker thread: %s", strerror(ret));
        goto error;
    }

    __atomic_store_n(&admin_running, true, __ATOMIC_RELAXED);
    ret = pthread_create(&admin, NULL, core_admin_evloop, arg_admin);
    if (ret != 0) {
        log_crit("pthread create failed for admin thread: %s", strerror(ret));
        goto error;
    }

    core_server_evloop();

error:
    core_teardown();
}
Example #9
/**
 * Acquire an existing session from storage or create a new one.
 * All sessions MUST be requested through this function.
 * @param[in] ip IPv4 address of client.
 * @param[in] existing_only Search only for an existing session.
 * @return The session (with its reference count incremented), or NULL if not found and existing_only is set.
 */
struct zsession *session_acquire(uint32_t ip, bool existing_only)
{
    struct zsession *sess = NULL;
    size_t sidx = STORAGE_IDX(ip);

    // search for existing session
    pthread_rwlock_rdlock(&zinst()->sessions_lock[sidx]);
    HASH_FIND(hh, zinst()->sessions[sidx], &ip, sizeof(ip), sess);
    if (NULL != sess) {
        __atomic_add_fetch(&sess->refcnt, 1, __ATOMIC_RELAXED);
    }
    pthread_rwlock_unlock(&zinst()->sessions_lock[sidx]);

    // or create new session
    if (!existing_only && NULL == sess) {
        pthread_rwlock_wrlock(&zinst()->sessions_lock[sidx]);

        HASH_FIND(hh, zinst()->sessions[sidx], &ip, sizeof(ip), sess);
        if (NULL != sess) {
            __atomic_add_fetch(&sess->refcnt, 1, __ATOMIC_RELAXED);
        } else {
            sess = session_create();
            sess->ip = ip;
            __atomic_store_n(&sess->last_activity, ztime(false), __ATOMIC_RELAXED);
            __atomic_add_fetch(&sess->refcnt, 1, __ATOMIC_RELAXED); // sessions storage reference

            HASH_ADD(hh, zinst()->sessions[sidx], ip, sizeof(ip), sess);
        }

        pthread_rwlock_unlock(&zinst()->sessions_lock[sidx]);
    }

    return sess;
}
Example #10
static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }
  child_task->prev_child->next_child = child_task->next_child;
  child_task->next_child->prev_child = child_task->prev_child;
  if (parent->children != child_task)
    return;
  if (child_task->next_child != child_task)
    parent->children = child_task->next_child;
  else
    {
      /* We access task->children in GOMP_taskwait
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
      if (parent->taskwait && parent->taskwait->in_taskwait)
	{
	  parent->taskwait->in_taskwait = false;
	  gomp_sem_post (&parent->taskwait->taskwait_sem);
	}
    }
}
Example #11
/**
 * Register an IRQ handler and its parameter.
 *
 * Returns 0 if OK.
 */
ac_u32 ac_exception_irq_register(int_handler handler,
    identify_and_clear_source iacs, ac_uptr param) {
  ac_u32 status = 1;

  // Currently there is no unregister, so we're only racing
  // with the interrupt handler itself. The interrupt handler
  // looks only at the handler field and, if it's not AC_NULL,
  // assumes the entry is good. Thus we update the handler field
  // last when adding a new entry.
  for (ac_u32 i = irq_handler_count; i < MAX_HANDLERS; i++) {
    ac_bool* pavailable = &irq_handlers[i].available;
    ac_bool expected = AC_TRUE;
    ac_bool ok = __atomic_compare_exchange_n(pavailable, &expected,
        AC_FALSE, AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
    if (ok) {
      irq_handlers[i].param = param;
      irq_handlers[i].iacs = iacs;
      int_handler* phandler = &irq_handlers[i].handler;
      __atomic_store_n(phandler, handler, __ATOMIC_RELEASE);
      irq_handler_count += 1;
      status = 0;
      break;
    }
  }
  return status;
}
Example #12
void unlock(struct clh_node **node) {
    struct clh_node *pred = (*node)->prev;
    struct clh_node *tmp = *node;

    *node = pred; // Take the previous node, we reuse it
    __atomic_store_n(&tmp->locked, 0, __ATOMIC_RELEASE);
}
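unlock() is only half of the CLH protocol: the release store hands the lock to whichever thread is spinning on this node. A sketch of a compatible lock() side, assuming a global tail pointer and repeating only the two struct fields the snippet uses (the signature, the tail parameter, and the field layout are assumptions):

struct clh_node {
    struct clh_node *prev;
    int locked;
};

/* Enqueue our node by swapping it into the tail, then spin with acquire
   loads on the predecessor's flag until unlock() (above) clears it.
   The tail must initially point at a dummy node with locked == 0.  */
void lock(struct clh_node **node, struct clh_node **tail) {
    (*node)->locked = 1;
    struct clh_node *pred =
        __atomic_exchange_n(tail, *node, __ATOMIC_ACQ_REL);
    (*node)->prev = pred;
    while (__atomic_load_n(&pred->locked, __ATOMIC_ACQUIRE)) {
        /* spin until the predecessor releases the lock */
    }
}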
Example #13
/**
 * @see mpscifo.h
 */
void add(MpscFifo_t *pQ, Msg_t *pMsg) {
#if 0
  if (pMsg != NULL) {
    // Be sure pMsg->pNext == NULL
    pMsg->pNext = NULL;

    // Using the __atomic builtins; Clang doesn't seem to support stdatomic.h here.
    Msg_t** ptr_pHead = &pQ->pHead;
    Msg_t *pPrevHead = __atomic_exchange_n(ptr_pHead, pMsg, __ATOMIC_SEQ_CST); //ACQ_REL);
    Msg_t** ptr_pNext = &pPrevHead->pNext;
    __atomic_store_n(ptr_pNext, pMsg, __ATOMIC_SEQ_CST); //RELEASE);
    int32_t* ptr_count = &pQ->count;
    __atomic_fetch_add(ptr_count, 1, __ATOMIC_SEQ_CST);
    // TODO: Support "blocking" which means use condition variable
  }
#else
  if (pMsg != NULL) {
    pMsg->pNext = NULL;
    void** ptr_pHead = (void*)&pQ->pHead;
    Msg_t* pPrevHead = __atomic_exchange_n(ptr_pHead, pMsg, __ATOMIC_SEQ_CST); //ACQ_REL);
    pPrevHead->pNext = pMsg;
    pQ->count += 1;
  }
#endif
}
Example #14
/**
 * Initialize the timer_rescheduler interrupt.
 */
STATIC void init_timer() {
  union apic_timer_lvt_fields_u lvtu = { .fields = get_apic_timer_lvt() };
  ac_u32 out_eax, out_ebx, out_ecx, out_edx;

  // Verify that TSC_DEADLINE is enabled
  //
  // See "Intel 64 and IA-32 Architectures Software Developer's Manual"
  // Volume 3 chapter 10 "Advanced Programmable Interrupt Controller (APIC)"
  // Section 10.5.4.1 "TSC-Deadline Mode"

  get_cpuid(1, &out_eax, &out_ebx, &out_ecx, &out_edx);
  if (AC_GET_BITS(ac_u32, out_ecx, 1, 24) != 1) {
    ac_printf("CPU does not support TSC-Deadline mode\n");
    reset_x86();
  }

  lvtu.fields.vector = TIMER_RESCHEDULE_ISR_INTR; // interrupt vector
  lvtu.fields.disable = AC_FALSE; // interrupt enabled
  lvtu.fields.mode = 2;     // TSC-Deadline
  set_apic_timer_lvt(lvtu.fields);

  slice_default = AcTime_nanos_to_ticks(SLICE_DEFAULT_NANOSECS);

  __atomic_store_n(&timer_reschedule_isr_counter, 0, __ATOMIC_RELEASE);
  set_apic_timer_tsc_deadline(ac_tscrd() + slice_default);

  ac_printf("init_timer:-slice_default_nanosecs=%ld slice_default=%ld\n",
      SLICE_DEFAULT_NANOSECS, slice_default);
}
Example #15
void
GOMP_doacross_post (long *counts)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_doacross_work_share *doacross = ws->doacross;
  unsigned long ent;
  unsigned int i;

  if (__builtin_expect (doacross == NULL, 0))
    {
      __sync_synchronize ();
      return;
    }

  if (__builtin_expect (ws->sched == GFS_STATIC, 1))
    ent = thr->ts.team_id;
  else if (ws->sched == GFS_GUIDED)
    ent = counts[0];
  else
    ent = counts[0] / doacross->chunk_size;
  unsigned long *array = (unsigned long *) (doacross->array
					    + ent * doacross->elt_sz);

  if (__builtin_expect (doacross->flattened, 1))
    {
      unsigned long flattened
	= (unsigned long) counts[0] << doacross->shift_counts[0];

      for (i = 1; i < doacross->ncounts; i++)
	flattened |= (unsigned long) counts[i]
		     << doacross->shift_counts[i];
      flattened++;
      if (flattened == __atomic_load_n (array, MEMMODEL_ACQUIRE))
	__atomic_thread_fence (MEMMODEL_RELEASE);
      else
	__atomic_store_n (array, flattened, MEMMODEL_RELEASE);
      return;
    }

  __atomic_thread_fence (MEMMODEL_ACQUIRE);
  for (i = doacross->ncounts; i-- > 0; )
    {
      if (counts[i] + 1UL != __atomic_load_n (&array[i], MEMMODEL_RELAXED))
	__atomic_store_n (&array[i], counts[i] + 1UL, MEMMODEL_RELEASE);
    }
}
Example #16
inline void Lock::release ( void )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   __atomic_store_n(&state_, 0, __ATOMIC_RELEASE);
#else
   __sync_lock_release( &state_ );
#endif
}
Example #17
void
test ()
{
  int a_ = a;
  int b_ = b;
  int c_ = c;
  int d_ = d;
  int e_ = e;
  int f_ = f;
  int g_ = g;
  int h_ = h;
  int i_ = i;
  int j_ = j;
  int k_ = k;
  int l_ = l;
  int m_ = m;
  int n_ = n;
  int o_ = o;
  int p_ = p;

  int z;

  for (z = 0; z < 1000; z++)
    {
      __atomic_store_n (&y, 0x100000002ll, __ATOMIC_SEQ_CST);
      __atomic_store_n (&y, 0x300000004ll, __ATOMIC_SEQ_CST);
    }

  a = a_;
  b = b_;
  c = c_;
  d = d_;
  e = e_;
  f = f_;
  g = g_;
  h = h_;
  i = i_;
  j = j_;
  k = k_;
  l = l_;
  m = m_;
  n = n_;
  o = o_;
  p = p_;
}
Example #18
void enable_kcov(void)
{
	/* Enable coverage collection on the current thread. */
	if (ioctl(kcovfd, KCOV_ENABLE, 0))
		printf("Error enabling kcov: %s\n", strerror(errno));

	/* Reset coverage from the tail of the ioctl() call. */
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
}
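cover[0] holds the number of PCs recorded since the last reset, and the PCs themselves follow it in the same shared buffer. A sketch of the matching read-back and disable step, assuming the same kcovfd and cover globals and headers as the snippet (the function name is hypothetical):

void dump_and_disable_kcov(void)
{
	/* Number of PCs recorded since the reset in enable_kcov(). */
	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (unsigned long i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	/* Stop coverage collection on the current thread. */
	if (ioctl(kcovfd, KCOV_DISABLE, 0))
		printf("Error disabling kcov: %s\n", strerror(errno));
}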
Example #19
void simulate_thread_main()
{
  int x;

  /* Execute loads with value changing at various cyclic values.  */
  for (table_cycle_size = 16; table_cycle_size > 4 ; table_cycle_size--)
    {
      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
      /* In order to verify the returned value (which is not atomic), it needs
	 to be atomically stored into another variable and checked there.  */
      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);

      /* Execute the fetch/store a couple of times just to ensure the cycles
         have a chance to be interesting.  */
      ret = __atomic_load_n (&value, __ATOMIC_SEQ_CST);
      __atomic_store_n (&result, ret, __ATOMIC_SEQ_CST);
    }
}
Example #20
pdate* plist_date_create(struct tm value)
{
  pdate* date = (pdate*)malloc(sizeof(pdate));
  if (!date) return 0;
  date->super.kind = PLIST_NODE_DATE;
  date->value = value;
  __atomic_store_n(&date->super.ref_count, 1, __ATOMIC_SEQ_CST);
  return date;
}
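The constructor seeds the reference count with a sequentially consistent store; the matching release path is not part of this snippet. A hypothetical counterpart sketch (plist_date_release is an invented name; assumes <stdlib.h> for free):

/* Hypothetical counterpart: drop one reference and free the node when the
   last reference is gone.  The acq_rel RMW orders the final free after all
   earlier uses of the node by other threads.  */
void plist_date_release(pdate* date)
{
  if (!date) return;
  if (__atomic_sub_fetch(&date->super.ref_count, 1, __ATOMIC_ACQ_REL) == 0)
    free(date);
}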
Example #21
static void* entry_trampoline(void* param) {
  // Invoke the entry point
  ac_tcb* ptcb = (ac_tcb*)param;
  ptcb->entry(ptcb->entry_arg);

  // Mark AC_THREAD_ID_EMPTY
  ac_uptr* pthread_id = &ptcb->thread_id;
  __atomic_store_n(pthread_id, AC_THREAD_ID_EMPTY, __ATOMIC_RELEASE);
  return AC_NULL;
}
Example #22
void __MCFCRT_gthread_relock_callback_recursive_mutex(intptr_t context, intptr_t unlocked){
	__gthread_recursive_mutex_t *const recur_mutex = (__gthread_recursive_mutex_t *)context;

	_MCFCRT_ASSERT((size_t)unlocked >= 1);
	__gthread_mutex_lock(&(recur_mutex->__mutex));

	const uintptr_t self = _MCFCRT_GetCurrentThreadId();
	__atomic_store_n(&(recur_mutex->__owner), self, __ATOMIC_RELAXED);
	recur_mutex->__count = (size_t)unlocked;
}
Example #23
/**
 * Identify and clear the source of the interrupt.
 * After returning, interrupts will be enabled,
 * so we use __atomic operations on source.
 */
void periodic_iacs(ac_uptr param) {
  irq_param* pirq_param = (irq_param*)param;
  ac_u32 timer_ris = ac_timer_rd_ris(pirq_param->timer);
  if ((timer_ris & 0x1) != 0) {
    ac_bool* psource = &pirq_param->source;
    __atomic_store_n(psource, AC_TRUE, __ATOMIC_RELEASE);
    ac_timer_wr_int_clr(pirq_param->timer);
    ac_debug_printf("\n\nperiodic: %d cleared\n", pirq_param->timer);
  }
}
Example #24
unsigned int
__libat_feat1_init (void)
{
  unsigned int eax, ebx, ecx, edx;
  FEAT1_REGISTER = 0;
  __get_cpuid (1, &eax, &ebx, &ecx, &edx);
  /* See the load in load_feat1.  */
  __atomic_store_n (&__libat_feat1, FEAT1_REGISTER, __ATOMIC_RELAXED);
  return FEAT1_REGISTER;
}
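The comment points at the consumer: load_feat1 reads the cached CPUID word with a relaxed load and falls back to this initializer while it is still zero. Roughly, as a sketch of that shape rather than the exact libatomic source:

static inline unsigned int
load_feat1 (void)
{
  /* See the store in __libat_feat1_init above.  */
  unsigned int feat1 = __atomic_load_n (&__libat_feat1, __ATOMIC_RELAXED);
  if (feat1 == 0)
    feat1 = __libat_feat1_init ();
  return feat1;
}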
Example #25
 void signalFromInterrupt()
 {
     #if defined ( __CC_ARM   )
         __sync_lock_release(&ready);
     #elif defined   (  __GNUC__  )
         __atomic_store_n (&ready, true, __ATOMIC_SEQ_CST);
     #else
     # error "This compiler is not supported"
     #endif
 }
Example #26
inline void DependableObject::disableSubmission()
{
   _needsSubmission = false;
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   __atomic_store_n(&_submitted, false, __ATOMIC_RELEASE);
#else
   _submitted = false;
   memoryFence();
#endif
}
Example #27
extern "C" void * worker (void *)
{
  int i, j;
  Foo ** a;
  a = new Foo * [nobjects / nthreads];

  for (j = 0; j < niterations; j++) {

    // printf ("%d\n", j);
    for (i = 0; i < (nobjects / nthreads); i ++) {
      a[i] = new Foo[size];
      // These stores are to force the cost of a cache miss, if the data isn't cached.
      //  for this test: ./threadtest 4 1000 100000 8 8
      //  it increases the runtime from 4.35s to 5.09s on my i7-4600U
      __atomic_store_n(&a[i][0].y, i, __ATOMIC_RELEASE);
      for (volatile int d = 0; d < work; d++) {
	volatile int f = 1;
	f = f + f;
	f = f * f;
	f = f + f;
	f = f * f;
	__atomic_store_n(&a[i][d%size].x, f, __ATOMIC_RELEASE);
      }
      assert (a[i]);
    }
    
    for (i = 0; i < (nobjects / nthreads); i ++) {
      delete[] a[i];
      for (volatile int d = 0; d < work; d++) {
	volatile int f = 1;
	f = f + f;
	f = f * f;
	f = f + f;
	f = f * f;
      }
    }
  }

  delete [] a;

  return NULL;
}
Example #28
/**
 * @see mpscifo.h
 */
Msg_t *rmv_raw(MpscFifo_t *pQ) {
  Msg_t *pResult = pQ->pTail;
  Msg_t *pNext = __atomic_load_n(&pResult->pNext, __ATOMIC_SEQ_CST); //ACQUIRE);
  if (pNext != NULL) {
    __atomic_fetch_sub(&pQ->count, 1, __ATOMIC_SEQ_CST);
    __atomic_store_n(&pQ->pTail, pNext, __ATOMIC_SEQ_CST); //RELEASE
  } else {
    pResult = NULL;
  }
  return pResult;
}
Example #29
File: lock.c Project: abumaryam/gcc
void
gomp_unset_nest_lock_25 (omp_nest_lock_25_t *lock)
{
  /* ??? Validate that we own the lock here.  */

  if (--lock->count == 0)
    {
      __atomic_store_n (&lock->owner, 0, MEMMODEL_RELEASE);
      futex_wake (&lock->owner, 1);
    }
}
Example #30
intptr_t __MCFCRT_gthread_unlock_callback_recursive_mutex(intptr_t context){
	__gthread_recursive_mutex_t *const recur_mutex = (__gthread_recursive_mutex_t *)context;
	_MCFCRT_ASSERT(_MCFCRT_GetCurrentThreadId() == __atomic_load_n(&(recur_mutex->__owner), __ATOMIC_RELAXED));

	const size_t old_count = recur_mutex->__count;
	recur_mutex->__count = 0;
	__atomic_store_n(&(recur_mutex->__owner), 0, __ATOMIC_RELAXED);

	__gthread_mutex_unlock(&(recur_mutex->__mutex));
	return (intptr_t)old_count;
}