Example #1
QueueResult queue_pop(queue_p q)
{
    assert(q);
    assert(queue_has_front(q) == QUEUE_TRUE);

    /* get the head */
    node *popped = (node*) atomic_load(&q->head);
    node *compare = popped;

    /* set the tail and head to nothing if they are the same */
    if (atomic_compare_exchange_strong(&q->tail, &compare, 0)) {
        compare = popped;
        /* it is possible for another thread to have pushed after
         * we swap out the tail. In this case, the head will be different
         * from what was popped, so we just do a blind exchange regardless
         * of the result.
         */
        atomic_compare_exchange_strong(&q->head, &compare, 0);
    } else {
        /* tail is different from head, set the head to the next value */
        node *new_head = 0;
        while (!new_head) {
            /* it is possible that the next node has not been assigned yet,
             * so just spin until the pushing thread stores the value.
             */
            new_head = (node *) atomic_load(&popped->next);
        }
        atomic_store(&q->head, new_head);
    }

    free(popped);
    return QUEUE_SUCCESS;
}
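The spin on popped->next above implies a two-step producer: push swaps the tail first and links the old tail's next pointer second. For context, here is a minimal sketch of such a counterpart, assuming hypothetical _Atomic(node *) head/tail/next fields that match the pop; it is not part of the quoted source:

QueueResult queue_push(queue_p q, node *n)
{
    atomic_store(&n->next, NULL);
    /* claim the tail slot first; next is linked second, which is the
     * window queue_pop spins through while popped->next is still NULL */
    node *prev = atomic_exchange(&q->tail, n);
    if (prev)
        atomic_store(&prev->next, n);  /* link old tail -> new node */
    else
        atomic_store(&q->head, n);     /* queue was empty */
    return QUEUE_SUCCESS;
}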
Example #2
File: ao_wasapi.c Project: ThreeGe/mpv
static void thread_resume(struct ao *ao)
{
    struct wasapi_state *state = ao->priv;
    HRESULT hr;

    MP_DBG(state, "Thread Resume\n");
    UINT32 padding = 0;
    hr = IAudioClient_GetCurrentPadding(state->pAudioClient, &padding);
    if (hr != S_OK) {
        MP_ERR(state, "IAudioClient_GetCurrentPadding returned %s\n",
               mp_HRESULT_to_str(hr));
    }

    // Fill the buffer before starting, but only if there is no audio queued to
    // play.  This prevents overfilling the buffer, which leads to problems in
    // exclusive mode
    if (padding < (UINT32) state->bufferFrameCount)
        thread_feed(ao);

    // start feeding next wakeup if something else hasn't been requested
    int expected = WASAPI_THREAD_RESUME;
    atomic_compare_exchange_strong(&state->thread_state, &expected,
                                   WASAPI_THREAD_FEED);
    hr = IAudioClient_Start(state->pAudioClient);
    if (hr != S_OK) {
        MP_ERR(state, "IAudioClient_Start returned %s\n",
               mp_HRESULT_to_str(hr));
    }
}
Example #3
// Sets UsedBMap to signal consumer it can be used.
int queue_push(queue *q, qitem* item)
{
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)(q->buf + q->mapbytes);
	const int diff    = (item - buf);
	const int i       = diff / 64;
	const int zbit    = diff % 64;

	while (1)
	{
		long long int mval = atomic_load(&map[i]);
		long long int nval;

		nval = mval | (((long long int)1) << zbit);

		if (atomic_compare_exchange_strong(&map[i], &mval, nval))
		{
			// printf("PUSHING ON POS i=%d zbit=%d diff=%d rdiff=%lld\n",i, zbit, diff, item-buf);
			break;
		}
		else
			atomic_fetch_add(&q->ct, 1);

		usleep(DELAY_BY);
	}
	return 1;
}
Example #4
void* queueDeqC(Queue *queue, QueueElement *oldQueueHead, int threadId) {
	LOG_PROLOG();
	void *ptr = NULL;
	//printf("queueDeq: queuePtr: %u, q->head: %u, q->tail: %u, q->head->next: %u \n",queue, queue->head, queue->tail, queue->head->next);
	QueueElement *first = oldQueueHead;
	QueueElement *last = queue->tail;
	QueueElement *next = first->next;
	//printf("queueDeq: firstPtr: %u, lastPtr: %u, first->next: %u \n",first, last, next);
	//printf("in deq \n");
	if (first == queue->head) { // head unchanged since we read it; nobody else dequeued
		//printf("first == queue->head\n");
		if (first == last) {    // queue is empty
			clearHazardPointer(globalHPStructure, threadId);
			//printf("first == last\n");
			if (next == NULL) {
				ptr = NULL;
			}
			else {  // someone is enqueuing simultaneously
				//printf("someone is enqueuing simul \n");
				atomic_compare_exchange_strong(&queue->tail, &last, next);
				ptr = NULL;
			}
		}
		else {
			void *element = next->value;
			if (atomic_compare_exchange_strong(&queue->head, &first, next)) {
				//printf("dequeuing successful\n");
				clearHazardPointer(globalHPStructure, threadId);
				//printf("queueDeq: clearing HP of thread %d on successful dequeu\n", threadId);
				freeMemHP(globalHPStructure, threadId, first);
				//printf("queueDeq: thread = %d trying to free = %u\n", threadId, first);
				ptr = element;
			}
			else {
				clearHazardPointer(globalHPStructure, threadId);
				//printf("queueDeq: clearing HP of thread %d on failed dequeu\n", threadId);
				ptr = NULL;
			}
		}
	}
	else {
		clearHazardPointer(globalHPStructure, threadId);
	}
	LOG_EPILOG();
	return ptr;
}
Example #5
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(expected, 123);
  } while(!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
          memory_order_relaxed));
  ASSERT_EQ(456, expected);
}
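What this test pins down: the strong variant fails only on a genuine value mismatch, while the weak variant may also fail spuriously, which is why it is driven from a retry loop above. A standalone sketch of that canonical weak-CAS loop (not part of the test suite), storing a running maximum:

#include <stdatomic.h>

/* Store `value` into *target only if it is larger. A failed CAS writes
 * the observed value back into `expected`, so no separate reload is
 * needed on retry. */
static void atomic_store_max(atomic_int *target, int value)
{
    int expected = atomic_load(target);
    while (expected < value &&
           !atomic_compare_exchange_weak(target, &expected, value)) {
        /* spurious failure or racing writer; `expected` was refreshed */
    }
}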
Example #6
// Packs and stores the header, computing the checksum in the process. We
// compare the current header with the expected provided one to ensure that
// we are not being raced by a corruption occurring in another thread.
static INLINE void compareExchangeHeader(void *Ptr,
                                         UnpackedHeader *NewUnpackedHeader,
                                         UnpackedHeader *OldUnpackedHeader) {
  NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
  PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
  if (UNLIKELY(!atomic_compare_exchange_strong(
          getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
          memory_order_relaxed)))
    dieWithMessage("race on chunk header at address %p\n", Ptr);
}
Example #7
// Reserves a free slot in ReserveBMap
qitem* queue_get_item(queue *q)
{
	int i;
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)q->buf;
	int zbit;
	qitem *r;

	while(1)
	{
		// Reserve space
		for (i = 0; i < q->map_elements; i++)
		{
			long long int mval = atomic_load(&map[i]);
			long long int nval;

			// if no zero bit go to next element
			if (!(zbit = ffz(mval)))
			{
				continue;
			}

			nval = mval | (((long long int)1) << (--zbit));

			// update map val with bit set. 
			// If successful we are done.
			if (atomic_compare_exchange_strong(&map[i], &mval, nval))
			{
				// printf("ZBIT %d %lld %lld\n",zbit,mval, sizeof(mval));
				atomic_fetch_add(&q->size, 1);
				break;
			}
			// Unable to exchange, go again for same index. 
			else
			{
				atomic_fetch_add(&q->ct, 1);
				i--;
			}
		}

		if (i < q->map_elements)
		{
			r = &buf[i*64+zbit];
			break;
		}
		else
		{
			usleep(100);
		}
	}
	return r;
}
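ffz() above is not a standard C function; judging from the call sites it returns the 1-based position of the first zero bit, or 0 when all 64 bits are set. A portable sketch under that assumption, built on ffsll() (available in <strings.h> on glibc and POSIX.1-2024 systems):

#include <strings.h>

/* The first zero bit of v is the first set bit of its complement;
 * ffsll() is already 1-based and returns 0 when ~v == 0. */
static int ffz(long long int v)
{
    return ffsll(~v);
}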
Example #8
void wbuf_release(char *buf1, int index)
{
	atomic_llong *map = (atomic_llong*)buf1;
	int i = index / 64;
	int bit = index % 64;

	while (1)
	{
		long long int mval = atomic_load(&map[i]);
		long long int nval = mval & (~(((long long int)1) << bit));
		if (atomic_compare_exchange_strong(&map[i], &mval, nval))
			break;
	}
}
Example #9
int wbuf_put(const int npages, char *buf1, char *data, int *tries)
{
	int i,t = 0;
	char *buf = buf1+WBUF_MAPBYTES(npages);
	atomic_llong *map = (atomic_llong*)buf1;
	int zbit;

	while(1)
	{
		// Reserve space
		for (i = 0; i < WBUF_MAP_ELEMENTS(npages); i++)
		{
			long long int mval = atomic_load(&map[i]);
			long long int nval;

			// if no zero bit go to next element
			if (!(zbit = ffz(mval)))
				continue;

			nval = mval | (((long long int)1) << (--zbit));

			// update map val with bit set. 
			// If successful we are done.
			if (atomic_compare_exchange_strong(&map[i], &mval, nval))
				break;
			// Unable to exchange, go again for same index. 
			else
			{
				i--;
				t++;
			}
		}

		if (i < WBUF_MAP_ELEMENTS(npages))
		{
			// Copy data
			memcpy(buf + i*64*PGSZ + zbit*PGSZ, data, PGSZ);
			break;
		}
		else
		{
			usleep(100);
		}
	}

	if (tries != NULL)
		*tries = t;

	return i*64 + zbit;
}
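The index wbuf_put returns encodes both the map element and the bit, so either side can reconstruct the slot address. An illustrative round trip, assuming the same WBUF_MAPBYTES and PGSZ macros used above:

static void wbuf_roundtrip(const int npages, char *buf1, char *page)
{
	int tries = 0;
	int idx = wbuf_put(npages, buf1, page, &tries);
	/* the data region starts after the bitmap; one page per slot */
	char *slot = buf1 + WBUF_MAPBYTES(npages) + (size_t)idx * PGSZ;
	(void)slot;  /* ... consumer reads PGSZ bytes from slot ... */
	wbuf_release(buf1, idx);  /* clear the bit so the slot can be reused */
}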
Example #10
// Deletes a pthread_key_t. Note that the standard mandates that this does
// not call the destructors for non-NULL key values. Instead, it is the
// responsibility of the caller to properly dispose of the corresponding data
// and resources, using any means it finds suitable.
int pthread_key_delete(pthread_key_t key) {
  if (__predict_false(!KeyInValidRange(key))) {
    return EINVAL;
  }
  key &= ~KEY_VALID_FLAG;
  // Increase seq to invalidate values in all threads.
  uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
  if (SeqOfKeyInUse(seq)) {
    if (atomic_compare_exchange_strong(&key_map[key].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
      return 0;
    }
  }
  return EINVAL;
}
Example #11
File: tss.c Project: saltstar/smartnix
int tss_create(tss_t* k, void (*dtor)(void*)) {
    unsigned i = (uintptr_t)&k / 16 % PTHREAD_KEYS_MAX;
    unsigned j = i;

    if (!dtor)
        dtor = nodtor;
    do {
        key_t expected = NULL;
        if (atomic_compare_exchange_strong(&keys[j], &expected, dtor)) {
            *k = j;
            return 0;
        }
    } while ((j = (j + 1) % PTHREAD_KEYS_MAX) != i);
    return EAGAIN;
}
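For reference, a typical caller of this API: the destructor passed to tss_create runs at thread exit for each thread whose stored value is non-NULL. An illustrative sketch (this implementation returns 0 on success and EAGAIN when every key slot is taken):

#include <threads.h>
#include <stdlib.h>

static tss_t buf_key;

static int init_buf_key(void)
{
    /* free() is invoked on each thread's stored pointer at thread exit */
    return tss_create(&buf_key, free);
}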
Example #12
bool queueEnqC(Queue *queue, void* element, int threadId) {
	LOG_PROLOG();
	LOG_INFO("%d", threadId);
	QueueElement *queueElement = createNode(element);
	//printf("queueEnq: value = %u, blkPtr = %u, next ptr = %u\n", queueElement->value, element, queueElement->next);
	QueueElement *last = queue->tail;
	QueueElement *next = last->next;
	bool flag = false;
	if (last == queue->tail) {
		if (next == NULL) {
			if (atomic_compare_exchange_strong(&last->next, &next, queueElement)) {
				atomic_compare_exchange_strong(&queue->tail, &last, queueElement);
				//printf("queueEnq: tailValue = %d, headValue = %d\n", ((Block*)(queue->tail->value))->memBlock, ((Block*)(queue->head->next->value))->memBlock);
				//printf("queueEnq: tailValue = %d \n", ((Block*)(queue->tail->value))->memBlock);
				flag = true;
			}
		}
		else {
			atomic_compare_exchange_strong(&queue->tail, &last, next);
		}
	}
	LOG_EPILOG();
	return flag;
}
Example #13
File: avcodec.c Project: etix/vlc
static vlc_vdp_video_field_t *GetSurface(vlc_va_t *va)
{
    vlc_va_sys_t *sys = va->sys;
    vlc_vdp_video_field_t *f;

    for (unsigned i = 0; (f = sys->pool[i]) != NULL; i++)
    {
        uintptr_t expected = 1;

        if (atomic_compare_exchange_strong(&f->frame->refs, &expected, 2))
        {
            vlc_vdp_video_field_t *field = vlc_vdp_video_copy(f);
            atomic_fetch_sub(&f->frame->refs, 1);
            return field;
        }
    }
    return NULL;
}
Example #14
int add_nonblocking_queue(NONBLOCKING_QUEUE* queue, ELEMENT* element)
{
    // Get the head and tail
    unsigned long long head = atomic_load(&(queue->head));
    unsigned long long tail = atomic_load(&(queue->tail));
    int tail_index = tail % MAX_ELEMENTS;

    // Make sure the spot is free and we are first
    if (tail - head < MAX_ELEMENTS &&
            !((queue->element_array)[tail_index]) &&
            atomic_compare_exchange_strong(&(queue->tail), &tail, tail + 1))
    {
        (queue->element_array)[tail_index] = element;
        return 1;
    }

    // Fail
    return 0;
}
Example #15
ELEMENT* remove_nonblocking_queue(NONBLOCKING_QUEUE* queue)
{
    // Get the head and tail
    unsigned long long head = atomic_load(&(queue->head));
    unsigned long long tail = atomic_load(&(queue->tail));
    int head_index = head % MAX_ELEMENTS;

    // Make sure the slot is filled and we are first
    if (tail - head > 0 &&
            (queue->element_array)[head_index] &&
            atomic_compare_exchange_strong(&(queue->head), &head, head + 1))
    {
        ELEMENT* element = (queue->element_array)[head_index];
        (queue->element_array)[head_index] = NULL;
        return element;
    }

    // Fail
    return NULL;
}
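Both operations above are single-shot: they return 0 or NULL whenever the slot test or the index CAS loses a race, and the caller is expected to retry. A minimal polling wrapper, shown for the consumer side (illustrative only):

static ELEMENT* remove_blocking_queue(NONBLOCKING_QUEUE* queue)
{
    ELEMENT* element;
    while ((element = remove_nonblocking_queue(queue)) == NULL)
        ;  /* spin; a production caller would yield or back off here */
    return element;
}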
Example #16
File: ao_wasapi.c Project: BILIHUBSU/mpv
static void thread_reset(struct ao *ao)
{
    struct wasapi_state *state = ao->priv;
    HRESULT hr;
    MP_DBG(state, "Thread Reset\n");
    hr = IAudioClient_Stop(state->pAudioClient);
    /* we may get S_FALSE if the stream is already stopped */
    if (hr != S_OK && hr != S_FALSE)
        MP_ERR(state, "IAudioClient_Stop returned: %s\n", mp_HRESULT_to_str(hr));

    /* we may get S_FALSE if the stream is already reset */
    hr = IAudioClient_Reset(state->pAudioClient);
    if (hr != S_OK && hr != S_FALSE)
        MP_ERR(state, "IAudioClient_Reset returned: %s\n", mp_HRESULT_to_str(hr));

    atomic_store(&state->sample_count, 0);
    // start feeding next wakeup if something else hasn't been requested
    int expected = WASAPI_THREAD_RESET;
    atomic_compare_exchange_strong(&state->thread_state, &expected, WASAPI_THREAD_FEED);
}
Example #17
int metal_condition_wait(struct metal_condition *cv,
			 struct metal_mutex *m)
{
	struct metal_mutex *tmpm = 0;
	int v;

	/* Check if the mutex has been acquired */
	if (!cv || !m || !metal_mutex_is_acquired(m))
		return -EINVAL;

	if (!atomic_compare_exchange_strong(&cv->m, &tmpm, m)) {
		if (m != tmpm)
			return -EINVAL;
	}

	v = atomic_load(&cv->v);

	/* Release the mutex first. */
	metal_mutex_release(m);
	while (atomic_load(&cv->v) == v);
	/* Acquire the mutex again. */
	metal_mutex_acquire(m);
	return 0;
}
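The wait loop blocks until cv->v changes, so a waker only has to bump that counter. A hypothetical counterpart consistent with the loop above; libmetal's real signal/broadcast implementation may differ:

static int condition_notify_all(struct metal_condition *cv)
{
	if (!cv)
		return -EINVAL;
	/* any change to v releases threads spinning on atomic_load(&cv->v) == v */
	atomic_fetch_add(&cv->v, 1);
	return 0;
}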
Example #18
// Item is no longer being used. Clear bits.
// First we must clear used bmap, then reserve bmap.
void queue_recycle(queue *q, qitem* item)
{
	qitem *buf         = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *rmap = (atomic_llong*)(q->buf);
	atomic_llong *umap = (atomic_llong*)(q->buf + q->mapbytes);
	atomic_llong *map = umap;
	const int diff    = (item - buf);
	const int i       = diff / 64;
	const int zbit    = diff % 64;

	// printf("Recycle diff=%d, i=%d, zbit=%d %lld %lld\n",diff, i, zbit, (long long int)buf, (long long int)item);
	while (1)
	{
		long long int mval = atomic_load(&map[i]);
		long long int nval;

		nval = mval & (~(((long long int)1) << zbit));

		if (atomic_compare_exchange_strong(&map[i], &mval, nval))
		{
			if (map == umap)
			{
				map = rmap;
			}
			else
			{
				break;
			}
			continue;
		}
		else
			atomic_fetch_add(&q->ct, 1);

		usleep(DELAY_BY);
	}
}
Example #19
// The C builtin only returns a boolean.
int CAS_SC(atomic_loc *tgt, int c, int v){
  return atomic_compare_exchange_strong(&(tgt->ai), &c, v);
}
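Because the wrapper takes the expected value by copy, the updated expectation that a failed CAS writes back is lost, so callers must reload before retrying. An illustrative use, assuming atomic_loc wraps an atomic int field ai as above:

static void inc_sc(atomic_loc *tgt)
{
  int cur;
  do {
    cur = atomic_load(&(tgt->ai));  /* reload; CAS_SC cannot hand it back */
  } while (!CAS_SC(tgt, cur, cur + 1));
}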
Example #20
File: pr69904.c Project: 0day-ci/gcc
int
main (void)
{
  glob = atomic_compare_exchange_strong (&foo, &bar, 0);
  return glob;
}
Example #21
char *nl_langinfo_l(nl_item item, locale_t locale) {
  switch (item) {
    case CODESET:
      return (char *)locale->ctype->codeset;
#define STRING(item, category, field) \
  case item:                          \
    return (char *)COMPILE_STRING(locale, category, field)
#define WSTRING(item, category, field) \
  case item:                           \
    return (char *)COMPILE_WSTRING(locale, category, field)
      WSTRING(D_T_FMT, time, d_t_fmt);
      WSTRING(D_FMT, time, d_fmt);
      WSTRING(T_FMT, time, t_fmt);
      WSTRING(T_FMT_AMPM, time, t_fmt_ampm);
      WSTRING(AM_STR, time, am_str);
      WSTRING(PM_STR, time, pm_str);
      WSTRING(DAY_1, time, day[0]);
      WSTRING(DAY_2, time, day[1]);
      WSTRING(DAY_3, time, day[2]);
      WSTRING(DAY_4, time, day[3]);
      WSTRING(DAY_5, time, day[4]);
      WSTRING(DAY_6, time, day[5]);
      WSTRING(DAY_7, time, day[6]);
      WSTRING(ABDAY_1, time, abday[0]);
      WSTRING(ABDAY_2, time, abday[1]);
      WSTRING(ABDAY_3, time, abday[2]);
      WSTRING(ABDAY_4, time, abday[3]);
      WSTRING(ABDAY_5, time, abday[4]);
      WSTRING(ABDAY_6, time, abday[5]);
      WSTRING(ABDAY_7, time, abday[6]);
      WSTRING(MON_1, time, mon[0]);
      WSTRING(MON_2, time, mon[1]);
      WSTRING(MON_3, time, mon[2]);
      WSTRING(MON_4, time, mon[3]);
      WSTRING(MON_5, time, mon[4]);
      WSTRING(MON_6, time, mon[5]);
      WSTRING(MON_7, time, mon[6]);
      WSTRING(MON_8, time, mon[7]);
      WSTRING(MON_9, time, mon[8]);
      WSTRING(MON_10, time, mon[9]);
      WSTRING(MON_11, time, mon[10]);
      WSTRING(MON_12, time, mon[11]);
      WSTRING(ABMON_1, time, abmon[0]);
      WSTRING(ABMON_2, time, abmon[1]);
      WSTRING(ABMON_3, time, abmon[2]);
      WSTRING(ABMON_4, time, abmon[3]);
      WSTRING(ABMON_5, time, abmon[4]);
      WSTRING(ABMON_6, time, abmon[5]);
      WSTRING(ABMON_7, time, abmon[6]);
      WSTRING(ABMON_8, time, abmon[7]);
      WSTRING(ABMON_9, time, abmon[8]);
      WSTRING(ABMON_10, time, abmon[9]);
      WSTRING(ABMON_11, time, abmon[10]);
      WSTRING(ABMON_12, time, abmon[11]);
      WSTRING(ERA, time, era);
      WSTRING(ERA_D_FMT, time, era_d_fmt);
      WSTRING(ERA_D_T_FMT, time, era_d_t_fmt);
      WSTRING(ERA_T_FMT, time, era_t_fmt);
      WSTRING(ALT_DIGITS, time, alt_digits);
      WSTRING(RADIXCHAR, numeric, decimal_point);
      WSTRING(THOUSEP, numeric, thousands_sep);
      STRING(YESEXPR, messages, yesexpr);
      STRING(NOEXPR, messages, noexpr);
#undef STRING
#undef WSTRING
    case CRNCYSTR: {
      // CRNCYSTR should return the currency symbol used by the locale,
      // preceded by a character indicating where the currency symbol
      // should be placed.
      //
      // Instead of storing it separately, we can derive it from several
      // existing properties.

      struct lc_compiled *compiled = __locale_get_compiled(locale);
      if (compiled == NULL)
        return NULL;

      // Return an already existing copy if available.
      char *old_crncystr = atomic_load(&compiled->crncystr);
      if (old_crncystr != NULL)
        return (char *)old_crncystr;

      // String only makes sense if there is a currency symbol and its
      // placement is uniform.
      const struct lc_monetary *monetary = locale->monetary;
      const wchar_t *currency_symbol = monetary->currency_symbol;
      if (currency_symbol == NULL || *currency_symbol == '\0' ||
          monetary->p_cs_precedes != monetary->n_cs_precedes)
        return (char *)"";

      // Determine the character preceding the currency symbol,
      // indicating the position.
      char precedes = monetary->p_cs_precedes;
      char position;
      if (precedes == CHAR_MAX) {
        if (wcscmp(currency_symbol, monetary->mon_decimal_point) != 0)
          return (char *)"";
        position = '.';
      } else {
        position = precedes ? '-' : '+';
      }

      // Generate new string.
      char *new_crncystr;
      if (asprintf_l(&new_crncystr, locale, "%c%ls", position,
                     currency_symbol) == -1)
        return (char *)"";

      // Store the new copy and return it.
      if (!atomic_compare_exchange_strong(&compiled->crncystr, &old_crncystr,
                                          new_crncystr)) {
        // Race condition. Another thread created a copy at the same time.
        free(new_crncystr);
        return old_crncystr;
      }
      return new_crncystr;
    }
    default:
      return (char *)"";
  }
}
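The CRNCYSTR branch ends with a publish-or-discard idiom worth isolating: build the value privately, CAS it into the shared slot, and free it if another thread got there first. Distilled into a standalone sketch, assuming an _Atomic(char *) slot initialized to NULL:

#include <stdatomic.h>
#include <stdlib.h>

static char *publish_once(char *_Atomic *slot, char *fresh)
{
  char *old = NULL;
  if (atomic_compare_exchange_strong(slot, &old, fresh))
    return fresh;  /* we won the race; our copy is now the shared one */
  free(fresh);     /* lost the race; the CAS left the winner's copy in old */
  return old;
}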
Example #22
void caml_empty_minor_heap_domain (struct domain* domain)
{
  CAMLnoalloc;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  unsigned rewrite_successes = 0;
  unsigned rewrite_failures = 0;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  uintnat minor_allocated_bytes = young_end - young_ptr;
  struct oldify_state st = {0};
  value **r;
  struct caml_ephe_ref_elt *re;
  struct caml_custom_elt *elt;

  st.promote_domain = domain;

  if (minor_allocated_bytes != 0) {
    uintnat prev_alloc_words = domain_state->allocated_words;

#ifdef DEBUG
    /* In DEBUG mode, verify that the minor_ref table contains all
       young-to-young pointers from older to younger objects */
    {
    struct addrmap young_young_ptrs = ADDRMAP_INIT;
    mlsize_t i;
    value iter;
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      *caml_addrmap_insert_pos(&young_young_ptrs, (value)*r) = 1;
    }
    for (iter = (value)young_ptr;
         iter < (value)young_end;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      if (hd != 0) {
        value curr = Val_hp(iter);
        tag_t tag = Tag_hd (hd);

        if (tag < No_scan_tag && tag != Cont_tag) {
          // FIXME: should scan Cont_tag
          for (i = 0; i < Wosize_hd(hd); i++) {
            value* f = Op_val(curr) + i;
            if (Is_block(*f) && is_in_interval(*f, young_ptr, young_end) &&
                *f < curr) {
              CAMLassert(caml_addrmap_contains(&young_young_ptrs, (value)f));
            }
          }
        }
      }
    }
    caml_addrmap_clear(&young_young_ptrs);
    }
#endif

    caml_gc_log ("Minor collection of domain %d starting", domain->state->id);
    caml_ev_begin("minor_gc");
    caml_ev_begin("minor_gc/roots");
    caml_do_local_roots(&oldify_one, &st, domain, 0);

    caml_scan_stack(&oldify_one, &st, domain_state->current_stack);

    for (r = minor_tables->major_ref.base; r < minor_tables->major_ref.ptr; r++) {
      value x = **r;
      oldify_one (&st, x, &x);
    }
    caml_ev_end("minor_gc/roots");

    caml_ev_begin("minor_gc/promote");
    oldify_mopup (&st);
    caml_ev_end("minor_gc/promote");

    caml_ev_begin("minor_gc/ephemerons");
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      CAMLassert (Ephe_domain(re->ephe) == domain);
      if (re->offset == CAML_EPHE_DATA_OFFSET) {
        /* Data field has already been handled in oldify_mopup. Handle only
         * keys here. */
        continue;
      }
      value* key = &Op_val(re->ephe)[re->offset];
      if (*key != caml_ephe_none && Is_block(*key) &&
          is_in_interval(*key, young_ptr, young_end)) {
        resolve_infix_val(key);
        if (Hd_val(*key) == 0) { /* value copied to major heap */
          *key = Op_val(*key)[0];
        } else {
          CAMLassert(!ephe_check_alive_data(re,young_ptr,young_end));
          *key = caml_ephe_none;
          Ephe_data(re->ephe) = caml_ephe_none;
        }
      }
    }
    caml_ev_end("minor_gc/ephemerons");

    caml_ev_begin("minor_gc/update_minor_tables");
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value v = **r;
      if (Is_block (v) && is_in_interval ((value)Hp_val(v), young_ptr, young_end)) {
        value vnew;
        header_t hd = Hd_val(v);
        int offset = 0;
        if (Tag_hd(hd) == Infix_tag) {
          offset = Infix_offset_hd(hd);
          v -= offset;
        }
        CAMLassert (Hd_val(v) == 0);
        vnew = Op_val(v)[0] + offset;
        CAMLassert (Is_block(vnew) && !Is_minor(vnew));
        CAMLassert (Hd_val(vnew));
        if (Tag_hd(hd) == Infix_tag) {
          CAMLassert(Tag_val(vnew) == Infix_tag);
          v += offset;
        }
        if (caml_domain_alone()) {
          **r = vnew;
          ++rewrite_successes;
        } else {
          if (atomic_compare_exchange_strong((atomic_value*)*r, &v, vnew))
            ++rewrite_successes;
          else
            ++rewrite_failures;
        }
      }
    }
    CAMLassert (!caml_domain_alone() || rewrite_failures == 0);
    caml_ev_end("minor_gc/update_minor_tables");

    caml_ev_begin("minor_gc/finalisers");
    caml_final_update_last_minor(domain);
    /* Run custom block finalisation of dead minor values */
    for (elt = minor_tables->custom.base; elt < minor_tables->custom.ptr; elt++) {
      value v = elt->block;
      if (Hd_val(v) == 0) {
        /* !!caml_adjust_gc_speed(elt->mem, elt->max); */
      } else {
        /* Block will be freed: call finalisation function, if any */
        void (*final_fun)(value) = Custom_ops_val(v)->finalize;
        if (final_fun != NULL) final_fun(v);
      }
    }
    caml_final_empty_young(domain);
    caml_ev_end("minor_gc/finalisers");


    clear_table ((struct generic_table *)&minor_tables->major_ref);
    clear_table ((struct generic_table *)&minor_tables->minor_ref);
    clear_table ((struct generic_table *)&minor_tables->ephe_ref);
    clear_table ((struct generic_table *)&minor_tables->custom);

    domain_state->young_ptr = domain_state->young_end;
    domain_state->stat_minor_words += Wsize_bsize (minor_allocated_bytes);
    domain_state->stat_minor_collections++;
    domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;

    caml_ev_end("minor_gc");
    caml_gc_log ("Minor collection of domain %d completed: %2.0f%% of %u KB live, rewrite: successes=%u failures=%u",
                 domain->state->id,
                 100.0 * (double)st.live_bytes / (double)minor_allocated_bytes,
                 (unsigned)(minor_allocated_bytes + 512)/1024, rewrite_successes, rewrite_failures);
  }
  else {
    caml_final_empty_young(domain);
    caml_gc_log ("Minor collection of domain %d: skipping", domain->state->id);
  }

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) domain_state->young_start;
         p < (value *) domain_state->young_end; ++p){
      *p = Debug_free_minor;
    }
  }
#endif
}
Example #23
CAMLexport value caml_promote(struct domain* domain, value root)
{
  value **r;
  value iter, f;
  mlsize_t i;
  caml_domain_state* domain_state = domain->state;
  struct caml_minor_tables *minor_tables = domain_state->minor_tables;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  float percent_to_scan;
  uintnat prev_alloc_words = domain_state->allocated_words;
  struct oldify_state st = {0};
  struct caml_ephe_ref_elt *re;

  /* Integers are already shared */
  if (Is_long(root))
    return root;

  /* Objects which are in the major heap are already shared. */
  if (!Is_minor(root))
    return root;

  st.oldest_promoted = (value)domain_state->young_start;
  st.promote_domain = domain;

  CAMLassert(caml_owner_of_young_block(root) == domain);
  oldify_one (&st, root, &root);
  oldify_mopup (&st);

  CAMLassert (!Is_minor(root));
  /* FIXME: surely a newly-allocated root is already darkened? */
  caml_darken(0, root, 0);

  percent_to_scan = st.oldest_promoted <= (value)young_ptr ? 0.0 :
    (((float)(st.oldest_promoted - (value)young_ptr)) * 100.0 /
     ((value)young_end - (value)domain_state->young_start));

  if (percent_to_scan > Percent_to_promote_with_GC) {
    caml_gc_log("caml_promote: forcing minor GC. %%_minor_to_scan=%f", percent_to_scan);
    // ???
    caml_empty_minor_heap_domain (domain);
  } else {
    caml_do_local_roots (&forward_pointer, st.promote_domain, domain, 1);
    caml_scan_stack (&forward_pointer, st.promote_domain, domain_state->current_stack);

    /* Scan major to young pointers. */
    for (r = minor_tables->major_ref.base;
         r < minor_tables->major_ref.ptr; r++) {
      value old_p = **r;
      if (Is_block(old_p) && is_in_interval(old_p,young_ptr,young_end)) {
        value new_p = old_p;
        forward_pointer (st.promote_domain, new_p, &new_p);
        if (old_p != new_p) {
          if (caml_domain_alone())
            **r = new_p;
          else
            atomic_compare_exchange_strong((atomic_value*)*r, &old_p, new_p);
        }
      }
    }

    /* Scan ephemeron ref table */
    for (re = minor_tables->ephe_ref.base;
         re < minor_tables->ephe_ref.ptr; re++) {
      value* key = &Op_val(re->ephe)[re->offset];
      if (Is_block(*key) && is_in_interval(*key,young_ptr,young_end)) {
        forward_pointer (st.promote_domain, *key, key);
      }
    }

    /* Scan young to young pointers */
    for (r = minor_tables->minor_ref.base; r < minor_tables->minor_ref.ptr; r++) {
      forward_pointer (st.promote_domain, **r, *r);
    }

    /* Scan newer objects */
    for (iter = (value)young_ptr;
         iter <= st.oldest_promoted;
         iter = next_minor_block(domain_state, iter)) {
      value hd = Hd_hp(iter);
      value curr = Val_hp(iter);
      if (hd != 0) {
        tag_t tag = Tag_hd (hd);
        if (tag == Cont_tag) {
          struct stack_info* stk = Ptr_val(Op_val(curr)[0]);
          if (stk != NULL)
            caml_scan_stack(&forward_pointer, st.promote_domain, stk);
        } else if (tag < No_scan_tag) {
          for (i = 0; i < Wosize_hd (hd); i++) {
            f = Op_val(curr)[i];
            if (Is_block(f)) {
              forward_pointer (st.promote_domain, f,((value*)curr) + i);
            }
          }
        }
      }
    }
  }
  domain_state->stat_promoted_words += domain_state->allocated_words - prev_alloc_words;
  return root;
}