void spin_lock(atomic_t *lock, atomic_int value)
{

    uint32_t  i, n;

    for ( ;; ) {

        if (*lock == 0 && atomic_cas(lock, 0, value)) {
            return;
        }

        if (cpu_num > 1) {

            for (n = 1; n < pid; n <<= 1) {

                for (i = 0; i < n; i++) {
                    cpu_pause();
                }

                if (*lock == 0 && atomic_cas(lock, 0, value)) {
                    return;
                }
            }
        }

        /* sched_yield() causes the calling thread to relinquish the CPU.
         * The thread is moved to the end of the queue for its static
         * priority and a new thread gets to run. */
        sched_yield();
    }
}
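The snippet above relies on project-specific helpers (atomic_cas(), cpu_pause(), cpu_num and pid are all defined elsewhere). As a readability aid only, here is a minimal sketch of what the two helpers might look like with GCC/Clang builtins; this is an assumption, not the original project's code:

typedef volatile long atomic_t;

/* Sketch: returns nonzero only if *lock held old_val and was set to new_val. */
static inline int atomic_cas(atomic_t *lock, long old_val, long new_val)
{
    return __sync_bool_compare_and_swap(lock, old_val, new_val);
}

/* Sketch: busy-wait hint; on x86 the pause instruction eases the spin loop. */
static inline void cpu_pause(void)
{
#if defined(__x86_64__) || defined(__i386__)
    __asm__ __volatile__ ("pause");
#endif
}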
/* Test atomic_cas with a correct old value */
static void test_atomic_cas_same(void)
{
    atomic_int_t res = ATOMIC_INIT(0);

    TEST_ASSERT_EQUAL_INT(1, atomic_cas(&res, 0, 12345));
    TEST_ASSERT_EQUAL_INT(12345, ATOMIC_VALUE(res));
    TEST_ASSERT_EQUAL_INT(1, atomic_cas(&res, 12345, -9876));
    TEST_ASSERT_EQUAL_INT(-9876, ATOMIC_VALUE(res));
    TEST_ASSERT_EQUAL_INT(1, atomic_cas(&res, -9876, -9876));
    TEST_ASSERT_EQUAL_INT(-9876, ATOMIC_VALUE(res));
    TEST_ASSERT_EQUAL_INT(1, atomic_cas(&res, -9876, 0));
    TEST_ASSERT_EQUAL_INT(0, ATOMIC_VALUE(res));
}
/* Test atomic_cas with a non-matching old value */
static void test_atomic_cas_diff(void)
{
    atomic_int_t res = ATOMIC_INIT(32767);

    TEST_ASSERT_EQUAL_INT(0, atomic_cas(&res, 22222, 12345));
    TEST_ASSERT_EQUAL_INT(32767, ATOMIC_VALUE(res));
    ATOMIC_VALUE(res) = -12345;
    TEST_ASSERT_EQUAL_INT(0, atomic_cas(&res, 12345, 12345));
    TEST_ASSERT_EQUAL_INT(-12345, ATOMIC_VALUE(res));
    TEST_ASSERT_EQUAL_INT(0, atomic_cas(&res, 12345, 12345));
    TEST_ASSERT_EQUAL_INT(-12345, ATOMIC_VALUE(res));
    TEST_ASSERT_EQUAL_INT(0, atomic_cas(&res, 12345, -12345));
    TEST_ASSERT_EQUAL_INT(-12345, ATOMIC_VALUE(res));
}
Example #4
/* Helper, returns an index into the events array from the ceq ring.  -1 if the
 * ring was empty when we looked (could be filled right after we looked).  This
 * is the same algorithm used with BCQs, but with a magic value (-1) instead of
 * a bool to track whether or not the slot is ready for consumption. */
static int32_t get_ring_idx(struct ceq *ceq)
{
	long pvt_idx, prod_idx;
	int32_t ret;
	do {
		prod_idx = atomic_read(&ceq->prod_idx);
		pvt_idx = atomic_read(&ceq->cons_pvt_idx);
		if (__ring_empty(prod_idx, pvt_idx))
			return -1;
	} while (!atomic_cas(&ceq->cons_pvt_idx, pvt_idx, pvt_idx + 1));
	/* We claimed our slot, which is pvt_idx.  The new cons_pvt_idx is advanced
	 * by 1 for the next consumer.  Now we need to wait on the kernel to fill
	 * the value: */
	while ((ret = ceq->ring[pvt_idx & (ceq->ring_sz - 1)]) == -1)
		cpu_relax();
	/* Set the value back to -1 for the next time the slot is used */
	ceq->ring[pvt_idx & (ceq->ring_sz - 1)] = -1;
	/* We now have our entry.  We need to make sure the pub_idx is updated.  All
	 * consumers are doing this.  We can just wait on all of them to update the
	 * cons_pub to our location, then we update it to the next.
	 *
	 * We're waiting on other vcores, but we don't know which one(s). */
	while (atomic_read(&ceq->cons_pub_idx) != pvt_idx)
		cpu_relax_vc(vcore_id());	/* wait on all of them */
	/* This is the only time we update cons_pub.  We also know no one else is
	 * updating it at this moment; the while loop acts as a lock, such that
	 * no one gets to this point until pub == their pvt_idx, all of which are
	 * unique. */
	/* No rwmb needed, it's the same variable (cons_pub) */
	atomic_set(&ceq->cons_pub_idx, pvt_idx + 1);
	return ret;
}
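The __ring_empty() helper is not shown; a plausible one-line definition, inferred from how prod_idx and cons_pvt_idx are used above (an assumption, not the original header):

/* Assumed helper: the ring is empty once the consumer's private index has
 * caught up with the producer index. */
static inline bool __ring_empty(long prod_idx, long cons_pvt_idx)
{
	return prod_idx == cons_pvt_idx;
}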
Example #5
static void atomic_set(sp_counted_base_atomic_type volatile *pw,int v)
{
    long vo=pw->li;
    while(!atomic_cas(pw,vo,v)) {
        vo=pw->li;
    }
}
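The same read-then-CAS retry loop generalizes to any read-modify-write; a hedged sketch in the same style, purely illustrative and not part of the original sp_counted_base code:

/* Sketch: add n and return the value observed before the addition. */
static int atomic_add_value(sp_counted_base_atomic_type volatile *pw, int n)
{
    long vo=pw->li;
    while(!atomic_cas(pw,vo,vo+n)) {
        vo=pw->li;
    }
    return vo;
}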
Example #6
void lengthfixed_mtmp::free(void *p, size_t sz)
{
    NUT_DEBUGGING_ASSERT_ALIVE;
    assert(NULL != p && _granularity == std::max(sz, sizeof(void*)));

    while(true)
    {
        if (_free_num >= (int) MAX_FREE_NUM) // NOTE _free_num is only a rough reference
        {
            ma_free(_alloc, p, _granularity);
            return;
        }

        const TagedPtr<void> old_head(_head.cas);
        *reinterpret_cast<void**>(p) = old_head.ptr;
        const TagedPtr<void> new_head(p, old_head.tag + 1);
        if (atomic_cas(&(_head.cas), old_head.cas, new_head.cas))
        {
            // NOTE _free_num is not reliable under multithreading
            if (NULL == old_head.ptr)
                _free_num = 1;
            else
                ++_free_num;
            return;
        }
    }
}
Example #7
int nomad_set_local_node_id(uint64_t newid)
{
	if (atomic_cas(&local_node_id, 0, newid))
		return -EBUSY;

	return 0;
}
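Here a nonzero result from atomic_cas() evidently signals that local_node_id was already set, hence -EBUSY. A hypothetical caller (the surrounding names are illustrative assumptions):

	/* Hypothetical: assign the node id exactly once, e.g. at mount time. */
	int err = nomad_set_local_node_id(new_node_id);
	if (err == -EBUSY)
		return err;	/* keep the id that was set earlier */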
Example #8
static file_t* file_get_free()
{
  for (file_t* f = files; f < files + MAX_FILES; f++)
    if (atomic_cas(&f->refcnt, 0, 2) == 0)
      return f;
  panic("fail to get a free file");
  return NULL;
}
Example #9
static int atomic_get(sp_counted_base_atomic_type volatile *pw)
{
    long v=pw->li;
    while(!atomic_cas(pw,v,v)) {
        v=pw->li;
    }
    return v;
}
Example #10
/** Open an input device.
 * @param _device	Device being opened.
 * @param datap		Where to store handle-specific data pointer (unused).
 * @return		Status code describing result of the operation. */
static status_t input_device_open(device_t *_device, void **datap) {
	input_device_t *device = _device->data;

	if(atomic_cas(&device->open, 0, 1) != 0) {
		return STATUS_IN_USE;
	}

	return STATUS_SUCCESS;
}
Example #11
static __inline__ bool atomic_acquire(atomic_t *v)
{
#if defined(_ARCH_X86_64_) || defined(_ARCH_X86_)
    //For some reason gcc targeting x86 generates code for atomic_cas() that requires fewer registers
    return atomic_cas(v, 1, 0);
#else
    return __sync_lock_test_and_set(&v->counter, 1) == 0;
#endif
}
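A matching release helper is not shown; a hypothetical counterpart built from the documented GCC builtin pair (the x86 atomic_cas() fast path is left out because that codebase's argument convention for atomic_cas() is not visible in this snippet):

static __inline__ void atomic_release(atomic_t *v)
{
    /* Hypothetical: reset the counter to 0 with release semantics so the
     * next __sync_lock_test_and_set() observes 0 and succeeds. */
    __sync_lock_release(&v->counter);
}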
Example #12
inline int atomic_conditional_increment( sp_counted_base_atomic_type * pw)
{
    for(;;) {
        int rv = atomic_get(pw);
        if(rv == 0)
            return 0;
        if(atomic_cas(pw,rv,rv+1))
            return rv;
    }
}
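This is the usual "increment only if still nonzero" step used when promoting a weak reference to a strong one. A hedged usage sketch; add_ref_lock is an illustrative name, not necessarily this project's API:

/* Sketch: take a strong reference only if the object is still alive.
 * Returns nonzero on success, 0 if the count had already reached zero. */
inline int add_ref_lock(sp_counted_base_atomic_type * pw)
{
    return atomic_conditional_increment(pw) != 0;
}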
Example #13
RingBufferEntry*
allocEntry(RingBufferType t) {
  ASSERT(Util::isPowerOfTwo(kMaxRBEntries));
  RingBufferEntry* rb;
  int newRingPos, oldRingPos;
  do {
    oldRingPos = g_ringIdx;
    rb = &g_ring[oldRingPos];
    newRingPos = (oldRingPos + 1) % kMaxRBEntries;
  } while (!atomic_cas(&g_ringIdx, oldRingPos, newRingPos));
  rb->m_ts = uint32_t(_rdtsc());
  rb->m_type = t;
  rb->m_threadId = (uint32_t)((int64)pthread_self() & 0xFFFFFFFF);
  return rb;
}
Example #14
static su_state *new_state(su_state *s) {
	int i;
	su_state *ns;
	for (i = 1; i < SU_OPT_MAX_THREADS; i++) {
		ns = &s->msi->threads[i];
		if (atomic_cas(&ns->thread_finished, 1, 0)) {
			memcpy(ns, s, sizeof(su_state));
			atomic_add(&s->msi->thread_count, 1);
			ns->tid = atomic_add(&s->msi->tid_count, 1);
			assert(ns->tid > 0);
			return ns;
		}
	}
	return NULL;
}
Example #15
tx::TX_CODE Store::markForDeletion(const pos_t pos, const tx::transaction_id_t tid) {
  if(atomic_cas(&_tidVector[pos], tx::START_TID, tid)) {
    return tx::TX_CODE::TX_OK;
  }

  if(_tidVector[pos] == tid) {
    // It is a row that we inserted ourselves. So we leave it as it is.
    // No need for a CAS here since we already have it "locked"
    // WARNING:
    // This only works as long as the inserted pos is committed before it is deleted.
    // Otherwise we need to remove the position from the inserted list
    return tx::TX_CODE::TX_OK;
  }

  return tx::TX_CODE::TX_FAIL_CONCURRENT_COMMIT;
}
Example #16
void* lengthfixed_mtmp::alloc(size_t sz)
{
    NUT_DEBUGGING_ASSERT_ALIVE;
    assert(_granularity == std::max(sz, sizeof(void*)));

    while (true)
    {
        const TagedPtr<void> old_head(_head.cas);

        if (NULL == old_head.ptr)
            return ma_alloc(_alloc, _granularity);

        void *next = *reinterpret_cast<void**>(old_head.ptr);
        const TagedPtr<void> new_head(next, old_head.tag + 1);
        if (atomic_cas(&(_head.cas), old_head.cas, new_head.cas))
        {
            _free_num = std::max(0, _free_num - 1); // NOTE _free_num is not reliable under multithreading
            return old_head.ptr;
        }
    }
}
Example #17
/* Helper, from u/p/uthread.c.  Keep it in sync.  (don't want to move this into
 * glibc yet). */
static bool register_evq(struct syscall *sysc, struct event_queue *ev_q)
{
	int old_flags;
	sysc->ev_q = ev_q;
	wrmb();	/* don't let that write pass any future reads (flags) */
	/* Try and set the SC_UEVENT flag (so the kernel knows to look at ev_q) */
	do {
		/* no cmb() needed, the atomic_read will reread flags */
		old_flags = atomic_read(&sysc->flags);
		/* Spin if the kernel is mucking with syscall flags */
		while (old_flags & SC_K_LOCK)
			old_flags = atomic_read(&sysc->flags);
		/* If the kernel finishes while we are trying to sign up for an event,
		 * we need to bail out */
		if (old_flags & (SC_DONE | SC_PROGRESS)) {
			sysc->ev_q = 0;		/* not necessary, but might help with bugs */
			return FALSE;
		}
	} while (!atomic_cas(&sysc->flags, old_flags, old_flags | SC_UEVENT));
	return TRUE;
}
Example #18
void main(void)
{
	int failed, rv, i;

	atomic_t target, orig;
	atomic_val_t value;
	atomic_val_t oldvalue;

	failed = 0;
	TC_START("Test atomic operation primitives");

	TC_PRINT("Test atomic_cas()\n");
	target = 4;
	value = 5;
	oldvalue = 6;

	CHECK_OUTPUT(atomic_cas(&target, oldvalue, value), 0);
	target = 6;
	CHECK_OUTPUT(atomic_cas(&target, oldvalue, value), 1);
	CHECK_OUTPUT(target, value);

	TC_PRINT("Test atomic_add()\n");
	target = 1;
	value = 2;
	CHECK_OUTPUT(atomic_add(&target, value), 1);
	CHECK_OUTPUT(target, 3);

	TC_PRINT("Test atomic_sub()\n");
	target = 10;
	value = 2;
	CHECK_OUTPUT(atomic_sub(&target, value), 10);
	CHECK_OUTPUT(target, 8);

	TC_PRINT("Test atomic_inc()\n");
	target = 5;
	CHECK_OUTPUT(atomic_inc(&target), 5);
	CHECK_OUTPUT(target, 6);

	TC_PRINT("Test atomic_dec()\n");
	target = 2;
	CHECK_OUTPUT(atomic_dec(&target), 2);
	CHECK_OUTPUT(target, 1);

	TC_PRINT("Test atomic_get()\n");
	target = 50;
	CHECK_OUTPUT(atomic_get(&target), 50);

	TC_PRINT("Test atomic_set()\n");
	target = 42;
	value = 77;
	CHECK_OUTPUT(atomic_set(&target, value), 42);
	CHECK_OUTPUT(target, value);

	TC_PRINT("Test atomic_clear()\n");
	target = 100;
	CHECK_OUTPUT(atomic_clear(&target), 100);
	CHECK_OUTPUT(target, 0);

	TC_PRINT("Test atomic_or()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_or(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xFF0F);

	TC_PRINT("Test atomic_xor()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_xor(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xF00F);

	TC_PRINT("Test atomic_and()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_and(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0x0F00);

	TC_PRINT("Test atomic_nand()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_nand(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xFFFFF0FF);

	TC_PRINT("Test atomic_test_bit()\n");
	for (i = 0; i < 32; i++) {
		target = 0x0F0F0F0F;
		CHECK_TRUTH(atomic_test_bit(&target, i),
			    (target & (1 << i)));
	}

	TC_PRINT("Test atomic_test_and_clear_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		CHECK_TRUTH(atomic_test_and_clear_bit(&target, i),
			    (orig & (1 << i)));
		CHECK_OUTPUT(target, orig & ~(1 << i));
	}

	TC_PRINT("Test atomic_test_and_set_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		CHECK_TRUTH(atomic_test_and_set_bit(&target, i),
			    (orig & (1 << i)));
		CHECK_OUTPUT(target, orig | (1 << i));
	}

	TC_PRINT("Test atomic_clear_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_clear_bit(&target, i);
		CHECK_OUTPUT(target, orig & ~(1 << i));
	}

	TC_PRINT("Test atomic_set_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_set_bit(&target, i);
		CHECK_OUTPUT(target, orig | (1 << i));
	}

	if (failed) {
		TC_PRINT("%d tests failed\n", failed);
		rv = TC_FAIL;
	} else {
		rv = TC_PASS;
	}

	TC_END_RESULT(rv);
	TC_END_REPORT(rv);
}
Example #19
void atomic_test(void)
{
	int i;

	atomic_t target, orig;
	atomic_val_t value;
	atomic_val_t oldvalue;

	target = 4;
	value = 5;
	oldvalue = 6;

	/* atomic_cas() */
	zassert_true((atomic_cas(&target, oldvalue, value) == 0), "atomic_cas");
	target = 6;
	zassert_true((atomic_cas(&target, oldvalue, value) == 1), "atomic_cas");
	zassert_true((target == value), "atomic_cas");

	/* atomic_add() */
	target = 1;
	value = 2;
	zassert_true((atomic_add(&target, value) == 1), "atomic_add");
	zassert_true((target == 3), "atomic_add");

	/* atomic_sub() */
	target = 10;
	value = 2;
	zassert_true((atomic_sub(&target, value) == 10), "atomic_sub");
	zassert_true((target == 8), "atomic_sub");

	/* atomic_inc() */
	target = 5;
	zassert_true((atomic_inc(&target) == 5), "atomic_inc");
	zassert_true((target == 6), "atomic_inc");

	/* atomic_dec() */
	target = 2;
	zassert_true((atomic_dec(&target) == 2), "atomic_dec");
	zassert_true((target == 1), "atomic_dec");

	/* atomic_get() */
	target = 50;
	zassert_true((atomic_get(&target) == 50), "atomic_get");

	/* atomic_set() */
	target = 42;
	value = 77;
	zassert_true((atomic_set(&target, value) == 42), "atomic_set");
	zassert_true((target == value), "atomic_set");

	/* atomic_clear() */
	target = 100;
	zassert_true((atomic_clear(&target) == 100), "atomic_clear");
	zassert_true((target == 0), "atomic_clear");

	/* atomic_or() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_or(&target, value) == 0xFF00), "atomic_or");
	zassert_true((target == 0xFF0F), "atomic_or");

	/* atomic_xor() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_xor(&target, value) == 0xFF00), "atomic_xor");
	zassert_true((target == 0xF00F), "atomic_xor");

	/* atomic_and() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_and(&target, value) == 0xFF00), "atomic_and");
	zassert_true((target == 0x0F00), "atomic_and");


	/* atomic_nand() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_nand(&target, value) == 0xFF00), "atomic_nand");
	zassert_true((target == 0xFFFFF0FF), "atomic_nand");

	/* atomic_test_bit() */
	for (i = 0; i < 32; i++) {
		target = 0x0F0F0F0F;
		zassert_true(!!(atomic_test_bit(&target, i)) == !!(target & (1 << i)),
			    "atomic_test_bit");
	}

	/* atomic_test_and_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_clear_bit(&target, i)) == !!(orig & (1 << i)),
			    "atomic_test_and_clear_bit");
		zassert_true(target == (orig & ~(1 << i)), "atomic_test_and_clear_bit");
	}

	/* atomic_test_and_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_set_bit(&target, i)) == !!(orig & (1 << i)),
			    "atomic_test_and_set_bit");
		zassert_true(target == (orig | (1 << i)), "atomic_test_and_set_bit");
	}

	/* atomic_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_clear_bit(&target, i);
		zassert_true(target == (orig & ~(1 << i)), "atomic_clear_bit");
	}

	/* atomic_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_set_bit(&target, i);
		zassert_true(target == (orig | (1 << i)), "atomic_set_bit");
	}

}
Example #20
/* Consumer side, returns TRUE on success and fills *msg with the ev_msg.  If
 * the ucq appears empty, it will return FALSE.  Messages that arrive after we
 * start checking may not be received by this call. */
bool get_ucq_msg(struct ucq *ucq, struct event_msg *msg)
{
	uintptr_t my_idx;
	struct ucq_page *old_page, *other_page;
	struct msg_container *my_msg;
	struct spin_pdr_lock *ucq_lock = (struct spin_pdr_lock*)(&ucq->u_lock);

	do {
loop_top:
		cmb();
		my_idx = atomic_read(&ucq->cons_idx);
		/* The ucq is empty if the consumer and producer are on the same
		 * 'next' slot. */
		if (my_idx == atomic_read(&ucq->prod_idx))
			return FALSE;
		/* Is the slot we want good?  If not, we're going to need to try
		 * and move on to the next page.  If it is, we bypass all of
		 * this and try to CAS on us getting my_idx. */
		if (slot_is_good(my_idx))
			goto claim_slot;
		/* Slot is bad, let's try and fix it */
		spin_pdr_lock(ucq_lock);
		/* Reread the idx, in case someone else fixed things up while we
		 * were waiting/fighting for the lock */
		my_idx = atomic_read(&ucq->cons_idx);
		if (slot_is_good(my_idx)) {
			/* Someone else fixed it already, let's just try to get
			 * out */
			spin_pdr_unlock(ucq_lock);
			/* Make sure this new slot has a producer (ucq isn't
			 * empty) */
			if (my_idx == atomic_read(&ucq->prod_idx))
				return FALSE;
			goto claim_slot;
		}
		/* At this point, the slot is bad, and all other possible
		 * consumers are spinning on the lock.  Time to fix things up:
		 * Set the counter to the next page, and free the old one. */
		/* First, we need to wait and make sure the kernel has posted
		 * the next page.  Worst case, we know that the kernel is
		 * working on it, since prod_idx != cons_idx */
		old_page = (struct ucq_page*)PTE_ADDR(my_idx);
		while (!old_page->header.cons_next_pg)
			cpu_relax();
		/* Now set the counter to the next page */
		assert(!PGOFF(old_page->header.cons_next_pg));
		atomic_set(&ucq->cons_idx, old_page->header.cons_next_pg);
		/* Side note: at this point, any *new* consumers coming in will
		 * grab slots based off the new counter index (cons_idx) */
		/* Now free up the old page.  Need to make sure all other
		 * consumers are done.  We spin til enough are done, like an
		 * inverted refcnt. */
		while (atomic_read(&old_page->header.nr_cons) < NR_MSG_PER_PAGE)
		{
			/* spinning on userspace here, specifically, another
			 * vcore and we don't know who it is.  This will spin a
			 * bit, then make sure they aren't preempted */
			cpu_relax_any();
		}
		/* Now the page is done.  0 its metadata and give it up. */
		old_page->header.cons_next_pg = 0;
		atomic_set(&old_page->header.nr_cons, 0);
		/* We want to "free" the page.  We'll try and set it as the
		 * spare.  If there is already a spare, we'll free that one. */
		other_page = (struct ucq_page*)atomic_swap(&ucq->spare_pg,
		                                           (long)old_page);
		assert(!PGOFF(other_page));
		if (other_page) {
			munmap(other_page, PGSIZE);
			atomic_dec(&ucq->nr_extra_pgs);
		}
		/* All fixed up, unlock.  Other consumers may lock and check to
		 * make sure things are done. */
		spin_pdr_unlock(ucq_lock);
		/* Now that everything is fixed, try again from the top */
		goto loop_top;
claim_slot:
		cmb();	/* so we can goto claim_slot */
		/* If we're still here, my_idx is good, and we'll try to claim
		 * it.  If we fail, we need to repeat the whole process. */
	} while (!atomic_cas(&ucq->cons_idx, my_idx, my_idx + 1));
	assert(slot_is_good(my_idx));
	/* Now we have a good slot that we can consume */
	my_msg = slot2msg(my_idx);
	/* linux would put an rmb_depends() here */
	/* Wait til the msg is ready (kernel sets this flag) */
	while (!my_msg->ready)
		cpu_relax();
	rmb();	/* order the ready read before the contents */
	/* Copy out */
	*msg = my_msg->ev_msg;
	/* Unset this for the next usage of the container */
	my_msg->ready = FALSE;
	wmb();	/* post the ready write before incrementing */
	/* Increment nr_cons, showing we're done */
	atomic_inc(&((struct ucq_page*)PTE_ADDR(my_idx))->header.nr_cons);
	return TRUE;
}
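The slot_is_good() and slot2msg() helpers are not shown. From the way PTE_ADDR() and PGOFF() are applied to my_idx above, an index appears to pack a page address in its upper bits and a per-page message counter in its page-offset bits; plausible definitions under that assumption (not the project's actual ucq.h):

/* Assumed: the slot is usable while its counter still points into the
 * page's message array. */
static bool slot_is_good(uintptr_t slot)
{
	return PGOFF(slot) < NR_MSG_PER_PAGE;
}

/* Assumed: translate a packed index into the message container it names. */
static struct msg_container *slot2msg(uintptr_t slot)
{
	struct ucq_page *page = (struct ucq_page*)PTE_ADDR(slot);
	return &page->msgs[PGOFF(slot)];
}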
Example #21
	T* cas(T* cmp, T* val)
	{
		return (T*) atomic_cas(atomic_, cmp, val);
	}
Example #22
void *alloc_active(procheap_t *fromheap)
{
    active_t old_active, new_active;
    struct active_struct new_active_st;
    desc_t *desc;
    struct anchor_struct old_anchor_st, new_anchor_st;
    anchor_t old_anchor, new_anchor;
    
    void *addr = NULL;
    size_t next_index;
    

    //
    // <1> reserve block
    //

    // Another thread can change active, so this loop must re-read it
    // repeatedly.

    DPRINTF("try alloc from active\n");
    do
    {
        // copy active field of heap
        memcpy((void *)&old_active,
               (void *)&(fromheap->active),
               sizeof(active_t));

        if (old_active == ZERO_ACTIVE)
            goto ACTIVE_FAIL;

        // copy old active into new active
        extract_active(&new_active_st, &old_active);

        // set credits
        if (ACTIVE_GET_CREDITS(old_active) == 0)
            new_active = ZERO_ACTIVE;
        else
            new_active_st.credits = new_active_st.credits - 1;

        make_active(&new_active_st, &new_active);

        DPRINTF("old active = 0x%llX new active=0x%llX\n",
                (u64)old_active, (u64)new_active);
    } while (atomic_cas((void *)&(fromheap->active),
                        old_active,
                        new_active) == ATOMIC_FAIL);

    //
    // <2> pop block
    //

    DPRINTF("new_active.addr=%p\n", ACTIVE_GET_ADDR(new_active));
    DPRINTF("new_active.credits=%llu\n", ACTIVE_GET_CREDITS(new_active));

    desc = ACTIVE_GET_ADDR(new_active);


    do
    {
        // copy anchor into old_anchor and new_anchor
        memcpy((void *)&old_anchor,
               (void *)&(desc->anchor),
               sizeof(anchor_t));
        extract_anchor(&old_anchor_st, &old_anchor);
        extract_anchor(&new_anchor_st, &old_anchor);

        DPRINTF("desc->next=%p sb=%p heap=%p size=%lu max=%lu\n",
                desc->next, desc->sb, desc->heap,
                desc->size, desc->maxcount);

        DPRINTF("anchor->tag=%lu state=%lu counter=%lu avail=%lu\n",
                old_anchor_st.tag, old_anchor_st.state,
                old_anchor_st.counter, old_anchor_st.avail);

        /* get free block */
        addr = desc->sb + old_anchor_st.avail * desc->size;

        /* set avail that are index of next free block */
        next_index = *(u64 *)addr;
        new_anchor_st.avail = next_index;
        
        DPRINTF("free block->%p next block=%lu\n", addr, next_index);

        /* increase tag for ABA problem */
        new_anchor_st.tag = old_anchor_st.tag + 1;

        /* set state & counter */
        if (ACTIVE_GET_CREDITS(old_active) == 0)
        {
            if (old_anchor_st.counter == 0)
            {
                /* no free block */
                new_anchor_st.state = ANCHOR_STATE_FULL;
            }
            else
            {
                new_anchor_st.state = old_anchor_st.state;
                new_anchor_st.counter -= MIN(old_anchor_st.counter,
                                            MAXCREDITS);
            }
        }
        else
        {
            /* do nothing */
        }

        /* building new anchor is finished */
        make_anchor(&new_anchor_st, &new_anchor);

        DPRINTF("old anchor=%llX new anchor=%llX\n",
                (u64)old_anchor, (u64)new_anchor);
    } while (atomic_cas((void *)&(desc->anchor),
                        old_anchor,
                        new_anchor) == ATOMIC_FAIL);

    /* If there are no credits left, but more free blocks remain */
    

    return addr;

ACTIVE_FAIL:
    DPRINTF("alloc from active fail: no active\n");
    goto FUNC_END;

FUNC_END:
    return NULL;
}
Example #23
void *alloc_newsb(procheap_t *fromheap)
{
    desc_t *new_desc;
    struct anchor_struct anchor_st;
    struct active_struct active_st;
    active_t new_active;
    anchor_t new_anchor;
    void *ret;
    
    DPRINTF("alloc from new-sb\n");


    /* desc address must be aligned to store credits in active */
    //new_desc = (desc_t *)memalign(ACTIVE_ADDR_ALIGN,
    //sizeof(desc_t));
    new_desc = get_desc();

    /* allocate new super-block */
    new_desc->sb = get_newsb(fromheap->sc->sbsize);
    setup_sb(new_desc->sb, fromheap->sc->sz, fromheap->sc->sbsize);

    /* setup descriptor */
    new_desc->size = fromheap->sc->sz;
    new_desc->maxcount = fromheap->sc->sbsize / new_desc->size;
    new_desc->heap = fromheap;

    /* setup active for heap */
    active_st.addr = new_desc;
    active_st.credits = MIN(new_desc->maxcount, MAXCREDITS) - 1;
    make_active(&active_st, &new_active);
        
    /* setup anchor for desc */
    anchor_st.avail = 1;
    anchor_st.counter = new_desc->maxcount - active_st.credits - 1;
    anchor_st.state = ANCHOR_STATE_ACTIVE;

    /* set anchor into desc */
    make_anchor(&(anchor_st), &new_anchor);
    memcpy((void *)&(new_desc->anchor),
           (void *)&new_anchor, sizeof(anchor_t));

    
    /*
     * LOCATE MEMORY FENCE HERE
     * -> why?
     */
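    /* A store-store barrier here would make the descriptor and anchor
     * initialization above visible to other threads before the CAS below
     * publishes new_active; without it, a concurrent alloc_active() could
     * observe the new active pointer but read an uninitialized descriptor. */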

    
    /* set active into heap -> must be done atomically */
    if (atomic_cas((void *)&(fromheap->active),
                   ZERO_ACTIVE, /* active is empty */
                   (u64)new_active) == ATOMIC_SUCCESS)
    {
        desc_t **p = (desc_t **)(new_desc->sb);
        DPRINTF("new superblock->%p\n", new_desc->sb);
        DPRINTF("new desc->%p\n", new_desc);
        DPRINTF("set new active:0x%llX\n", (u64)(new_active));
        DPRINTF("set new anchor:0x%llX\n", (u64)(new_anchor));
        
        *p = new_desc;
        ret = (void *)((desc_t **)new_desc->sb + 1);
    }
    else
    {
        /* another thread has already set a new superblock,
           so this thread rolls back */
        free(new_desc->sb);
        put_desc(new_desc);
        ret = NULL;
    }
    
    return ret;
}
Example #24
void interrupt(su_state *s, int in) {
	int old;
	do {
		old = atomic_get(&s->msi->interrupt);
	} while (!atomic_cas(&s->msi->interrupt, old, old | in));
}
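The same CAS-retry pattern can clear interrupt bits; a hypothetical counterpart mirroring the function above (clear_interrupt is an assumed name, not necessarily part of this codebase):

static void clear_interrupt(su_state *s, int in) {
	int old;
	do {
		old = atomic_get(&s->msi->interrupt);
	} while (!atomic_cas(&s->msi->interrupt, old, old & ~in));
}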
Example #25
TEST(atomic, cas) {
  volatile atomic_t current_value = 0;
  atomic_t old_value = atomic_cas(&current_value, 1, 0);
  ASSERT_EQ(old_value, 0);
}