Example #1
struct point GetPoint(struct point_container *pc)
{
	struct point ret;
	while(1)
	{
		/* optimistic (seqlock-style) read: snapshot the published buffer
		 * and its version, copy the data, then re-check both; retry on
		 * any sign that the writer intervened */
		volatile struct point *ptr_p = pc->ptr;
		uint32_t save_version = ptr_p->version;
		if(ptr_p == pc->ptr && save_version == ptr_p->version)
		{
			ret.x = ptr_p->x;
			ret.y = ptr_p->y;
			ret.z = ptr_p->z;
			if(ptr_p == pc->ptr && save_version == ptr_p->version)
			{
				/* the writer always stores x == y == z, so any
				 * mismatch here means a torn read slipped through */
				if(ret.x != ret.y || ret.x != ret.z || ret.y != ret.z)
				{
					printf("%d,%d,%d,%u\n",ret.x,ret.y,ret.z,save_version);
					assert(0);
				}
				break;
			}
			ATOMIC_INCREASE(&miss_count);
		}
		else
			ATOMIC_INCREASE(&miss_count);
	}
	ATOMIC_INCREASE(&get_count);
	return ret;
}
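Example #1 reads a shared point without taking a lock (a seqlock-style optimistic read, paired with the writer in Example #5). The snippets in Examples #1 and #3-#5 only compile against definitions that are not shown; what follows is a minimal reconstruction of what they appear to assume (type layout, macros, and counter names are guesses, not the original headers):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <pthread.h>

/* assumed: ATOMIC_INCREASE as a GCC __sync builtin returning the new value */
#define ATOMIC_INCREASE(x)	__sync_add_and_fetch((x), 1)

/* assumed: thin wrappers over pthreads for the mutex-based pair */
#define mutex_lock(m)		pthread_mutex_lock(m)
#define mutex_unlock(m)		pthread_mutex_unlock(m)

struct point {
	int x, y, z;			/* invariant under test: x == y == z */
	uint32_t version;		/* bumped by the lock-free writer */
};

struct point_container {
	struct point array[2];		/* double buffer for the lock-free pair */
	int index;			/* buffer the writer fills next */
	uint32_t g_version;		/* monotonically increasing version */
	struct point * volatile ptr;	/* currently published buffer */
	struct point p;			/* plain storage for the mutex pair */
	pthread_mutex_t *mtx;
};

static volatile uint32_t miss_count, get_count, set_count;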
Example #2
/* initialize a reference-counted object: record the destructor, stamp a
 * 64-bit identity (millisecond timestamp in the high word, a global
 * monotonic counter in the low word), and take the first reference */
void refobj_init(refobj *r,void (*destructor)(void*))
{
	r->destructor = destructor;
	r->high32 = kn_systemms();
	r->low32  = (uint32_t)(ATOMIC_INCREASE(&g_ref_counter));
	ATOMIC_INCREASE(&r->refcount);	/* refcount assumed zeroed beforehand */
}
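The timestamp in high32 plus the global counter in low32 give each object a practically unique 64-bit identity, which lets a stale handle be told apart from a recycled object. A hedged sketch of the layout the function implies, plus a plausible release counterpart (both are assumptions, not the library's code):

#include <stdint.h>

/* assumed counterpart macro, returning the new value */
#define ATOMIC_DECREASE(x)	__sync_sub_and_fetch((x), 1)

typedef struct refobj {
	uint32_t refcount;		/* zeroed before init; init takes ref #1 */
	uint32_t high32;		/* identity, high word: ms timestamp */
	uint32_t low32;			/* identity, low word: global counter */
	void (*destructor)(void*);
} refobj;

/* hypothetical release: drop a reference, destroy the object at zero */
void refobj_release(refobj *r)
{
	if(ATOMIC_DECREASE(&r->refcount) == 0 && r->destructor)
		r->destructor(r);
}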
Example #3
/* lock-based writer: the mutex serializes against the reader in
 * Example #4, so the three stores appear atomic to it */
void SetPoint(struct point_container *pc,struct point p)
{
	mutex_lock(pc->mtx);
	pc->p.x = p.x;
	pc->p.y = p.y;
	pc->p.z = p.z;
	mutex_unlock(pc->mtx);
	ATOMIC_INCREASE(&set_count);
}
Example #4
/* lock-based reader: copies the point out under the same mutex,
 * so it can never observe a torn update */
struct point GetPoint(struct point_container *pc)
{
	mutex_lock(pc->mtx);
	struct point ret;
	ret.x = pc->p.x;
	ret.y = pc->p.y;
	ret.z = pc->p.z;
	mutex_unlock(pc->mtx);
	ATOMIC_INCREASE(&get_count);
	return ret;
}
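Examples #3 and #4 are the lock-based baseline: every store and load of the point happens under the same mutex, so a reader can never see a half-written update. A minimal test-harness sketch (assumed, not from the original source) that exercises either Get/Set pair; the writer maintains the x == y == z invariant that the assert in Example #1 checks:

#include <pthread.h>

static struct point_container g_pc;	/* assumed initialized elsewhere */

static void *writer_thread(void *arg)
{
	(void)arg;
	for(int i = 0; ; ++i)
	{
		struct point p = { i, i, i };	/* keep x == y == z */
		SetPoint(&g_pc, p);
	}
	return NULL;
}

static void *reader_thread(void *arg)
{
	(void)arg;
	while(1)
		GetPoint(&g_pc);	/* Example #1's variant asserts on torn reads */
	return NULL;
}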
Example #5
/* lock-free (single-writer) update: fill the spare half of a double
 * buffer, publish a new version, then publish the buffer pointer; the
 * compiler barriers keep the stores in that order */
void SetPoint(struct point_container *pc,struct point p)
{
	struct point *new_p = &pc->array[pc->index];	/* spare buffer */
	pc->index = (pc->index + 1)%2;
	new_p->x = p.x;
	new_p->y = p.y;
	new_p->z = p.z;
	__asm__ volatile("" : : : "memory");	/* data before version */
	new_p->version = ++pc->g_version;	/* non-atomic: one writer only */
	__asm__ volatile("" : : : "memory");	/* version before pointer */
	pc->ptr = new_p;
	ATOMIC_INCREASE(&set_count);
}
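Example #5 is the writer half of the lock-free pair: since ++pc->g_version is a plain increment, the scheme tolerates exactly one writer, while concurrent readers are handled by Example #1's double check. An initialization sketch for this pair (assumed, not shown in the source):

void point_container_init_lockfree(struct point_container *pc)
{
	pc->index = 1;			/* first write goes to the spare slot */
	pc->g_version = 0;
	pc->array[0].x = pc->array[0].y = pc->array[0].z = 0;
	pc->array[0].version = 0;
	pc->ptr = &pc->array[0];	/* publish an initial consistent buffer */
}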
Example #6
/* adjust the accounted size of a cached resource in place: subtract the
 * old size from its current list's total, recompute it, add it back,
 * and flag the cache as needing a rebalance */
void
arc_update_resource_size(arc_t *cache, arc_resource_t res, size_t size)
{
    arc_object_t *obj = (arc_object_t *)res;
    if (obj) {
        MUTEX_LOCK(&cache->lock);
        arc_state_t *state = ATOMIC_READ(obj->state);
        if (LIKELY(state == &cache->mru || state == &cache->mfu)) {
            ATOMIC_DECREASE(state->size, obj->size);
            obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
            ATOMIC_INCREASE(state->size, obj->size);
        }
        ATOMIC_INCREMENT(cache->needs_balance);
        MUTEX_UNLOCK(&cache->lock);
    }
}
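Examples #6 and #7 come from an ARC (Adaptive Replacement Cache) implementation. The atomic and mutex macros they rely on are not shown; below is one plausible set of definitions, sketched with GCC __sync builtins and pthreads (the library's own header may well differ). Note that Examples #1-#5 use an unrelated one-argument ATOMIC_INCREASE; the two groups of snippets come from different codebases.

#include <pthread.h>

#define LIKELY(e)              __builtin_expect(!!(e), 1)
#define UNLIKELY(e)            __builtin_expect(!!(e), 0)

#define ATOMIC_READ(x)         __sync_fetch_and_add(&(x), 0)
#define ATOMIC_INCREMENT(x)    __sync_add_and_fetch(&(x), 1)
#define ATOMIC_DECREMENT(x)    __sync_sub_and_fetch(&(x), 1)
#define ATOMIC_INCREASE(x, v)  __sync_add_and_fetch(&(x), (v))
#define ATOMIC_DECREASE(x, v)  __sync_sub_and_fetch(&(x), (v))
/* store via CAS loop so the assignment acts as a full barrier */
#define ATOMIC_SET(x, v)       do { } while (!__sync_bool_compare_and_swap(&(x), (x), (v)))

#define MUTEX_LOCK(m)          pthread_mutex_lock(m)
#define MUTEX_UNLOCK(m)        pthread_mutex_unlock(m)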
Example #7
/* Move the object to the given state. If the state transition requires it,
 * fetch, evict or destroy the object. */
static inline int
arc_move(arc_t *cache, arc_object_t *obj, arc_state_t *state)
{
    // In the first condition we check whether the object is locked,
    // which means someone is fetching its value and we don't want to
    // mess with it. Whoever is fetching will also take care of moving it
    // to one of the lists (or dropping it).
    // NOTE: while the object is being fetched it doesn't belong
    //       to any list, so there is no point in going ahead;
    //       arc_balance() should also never go through this object
    //       (since it is in none of the lists), so it won't be affected.
    //       The only call which would silently fail is arc_remove(),
    //       but whether an object that is being fetched needs to be
    //       removed will be determined by whoever is fetching it or by
    //       the next call to arc_balance() (which will happen anyway if
    //       the fetcher puts the object into the cache)
    //
    // In the second condition we handle a specific corner case which
    // happens when concurrent threads access an item which has just been
    // fetched but also dropped (so its state is NULL).
    // If a thread entering arc_lookup() manages to get the object out of
    // the hashtable before it is deleted, it will try putting the object
    // into the mfu list without first checking whether it was already in
    // a list (new objects should first go to the mru list, not the mfu one)
    if (UNLIKELY(obj->locked || (state == &cache->mfu && ATOMIC_READ(obj->state) == NULL)))
        return 0;

    MUTEX_LOCK(&cache->lock);

    arc_state_t *obj_state = ATOMIC_READ(obj->state);

    if (LIKELY(obj_state != NULL)) {

        if (LIKELY(obj_state == state)) {
            // short path for recurring keys
            // (those in the mfu list being hit again)
            if (LIKELY(state->head.next != &obj->head))
                arc_list_move_to_head(&obj->head, &state->head);
            MUTEX_UNLOCK(&cache->lock);
            return 0;
        }

        // if the target state is not NULL
        // (i.e. the object is not about to be removed),
        // adjust the adaptive target marker p (the "^" marker in the ARC paper)
        if (LIKELY(state != NULL)) {
            if (obj_state == &cache->mrug) {
                size_t csize = cache->mrug.size
                             ? (cache->mfug.size / cache->mrug.size)
                             : cache->mfug.size / 2;
                cache->p = MIN(cache->c, cache->p + MAX(csize, 1));
            } else if (obj_state == &cache->mfug) {
                size_t csize = cache->mfug.size
                             ? (cache->mrug.size / cache->mfug.size)
                             : cache->mrug.size / 2;
                cache->p = MAX(0, cache->p - MAX(csize, 1));
            }
        }

        ATOMIC_DECREASE(obj_state->size, obj->size);
        arc_list_remove(&obj->head);
        ATOMIC_DECREMENT(obj_state->count);
        ATOMIC_SET(obj->state, NULL);
    }

    if (state == NULL) {
        if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
            release_ref(cache->refcnt, obj->node);
    } else if (state == &cache->mrug || state == &cache->mfug) {
        obj->async = 0;
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    } else if (obj_state == NULL) {

        obj->locked = 1;
        
        // unlock the cache while the backend is fetching the data
        // (the object has been locked while being fetched so nobody
        // will change its state)
        MUTEX_UNLOCK(&cache->lock);
        size_t size = 0;
        int rc = cache->ops->fetch(obj->ptr, &size, cache->ops->priv);
        switch (rc) {
            case 1:
            case -1:
            {
                if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                    release_ref(cache->refcnt, obj->node);
                return rc;
            }
            default:
            {
                if (size >= cache->c) {
                    // the (single) object doesn't fit in the cache, let's return it
                    // to the getter without (re)adding it to the cache
                    if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                        release_ref(cache->refcnt, obj->node);
                    return 1;
                }
                MUTEX_LOCK(&cache->lock);
                obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
                arc_list_prepend(&obj->head, &state->head);
                ATOMIC_INCREMENT(state->count);
                ATOMIC_SET(obj->state, state);
                ATOMIC_INCREASE(state->size, obj->size);
                ATOMIC_INCREMENT(cache->needs_balance);
                break;
            }
        }
        // since this object is going to be put back into the cache,
        // we need to unmark it so that it won't be ignored next time
        // it's going to be moved to another list
        obj->locked = 0;
    } else {
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    }
    MUTEX_UNLOCK(&cache->lock);
    return 0;
}
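The mrug/mfug branches in arc_move() implement the ARC adaptation rule: a hit in the recency ghost list (mrug) grows the target size p for the recency side, a hit in the frequency ghost list (mfug) shrinks it, each step scaled by the ratio of the two ghost lists' sizes. The same logic in isolation (function and parameter names are made up for illustration); this sketch clamps before subtracting, which also sidesteps the wraparound that MAX(0, p - delta) would suffer if p were an unsigned type:

/* isolated sketch of the adaptation step mirrored from arc_move() above */
static void arc_adapt(size_t *p, size_t c,
                      size_t mrug_size, size_t mfug_size, int hit_in_mrug)
{
    if (hit_in_mrug) {
        /* recency ghost hit: grow p toward the total cache size c */
        size_t delta = mrug_size ? mfug_size / mrug_size : mfug_size / 2;
        if (delta < 1)
            delta = 1;
        *p = (*p + delta > c) ? c : *p + delta;
    } else {
        /* frequency ghost hit: shrink p toward 0 */
        size_t delta = mfug_size ? mrug_size / mfug_size : mrug_size / 2;
        if (delta < 1)
            delta = 1;
        *p = (*p > delta) ? *p - delta : 0;
    }
}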