Example 1
void
thd_free(struct thread *thd)
{
        struct thread *old_freelist_head;
        if (NULL == thd) return;

        while (thd->stack_ptr > 0) {
                struct thd_invocation_frame *frame;

                /*
                 * FIXME: this should include upcalling into affected
                 * spds to inform them of the deallocation.
                 */

                frame = &thd->stack_base[thd->stack_ptr];
                spd_mpd_ipc_release((struct composite_spd*)frame->current_composite_spd);

                thd->stack_ptr--;
        }

        if (NULL != thd->data_region) {
                cos_put_pg_pool((struct page_list*)thd->data_region);
        }

        /* lock-free push of this thread back onto the global freelist */
        do {
                old_freelist_head  = thread_freelist_head;
                thd->freelist_next = old_freelist_head;
        } while (unlikely(!cos_cas((unsigned long *)&thread_freelist_head, (unsigned long)old_freelist_head, (unsigned long)thd)));

        return;
}
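
The do/while around cos_cas in thd_free() is the standard lock-free push onto a singly linked freelist: snapshot the head, link the new node in front of the snapshot, and retry whenever another CPU moved the head in between. Below is a minimal standalone sketch of the same pattern, using GCC's __sync_bool_compare_and_swap as a stand-in for cos_cas (an assumption; the in-kernel primitive is not reproduced here).

struct node { struct node *next; };
static struct node *freelist_head;

static void
freelist_push(struct node *n)
{
        struct node *old_head;

        do {
                /* snapshot the current head and link the new node in front of it */
                old_head = freelist_head;
                n->next  = old_head;
                /* retry if another CPU changed the head since the snapshot */
        } while (!__sync_bool_compare_and_swap(&freelist_head, old_head, n));
}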
Example 2
/* 
 * Before actually unmapping a cbuf from a component, we need to
 * atomically clear the page pointer in the meta, which guarantees that
 * clients do not segfault. Clients have to check for NULL when they
 * receive a cbuf.
 */
static int
cbuf_unmap_prepare(struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner;
	unsigned long old_nfo, new_nfo;

	if (cbuf_referenced(cbi)) return 1;
	cbuf_references_clear(cbi);

	/* 
	 * We need to clear out the meta. Consider the case where the manager
	 * removes a cbuf from component c0 and allocates that cbuf to
	 * component c1, but c1 then sends the cbuf back to c0. If c0 sees the
	 * old meta, it may be confused. However, we have to leave the
	 * inconsistent bit set here.
	 */
	do {
		old_nfo = m->m->nfo;
		if (old_nfo & CBUF_REFCNT_MAX) return 1;
		new_nfo = old_nfo & CBUF_INCONSISENT;
		if (unlikely(!cos_cas(&m->m->nfo, old_nfo, new_nfo))) return 1;
		m   = FIRST_LIST(m, next, prev);
	} while (m != &cbi->owner);

	return 0;
}
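
The CAS in the loop above clears every bit of the meta word except the inconsistent flag. A minimal sketch of that "clear the word but keep one flag" step, again with __sync_bool_compare_and_swap standing in for cos_cas and DEMO_INCONSISTENT standing in for CBUF_INCONSISENT (both names are assumptions, not the real API):

#define DEMO_INCONSISTENT 0x1UL

static int
demo_clear_keep_flag(unsigned long *word)
{
        unsigned long old = *word;
        unsigned long new = old & DEMO_INCONSISTENT; /* drop everything but the flag */

        /* a failed CAS means someone raced with us; report failure to the caller */
        return __sync_bool_compare_and_swap(word, old, new) ? 0 : -1;
}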
Example 3
/*
 * Return values:
 * 0   : try and take the lock again in local memory
 * -ret: return -ret
 */
int
lock_take_contention(cos_lock_t *l, union cos_lock_atomic_struct *result,
                     union cos_lock_atomic_struct *prev_val, u16_t owner)
{
    int     lock_id = l->lock_id;
    spdid_t spdid   = cos_spd_id();
    int ret;

    if (lock_component_pretake(spdid, lock_id, owner)) return -1;

    /* Must read from memory (i.e., a volatile access), as we want to
     * detect changes made by other threads here */
    if (owner != l->atom.c.owner) return 0;
    /* Mark the lock as contested */
    if (!l->atom.c.contested) {
        result->c.contested = 1;
        result->c.owner     = owner;
        if (!cos_cas((unsigned long*)&l->atom.v, prev_val->v, result->v)) return 0;
        assert(l->atom.c.contested);
    }
    /* Note if a 1 is returned, there is a
     * generation mismatch, and we just want to
     * try and take the lock again anyway */
    ret = lock_component_take(spdid, lock_id, owner);
    return ret < 0 ? ret : 0;
}
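
Given the return-value contract in the header comment (0 means retry the lock locally, a negative value is an error), a caller is expected to loop between a local fast path and this contention path. Here is a hedged sketch of such a caller, assuming an owner field of 0 denotes a free lock and reusing only the fields visible in the code above; this is not the lock library's actual lock_take fast path.

static int
demo_lock_take(cos_lock_t *l, u16_t me)
{
        union cos_lock_atomic_struct prev, next;
        int ret;

        while (1) {
                prev.v = l->atom.v;
                if (prev.c.owner == 0) {
                        /* uncontested: try to claim ownership with a local CAS */
                        next.v       = 0;
                        next.c.owner = me;
                        if (cos_cas((unsigned long *)&l->atom.v, prev.v, next.v)) return 0;
                        continue;
                }
                /* contended: hand off to the lock component */
                ret = lock_take_contention(l, &next, &prev, prev.c.owner);
                if (ret < 0) return ret; /* hard error */
                /* ret == 0: retry the local fast path */
        }
}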
Example 4
int
lock_release_contention(cos_lock_t *l, union cos_lock_atomic_struct *prev_val)
{
    int lock_id = l->lock_id;
    /*
     * The CAS must succeed (i.e., the negated condition below must be
     * false): the contested bit is already set and we are the owner,
     * so no other thread should modify the atomic word.
     */
    if (unlikely(!cos_cas((unsigned long*)&l->atom, prev_val->v, 0))) BUG();
    if (lock_component_release(cos_spd_id(), lock_id)) return -1;
    return 0;
}
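
A matching hedged sketch of the release side, under the same assumptions as the take sketch above (this is not the library's actual lock_release): clear the word with a CAS on the uncontested fast path, and fall back to lock_release_contention() only once the contested bit has been observed.

static int
demo_lock_release(cos_lock_t *l)
{
        union cos_lock_atomic_struct prev;

        prev.v = l->atom.v;
        if (!prev.c.contested) {
                /* nobody is waiting: a plain CAS back to 0 releases the lock */
                if (cos_cas((unsigned long *)&l->atom.v, prev.v, 0)) return 0;
                prev.v = l->atom.v; /* a waiter marked the lock contested: re-read */
        }
        /* contested: let the lock component wake up the waiters */
        return lock_release_contention(l, &prev);
}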
Example 5
/* 
 * Do any components have a reference to the cbuf? 
 * This key function coordinates the manager and its clients.
 * When it returns 1, this cbuf may or may not be in use by some component.
 * When it returns 0, it guarantees the following: 
 * if all clients use the protocol correctly, there is no reference 
 * to the cbuf and no one will receive the cbuf after this point. Furthermore, 
 * if the cbuf is on some free list, its inconsistent bit is already set.
 * That is to say, the manager can safely collect or re-balance this cbuf.
 *
 * Proof: 1. If a component gets the cbuf from a free list, it will 
 * simply discard the cbuf, as its inconsistent bit is set.
 * 2. Assume component c sends the cbuf. 
 * The send cannot happen after we check c's refcnt, since c 
 * then has no reference to this cbuf.
 * If the send happens before we check c's refcnt, then, because the sum of 
 * nsent equals the sum of nrecv, the send has already been received and 
 * no further receive will happen.
 * 3. No one will call cbuf2buf to receive this cbuf after this function, 
 * as all sends have been received and no more sends will occur during this function.
 *
 * However, if clients do not use the protocol correctly, this function 
 * provides no guarantee. cbuf_unmap_prepare takes care of that case.
 */
static int
cbuf_referenced(struct cbuf_info *cbi)
{
	struct cbuf_maps *m = &cbi->owner;
	int sent, recvd, ret = 1;
	unsigned long old_nfo, new_nfo;
	unsigned long long old;
	struct cbuf_meta *mt, *own_mt = m->m;

	old_nfo = own_mt->nfo;
	new_nfo = old_nfo | CBUF_INCONSISENT;
	if (unlikely(!cos_cas(&own_mt->nfo, old_nfo, new_nfo))) goto done;

	mt   = (struct cbuf_meta *)(&old);
	sent = recvd = 0;
	do {
		struct cbuf_meta *meta = m->m;

		/* 
		 * Guarantee that the two words (refcnt and nsent/nrecv) are read
		 * atomically. Consider this case: c0 sends a cbuf to c1 and then
		 * frees the cbuf, but before c1 receives it, the manager comes in 
		 * and checks c1's refcnt, which is now zero. Before the manager 
		 * checks c1's nsent/nrecv, it is preempted by c1. c1 receives 
		 * the cbuf, incrementing its refcnt and nsent/nrecv. After this, we 
		 * switch back to the manager, which continues to check c1's 
		 * nsent/nrecv; it is now 1, which equals c0's nsent/nrecv. 
		 * Without the atomic read, the manager would thus wrongly conclude
		 * that it can collect or unmap this cbuf.
		 */
		memcpy(&old, meta, sizeof(unsigned long long));
		/* verify the snapshot against the shared meta; on a race, bail out conservatively */
		if (unlikely(!cos_dcas((unsigned long long *)meta, old, old))) goto unset;
		if (CBUF_REFCNT(mt)) goto unset;		
		/* 
		 * TODO: maintain per-mapping counters of sent and recv in the manager,
		 * and atomically clear the counters in the meta each time.
		 */
		sent  += mt->snd_rcv.nsent;
		recvd += mt->snd_rcv.nrecvd;
		m      = FIRST_LIST(m, next, prev);
	} while (m != &cbi->owner);
	if (sent != recvd) goto unset;
	ret = 0;
	if (CBUF_IS_IN_FREELIST(own_mt)) goto done;

unset:
	CBUF_FLAG_ATOMIC_REM(own_mt, CBUF_INCONSISENT);
done:
	return ret;
}
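
The memcpy plus double-word CAS in the loop above is an "atomic 8-byte snapshot" trick: copy both words, then issue a DCAS whose expected and new values are both the copy itself. The DCAS writes nothing new; it succeeds only if the shared meta still equals the snapshot, which proves the two words were read consistently, and a failure is handled conservatively. A minimal sketch of the same trick, where demo_meta is an illustrative stand-in for struct cbuf_meta and cos_dcas/memcpy are taken as given from the code above:

struct demo_meta { unsigned long nfo; unsigned long cnt; };

static int
demo_snapshot(struct demo_meta *shared, unsigned long long *snap)
{
        /* plain copy first; it may tear between the two words */
        memcpy(snap, shared, sizeof(unsigned long long));
        /* verify: succeeds only if the shared value still equals the copy */
        if (!cos_dcas((unsigned long long *)shared, *snap, *snap)) return -1;
        return 0;
}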
Example 6
struct thread *
thd_alloc(struct spd *spd)
{
	struct thread *thd, *new_freelist_head;
	unsigned short int id;
	void *page;

	/* lock-free pop of a thread from the global freelist */
	do {
		thd = thread_freelist_head;
		if (unlikely(NULL == thd)) break; /* freelist exhausted; handled below */
		new_freelist_head = thd->freelist_next;
	} while (unlikely(!cos_cas((unsigned long *)&thread_freelist_head, (unsigned long)thd, (unsigned long)new_freelist_head)));

	if (thd == NULL) {
		printk("cos: Could not create thread.\n");
		return NULL;
	}

	page = cos_get_pg_pool();
	if (unlikely(NULL == page)) {
		printk("cos: Could not allocate the data page for new thread.\n");
		/* return the thread to the freelist (same CAS push as in thd_free) */
		do {
			new_freelist_head  = thread_freelist_head;
			thd->freelist_next = new_freelist_head;
		} while (unlikely(!cos_cas((unsigned long *)&thread_freelist_head, (unsigned long)new_freelist_head, (unsigned long)thd)));
		return NULL;
	}

	id = thd->thread_id;
	memset(thd, 0, sizeof(struct thread));
	thd->thread_id = id;
	thd->cpu = get_cpuid();

	thd->data_region = page;
	*(int*)page = 4; /* HACK: sizeof(struct cos_argr_placekeeper) */
	thd->ul_data_page = COS_INFO_REGION_ADDR + (PAGE_SIZE * id);
	thd_publish_data_page(thd, (vaddr_t)page);

	/* Initialization */
	thd->stack_ptr = -1;
	/* establish this thread's base spd */
	thd_invocation_push(thd, spd, 0, 0);

	thd->flags = 0;

	thd->pending_upcall_requests = 0;
	thd->freelist_next = NULL;

	fpu_thread_init(thd);

	return thd;
}
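
thd_alloc() is the pop side of the freelist discipline whose push side appears in thd_free(). Here is a pop counterpart to the freelist_push() sketch after Example 1, reusing its struct node and freelist_head and again substituting __sync_bool_compare_and_swap for cos_cas (an assumption); note the explicit empty-list check before the head is dereferenced, mirroring the check in thd_alloc() above.

static struct node *
freelist_pop(void)
{
        struct node *head, *next;

        do {
                head = freelist_head;
                if (!head) return NULL;   /* freelist is empty */
                next = head->next;
                /* retry if another CPU popped or pushed in the meantime */
        } while (!__sync_bool_compare_and_swap(&freelist_head, head, next));

        return head;
}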