CSTUB_FN(int, lock_component_pretake) (struct usr_inv_cap *uc,
				       spdid_t spdid, unsigned long lock_id, 
				       unsigned short int thd)
{
	long fault = 0;
	int ret;
	
	struct rec_data_lk *rd = NULL;
redo:
	rd = rd_update(lock_id, LOCK_PRETAKE);
	assert(rd);

#ifdef BENCHMARK_MEAS_PRETAKE
	rdtscll(meas_end);
	if (test_flag) {
		test_flag = 0;
		printc("recovery a lock cost: %llu\n", meas_end - meas_start);
	}
#endif		
	
	CSTUB_INVOKE(ret, fault, uc, 3, spdid, rd->s_lkid, thd);
	
	if (unlikely(fault)){
		printc("cli:thd %d see a fault in lock_component_pretake!\n", 
		       cos_get_thd_id());
#ifdef BENCHMARK_MEAS_PRETAKE
		test_flag = 1;
		rdtscll(meas_start);
#endif		
		CSTUB_FAULT_UPDATE();		
		goto redo;  // update the generation number
	}

	if (ret == -EINVAL) {
		/* printc("cli:thd %d lock_component_pretake return EINVAL\n", cos_get_thd_id()); */
		rd_recover_state(rd);
		goto redo;
	}

	/* printc("cli:thd %d lock_component_pretake return %d\n", cos_get_thd_id(), ret); */
	
	return ret;
}
/* 
 * The problem being solved here is this: T_1 wishes to take the
 * mutex and finds that it is held by another thread.  It calls into
 * this function, but is preempted by T_2, the lock holder.  The lock
 * is released.  T_1 is switched back to, and it invokes this
 * component asking to block until the lock is released.  This
 * component has no way of knowing that the lock has already been
 * released, so T_1 would block for no reason, waiting for the lock
 * to be "released".  Thus we have the client call this pretake
 * function, checking both before and after the invocation that the
 * lock is still held.  We record the generation number in pretake
 * and verify that it is unchanged in take; that signifies that no
 * release has happened in the interim, and that we really should
 * sleep.
 */
int lock_component_pretake(spdid_t spd, unsigned long lock_id, unsigned short int thd)
{
	struct meta_lock *ml;
 	spdid_t spdid = cos_spd_id();
	int ret = 0;

	ACT_RECORD(ACT_PRELOCK, spd, lock_id, cos_get_thd_id(), thd);
	TAKE(spdid);
//	lock_print_all();
	ml = lock_find(lock_id, spd);
	if (NULL == ml) {
		ret = -1;
		goto done;
	}
	ml->gen_num = generation;
done:
	RELEASE(spdid);
	return ret;
}
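/*
 * A minimal client-side sketch of the pretake/take protocol described
 * in the comment above.  This is illustrative only: struct my_lock,
 * lock_is_taken(), and userlevel_try_take() are hypothetical
 * user-level helpers, not part of the lock component's interface;
 * only lock_component_pretake() and the four-argument
 * lock_component_take() (shown later in this listing, called with
 * TIMER_EVENT_INF for an infinite wait) come from the code collected
 * here.
 */
struct my_lock {
	unsigned long lock_id;	/* id assigned when the lock was created */
};

static int my_lock_take(struct my_lock *l, unsigned short int holder)
{
	spdid_t spd = cos_spd_id();

	/* Fast path: try to take the lock at user level without
	 * invoking the lock component at all. */
	while (!userlevel_try_take(l)) {
		/* Record the current generation while the lock appears held. */
		if (lock_component_pretake(spd, l->lock_id, holder) < 0) return -1;
		/* If the lock was released between our check and pretake,
		 * retry the fast path rather than blocking. */
		if (!lock_is_taken(l)) continue;
		/* Block only if no release happened since pretake: take
		 * verifies the generation recorded by pretake. */
		if (lock_component_take(spd, l->lock_id, holder,
					TIMER_EVENT_INF) < 0) return -1;
	}
	return 0;
}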
Example #3
void cos_init(void *arg)
{
	static volatile int first = 1;
	
	union sched_param sp;
	pnums = avg = 0;
	inc1 = inc2 = 0;

#ifdef DEBUG_PERIOD	
	unsigned long cos_immediate_process_cnt_prev = 0;

	if (cos_get_thd_id() == debug_thd) {
		if (periodic_wake_create(cos_spd_id(), 100)) BUG();
		while(1) {
			periodic_wake_wait(cos_spd_id());
			printc("num interrupt_wait %ld interrupt_process %ld\n", 
			       interrupt_wait_cnt, interrupt_process_cnt);
			interrupt_wait_cnt = 0;
			interrupt_process_cnt = 0;
			if (cos_immediate_process_cnt > 0) {
				printc("num immediate interrupt_process %ld\n", 
				       cos_immediate_process_cnt - cos_immediate_process_cnt_prev);
				cos_immediate_process_cnt_prev = cos_immediate_process_cnt;
			}

		}
	}
#endif
	
	if (first) {
		first = 0;

#ifdef DEBUG_PERIOD		
		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 10;
		debug_thd = sched_create_thd(cos_spd_id(), sp.v, 0, 0);
#endif

		init();
	} else {
		prints("net: not expecting more than one bootstrap.");
	}
}
Example #4
void trans_recv_hi(void)
{ 
  unsigned int amnt_hi = 1;
  long evt_hi;
  td_t td_hi;
  char *params_hi = "7";

  printc("***HIGH PRIO RECV STARTING***\n");
  evt_hi = evt_split(cos_spd_id(), 0, 0);
  assert(evt_hi > 0);
  td_hi = tsplit(cos_spd_id(), td_root, params_hi, strlen(params_hi), TOR_READ, evt_hi);
  printc("EVT_HI (%ld) TD_HI (%d)\n", evt_hi, td_hi);

  do {
    evt_wait(cos_spd_id(), evt_hi);
    // if((amnt_hi++ % 1000) == 0)
    printc("hi prio count (%u) spd(%d) tid(%d)\n", amnt_hi++, cos_spd_id(), cos_get_thd_id());
  } while (1);//cur_itr++ < ITR);
  return;
}
Example #5
void cos_init(void)
{
	u64_t start, end, avg, tot = 0, dev = 0;
	int i, j;

	call();			/* get stack */
	quarantine_fork(cos_spd_id(), cos_spd_id()-1); /* dirty way to find pong */
	printc("cpu %ld, thd %d from ping\n",cos_cpuid(), cos_get_thd_id());
	printc("Starting %d Invocations.\n", ITER);

	for (i = 0 ; i < ITER ; i++) {
		rdtscll(start);
//		cos_send_ipi(i, 0, 0, 0);
		call();
		rdtscll(end);
		meas[i] = end-start;
	}

	for (i = 0 ; i < ITER ; i++) tot += meas[i];
	avg = tot/ITER;
	printc("avg %lld\n", avg);
	for (tot = 0, i = 0, j = 0 ; i < ITER ; i++) {
		if (meas[i] < avg*2) {
			tot += meas[i];
			j++;
		}
	}
	printc("avg w/o %d outliers %lld\n", ITER-j, tot/j);

	for (i = 0 ; i < ITER ; i++) {
		u64_t diff = (meas[i] > avg) ? 
			meas[i] - avg : 
			avg - meas[i];
		dev += (diff*diff);
	}
	dev /= ITER;
	printc("deviation^2 = %lld\n", dev);

//	printc("%d invocations took %lld\n", ITER, end-start);
	return;
}
Example #6
int
resolve_dependency(struct spd_tmem_info *sti, int skip_cbuf)
{
	struct cos_cbuf_item *cci;
	/* union cbuf_meta cm; */

	int ret = -1;

	/* DOUT("skip_cbuf is %d\n",skip_cbuf); */

	for(cci = FIRST_LIST(&sti->tmem_list, next, prev);
	    cci != &sti->tmem_list && skip_cbuf > 0; 
	    cci = FIRST_LIST(cci, next, prev), skip_cbuf--) ;

	if (cci == &sti->tmem_list) goto done;

	union cbuf_meta cm;
	cm.c_0.v = cci->entry->c_0.v;			

	ret = (u32_t)cci->entry->c_0.th_id;

	if (!CBUF_IN_USE(cm.c.flags)) goto cache;

	/* DOUT("cm.c_0.v is %p \n", cm.c_0.v); */
	// Jiguo: a thread can ask for multiple cbuf items, so it
	// may find that it depends on itself
	/* DOUT("ret :: %d current thd : %d \n", ret, cos_get_thd_id()); */
	if (ret == cos_get_thd_id()){
		DOUT("Try to depend on itself ....\n");
		goto self;
	}

done:
	return ret;
cache:
	ret = -2;
	goto done;
self:
	ret = 0;
	goto done;
}
int periodic_wake_wait(spdid_t spdinv)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *te;
	u16_t tid = cos_get_thd_id();
	long long t;

	TAKE(spdid);
	te = te_pget(tid);
	if (NULL == te) BUG();
	if (!(te->flags & TE_PERIODIC)) goto err;
		
	assert(!EMPTY_LIST(te, next, prev));
	te->flags |= TE_BLOCKED;

	rdtscll(t);
	if (te->missed) {	/* we're late */
		long long diff;
		assert(te->completion);

		diff = (t - te->completion);
		te->lateness_tot += diff;
		//te->samples++;
		te->miss_lateness_tot += diff;
		//te->miss_samples++;
		
		te->completion = 0;
	} else {		/* on time! */
		te->completion = t;
	}
	RELEASE(spdid);

	if (-1 == sched_block(spdid, 0)) {
		prints("fprr: sched block failed in timed_event_periodic_wait.");
	}

	return 0;
err:
	RELEASE(spdid);
	return -1;
}
void cos_upcall_fn(upcall_type_t t, void *arg1, void *arg2, void *arg3)
{
	printc("thread %d passing arg1 %p here (type %d spd %ld)\n",
	       cos_get_thd_id(), arg1, t, cos_spd_id());

	switch (t) {
	case COS_UPCALL_THD_CREATE:
	/* New thread creation method passes in this type. */
	{
		if (arg1 == 0) {
			cos_init();
		}
		return;
	}
	default:
		/* fault! */
		*(int*)NULL = 0;
		return;
	}
	return;
}
Example #9
//static volatile int cur_itr = 0;
void trans_recv_lo(void)
{  
  unsigned int amnt_lo = 1;
  long evt_lo;
  td_t td_lo;
  char *params_lo = "8";

  printc("***LOW PRIO RECV STARTING***\n");
  evt_lo = evt_split(cos_spd_id(), 0, 0);
  assert(evt_lo > 0);
  td_lo = tsplit(cos_spd_id(), td_root, params_lo, strlen(params_lo), TOR_READ, evt_lo);
  printc("EVT_LO (%ld) TD_LO (%d)\n", evt_lo, td_lo);

  do {
    evt_wait(cos_spd_id(), evt_lo);
    //   if((amnt_lo++ % 1000) == 0)
    printc("lo prio count (%u) spd(%d) tid(%d)\n", amnt_lo++, cos_spd_id(), cos_get_thd_id());
  } while (1);//cur_itr++ < ITR);
  
  return;
}
Example #10
int net_close(spdid_t spdid, net_connection_t nc)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();

	if (!net_conn_valid(nc)) goto perm_err;
	ic = net_conn_get_internal(nc);
	if (NULL == ic) goto perm_err; /* should really be EINVAL */
	if (tid != ic->tid) goto perm_err;
	assert(ACTIVE == ic->thd_status);

	/* This should be called from within lwip, not here, but this
	 * is here to have comparable performance characteristics as
	 * if it were in lwip */
	portmgr_free(cos_spd_id(), /* u16_t port_num */ 0);

	__net_close(ic);
	return 0;
perm_err:
	return -EPERM;
}
Example #11
int fault_page_fault_handler(spdid_t spdid, void *fault_addr, int flags, void *ip)
{
	unsigned long r_ip; 	/* the ip to return to */
	int tid = cos_get_thd_id();
	int i;

	/* START UNCOMMENT FOR FAULT INFO */
	if (regs_active) BUG();
	regs_active = 1;
	cos_regs_save(tid, spdid, fault_addr, &regs);
	printc("Thread %d faults in spd %d @ %p\n",
	       tid, spdid, fault_addr);
	cos_regs_print(&regs);
	regs_active = 0;

	for (i = 0 ; i < 5 ; i++)
		printc("Frame ip:%lx, sp:%lx\n",
		       cos_thd_cntl(COS_THD_INVFRM_IP, tid, i, 0),
		       cos_thd_cntl(COS_THD_INVFRM_SP, tid, i, 0));
	/* END UNCOMMENT FOR FAULT INFO */

	/* remove from the invocation stack the faulting component! */
	assert(!cos_thd_cntl(COS_THD_INV_FRAME_REM, tid, 1, 0));

	/* Manipulate the return address of the component that called
	 * the faulting component... */
	assert(r_ip = cos_thd_cntl(COS_THD_INVFRM_IP, tid, 1, 0));
	/* ...and set it to its value -8, which is the fault handler
	 * of the stub. */
	assert(!cos_thd_cntl(COS_THD_INVFRM_SET_IP, tid, 1, r_ip-8));

	/* 
	 * Look at the booter: when recovery is happening, the sstub is
	 * set to 0x1, so we should just wait until recovery is done.
	 */
	if ((int)ip == 1) failure_notif_wait(cos_spd_id(), spdid);
	else         failure_notif_fail(cos_spd_id(), spdid);

	return 0;
}
Example #12
int net_recv(spdid_t spdid, net_connection_t nc, void *data, int sz)
{
//	struct udp_pcb *up;
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();
	int xfer_amnt = 0;

//	if (!cos_argreg_buff_intern(data, sz)) return -EFAULT;
	if (!net_conn_valid(nc)) return -EINVAL;

//	NET_LOCK_TAKE();
	ic = net_conn_get_internal(nc);
	if (NULL == ic) {
		//NET_LOCK_RELEASE();
		return -EINVAL;
	}
	if (tid != ic->tid) {
		//NET_LOCK_RELEASE();
		return -EPERM;
	}

	switch (ic->conn_type) {
	case UDP:
		xfer_amnt = cos_net_udp_recv(ic, data, sz);
		break;
	case TCP:
		xfer_amnt = cos_net_tcp_recv(ic, data, sz);
		break;
	case TCP_CLOSED:
//		__net_close(ic);
		xfer_amnt = -EPIPE;
		break;
	default:
		printc("net_recv: invalid connection type: %d", ic->conn_type);
		BUG();
	}
	assert(xfer_amnt <= sz);
	//NET_LOCK_RELEASE();
	return xfer_amnt;
}
static int channel_init(int channel)
{
	char *addr, *start;
	unsigned long i, sz;
	unsigned short int bid;
	int direction;

	direction = cos_trans_cntl(COS_TRANS_DIRECTION, channel, 0, 0);
	if (direction < 0) {
		channels[channel].exists = 0;
		return 0;
	}  
	channels[channel].exists = 1;
	channels[channel].direction = direction;

	sz = cos_trans_cntl(COS_TRANS_MAP_SZ, channel, 0, 0);
	assert(sz <= (4*1024*1024)); /* currently 4MB max */
	start = valloc_alloc(cos_spd_id(), cos_spd_id(), sz/PAGE_SIZE);
	assert(start);
	for (i = 0, addr = start ; i < sz ; i += PAGE_SIZE, addr += PAGE_SIZE) {
		assert(!cos_trans_cntl(COS_TRANS_MAP, channel, (unsigned long)addr, i));
	}
	cringbuf_init(&channels[channel].rb, start, sz);

	if (direction == COS_TRANS_DIR_LTOC) {
		bid = cos_brand_cntl(COS_BRAND_CREATE, 0, 0, cos_spd_id());
		assert(bid > 0);
		assert(!cos_trans_cntl(COS_TRANS_BRAND, channel, bid, 0));
		if (sched_add_thd_to_brand(cos_spd_id(), bid, cos_get_thd_id())) BUG();
		while (1) {
			int ret;
			if (-1 == (ret = cos_brand_wait(bid))) BUG();
			assert(channels[channel].t);
			evt_trigger(cos_spd_id(), channels[channel].t->evtid);
		}
	}


	return 0;
}
Example #14
static void
walk_stack_all(spdid_t spdid, struct cos_regs *regs)
{
    unsigned long *fp, *stack, fp_off;
    int i, tid = cos_get_thd_id();

    printc("Stack trace for thread %d [spdid, instruction pointer]:\n", tid);

    fp = (unsigned long *)regs->regs.bp;
    stack = map_stack(spdid, (vaddr_t)fp);
    printc("\t[%d, %lx]\n", spdid, (unsigned long)regs->regs.ip);
    walk_stack(spdid, fp, stack);
    unmap_stack(spdid, stack);

    assert(cos_spd_id() == cos_thd_cntl(COS_THD_INV_FRAME, tid, 0, 0));
    assert(spdid == cos_thd_cntl(COS_THD_INV_FRAME, tid, 1, 0));

    for (i = 2 ; (spdid = cos_thd_cntl(COS_THD_INV_FRAME, tid, i, 0)) != 0 ; i++) {
        unsigned long sp;

        /* We're ignoring the initial IPs: the IP is in the
         * invocation stubs, and no one cares about the
         * stubs */
        sp = cos_thd_cntl(COS_THD_INVFRM_SP, tid, i, 0);
        assert(sp);

        stack = map_stack(spdid, sp);
        /* The invocation stubs save ebp last, thus *(esp+16)
         * = ebp.  This offset corresponds to the number of
         * registers pushed in
         * SS_ipc_client_marshal_args... */
        fp_off = ((sp & (~PAGE_MASK))/sizeof(unsigned long));
        fp = (unsigned long *)&stack[fp_off];

        walk_stack(spdid, fp, stack);
        unmap_stack(spdid, stack);
    }

}
Example #15
static int interrupt_process(void *d, int sz, int *recv_len)
{
	unsigned short int ucid = cos_get_thd_id();
	unsigned int *buff;
	int max_len;
	struct thd_map *tm;
	unsigned int len;

	assert(d);

	tm = get_thd_map(ucid);
	assert(tm);
	if (rb_retrieve_buff(tm->uc_rb, &buff, &max_len)) {
		prints("net: could not retrieve buffer from ring.\n");
		goto err;
	}
	len = buff[0];
	*recv_len = len;
	if (unlikely(len > MTU)) {
		printc("len %d > %d\n", len, MTU);
		goto err_replace_buff;
	}
	memcpy(d, &buff[1], len);

	/* OK, recycle the buffer. */
	if (rb_add_buff(tm->uc_rb, buff, MTU)) {
		prints("net: could not add buffer to ring.");
	}

	return 0;

err_replace_buff:
	/* Recycle the buffer (essentially dropping packet)... */
	if (rb_add_buff(tm->uc_rb, buff, MTU)) {
		prints("net: OOM, and filed to add buffer.");
	}
err:
	return -1;
}
Example #16
void 
cos_init(void)
{
	static int first = 0;
	union sched_param sp;
	int i;
	

	if(first == 0){
		first = 1;

		for (i=0; i<PAGE_NUM; i++) s_addr[i] = 0;
		for (i=0; i<PAGE_NUM; i++) d_addr[i] = 0;

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = THREAD1;
		sched_create_thd(cos_spd_id(), sp.v, 0, 0);

	} else {
		timed_event_block(cos_spd_id(), 50);
		periodic_wake_create(cos_spd_id(), 1);
		i = 0;
		while(i++ < 80) { /* 80 x 10 x 4k  < 4M */
			printc("<<< MM RECOVERY TEST START (thd %d) >>>\n", cos_get_thd_id());
			get_test();
#ifdef BEST_TEST
			alias_test();
			revoke_test();
#endif

			/* all_in_one(); */

			printc("<<< MM RECOVERY TEST DONE!! >>> {%d}\n\n\n", i);
			periodic_wake_wait(cos_spd_id());
		}
	}
	
	return;
}
CSTUB_FN(int, twmeta)(struct usr_inv_cap *uc,
                      spdid_t spdid, td_t td, const char *key,
                      unsigned int klen, const char *val, unsigned int vlen)
{
    int ret;
    long fault = 0;
    cbuf_t cb;
    int sz = sizeof(struct __sg_twmeta_data) + klen + vlen + 1;
    struct __sg_twmeta_data *d;
    struct rec_data_tor *rd;

    assert(key && val && klen > 0 && vlen > 0);
    assert(key[klen] == '\0' && val[vlen] == '\0' && sz <= PAGE_SIZE);

redo:
    printc("<<< In: call twmeta (thread %d) >>>\n", cos_get_thd_id());
    rd = rd_update(td, STATE_TWMETA);
    assert(rd);

    d = cbuf_alloc(sz, &cb);
    if (!d) assert(0); //return -1;

    d->td   = td;   // do not pass rd->s_tid since this is only for recovery
    d->klen = klen;
    d->vlen = vlen;
    memcpy(&d->data[0], key, klen + 1);
    memcpy(&d->data[klen + 1], val, vlen + 1);

    CSTUB_INVOKE(ret, fault, uc, 3, spdid, cb, sz);

    if (unlikely(fault)) {
        CSTUB_FAULT_UPDATE();
        goto redo;
    }

    cbuf_free(cb);
    return ret;
}
Example #18
/*
 * FIXME: we hold the lock across a series of memory allocations.
 * This is never good, but the code is much simpler for it.  A
 * trade-off I'm commonly making now.
 */
long evt_create(spdid_t spdid)
{
    u16_t tid = cos_get_thd_id();
    struct evt_grp *g;
    struct evt *e;
    int ret = -ENOMEM;

    lock_take(&evt_lock);
    g = evt_grp_find(tid);
    /* If the group associated with this thread hasn't been
     * created yet. */
    if (!g) {
        g = evt_grp_create(spdid, tid);
        if (NULL == g) goto err;
        e = __evt_new(g);
        if (NULL == e) {
            evt_grp_free(g);
            goto err;
        }
        evt_grp_add(g);
    } else {
        e = __evt_new(g);
        if (NULL == e) goto err;
    }
    e->extern_id = mapping_create(e);
    e->n_received = 0;
    if (0 > e->extern_id) goto free_evt_err;
    ret = e->extern_id;
done:
    lock_release(&evt_lock);
    return ret;
free_evt_err:
    __evt_free(e);
err:
    goto done;
}
Example #19
void cos_init(void)
{
	static int first = 0, flag = 0;
	union sched_param sp;

	if(first == 0){
		first = 1;
		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 8;
		high = sched_create_thd(cos_spd_id(), sp.v, 0, 0);
	} else {
		if (cos_get_thd_id() == high) {
			periodic_wake_create(cos_spd_id(), NOISE_PERIOD);
			while(1){
				periodic_wake_wait(cos_spd_id());
				/* printc("PERIODIC: noise....(thd %d in spd %ld)\n", */
				/*        cos_get_thd_id(), cos_spd_id()); */
				sinusoid_spike();
			}
		}
	}
	
	return;
}
Example #20
void 
cos_init(void)
{
	static int first = 0;
	union sched_param sp;
	int i, j, k;
	
	if(first == 0){
		first = 1;

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 10;
		warm = sched_create_thd(cos_spd_id(), sp.v, 0, 0);

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 11;
		high = sched_create_thd(cos_spd_id(), sp.v, 0, 0);

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 15;
		med = sched_create_thd(cos_spd_id(), sp.v, 0, 0);

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 20;
		low = sched_create_thd(cos_spd_id(), sp.v, 0, 0);

	} else {
#ifdef EXAMINE_LOCK
		if (cos_get_thd_id() == high) {
			printc("<<<high thd %d>>>\n", cos_get_thd_id());
			timed_event_block(cos_spd_id(), 5);
			ec3_ser1_test();
		}

		if (cos_get_thd_id() == med) {
			printc("<<<med thd %d>>>\n", cos_get_thd_id());
			timed_event_block(cos_spd_id(), 2);
			ec3_ser1_test();
		}

		if (cos_get_thd_id() == low) {
			printc("<<<low thd %d>>>\n", cos_get_thd_id());
			ec3_ser1_test();
		}
#endif

#ifdef EXAMINE_EVT
		if (cos_get_thd_id() == high) {
			printc("<<<high thd %d>>>\n", cos_get_thd_id());
			ec3_ser1_test();
		}
		
		if (cos_get_thd_id() == med) {
			printc("<<<med thd %d>>>\n", cos_get_thd_id());
			ec3_ser1_test();
		}

		if (cos_get_thd_id() == warm) {
			printc("<<<warm thd %d>>>\n", cos_get_thd_id());
			ec3_ser2_test();
		}
		
		if (cos_get_thd_id() == low) {
			printc("<<<low thd %d>>>\n", cos_get_thd_id());
			ec3_ser2_test();
		}
#endif
	}

	return;
}
Example #21
static inline long
evt_wait_all(void) { return evt_wait(cos_spd_id(), evt_all[cos_get_thd_id()]); }
Example #22
static inline long
evt_get(void) { return evt_get_thdid(cos_get_thd_id()); }
static inline void block_cli_if_desc_update_sched_wakeup(spdid_t spdid,
							 u16_t thdid)
{
	call_desc_update(cos_get_thd_id(), state_sched_wakeup);
}
void 
cos_init(void *arg)
{
	static int first = 1;

	if (first) {
		union sched_param sp;

		first = 0;
		INIT_LIST(&threads, next, prev);
		init_spds();

		sp.c.type = SCHEDP_PRIO;
		sp.c.value = 5;

		if (sched_create_thd(cos_spd_id(), sp.v, 0, 0) == 0) BUG();
		return;
	}
	DOUT("thd %d Tmem policy running.....\n", cos_get_thd_id());
#ifdef THD_POOL
	printc("<<<Thd Pool with total %d tmems, component size %d>>>\n", MAX_NUM_MEM, THD_POOL);
	if (THD_POOL != 1)
		thdpool_max_policy();
	else
		thdpool_1_policy();
#else
	printc("<<<Now using Algorithm %d, total number of tmems:%d >>>\n", ALGORITHM, MAX_NUM_MEM);
	DOUT("Tmem policy: %d in spd %ld\n", cos_get_thd_id(), cos_spd_id());
	init_policy();
#endif

	periodic_wake_create(cos_spd_id(), POLICY_PERIODICITY);

	/* Wait for all other threads to initialize */
	int i = 0, waiting = 100 / POLICY_PERIODICITY, counter = 0, report_period = 100 / POLICY_PERIODICITY;
	do {
		periodic_wake_wait(cos_spd_id());
	} while (i++ < waiting);

	init_thds();

	//unsigned long long s,e;
	while (1) {
		counter++;
		if (counter % report_period == 0) {
			/* report tmems usage */
			cbufmgr_buf_report();
			stkmgr_stack_report();
		}
		gather_data(counter % report_period);
#ifdef THD_POOL
		if (counter % report_period == 0) {
			if (THD_POOL == 1)
				thdpool_1_policy();
			else
				thdpool_max_policy(counter % report_period);
		}
#else
		//rdtscll(s);
		DOUT("POLICY starts!\n");
		policy();
		DOUT("POLICY ends!\n");
		//rdtscll(e);
		//printc("SP:%llu cycles\n",e-s);
#endif
		periodic_wake_wait(cos_spd_id());
	}
	return;
}
void cos_init(void *arg)
{

	int start_time_in_ticks = 0;
	int duration_time_in_ticks = 0;

	int local_period = 0;

	static int first = 1;

//	static int pre_run = 0;

	if (first) {
		union sched_param sp;
		int i;
		first = 0;
		parse_initstr();
		assert(priority);
		sp.c.type = SCHEDP_PRIO;
		sp.c.value = priority;

		if (sched_create_thd(cos_spd_id(), sp.v, 0, 0) == 0) BUG();
		if (priority == 30) { //best effort thds
			printc("thd num %d\n",thd_num);
				for (i=0; i<(thd_num-1); i++)
				sched_create_thd(cos_spd_id(), sp.v, 0, 0);
		}
		return;
	}

	local_period = period;

	unsigned long cyc_per_tick;
	cyc_per_tick = sched_cyc_per_tick();

	unsigned long exe_cycle;
	exe_cycle = cyc_per_tick/US_PER_TICK;
	exe_cycle = exe_cycle*exe_t;

	start_time_in_ticks = start_time*100;
	duration_time_in_ticks = duration_time*100;

	printc("In spd %ld Thd %d, period %d ticks, execution time %d us in %lu cycles\n", cos_spd_id(),cos_get_thd_id(), local_period, exe_t, exe_cycle);

	/* int event_thd = 0; */
	/* unsigned long pre_t_0 = 0; */
	if (local_period <= 0){/* Create all non-periodic tasks */

#ifdef BEST_EFF   // for best effort sys now
		int i;
		if (first == 0){
//			for (i= 0;i<5;i++) create_thd("r0");
			pre_t_0 = sched_timestamp();
			first = 1;
		}

		printc("<<<<1 thd %d in spd %ld\n",cos_get_thd_id(), cos_spd_id());
		event_thd = cos_get_thd_id();

		for (i = 0 ; i < 100; i++){
			left(200000,200000,0,0);
		}

		unsigned long pre_run_remained = 0;
		unsigned long numm = 10000*cyc_per_tick/US_PER_TICK;
		while(1) {
			for (i = 0 ; i < 10; i++){
				pre_run_remained = numm;  /* refill */
				pre_run_remained = left(20000,20000,0,0);
				/* printc(" thd %d pre_t_0 %u pre_t %lu\n", cos_get_thd_id(), pre_t_0, pre_t); */
			}
			unsigned long pre_t = sched_timestamp();
			if ( pre_t > pre_t_0 + 1*100) break;
		}
		printc("BF thd %d finish pre_run\n", cos_get_thd_id());

#endif
		/* pub_duration_time_in_ticks = duration_time_in_ticks; */
		timed_event_block(cos_spd_id(), start_time_in_ticks);
		printc("<<<<2 thd %d in spd %ld\n",cos_get_thd_id(), cos_spd_id());
	}
	else {/* Create all periodic tasks */
		if (local_period == 0 || (exe_t > local_period*US_PER_TICK)) BUG();
		/* if (cos_get_thd_id() == 20) { */
		/* 	printc("pre allocating ...\n"); */
		/* 	int mm; */
		/* 	for (mm = 0 ; mm < 3000; mm++) */
		/* 		left(70000, 70000, 0, 0); */
		/* 	printc("done.\n"); */
		/* } */
		periodic_wake_create(cos_spd_id(), local_period);

		int i = 0;
		int waiting = 0;

		if(start_time_in_ticks <= 0)
			/* waiting = (50+100*10) / local_period;   /\* use 50 before. Now change to let BF threads run first 10 seconds before 0 second *\/ */
			waiting = (50) / local_period;   /* use 50 before. Now change to let BF threads run first 10 seconds before 0 second */
		else
			waiting = start_time_in_ticks / local_period;
		do {
			periodic_wake_wait(cos_spd_id());
		} while (i++ < waiting); /* wait 50 ticks */
	}

/* Let all tasks run */
	unsigned long exe_cyc_remained = 0;
//	unsigned long long t;
//	unsigned long val;
	int refill_number = 0;

	unsigned long exe_cyc_event_remained = 0;
//	printc("start...!!\n");
	while (1) {
		if(local_period <= 0){			/* used for transient non-periodic tasks only */
			exe_cyc_event_remained = exe_cycle;  /* refill */
			while(1) {
				exe_cyc_event_remained = exe_cycle;  /* refill */
				/* rdtscll(t); */
				/* val = (int)(t & (TOTAL_AMNT-1)); */
				/* if (val >= 64){ */
					exe_cyc_event_remained = left(exe_cyc_event_remained,exe_cycle,0,0);
				/* } */
				/* else{ */
				/* 	exe_cyc_event_remained = right(exe_cyc_event_remained,exe_cycle,0,0); */
				/* } */
				unsigned long t = sched_timestamp();
				/* if ( t > (unsigned long)(7*100 + start_time_in_ticks + duration_time_in_ticks)) { */
				/* 	printc("thd %d left!!!\n",cos_get_thd_id()); */
				if ( t > (unsigned long)( start_time_in_ticks + duration_time_in_ticks)) {
					/* printc("thd %d left>>>\n",cos_get_thd_id()); */

					timed_event_block(cos_spd_id(), 10000);
				}
			}
		}
		else{
			/* rdtscll(start); */
			/* used for transient periodic tasks only */
			if (start_time_in_ticks > 0 && (local_period*refill_number > duration_time_in_ticks)){
				for(;;) periodic_wake_wait(cos_spd_id());
			}
			exe_cyc_remained = exe_cycle;  /* refill */
			/* printc("thd %d in home comp, going to call\n",cos_get_thd_id()); */
			while(exe_cyc_remained) {
				exe_cyc_remained = left(exe_cyc_remained,exe_cycle,0,0);	  
			}
			
			/* rdtscll(end); */
			/* printc("%d, times : %d\n", cos_get_thd_id(), times); */
			/* printc("\n @@thd %ld is sleeping in spd %d\n", cos_get_thd_id(), cos_spd_id()); */
			/* printc("thd %d back in home comp, going to block\n",cos_get_thd_id()); */
			periodic_wake_wait(cos_spd_id());
			/* printc("thd %d woke up.\n",cos_get_thd_id()); */
			refill_number++;	  
			/* printc("\n @@thd %ld is waking in spd %d\n", cos_get_thd_id(), cos_spd_id()); */
			/* printc("\n thd %d refilled...%d\n", cos_get_thd_id(), refill_number); */
		}
	}
	return;
}
Example #26
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *sent, *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);

	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;

	/* Apparently, lock_take calls haven't been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}
	sent = bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given we
		 * are holding the component lock here, we should get
		 * switched _back_ to so that we can wake the rest of
		 * the threads. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: keep this
			 * lock until now so that if we wake another
			 * thread, and it begins execution, the system
			 * will switch back to this thread so that we
			 * can wake up the rest of the waiting threads
			 * (one of which might have the highest
			 * priority).  We release before we wake the
			 * last thread as we don't really need the
			 * lock anymore, and it will avoid quite a few
			 * invocations.*/
			RELEASE(spdid);
		}

		/* Wakeup the way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}
Example #27
int net_send(spdid_t spdid, net_connection_t nc, void *data, int sz)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();
	int ret = sz;

//	if (!cos_argreg_buff_intern(data, sz)) return -EFAULT;
	if (!net_conn_valid(nc)) return -EINVAL;
	if (sz > MAX_SEND) return -EMSGSIZE;

//	NET_LOCK_TAKE();
	ic = net_conn_get_internal(nc);
	if (NULL == ic) {
		ret = -EINVAL;
		goto err;
	}
	if (tid != ic->tid) {
		ret = -EPERM;
		goto err;
	}

	switch (ic->conn_type) {
	case UDP:
	{
		struct udp_pcb *up;
		struct pbuf *p;

		/* There's no blocking in the UDP case, so this is simple */
		up = ic->conn.up;
		p = pbuf_alloc(PBUF_TRANSPORT, sz, PBUF_ROM);
		if (NULL == p) {
			ret = -ENOMEM;
			goto err;
		}
		p->payload = data;

		if (ERR_OK != udp_send(up, p)) {
			pbuf_free(p);
			/* IP/port must not be set */
			ret = -ENOTCONN;
			goto err;
		}
		pbuf_free(p);
		break;
	}
	case TCP:
	{
		struct tcp_pcb *tp;
#define TCP_SEND_COPY
#ifdef TCP_SEND_COPY
		void *d;
		struct packet_queue *pq;
#endif
		tp = ic->conn.tp;
		if (tcp_sndbuf(tp) < sz) { 
			ret = 0;
			break;
		}
#ifdef TCP_SEND_COPY
		pq = malloc(sizeof(struct packet_queue) + sz);
		if (unlikely(NULL == pq)) {
			ret = -ENOMEM;
			goto err;
		}
#ifdef TEST_TIMING
		pq->ts_start = timing_record(APP_PROC, ic->ts_start);
#endif
		pq->headers = NULL;
		d = net_packet_data(pq);
		memcpy(d, data, sz);
		if (ERR_OK != (ret = tcp_write(tp, d, sz, 0))) {
#else
		if (ERR_OK != (ret = tcp_write(tp, data, sz, TCP_WRITE_FLAG_COPY))) {
#endif
			free(pq);
			printc("tcp_write returned %d (sz %d, tcp_sndbuf %d, ERR_MEM: %d)", 
			       ret, sz, tcp_sndbuf(tp), ERR_MEM);
			BUG();
		}
		/* No implementation of Nagle's algorithm yet.  Send
		 * out the packet immediately if possible. */
		if (ERR_OK != (ret = tcp_output(tp))) {
			printc("tcp_output returned %d, ERR_MEM: %d", ret, ERR_MEM);
			BUG();
		}
		ret = sz;

		break;
	}
	case TCP_CLOSED:
		ret = -EPIPE;
		break;
	default:
		BUG();
	}
err:
//	NET_LOCK_RELEASE();
	return ret;
}

/************************ LWIP integration: **************************/

struct ip_addr ip, mask, gw;
struct netif   cos_if;

static void cos_net_interrupt(char *packet, int sz)
{
	void *d;
	int len;
	struct pbuf *p;
	struct ip_hdr *ih;
	struct packet_queue *pq;
#ifdef TEST_TIMING
	unsigned long long ts;
#endif
//	printc(">>> %d\n", net_lock.lock_id);
	NET_LOCK_TAKE();
//	printc("<<< %d\n", net_lock.lock_id);

	assert(packet);
	ih = (struct ip_hdr*)packet;
	if (unlikely(4 != IPH_V(ih))) goto done;
	len = ntohs(IPH_LEN(ih));
	if (unlikely(len != sz || len > MTU)) {
		printc("len %d != %d or > %d", len, sz, MTU);
		goto done;
	}

	p = pbuf_alloc(PBUF_IP, len, PBUF_ROM);
	if (unlikely(!p)) {
		prints("OOM in interrupt: allocation of pbuf failed.\n");
		goto done;
	}

	/* For now, we're going to do an additional copy.  Currently,
	 * packets should be small, so this shouldn't hurt that badly.
	 * This is done because 1) we are freeing the packet
	 * elsewhere, 2) we want to malloc some (small) packets to
	 * save space and free up the ring buffers, 3) it is difficult
	 * to know in (1) which deallocation method (free or return to
	 * ring buff) to use */
	pq = malloc(len + sizeof(struct packet_queue));
	if (unlikely(NULL == pq)) {
		printc("OOM in interrupt: allocation of packet data (%d bytes) failed.\n", len);
		pbuf_free(p);
		goto done;
	}
	pq->headers = d = net_packet_data(pq);
#ifdef TEST_TIMING
#ifdef TCP_SEND_COPY
	ts = pq->ts_start = timing_timestamp();
#endif	
#endif	
	memcpy(d, packet, len);
	p->payload = p->alloc_track = d;
	/* hand off packet ownership here... */
	if (ERR_OK != cos_if.input(p, &cos_if)) {
		prints("net: failure in IP input.");
		pbuf_free(p);
		goto done;
	}

#ifdef TEST_TIMING
	timing_record(UPCALL_PROC, ts);
#endif
done:
	NET_LOCK_RELEASE();
	return;
}
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id, unsigned int microsec)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = 0;
	
//	print("thread %d from spd %d locking for %d microseconds.", curr, spdid, microsec);

	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	if (0 == microsec) {
		ret = TIMER_EXPIRED;
		goto error;
	}
	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) {
		ret = -1;
		//print("take wtf%d%d%d", 0,0,0);
		goto error;
	}
	if (lock_is_thd_blocked(ml, curr)) {
		prints("lock: lock_is_thd_blocked failed in lock_component_take\n");
		goto error;
	}

	/* The calling component needs to retry its user-level lock:
	 * some preemption has caused the generation count to get off,
	 * i.e., we don't have the most up-to-date view of the
	 * lock's state */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 0;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	blocked_desc.timed = (TIMER_EVENT_INF != microsec);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* Bypass calling the timed event component if there is an infinite wait */
//	assert(TIMER_EVENT_INF == microsec);
//	assert(!blocked_desc.timed);
	if (TIMER_EVENT_INF == microsec) {
		if (-1 == sched_block(spdid, thd_id)) BUG();
		if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
		/* 
		 * OK, this seems ridiculous but here is the rationale: assume
		 * we are a middle-prio thread, and were just woken by a low
		 * priority thread.  We will preempt that thread when woken,
		 * and will continue here.  If a high priority thread is also
		 * waiting on the lock, then we would preempt the low priority
		 * thread while it should wake the high prio thread.  The
		 * following critical section will switch to the low prio
		 * thread that still holds the component lock.  See the
		 * comments in lock_component_release. 
		 */
		//TAKE(spdid);
		//RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
		ret = 0;
	} else {
		assert(0);
#ifdef NIL
		/* ret here will fall through.  We do NOT use the
		 * dependency here as I can't think through the
		 * repercussions */
		if (-1 == (ret = timed_event_block(spdid, microsec))) return ret;

		/* 
		 * We might have woken from a timeout, which means
		 * that we need to remove this thread from the waiting
		 * list for the lock.
		 */
		TAKE(spdid);
		ml = lock_find(lock_id, spd);
		if (!ml) {
			ret = -1;
			goto error;
		}
		REM_LIST(&blocked_desc, next, prev);
		RELEASE(spdid);

		ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0); 
		/* ret is set to the amnt of time we blocked */
#endif 
	}
	return ret;
error:
	RELEASE(spdid);
	return ret;
}
Example #29
static int __net_bind(spdid_t spdid, net_connection_t nc, struct ip_addr *ip, u16_t port)
{
	struct intern_connection *ic;
	u16_t tid = cos_get_thd_id();
	int ret = 0;

	//NET_LOCK_TAKE();
	if (!net_conn_valid(nc)) {
		ret = -EINVAL;
		goto done;
	}
	ic = net_conn_get_internal(nc);
	if (NULL == ic) {
		ret = -EINVAL;
		goto done;
	}
	if (tid != ic->tid) {
		ret = -EPERM;
		goto done;
	}
	assert(ACTIVE == ic->thd_status);

	if (portmgr_bind(cos_spd_id(), port)) {
		ret = -EADDRINUSE;
		goto done;
	}

	switch (ic->conn_type) {
	case UDP:
	{
		struct udp_pcb *up;

		up = ic->conn.up;
		assert(up);
		if (ERR_OK != udp_bind(up, ip, port)) {
			ret = -EPERM;
			goto done;
		}
		break;
	}
	case TCP:
	{
		struct tcp_pcb *tp;
		
		tp = ic->conn.tp;
		assert(tp);
		if (ERR_OK != tcp_bind(tp, ip, port)) {
			ret = -ENOMEM;
			goto done;
		}
		break;
	}
	case TCP_CLOSED:
//		__net_close(ic);
		ret = -EPIPE;
		break;
	default:
		BUG();
	}

done:
	//NET_LOCK_RELEASE();
	return ret;
}
Example #30
/* 
 * Dependencies here (thus priority inheritance) will NOT be used if
 * you specify a timeout value.
 *
 * Return 0: lock taken, -1: could not find lock, 1: inconsistency -- retry!
 */
int lock_component_take(spdid_t spd, unsigned long lock_id, unsigned short int thd_id)
{
	struct meta_lock *ml;
	spdid_t spdid = cos_spd_id();
	unsigned short int curr = (unsigned short int)cos_get_thd_id();
	struct blocked_thds blocked_desc = {.thd_id = curr};
	int ret = -1;
	
	ACT_RECORD(ACT_LOCK, spd, lock_id, cos_get_thd_id(), thd_id);
	TAKE(spdid);

	ml = lock_find(lock_id, spd);
	/* tried to access a lock not yet created */
	if (!ml) goto error;
	assert(!lock_is_thd_blocked(ml, curr));

	/* The calling component needs to retry its user-level lock:
	 * some preemption has caused the generation count to get off,
	 * i.e., we don't have the most up-to-date view of the
	 * lock's state */
	if (ml->gen_num != generation) {
		ml->gen_num = generation;
		ret = 1;
		goto error;
	}
	generation++;

	/* Note that we are creating the list of blocked threads from
	 * memory allocated on the individual thread's stacks. */
	INIT_LIST(&blocked_desc, next, prev);
	ADD_LIST(&ml->b_thds, &blocked_desc, next, prev);
	//ml->owner = thd_id;

	RELEASE(spdid);

	/* printc("cpu %ld: thd %d going to blk waiting for lock %d\n", cos_cpuid(), cos_get_thd_id(), (int)lock_id); */
	if (-1 == sched_block(spdid, thd_id)) {
		printc("Deadlock including thdids %d -> %d in spd %d, lock id %d.\n", 
		       cos_get_thd_id(), thd_id, spd, (int)lock_id);
		debug_print("BUG: Possible deadlock @ "); 
		assert(0);
		if (-1 == sched_block(spdid, 0)) assert(0);
	}

	if (!EMPTY_LIST(&blocked_desc, next, prev)) BUG();
	/* 
	 * OK, this seems ridiculous but here is the rationale: assume
	 * we are a middle-prio thread, and were just woken by a low
	 * priority thread.  We will preempt that thread when woken,
	 * and will continue here.  If a high priority thread is also
	 * waiting on the lock, then we would preempt the low priority
	 * thread while it should wake the high prio thread.  The
	 * following critical section will switch to the low prio
	 * thread that still holds the component lock.  See the
	 * comments in lock_component_release. 
	 */
	//TAKE(spdid);
	//RELEASE(spdid);

	ACT_RECORD(ACT_WAKEUP, spd, lock_id, cos_get_thd_id(), 0);
	ret = 0;
done:
	return ret;
error:
	RELEASE(spdid);
	goto done;
}
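/*
 * A hedged usage sketch of the return-code convention documented above
 * (0: we blocked and should retry the user-level lock, 1: generation
 * inconsistency, retry immediately, -1: no such lock).
 * userlevel_try_take() is a hypothetical fast-path helper, not part of
 * the component interface; only lock_component_take() is taken from
 * the code above.
 */
static int contend_for_lock(spdid_t spd, unsigned long lock_id,
			    unsigned short int holder)
{
	int ret;

	while (!userlevel_try_take(lock_id)) {
		ret = lock_component_take(spd, lock_id, holder);
		if (ret < 0)  return -1;	/* lock does not exist */
		if (ret == 1) continue;		/* generation changed: retry */
		/* ret == 0: we blocked and were woken by a release; loop
		 * back and retry the user-level fast path. */
	}
	return 0;
}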