Example #1
0
/*
 * Micro-benchmark: measure raw context-switch cost between two threads.
 * The first caller becomes "high" and blocks; the second becomes "low",
 * wakes high, and the pair then round-trips once while rdtscll() brackets
 * the switches.  The measured interval covers two switches, hence the
 * ">> 1" when printing.
 */
void call_cs(void)
{
	static int first = 0;
	static int high, low;
	u64_t start = 0, end = 0;

	/* Second thread to arrive: record our id and release the waiter. */
	if(first == 1){
		low = cos_get_thd_id();
		sched_wakeup(cos_spd_id(), high);
	}

	/* First thread to arrive: park until the partner shows up, then
	 * timestamp and block again so the partner runs. */
	if(first == 0){
		first = 1;
		high = cos_get_thd_id();
		sched_block(cos_spd_id(), 0);
		rdtscll(start);
		sched_block(cos_spd_id(), low);
	}

	if (cos_get_thd_id() == low) {
		sched_wakeup(cos_spd_id(), high);
	}

	if (cos_get_thd_id() == high) {
		rdtscll(end);
		printc("context switch cost: %llu cycs\n", (end-start) >> 1);
		first = 0;
	}
	/* NOTE(review): the function's closing brace is missing from this
	 * snippet -- the definition appears truncated here. */
Example #2
0
/*
 * Scheduler test driver: forks 15 CPU-bound children at staggered nice
 * levels, waits for them, then forks 15 more that sleep on two wait
 * queues and are woken in bulk.  Exercises fork/nice/wait/sleep/wakeup
 * of the custom "sched_" API.  NOTE(review): assumes signal1, wait1 and
 * wait2 are defined elsewhere in this program -- confirm against the
 * full source.
 */
void init() {
	signal(SIGUSR1, &signal1);
	printf("[Init][Pid: %d] Scheduler Testing Program\n", sched_getpid());
	sched_nice(-20);
	/* Phase 1: CPU-bound children, each timing its own busy loop. */
	for (int i = 0; i < 15; i++) {
		if (sched_fork() == 0) {
			printf("[Child][Pid: %d] Parent Pid: %d\n", sched_getpid(), sched_getppid());
			sched_nice(-19 + i);
			struct timespec start, stop;
			clock_gettime(CLOCK_REALTIME, &start);
			/* crude CPU burn; duration depends on scheduling */
			for (long int j = 0; j < 1000000000; j++);
			clock_gettime(CLOCK_REALTIME, &stop);
			printf("[Child][Pid: %d] Execution Complete. Took %ld Seconds.\n", sched_getpid(), (long int)(stop.tv_sec - start.tv_sec));
			sched_exit(i);
			printf("[Child][Pid: %d] This Will Never Get Executed\n", sched_getpid());
		}
	}
	printf("[Init][Pid: %d] Process Information\n", sched_getpid());
	sched_ps();
	/* Reap all 15 children. */
	for (int i = 0; i < 15; i++) {
		int returncode;
		int cc = sched_wait(&returncode);
		printf("[Init][Pid: %d] Child Returned [%d] With Exit Code [%d]\n", sched_getpid(), cc, returncode);
	}
	/* Waiting with no children left should report an error code. */
	int returncode;
	int cc = sched_wait(&returncode);
	printf("[Init][Pid: %d] Calling Wait With No Children Returns [%d]\n", sched_getpid(), cc);
	sched_sleep(&wait1);
	/* Phase 2: children that block on one of two wait queues. */
	for (int i = 0; i < 15; i++) {
		if (sched_fork() == 0) {
			printf("[Child][Pid: %d] Parent Pid: %d\n", sched_getpid(), sched_getppid());
			sched_nice(-19 + i);
			if (i % 2 == 1)
				sched_sleep(&wait1);
			else
				sched_sleep(&wait2);
			printf("[Child][Pid: %d] Execution Complete.\n", sched_getpid());
			sched_exit(i);
		}
	}
	/* Busy-wait so all children have time to block before the dump. */
	for (int i = 0; i < 1000000000; i++);
	printf("[Init][Pid: %d] Process Information\n", sched_getpid());
	sched_ps();
	printf("Wakeup 2\n");
	sched_wakeup(&wait2);
	printf("Wakeup 1\n");
	sched_wakeup(&wait1);
	for (int i = 0; i < 15; i++) {
		int returncode;
		int cc = sched_wait(&returncode);
		printf("[Init][Pid: %d] Child Returned [%d] With Exit Code [%d]\n", sched_getpid(), cc, returncode);
	}
	printf("[Init][Pid: %d] Exiting Testing Program. Passing Control Back To Idle\n", sched_getpid());
	sched_exit(0);
}
Example #3
0
/* wake up all blocked threads whose request size smaller than or equal to available size */
static void
cbuf_thd_wake_up(struct cbuf_comp_info *cci, unsigned long sz)
{
	struct blocked_thd *bthd, *next;
	unsigned long long cur, tot;

	/* NOTE(review): if num_blocked_thds is an unsigned type this
	 * assert is vacuously true -- confirm its declared type. */
	assert(cci->num_blocked_thds >= 0);
	/* Cannot wake up thd when in shrink */
	assert(cci->target_size >= cci->allocated_size);

	if (cci->num_blocked_thds == 0) return;
	bthd = cci->bthd_list.next;
	while (bthd != &cci->bthd_list) {
		/* Fetch the successor first: REM_LIST below unlinks bthd. */
		next = FIRST_LIST(bthd, next, prev);
		if (bthd->request_size <= sz) {
			REM_LIST(bthd, next, prev);
			cci->num_blocked_thds--;
			/* account blocking latency for this thread */
			rdtscll(cur);
			tot = cur-bthd->blk_start;
			cci->track.blk_tot += tot;
			if (tot > cci->track.blk_max) cci->track.blk_max = tot;
			sched_wakeup(cos_spd_id(), bthd->thd_id);
		}
		bthd = next;
	}
	/* No one left waiting: stop relinquishing cbufs. */
	if (cci->num_blocked_thds == 0) cbuf_unmark_relinquish_all(cci);
}
Example #4
0
/*
 * Trigger the event identified by extern_evt.  If a thread is blocked
 * waiting on it, __evt_trigger() returns its id and we wake it after
 * dropping the event lock.  Returns 0 on success, -1 if the event id
 * cannot be mapped.
 */
int evt_trigger(spdid_t spdid, long extern_evt)
{
	struct evt *e;
	int ret = 0;

	lock_take(&evt_lock);

	e = mapping_find(extern_evt);
	if (NULL == e) goto err;

	ACT_RECORD(ACT_TRIGGER, spdid, e->extern_id, cos_get_thd_id(), 0);
	/* Trigger an event being waited for? */
	if (0 != (ret = __evt_trigger(e))) {
		/* Release before waking to avoid holding the lock across
		 * a potential thread switch.
		 * NOTE(review): e->extern_id is read after the lock is
		 * released -- verify e cannot be freed concurrently. */
		lock_release(&evt_lock);
		ACT_RECORD(ACT_WAKEUP, spdid, e->extern_id, cos_get_thd_id(), ret);
		if (sched_wakeup(cos_spd_id(), ret)) BUG();
	} else {
		lock_release(&evt_lock);
	}

	return 0;
err:
	lock_release(&evt_lock);
	return -1;
}
Example #5
0
/* Alarm fired: wake the kvfsd thread that sent this event. */
static EVENT_HANDLER(kvfsd_alarm_event_handler)
{
	struct thread_s *sender = event_get_senderId(event);

	sched_wakeup(sender);
	return 0;
}
Example #6
0
/*
 * Server-side wakeup stub: on a detected crash route the request
 * through the recovery helper, otherwise invoke the scheduler directly.
 */
int __sg_sched_wakeup(spdid_t spdid, unsigned short int thd_id, int crash_flag)
{
	if (unlikely(crash_flag)) return sched_wakeup_helper(spdid, thd_id);

	return sched_wakeup(spdid, thd_id);
}
Example #7
0
/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.
 *
 *	Caller must hold the LWP's lock (asserted below).  Depending on
 *	the LWP's state this either hands it back to whoever stopped it,
 *	marks it on-proc, or selects a CPU and enqueues it runnable.
 */
void
sleepq_remove(sleepq_t *sq, lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	/* Detach from the sleep queue and clear sleep bookkeeping. */
	TAILQ_REMOVE(sq, l, l_sleepchain);
	l->l_syncobj = &sched_syncobj;
	l->l_wchan = NULL;
	l->l_sleepq = NULL;
	l->l_flag &= ~LW_SINTR;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * If not sleeping, the LWP must have been suspended.  Let whoever
	 * holds it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, spc->spc_lwplock);
		return;
	}

	/* Update sleep time delta, call the wake-up handler of scheduler */
	l->l_slpticksum += (hardclock_ticks - l->l_slpticks);
	sched_wakeup(l);

	/* Look for a CPU to wake up */
	l->l_cpu = sched_takecpu(l);
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	/*
	 * Set it running.
	 */
	spc_lock(ci);
	lwp_setlock(l, spc->spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l, false);
	spc_unlock(ci);
}
Example #8
0
/* 
 * Receive packet from the lwip layer and place it in a buffer to the
 * cos udp layer.  Here we'll deallocate the pbuf, and use a custom
 * structure encoded in the packet headers to keep the queue of data
 * to be read by clients.
 */
static void cos_net_lwip_udp_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p,
				   struct ip_addr *ip, u16_t port)
{
	struct intern_connection *ic;
	struct packet_queue *pq, *last;
	void *headers;

	/* We should not receive a list of packets unless it is from
	 * this host to this host (then the headers will be another
	 * packet), but we aren't currently supporting this case. */
	assert(1 == p->ref);
	assert(NULL == p->next && p->tot_len == p->len);
	assert(p->len > 0);
	ic = (struct intern_connection*)arg;
	assert(UDP == ic->conn_type);

	headers = cos_net_header_start(p, UDP);
	assert (NULL != headers);
	/* Over our allocation??? */
	if (ic->incoming_size >= UDP_RCV_MAX) {
		/* Drop the packet: receive budget exhausted. */
		assert(ic->thd_status != RECVING);
		assert(p->type == PBUF_ROM);
		//free(net_packet_pq(headers));
		assert(p->ref > 0);
		pbuf_free(p);

		return;
	}
	/* Reuse space in the packet headers as a queue node. */
	pq = net_packet_pq(headers);
	pq->data = p->payload;
	pq->len = p->len;
	pq->next = NULL;
	
	assert((NULL == ic->incoming) == (NULL == ic->incoming_last));
	/* Is the queue empty? */
	if (NULL == ic->incoming) {
		assert(NULL == ic->incoming_last);
		ic->incoming = ic->incoming_last = pq;
	} else {
		last = ic->incoming_last;
		last->next = pq;
		ic->incoming_last = pq;
	}
	ic->incoming_size += p->len;
	assert(1 == p->ref);
	/* The payload now lives in our queue; drop the pbuf shell. */
	p->payload = p->alloc_track = NULL;
	pbuf_free(p);

	/* If the thread blocked waiting for a packet, wake it up */
	if (RECVING == ic->thd_status) {
		ic->thd_status = ACTIVE;
		assert(ic->thd_status == ACTIVE); /* Detect races */
		sched_wakeup(cos_spd_id(), ic->tid);
	}

	return;
}
Example #9
0
/*
 * Handle clock interrupts.
 *
 * timer_handler() is called directly from the real time clock
 * interrupt.  All interrupts are still disabled at the entry
 * of this routine.
 *
 * Expired periodic timers are re-armed and their events signalled
 * immediately; expired one-shot timers are deferred to expire_list and
 * signalled once via timer_event after the scan.
 */
void
timer_handler(void)
{
	struct timer *tmr;
	u_long ticks;
	int wakeup = 0;

	/*
	 * Bump time in ticks.
	 * Note that it is allowed to wrap.
	 */
	lbolt++;
	if (curthread->priority == PRI_IDLE)
		idle_ticks++;

	while (!list_empty(&timer_list)) {
		/*
		 * Check timer expiration.
		 */
		tmr = timer_next(&timer_list);
		if (time_before(lbolt, tmr->expire))
			break;

		list_remove(&tmr->link);
		if (tmr->interval != 0) {
			/*
			 * Periodic timer - reprogram timer again.
			 */
			ticks = time_remain(tmr->expire + tmr->interval);
			timer_add(tmr, ticks);
			sched_wakeup(&tmr->event);
		} else {
			/*
			 * One-shot timer
			 */
			list_insert(&expire_list, &tmr->link);
			wakeup = 1;
		}
	}
	/* Signal the timer thread once for all expired one-shots. */
	if (wakeup)
		sched_wakeup(&timer_event);

	sched_tick();
}
Example #10
0
File: pong.c Project: songjiguo/C3
/*
 * Fault-injection test endpoint: thread 12 wakes thread 11 and crashes
 * on its fourth invocation; thread 11 spins calling base_pong().
 * Thread ids 11/12 are hard-coded for this test setup.
 */
int pong(void)
{
	if(cos_get_thd_id() == 12) {
		/* Deliberate fault on the 4th call by thread 12. */
		if (number++ == 3) assert(0);
		/* NOTE(review): "cos_spd_id() + 1" differs from the usual
		 * sched_wakeup(cos_spd_id(), tid) pattern seen elsewhere --
		 * confirm the +1 is intentional for this test. */
		sched_wakeup(cos_spd_id() + 1, 11);
		return 0;
	}

	while(cos_get_thd_id() == 11) base_pong();
	return 0;
}
Example #11
0
File: tty.c Project: AndrewD/prex
/*
 * Output is completed: clear the busy flag once the output queue has
 * drained, and wake any writer sleeping on output completion.
 */
void
tty_done(struct tty *tp)
{

	if (tp->t_outq.tq_count == 0)
		tp->t_state &= ~TS_BUSY;

	if ((tp->t_state & TS_ASLEEP) != 0) {
		tp->t_state &= ~TS_ASLEEP;
		sched_wakeup(&tp->t_output);
	}
}
Example #12
0
/*
 * lwIP connected-callback: the connection's owning thread is parked in
 * connect(); mark it active and wake it.
 */
static err_t cos_net_lwip_tcp_connected(void *arg, struct tcp_pcb *tp, err_t err)
{
	struct intern_connection *ic = arg;

	assert(ic);
	/* Only a thread blocked in connect() should receive this. */
	assert(CONNECTING == ic->thd_status);

	if (sched_wakeup(cos_spd_id(), ic->tid)) BUG();
	ic->thd_status = ACTIVE;
	return ERR_OK;
}
/*
 * Micro-benchmark twin of call_cs(): two threads rendezvous via
 * sched_block/sched_wakeup and rdtscll() brackets the round trip.
 * The numbered printc comments trace the intended interleaving.
 */
void call(void)
{
    static int flag = 0;

    static int first,  second;

    u64_t start = 0, end = 0;

    if(flag == 1) {
        /* printc("2\n"); */
        second = cos_get_thd_id();
        sched_wakeup(cos_spd_id(), first);
        /* printc("4\n");  */
    }

    if(flag == 0) {
        /* printc("1\n"); */
        flag = 1;
        first = cos_get_thd_id();
        sched_block(cos_spd_id(), 0);
        /* printc("3\n"); */
        rdtscll(start);
        sched_block(cos_spd_id(), second);
        /* printc("6\n"); */
    }

    /* NOTE(review): brace-less if with a comment between condition and
     * body -- correct today, but fragile if a statement is ever added. */
    if (cos_get_thd_id() == second)
        /* printc("5\n"); */
        sched_wakeup(cos_spd_id(), first);

    if (cos_get_thd_id() == first) {
        /* printc("7\n"); */
        rdtscll(end);
        printc("cost of basics %llu cycs\n", end-start);
    }

    return;
}
Example #14
0
File: cond.c Project: AndrewD/prex
/*
 * Unblock all threads that are blocked on the specified CV.  Copies the
 * user CV in under the scheduler lock; if any waiters are unsignalled,
 * marks them all signalled and wakes the CV's event.
 */
int
cond_broadcast(cond_t *cond)
{
	cond_t c;
	int err;

	sched_lock();
	err = cond_copyin(cond, &c);
	if (err == 0 && c->signal < c->wait) {
		c->signal = c->wait;
		sched_wakeup(&c->event);
	}
	sched_unlock();
	return err;
}
Example #15
0
/*
 * Server-side wakeup stub: brackets the real sched_wakeup() with log
 * events when LOG_MONITOR is enabled (entry event tagged with this
 * spd's id, exit event tagged 0).
 */
int __sg_sched_wakeup(spdid_t spdid, unsigned short int thd_id)
{
	/* printc("ser: sched_wakeup (thd %d)\n", cos_get_thd_id()); */
	int ret;
#ifdef LOG_MONITOR
	monevt_enqueue(cos_spd_id(), 12, thd_id);
#endif
	ret = sched_wakeup(spdid, thd_id);
#ifdef LOG_MONITOR
	monevt_enqueue(0, 12, thd_id);
#endif

	return ret;
}
Example #16
0
/*
 * Server-side wakeup stub: logs a synchronous-invocation event before
 * and a return event after the real sched_wakeup() when LOG_MONITOR is
 * enabled (note the swapped src/dst spd ids on the return event).
 */
int __sg_sched_wakeup(spdid_t spdid, unsigned short int thd_id)
{
	/* printc("ser: sched_wakeup (thd %d)\n", cos_get_thd_id()); */
	int ret;
#ifdef LOG_MONITOR
	evt_enqueue(cos_get_thd_id(), spdid, cos_spd_id(), FN_SCHED_WAKEUP, thd_id, EVT_SINV);
#endif
	ret = sched_wakeup(spdid, thd_id);
#ifdef LOG_MONITOR
	evt_enqueue(cos_get_thd_id(), cos_spd_id(), spdid, FN_SCHED_WAKEUP, thd_id, EVT_SRET);
#endif

	return ret;
}
Example #17
0
/*
 * Put one character on key queue
 */
static void
keyq_enqueue(u_char c)
{

	/* Forward key to input handler */
	if (input_handler)
		input_handler(c);
	else {
		/* NOTE(review): the waiter is woken before the full-queue
		 * check, so a wakeup fires even when the key is dropped --
		 * confirm this is intentional. */
		sched_wakeup(&keypad_event);
		if (keyq_full())
			return;
		keyq[q_tail] = c;
		q_tail = keyq_next(q_tail);
	}
}
/*
 * Wake a thread that is sleeping on a timed event.  Returns 1 if no
 * pending event exists for thd_id (nothing to wake), otherwise the
 * result of sched_wakeup().  The event removal is done under the
 * component lock; the wakeup itself is issued after release.
 */
int timed_event_wakeup(spdid_t spdinv, unsigned short int thd_id)
{
	spdid_t spdid = cos_spd_id();
	struct thread_event *evt;

	TAKE(spdid);
	ticks = sched_timestamp();
	if (NULL == (evt = find_remove_event(thd_id))) {
		RELEASE(spdid);
		return 1;
	}
	RELEASE(spdid);
	assert(evt->thread_id == thd_id);

	return sched_wakeup(spdid, thd_id);
}
Example #19
0
/*
 * Make a thread runnable.  Returns 0 on success, -EINVAL if the thread
 * has already exited or the scheduler refuses to wake it.
 */
int thread_launch(struct thread *t) {
	int ret = -EINVAL;

	sched_lock();
	{
		/* An exited thread can never be relaunched. */
		if (!(t->state & TS_EXITED) && sched_wakeup(&t->schedee))
			ret = 0;
	}
	sched_unlock();

	return ret;
}
Example #20
0
/*
 * perf timechart: decode one sample event, track the observed time
 * window, and dispatch raw tracepoint payloads (power and scheduler
 * events) to their state-machine handlers.  Always returns 0; unknown
 * event strings are silently skipped.
 */
static int
process_sample_event(event_t *event)
{
	struct sample_data data;
	struct trace_entry *te;

	memset(&data, 0, sizeof(data));

	event__parse_sample(event, sample_type, &data);

	/* Track the earliest and latest timestamps seen. */
	if (sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > data.time)
			first_time = data.time;
		if (last_time < data.time)
			last_time = data.time;
	}

	te = (void *)data.raw_data;
	if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(data.cpu, data.time);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(data.cpu, data.time, data.pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(data.cpu, data.time, te);
	}
	return 0;
}
Example #21
0
/*
 * Release a mutex.  Under the mutex spinlock: notify via _mutex_wakeup,
 * then hand ownership directly to the first queued waiter (if any) and
 * wake it; otherwise mark the mutex unlocked.  Direct handoff means the
 * mutex never appears unlocked while waiters exist.
 */
void
mutex_unlock(mutex_t *m)
{
    thread_t *n;

    spinlock_lock(&m->mtx_slock);
    _mutex_wakeup(m);
    m->mtx_owner = NULL;
    n = list_extract_first(&m->mtx_locking);
    if (n) {
        /* Ownership handoff: the woken thread already owns the mutex. */
        m->mtx_owner = n;
        sched_wakeup(n);
    } else {
        m->mtx_locked = MUTEX_UNLOCKED;
    }
    spinlock_unlock(&m->mtx_slock);
}
Example #22
0
File: tty.c Project: AndrewD/prex
/*
 * Flush tty read and/or write queues, notifying anyone waiting.
 */
static void
tty_flush(struct tty *tp, int rw)
{

	DPRINTF(("tty_flush rw=%d\n", rw));

	if (rw & FREAD) {
		/* Drain the canonical queue, then the raw queue. */
		while (ttyq_getc(&tp->t_canq) != -1)
			continue;
		while (ttyq_getc(&tp->t_rawq) != -1)
			continue;
		sched_wakeup(&tp->t_input);
	}
	if (rw & FWRITE) {
		/* Un-stop output and kick the transmitter. */
		tp->t_state &= ~TS_TTSTOP;
		tty_start(tp);
	}
}
/*
 * Reflect on (and wake) all threads recorded as blocked in spdid's
 * list.  NOTE(review): the scheduler lock is dropped around each
 * sched_wakeup() while the list is still being traversed -- the next
 * FIRST_LIST() after re-taking the lock may read a node that was
 * concurrently removed; confirm the list is only mutated by this
 * thread.  Also note ret is never incremented, so the function always
 * returns 0 (the counting logic is commented out below).
 */
int __sg_sched_reflect(spdid_t spdid, int src_spd, int cnt)
{
	struct blocked_thd *blk_thd;
	int ret = 0;

	/* printc("scheduler server side stub (thd %d)\n", cos_get_thd_id()); */

	assert(src_spd);
	cos_sched_lock_take();
	/* printc("scheduler server side stub (thd %d)\n", cos_get_thd_id()); */
	/* printc("passed reflection: spd %d src_spd %d\n", spdid, src_spd); */

	/* List head never initialized, or list empty: nothing to do. */
	if (!bthds[spdid].next) goto done;
	if (EMPTY_LIST(&bthds[spdid], next, prev)) goto done;

	for (blk_thd = FIRST_LIST(&bthds[spdid], next, prev);
	     blk_thd != &bthds[spdid];
	     blk_thd = FIRST_LIST(blk_thd, next, prev)){
		printc("(cnt)blocked thds %d\n", blk_thd->id);
		cos_sched_lock_release();
		sched_wakeup(spdid, blk_thd->id);
		cos_sched_lock_take();
	}		
	
	/* if (cnt == 1) {   */
	/* 	for (blk_thd = FIRST_LIST(&bthds[spd], next, prev); */
	/* 	     blk_thd != &bthds[spd]; */
	/* 	     blk_thd = FIRST_LIST(blk_thd, next, prev)){ */
	/* 		printc("(cnt)blocked thds %d\n", blk_thd->id); */
	/* 		ret++; */
	/* 	} */
	/* } else { */
	/* 	blk_thd = FIRST_LIST(&bthds[spd], next, prev); */
	/* 	if (!EMPTY_LIST(blk_thd, next, prev)) REM_LIST(blk_thd, next, prev); */
	/* 	ret = blk_thd->id; */
	/* } */
done:
	cos_sched_lock_release();
	return ret;
}
Example #24
0
void blklist_wake_threads(struct blocked_thd *bl)
{
	struct blocked_thd *bthd, *bthd_next;
	spdid_t spdid;

	// Wake up 
	spdid = cos_spd_id();
	DOUT("waking up threads for spd %d\n", spdid);
    
	for(bthd = FIRST_LIST(bl, next, prev) ; bthd != bl ; bthd = bthd_next){
		unsigned short int tid;

		bthd_next = FIRST_LIST(bthd, next, prev);
		DOUT("\tWakeing UP thd: %d", bthd->thd_id);
		REM_LIST(bthd, next, prev);
		tid = bthd->thd_id;
		free(bthd);
		sched_wakeup(cos_spd_id(), tid);        
		DOUT("......UP\n");
	}
    
	DOUT("All thds now awake\n");
}
Example #25
0
/*
 * Terminate the calling (non-main) thread with result 'ret'.  Marks the
 * thread exited, wakes a joiner if one is waiting, frees the thread if
 * detached, and schedules away.  Never returns; for the main thread it
 * delegates to task_exit().
 */
void _NORETURN thread_exit(void *ret) {
	struct thread *current = thread_self();
	struct task *task = task_self();
	struct thread *joining;

	/* We can free only not main threads */
	if (current == task_get_main(task)) {
		/* We are last thread. */
		task_exit(ret);
		/* NOTREACHED */
	}

	sched_lock();

	// sched_finish(current);
	current->schedee.waiting = true;
	current->state |= TS_EXITED;

	/* Wake up a joining thread (if any).
	 * Note that joining and run_ret are both in a union. */
	joining = current->joining;
	current->run_ret = ret;
	if (joining) {
		sched_wakeup(&joining->schedee);
	}

	if (current->state & TS_DETACHED)
		/* No one references this thread anymore. Time to delete it. */
		thread_delete(current);

	schedule();

	/* NOTREACHED */
	sched_unlock();  /* just to be honest */
	panic("Returning from thread_exit()");
}
Example #26
0
/*
 * Signal handler: SIGUSR1 wakes wait queue 1, any other delivered
 * signal wakes wait queue 2.
 *
 * Fix: the original omitted the return type, relying on implicit int
 * (removed in C99, a hard error in modern compilers); a handler passed
 * to signal() must have type void (*)(int).
 */
void
wakeup_handler(int sig) {
	if (sig==SIGUSR1)
		sched_wakeup(&wq1);
	else
		sched_wakeup(&wq2);
}
Example #27
0
File: tty.c Project: AndrewD/prex
/*
 * Process input of a single character received on a tty.
 * echo if required.
 * This may be called with interrupt level.
 *
 * Applies IGNCR/ICRNL/INLCR translation, IXON flow control, canonical
 * erase/kill editing, and ISIG signal generation before queueing the
 * character and waking any reader.
 */
void
tty_input(int c, struct tty *tp)
{
	unsigned char *cc;
	tcflag_t iflag, lflag;
	int sig = -1;

#ifdef CONFIG_CPUFREQ
	/* Reload power management timer */
	pm_active();
#endif
	lflag = tp->t_lflag;
	iflag = tp->t_iflag;
	cc = tp->t_cc;

	/* IGNCR, ICRNL, INLCR */
	if (c == '\r') {
		if (iflag & IGNCR)
			goto endcase;
		else if (iflag & ICRNL)
			c = '\n';
	} else if (c == '\n' && (iflag & INLCR))
		c = '\r';

	if (iflag & IXON) {
		/* stop (^S) */
		if (c == cc[VSTOP]) {
			if (!(tp->t_state & TS_TTSTOP)) {
				tp->t_state |= TS_TTSTOP;
				return;
			}
			if (c != cc[VSTART])
				return;
			/* if VSTART == VSTOP then toggle */
			goto endcase;
		}
		/* start (^Q) */
		if (c == cc[VSTART])
			goto restartoutput;
	}
	if (lflag & ICANON) {
		/* erase (^H / ^?) or backspace */
		if (c == cc[VERASE] || c == '\b') {
			if (!ttyq_empty(&tp->t_rawq)) {
				ttyq_unputc(&tp->t_rawq);
				tty_rubout(tp);
			}
			goto endcase;
		}
		/* kill (^U) */
		if (c == cc[VKILL]) {
			while (!ttyq_empty(&tp->t_rawq)) {
				ttyq_unputc(&tp->t_rawq);
				tty_rubout(tp);
			}
			goto endcase;
		}
	}
	if (lflag & ISIG) {
		/* quit (^C) */
		if (c == cc[VINTR] || c == cc[VQUIT]) {
			if (!(lflag & NOFLSH))
				tty_flush(tp, FREAD | FWRITE);
			tty_echo(c, tp);
			sig = (c == cc[VINTR]) ? SIGINT : SIGQUIT;
			goto endcase;
		}
		/* suspend (^Z) */
		if (c == cc[VSUSP]) {
			if (!(lflag & NOFLSH))
				tty_flush(tp, FREAD | FWRITE);
			tty_echo(c, tp);
			sig = SIGTSTP;
			goto endcase;
		}
	}

	/*
	 * Check for input buffer overflow
	 */
	if (ttyq_full(&tp->t_rawq)) {
		tty_flush(tp, FREAD | FWRITE);
		goto endcase;
	}
	ttyq_putc(c, &tp->t_rawq);

	if (lflag & ICANON) {
		/* In canonical mode, readers wake only on a complete line. */
		if (c == '\n' || c == cc[VEOF] || c == cc[VEOL]) {
			tty_catq(&tp->t_rawq, &tp->t_canq);
			sched_wakeup(&tp->t_input);
		}
	} else
		sched_wakeup(&tp->t_input);

	if (lflag & ECHO)
		tty_echo(c, tp);
 endcase:
	/*
	 * IXANY means allow any character to restart output.
	 */
	if ((tp->t_state & TS_TTSTOP) && (iflag & IXANY) == 0 &&
	    cc[VSTART] != cc[VSTOP])
		return;
 restartoutput:
	tp->t_state &= ~TS_TTSTOP;

	/* Deliver any signal chosen above to the controlling task. */
	if (sig != -1) {
		if (sig_task)
			exception_post(sig_task, sig);
	}
	tty_start(tp);
}
Example #28
0
/*
 * Release all threads parked on a barrier by scanning its wait-queue
 * database pages and waking each registered listener, then log timing
 * statistics.  Without hardware barriers the last arriver does not
 * sleep, hence count-1 tickets.
 */
static void barrier_do_broadcast(struct barrier_s *barrier)
{
	register uint_t tm_first;
	register uint_t tm_last;
	register uint_t tm_start;
	register uint_t wqdbsz;
	register uint_t tm_end;
	register uint_t ticket;
	register uint_t index;
	register uint_t count;
	register uint_t event;
	register void  *listner;
	register wqdb_t *wqdb;
	register uint_t i;
 
	tm_start = cpu_time_stamp();
	tm_first = barrier->tm_first;
	tm_last  = barrier->tm_last;
	/* records per wait-queue database page */
	wqdbsz   = PMM_PAGE_SIZE / sizeof(wqdb_record_t);
	ticket   = 0;

#if ARCH_HAS_BARRIERS
	count    = barrier->count;
#else
	count    = barrier->count - 1;	/* last don't sleep */
#endif

	/* Scan pages until every expected sleeper has been woken. */
	for(index = 0; ((index < BARRIER_WQDB_NR) && (ticket < count)); index++)
	{
		wqdb = barrier->wqdb_tbl[index];

		for(i = 0; ((i < wqdbsz) && (ticket < count)); i++)
		{

#if CONFIG_BARRIER_BORADCAST_UREAD
			event   = cpu_uncached_read(&wqdb->tbl[i].event);
			listner = (void*) cpu_uncached_read(&wqdb->tbl[i].listner);
#else
			event   = wqdb->tbl[i].event;
			listner = wqdb->tbl[i].listner;
#endif

			if(listner != NULL)
			{
				/* Clear the slot before waking its owner. */
				wqdb->tbl[i].listner = NULL;
#if CONFIG_USE_SCHED_LOCKS
				sched_wakeup((struct thread_s*) listner);
#else
				sched_event_send(listner, event);
#endif
				ticket ++;
			}
		}
	}

	tm_end = cpu_time_stamp();

	printk(INFO, "INFO: %s: cpu %d [F: %d, L: %d, B: %d, E: %d, T: %d]\n",
	       __FUNCTION__,
	       cpu_get_id(),
	       tm_first, 
	       tm_last, 
	       tm_start,
	       tm_end,
	       tm_end - tm_first);
}
/*
 * Walk the (time-ordered) event list and expire every event whose
 * deadline is <= time: unlink it, update deadline-miss/lateness
 * statistics for periodic events (re-inserting them with their next
 * deadline), and wake the owning thread if it was blocked.
 */
static void __event_expiration(event_time_t time, struct thread_event *events)
{
	spdid_t spdid = cos_spd_id();

	struct thread_event *tmp, *next_te;

	assert(TIMER_NO_EVENTS != time);

	for (tmp = FIRST_LIST(events, next, prev) ;
	     tmp != events && tmp->event_expiration <= time ; 
	     tmp = next_te) {
		u8_t b;
		unsigned short int tid;

		assert(tmp);
		/* Capture the successor before unlinking tmp. */
		next_te = FIRST_LIST(tmp, next, prev);
		assert(next_te && next_te->prev == tmp && tmp->next == next_te);
		tmp->flags |= TE_TIMED_OUT;
		REM_LIST(tmp, next, prev);
		b = tmp->flags & TE_BLOCKED;
		tmp->flags &= ~TE_BLOCKED;
		tid = tmp->thread_id;
		if (tmp->flags & TE_PERIODIC) {
			/* thread hasn't blocked? deadline miss! */
			if (!b) {
 			        long long period_cyc;

				tmp->dl_missed++;
				
				if (!tmp->missed) { /* first miss? */
					tmp->missed = 1;
					/* save time of deadline, unless we
					 * have saved the time of an earlier
					 * deadline miss */
					assert(!tmp->completion);
					rdtscll(tmp->completion);
					tmp->miss_samples++;
					tmp->samples++;
				} else {
					/* consecutive miss: charge a full period */
					period_cyc = tmp->period*cyc_per_tick;
					assert(period_cyc > cyc_per_tick);
					tmp->lateness_tot +=period_cyc;
					tmp->miss_lateness_tot += period_cyc;
					rdtscll(tmp->completion);
				}
			} else {
				if (!tmp->missed) { /* on time, compute lateness */
					long long t;

					assert(tmp->completion);
					rdtscll(t);
					tmp->lateness_tot += -(t - tmp->completion);
					tmp->samples++;
					tmp->completion = 0;
				}
				tmp->missed = 0;
			}

			tmp->dl++;
			/* Next periodic deadline! */
			tmp->event_expiration += tmp->period;
			insert_pevent(tmp);
		}

		if (b) sched_wakeup(spdid, tmp->thread_id);
		/* We don't have to deallocate the thread_events as
		 * they are stack allocated on the sleeping
		 * threads. */
	}
}
Example #30
0
/*
 * Release lock_id and wake every thread blocked on it.  The component
 * lock is deliberately held while waking all but the last waiter (see
 * the inline comments); it is released just before the final wakeup.
 * Returns 0 on success, -1 if the lock cannot be found.
 */
int lock_component_release(spdid_t spd, unsigned long lock_id)
{
	struct meta_lock *ml;
	struct blocked_thds *sent, *bt;
	spdid_t spdid = cos_spd_id();

	ACT_RECORD(ACT_UNLOCK, spd, lock_id, cos_get_thd_id(), 0);
	TAKE(spdid);

	generation++;
	ml = lock_find(lock_id, spd);
	if (!ml) goto error;

	/* Apparently, lock_take calls haven't been made. */
	if (EMPTY_LIST(&ml->b_thds, next, prev)) {
		RELEASE(spdid);
		return 0;
	}
	sent = bt = FIRST_LIST(&ml->b_thds, next, prev);
	/* Remove all threads from the lock's list */
	REM_LIST(&ml->b_thds, next, prev);
	/* Unblock all waiting threads */
	while (1) {
		struct blocked_thds *next;
		u16_t tid;

		/* This is suboptimal: if we wake a thread with a
		 * higher priority, it will be switched to.  Given we
		 * are holding the component lock here, we should get
		 * switched _back_ to so as to wake the rest of the
		 * components. */
		next = FIRST_LIST(bt, next, prev);
		REM_LIST(bt, next, prev);

		ACT_RECORD(ACT_WAKE, spd, lock_id, cos_get_thd_id(), bt->thd_id);

		/* cache locally */
		tid = bt->thd_id;
		/* Last node in the list? */
		if (bt == next) {
			/* This is sneaky, so to reiterate: Keep this
			 * lock till now so that if we wake another
			 * thread, and it begins execution, the system
			 * will switch back to this thread so that we
			 * can wake up the rest of the waiting threads
			 * (one of which might have the highest
			 * priority).  We release before we wake the
			 * last as we don't really need the lock
			 * anymore, an it will avoid quite a few
			 * invocations.*/
			RELEASE(spdid);
		}

		/* Wakeup the way we were put to sleep */
		assert(tid != cos_get_thd_id());
		/* printc("CPU %ld: %d waking up %d for lock %d\n", cos_cpuid(), cos_get_thd_id(), tid, lock_id); */
		sched_wakeup(spdid, tid);

		if (bt == next) break;
		bt = next;
	}

	return 0;
error:
	RELEASE(spdid);
	return -1;
}