Пример #1
0
/*
 * Block the current task until fd is ready for reading (rw == 'r') or
 * writing (rw == 'w').  Lazily creates the shared epoll instance and
 * the fdtask poller on first use.  The task pointer is stashed in the
 * epoll event so fdtask can wake exactly this task.
 */
void
fdwait(int fd, int rw)
{
	if(!startedfdtask){
		startedfdtask = 1;
		epfd = epoll_create(1);
		assert(epfd >= 0);
		taskcreate(fdtask, 0, 32768 * 10);
	}

	taskstate("fdwait for %s", rw=='r' ? "read" : rw=='w' ? "write" : "error");
	struct epoll_event ev = {0};
	ev.data.ptr = taskrunning;	/* who to wake when fd is ready */
	switch(rw){
	case 'r':
		ev.events |= EPOLLIN | EPOLLPRI;
		break;
	case 'w':
		ev.events |= EPOLLOUT;
		break;
	}

	int r = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
	int duped = 0;
	/* BUG FIX: the original condition was `r < 0 || errno == EEXIST`,
	 * which (a) took the dup() path on success whenever errno held a
	 * stale EEXIST from an earlier call, and (b) retried via dup() on
	 * failures other than EEXIST, where a retry cannot succeed.  Only
	 * dup when the add failed because fd is already registered: a
	 * dup'd descriptor gives epoll a distinct registration key.
	 * (Also fixes the inner `int r` shadowing the outer one.) */
	if(r < 0 && errno == EEXIST){
		duped = 1;
		fd = dup(fd);
		assert(fd >= 0);
		r = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
	}
	assert(r == 0);
	taskswitch();	/* sleep until fdtask readies us */
	epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &ev);
	if(duped)
		close(fd);
}
Пример #2
0
/*
 * Long-running task that services reload/shutdown requests.
 * Parks itself until signaled; on wake-up, if the global RELOAD flag
 * is set it loads a fresh Server from srv->db_file for srv->server_id
 * and pushes it onto the server queue, otherwise it treats the signal
 * as a shutdown request and stops.
 * Exits with status 0 on clean shutdown, 1 if reloading fails.
 */
void reload_task(void *data)
{
    RELOAD_TASK = taskself();   /* publish our handle so others can signal us */
    struct ServerTask *srv = data;

    while(1) {
        taskswitch();           /* sleep until signaled */
        task_clear_signal();

        if(RELOAD) {
            log_info("Reload requested, will load %s from %s", bdata(srv->db_file), bdata(srv->server_id));
            Server *old_srv = Server_queue_latest();
            Server *new_srv = reload_server(old_srv, bdata(srv->db_file), bdata(srv->server_id));
            /* check() presumably logs and jumps to error: on failure
             * (dbg.h-style macro) — confirm against the project's dbg.h. */
            check(new_srv, "Failed to load the new configuration, exiting.");

            // for this to work handlers need to die more gracefully
            Server_queue_push(new_srv);
        } else {
            log_info("Shutdown requested, goodbye.");
            break;
        }
    }

    taskexit(0);
error:
    taskexit(1);
}
Пример #3
0
/*
 * Park the current task until socket (if non-NULL) or fd is ready for
 * reading (rw == 'r') or writing (rw == 'w').
 * Returns 0 on a normal wake-up; -1 if the task was signaled while
 * waiting, if the fd was closed during the wait, or if registering
 * with the poller failed.
 */
int _wait(void *socket, int fd, int rw)
{
    startfdtask();
    check(socket != NULL || fd >= 0, "Attempt to wait on a dead socket/fd: %p or %d", socket, fd);

    int max = 0;
    /* Use a "hot" poll slot while capacity remains. */
    int hot_add = SuperPoll_active_hot(POLL) < SuperPoll_max_hot(POLL);
    int was_registered = 0;

    if(socket != NULL) {
        taskstate(rw == 'r' ? "read handler" : "write handler");
    } else {
        /* Remember whether the fd was registered so we can detect a
         * close that happens while we sleep. */
        was_registered = Register_fd_exists(fd) != NULL;
        taskstate(rw == 'r' ? "read fd" : "write fd");
    }

    max = SuperPoll_add(POLL, (void *)taskrunning, socket, fd, rw, hot_add);
    check(max != -1, "Error adding fd: %d or socket: %p to task wait list.", fd, socket);

    taskswitch();

    if(task_was_signaled()) {
        debug("GOT SIGNAL %d AFTER WAIT", taskrunning->signal);
        SuperPoll_del(POLL, socket, fd, hot_add);
        return -1;
    } else if(was_registered && Register_fd_exists(fd) == NULL) {
        /* BUG FIX: this was a dangling `} if(...)` — it only behaved
         * correctly because the previous branch returns.  Made the
         * chain an explicit else-if so the intent is unambiguous. */
        debug("Socket %d was closed after a wait.", fd);
        return -1;
    } else {
        return 0;
    }

error:
    return -1;
}
Пример #4
0
/* Terminate the currently running coroutine, recording val as its
 * exit status before handing control back to the scheduler. */
void
taskexit(int val)
{
	taskrunning->exiting = 1;
	taskexitval = val;
	taskswitch();
}
Пример #5
0
// Move a task (process) into the waiting state.
// Returns 0 on success; never returns on an internal list error (halts).
int sleepTask(Task *task){
	if(nowTask == task){
	}else{
		// Moving a runnable (non-current) task to the wait state: remove
		// it from the ready list first (this case probably never occurs).
		if(!taskListDelete(taskReadyList, task)){
			/*
				TODO: proper error handling
			*/
			cli();		// disable interrupts and halt on list corruption
			while(1);
		}
	}
	taskQueueInsert(taskWaitList, task);
	
	
	// If the task being put to sleep is the current one, we must switch
	// to another task ourselves.
	if(nowTask == task){
		// The normal task-switch function cannot be used here: it would
		// put the current task back on the ready queue, but we want it
		// on the wait queue instead.
		Task *nextTask;
		while(!taskQueueDelete(taskReadyList, &nextTask));
		//nowTask = nextTask;
		taskswitch(nextTask);
	}
	return 0;
}
Пример #6
0
/*
 * Kills the process with the specified pid; any files the process has
 * open are left open.  The kill is deferred: the task switcher
 * terminates the process whose pid equals the global `sigterm` on the
 * next switch.
 */
void ps_user_kill(int pid)
{
    if (ps_findprocess(pid) != -1) {
        /* FIX: removed stray semicolons after the block and after the
         * function body (the file-scope one is invalid in strict C). */
        sigterm = pid;
        taskswitch();
    }
}
Пример #7
0
/*
 * Register the current task as waiting for readiness on socket/fd
 * (rw == 'r' for read, anything else for write), then yield until the
 * poller wakes us.  Returns 0 normally; on registration failure it
 * still yields once, then returns -1.
 */
int _wait(void *socket, int fd, int rw)
{
    int hot_add;
    int max;

    startfdtask();

    /* Prefer a "hot" poll slot while capacity remains. */
    hot_add = SuperPoll_active_hot(POLL) < SuperPoll_max_hot(POLL);

    if(rw == 'r') {
        taskstate("read wait");
    } else {
        taskstate("write wait");
    }

    max = SuperPoll_add(POLL, (void *)taskrunning, socket, fd, rw, hot_add);
    check(max != -1, "Error adding fd %d to task wait list.", fd);

    taskswitch();
    return 0;

error:
    taskswitch();
    return -1;
}
Пример #8
0
/*
 * Install proc's address space and kernel stack on this CPU
 * (Xen/x86 uniprocessor variant).
 */
void
mmuswitch(Proc* proc)
{
	//ulong *pdb;

	if(proc->newtlb){
		/* Page tables were invalidated: free them and clear the flag. */
		mmuptefree(proc);
		proc->newtlb = 0;
	}

	if(proc->mmupdb){
		//XXX doesn't work for some reason, but it's not needed for uniprocessor
		//pdb = (ulong*)proc->mmupdb->va;
		//xenupdate(&pdb[PDX(MACHADDR)], m->pdb[PDX(MACHADDR)]);
		/* Switch to proc's own page directory and kernel stack. */
		taskswitch(proc->mmupdb, (ulong)(proc->kstack+KSTACK));
	}
	else
		/* No per-process pdb: keep the prototype pdb (arg 0). */
		taskswitch(0, (ulong)(proc->kstack+KSTACK));
}
Пример #9
0
/*
 * Voluntarily give up the CPU, letting every task already queued run
 * first.  Returns how many other context switches happened before we
 * were resumed (our own switch back in is excluded).
 */
int
taskyield(void)
{
	int before = tasknswitch;

	taskready(taskrunning);	/* requeue ourselves at the back */
	taskstate("yield");
	taskswitch();
	return tasknswitch - before - 1;
}
Пример #10
0
/* Voluntarily yield the CPU to other ready coroutines. */
int
taskyield(void)
{
	int n;
	
	n = tasknswitch;// snapshot the switch counter to measure how many switches occur between yielding and resuming
	taskready(taskrunning);// append ourselves to the tail of taskrunqueue
	taskstate("yield");
	taskswitch();
	return tasknswitch - n - 1;// switches that ran while we were queued (minus our own)
}
Пример #11
0
/*
 * Execute an alt: given an array of channel operations terminated by
 * CHANEND (blocking) or CHANNOBLK (non-blocking), perform one op that
 * is ready — chosen uniformly at random among the ready ones — and
 * return its index.  If none is ready: return -1 when non-blocking,
 * otherwise queue on every channel and sleep until a peer completes
 * one of the ops.
 */
int
chanalt(Alt *a)
{
	int i, j, ncan, n, canblock;
	Channel *c;
	Task *t;

	needstack(512);
	/* Count the ops; the terminator also encodes the blocking mode. */
	for(i=0; a[i].op != CHANEND && a[i].op != CHANNOBLK; i++)
		;
	n = i;
	canblock = a[i].op == CHANEND;

	t = taskrunning;
    /* Point every alt entry at the currently running task. */
	for(i=0; i<n; i++){
		a[i].task = t;
		a[i].xalt = a;
	}
	/* How many ops could execute right now? */
	ncan = 0;
	for(i=0; i<n; i++){
		if(altcanexec(&a[i])){
			ncan++;
		}
	}
	if(ncan){
		/* Pick the j-th ready op at random for fairness. */
		j = rand()%ncan;
		for(i=0; i<n; i++){
			if(altcanexec(&a[i])){
				if(j-- == 0){
                    altexec(&a[i]);
	                return i;
	            }
			}
		}
	}

	if(!canblock)
		return -1;

	/* Nothing ready: queue on every channel and sleep. */
	for(i=0; i<n; i++){
		if(a[i].op != CHANNOP)
			altqueue(&a[i]);
	}

	taskswitch();

	/*
	 * the guy who ran the op took care of dequeueing us
	 * and then set a[0].alt to the one that was executed.
	 */
	return a[0].xalt - a;
}
Пример #12
0
/*
 * Busy-waits (yielding the CPU on each iteration) until the current
 * process owns the keyboard focus.  Returns immediately when no
 * foreground process tracking is active (fg_current is clear).
 * FIX: removed the stray semicolons after the loop and the function
 * body; the file-scope `};` is invalid in strict C.
 */
void keyboard_wait()
{
    if (!fg_current)
        return;

    while (getprocessid() != fg_getkeyboardowner()) {
        /* we may probably allow child threads to share the keyboard
         * with their parent: a non-system thread whose owner holds
         * the focus may proceed. */
        if ((current_process->status & PS_ATTB_THREAD)
              && current_process->accesslevel != ACCESS_SYS
              && current_process->owner == fg_getkeyboardowner())
            break;
        taskswitch();
    }
}
Пример #13
0
/*
 * Sleep on rendezvous point r: enqueue ourselves on r's wait list,
 * drop r's lock (if any) while asleep, and reacquire it on wake-up.
 * The waker is responsible for readying us via the wait list.
 */
void
tasksleep(Rendez *r)
{
	addtask(&r->waiting, taskrunning);
	if(r->l != 0)
		qunlock(r->l);
	taskstate("sleep");
	taskswitch();
	/* Re-read r->l rather than caching it: it may have changed. */
	if(r->l != 0)
		qlock(r->l);
}
Пример #14
0
/*
 * Acquire l for writing.  Succeeds immediately (returns 1) when there
 * is no writer and no readers.  Otherwise returns 0 if block is
 * false, or queues on wwaiting and sleeps until the lock is handed
 * over, then returns 1.
 */
static int
_wlock(RWLock *l, int block)
{
	/* Fast path: lock completely free. */
	if(l->readers == 0 && l->writer == nil){
		l->writer = taskrunning;
		return 1;
	}
	if(!block)
		return 0;

	taskstate("wlock");
	addtask(&l->wwaiting, taskrunning);
	taskswitch();
	return 1;
}
Пример #15
0
/*
 * Acquire l for reading.  Succeeds immediately (returns 1) when there
 * is no writer holding or waiting for the lock (waiting writers get
 * priority over new readers).  Otherwise returns 0 if block is false,
 * or queues on rwaiting and sleeps until readied, then returns 1.
 */
static int
_rlock(RWLock *l, int block)
{
	/* Fast path: no writer active and none queued ahead of us. */
	if(l->wwaiting.head == nil && l->writer == nil){
		l->readers++;
		return 1;
	}
	if(!block)
		return 0;

	taskstate("rlock");
	addtask(&l->rwaiting, taskrunning);
	taskswitch();
	return 1;
}
Пример #16
0
/* Free all MMU resources of an exiting process (Xen/x86 variant). */
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/*
	 * Release any pages allocated for a page directory base or page-tables
	 * for this process:
	 *   switch to the prototype pdb for this processor (m->pdb);
	 *   call mmuptefree() to place all pages used for page-tables (proc->mmuused)
	 *   onto the process' free list (proc->mmufree). This has the side-effect of
	 *   cleaning any user entries in the pdb (proc->mmupdb);
	 *   if there's a pdb put it in the cache of pre-initialised pdb's
	 *   for this processor (m->pdbpool) or on the process' free list;
	 *   finally, place any pages freed back into the free pool (palloc).
	 * This routine is only called from sched() with palloc locked.
	 */
	taskswitch(0, (ulong)m + BY2PG);
	mmuptefree(proc);

	if((page = proc->mmupdb) != 0){
		proc->mmupdb = 0;
		while(page){
			next = page->next;
			/* its not a page table anymore, mark it rw */
			xenptunpin(page->va);
			/* In PAE mode, or when the per-cpu pdb cache is full,
			 * put the page on the process free list; otherwise
			 * cache it in m->pdbpool for reuse. */
			if(paemode || m->pdbcnt > 10){
				page->next = proc->mmufree;
				proc->mmufree = page;
			}
			else{
				page->next = m->pdbpool;
				m->pdbpool = page;
				m->pdbcnt++;
			}
			page = next;
		}
	}

	/* Hand every freed page back to the page allocator; a live
	 * reference here would mean the page is still in use. */
	for(page = proc->mmufree; page; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %ld\n", page->ref);
		pagechainhead(page);
	}
	if(proc->mmufree && palloc.r.p)
		wakeup(&palloc.r);	/* someone is waiting for free pages */
	proc->mmufree = 0;
}
Пример #17
0
// Coroutine voluntarily yields the CPU.
int
taskyield(void)
{
	int n;
	
	n = tasknswitch;
    // Append to the tail of the ready queue, effectively letting the
    // coroutines queued behind us run first.
	taskready(taskrunning);
    // Mark our state as "yield".
	taskstate("yield");
    // Switch context away.
	taskswitch();
    // tasknswitch counts scheduler switches; subtracting the snapshot
    // (and 1 for our own switch back in) gives the number of switches
    // that ran while we were yielded.
	return tasknswitch - n - 1;
}
Пример #18
0
/*
 * Switch to the next task on the ready list, requeueing the current
 * task (if any) at the back.  Returns 0 on success, -1 when the ready
 * list is empty.
 */
int taskSwitch(){
	Task *next;

	if(getTaskCount(taskReadyList) <= 0)
		return -1;

	taskQueueDelete(taskReadyList, &next);
	if(nowTask != NULL)
		taskQueueInsert(taskReadyList, nowTask);

	taskswitch(next);
	return 0;
}
Пример #19
0
/*
 * Block the current task until fd is readable (rw == 'r') or writable
 * (rw == 'w').  Registers the fd in the shared pollfd array consumed
 * by fdtask, which wakes the task recorded in polltask[] when the fd
 * becomes ready.  Aborts when the pollfd table is full.
 */
void
fdwait(int fd, int rw)
{
	int bits;

#ifdef maysam_dbg
	fprintf(stderr,"fdwait on fd=%d 1 startedfdtask=%d\n",fd, startedfdtask);
#endif
	/* Lazily start the polling task on first use. */
	if(!startedfdtask){
		startedfdtask = 1;
		taskcreate(fdtask, 0, 32768);
	}
#ifdef maysam_dbg
	fprintf(stderr,"fdwait on fd=%d 2\n",fd);
#endif

	if(npollfd >= MAXFD){
		fprint(2, "too many poll file descriptors\n");
		abort();
	}
	
#ifdef maysam_dbg
	fprintf(stderr,"fdwait on fd=%d 3\n",fd);
#endif
	taskstate("fdwait for %s", rw=='r' ? "read" : rw=='w' ? "write" : "error");
	bits = 0;
	switch(rw){
	case 'r':
		bits |= POLLIN;
		break;
	case 'w':
		bits |= POLLOUT;
		break;
	}

#ifdef maysam_dbg
	fprintf(stderr,"fdwait on fd=%d 4\n",fd);
#endif
	/* Claim the next poll slot and record who to wake. */
	polltask[npollfd] = taskrunning;
	pollfd[npollfd].fd = fd;
	pollfd[npollfd].events = bits;
	pollfd[npollfd].revents = 0;
	npollfd++;
#ifdef maysam_dbg
	/* BUG FIX: the format string has %d but the original passed no fd
	 * argument — undefined behavior per C11 fprintf. */
	fprintf(stderr,"fdwait on fd=%d before taskswitch\n",fd);
#endif
	taskswitch();
}
Пример #20
0
/*
 * Called when a process wishes to terminate itself; never returns.
 * Closes all files the process has opened, then tells the task
 * switcher to kill this process by setting the global `sigterm`
 * variable to the current pid — when sigterm is non-zero the task
 * switcher terminates the process with pid equal to sigterm.
 * FIX: removed the unused `DWORD flags` local and the stray `};`
 * after the function body (invalid in strict C).
 */
DWORD exit(DWORD val)
{
    //close all files the process has opened
    closeallfiles(current_process->processid);

    sigterm = current_process->processid;

    taskswitch();
    while (1);      /* we should never be scheduled again; spin defensively */
    return 0;       /* unreachable; satisfies the non-void return */
}
Пример #21
0
/*
 * Acquire queue-lock l for the current task.  If the lock is free,
 * take it and return 1.  Otherwise return 0 when block is false, or
 * queue up and sleep until the previous owner hands the lock over,
 * then return 1.
 */
static int
_qlock(QLock *l, int block)
{
	/* Fast path: uncontended. */
	if(l->owner == nil){
		l->owner = taskrunning;
		return 1;
	}
	if(!block)
		return 0;

	taskstate("qlock");
	addtask(&l->waiting, taskrunning);
	taskswitch();

	/* The unlocker is expected to transfer ownership to us before the
	 * wake-up; anything else indicates lock corruption. */
	if(l->owner != taskrunning){
		fprint(2, "qlock: owner=%p self=%p oops\n", l->owner, taskrunning);
		abort();
	}
	return 1;
}
Пример #22
0
/*
 * Release all MMU state held by an exiting process (amd64 variant):
 * fold any kmap page-table pages back into the regular per-process
 * MMU list, free everything, and switch this CPU back to its own
 * stack.
 */
void
mmurelease(Proc *proc)
{
	MMU *p;

	mmuzap();	/* presumably drops proc's entries from the live pml4 — see mmuzap */
	if((p = proc->kmaptail) != nil){
		/* Splice the kmap list in front of the mmu list so mmufree
		 * releases both. */
		if((p->next = proc->mmuhead) == nil)
			proc->mmutail = p;
		proc->mmuhead = proc->kmaphead;
		proc->mmucount += proc->kmapcount;

		proc->kmaphead = proc->kmaptail = nil;
		proc->kmapcount = proc->kmapindex = 0;
	}
	mmufree(proc);
	taskswitch((uintptr)m+MACHSIZE);	/* back onto the per-CPU Mach stack */
}
Пример #23
0
/*
 * Make proc's address space current on this CPU (amd64 variant):
 * clear the old user entries, reinstall proc's PML4-level entries and
 * KMAP slot, and switch to proc's kernel stack.
 */
void
mmuswitch(Proc *proc)
{
	MMU *p;

	mmuzap();	/* presumably clears the previously installed user entries — see mmuzap */
	if(proc->newtlb){
		/* Page tables were invalidated: drop them and rebuild lazily. */
		mmufree(proc);
		proc->newtlb = 0;
	}
	if((p = proc->kmaphead) != nil)
		m->pml4[PTLX(KMAP, 3)] = PADDR(p->page) | PTEWRITE|PTEVALID;
	/* Install PML4-level entries and record them in mmumap so the next
	 * mmuzap knows which slots to clear. */
	for(p = proc->mmuhead; p != nil && p->level == PML4E; p = p->next){
		m->mmumap[p->index/MAPBITS] |= 1ull<<(p->index%MAPBITS);
		m->pml4[p->index] = PADDR(p->page) | PTEUSER|PTEWRITE|PTEVALID;
	}
	taskswitch((uintptr)proc->kstack+KSTACK);
}
Пример #24
0
/*
 * Sleep the current task for at least ms milliseconds.
 * Inserts the task into the global `sleeping` list, which is kept
 * sorted by wake-up time (alarmtime); the poller task wakes sleepers
 * whose deadline has passed (presumably — see startfdtask/fdtask).
 * Returns the actual elapsed time in milliseconds.
 */
uint taskdelay(uint ms)
{
    uvlong when = 0L;
    uvlong now = 0L;
    Task *t = NULL;
   
    startfdtask();

    now = nsec();
    when = now + (uvlong)ms * 1000000;  /* absolute deadline in ns */
    /* Find the first sleeper that wakes later than us. */
    for(t=sleeping.head; t != NULL && t->alarmtime < when; t=t->next)
        ;

    /* Stage our links: insert just before t, or at the tail if every
     * sleeper wakes earlier. */
    if(t) {
        taskrunning->prev = t->prev;
        taskrunning->next = t;
    } else {
        taskrunning->prev = sleeping.tail;
        taskrunning->next = NULL;
    }
    
    t = taskrunning;
    t->alarmtime = when;

    /* Patch the neighbors (or the list head/tail) to point at us. */
    if(t->prev) {
        t->prev->next = t;
    } else {
        sleeping.head = t;
    }

    if(t->next) {
        t->next->prev = t;
    } else {
        sleeping.tail = t;
    }

    /* A sleeping non-system task still counts as a live task. */
    if(!t->system && sleepingcounted++ == 0) {
        taskcount++;
    }

    taskswitch();

    return (nsec() - now) / 1000000;
}
Пример #25
0
/*
 * Sleep the current task for at least ms milliseconds (epoll variant).
 * Lazily starts the epoll instance and fdtask poller on first use,
 * then inserts the task into the global `sleeping` list, kept sorted
 * by wake-up time (alarmtime).  Returns the elapsed time in ms.
 */
uint
taskdelay(uint ms)
{
	uvlong when, now;
	Task *t;
	
	if(!startedfdtask){
		startedfdtask = 1;
        epfd = epoll_create(1);
        assert(epfd >= 0);
		taskcreate(fdtask, 0, 32768 * 10);
	}

	now = nsec();
	when = now+(uvlong)ms*1000000;	/* absolute deadline in ns */
	/* Find the first sleeper that wakes later than us. */
	for(t=sleeping.head; t!=nil && t->alarmtime < when; t=t->next)
		;

	/* Stage our links: insert just before t, or at the tail. */
	if(t){
		taskrunning->prev = t->prev;
		taskrunning->next = t;
	}else{
		taskrunning->prev = sleeping.tail;
		taskrunning->next = nil;
	}
	
	t = taskrunning;
	t->alarmtime = when;
	/* Patch the neighbors (or the list head/tail) to point at us. */
	if(t->prev)
		t->prev->next = t;
	else
		sleeping.head = t;
	if(t->next)
		t->next->prev = t;
	else
		sleeping.tail = t;

	/* A sleeping non-system task still counts as a live task. */
	if(!t->system && sleepingcounted++ == 0)
		taskcount++;
	taskswitch();

	return (nsec() - now)/1000000;
}
Пример #26
0
/*
 * Finish MMU initialisation under Xen: in PAE mode rebuild the page
 * directory pointer table, map all remaining physical memory page by
 * page, mark kernel mappings global, and switch to the per-cpu
 * kernel stack.
 */
void
mmuinit(void)
{
	ulong *pte, npgs, pa;

	if(paemode){
		int i;
		xenpdpt = (uvlong*)m->pdb;
		m->pdb = xspanalloc(32, 32, 0);
		/* clear "reserved" bits in initial page directory pointers -- Xen bug? */
		for(i = 0; i < 4; i++)
			((uvlong*)m->pdb)[i] = xenpdpt[i] & ~0x1E6LL;
	}

	/* 
	 * So far only memory up to xentop is mapped, map the rest.
	 * We cant use large pages because our contiguous PA space
	 * is not necessarily contiguous in MA.
	 */
	npgs = conf.mem[0].npage;
	for(pa=conf.mem[0].base; npgs; npgs--, pa+=BY2PG) {
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 1);
		if(!pte)
			panic("mmuinit");
		xenupdate(pte, pa|PTEVALID|PTEWRITE);
	}

	memglobal();

#ifdef we_may_eventually_want_this
	/* make kernel text unwritable */
	for(x = KTZERO; x < (ulong)etext; x += BY2PG){
		p = mmuwalk(m->pdb, x, 2, 0);
		if(p == nil)
			panic("mmuinit");
		*p &= ~PTEWRITE;
	}
#endif

	/* No per-process pdb yet: use the prototype pdb (arg 0) and the
	 * per-cpu Mach stack. */
	taskswitch(0,  (ulong)m + BY2PG);
}
Пример #27
0
/*
 * Execute an alt: given an array of channel operations terminated by
 * CHANEND (blocking) or CHANNOBLK (non-blocking), perform one ready
 * op — chosen uniformly at random among the ready ones — and return
 * its index.  If none is ready: return -1 when non-blocking,
 * otherwise queue on every channel and sleep until a peer completes
 * one of the ops.  The `dbgalt` prints trace the decision process.
 */
int
chanalt(Alt *a)
{
	int i, j, ncan, n, canblock;
	Channel *c;
	Task *t;

	needstack(512);
	/* Count the ops; the terminator also encodes the blocking mode. */
	for(i=0; a[i].op != CHANEND && a[i].op != CHANNOBLK; i++)
		;
	n = i;
	canblock = a[i].op == CHANEND;

	t = taskrunning;
	/* Point every alt entry at the currently running task. */
	for(i=0; i<n; i++){
		a[i].task = t;
		a[i].xalt = a;
	}
if(dbgalt) print("alt ");
	/* How many ops could execute right now? */
	ncan = 0;
	for(i=0; i<n; i++){
		c = a[i].c;
if(dbgalt) print(" %c:", "esrnb"[a[i].op]);
if(dbgalt) { if(c->name) print("%s", c->name); else print("%p", c); }
		if(altcanexec(&a[i])){
if(dbgalt) print("*");
			ncan++;
		}
	}
	if(ncan){
		/* Pick the j-th ready op at random for fairness. */
		j = rand()%ncan;
		for(i=0; i<n; i++){
			if(altcanexec(&a[i])){
				if(j-- == 0){
if(dbgalt){
c = a[i].c;
print(" => %c:", "esrnb"[a[i].op]);
if(c->name) print("%s", c->name); else print("%p", c);
print("\n");
}
					altexec(&a[i]);
					return i;
				}
			}
		}
	}
if(dbgalt)print("\n");

	if(!canblock)
		return -1;

	/* Nothing ready: queue on every channel and sleep. */
	for(i=0; i<n; i++){
		if(a[i].op != CHANNOP)
			altqueue(&a[i]);
	}

	taskswitch();

	/*
	 * the guy who ran the op took care of dequeueing us
	 * and then set a[0].alt to the one that was executed.
	 */
	return a[0].xalt - a;
}
Пример #28
0
void
mmuinit(void)
{
	uintptr x;
	vlong v;
	int i;

	/* zap double map done by l.s */ 
	m->pml4[512] = 0;
	m->pml4[0] = 0;

	m->tss = mallocz(sizeof(Tss), 1);
	if(m->tss == nil)
		panic("mmuinit: no memory for Tss");
	m->tss->iomap = 0xDFFF;
	for(i=0; i<14; i+=2){
		x = (uintptr)m + MACHSIZE;
		m->tss->ist[i] = x;
		m->tss->ist[i+1] = x>>32;
	}

	/*
	 * We used to keep the GDT in the Mach structure, but it
	 * turns out that that slows down access to the rest of the
	 * page.  Since the Mach structure is accessed quite often,
	 * it pays off anywhere from a factor of 1.25 to 2 on real
	 * hardware to separate them (the AMDs are more sensitive
	 * than Intels in this regard).  Under VMware it pays off
	 * a factor of about 10 to 100.
	 */
	memmove(m->gdt, gdt, sizeof gdt);

	x = (uintptr)m->tss;
	m->gdt[TSSSEG+0].d0 = (x<<16)|(sizeof(Tss)-1);
	m->gdt[TSSSEG+0].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGTSS|SEGPL(0)|SEGP;
	m->gdt[TSSSEG+1].d0 = x>>32;
	m->gdt[TSSSEG+1].d1 = 0;

	loadptr(sizeof(gdt)-1, (uintptr)m->gdt, lgdt);
	loadptr(sizeof(Segdesc)*512-1, (uintptr)IDTADDR, lidt);
	taskswitch((uintptr)m + MACHSIZE);
	ltr(TSSSEL);

	wrmsr(0xc0000100, 0ull);	/* 64 bit fsbase */
	wrmsr(0xc0000101, (uvlong)&machp[m->machno]);	/* 64 bit gsbase */
	wrmsr(0xc0000102, 0ull);	/* kernel gs base */

	/* enable syscall extension */
	rdmsr(0xc0000080, &v);
	v |= 1ull;
	wrmsr(0xc0000080, v);

	/* IA32_STAR */
	wrmsr(0xc0000081, ((uvlong)UE32SEL << 48) | ((uvlong)KESEL << 32));

	/* IA32_LSTAR */
	wrmsr(0xc0000082, (uvlong)syscallentry);

	/* SYSCALL flags mask */
	wrmsr(0xc0000084, 0x200);
}