/*
 * New threads actually come through here on the way to the function
 * they're supposed to start in. This is so when that function exits,
 * thread_exit() can be called automatically.
 */
void
mi_threadstart(void *data1, unsigned long data2,
	       void (*func)(void *, unsigned long))
{
	/* If we have an address space, activate it */
	if (curthread->t_vmspace) {
		as_activate(curthread->t_vmspace);
	}

	/* Enable interrupts */
	spl0();

	assert(curthread->pid >= 1);
	//kprintf("curthread pid is %d\n", curthread->pid);

#if OPT_SYNCHPROBS
	/* Yield a random number of times to get a good mix of threads */
	{
		int i, n;
		n = random()%161 + random()%161;
		for (i=0; i<n; i++) {
			thread_yield();
		}
	}
#endif

#if OPT_A2
	int result = conSetup(curthread);	// stdin, stdout, stderr
	if (result)
		panic("Out of Memory\n");
#endif

	/* Call the function */
	func(data1, data2);

	/* Done. */
	thread_exit();
}
void
md_forkentry(void *data1, unsigned long unused)
{
	//(void)unused;
	struct fork_info *info = data1;
	struct addrspace *new_as;
	struct trapframe new_tf;

	/* copy address space */
	if (as_copy(info->parent->t_vmspace, &new_as)) {
		info->child_pid = ENOMEM;	// means no memory for the process
		V(info->sem);			// child is done, parent should resume
		thread_exit();
	}
	curthread->t_vmspace = new_as;
	as_activate(new_as);

	/* copy trap frame */
	memcpy(&new_tf, info->tf, sizeof(struct trapframe));

	/* change tf's registers to return values for syscall */
	new_tf.tf_v0 = 0;
	new_tf.tf_a3 = 0;
	new_tf.tf_epc += 4;

	/* create process and get the pid */
	struct thread *new_process = kmalloc(sizeof(struct thread));
	new_process->parent_pid = curthread->t_pid;
	proc_table++;
	new_process->t_pid = create_tpid();
	proc_table++;

	V(info->sem);

	mips_usermode(&new_tf);
	/* mips_usermode does not return */
}
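The parent side that pairs with this entry function is not shown. Below is a minimal sketch of what it could look like, assuming the OS/161 1.x thread_fork signature and inferring the fork_info fields (parent, tf, sem, child_pid) from the reads and writes above; it also assumes the child records its pid in child_pid on success, which the entry code above does not actually do, so treat the names and the pid hand-off as illustrative only.

int
sys_fork(struct trapframe *tf, pid_t *retval)
{
	struct fork_info info;
	struct thread *child;
	int result;

	/*
	 * The child copies everything it needs before it V()s the
	 * semaphore, so fork_info and the trapframe can stay on the
	 * parent's kernel stack.
	 */
	info.parent = curthread;
	info.tf = tf;
	info.child_pid = 0;
	info.sem = sem_create("fork_sem", 0);
	if (info.sem == NULL) {
		return ENOMEM;
	}

	result = thread_fork(curthread->t_name, &info, 0,
			     md_forkentry, &child);
	if (result) {
		sem_destroy(info.sem);
		return result;
	}

	/* Wait until the child has copied the trapframe and address space. */
	P(info.sem);
	sem_destroy(info.sem);

	if (info.child_pid == ENOMEM) {
		return ENOMEM;
	}

	/* Assumes the child recorded its pid in info.child_pid on success. */
	*retval = info.child_pid;
	return 0;
}

Because the parent blocks on the semaphore until the child has taken what it needs, neither the trapframe nor fork_info has to be copied to the kernel heap.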
int
sys_fork(struct trapframe *tf)
{
	struct thread *newthread;
	struct trapframe *newtf;
	struct addrspace *newas;
	int result;

	/* Copy the parent's trapframe to the kernel heap for the child. */
	newtf = (struct trapframe *) kmalloc(sizeof(struct trapframe));
	if (newtf == NULL) {
		return ENOMEM;
	}
	*newtf = *tf;

	/* Copy the parent's address space. */
	int err;
	err = as_copy(curthread->t_vmspace, &newas);
	if (err) {
		kfree(newtf);
		return ENOMEM;
	}
	as_activate(curthread->t_vmspace);

	result = thread_fork(curthread->t_name, newtf, (unsigned long) newas,
			     (void (*)(void *, unsigned long)) md_forkentry,
			     &newthread);
	if (result) {
		kfree(newtf);
		as_destroy(newas);
		return result;
	}

	return newthread->t_pid;
}
void
forked_child_thread_entry(void *ptr, unsigned long val)
{
	(void)val;

	as_activate();

	KASSERT(ptr != NULL);
	struct trapframe *tf = ptr;
	enter_forked_process(tf);
}
/* This is the entry point of the forked child process */
int
md_forkentry(void *tf, unsigned long vmspace)
{
	// int spl = splhigh();
	//kprintf(" process %d is in enter md_forkentry\n", curthread->pID);

	struct trapframe child_tf;
	struct trapframe *tf_parent = (struct trapframe *) tf;
	assert(tf_parent != NULL);
	// memcpy(child_tf, tf, sizeof(struct trapframe));

	tf_parent->tf_epc += 4;

	//we set the return value to be 0.
	tf_parent->tf_v0 = 0;
	tf_parent->tf_a3 = 0;

	// Load Address Space of Child and Activate it.
	curthread->t_vmspace = (struct addrspace *) vmspace;
	assert(curthread->t_vmspace != NULL);
	as_activate(curthread->t_vmspace);

	child_tf = *tf_parent;
	mips_usermode(&child_tf);

	// we should never reach here
	panic("switching to user mode returned\n");
	return 0;
}
void
md_forkentry(struct trapframe *tf, unsigned long addrspace_copy)
{
	/*
	 * This function is provided as a reminder. You need to write
	 * both it and the code that calls it.
	 *
	 * Thus, you can trash it and do things another way if you prefer.
	 */
	//(void)tf;
	//kprintf("\n md_forkentry");

	/* the child returns 0 from fork, with no error */
	tf->tf_v0 = 0;
	tf->tf_a3 = 0;
	/* skip past the syscall instruction so it is not re-executed
	   (must happen before the trapframe is copied below) */
	tf->tf_epc += 4;

	curthread->t_vmspace = (struct addrspace *) addrspace_copy;
	if (curthread->t_vmspace != NULL) {
		as_activate(curthread->t_vmspace);
	}

	/* copy the modified trap frame from the kernel heap to the stack */
	struct trapframe tf_temp = *tf;	//=kmalloc(sizeof(struct trapframe));
	tf_temp.tf_v0 = tf->tf_v0;
	tf_temp.tf_a0 = tf->tf_a0;
	tf_temp.tf_a1 = tf->tf_a1;
	tf_temp.tf_a2 = tf->tf_a2;
	tf_temp.tf_a3 = tf->tf_a3;

	mips_usermode(&tf_temp);
}
/*
 * This function is where new threads start running. The arguments
 * ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
 *
 * Because new code comes here from inside the middle of
 * thread_switch, the beginning part of this function must match the
 * tail of thread_switch.
 */
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
	       void *data1, unsigned long data2)
{
	struct thread *cur;

	cur = curthread;

	/* Clear the wait channel and set the thread state. */
	cur->t_wchan_name = NULL;
	cur->t_state = S_RUN;

	/* Release the runqueue lock acquired in thread_switch. */
	spinlock_release(&curcpu->c_runqueue_lock);

	/* Activate our address space in the MMU. */
	as_activate();

	/* Clean up dead threads. */
	exorcise();

	/* Enable interrupts. */
	spl0();

	/* Call the function. */
	entrypoint(data1, data2);

	/* Done. */
	thread_exit();
}
void
enter_process(void *tf, unsigned long addr)
{
	struct trapframe *childframe, child_tf;
	struct addrspace *childspace;

	if (tf != NULL) {
		childframe = (struct trapframe *) tf;

		//copy the trapframe info now into the child_tf
		// child_tf = *childframe;
		memcpy(&child_tf, childframe, sizeof(struct trapframe));
		child_tf.tf_a3 = 0;
		child_tf.tf_v0 = 0;
		child_tf.tf_epc += 4;

		pid_t parentid = (pid_t) addr;
		if (process_array[curthread->t_pid]->parent_id != parentid)
			process_array[curthread->t_pid]->parent_id = parentid;

		if (!(curthread->t_addrspace == NULL)) {
			childspace = curthread->t_addrspace;
			as_activate(childspace);
		}

		mips_usermode(&child_tf);
	}
}
void
md_forkentry(struct trapframe *tf, unsigned int vmspace)
{
	struct trapframe child_kstack;

	/* copy the parent's trapframe to the child process */
	memcpy(&child_kstack, tf, sizeof(struct trapframe));

	/* release the trapframe that was allocated in the fork syscall */
	kfree(tf);

	/* activate addrspace (assumes the caller already attached it to this thread) */
	as_activate((struct addrspace *) vmspace);

	/* increment pc and set fork's return values for the child */
	child_kstack.tf_epc += 4;
	child_kstack.tf_v0 = 0;
	child_kstack.tf_a3 = 0;

	/* release lock so that other processes can execute process_fork */
	// lock_release(forklock);

	/* execute the child process */
	mips_usermode(&child_kstack);
}
void
md_forkentry(struct trapframe *tf, unsigned long child_addr)
{
	/*
	 * This function is basically for the child thread: thread_fork in
	 * sys_fork calls md_forkentry and passes in the new trapframe, the
	 * copied address space, and the child thread.
	 */
	//kprintf("just enter forkentry\n");
	struct trapframe *modifiy_tf;
	struct trapframe dumbvalue;
	struct addrspace *new_addr = (struct addrspace *) child_addr;

	modifiy_tf = kmalloc(sizeof(struct trapframe));
	//kprintf("just enter memcpy modified tf and tf\n");
	memcpy(modifiy_tf, tf, sizeof(struct trapframe));
	//kprintf("just after memcpy modified tf and tf\n");
	//*modifiy_tf = *tf;

	/* child returns 0 from fork; skip past the syscall instruction */
	modifiy_tf->tf_v0 = 0;
	modifiy_tf->tf_a3 = 0;
	modifiy_tf->tf_epc += 4;

	/* attach and activate the copied address space */
	curthread->t_vmspace = new_addr;
	//kprintf("activate thread -- pid: %d\n", curthread->t_pid);
	as_activate(curthread->t_vmspace);

	//memcpy(dumbvalue, tf, sizeof(struct trapframe));
	/* copy the trapframe onto the stack and free the heap copy */
	dumbvalue = *modifiy_tf;
	kfree(modifiy_tf);

	mips_usermode(&dumbvalue);
}
/*
 * New threads actually come through here on the way to the function
 * they're supposed to start in. This is so when that function exits,
 * thread_exit() can be called automatically.
 */
void
mi_threadstart(void *data1, unsigned long data2,
	       void (*func)(void *, unsigned long))
{
	/* If we have an address space, activate it */
	if (curthread->t_vmspace) {
		as_activate(curthread->t_vmspace);
	}

	/* Enable interrupts */
	spl0();

#if OPT_SYNCHPROBS
	/* Yield a random number of times to get a good mix of threads */
	{
		int i, n;
		n = random()%161 + random()%161;
		for (i=0; i<n; i++) {
			thread_yield();
		}
	}
#endif

	/* Call the function */
	func(data1, data2);

	/* Done. */
	thread_exit(0);
}
int
runprogram(char *progname)
{
	struct addrspace *as;
	struct vnode *v;
	vaddr_t entrypoint, stackptr;
	int result;

	/* Open the file. */
	result = vfs_open(progname, O_RDONLY, 0, &v);
	if (result) {
		return result;
	}

	/* We should be a new process. */
	KASSERT(curproc_getas() == NULL);

	/* Create a new address space. */
#if OPT_A3
	as = as_create(progname);
#else
	as = as_create();
#endif
	if (as == NULL) {
		vfs_close(v);
		return ENOMEM;
	}

	/* Switch to it and activate it. */
	curproc_setas(as);
	as_activate();

	/* Load the executable. */
	result = load_elf(v, &entrypoint);
	if (result) {
		/* p_addrspace will go away when curproc is destroyed */
		vfs_close(v);
		return result;
	}

	/* Done with the file now. */
	vfs_close(v);

	/* Define the user stack in the address space */
	result = as_define_stack(as, &stackptr);
	if (result) {
		/* p_addrspace will go away when curproc is destroyed */
		return result;
	}

	/* Warp to user mode. */
	enter_new_process(0 /*argc*/, NULL /*userspace addr of argv*/,
			  stackptr, entrypoint);

	/* enter_new_process does not return. */
	panic("enter_new_process returned\n");
	return EINVAL;
}
/*
 * Load program "progname" and start running it in usermode.
 * Does not return except on error.
 *
 * Calls vfs_open on progname and thus may destroy it.
 */
int
runprogram(char *progname)
{
	struct vnode *v;
	vaddr_t entrypoint, stackptr;
	int result;

	/* Open the file. */
	result = vfs_open(progname, O_RDONLY, 0, &v);
	if (result) {
		return result;
	}

	/* We should be a new thread. */
	KASSERT(curthread->t_addrspace == NULL);

	/* Create a new address space. */
	curthread->t_addrspace = as_create();
	if (curthread->t_addrspace == NULL) {
		vfs_close(v);
		return ENOMEM;
	}

	/* Create the file table for this thread. */
	curthread->t_filetable = filetable_create();
	if (curthread->t_filetable == NULL) {
		vfs_close(v);
		return ENOMEM;
	}

	/* Activate it. */
	as_activate(curthread->t_addrspace);

	/* Load the executable. */
	result = load_elf(v, &entrypoint);
	if (result) {
		/* thread_exit destroys curthread->t_addrspace */
		vfs_close(v);
		return result;
	}

	/* Done with the file now. */
	vfs_close(v);

	/* Define the user stack in the address space */
	result = as_define_stack(curthread->t_addrspace, &stackptr);
	if (result) {
		/* thread_exit destroys curthread->t_addrspace */
		return result;
	}

	/* Warp to user mode. */
	enter_new_process(0 /*argc*/, NULL /*userspace addr of argv*/,
			  stackptr, entrypoint);

	/* enter_new_process does not return. */
	panic("enter_new_process returned\n");
	return EINVAL;
}
/*
 * Load program "progname" and start running it in usermode.
 * Does not return except on error.
 *
 * Calls vfs_open on progname and thus may destroy it.
 */
int
runprogram(char *progname)
{
	struct vnode *v;
	vaddr_t entrypoint, stackptr;
	int result;

	/* Open the file. */
	result = vfs_open(progname, O_RDONLY, &v);
	if (result) {
		return result;
	}

	/* We should be a new thread. */
	assert(curthread->t_vmspace == NULL);

	/* Create a new address space. */
	curthread->t_vmspace = as_create();
	if (curthread->t_vmspace == NULL) {
		vfs_close(v);
		return ENOMEM;
	}

	//kprintf("\n in runprogram");
	assignPid();

	/* Activate it. */
	as_activate(curthread->t_vmspace);

	/* Load the executable. */
	result = load_elf(v, &entrypoint);
	if (result) {
		/* thread_exit destroys curthread->t_vmspace */
		vfs_close(v);
		return result;
	}

	/* Done with the file now. */
	vfs_close(v);

	/* Define the user stack in the address space */
	result = as_define_stack(curthread->t_vmspace, &stackptr);
	if (result) {
		/* thread_exit destroys curthread->t_vmspace */
		return result;
	}

	/* Warp to user mode. */
	md_usermode(0 /*argc*/, NULL /*userspace addr of argv*/,
		    stackptr, entrypoint);

	/* md_usermode does not return */
	panic("md_usermode returned\n");
	return EINVAL;
}
/*
 * Enter user mode for a newly forked process.
 *
 * This function is provided as a reminder. You need to write
 * both it and the code that calls it.
 *
 * Thus, you can trash it and do things another way if you prefer.
 */
void
enter_forked_process(struct trapframe *tf)
{
	//kprintf("enter_forked_process \n");
#if OPT_A2
	struct trapframe thisTF = *tf;	//create the trapframe on this stack
	thisTF.tf_v0 = 0;		//child returns with 0
	thisTF.tf_a3 = 0;		//signal no error
	thisTF.tf_epc += 4;		//advance the program counter past the syscall

	as_activate();
	mips_usermode(&thisTF);
	panic("Should not return from mips_usermode\n");
#else
	(void)tf;
#endif
}
void
child_process_entry(void *data1, unsigned long data2)
{
	struct trapframe *tf = (struct trapframe *)data1;

	curthread->t_addrspace = (struct addrspace *)data2;
	as_activate(curthread->t_addrspace);

	struct trapframe usertf;
	usertf = *tf;
	//memcpy(&usertf, tf, sizeof(struct trapframe));

	usertf.tf_v0 = 0;
	usertf.tf_a3 = 0;
	usertf.tf_epc += 4;

	kfree(tf);

	mips_usermode(&usertf);
}
void
child_fork_entry(void *data1, unsigned long data2)
{
	struct trapframe *ctf = (struct trapframe *)data1;
	struct addrspace *caddr = (struct addrspace *)data2;

	/* child returns 0 from fork; skip past the syscall instruction */
	ctf->tf_a3 = 0;
	ctf->tf_v0 = 0;
	ctf->tf_epc = ctf->tf_epc + 4;

	/* attach and activate the copied address space */
	curthread->t_addrspace = caddr;
	as_activate(curthread->t_addrspace);

	/* copy the trapframe onto this thread's stack */
	struct trapframe tf;
	memcpy(&tf, ctf, sizeof(struct trapframe));

	mips_usermode(&tf);
	//KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf_copy));
}
/*
 * Enter user mode for a newly forked process.
 *
 * This function is provided as a reminder. You need to write
 * both it and the code that calls it.
 *
 * Thus, you can trash it and do things another way if you prefer.
 */
void
enter_forked_process(struct trapframe *tf)
{
#if OPT_A2
	struct trapframe forkTf = *tf;

	forkTf.tf_a3 = 0;	//signal no error
	forkTf.tf_v0 = 0;
	forkTf.tf_epc += 4;	//advance the PC, to avoid the syscall again

	as_activate();
	mips_usermode(&forkTf);
	panic("can't come here");
#else
	(void)tf;
#endif //OPT_A2
}
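The header comment notes that the calling code is not shown. Below is a minimal sketch of one possible caller, assuming the OS/161 2.x proc API (proc_create_runprogram, thread_fork taking a struct proc, a p_addrspace field) and a student-added pid field on struct proc; the names sys_fork, child_entry, and p_pid are illustrative, not from the original code.

/* Hypothetical child-side wrapper so the signature matches thread_fork. */
static void
child_entry(void *data1, unsigned long data2)
{
	(void)data2;
	enter_forked_process((struct trapframe *)data1);
}

/* Sketch of a sys_fork that hands a trapframe copy to enter_forked_process. */
int
sys_fork(struct trapframe *tf, pid_t *retval)
{
	struct proc *child;
	struct trapframe *tf_copy;
	int result;

	/* New process under the parent's name. */
	child = proc_create_runprogram(curproc->p_name);
	if (child == NULL) {
		return ENOMEM;
	}

	/* Give the child a copy of the parent's address space. */
	result = as_copy(curproc->p_addrspace, &child->p_addrspace);
	if (result) {
		proc_destroy(child);
		return result;
	}

	/* Copy the trapframe to the heap so it outlives this stack frame. */
	tf_copy = kmalloc(sizeof(*tf_copy));
	if (tf_copy == NULL) {
		proc_destroy(child);
		return ENOMEM;
	}
	*tf_copy = *tf;

	result = thread_fork(curthread->t_name, child, child_entry, tf_copy, 0);
	if (result) {
		kfree(tf_copy);
		proc_destroy(child);
		return result;
	}

	/* tf_copy is leaked here; freeing it would have to happen in
	   enter_forked_process before mips_usermode. */
	*retval = child->p_pid;	/* assumes a pid field added to struct proc */
	return 0;
}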
/*
 * Enter user mode for a newly forked process.
 *
 * This function is provided as a reminder. You need to write
 * both it and the code that calls it.
 *
 * Thus, you can trash it and do things another way if you prefer.
 */
void
enter_forked_process(void *ptr, unsigned long args)
{
	//panic("test");
	//while(1) {}

	struct trapframe childtf;
	bzero(&childtf, sizeof(childtf));
	childtf = *(struct trapframe *) ptr;
	childtf.tf_v0 = 0;
	childtf.tf_a3 = 0;
	childtf.tf_epc += 4;
	kfree(ptr);

	// load and activate the address space
	struct addrspace *as = (struct addrspace *) args;
	proc_setas(as);
	as_activate();

	mips_usermode(&childtf);
}
void
enter_forked_process(void *data1, unsigned long data2)
{
	struct trapframe *childtf = ((void **)data1)[0];
	struct addrspace *childas = ((void **)data1)[1];

	// using a local variable to put the trapframe on the stack
	struct trapframe local = *childtf;

	// switch to childas
	curproc_setas(childas);
	as_activate();

	// set registers
	local.tf_epc += 4;
	local.tf_v0 = 0;
	local.tf_a3 = 0;

	(void)data2;

	mips_usermode(&local);
}
void
child_forkentry(void *data1, unsigned long data2)
{
	struct trapframe st_trapframe;
	struct trapframe *childtf = (struct trapframe *) data1;
	struct addrspace *childaddr = (struct addrspace *) data2;

	childtf->tf_v0 = 0;
	childtf->tf_a3 = 0;
	childtf->tf_epc += 4;

	memcpy(&st_trapframe, childtf, sizeof(struct trapframe));
	kfree(childtf);
	childtf = NULL;

	curproc->p_addrspace = childaddr;
	as_activate();

	mips_usermode(&st_trapframe);
	//KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf_copy));
}
void
md_forkentry(struct fork_data *fd, unsigned long un)
{
	(void) un;

	/* copy the trapframe onto this thread's stack */
	struct trapframe local_tf;
	local_tf = *(fd->tf);

	/* attach and activate the copied address space */
	curthread->t_vmspace = fd->addr;
	as_activate(curthread->t_vmspace);

	/* free the heap copies passed in by the fork syscall */
	kfree(fd->tf);
	kfree(fd);

	/* child returns 0 from fork; skip past the syscall instruction */
	local_tf.tf_epc += 4;
	local_tf.tf_a3 = 0;
	local_tf.tf_v0 = 0;

	mips_usermode(&local_tf);
}
/*
 * This function is where new threads start running. The arguments
 * ENTRYPOINT, DATA1, and DATA2 are passed through from thread_fork.
 *
 * Because new code comes here from inside the middle of
 * thread_switch, the beginning part of this function must match the
 * tail of thread_switch.
 */
void
thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
	       void *data1, unsigned long data2)
{
	struct thread *cur;

	cur = curthread;

	/* Clear the wait channel and set the thread state. */
	cur->t_wchan_name = NULL;
	cur->t_state = S_RUN;

	/* Release the runqueue lock acquired in thread_switch. */
	spinlock_release(&curcpu->c_runqueue_lock);

	/* Activate our address space in the MMU. */
	as_activate();

	/* Clean up dead threads. */
	exorcise();

	/* Enable interrupts. */
	spl0();

#if OPT_SYNCHPROBS
	/* Yield a random number of times to get a good mix of threads. */
	{
		int i, n;
		n = random()%161 + random()%161;
		for (i=0; i<n; i++) {
			thread_yield();
		}
	}
#endif

	/* Call the function. */
	entrypoint(data1, data2);

	/* Done. */
	thread_exit();
}
/*
 * Enter user mode for a newly forked process.
 *
 * This function is provided as a reminder. You need to write
 * both it and the code that calls it.
 *
 * Thus, you can trash it and do things another way if you prefer.
 */
void
enter_forked_process(void *tf, unsigned long arg)
{
#if OPT_A2
	(void) arg;

	as_activate();	//activate the address space

	struct trapframe stacktf;
	stacktf = *(struct trapframe *) tf;	//copy the trapframe
	stacktf.tf_v0 = 0;
	stacktf.tf_a3 = 0;
	stacktf.tf_epc = stacktf.tf_epc + 4;
	kfree(tf);

	mips_usermode(&stacktf);
	return;
#else
	(void)tf;
	(void)arg;
#endif
}
void
md_forkentry(struct trapframe *tf, unsigned long as_data)
{
	struct trapframe tf_local;
	struct trapframe *newtf;
	struct addrspace *newas = (struct addrspace *) as_data;

	/* copy the trapframe onto this thread's stack, then free the heap copy */
	tf_local = *tf;
	newtf = &tf_local;
	newtf->tf_v0 = 0;
	newtf->tf_a3 = 0;
	newtf->tf_epc += 4;
	kfree(tf);

	/* attach and activate the copied address space */
	curthread->t_vmspace = newas;
	as_activate(curthread->t_vmspace);

	mips_usermode(&tf_local);
}
void
md_forkentry(struct trapframe *tf)
{
	/*
	 * This function is used to copy the trap frame of the parent process
	 * into the child's and return the child process to user mode.
	 */

	//int spl;
	// disable interrupts
	//spl = splhigh();

	//new trap frame for the child
	struct trapframe newtf;

	// copy the trap frame
	memcpy(&newtf, tf, sizeof(struct trapframe));
	//newtf = *tf;
	newtf.tf_v0 = 0;	// return value of the child process should be 0
	newtf.tf_epc += 4;	// advance the PC to avoid redoing the syscall
	newtf.tf_a3 = 0;	// indicate that there are no errors

	/* the parent passed the child's address space through tf_a0 */
	curthread->t_vmspace = (struct addrspace *) tf->tf_a0;
	//memcpy(curthread->t_vmspace, (struct addrspace *) tf->tf_a0, sizeof(struct addrspace*));
	//kprintf("BEOFRE");
	//struct addrspace *temp = (struct addrspace *) tf->tf_a0;
	//int test = as_copy(curthread->t_vmspace, &temp);
	//kprintf("as_copy: %d\n", test);
	//kprintf("TESTING HERE");
	as_activate(curthread->t_vmspace);

	kfree(tf);

	/* let the parent know we are done with its data */
	V(curthread->t_sem_fork);

	//kprintf("md_forkentry childpid: %d\n", curthread->t_pid);
	// re-enable interrupts
	//splx(spl);

	// Warp to user mode
	mips_usermode(&newtf);
}
static
void
md_forkentry(struct trapframe *tf, unsigned long vmspace)
{
	struct addrspace *t_vmspace = (struct addrspace *) vmspace;

	// disable interrupts
	splhigh();

	if (t_vmspace == NULL) {
		thread_exit();
	}

	assert(curthread->t_vmspace == NULL);
	curthread->t_vmspace = t_vmspace;

	// copy the trapframe to our own stack and free it
	struct trapframe mytf;
	memmove(&mytf, tf, sizeof(mytf));
	kfree(tf);

	// set the return value to 0
	mytf.tf_v0 = 0;
	mytf.tf_a3 = 0;
	// next instruction
	mytf.tf_epc += 4;

	/* Activate it. */
	// as_hold(curthread->t_vmspace);
	as_activate(curthread->t_vmspace);

	// kprintf("sw to user mode.\n");
	// switch to usermode
	mips_usermode(&mytf);

	// should not come here
	assert(0);
}
void
entrypoint(void *data1, unsigned long data2)
{
	struct trapframe *tf, tfnew;
	struct addrspace *addr;

	tf = (struct trapframe *) data1;
	addr = (struct addrspace *) data2;

	/* child returns 0 from fork; skip past the syscall instruction */
	tf->tf_a3 = 0;
	tf->tf_v0 = 0;
	tf->tf_epc += 4;

	//kprintf(" LMAO ");
	curproc->p_addrspace = addr;
	//kprintf(" rekt ");
	//memcpy(tfnew, tf, sizeof(struct trapframe));
	as_activate();
	//kprintf(" nips ");

	/* copy the trapframe onto this thread's stack */
	tfnew = *tf;
	//kprintf(" nips ");

	mips_usermode(&tfnew);
	//kprintf(" SWAG ");
}
/*
 * Load program "progname" and start running it in usermode.
 * Does not return except on error.
 *
 * Calls vfs_open on progname and thus may destroy it.
 */
int
runprogram(char *progname, char **args, unsigned long nargs)
{
	struct vnode *v;
	vaddr_t entrypoint, stackptr;
	int result;

	/* Initialize the console descriptors (stdin, stdout, stderr).
	   Each fdesc keeps ownership of its name string, so the strings
	   are not freed here. */
	char *consolein;
	char *consoleout;
	char *consoleerr;
	consolein = kstrdup("con:");
	consoleout = kstrdup("con:");
	consoleerr = kstrdup("con:");

	curthread->t_fdtable[0] = (struct fdesc *) kmalloc(sizeof(struct fdesc));
	if (vfs_open(consolein, O_RDONLY, 0664, &(curthread->t_fdtable[0]->vn))) {
		return EINVAL;
	}
	curthread->t_fdtable[0]->name = consolein;
	curthread->t_fdtable[0]->flag = O_RDONLY;
	curthread->t_fdtable[0]->ref_count = 1;
	curthread->t_fdtable[0]->filelock = lock_create("STDIN");

	curthread->t_fdtable[1] = (struct fdesc *) kmalloc(sizeof(struct fdesc));
	if (vfs_open(consoleout, O_WRONLY, 0664, &(curthread->t_fdtable[1]->vn))) {
		return EINVAL;
	}
	curthread->t_fdtable[1]->name = consoleout;
	curthread->t_fdtable[1]->flag = O_WRONLY;
	curthread->t_fdtable[1]->ref_count = 1;
	curthread->t_fdtable[1]->filelock = lock_create("STDOUT");

	curthread->t_fdtable[2] = (struct fdesc *) kmalloc(sizeof(struct fdesc));
	if (vfs_open(consoleerr, O_WRONLY, 0664, &(curthread->t_fdtable[2]->vn))) {
		return EINVAL;
	}
	curthread->t_fdtable[2]->name = consoleerr;
	curthread->t_fdtable[2]->flag = O_WRONLY;
	curthread->t_fdtable[2]->ref_count = 1;
	curthread->t_fdtable[2]->filelock = lock_create("STDERR");
	/* Console initialized. */

	/* Open the file. */
	result = vfs_open(progname, O_RDONLY, 0, &v);
	if (result) {
		return result;
	}

	/* We should be a new thread. */
	KASSERT(curthread->t_addrspace == NULL);

	/* Create a new address space. */
	curthread->t_addrspace = as_create();
	if (curthread->t_addrspace == NULL) {
		vfs_close(v);
		return ENOMEM;
	}

	/* Activate it. */
	as_activate(curthread->t_addrspace);

	/* Load the executable. */
	result = load_elf(v, &entrypoint);
	if (result) {
		/* thread_exit destroys curthread->t_addrspace */
		vfs_close(v);
		return result;
	}

	/* Done with the file now. */
	vfs_close(v);

	/* Define the user stack in the address space */
	result = as_define_stack(curthread->t_addrspace, &stackptr);
	if (result) {
		/* thread_exit destroys curthread->t_addrspace */
		return result;
	}

	/* Copy the argument strings onto the user stack. */
	int argc = (signed) nargs;
	int err;
	char **argv;

	/* kmalloc argv with room for nargs pointers plus a NULL terminator */
	argv = kmalloc((nargs + 1) * sizeof(userptr_t));
	size_t len;
	for (int i = 0; i < argc; i++) {
		/* round the length up to a multiple of 4 to keep the stack aligned */
		len = strlen(args[i]) + 1;
		len = (len + 4 - 1) & ~(size_t) (4 - 1);
		stackptr -= len;
		if ((err = copyout(args[i], (userptr_t) stackptr, len)) != 0) {
			kprintf("error copyout");
		}
		argv[i] = (char *) stackptr;
	}
	argv[nargs] = NULL;

	/* Copy the argv pointer array itself onto the user stack. */
	stackptr -= (nargs + 1) * sizeof(userptr_t);
	if ((err = copyout(argv, (userptr_t) stackptr,
			   (nargs + 1) * sizeof(char *)))) {
		kprintf("error copying");
	}
	kfree(argv);

	/* Warp to user mode. */
	enter_new_process(argc, (userptr_t) stackptr /*userspace addr of argv*/,
			  stackptr, entrypoint);

	/* enter_new_process does not return. */
	panic("enter_new_process returned\n");
	return EINVAL;
}
/*
 * High level, machine-independent context switch code.
 *
 * The current thread is queued appropriately and its state is changed
 * to NEWSTATE; another thread to run is selected and switched to.
 *
 * If NEWSTATE is S_SLEEP, the thread is queued on the wait channel
 * WC, protected by the spinlock LK. Otherwise WC and LK should be
 * NULL.
 */
static
void
thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
{
	struct thread *cur, *next;
	int spl;

	DEBUGASSERT(curcpu->c_curthread == curthread);
	DEBUGASSERT(curthread->t_cpu == curcpu->c_self);

	/* Explicitly disable interrupts on this processor */
	spl = splhigh();

	cur = curthread;

	/*
	 * If we're idle, return without doing anything. This happens
	 * when the timer interrupt interrupts the idle loop.
	 */
	if (curcpu->c_isidle) {
		splx(spl);
		return;
	}

	/* Check the stack guard band. */
	thread_checkstack(cur);

	/* Lock the run queue. */
	spinlock_acquire(&curcpu->c_runqueue_lock);

	/* Micro-optimization: if nothing to do, just return */
	if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) {
		spinlock_release(&curcpu->c_runqueue_lock);
		splx(spl);
		return;
	}

	/* Put the thread in the right place. */
	switch (newstate) {
	    case S_RUN:
		panic("Illegal S_RUN in thread_switch\n");
	    case S_READY:
		thread_make_runnable(cur, true /*have lock*/);
		break;
	    case S_SLEEP:
		cur->t_wchan_name = wc->wc_name;
		/*
		 * Add the thread to the list in the wait channel, and
		 * unlock same. To avoid a race with someone else
		 * calling wchan_wake*, we must keep the wchan's
		 * associated spinlock locked from the point the
		 * caller of wchan_sleep locked it until the thread is
		 * on the list.
		 */
		threadlist_addtail(&wc->wc_threads, cur);
		spinlock_release(lk);
		break;
	    case S_ZOMBIE:
		cur->t_wchan_name = "ZOMBIE";
		threadlist_addtail(&curcpu->c_zombies, cur);
		break;
	}
	cur->t_state = newstate;

	/*
	 * Get the next thread. While there isn't one, call md_idle().
	 * curcpu->c_isidle must be true when md_idle is
	 * called. Unlock the runqueue while idling too, to make sure
	 * things can be added to it.
	 *
	 * Note that we don't need to unlock the runqueue atomically
	 * with idling; becoming unidle requires receiving an
	 * interrupt (either a hardware interrupt or an interprocessor
	 * interrupt from another cpu posting a wakeup) and idling
	 * *is* atomic with respect to re-enabling interrupts.
	 *
	 * Note that c_isidle becomes true briefly even if we don't go
	 * idle. However, because one is supposed to hold the runqueue
	 * lock to look at it, this should not be visible or matter.
	 */

	/* The current cpu is now idle. */
	curcpu->c_isidle = true;
	do {
		next = threadlist_remhead(&curcpu->c_runqueue);
		if (next == NULL) {
			spinlock_release(&curcpu->c_runqueue_lock);
			cpu_idle();
			spinlock_acquire(&curcpu->c_runqueue_lock);
		}
	} while (next == NULL);
	curcpu->c_isidle = false;

	/*
	 * Note that curcpu->c_curthread may be the same variable as
	 * curthread and it may not be, depending on how curthread and
	 * curcpu are defined by the MD code. We'll assign both and
	 * assume the compiler will optimize one away if they're the
	 * same.
	 */
	curcpu->c_curthread = next;
	curthread = next;

	/* do the switch (in assembler in switch.S) */
	switchframe_switch(&cur->t_context, &next->t_context);

	/*
	 * When we get to this point we are either running in the next
	 * thread, or have come back to the same thread again,
	 * depending on how you look at it. That is,
	 * switchframe_switch returns immediately in another thread
	 * context, which in general will be executing here with a
	 * different stack and different values in the local
	 * variables. (Although new threads go to thread_startup
	 * instead.) But, later on when the processor, or some
	 * processor, comes back to the previous thread, it's also
	 * executing here with the *same* value in the local
	 * variables.
	 *
	 * The upshot, however, is as follows:
	 *
	 *    - The thread now currently running is "cur", not "next",
	 *      because when we return from switchframe_switch on the
	 *      same stack, we're back to the thread that the
	 *      switchframe_switch call switched away from, which is
	 *      "cur".
	 *
	 *    - "cur" is _not_ the thread that just *called*
	 *      switchframe_switch.
	 *
	 *    - If newstate is S_ZOMB we never get back here in that
	 *      context at all.
	 *
	 *    - If the thread just chosen to run ("next") was a new
	 *      thread, we don't get to this code again until
	 *      *another* context switch happens, because when new
	 *      threads return from switchframe_switch they teleport
	 *      to thread_startup.
	 *
	 *    - At this point the thread whose stack we're now on may
	 *      have been migrated to another cpu since it last ran.
	 *
	 * The above is inherently confusing and will probably take a
	 * while to get used to.
	 *
	 * However, the important part is that code placed here, after
	 * the call to switchframe_switch, does not necessarily run on
	 * every context switch. Thus any such code must be either
	 * skippable on some switches or also called from
	 * thread_startup.
	 */

	/* Clear the wait channel and set the thread state. */
	cur->t_wchan_name = NULL;
	cur->t_state = S_RUN;

	/* Unlock the run queue. */
	spinlock_release(&curcpu->c_runqueue_lock);

	/* Activate our address space in the MMU. */
	as_activate();

	/* Clean up dead threads. */
	exorcise();

	/* Turn interrupts back on. */
	splx(spl);
}