// call scheduler to update tick related info, and check the timer is expired? If expired, then wakup proc void run_timer_list(void) { bool intr_flag; local_intr_save(intr_flag); { list_entry_t *le = list_next(&timer_list); if (le != &timer_list) { timer_t *timer = le2timer(le, timer_link); assert(timer->expires != 0); timer->expires --; while (timer->expires == 0) { le = list_next(le); struct proc_struct *proc = timer->proc; if (proc->wait_state != 0) { assert(proc->wait_state & WT_INTERRUPTED); } else { warn("process %d's wait_state == 0.\n", proc->pid); } cprintf("process pid = %d wait finished.",proc->pid); wakeup_proc(proc); del_timer(timer); if (le == &timer_list) { break; } timer = le2timer(le, timer_link); } } sched_class_proc_tick(current); } local_intr_restore(intr_flag); }
// ipc_event_send - post `event` to process `pid` and wait (up to
// `timeout` ticks) for it to be received.
// Returns 0 on success, -E_INVAL for a missing/zombie/forbidden target,
// or the status computed by ipc_check_timeout after an interruption.
int ipc_event_send(int pid, int event, unsigned int timeout) {
    struct proc_struct *proc;
    // Target must exist and must not be a zombie.
    if ((proc = find_proc(pid)) == NULL || proc->state == PROC_ZOMBIE) {
        return -E_INVAL;
    }
    // Refuse self-sends and sends to the core kernel threads.
    if (proc == current || proc == idleproc || proc == initproc) {
        return -E_INVAL;
    }
#ifdef UCONFIG_SWAP
    if(proc == kswapd)
        return -E_INVAL;
#endif
    // If the receiver is already blocked in event-receive, wake it so it
    // can consume the event we are about to post.
    if (proc->wait_state == WT_EVENT_RECV) {
        wakeup_proc(proc);
    }
    // The event payload is stored in the *sender's* event box.
    current->event_box.event = event;
    unsigned long saved_ticks;
    // Arm an on-stack timer bounding how long we block in send_event.
    timer_t __timer, *timer = ipc_timer_init(timeout, &saved_ticks, &__timer);
    uint32_t flags;
    if ((flags = send_event(proc, timer)) == 0) {
        return 0;
    }
    // send_event only fails via interruption; map that to a timeout status.
    assert(flags == WT_INTERRUPTED);
    return ipc_check_timeout(timeout, saved_ticks);
}
// try_free_pages - calculate pressure to estimate the number(pressure<<5) of needed page frames in ucore currently, // - then call kswapd kernel thread. bool try_free_pages(size_t n) { struct proc_struct *current = pls_read(current); if (!swap_init_ok || kswapd == NULL) { return 0; } if (current == kswapd) { panic("kswapd call try_free_pages!!.\n"); } if (n >= (1 << 7)) { return 0; } pressure += n; wait_t __wait, *wait = &__wait; bool intr_flag; local_intr_save(intr_flag); { wait_init(wait, current); current->state = PROC_SLEEPING; current->wait_state = WT_KSWAPD; wait_queue_add(&kswapd_done, wait); if (kswapd->wait_state == WT_TIMER) { wakeup_proc(kswapd); } } local_intr_restore(intr_flag); schedule(); assert(!wait_in_queue(wait) && wait->wakeup_flags == WT_KSWAPD); return 1; }
/* do_fork - parent process for a new child process
 * @clone_flags: used to guide how to clone the child process
 * @stack: the parent's user stack pointer. if stack==0, It means to fork a kernel thread.
 * @tf: the trapframe info, which will be copied to child process's proc->tf
 * Returns the child's pid on success, or a negative error code.
 */
int
do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) {
    int ret = -E_NO_FREE_PROC;
    struct proc_struct *proc;
    if (nr_process >= MAX_PROCESS) {
        goto fork_out;
    }
    ret = -E_NO_MEM;
    // 1. allocate and default-initialise a proc_struct
    if ((proc = alloc_proc()) == NULL) {
        goto fork_out;
    }
    proc->parent = current;
    // 2. allocate a kernel stack for the child
    if (setup_kstack(proc)) {
        goto bad_fork_cleanup_proc;
    }
    // 3. duplicate OR share current's mm according to clone_flags
    if (copy_mm(clone_flags, proc)) {
        goto bad_fork_cleanup_kstack;
    }
    // 4. set up the trapframe and kernel context in proc_struct
    copy_thread(proc, stack, tf);
    // 5. publish the child only after every fallible step has succeeded.
    //    (BUG FIX: the old code incremented nr_process and assigned the
    //    pid before setup_kstack/copy_mm, so every failure path leaked
    //    the process count permanently.)
    proc->pid = get_pid();
    hash_proc(proc);
    list_add(&proc_list, &(proc->list_link));
    nr_process ++;
    // 6. make the new child process RUNNABLE
    wakeup_proc(proc);
    // 7. set ret value using the child proc's pid
    ret = proc->pid;
fork_out:
    return ret;
bad_fork_cleanup_kstack:
    put_kstack(proc);
bad_fork_cleanup_proc:
    kfree(proc);
    goto fork_out;
}
// wakeup_wait - wake the process attached to `wait`, recording
// `wakeup_flags` as the reason it was woken; when `del` is set, the
// wait entry is unlinked from `queue` first.
void
wakeup_wait(wait_queue_t *queue, wait_t *wait, uint32_t wakeup_flags, bool del) {
    wait->wakeup_flags = wakeup_flags;
    if (del) {
        wait_queue_del(queue, wait);
    }
    wakeup_proc(wait->proc);
}
/* do_fork - parent process for a new child process
 * @clone_flags: used to guide how to clone the child process
 * @stack: the parent's user stack pointer. if stack==0, It means to fork a kernel thread.
 * @tf: the trapframe info, which will be copied to child process's proc->tf
 * Returns the child's pid on success, or a negative error code.
 */
int
do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) {
    int ret = -E_NO_FREE_PROC;
    struct proc_struct *proc;
    if (nr_process >= MAX_PROCESS) {
        goto fork_out;
    }
    ret = -E_NO_MEM;
    // 1. allocate a proc_struct.
    //    (BUG FIX: the old code used the result without checking for
    //    NULL, dereferencing a null pointer on allocation failure.)
    if ((proc = alloc_proc()) == NULL) {
        goto fork_out;
    }
    proc->pid = get_pid();
    cprintf("fork pid = %d\n", proc->pid);
    // Record the parent BEFORE the child becomes runnable.
    // (BUG FIX: the old code set proc->parent only after wakeup_proc,
    // so the scheduled child could observe an unset parent.)
    proc->parent = current;
    // 2. allocate a kernel stack for the child.
    //    (BUG FIX: the old code ignored setup_kstack's return value,
    //    leaving the cleanup labels unreachable.)
    if (setup_kstack(proc)) {
        goto bad_fork_cleanup_proc;
    }
    // 3. set up the trapframe & context in proc_struct
    copy_thread(proc, stack, tf);
    // 4. insert into the pid hash list (so find_proc can locate the
    //    child -- missing in the old code) and into proc_list
    hash_proc(proc);
    list_add_before(&proc_list, &proc->list_link);
    nr_process++;
    // 5. make the new child process RUNNABLE
    wakeup_proc(proc);
    // 6. set ret value using the child proc's pid
    ret = proc->pid;
fork_out:
    return ret;
bad_fork_cleanup_kstack:
    put_kstack(proc);
bad_fork_cleanup_proc:
    kfree(proc);
    goto fork_out;
}
// __do_kill - kill a process with PCB by set this process's flags with PF_EXITING static int __do_kill(struct proc_struct *proc, int error_code) { if (!(proc->flags & PF_EXITING)) { proc->flags |= PF_EXITING; proc->exit_code = error_code; if (proc->wait_state & WT_INTERRUPTED) { wakeup_proc(proc); } return 0; } return -E_KILLED; }
// wakeup_wait - SMP variant: wake the process behind `wait`, recording
// `wakeup_flags` as the wake reason; when `del` is set, the entry is
// unlinked from `queue` first. The per-wait spinlock protects
// wakeup_flags against a concurrent reader on another CPU.
// NOTE(review): wait_queue_del itself runs without a lock here -- the
// caller is presumably expected to hold whatever lock guards `queue`;
// confirm against call sites.
void wakeup_wait(wait_queue_t * queue, wait_t * wait, uint32_t wakeup_flags, bool del) {
    if (del) {
        wait_queue_del(queue, wait);
    }
    spinlock_acquire(&wait->lock);
    wait->wakeup_flags = wakeup_flags;
    spinlock_release(&wait->lock);
    wakeup_proc(wait->proc);
}
// __ucore_wakeup_by_pid - wake the process identified by `pid`.
// Returns 0 on success, -E_INVAL if no such process exists or it is a
// zombie.
int __ucore_wakeup_by_pid(int pid) {
    //kprintf("ucore_wakeup_by_pid %d\n", pid);
    struct proc_struct *proc = find_proc(pid);
    if (!proc)
        return -E_INVAL;
    bool flag;
    // Interrupts off so the state check and the wakeup are atomic on
    // this CPU.
    local_intr_save(flag);
    if (proc->state == PROC_ZOMBIE) {
        local_intr_restore(flag);
        return -E_INVAL;
    }
    // NOTE(review): waking a process that is already PROC_RUNNABLE looks
    // suspicious -- one would expect this to target PROC_SLEEPING.
    // Confirm against wakeup_proc's expected preconditions before
    // changing.
    if (proc->state == PROC_RUNNABLE)
        wakeup_proc(proc);
    local_intr_restore(flag);
    return 0;
}
// do_fork - parent process for a new child process // 1. call alloc_proc to allocate a proc_struct // 2. call setup_kstack to allocate a kernel stack for child process // 3. call copy_mm to dup OR share mm according clone_flag // 4. call wakup_proc to make the new child process RUNNABLE int do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) { int ret = -E_NO_FREE_PROC; struct proc_struct *proc; if (nr_process >= MAX_PROCESS) { goto fork_out; } ret = -E_NO_MEM; if ((proc = alloc_proc()) == NULL) { goto fork_out; } proc->parent = current; if (setup_kstack(proc) != 0) { goto bad_fork_cleanup_proc; } if (copy_mm(clone_flags, proc) != 0) { goto bad_fork_cleanup_kstack; } copy_thread(proc, stack, tf); unsigned long intr_flag; local_irq_save(intr_flag); { proc->pid = get_pid(); hash_proc(proc); list_add(&proc_list, &(proc->list_link)); nr_process ++; } local_irq_restore(intr_flag); wakeup_proc(proc); ret = proc->pid; fork_out: return ret; bad_fork_cleanup_kstack: put_kstack(proc); bad_fork_cleanup_proc: kfree(proc); goto fork_out; }
void run_timer_list(void) { bool intr_flag; local_intr_save(intr_flag); { list_entry_t *le = list_next(&timer_list); if (le != &timer_list) { timer_t *timer = le2timer(le, timer_link); assert(timer->expires != 0); timer->expires --; while (timer->expires == 0) { le = list_next(le); if(__ucore_is_linux_timer(timer)){ struct __ucore_linux_timer *lt = &(timer->linux_timer); if(lt->function) (lt->function)(lt->data); del_timer(timer); kfree(timer); continue; } struct proc_struct *proc = timer->proc; if (proc->wait_state != 0) { assert(proc->wait_state & WT_INTERRUPTED); } else { warn("process %d's wait_state == 0.\n", proc->pid); } wakeup_proc(proc); del_timer(timer); if (le == &timer_list) { break; } timer = le2timer(le, timer_link); } } sched_class_proc_tick(current); } local_intr_restore(intr_flag); }
// do_exit - called by sys_exit // 1. set process' state as PROC_ZOMBIE, then call wakeup_proc(parent) to ask parent reclaim itself. // 2. call scheduler to switch to other process int do_exit(int error_code) { if (current == idleproc) { panic("idleproc exit.\n"); } cprintf(" do_exit: proc pid %d will exit\n", current->pid); cprintf(" do_exit: proc parent %x\n", current->parent); current->state = PROC_ZOMBIE; bool intr_flag; struct proc_struct *proc; local_intr_save(intr_flag); { proc = current->parent; if (proc->wait_state == WT_CHILD) { wakeup_proc(proc); } } local_intr_restore(intr_flag); schedule(); panic("do_exit will not return!! %d.\n", current->pid); }
// __do_exit - cause a thread exit (use do_exit, do_exit_thread instead) // 1. call exit_mmap & put_pgdir & mm_destroy to free the almost all memory space of process // 2. set process' state as PROC_ZOMBIE, then call wakeup_proc(parent) to ask parent reclaim itself. // 3. call scheduler to switch to other process static int __do_exit(void) { if (current == idleproc) { panic("idleproc exit.\n"); } if (current == initproc) { panic("initproc exit.\n"); } struct mm_struct *mm = current->mm; if (mm != NULL) { mm->lapic = -1; mp_set_mm_pagetable(NULL); if (mm_count_dec(mm) == 0) { exit_mmap(mm); put_pgdir(mm); bool intr_flag; local_intr_save(intr_flag); { list_del(&(mm->proc_mm_link)); } local_intr_restore(intr_flag); mm_destroy(mm); } current->mm = NULL; } put_sighand(current); put_signal(current); put_fs(current); put_sem_queue(current); current->state = PROC_ZOMBIE; bool intr_flag; struct proc_struct *proc, *parent; local_intr_save(intr_flag); { proc = parent = current->parent; do { if (proc->wait_state == WT_CHILD) { wakeup_proc(proc); } proc = next_thread(proc); } while (proc != parent); if ((parent = next_thread(current)) == current) { parent = initproc; } de_thread(current); while (current->cptr != NULL) { proc = current->cptr; current->cptr = proc->optr; proc->yptr = NULL; if ((proc->optr = parent->cptr) != NULL) { parent->cptr->yptr = proc; } proc->parent = parent; parent->cptr = proc; if (proc->state == PROC_ZOMBIE) { if (parent->wait_state == WT_CHILD) { wakeup_proc(parent); } } } } wakeup_queue(&(current->event_box.wait_queue), WT_INTERRUPTED, 1); local_intr_restore(intr_flag); schedule(); panic("__do_exit will not return!! %d %d.\n", current->pid, current->exit_code); }
// do_fork - parent process for a new child process // 1. call alloc_proc to allocate a proc_struct // 2. call setup_kstack to allocate a kernel stack for child process // 3. call copy_mm to dup OR share mm according clone_flag // 4. call wakup_proc to make the new child process RUNNABLE int do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) { int ret = -E_NO_FREE_PROC; struct proc_struct *proc; if (nr_process >= MAX_PROCESS) { goto fork_out; } ret = -E_NO_MEM; if ((proc = alloc_proc()) == NULL) { goto fork_out; } proc->parent = current; list_init(&(proc->thread_group)); assert(current->wait_state == 0); assert(current->time_slice >= 0); proc->time_slice = current->time_slice / 2; current->time_slice -= proc->time_slice; if (setup_kstack(proc) != 0) { goto bad_fork_cleanup_proc; } if (copy_sem(clone_flags, proc) != 0) { goto bad_fork_cleanup_kstack; } if (copy_fs(clone_flags, proc) != 0) { goto bad_fork_cleanup_sem; } if ( copy_signal(clone_flags, proc) != 0 ) { goto bad_fork_cleanup_fs; } if ( copy_sighand(clone_flags, proc) != 0 ) { goto bad_fork_cleanup_signal; } if (copy_mm(clone_flags, proc) != 0) { goto bad_fork_cleanup_sighand; } if (copy_thread(clone_flags, proc, stack, tf) != 0) { goto bad_fork_cleanup_sighand; } bool intr_flag; local_intr_save(intr_flag); { proc->pid = get_pid(); proc->tid = proc->pid; hash_proc(proc); set_links(proc); if (clone_flags & CLONE_THREAD) { list_add_before(&(current->thread_group), &(proc->thread_group)); proc->gid = current->gid; }else{ proc->gid = proc->pid; } } local_intr_restore(intr_flag); wakeup_proc(proc); ret = proc->pid; fork_out: return ret; bad_fork_cleanup_sighand: put_sighand(proc); bad_fork_cleanup_signal: put_signal(proc); bad_fork_cleanup_fs: put_fs(proc); bad_fork_cleanup_sem: put_sem_queue(proc); bad_fork_cleanup_kstack: put_kstack(proc); bad_fork_cleanup_proc: kfree(proc); goto fork_out; }
// init_main - the second kernel thread used to create kswapd_main & user_main kernel threads static int init_main(void *arg) { int pid; #ifndef CONFIG_NO_SWAP if ((pid = kernel_thread(kswapd_main, NULL, 0)) <= 0) { panic("kswapd init failed.\n"); } kswapd = find_proc(pid); set_proc_name(kswapd, "kswapd"); #else #warning swapping disabled #endif int ret; char root[] = "disk0:"; if ((ret = vfs_set_bootfs(root)) != 0) { panic("set boot fs failed: %e.\n", ret); } size_t nr_used_pages_store = nr_used_pages(); size_t slab_allocated_store = slab_allocated(); unsigned int nr_process_store = nr_process; pid = kernel_thread(user_main, NULL, 0); if (pid <= 0) { panic("create user_main failed.\n"); } while (do_wait(0, NULL) == 0) { if (nr_process_store == nr_process) { break; } schedule(); } #ifndef CONFIG_NO_SWAP assert(kswapd != NULL); int i; for (i = 0; i < 10; i ++) { if (kswapd->wait_state == WT_TIMER) { wakeup_proc(kswapd); } schedule(); } #endif mbox_cleanup(); fs_cleanup(); kprintf("all user-mode processes have quit, no /bin/sh?.\n"); #ifndef CONFIG_NO_SWAP assert(initproc->cptr == kswapd && initproc->yptr == NULL && initproc->optr == NULL); assert(kswapd->cptr == NULL && kswapd->yptr == NULL && kswapd->optr == NULL); assert(nr_process == 2 + pls_read(lcpu_count)); #else assert(nr_process == 1 + pls_read(lcpu_count)); #endif assert(nr_used_pages_store == nr_used_pages()); assert(slab_allocated_store == slab_allocated()); kprintf("init check memory pass.\n"); return 0; }