/* Spawn a kernel thread. * This is not quite cool, but we have to do some initialization in * kernel's address space, the approach in linux0.11 is not quite * ease here for the fact that trap occured in the kernel space do * not refering the esp in TSS. * * returns a pointer to the newly borned proc, one page size(with the kernel stack). * */ struct proc* kspawn(void (*func)()){ uint nr; int fd, n; struct file *fp; struct proc *p; nr = find_pid(); if (nr == 0) { panic("no free pid"); } p = (struct proc *) kmalloc(PAGE); if (p==NULL) { panic("no free page"); } proc[nr] = p; p->p_stat = SSLEEP; // set SRUN later. p->p_pid = nr; p->p_ppid = cu->p_pid; p->p_pgrp = cu->p_pgrp; p->p_flag = cu->p_flag; p->p_cpu = cu->p_cpu; p->p_nice = cu->p_nice; p->p_pri = PUSER; // p->p_euid = cu->p_euid; p->p_egid = cu->p_egid; p->p_ruid = cu->p_ruid; p->p_rgid = cu->p_rgid; // increase the reference count of inodes, and dup files if (cu->p_wdir != NULL) { p->p_wdir = cu->p_wdir; p->p_wdir->i_count++; p->p_iroot = cu->p_iroot; p->p_iroot->i_count++; } // dup the files, and fdflag for (fd=0; fd<NOFILE; fd++){ fp = cu->p_ofile[fd]; if (fp != NULL) { fp->f_count++; fp->f_ino->i_count++; } p->p_ofile[fd] = fp; p->p_fdflag[fd] = cu->p_fdflag[fd]; } // signals p->p_sig = cu->p_sig; p->p_sigmask = cu->p_sigmask; for (n=0; n<NSIG; n++) { p->p_sigact[n] = cu->p_sigact[n]; } // clone kernel's address space. vm_clone(&p->p_vm); p->p_contxt = cu->p_contxt; p->p_contxt.eip = (uint)func; p->p_contxt.esp = (uint)p+PAGE; p->p_stat = SRUN; return p; }
/*
 * Fork the current task.
 *
 * flags: FORK_SHAREDIR selects vm_copy() (shared page directory) over
 *        vm_clone() for the child's address space; FORK_SHAREDAT is
 *        passed through to copy_task_struct().
 *
 * Returns the child's pid in the parent, 0 in the child (classic fork
 * semantics -- both returns happen from this one function body via the
 * read_eip() trick below), or -ENOMEM if the address space could not
 * be created.
 *
 * NOTE(review): the ordering of engage_new_stack / store_context_fork /
 * read_eip is load-bearing; the child resumes execution at the eip
 * captured here. Do not reorder these statements.
 */
int do_fork(unsigned flags)
{
	assert(current_task && kernel_task);
	/* MAX_TASKS == -1 means "unlimited"; note the first comparison is
	 * against (unsigned)MAX_TASKS, i.e. UINT_MAX in that case. */
	assert(running_processes < (unsigned)MAX_TASKS || MAX_TASKS == -1);
	addr_t eip;
	task_t *task = task_create();
	page_dir_t *newspace;
	if(flags & FORK_SHAREDIR)
		newspace = vm_copy(current_task->pd);
	else
		newspace = vm_clone(current_task->pd, 0);
	if(!newspace)
	{
		/* could not build an address space: undo the task allocation */
		kfree((void *)task);
		return -ENOMEM;
	}
	/* set the address space's entry for the current task.
	 * this is a fast and easy way to store the "what task am I" data
	 * that gets automatically updated when the scheduler switches
	 * into a new address space */
	arch_specific_set_current_task(newspace, (addr_t)task);
	/* Create the new task structure */
	task->pd = newspace;
	copy_task_struct(task, current_task, flags & FORK_SHAREDAT);
	add_atomic(&running_processes, 1);
	/* Set the state as usleep temporarily, so that it doesn't accidentally run.
	 * And then add it to the queue */
	task->state = TASK_USLEEP;
	tqueue_insert(primary_queue, (void *)task, task->listnode);
	/* Default: child runs on the parent's CPU; SMP builds may pick another. */
	cpu_t *cpu = (cpu_t *)current_task->cpu;
#if CONFIG_SMP
	cpu = fork_choose_cpu(current_task);
#endif
	/* Copy the stack (interrupts off while the stacks are in flux) */
	set_int(0);
	engage_new_stack(task, current_task);
	/* Here we read the EIP of this exact location. The parent then sets the
	 * eip of the child to this. On the reschedule for the child, it will
	 * start here as well.
	 * 'parent' is volatile so the comparison below is re-evaluated after
	 * the child resumes rather than folded away by the compiler. */
	volatile task_t *parent = current_task;
	store_context_fork(task);
	eip = read_eip();
	if(current_task == parent)
	{
		/* Parent path: these last things allow full execution of the task */
		task->eip=eip;
		task->state = TASK_RUNNING;
		task->cpu = cpu;
		add_atomic(&cpu->numtasks, 1);
		tqueue_insert(cpu->active_queue, (void *)task, task->activenode);
		__engage_idle();
		return task->pid;
	}
	/* Child path: we got here via the saved eip, report 0 like fork(2). */
	return 0;
}