/*===========================================================================* * thread_cleanup * *===========================================================================*/ static void thread_cleanup(struct fproc *rfp) { /* Clean up worker thread. Skip parts if this thread is not associated * with a particular process (i.e., rfp is NULL) */ #if LOCK_DEBUG if (rfp != NULL) { check_filp_locks_by_me(); check_vnode_locks_by_me(rfp); check_vmnt_locks_by_me(rfp); } #endif if (rfp != NULL && rfp->fp_flags & FP_PM_PENDING) { /* Postponed PM call */ job_m_in = rfp->fp_job.j_m_in; rfp->fp_flags &= ~FP_PM_PENDING; service_pm_postponed(); } #if LOCK_DEBUG if (rfp != NULL) { check_filp_locks_by_me(); check_vnode_locks_by_me(rfp); check_vmnt_locks_by_me(rfp); } #endif if (rfp != NULL) { rfp->fp_flags &= ~FP_DROP_WORK; unlock_proc(rfp); } }
/*===========================================================================* * pm_reboot * *===========================================================================*/ PUBLIC void pm_reboot() { /* Perform the VFS side of the reboot call. */ int i; struct fproc *rfp; do_sync(); /* Do exit processing for all leftover processes and servers, * but don't actually exit them (if they were really gone, PM * will tell us about it). */ for (i = 0; i < NR_PROCS; i++) { rfp = &fproc[i]; if (rfp->fp_endpoint == NONE) continue; /* Don't just free the proc right away, but let it finish what it was * doing first */ lock_proc(rfp, 0); free_proc(rfp, 0); unlock_proc(rfp); } do_sync(); unmount_all(); }
/*===========================================================================* * pm_reboot * *===========================================================================*/ void pm_reboot() { /* Perform the VFS side of the reboot call. */ int i; struct fproc *rfp; do_sync(); /* Do exit processing for all leftover processes and servers, but don't * actually exit them (if they were really gone, PM will tell us about it). * Skip processes that handle parts of the file system; we first need to give * them the chance to unmount (which should be possible as all normal * processes have no open files anymore). */ for (i = 0; i < NR_PROCS; i++) { rfp = &fproc[i]; /* Don't just free the proc right away, but let it finish what it was * doing first */ lock_proc(rfp, 0); if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) free_proc(rfp, 0); unlock_proc(rfp); } do_sync(); unmount_all(0 /* Don't force */); /* Try to exit all processes again including File Servers */ for (i = 0; i < NR_PROCS; i++) { rfp = &fproc[i]; /* Don't just free the proc right away, but let it finish what it was * doing first */ lock_proc(rfp, 0); if (rfp->fp_endpoint != NONE) free_proc(rfp, 0); unlock_proc(rfp); } do_sync(); unmount_all(1 /* Force */); }
/*===========================================================================*
 *				pm_reboot				     *
 *===========================================================================*/
void pm_reboot()
{
/* Perform the VFS side of the reboot call. This call is performed from the PM
 * process context. */
  message m_out;
  int i, r;
  struct fproc *rfp, *pmfp;

  /* Remember PM's own fproc entry so we can restore the worker's process
   * context after each temporary switch below. */
  pmfp = fp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to give
   * them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore). */

  /* This is the only place where we allow special modification of "fp". The
   * reboot procedure should really be implemented as a PM message broadcasted
   * to all processes, so that each process will be shut down cleanly by a
   * thread operating on its behalf. Doing everything here is simpler, but it
   * requires an exception to the strict model of having "fp" be the process
   * that owns the current worker thread.
   */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

  /* Reply to PM for synchronization */
  memset(&m_out, 0, sizeof(m_out));

  m_out.m_type = VFS_PM_REBOOT_REPLY;

  if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
	panic("pm_reboot: ipc_send failed: %d", r);
}
/*===========================================================================*
 *				pm_exec					     *
 *===========================================================================*/
int pm_exec(endpoint_t proc_e, vir_bytes path, size_t path_len, vir_bytes frame,
	size_t frame_len, vir_bytes *pc, vir_bytes *newsp, int user_exec_flags)
{
/* Perform the execve(name, argv, envp) call. The user library builds a
 * complete stack image, including pointers, args, environ, etc. The stack
 * is copied to a buffer inside VFS, and then to the new core image.
 *
 * proc_e	endpoint of the process doing the exec
 * path/path_len	user-space path of the executable
 * frame/frame_len	user-space stack image built by libc
 * pc/newsp	out: initial program counter and stack pointer
 * user_exec_flags	flags passed from exec() libc code
 *
 * Returns OK or a negative error code; on error the old image may already be
 * gone (after libexec_pm_newexec), in which case PM kills the process.
 */
  int r, slot;
  vir_bytes vsp;
  struct fproc *rfp;
  int extrabase = 0;
  static char mbuf[ARG_MAX];	/* buffer for stack and zeroes */
  struct vfs_exec_info execi;
  int i;
  static char fullpath[PATH_MAX],
	elf_interpreter[PATH_MAX],
	firstexec[PATH_MAX],
	finalexec[PATH_MAX];
  struct lookup resolve;
  struct fproc *vmfp = &fproc[VM_PROC_NR];
  stackhook_t makestack = NULL;
  struct filp *newfilp = NULL;

  /* Serialize execs, and lock VM's fproc entry: we may hand VM an fd below. */
  lock_exec();
  lock_proc(vmfp, 0);

  /* unset execi values are 0. */
  memset(&execi, 0, sizeof(execi));
  execi.vmfd = -1;

  /* passed from exec() libc code */
  execi.userflags = user_exec_flags;
  execi.args.stack_high = kinfo.user_sp;
  execi.args.stack_size = DEFAULT_STACK_LIMIT;

  okendpt(proc_e, &slot);
  rfp = fp = &fproc[slot];

  lookup_init(&resolve, fullpath, PATH_NOFLAGS, &execi.vmp, &execi.vp);
  resolve.l_vmnt_lock = VMNT_READ;
  resolve.l_vnode_lock = VNODE_READ;

  /* Fetch the stack from the user before destroying the old core image. */
  if (frame_len > ARG_MAX)
	FAILCHECK(ENOMEM);	/* stack too big */

  r = sys_datacopy(proc_e, (vir_bytes) frame, SELF, (vir_bytes) mbuf,
		(size_t) frame_len);
  if (r != OK) {	/* can't fetch stack (e.g. bad virtual addr) */
	printf("VFS: pm_exec: sys_datacopy failed\n");
	FAILCHECK(r);
  }

  /* The default is to keep the original user and group IDs */
  execi.args.new_uid = rfp->fp_effuid;
  execi.args.new_gid = rfp->fp_effgid;

  /* Get the exec file name. */
  FAILCHECK(fetch_name(path, path_len, fullpath));
  strlcpy(finalexec, fullpath, PATH_MAX);
  strlcpy(firstexec, fullpath, PATH_MAX);

  /* Get_read_vp will return an opened vn in execi.
   * if necessary it releases the existing vp so we can
   * switch after we find out what's inside the file.
   * It reads the start of the file.
   */
  Get_read_vp(execi, fullpath, 1, 1, &resolve, fp);

  /* If this is a script (i.e. has a #!/interpreter line),
   * retrieve the name of the interpreter and open that
   * executable instead.
   */
  if(is_script(&execi)) {
	/* patch_stack will add interpreter name and
	 * args to stack and retrieve the new binary
	 * name into fullpath.
	 */
	FAILCHECK(fetch_name(path, path_len, fullpath));
	FAILCHECK(patch_stack(execi.vp, mbuf, &frame_len, fullpath));

	strlcpy(finalexec, fullpath, PATH_MAX);
	strlcpy(firstexec, fullpath, PATH_MAX);
	Get_read_vp(execi, fullpath, 1, 0, &resolve, fp);
  }

  /* If this is a dynamically linked executable, retrieve
   * the name of that interpreter in elf_interpreter and open that
   * executable instead. But open the current executable in an
   * fd for the current process.
   */
  if(elf_has_interpreter(execi.args.hdr, execi.args.hdr_len,
	elf_interpreter, sizeof(elf_interpreter))) {
	/* Switch the executable vnode to the interpreter */
	execi.is_dyn = 1;

	/* The interpreter (loader) needs an fd to the main program,
	 * which is currently in finalexec
	 */
	if((r = execi.elf_main_fd =
	    common_open(finalexec, O_RDONLY, 0)) < 0) {
		printf("VFS: exec: dynamic: open main exec failed %s (%d)\n",
			fullpath, r);
		FAILCHECK(r);
	}

	/* ld.so is linked at 0, but it can relocate itself; we
	 * want it higher to trap NULL pointer dereferences.
	 */
	execi.args.load_offset = 0x10000;

	/* Remember it */
	strlcpy(execi.execname, finalexec, PATH_MAX);

	/* The executable we need to execute first (loader)
	 * is in elf_interpreter, and has to be in fullpath to
	 * be looked up
	 */
	strlcpy(fullpath, elf_interpreter, PATH_MAX);
	strlcpy(firstexec, elf_interpreter, PATH_MAX);
	Get_read_vp(execi, fullpath, 0, 0, &resolve, fp);
  }

  /* We also want an FD for VM to mmap() the process in if possible. */
  {
	struct vnode *vp = execi.vp;
	assert(vp);
	if(vp->v_vmnt->m_haspeek && major(vp->v_dev) != MEMORY_MAJOR) {
		int newfd = -1;
		if(get_fd(vmfp, 0, R_BIT, &newfd, &newfilp) == OK) {
			assert(newfd >= 0 && newfd < OPEN_MAX);
			assert(!vmfp->fp_filp[newfd]);
			newfilp->filp_count = 1;
			newfilp->filp_vno = vp;
			newfilp->filp_flags = O_RDONLY;
			FD_SET(newfd, &vmfp->fp_filp_inuse);
			vmfp->fp_filp[newfd] = newfilp;
			/* dup_vnode(vp); */
			execi.vmfd = newfd;
			execi.args.memmap = vfs_memmap;
		}
	}
  }

  /* callback functions and data */
  execi.args.copymem = read_seg;
  execi.args.clearproc = libexec_clearproc_vm_procctl;
  execi.args.clearmem = libexec_clear_sys_memset;
  execi.args.allocmem_prealloc_cleared = libexec_alloc_mmap_prealloc_cleared;
  execi.args.allocmem_prealloc_junk = libexec_alloc_mmap_prealloc_junk;
  execi.args.allocmem_ondemand = libexec_alloc_mmap_ondemand;
  execi.args.opaque = &execi;

  execi.args.proc_e = proc_e;
  execi.args.frame_len = frame_len;
  execi.args.filesize = execi.vp->v_size;

  /* Try each registered loader until one recognizes the binary format. */
  for (i = 0; exec_loaders[i].load_object != NULL; i++) {
	r = (*exec_loaders[i].load_object)(&execi.args);
	/* Loaded successfully, so no need to try other loaders */
	if (r == OK) { makestack = exec_loaders[i].setup_stack; break; }
  }

  FAILCHECK(r);

  /* Inform PM */
  FAILCHECK(libexec_pm_newexec(proc_e, &execi.args));

  /* Save off PC */
  *pc = execi.args.pc;

  /* call a stack-setup function if this executable type wants it */
  vsp = execi.args.stack_high - frame_len;
  if(makestack) FAILCHECK(makestack(&execi, mbuf, &frame_len, &vsp, &extrabase));

  /* Patch up stack and copy it from VFS to new core image. */
  libexec_patch_ptr(mbuf, vsp + extrabase);
  FAILCHECK(sys_datacopy(SELF, (vir_bytes) mbuf, proc_e, (vir_bytes) vsp,
		   (phys_bytes)frame_len));

  /* Return new stack pointer to caller */
  *newsp = vsp;

  clo_exec(rfp);

  if (execi.args.allow_setuid) {
	/* If after loading the image we're still allowed to run with
	 * setuid or setgid, change credentials now */
	rfp->fp_effuid = execi.args.new_uid;
	rfp->fp_effgid = execi.args.new_gid;
  }

  /* Remember the new name of the process */
  strlcpy(rfp->fp_name, execi.args.progname, PROC_NAME_LEN);

pm_execfinal:
  /* Common cleanup path: release the executable's vnode (unless its lock was
   * handed to VM's new filp), drop the VM fd if VM never used it, and release
   * the locks taken at the top. */
  if(newfilp) unlock_filp(newfilp);
  else if (execi.vp != NULL) {
	unlock_vnode(execi.vp);
	put_vnode(execi.vp);
  }

  if(execi.vmfd >= 0 && !execi.vmfd_used) {
	if(OK != close_fd(vmfp, execi.vmfd)) {
		printf("VFS: unexpected close fail of vm fd\n");
	}
  }

  unlock_proc(vmfp);
  unlock_exec();

  return(r);
}