/*===========================================================================*
 *				sef_cb_signal_manager			     *
 *===========================================================================*/
static int sef_cb_signal_manager(endpoint_t target, int signo)
{
/* Process a system signal on behalf of the kernel: either absorb it, turn
 * it into service termination, or forward it to the service as a message.
 */
  int slot;
  struct rproc *rp;
  message m;

  /* Resolve the target endpoint to a service slot; unknown endpoints get
   * their signal silently cleared.
   */
  if(rs_isokendpt(target, &slot) != OK || rproc_ptr[slot] == NULL) {
      if(rs_verbose)
          printf("RS: ignoring spurious signal %d for process %d\n",
              signo, target);
      return OK; /* clear the signal */
  }
  rp = rproc_ptr[slot];

  /* A termination signal was already processed and the service is not
   * currently exiting: the process is gone.
   */
  if((rp->r_flags & RS_TERMINATED) && !(rp->r_flags & RS_EXITING))
      return EDEADEPT;

  /* Drop external signals aimed at inactive service instances. */
  if(!(rp->r_flags & RS_ACTIVE) && !(rp->r_flags & RS_EXITING)) {
      if(rs_verbose)
          printf("RS: ignoring signal %d for inactive %s\n",
              signo, srv_to_string(rp));
      return OK; /* clear the signal */
  }

  if(rs_verbose) {
      const char *kind =
          SIGS_IS_TERMINATION(signo) ? "termination" : "non-termination";
      printf("RS: %s got %s signal %d\n", srv_to_string(rp), kind, signo);
  }

  /* Print stacktrace if necessary. */
  if(SIGS_IS_STACKTRACE(signo))
      sys_diagctl_stacktrace(target);

  /* Termination signals trigger the service-termination event. */
  if(SIGS_IS_TERMINATION(signo)) {
      rp->r_flags |= RS_TERMINATED;
      terminate_service(rp);
      rs_idle_period();
      return EDEADEPT; /* process is now gone */
  }

  /* Never deliver signals to VM. */
  if(rp->r_pub->endpoint == VM_PROC_NR)
      return OK;

  /* Translate every non-termination signal into a message. */
  m.m_type = SIGS_SIGNAL_RECEIVED;
  m.m_pm_lsys_sigs_signal.num = signo;
  rs_asynsend(rp, &m, 1);

  return OK; /* signal has been delivered */
}
/*===========================================================================*
 *				do_munmap				     *
 *===========================================================================*/
int do_munmap(message *m)
{
/* Unmap a range from a process' address space.  Serves three message
 * types: VM_MUNMAP (caller unmaps its own page-aligned range of VMUM_LEN
 * bytes), VM_UNMAP_PHYS and VM_SHM_UNMAP (unmap a whole region, on behalf
 * of an explicit target endpoint, located by its start address).
 */
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr, len;
	endpoint_t target = SELF;

	/* Physical and shared-memory unmaps carry an explicit target
	 * endpoint; plain munmap applies to the message source itself.
	 */
	if(m->m_type == VM_UNMAP_PHYS) {
		target = m->m_lsys_vm_unmap_phys.ep;
	} else if(m->m_type == VM_SHM_UNMAP) {
		target = m->m_lc_vm_shm_unmap.forwhom;
	}

	if(target == SELF)
		target = m->m_source;

	if((r=vm_isokendpt(target, &n)) != OK) {
		/* Fixed: the panic message used to say "do_mmap". */
		panic("do_munmap: message from strange source: %d",
			m->m_source);
	}

	vmp = &vmproc[n];

	/* Each message type stores the unmap address in a different field. */
	if(m->m_type == VM_UNMAP_PHYS) {
		addr = (vir_bytes) m->m_lsys_vm_unmap_phys.vaddr;
	} else if(m->m_type == VM_SHM_UNMAP) {
		addr = (vir_bytes) m->m_lc_vm_shm_unmap.addr;
	} else addr = (vir_bytes) m->VMUM_ADDR;

	if(addr % VM_PAGE_SIZE)
		return EFAULT;	/* address must be page-aligned */

	if(m->m_type == VM_UNMAP_PHYS || m->m_type == VM_SHM_UNMAP) {
		/* Whole-region unmap: the length is the region's length. */
		struct vir_region *vr;
		if(!(vr = map_lookup(vmp, addr, NULL))) {
			printf("VM: unmap: address 0x%lx not found in %d\n",
			       addr, target);
			sys_diagctl_stacktrace(target);
			return EFAULT;
		}
		len = vr->length;
	} else len = roundup(m->VMUM_LEN, VM_PAGE_SIZE);

	return map_unmap_range(vmp, addr, len);
}
/*===========================================================================* * do_getsysinfo * *===========================================================================*/ int do_getsysinfo() { vir_bytes src_addr, dst_addr; size_t len; /* This call leaks important information. In the future, requests from * non-system processes should be denied. */ if (mp->mp_effuid != 0) { printf("PM: unauthorized call of do_getsysinfo by proc %d '%s'\n", mp->mp_endpoint, mp->mp_name); sys_diagctl_stacktrace(mp->mp_endpoint); return EPERM; } switch(m_in.m_lsys_getsysinfo.what) { case SI_PROC_TAB: /* copy entire process table */ src_addr = (vir_bytes) mproc; len = sizeof(struct mproc) * NR_PROCS; break; #if ENABLE_SYSCALL_STATS case SI_CALL_STATS: src_addr = (vir_bytes) calls_stats; len = sizeof(calls_stats); break; #endif default: return(EINVAL); } if (len != m_in.m_lsys_getsysinfo.size) return(EINVAL); dst_addr = m_in.m_lsys_getsysinfo.where; return sys_datacopy(SELF, src_addr, who_e, dst_addr, len); }
/* Continuation of a file-backed mmap(): called when VFS replies with the
 * file details (fd, inode, device, size).  Finishes the mapping and then
 * unblocks the requesting process with either the mapped address or
 * MAP_FAILED plus an error code in the reply's m_type.
 */
static void mmap_file_cont(struct vmproc *vmp, message *replymsg, void *cbarg,
	void *origmsg_v)
{
	message *origmsg = (message *) origmsg_v;
	message mmap_reply;
	int result;
	int writable = 0;
	vir_bytes v = (vir_bytes) MAP_FAILED;

	if(origmsg->m_mmap.prot & PROT_WRITE)
		writable = 1;

	if(replymsg->VMV_RESULT != OK) {
#if 0   /* Noisy diagnostic for mmap() by ld.so */
		printf("VM: VFS reply failed (%d)\n", replymsg->VMV_RESULT);
		sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
		/* Fixed: propagate the error code from the VFS *reply*;
		 * 'origmsg' is the original request, whose result field
		 * does not hold the failure reason.
		 */
		result = replymsg->VMV_RESULT;
	} else {
		/* VFS succeeded; finish the mapping. */
		result = mmap_file(vmp, replymsg->VMV_FD,
			origmsg->m_mmap.offset, origmsg->m_mmap.flags,
			replymsg->VMV_INO, replymsg->VMV_DEV,
			(u64_t) replymsg->VMV_SIZE_PAGES*PAGE_SIZE,
			(vir_bytes) origmsg->m_mmap.addr,
			origmsg->m_mmap.len, &v, 0, writable, 1);
	}

	/* Unblock requesting process. */
	memset(&mmap_reply, 0, sizeof(mmap_reply));
	mmap_reply.m_type = result;
	mmap_reply.m_mmap.retaddr = (void *) v;

	if(ipc_send(vmp->vm_endpoint, &mmap_reply) != OK)
		panic("VM: mmap_file_cont: ipc_send() failed");
}
/* Handle a page fault on a file-mapped region.  Returns OK when the fault
 * is resolved, EFAULT/ENOMEM on failure, or SUSPEND (with *io set to 1)
 * after issuing an asynchronous VFS read whose completion will retry the
 * fault via 'cb'.
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	u32_t allocflags;
	int procfd = region->param.file.fdref->fd;

	/* NOTE(review): allocflags is computed but never read below —
	 * verify vrallocflags() has no required side effect before
	 * removing it.
	 */
	allocflags = vrallocflags(region->flags);

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		/* Offset of the faulting page within the backing file. */
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;
		/* Look up the page in the VM cache, by device offset alone
		 * when no inode is known, otherwise by (device, inode,
		 * offset).
		 */
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(region->param.file.fdref->dev,
				region->param.file.fdref->ino, referenced_offset, 1);
		}
		if(cp) {
			int result = OK;
			/* Cache hit: replace this region's physical block
			 * with the cached page (drop the old reference
			 * first, then link the cached page in).
			 */
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			/* If this page overlaps the region's clear-end
			 * area, copy it so the tail can be zeroed; else
			 * copy-on-write only for write faults.
			 */
			if(roundup(ph->offset+region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(result == OK && write) {
				result = cow_block(vmp, region, ph, 0);
			}

			return result;
		}

		/* Cache miss and no callback to resume with: fail. */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
#endif
			sys_diagctl_stacktrace(vmp->vm_endpoint);
			return EFAULT;
		}

		/* Ask VFS to read the page in; the fault is retried from
		 * 'cb' once the I/O completes.
		 */
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}

		*io = 1;
		return SUSPEND;
	}

	/* Page already present and the access is a read: nothing to do. */
	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on a present page: copy-on-write. */
	return cow_block(vmp, region, ph, 0);
}
/* Handle a page fault on a file-mapped region.  Returns OK when the fault
 * is resolved, EFAULT/ENOMEM on failure, or SUSPEND (with *io set to 1)
 * after issuing an asynchronous VFS read whose completion will retry the
 * fault via 'cb'.
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	u32_t allocflags;
	int procfd = region->param.file.fdref->fd;

	/* NOTE(review): allocflags is computed but never read below —
	 * verify vrallocflags() has no required side effect before
	 * removing it.
	 */
	allocflags = vrallocflags(region->flags);

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		/* Offset of the faulting page within the backing file. */
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;
		/* Look up the page in the VM cache, by device offset alone
		 * when no inode is known, otherwise by (device, inode,
		 * offset).
		 */
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(region->param.file.fdref->dev,
				region->param.file.fdref->ino, referenced_offset, 1);
		}
		/*
		 * Normally, a cache hit saves a round-trip to the file system
		 * to load the page. However, if the page in the VM cache is
		 * marked for one-time use, then force a round-trip through the
		 * file system anyway, so that the FS can update the page by
		 * by readding it to the cache. Thus, for one-time use pages,
		 * no caching is performed. This approach is correct even in
		 * the light of concurrent requests and disappearing processes
		 * but relies on VM requests to VFS being fully serialized.
		 */
		if(cp && (!cb || !(cp->flags & VMSF_ONCE))) {
			int result = OK;
			/* Cache hit: replace this region's physical block
			 * with the cached page (drop the old reference
			 * first, then link the cached page in).
			 */
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			/* If this page overlaps the region's clear-end
			 * area, copy it so the tail can be zeroed; else
			 * copy-on-write only for write faults.
			 */
			if(roundup(ph->offset+region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(result == OK && write) {
				result = cow_block(vmp, region, ph, 0);
			}

			/* Discard one-use pages after mapping them in. */
			if (result == OK && (cp->flags & VMSF_ONCE))
				rmcache(cp);

			return result;
		}

		/* Cache miss and no callback to resume with: fail. */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
			sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
			return EFAULT;
		}

		/* Ask VFS to read the page in; the fault is retried from
		 * 'cb' once the I/O completes.
		 */
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}

		*io = 1;
		return SUSPEND;
	}

	/* Page already present and the access is a read: nothing to do. */
	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on a present page: copy-on-write. */
	return cow_block(vmp, region, ph, 0);
}