/*
 * Page fault handler for file-mapped (mmap'ed file) regions.
 *
 * If the faulting page has no physical block yet, try to satisfy the fault
 * from the VM page cache; on a cache miss, ask VFS to read the page in and
 * suspend the faulting process (the callback 'cb' resumes it).  If the page
 * is already present, a write fault is resolved by copy-on-write.
 *
 * Returns OK, an error code, or SUSPEND (with *io set) when I/O was started.
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	int procfd;

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Read the fd only after asserting fdref above; the previous code
	 * dereferenced fdref before the assert that checks it.
	 */
	procfd = region->param.file.fdref->fd;

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;

		/* Look the page up in the VM cache: by inode if we know it,
		 * otherwise by device and device offset.
		 */
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(
				region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(
				region->param.file.fdref->dev,
				region->param.file.fdref->ino,
				referenced_offset, 1);
		}

		if(cp) {
			int result = OK;

			/* Cache hit: drop the placeholder block and link
			 * this phys_region to the cached page instead.
			 */
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			if(roundup(ph->offset + region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				/* Last page of the mapping: make a private
				 * copy and zero the tail past 'clearend'.
				 */
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(write) {
				/* Write fault: un-share the cached page.
				 * (result is necessarily OK here, so the
				 * old 'result == OK &&' test was redundant.)
				 */
				result = cow_block(vmp, region, ph, 0);
			}

			return result;
		}

		/* Cache miss and no callback: we cannot suspend the caller
		 * to perform I/O, so the fault fails.
		 */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
#endif
			sys_diagctl_stacktrace(vmp->vm_endpoint);
			return EFAULT;
		}

		/* Ask VFS to read the page in; we are called back later. */
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}

		*io = 1;
		return SUSPEND;
	}

	/* Page already present; a read fault needs no further work. */
	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on a present, possibly shared page: copy-on-write. */
	return cow_block(vmp, region, ph, 0);
}
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
/*
 * Handle an mmap() request message.  Anonymous mappings are set up
 * immediately; file mappings are forwarded to VFS for an fd lookup and the
 * caller is suspended (SUSPEND) until the continuation replies.
 */
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr = (vir_bytes) m->m_mmap.addr;
	struct vir_region *vr = NULL;
	int execpriv = 0;
	size_t len = (vir_bytes) m->m_mmap.len;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->m_mmap.flags & MAP_THIRDPARTY) {
		/* Mapping on behalf of another process: privileged only. */
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->m_mmap.forwhom, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	/* "SUSv3 specifies that mmap() should fail if length is 0" */
	if(len == 0) {	/* len is unsigned, so '<= 0' was just '== 0' */
		return EINVAL;
	}

	if(m->m_mmap.fd == -1 || (m->m_mmap.flags & MAP_ANON)) {
		/* actual memory in some form */
		mem_type_t *mt = NULL;

		if(m->m_mmap.fd != -1) {
			/* MAP_ANON with an fd makes no sense; reject it.
			 * (Cast + %lx: len is size_t, %x was a format
			 * mismatch.)
			 */
			printf("VM: mmap: fd %d, len 0x%lx\n",
				m->m_mmap.fd, (unsigned long)len);
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->m_mmap.flags & (MAP_CONTIG|MAP_PREALLOC)) ==
			MAP_CONTIG) {
			return EINVAL;
		}

		if(m->m_mmap.flags & MAP_CONTIG) {
			mt = &mem_type_anon_contig;
		} else	mt = &mem_type_anon;

		if(!(vr = mmap_region(vmp, addr, m->m_mmap.flags, len,
			VR_WRITABLE | VR_ANON, mt, execpriv))) {
			return ENOMEM;
		}
	} else {
		/* File mapping might be disabled */
		if(!enable_filemap) return ENXIO;

		/* For files, we only can't accept writable MAP_SHARED
		 * mappings.
		 */
		if((m->m_mmap.flags & MAP_SHARED) &&
			(m->m_mmap.prot & PROT_WRITE)) {
			return ENXIO;
		}

		if(vfs_request(VMVFSREQ_FDLOOKUP, m->m_mmap.fd, vmp, 0, 0,
			mmap_file_cont, NULL, m, sizeof(*m)) != OK) {
			printf("VM: vfs_request for mmap failed\n");
			return ENXIO;
		}

		/* request queued; don't reply. */
		return SUSPEND;
	}

	/* Return mapping, as seen from process. */
	m->m_mmap.retaddr = (void *) vr->vaddr;

	return OK;
}
/*
 * Page fault handler for file-mapped (mmap'ed file) regions.
 *
 * If the faulting page has no physical block yet, try to satisfy the fault
 * from the VM page cache; on a cache miss (or a forced bypass for one-time
 * use pages), ask VFS to read the page in and suspend the faulting process.
 * If the page is already present, a write fault is resolved by COW.
 *
 * Returns OK, an error code, or SUSPEND (with *io set) when I/O was started.
 */
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	int procfd;

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Read the fd only after asserting fdref above; the previous code
	 * dereferenced fdref before the assert that checks it.
	 */
	procfd = region->param.file.fdref->fd;

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;

		/* Look the page up in the VM cache: by inode if we know it,
		 * otherwise by device and device offset.
		 */
		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(
				region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(
				region->param.file.fdref->dev,
				region->param.file.fdref->ino,
				referenced_offset, 1);
		}

		/*
		 * Normally, a cache hit saves a round-trip to the file system
		 * to load the page.  However, if the page in the VM cache is
		 * marked for one-time use, then force a round-trip through the
		 * file system anyway, so that the FS can update the page by
		 * re-adding it to the cache.  Thus, for one-time use pages,
		 * no caching is performed.  This approach is correct even in
		 * the light of concurrent requests and disappearing processes
		 * but relies on VM requests to VFS being fully serialized.
		 */
		if(cp && (!cb || !(cp->flags & VMSF_ONCE))) {
			int result = OK;

			/* Cache hit: drop the placeholder block and link
			 * this phys_region to the cached page instead.
			 */
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			if(roundup(ph->offset + region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				/* Last page of the mapping: make a private
				 * copy and zero the tail past 'clearend'.
				 */
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(write) {
				/* Write fault: un-share the cached page.
				 * (result is necessarily OK here, so the
				 * old 'result == OK &&' test was redundant.)
				 */
				result = cow_block(vmp, region, ph, 0);
			}

			/* Discard one-use pages after mapping them in. */
			if (result == OK && (cp->flags & VMSF_ONCE))
				rmcache(cp);

			return result;
		}

		/* Cache miss and no callback: we cannot suspend the caller
		 * to perform I/O, so the fault fails.
		 */
		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
			sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
			return EFAULT;
		}

		/* Ask VFS to read the page in; we are called back later. */
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}

		*io = 1;
		return SUSPEND;
	}

	/* Page already present; a read fault needs no further work. */
	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	/* Write fault on a present, possibly shared page: copy-on-write. */
	return cow_block(vmp, region, ph, 0);
}