int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base;
	int retval = 0;

	if (!notify_exec())
		sim_notify_exec(bprm->filename);

	down_write(&mm->mmap_sem);

	vdso_base = VDSO_BASE;
	retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
					 VM_READ|VM_EXEC|
					 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					 vdso_pages);

#ifndef __tilegx__
	if (!retval) {
		unsigned long addr = MEM_USER_INTRPT;
		addr = mmap_region(NULL, addr, INTRPT_SIZE,
				   MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
				   VM_READ|VM_EXEC|
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
		if (addr > (unsigned long) -PAGE_SIZE)
			retval = (int) addr;
	}
#endif

	up_write(&mm->mmap_sem);

	return retval;
}
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr = m->VMM_ADDR;
	struct vir_region *vr = NULL;
	int execpriv = 0;
	size_t len = (vir_bytes) m->VMM_LEN;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->VMM_FLAGS & MAP_THIRDPARTY) {
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->VMM_FORWHOM, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANON)) {
		/* actual memory in some form */
		mem_type_t *mt = NULL;

		if(m->VMM_FD != -1 || len <= 0) {
			printf("VM: mmap: fd %d, len 0x%x\n", m->VMM_FD, len);
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->VMM_FLAGS & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		if(m->VMM_FLAGS & MAP_CONTIG) {
			mt = &mem_type_anon_contig;
		} else	mt = &mem_type_anon;

		if(!(vr = mmap_region(vmp, addr, m->VMM_FLAGS, len,
			VR_WRITABLE | VR_ANON, mt, execpriv))) {
			return ENOMEM;
		}
	} else {
		return ENXIO;
	}

	/* Return mapping, as seen from process. */
	m->VMM_RETADDR = vr->vaddr;

	return OK;
}
/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only bounds table and bounds directory can be allocated here */
	if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX | mm->def_flags |
			VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}

	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}
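/*
 * Illustrative, self-contained sketch (not from the sources above) of the
 * error-return convention the checks above rely on: get_unmapped_area() and
 * mmap_region() hand back either a page-aligned address or a small negative
 * errno cast to unsigned long, so "unaligned" or "within the last page of
 * the address space" signals failure. PAGE_SIZE and the sample values are
 * assumptions for illustration only.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int looks_like_errno(unsigned long addr)
{
	/* Same idea as the "addr > (unsigned long) -PAGE_SIZE" check above. */
	return addr > (unsigned long) -PAGE_SIZE;
}

int main(void)
{
	unsigned long good = 0x40000000UL;	/* page-aligned address */
	unsigned long bad = (unsigned long) -12;	/* -ENOMEM encoded as an address */

	printf("good: unaligned=%d errno-like=%d\n",
	       !!(good & ~PAGE_MASK), looks_like_errno(good));
	printf("bad:  unaligned=%d errno-like=%d\n",
	       !!(bad & ~PAGE_MASK), looks_like_errno(bad));
	return 0;
}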
static int mmap_file(struct vmproc *vmp,
	int vmfd, off_t file_offset, int flags,
	ino_t ino, dev_t dev, u64_t filesize, vir_bytes addr, vir_bytes len,
	vir_bytes *retaddr, u16_t clearend, int writable, int mayclosefd)
{
/* VFS has replied to a VMVFSREQ_FDLOOKUP request. */
	struct vir_region *vr;
	u64_t page_offset;
	int result = OK;
	u32_t vrflags = 0;

	if(writable)
		vrflags |= VR_WRITABLE;

	/* Do some page alignments. */
	if((page_offset = (file_offset % VM_PAGE_SIZE))) {
		file_offset -= page_offset;
		len += page_offset;
	}

	len = roundup(len, VM_PAGE_SIZE);

	/* All numbers should be page-aligned now. */
	assert(!(len % VM_PAGE_SIZE));
	assert(!(filesize % VM_PAGE_SIZE));
	assert(!(file_offset % VM_PAGE_SIZE));

#if 0	/* XXX ld.so relies on longer-than-file mapping */
	if((u64_t) len + file_offset > filesize) {
		printf("VM: truncating mmap dev 0x%x ino %d beyond file size in %d; offset %llu, len %lu, size %llu; ",
			dev, ino, vmp->vm_endpoint,
			file_offset, len, filesize);
		len = filesize - file_offset;
		return EINVAL;
	}
#endif

	if(!(vr = mmap_region(vmp, addr, flags, len,
		vrflags, &mem_type_mappedfile, 0))) {
		result = ENOMEM;
	} else {
		*retaddr = vr->vaddr + page_offset;
		result = OK;

		mappedfile_setfile(vmp, vr, vmfd, file_offset, dev, ino,
			clearend, 1, mayclosefd);
	}

	return result;
}
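/*
 * Standalone sketch (not part of the VM server) of the page-alignment
 * arithmetic mmap_file() performs above: the file offset is rounded down to
 * a page boundary, the requested length grows by the same amount, and the
 * caller later gets vaddr + page_offset back. The 4 KiB page size and the
 * sample offset/length are assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define VM_PAGE_SIZE	4096ULL
#define ROUNDUP(v, a)	((((v) + (a) - 1) / (a)) * (a))

int main(void)
{
	uint64_t file_offset = 0x1234;	/* unaligned request */
	uint64_t len = 0x2000;
	uint64_t page_offset = file_offset % VM_PAGE_SIZE;

	if (page_offset) {
		file_offset -= page_offset;	/* align mapping start down */
		len += page_offset;		/* grow length to compensate */
	}
	len = ROUNDUP(len, VM_PAGE_SIZE);

	/* Prints: file_offset=0x1000 len=0x3000 page_offset=0x234 */
	printf("file_offset=0x%llx len=0x%llx page_offset=0x%llx\n",
	       (unsigned long long) file_offset,
	       (unsigned long long) len,
	       (unsigned long long) page_offset);
	return 0;
}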
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base;
	int retval = 0;

	/*
	 * Notify the simulator that an exec just occurred.
	 * If we can't find the filename of the mapping, just use
	 * whatever was passed as the linux_binprm filename.
	 */
	if (!notify_exec())
		sim_notify_exec(bprm->filename);

	down_write(&mm->mmap_sem);

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vdso_base = VDSO_BASE;
	retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
					 VM_READ|VM_EXEC|
					 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					 vdso_pages);

#ifndef __tilegx__
	/*
	 * Set up a user-interrupt mapping here; the user can't
	 * create one themselves since it is above TASK_SIZE.
	 * We make it unwritable by default, so the model for adding
	 * interrupt vectors always involves an mprotect.
	 */
	if (!retval) {
		unsigned long addr = MEM_USER_INTRPT;
		addr = mmap_region(NULL, addr, INTRPT_SIZE,
				   MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
				   VM_READ|VM_EXEC|
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
		if (addr > (unsigned long) -PAGE_SIZE)
			retval = (int) addr;
	}
#endif

	up_write(&mm->mmap_sem);

	return retval;
}
/*===========================================================================*
 *				do_mmap					     *
 *===========================================================================*/
int do_mmap(message *m)
{
	int r, n;
	struct vmproc *vmp;
	vir_bytes addr = (vir_bytes) m->m_mmap.addr;
	struct vir_region *vr = NULL;
	int execpriv = 0;
	size_t len = (vir_bytes) m->m_mmap.len;

	/* RS and VFS can do slightly more special mmap() things */
	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
		execpriv = 1;

	if(m->m_mmap.flags & MAP_THIRDPARTY) {
		if(!execpriv) return EPERM;
		if((r=vm_isokendpt(m->m_mmap.forwhom, &n)) != OK)
			return ESRCH;
	} else {
		/* regular mmap, i.e. for caller */
		if((r=vm_isokendpt(m->m_source, &n)) != OK) {
			panic("do_mmap: message from strange source: %d",
				m->m_source);
		}
	}

	vmp = &vmproc[n];

	/* "SUSv3 specifies that mmap() should fail if length is 0" */
	if(len <= 0) {
		return EINVAL;
	}

	if(m->m_mmap.fd == -1 || (m->m_mmap.flags & MAP_ANON)) {
		/* actual memory in some form */
		mem_type_t *mt = NULL;

		if(m->m_mmap.fd != -1) {
			printf("VM: mmap: fd %d, len 0x%x\n",
				m->m_mmap.fd, len);
			return EINVAL;
		}

		/* Contiguous phys memory has to be preallocated. */
		if((m->m_mmap.flags & (MAP_CONTIG|MAP_PREALLOC)) == MAP_CONTIG) {
			return EINVAL;
		}

		if(m->m_mmap.flags & MAP_CONTIG) {
			mt = &mem_type_anon_contig;
		} else	mt = &mem_type_anon;

		if(!(vr = mmap_region(vmp, addr, m->m_mmap.flags, len,
			VR_WRITABLE | VR_ANON, mt, execpriv))) {
			return ENOMEM;
		}
	} else {
		/* File mapping might be disabled */
		if(!enable_filemap) return ENXIO;

		/* The only file mappings we can't accept are writable
		 * MAP_SHARED mappings.
		 */
		if((m->m_mmap.flags & MAP_SHARED) &&
			(m->m_mmap.prot & PROT_WRITE)) {
			return ENXIO;
		}

		if(vfs_request(VMVFSREQ_FDLOOKUP, m->m_mmap.fd, vmp, 0, 0,
			mmap_file_cont, NULL, m, sizeof(*m)) != OK) {
			printf("VM: vfs_request for mmap failed\n");
			return ENXIO;
		}

		/* request queued; don't reply. */
		return SUSPEND;
	}

	/* Return mapping, as seen from process. */
	m->m_mmap.retaddr = (void *) vr->vaddr;

	return OK;
}
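/*
 * Minimal userland sketch (assumed, not part of the VM server above) of the
 * kind of request do_mmap() ends up handling: an anonymous private mapping
 * with fd == -1, which takes the "actual memory" branch rather than the VFS
 * lookup path. Plain POSIX calls only; the 4 KiB length is arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);	/* touch the new mapping */
	munmap(p, len);
	return 0;
}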