int main(int argc, char **argv) {
	const char *path = "/usr/share/dict/words";
	if (argc > 1)
		path = argv[1];
	std::cout << "Loading words from " << path << '\n';

	test_vector vec;
	std::vector<int> series;
	std::mt19937 rnd;
	test_map m;

	load_words(path, vec);
	assert(vec.size() >= 1000);
	std::cout << "Working with " << vec.size() << " items\n";
	std::sort(vec.begin(), vec.end());
	rnd.seed(5);	// fixed seed keeps the test deterministic

	// Precompute the running (prefix) sums; series[i] is the expected
	// map sum once the first i + 1 items have been inserted.
	int all_sum = 0;
	for (unsigned int i = 0; i < vec.size(); i++) {
		all_sum += vec[i].second;
		series.push_back(all_sum);
	}

	std::cout << "Inserting...\n";
	for (unsigned int i = 0; i < vec.size(); i++) {
		m.insert(vec[i]);
		range_test(rnd, m, vec, series, i + 1);
		assert(m.sum().sum == series[i]);
	}
	std::cout << "Sum: " << m.sum().sum << '\n';

	std::cout << "Removing...\n";
	// Erase in reverse order, checking the running sum before each erase.
	for (int i = vec.size() - 1; i >= 0; i--) {
		assert(m.sum().sum == series[i]);
		m.erase(vec[i].first);
		range_test(rnd, m, vec, series, i);
	}
	assert(m.sum().sum == 0);
	return 0;
}
int
sys_minherit(struct lwp *l, const struct sys_minherit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(int) len;
		syscallarg(int) inherit;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_inherit_t inherit;
	int error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	inherit = SCARG(uap, inherit);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(addr, size, false);
	if (error)
		return error;

	error = uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr + size,
	    inherit);
	return error;
}
int
sys_munmap(struct lwp *l, const struct sys_munmap_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * get syscall args.
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	if (size == 0)
		return (0);

	error = range_test(addr, size, false);
	if (error)
		return error;

	map = &p->p_vmspace->vm_map;

	/*
	 * interesting system call semantic: make sure entire range is
	 * allocated before allowing an unmap.
	 */
	vm_map_lock(map);
#if 0
	if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
		vm_map_unlock(map);
		return (EINVAL);
	}
#endif
	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
	return (0);
}
int
bbc_to_gmt(u_long *timbuf)
{
	int i;
	u_long tmp;
	int year, month, day, hour, min, sec;

	read_bbc();

	sec = bbc_to_decimal(1, 0);
	min = bbc_to_decimal(3, 2);

	/*
	 * Hours are different for some reason. Makes no sense really.
	 */
	hour  = ((bbc_registers[5] & 0x03) * 10) + bbc_registers[4];
	day   = bbc_to_decimal(8, 7);
	month = bbc_to_decimal(10, 9);
	year  = bbc_to_decimal(12, 11) + 1900;

	range_test(hour, 0, 23);
	range_test(day, 1, 31);
	range_test(month, 1, 12);
	range_test(year, STARTOFTIME, 2038);	/* 2038 is the end of time. */

	tmp = 0;
	for (i = STARTOFTIME; i < year; i++)
		tmp += days_in_year(i);
	if (leapyear(year) && month > FEBRUARY)
		tmp++;
	for (i = 1; i < month; i++)
		tmp += days_in_month(i);
	tmp += (day - 1);
	tmp = ((tmp * 24 + hour) * 60 + min) * 60 + sec;

	*timbuf = tmp;
	return (1);
}
int
sys_mlock(struct lwp *l, const struct sys_mlock_args *uap, register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary and adjust the size accordingly
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(&p->p_vmspace->vm_map, addr, size, false);
	if (error)
		return ENOMEM;

	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
		return EAGAIN;

	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return EAGAIN;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size,
	    false, 0);
	if (error == EFAULT)
		error = ENOMEM;
	return error;
}
int
sys_munlock(struct lwp *l, const struct sys_munlock_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const void *) addr;
		syscallarg(size_t) len;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int error;

	/*
	 * extract syscall args from uap
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(&p->p_vmspace->vm_map, addr, size, false);
	if (error)
		return ENOMEM;

	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr + size,
	    true, 0);
	if (error)
		return ENOMEM;
	return 0;
}
int
sys_mprotect(struct lwp *l, const struct sys_mprotect_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	vm_prot_t prot;
	int error;

	/*
	 * extract syscall args from uap
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = round_page(size);

	error = range_test(&p->p_vmspace->vm_map, addr, size, false);
	if (error)
		return EINVAL;

	error = uvm_map_protect(&p->p_vmspace->vm_map, addr, addr + size, prot,
	    false);
	return error;
}
/* ARGSUSED */
int
sys_madvise(struct lwp *l, const struct sys_madvise_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) behav;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	int advice, error;

	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	advice = SCARG(uap, behav);

	/*
	 * align the address to a page boundary, and adjust the size accordingly
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	error = range_test(&p->p_vmspace->vm_map, addr, size, false);
	if (error)
		return EINVAL;

	switch (advice) {
	case MADV_NORMAL:
	case MADV_RANDOM:
	case MADV_SEQUENTIAL:
		error = uvm_map_advice(&p->p_vmspace->vm_map, addr,
		    addr + size, advice);
		break;

	case MADV_WILLNEED:
		/*
		 * Activate all these pages, pre-faulting them in if
		 * necessary.
		 */
		error = uvm_map_willneed(&p->p_vmspace->vm_map, addr,
		    addr + size);
		break;

	case MADV_DONTNEED:
		/*
		 * Deactivate all these pages.  We don't need them
		 * any more.  We don't, however, toss the data in
		 * the pages.
		 */
		error = uvm_map_clean(&p->p_vmspace->vm_map, addr,
		    addr + size, PGO_DEACTIVATE);
		break;

	case MADV_FREE:
		/*
		 * These pages contain no valid data, and may be
		 * garbage-collected.  Toss all resources, including
		 * any swap space in use.
		 */
		error = uvm_map_clean(&p->p_vmspace->vm_map, addr,
		    addr + size, PGO_FREE);
		break;

	case MADV_SPACEAVAIL:
		/*
		 * XXXMRG What is this?  I think it's:
		 *
		 *	Ensure that we have allocated backing-store
		 *	for these pages.
		 *
		 * This is going to require changes to the page daemon,
		 * as it will free swap space allocated to pages in core.
		 * There's also what to do for device/file/anonymous memory.
		 */
		return EINVAL;

	default:
		return EINVAL;
	}

	return error;
}
int
sys___msync13(struct lwp *l, const struct sys___msync13_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) flags;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	vsize_t size, pageoff;
	struct vm_map *map;
	int error, flags, uvmflags;
	bool rv;

	/*
	 * extract syscall args from the uap
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	flags = SCARG(uap, flags);

	/* sanity check flags */
	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
	    (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
	    (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
		return EINVAL;
	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
		flags |= MS_SYNC;

	/*
	 * align the address to a page boundary and adjust the size accordingly.
	 */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vsize_t)round_page(size);

	/*
	 * get map
	 */
	map = &p->p_vmspace->vm_map;

	error = range_test(map, addr, size, false);
	if (error)
		return ENOMEM;

	/*
	 * XXXCDC: do we really need this semantic?
	 *
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we
	 * don't really keep track of individual mmaps so we approximate
	 * by flushing the range of the map entry containing addr.
	 * This can be incorrect if the region splits or is coalesced
	 * with a neighbor.
	 */
	if (size == 0) {
		struct vm_map_entry *entry;

		vm_map_lock_read(map);
		rv = uvm_map_lookup_entry(map, addr, &entry);
		if (rv == true) {
			addr = entry->start;
			size = entry->end - entry->start;
		}
		vm_map_unlock_read(map);
		if (rv == false)
			return EINVAL;
	}

	/*
	 * translate MS_ flags into PGO_ flags
	 */
	uvmflags = PGO_CLEANIT;
	if (flags & MS_INVALIDATE)
		uvmflags |= PGO_FREE;
	if (flags & MS_SYNC)
		uvmflags |= PGO_SYNCIO;

	error = uvm_map_clean(map, addr, addr + size, uvmflags);
	return error;
}
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	off_t pos;
	vsize_t size, pageoff, newsize;
	vm_prot_t prot, maxprot;
	int flags, fd, advice;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct uvm_object *uobj;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY) {
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
#if defined(COMPAT_10) && defined(__i386__)
		/*
		 * Ancient kernel on x86 did not obey PROT_EXEC on i386 at
		 * least and ld.so did not turn it on.  We take care of this
		 * on amd64 in compat32.
		 */
		prot |= PROT_EXEC;
#endif
	}
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return EINVAL;

	/*
	 * align file position and save offset.  adjust size.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;
	newsize = size + pageoff;		/* add offset */
	newsize = (vsize_t)round_page(newsize);	/* round up */

	if (newsize < size)
		return ENOMEM;
	size = newsize;

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return EINVAL;

		error = range_test(&p->p_vmspace->vm_map, addr, size, true);
		if (error) {
			return error;
		}
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */
		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size,
		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */
	advice = UVM_ADV_NORMAL;
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return EBADF;

		if (fp->f_ops->fo_mmap == NULL) {
			error = ENODEV;
			goto out;
		}
		error = (*fp->f_ops->fo_mmap)(fp, &pos, size, prot, &flags,
		    &advice, &uobj, &maxprot);
		if (error) {
			goto out;
		}
		if (uobj == NULL) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}
	} else {	/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return EINVAL;

 is_anon:	/* label for SunOS style /dev/zero */
		uobj = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

	PAX_MPROTECT_ADJUST(l, &prot, &maxprot);

	pax_aslr_mmap(l, &addr, orig_addr, flags);

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, advice, uobj, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	/* remember to add offset */
	*retval = (register_t)(addr + pageoff);

 out:
	if (fp != NULL)
		fd_putfile(fd);

	return error;
}
static bool
clock_to_gmt(satime_t *timbuf)
{
	int i;
	satime_t tmp;
	int year, month, day, hour, min, sec;

	if (machineid == HP_425 && mmuid == MMUID_425_E) {
		/* 425e uses mcclock on the frodo utility chip */
		while ((mc_read(MC_REGA) & MC_REGA_UIP) != 0)
			continue;
		sec = mc_read(MC_SEC);
		min = mc_read(MC_MIN);
		hour = mc_read(MC_HOUR);
		day = mc_read(MC_DOM);
		month = mc_read(MC_MONTH);
		year = mc_read(MC_YEAR) + 1900;
	} else {
		/* Use the traditional HIL bbc for all other models */
		read_bbc();

		sec = bbc_to_decimal(1, 0);
		min = bbc_to_decimal(3, 2);

		/*
		 * Hours are different for some reason. Makes no sense really.
		 */
		hour  = ((bbc_registers[5] & 0x03) * 10) + bbc_registers[4];
		day   = bbc_to_decimal(8, 7);
		month = bbc_to_decimal(10, 9);
		year  = bbc_to_decimal(12, 11) + 1900;
	}

	if (year < POSIX_BASE_YEAR)
		year += 100;

#ifdef CLOCK_DEBUG
	printf("clock todr: %u/%u/%u %u:%u:%u\n",
	    year, month, day, hour, min, sec);
#endif

	range_test(hour, 0, 23);
	range_test(day, 1, 31);
	range_test(month, 1, 12);

	tmp = 0;
	for (i = POSIX_BASE_YEAR; i < year; i++)
		tmp += days_per_year(i);
	if (is_leap_year(year) && month > FEBRUARY)
		tmp++;
	for (i = 1; i < month; i++)
		tmp += days_in_month(i);
	tmp += (day - 1);
	tmp = ((tmp * 24 + hour) * 60 + min) * 60 + sec;

	*timbuf = tmp;
	return true;
}
int
sys_mmap(struct lwp *l, const struct sys_mmap_args *uap, register_t *retval)
{
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) prot;
		syscallarg(int) flags;
		syscallarg(int) fd;
		syscallarg(long) pad;
		syscallarg(off_t) pos;
	} */
	struct proc *p = l->l_proc;
	vaddr_t addr;
	struct vattr va;
	off_t pos;
	vsize_t size, pageoff;
	vm_prot_t prot, maxprot;
	int flags, fd;
	vaddr_t defaddr;
	struct file *fp = NULL;
	struct vnode *vp;
	void *handle;
	int error;
#ifdef PAX_ASLR
	vaddr_t orig_addr;
#endif /* PAX_ASLR */

	/*
	 * first, extract syscall args from the uap.
	 */
	addr = (vaddr_t)SCARG(uap, addr);
	size = (vsize_t)SCARG(uap, len);
	prot = SCARG(uap, prot) & VM_PROT_ALL;
	flags = SCARG(uap, flags);
	fd = SCARG(uap, fd);
	pos = SCARG(uap, pos);

#ifdef PAX_ASLR
	orig_addr = addr;
#endif /* PAX_ASLR */

	/*
	 * Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
	 * validate the flags.
	 */
	if (flags & MAP_COPY)
		flags = (flags & ~MAP_COPY) | MAP_PRIVATE;
	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
		return (EINVAL);

	/*
	 * align file position and save offset.  adjust size.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;
	size += pageoff;			/* add offset */
	size = (vsize_t)round_page(size);	/* round up */

	/*
	 * now check (MAP_FIXED) or get (!MAP_FIXED) the "addr"
	 */
	if (flags & MAP_FIXED) {
		/* ensure address and file offset are aligned properly */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		error = range_test(addr, size, true);
		if (error)
			return error;
	} else if (addr == 0 || !(flags & MAP_TRYFIXED)) {
		/*
		 * not fixed: make sure we skip over the largest
		 * possible heap for non-topdown mapping arrangements.
		 * we will refine our guess later (e.g. to account for
		 * VAC, etc)
		 */
		defaddr = p->p_emul->e_vm_default_addr(p,
		    (vaddr_t)p->p_vmspace->vm_daddr, size);

		if (addr == 0 ||
		    !(p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN))
			addr = MAX(addr, defaddr);
		else
			addr = MIN(addr, defaddr);
	}

	/*
	 * check for file mappings (i.e. not anonymous) and verify file.
	 */
	if ((flags & MAP_ANON) == 0) {
		if ((fp = fd_getfile(fd)) == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			fd_putfile(fd);
			return (ENODEV);	/* only mmap vnodes! */
		}
		vp = fp->f_data;		/* convert to vnode */
		if (vp->v_type != VREG &&
		    vp->v_type != VCHR &&
		    vp->v_type != VBLK) {
			fd_putfile(fd);
			return (ENODEV); /* only REG/CHR/BLK support mmap */
		}
		if (vp->v_type != VCHR && pos < 0) {
			fd_putfile(fd);
			return (EINVAL);
		}
		if (vp->v_type != VCHR && (pos + size) < pos) {
			fd_putfile(fd);
			return (EOVERFLOW);	/* no offset wrapping */
		}

		/* special case: catch SunOS style /dev/zero */
		if (vp->v_type == VCHR &&
		    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
			flags |= MAP_ANON;
			fd_putfile(fd);
			fp = NULL;
			goto is_anon;
		}

		/*
		 * Old programs may not select a specific sharing type, so
		 * default to an appropriate one.
		 *
		 * XXX: how does MAP_ANON fit in the picture?
		 */
		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
#if defined(DEBUG)
			printf("WARNING: defaulted mmap() share type to "
			    "%s (pid %d command %s)\n",
			    vp->v_type == VCHR ? "MAP_SHARED" : "MAP_PRIVATE",
			    p->p_pid, p->p_comm);
#endif
			if (vp->v_type == VCHR)
				flags |= MAP_SHARED;	/* for a device */
			else
				flags |= MAP_PRIVATE;	/* for a file */
		}

		/*
		 * MAP_PRIVATE device mappings don't make sense (and aren't
		 * supported anyway).  However, some programs rely on this,
		 * so just change it to MAP_SHARED.
		 */
		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
		}

		/*
		 * now check protection
		 */
		maxprot = VM_PROT_EXECUTE;

		/* check read access */
		if (fp->f_flag & FREAD)
			maxprot |= VM_PROT_READ;
		else if (prot & PROT_READ) {
			fd_putfile(fd);
			return (EACCES);
		}

		/* check write access, shared case first */
		if (flags & MAP_SHARED) {
			/*
			 * if the file is writable, only add PROT_WRITE to
			 * maxprot if the file is not immutable, append-only.
			 * otherwise, if we have asked for PROT_WRITE, return
			 * EPERM.
			 */
			if (fp->f_flag & FWRITE) {
				if ((error =
				    VOP_GETATTR(vp, &va, l->l_cred))) {
					fd_putfile(fd);
					return (error);
				}
				if ((va.va_flags &
				    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
					maxprot |= VM_PROT_WRITE;
				else if (prot & PROT_WRITE) {
					fd_putfile(fd);
					return (EPERM);
				}
			} else if (prot & PROT_WRITE) {
				fd_putfile(fd);
				return (EACCES);
			}
		} else {
			/* MAP_PRIVATE mappings can always write to */
			maxprot |= VM_PROT_WRITE;
		}
		handle = vp;
	} else {	/* MAP_ANON case */
		/*
		 * XXX What do we do about (MAP_SHARED|MAP_PRIVATE) == 0?
		 */
		if (fd != -1)
			return (EINVAL);

 is_anon:	/* label for SunOS style /dev/zero */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	}

#if NVERIEXEC > 0
	if (handle != NULL) {
		/*
		 * Check if the file can be executed indirectly.
		 *
		 * XXX: This gives false warnings about "Incorrect access type"
		 * XXX: if the mapping is not executable. Harmless, but will be
		 * XXX: fixed as part of other changes.
		 */
		if (veriexec_verify(l, handle, "(mmap)", VERIEXEC_INDIRECT,
		    NULL)) {
			/*
			 * Don't allow executable mappings if we can't
			 * indirectly execute the file.
			 */
			if (prot & VM_PROT_EXECUTE) {
				if (fp != NULL)
					fd_putfile(fd);
				return (EPERM);
			}

			/*
			 * Strip the executable bit from 'maxprot' to make sure
			 * it can't be made executable later.
			 */
			maxprot &= ~VM_PROT_EXECUTE;
		}
	}
#endif /* NVERIEXEC > 0 */

#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PAX_ASLR
	pax_aslr(l, &addr, orig_addr, flags);
#endif /* PAX_ASLR */

	/*
	 * now let kernel internal function uvm_mmap do the work.
	 */
	error = uvm_mmap(&p->p_vmspace->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);

	if (error == 0)
		/* remember to add offset */
		*retval = (register_t)(addr + pageoff);

	if (fp != NULL)
		fd_putfile(fd);

	return (error);
}