static void
syscall_handler (struct intr_frame *f)
{
  int syscall_number;
  ASSERT (sizeof (syscall_number) == 4); /* assuming x86 */

  /* The system call number is in the 32-bit word at the caller's
     stack pointer. */
  memread_user (f->esp, &syscall_number, sizeof (syscall_number));
  _DEBUG_PRINTF ("[DEBUG] system call, number = %d!\n", syscall_number);

  /* Store esp, which is needed in the page fault handler.
     Refer to exception.c:page_fault() (see manual 4.3.3). */
  thread_current ()->current_esp = f->esp;

  /* Dispatch on the system call number.
     The SYS_* constants are defined in syscall-nr.h. */
  switch (syscall_number) {
  case SYS_HALT: /* 0 */
    {
      sys_halt ();
      NOT_REACHED ();
      break;
    }

  case SYS_EXIT: /* 1 */
    {
      int exitcode;
      memread_user (f->esp + 4, &exitcode, sizeof (exitcode));
      sys_exit (exitcode);
      NOT_REACHED ();
      break;
    }

  case SYS_EXEC: /* 2 */
    {
      void *cmdline;
      memread_user (f->esp + 4, &cmdline, sizeof (cmdline));
      int return_code = sys_exec ((const char *) cmdline);
      f->eax = (uint32_t) return_code;
      break;
    }

  case SYS_WAIT: /* 3 */
    {
      pid_t pid;
      memread_user (f->esp + 4, &pid, sizeof (pid_t));
      int ret = sys_wait (pid);
      f->eax = (uint32_t) ret;
      break;
    }

  case SYS_CREATE: /* 4 */
    {
      const char *filename;
      unsigned initial_size;
      bool return_code;
      memread_user (f->esp + 4, &filename, sizeof (filename));
      memread_user (f->esp + 8, &initial_size, sizeof (initial_size));
      return_code = sys_create (filename, initial_size);
      f->eax = return_code;
      break;
    }

  case SYS_REMOVE: /* 5 */
    {
      const char *filename;
      bool return_code;
      memread_user (f->esp + 4, &filename, sizeof (filename));
      return_code = sys_remove (filename);
      f->eax = return_code;
      break;
    }

  case SYS_OPEN: /* 6 */
    {
      const char *filename;
      int return_code;
      memread_user (f->esp + 4, &filename, sizeof (filename));
      return_code = sys_open (filename);
      f->eax = return_code;
      break;
    }

  case SYS_FILESIZE: /* 7 */
    {
      int fd, return_code;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      return_code = sys_filesize (fd);
      f->eax = return_code;
      break;
    }

  case SYS_READ: /* 8 */
    {
      int fd, return_code;
      void *buffer;
      unsigned size;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      memread_user (f->esp + 8, &buffer, sizeof (buffer));
      memread_user (f->esp + 12, &size, sizeof (size));
      return_code = sys_read (fd, buffer, size);
      f->eax = (uint32_t) return_code;
      break;
    }

  case SYS_WRITE: /* 9 */
    {
      int fd, return_code;
      const void *buffer;
      unsigned size;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      memread_user (f->esp + 8, &buffer, sizeof (buffer));
      memread_user (f->esp + 12, &size, sizeof (size));
      return_code = sys_write (fd, buffer, size);
      f->eax = (uint32_t) return_code;
      break;
    }

  case SYS_SEEK: /* 10 */
    {
      int fd;
      unsigned position;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      memread_user (f->esp + 8, &position, sizeof (position));
      sys_seek (fd, position);
      break;
    }

  case SYS_TELL: /* 11 */
    {
      int fd;
      unsigned return_code;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      return_code = sys_tell (fd);
      f->eax = (uint32_t) return_code;
      break;
    }

  case SYS_CLOSE: /* 12 */
    {
      int fd;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      sys_close (fd);
      break;
    }

#ifdef VM
  case SYS_MMAP: /* 13 */
    {
      int fd;
      void *addr;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      memread_user (f->esp + 8, &addr, sizeof (addr));
      mmapid_t ret = sys_mmap (fd, addr);
      f->eax = ret;
      break;
    }

  case SYS_MUNMAP: /* 14 */
    {
      mmapid_t mid;
      memread_user (f->esp + 4, &mid, sizeof (mid));
      sys_munmap (mid);
      break;
    }
#endif

#ifdef FILESYS
  case SYS_CHDIR: /* 15 */
    {
      const char *filename;
      int return_code;
      memread_user (f->esp + 4, &filename, sizeof (filename));
      return_code = sys_chdir (filename);
      f->eax = return_code;
      break;
    }

  case SYS_MKDIR: /* 16 */
    {
      const char *filename;
      int return_code;
      memread_user (f->esp + 4, &filename, sizeof (filename));
      return_code = sys_mkdir (filename);
      f->eax = return_code;
      break;
    }

  case SYS_READDIR: /* 17 */
    {
      int fd;
      char *name;
      int return_code;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      memread_user (f->esp + 8, &name, sizeof (name));
      return_code = sys_readdir (fd, name);
      f->eax = return_code;
      break;
    }

  case SYS_ISDIR: /* 18 */
    {
      int fd;
      int return_code;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      return_code = sys_isdir (fd);
      f->eax = return_code;
      break;
    }

  case SYS_INUMBER: /* 19 */
    {
      int fd;
      int return_code;
      memread_user (f->esp + 4, &fd, sizeof (fd));
      return_code = sys_inumber (fd);
      f->eax = return_code;
      break;
    }
#endif

  /* unhandled case */
  default:
    printf ("[ERROR] system call %d is unimplemented!\n", syscall_number);
    /* Terminate with -1 so a waiting parent wakes up and sees the failure. */
    sys_exit (-1);
    break;
  }
}
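/* memread_user() is used throughout the handler above but not shown.  A
 * minimal sketch of how it might look, assuming the byte-at-a-time
 * get_user() reader from the Pintos reference manual (which relies on the
 * page fault handler in exception.c returning -1, as the comment above
 * notes) and a hypothetical fail_invalid_access() helper that terminates
 * the process with sys_exit(-1).  This is an illustration, not the
 * author's exact implementation. */
static int
get_user (const uint8_t *uaddr)
{
  if (!is_user_vaddr (uaddr))
    return -1;
  int result;
  /* Verbatim from the Pintos manual: reads one byte, or returns -1 if a
     page fault occurred. */
  asm ("movl $1f, %0; movzbl %1, %0; 1:"
       : "=&a" (result) : "m" (*uaddr));
  return result;
}

static void
memread_user (void *src, void *dst, size_t bytes)
{
  int32_t value;
  for (size_t i = 0; i < bytes; i++)
    {
      value = get_user (src + i);
      if (value == -1)            /* invalid pointer or page fault */
        fail_invalid_access ();   /* hypothetical: calls sys_exit(-1) */
      *(char *) (dst + i) = value & 0xff;
    }
}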
asmlinkage void
csyscall(struct pt_regs *regs)
{
    unsigned long num = regs->gregs[13];
    extern void stack_trace(void);
    extern void leave_kernel(struct pt_regs *regs);

    CHECK_STACK();

#if 0
    if (user_mode(regs)) {
        printk("syscall %lu; pc == 0x%8x\n", num, get_pc());
        stack_trace();
    }
#endif

    /* num is unsigned, so only the upper bound needs checking. */
    if (num < __NR_nocall) {
        switch (num) {
        /*
         * system calls that need the regs
         */
        case __NR_fork:
        case __NR_clone:
        case __NR_execve:
        case __NR_sigsuspend:
            regs->gregs[0] = ((int (*)(int))(syscall_tab[num]))((int)regs);
            break;
#ifdef DEBUG
        /* help debug user applications */
        case __NR_dbg_break:
            printk("break: %s\n", (char *)regs->gregs[0]);
            system_break();
            break;
        case __NR_dbg_hexprint:
            printk("value: %lx\n", regs->gregs[0]);
            break;
#endif
        case __NR_mmap:
            regs->gregs[0] = sys_mmap(regs);
#if 0
            dprintk("mmap: returning 0x%8x\n", regs->gregs[0]);
#endif
            break;
        default:
            regs->gregs[0] = syscall_tab[num](regs->gregs[0],
                                              regs->gregs[1],
                                              regs->gregs[2],
                                              regs->gregs[3],
                                              regs->gregs[4]);
            break;
        }
    } else {
        regs->gregs[0] = -ENOSYS;
    }

#if 0
    printk("csyscall: returning %lx\n", regs->gregs[0]);
    stack_trace();
#endif

    leave_kernel(regs);
}
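/* syscall_tab is referenced above but not shown.  A hedged sketch of a
 * declaration consistent with the two ways the table is invoked: cast to a
 * one-argument function for the regs-passing calls, and called directly
 * with five register-sized arguments otherwise.  The original port's
 * actual types may well differ. */
typedef unsigned long (*syscall_fn_t)(unsigned long, unsigned long,
                                      unsigned long, unsigned long,
                                      unsigned long);
extern syscall_fn_t syscall_tab[__NR_nocall];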
static int
syscall_dispatch(uint32_t sysnum, uint32_t args, regs_t *regs)
{
    switch (sysnum) {
    case SYS_waitpid:
        return sys_waitpid((waitpid_args_t *)args);
    case SYS_exit:
        do_exit((int)args);
        panic("exit failed!\n");
        return 0;
    case SYS_thr_exit:
        kthread_exit((void *)args);
        panic("thr_exit failed!\n");
        return 0;
    case SYS_thr_yield:
        sched_make_runnable(curthr);
        sched_switch();
        return 0;
    case SYS_fork:
        return sys_fork(regs);
    case SYS_getpid:
        return curproc->p_pid;
    case SYS_sync:
        sys_sync();
        return 0;
#ifdef __MOUNTING__
    case SYS_mount:
        return sys_mount((mount_args_t *) args);
    case SYS_umount:
        return sys_umount((argstr_t *) args);
#endif
    case SYS_mmap:
        return (int) sys_mmap((mmap_args_t *) args);
    case SYS_munmap:
        return sys_munmap((munmap_args_t *) args);
    case SYS_open:
        return sys_open((open_args_t *) args);
    case SYS_close:
        return sys_close((int)args);
    case SYS_read:
        return sys_read((read_args_t *)args);
    case SYS_write:
        return sys_write((write_args_t *)args);
    case SYS_dup:
        return sys_dup((int)args);
    case SYS_dup2:
        return sys_dup2((dup2_args_t *)args);
    case SYS_mkdir:
        return sys_mkdir((mkdir_args_t *)args);
    case SYS_rmdir:
        return sys_rmdir((argstr_t *)args);
    case SYS_unlink:
        return sys_unlink((argstr_t *)args);
    case SYS_link:
        return sys_link((link_args_t *)args);
    case SYS_rename:
        return sys_rename((rename_args_t *)args);
    case SYS_chdir:
        return sys_chdir((argstr_t *)args);
    case SYS_getdents:
        return sys_getdents((getdents_args_t *)args);
    case SYS_brk:
        return (int) sys_brk((void *)args);
    case SYS_lseek:
        return sys_lseek((lseek_args_t *)args);
    case SYS_halt:
        sys_halt();
        return -1;
    case SYS_set_errno:
        curthr->kt_errno = (int)args;
        return 0;
    case SYS_errno:
        return curthr->kt_errno;
    case SYS_execve:
        return sys_execve((execve_args_t *)args, regs);
    case SYS_stat:
        return sys_stat((stat_args_t *)args);
    case SYS_uname:
        return sys_uname((struct utsname *)args);
    case SYS_debug:
        return sys_debug((argstr_t *)args);
    case SYS_kshell:
        return sys_kshell((int)args);
    default:
        dbg(DBG_ERROR, "ERROR: unknown system call: %d (args: %#08x)\n",
            sysnum, args);
        curthr->kt_errno = ENOSYS;
        return -1;
    }
}
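/* In the dispatcher above, multi-argument calls arrive as a single user
 * pointer to a packed args struct.  A minimal sketch of how a handler
 * might safely unpack that pointer, assuming a copy_from_user()-style
 * helper that validates the user address range; the names here
 * (copy_from_user, do_open_from) are illustrative, not the kernel's
 * exact API. */
static int
sys_open_sketch(open_args_t *args)
{
    open_args_t kargs;

    /* Copy the packed argument block out of user space before using it,
     * so a bad pointer faults here rather than deep in the handler. */
    if (copy_from_user(&kargs, args, sizeof(kargs)) < 0) {
        curthr->kt_errno = EFAULT;
        return -1;
    }
    return do_open_from(&kargs);   /* hypothetical worker */
}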
int
mmap(uintptr_t *addr_store, size_t len, uint32_t mmap_flags)
{
    return sys_mmap(addr_store, len, mmap_flags);
}
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
    struct proc *p = td->td_proc;
    struct mmap_args /* {
        caddr_t addr;
        size_t len;
        int prot;
        int flags;
        int fd;
        long pad;
        off_t pos;
    } */ bsd_args;
    int error;
    struct file *fp;
    cap_rights_t rights;

    LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
        args->addr, args->len, args->prot,
        args->flags, args->fd, args->pgoff);

    error = 0;
    bsd_args.flags = 0;
    fp = NULL;

    /*
     * Linux mmap(2):
     * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
     */
    if (!((args->flags & LINUX_MAP_SHARED) ^
        (args->flags & LINUX_MAP_PRIVATE)))
        return (EINVAL);

    if (args->flags & LINUX_MAP_SHARED)
        bsd_args.flags |= MAP_SHARED;
    if (args->flags & LINUX_MAP_PRIVATE)
        bsd_args.flags |= MAP_PRIVATE;
    if (args->flags & LINUX_MAP_FIXED)
        bsd_args.flags |= MAP_FIXED;
    if (args->flags & LINUX_MAP_ANON)
        bsd_args.flags |= MAP_ANON;
    else
        bsd_args.flags |= MAP_NOSYNC;
    if (args->flags & LINUX_MAP_GROWSDOWN)
        bsd_args.flags |= MAP_STACK;

    /*
     * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
     * on Linux/i386.  We do this to ensure maximum compatibility.
     * Linux/ia64 does the same in i386 emulation mode.
     */
    bsd_args.prot = args->prot;
    if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
        bsd_args.prot |= PROT_READ | PROT_EXEC;

    /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
    bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : args->fd;
    if (bsd_args.fd != -1) {
        /*
         * Linux follows the Solaris mmap(2) description:
         * the file descriptor fildes is opened with
         * read permission, regardless of the
         * protection options specified.
         */
        error = fget(td, bsd_args.fd,
            cap_rights_init(&rights, CAP_MMAP), &fp);
        if (error != 0)
            return (error);
        if (fp->f_type != DTYPE_VNODE) {
            fdrop(fp, td);
            return (EINVAL);
        }

        /* Linux mmap() just fails for O_WRONLY files. */
        if (!(fp->f_flag & FREAD)) {
            fdrop(fp, td);
            return (EACCES);
        }

        fdrop(fp, td);
    }

    if (args->flags & LINUX_MAP_GROWSDOWN) {
        /*
         * The Linux MAP_GROWSDOWN option does not limit auto
         * growth of the region.  Linux mmap with this option
         * takes as addr the initial BOS, and as len, the initial
         * region size.  It can then grow down from addr without
         * limit.  However, Linux threads have an implicit internal
         * limit on stack size of STACK_SIZE; it is just not
         * enforced explicitly in Linux.  But here we impose
         * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
         * region, since we can do this with our mmap.
         *
         * Our mmap with MAP_STACK takes addr as the maximum
         * downsize limit on BOS, and as len the max size of
         * the region.  It then maps the top SGROWSIZ bytes,
         * and auto grows the region down, up to the limit
         * in addr.
         *
         * If we don't use the MAP_STACK option, the effect
         * of this code is to allocate a stack region of a
         * fixed size of (STACK_SIZE - GUARD_SIZE).
         */
        if ((caddr_t)PTRIN(args->addr) + args->len >
            p->p_vmspace->vm_maxsaddr) {
            /*
             * Some Linux apps will attempt to mmap
             * thread stacks near the top of their
             * address space.  If their TOS is greater
             * than vm_maxsaddr, vm_map_growstack()
             * will confuse the thread stack with the
             * process stack and deliver a SEGV if they
             * attempt to grow the thread stack past their
             * current stacksize rlimit.  To avoid this,
             * adjust vm_maxsaddr upwards to reflect
             * the current stacksize rlimit rather
             * than the maximum possible stacksize.
             * It would be better to adjust the
             * mmap'ed region, but some apps do not check
             * mmap's return value.
             */
            PROC_LOCK(p);
            p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
                lim_cur_proc(p, RLIMIT_STACK);
            PROC_UNLOCK(p);
        }

        /*
         * This gives us our maximum stack size and a new BOS.
         * If we're using VM_STACK, then mmap will just map
         * the top SGROWSIZ bytes, and let the stack grow down
         * to the limit at BOS.  If we're not using VM_STACK
         * we map the full stack, since we don't have a way
         * to autogrow it.
         */
        if (args->len > STACK_SIZE - GUARD_SIZE) {
            bsd_args.addr = (caddr_t)PTRIN(args->addr);
            bsd_args.len = args->len;
        } else {
            bsd_args.addr = (caddr_t)PTRIN(args->addr) -
                (STACK_SIZE - GUARD_SIZE - args->len);
            bsd_args.len = STACK_SIZE - GUARD_SIZE;
        }
    } else {
        bsd_args.addr = (caddr_t)PTRIN(args->addr);
        bsd_args.len = args->len;
    }
    bsd_args.pos = (off_t)args->pgoff;

    error = sys_mmap(td, &bsd_args);

    LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
    return (error);
}
static int
vma_remap(unsigned long src, unsigned long dst, unsigned long len)
{
    unsigned long guard = 0, tmp;

    pr_info("Remap %lx->%lx len %lx\n", src, dst, len);

    if (src - dst < len)
        guard = dst;
    else if (dst - src < len)
        guard = dst + len - PAGE_SIZE;

    if (src == dst)
        return 0;

    if (guard != 0) {
        /*
         * mremap() returns an error if the target and source vmas
         * overlap.  In this case the source vma is remapped to a
         * temporary place and then remapped to the target address.
         * Here is one hack to find a non-overlapping temporary place.
         *
         * 1. Initial placement.  We need to move src -> tgt.
         *    |             |+++++src+++++|
         *    |-----tgt-----|             |
         *
         * 2. Map a guard page at the non-overlapping border of the
         *    target vma.
         *    |             |+++++src+++++|
         *    |G|----tgt----|             |
         *
         * 3. Remap src to any other place.
         *    G prevents src from being remapped onto tgt again.
         *    |             |-------------| -> |+++++src+++++|
         *    |G|---tgt-----|             |
         *
         * 4. Remap src to tgt; they no longer overlap.
         *    |+++++src+++++| <---- |-------------|
         *    |G|---tgt-----|       |
         */
        unsigned long addr;

        /* Map guard page (step 2) */
        tmp = sys_mmap((void *)guard, PAGE_SIZE, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
        if (tmp != guard) {
            pr_err("Unable to map a guard page %lx (%lx)\n", guard, tmp);
            return -1;
        }

        /* Move src to a non-overlapping place (step 3) */
        addr = sys_mmap(NULL, len, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
        if (addr == (unsigned long)MAP_FAILED) {
            pr_err("Unable to reserve memory (%lx)\n", addr);
            return -1;
        }

        tmp = sys_mremap(src, len, len,
                         MREMAP_MAYMOVE | MREMAP_FIXED, addr);
        if (tmp != addr) {
            pr_err("Unable to remap %lx -> %lx (%lx)\n", src, addr, tmp);
            return -1;
        }
        src = addr;
    }

    tmp = sys_mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
    if (tmp != dst) {
        pr_err("Unable to remap %lx -> %lx\n", src, dst);
        return -1;
    }

    return 0;
}
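/* Illustrative check only (not part of the original source): the two
 * unsigned subtractions at the top of vma_remap() detect overlap in
 * either direction without signed arithmetic, because a "negative"
 * difference wraps to a huge unsigned value that can never be < len.
 * This small standalone program reproduces that test with hypothetical
 * addresses so the guard-page placement is easy to verify. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 0x1000UL

static unsigned long
overlap_guard(unsigned long src, unsigned long dst, unsigned long len)
{
    if (src - dst < len)
        return dst;                        /* overlap: guard below tgt */
    else if (dst - src < len)
        return dst + len - DEMO_PAGE_SIZE; /* overlap: guard above tgt */
    return 0;                              /* disjoint: no guard needed */
}

int
main(void)
{
    /* Moving 4 pages down by 1 page overlaps: guard lands at dst. */
    printf("%lx\n", overlap_guard(0x101000UL, 0x100000UL, 4 * DEMO_PAGE_SIZE));
    /* Moving 4 pages up by 1 page overlaps: guard lands at the high border. */
    printf("%lx\n", overlap_guard(0x100000UL, 0x101000UL, 4 * DEMO_PAGE_SIZE));
    /* Disjoint move: both differences are >= len, so no guard page. */
    printf("%lx\n", overlap_guard(0x100000UL, 0x200000UL, 4 * DEMO_PAGE_SIZE));
    return 0;
}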
static void
syscall_handler (struct intr_frame *f)
{
  /* Check that we can read the supplied user memory pointer, using the
     check_mem_ptr() helper inside get_word_on_stack().  If the check
     fails, the process is terminated. */
  int syscall_number = (int)get_word_on_stack(f, 0);

  switch(syscall_number) {

    case SYS_HALT:
    {
      sys_halt();
      break;
    }

    case SYS_EXIT:
    {
      int status = (int)get_word_on_stack(f, 1);
      /* Returns exit status to the kernel.  eax is set before sys_exit(),
         which does not return. */
      f->eax = status;
      sys_exit(status);
      break;
    }

    case SYS_EXEC:
    {
      const char *cmd_line = (const char *)get_word_on_stack(f, 1);
      pid_t pid = sys_exec(cmd_line);
      /* Returns the new process's pid. */
      f->eax = pid;
      break;
    }

    case SYS_WAIT:
    {
      pid_t pid = (pid_t)get_word_on_stack(f, 1);
      /* Returns the child's exit status (the pid argument is the pid of
         this child). */
      f->eax = sys_wait(pid);
      break;
    }

    case SYS_CREATE:
    {
      const char *filename = (const char *)get_word_on_stack(f, 1);
      unsigned initial_size = (unsigned)get_word_on_stack(f, 2);
      /* Returns true to the kernel if creation is successful. */
      f->eax = (int)sys_create(filename, initial_size);
      break;
    }

    case SYS_REMOVE:
    {
      const char *filename = (const char *)get_word_on_stack(f, 1);
      /* Returns true if successful, and false otherwise. */
      f->eax = sys_remove(filename);
      break;
    }

    case SYS_OPEN:
    {
      const char *filename = (const char *)get_word_on_stack(f, 1);
      /* Returns the file descriptor of the opened file, or -1 if it could
         not be opened. */
      f->eax = sys_open(filename);
      break;
    }

    case SYS_FILESIZE:
    {
      int fd = (int)get_word_on_stack(f, 1);
      /* Returns the size of the file in bytes. */
      f->eax = sys_filesize(fd);
      break;
    }

    case SYS_READ:
    {
      int fd = (int)get_word_on_stack(f, 1);
      void *buffer = (void *)get_word_on_stack(f, 2);
      unsigned size = (unsigned)get_word_on_stack(f, 3);
      /* Returns the number of bytes actually read, or -1 if the file
         could not be read. */
      f->eax = sys_read(fd, buffer, size, f);
      break;
    }

    case SYS_WRITE:
    {
      int fd = (int)get_word_on_stack(f, 1);
      void *buffer = (void *)get_word_on_stack(f, 2);
      unsigned size = (unsigned)get_word_on_stack(f, 3);
      /* Returns the number of bytes written. */
      f->eax = sys_write(fd, buffer, size);
      break;
    }

    case SYS_SEEK:
    {
      int fd = (int)get_word_on_stack(f, 1);
      unsigned position = (unsigned)get_word_on_stack(f, 2);
      sys_seek(fd, position);
      break;
    }

    case SYS_TELL:
    {
      int fd = (int)get_word_on_stack(f, 1);
      /* Returns the position of the next byte to be read or written in
         open file 'fd' (in bytes, from the start of the file). */
      f->eax = sys_tell(fd);
      break;
    }

    case SYS_CLOSE:
    {
      int fd = (int)get_word_on_stack(f, 1);
      sys_close(fd);
      break;
    }

    case SYS_MMAP:
    {
      int fd = (int)get_word_on_stack(f, 1);
      void *addr = (void *)get_word_on_stack(f, 2);
      f->eax = sys_mmap(fd, addr);
      break;
    }

    case SYS_MUNMAP:
    {
      mapid_t mapping = (mapid_t)get_word_on_stack(f, 1);
      sys_munmap(mapping);
      break;
    }

    default:
    {
      NOT_REACHED();
    }
  }
}
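/* get_word_on_stack() and check_mem_ptr() are referenced above but not
 * shown.  A minimal sketch, assuming standard Pintos facilities
 * (is_user_vaddr(), pagedir_get_page()); the original helpers may differ
 * in detail. */
static void
check_mem_ptr (const void *uaddr)
{
  /* Reject null pointers, kernel addresses, and unmapped pages by
     terminating the offending process. */
  if (uaddr == NULL || !is_user_vaddr (uaddr)
      || pagedir_get_page (thread_current ()->pagedir, uaddr) == NULL)
    sys_exit (-1);
}

static uint32_t
get_word_on_stack (struct intr_frame *f, int offset)
{
  /* The OFFSET-th 32-bit word above the user stack pointer. */
  uint32_t *word_addr = (uint32_t *) f->esp + offset;
  check_mem_ptr (word_addr);
  return *word_addr;
}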
int
linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
    int flags, int fd, off_t pos)
{
    struct proc *p = td->td_proc;
    struct vmspace *vms = td->td_proc->p_vmspace;
    struct mmap_args /* {
        caddr_t addr;
        size_t len;
        int prot;
        int flags;
        int fd;
        off_t pos;
    } */ bsd_args;
    int error;
    struct file *fp;
    cap_rights_t rights;

    LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
        addr, len, prot, flags, fd, pos);

    error = 0;
    bsd_args.flags = 0;
    fp = NULL;

    /*
     * Linux mmap(2):
     * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
     */
    if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
        return (EINVAL);

    if (flags & LINUX_MAP_SHARED)
        bsd_args.flags |= MAP_SHARED;
    if (flags & LINUX_MAP_PRIVATE)
        bsd_args.flags |= MAP_PRIVATE;
    if (flags & LINUX_MAP_FIXED)
        bsd_args.flags |= MAP_FIXED;
    if (flags & LINUX_MAP_ANON) {
        /* Enforce pos to be on page boundary, then ignore. */
        if ((pos & PAGE_MASK) != 0)
            return (EINVAL);
        pos = 0;
        bsd_args.flags |= MAP_ANON;
    } else
        bsd_args.flags |= MAP_NOSYNC;
    if (flags & LINUX_MAP_GROWSDOWN)
        bsd_args.flags |= MAP_STACK;

    /*
     * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
     * on Linux/i386 if the binary requires an executable stack.
     * We do this only for IA32 emulation, as on native i386 this does not
     * make sense without PAE.
     *
     * XXX. Linux checks that the file system is not mounted with noexec.
     */
    bsd_args.prot = prot;
#if defined(__amd64__)
    linux_fixup_prot(td, &bsd_args.prot);
#endif

    /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
    bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
    if (bsd_args.fd != -1) {
        /*
         * Linux follows the Solaris mmap(2) description:
         * the file descriptor fildes is opened with
         * read permission, regardless of the
         * protection options specified.
         */
        error = fget(td, bsd_args.fd,
            cap_rights_init(&rights, CAP_MMAP), &fp);
        if (error != 0)
            return (error);
        if (fp->f_type != DTYPE_VNODE) {
            fdrop(fp, td);
            return (EINVAL);
        }

        /* Linux mmap() just fails for O_WRONLY files. */
        if (!(fp->f_flag & FREAD)) {
            fdrop(fp, td);
            return (EACCES);
        }

        fdrop(fp, td);
    }

    if (flags & LINUX_MAP_GROWSDOWN) {
        /*
         * The Linux MAP_GROWSDOWN option does not limit auto
         * growth of the region.  Linux mmap with this option
         * takes as addr the initial BOS, and as len, the initial
         * region size.  It can then grow down from addr without
         * limit.  However, Linux threads have an implicit internal
         * limit on stack size of STACK_SIZE; it is just not
         * enforced explicitly in Linux.  But here we impose
         * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
         * region, since we can do this with our mmap.
         *
         * Our mmap with MAP_STACK takes addr as the maximum
         * downsize limit on BOS, and as len the max size of
         * the region.  It then maps the top SGROWSIZ bytes,
         * and auto grows the region down, up to the limit
         * in addr.
         *
         * If we don't use the MAP_STACK option, the effect
         * of this code is to allocate a stack region of a
         * fixed size of (STACK_SIZE - GUARD_SIZE).
         */
        if ((caddr_t)addr + len > vms->vm_maxsaddr) {
            /*
             * Some Linux apps will attempt to mmap
             * thread stacks near the top of their
             * address space.  If their TOS is greater
             * than vm_maxsaddr, vm_map_growstack()
             * will confuse the thread stack with the
             * process stack and deliver a SEGV if they
             * attempt to grow the thread stack past their
             * current stacksize rlimit.  To avoid this,
             * adjust vm_maxsaddr upwards to reflect
             * the current stacksize rlimit rather
             * than the maximum possible stacksize.
             * It would be better to adjust the
             * mmap'ed region, but some apps do not check
             * mmap's return value.
             */
            PROC_LOCK(p);
            vms->vm_maxsaddr = (char *)p->p_sysent->sv_usrstack -
                lim_cur_proc(p, RLIMIT_STACK);
            PROC_UNLOCK(p);
        }

        /*
         * This gives us our maximum stack size and a new BOS.
         * If we're using VM_STACK, then mmap will just map
         * the top SGROWSIZ bytes, and let the stack grow down
         * to the limit at BOS.  If we're not using VM_STACK
         * we map the full stack, since we don't have a way
         * to autogrow it.
         */
        if (len > STACK_SIZE - GUARD_SIZE) {
            bsd_args.addr = (caddr_t)addr;
            bsd_args.len = len;
        } else {
            bsd_args.addr = (caddr_t)addr -
                (STACK_SIZE - GUARD_SIZE - len);
            bsd_args.len = STACK_SIZE - GUARD_SIZE;
        }
    } else {
        bsd_args.addr = (caddr_t)addr;
        bsd_args.len = len;
    }
    bsd_args.pos = pos;

    error = sys_mmap(td, &bsd_args);

    LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);
    return (error);
}
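/* Illustrative check only (not from the original source): this small
 * standalone program reproduces the MAP_GROWSDOWN base-address fixup used
 * in both linux_mmap2() and linux_mmap_common() above, with made-up
 * STACK_SIZE/GUARD_SIZE constants.  The invariant to verify is that the
 * region's top (addr + len) is preserved while the base slides down to
 * make room for the full fixed-size stack. */
#include <stdio.h>

#define DEMO_STACK_SIZE (8UL << 20)   /* hypothetical 8 MiB */
#define DEMO_GUARD_SIZE (4UL << 10)   /* hypothetical 4 KiB */

int
main(void)
{
    unsigned long addr = 0x40000000UL;  /* requested initial BOS */
    unsigned long len = 0x10000UL;      /* requested initial region size */
    unsigned long bsd_addr, bsd_len;

    if (len > DEMO_STACK_SIZE - DEMO_GUARD_SIZE) {
        /* Request already exceeds the imposed limit: pass it through. */
        bsd_addr = addr;
        bsd_len = len;
    } else {
        /* Slide the base down so the fixed-size region still ends at the
         * requested top (addr + len); note bsd_addr + bsd_len == addr + len. */
        bsd_addr = addr - (DEMO_STACK_SIZE - DEMO_GUARD_SIZE - len);
        bsd_len = DEMO_STACK_SIZE - DEMO_GUARD_SIZE;
    }
    printf("addr 0x%lx len 0x%lx -> addr 0x%lx len 0x%lx\n",
        addr, len, bsd_addr, bsd_len);
    return 0;
}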
int
main(int argc, char **argv)
{
    uint32_t vcoreid;

    vcore_lib_init();
    if ((vcoreid = vcore_id())) {
        printf("Should never see me! (from vcore %d)\n", vcoreid);
    } else { // core 0
        printf("Hello from else vcore 0\n");
        printf("Multi-Goodbye, world, from PID: %d!\n", getpid());
        switch (test) {
        case TEST_MMAP: {
            printf("Testing MMAP\n");
            void *addr;
            /* prot = 3 is PROT_READ | PROT_WRITE */
            addr = sys_mmap((void *)USTACKTOP - 20 * PGSIZE, 8 * PGSIZE, 3,
                            MAP_FIXED | MAP_ANONYMOUS, -1, 0);
            printf("got addr = 0x%08x\n", addr);
            *(int *)addr = 0xdeadbeef;
            *(int *)(addr + 3 * PGSIZE) = 0xcafebabe;
            // these should work
            printf("reading addr: 0x%08x\n", *(int *)addr);
            printf("reading addr+3pg: 0x%08x\n", *(int *)(addr + 3 * PGSIZE));
            // this should fault
            printf("Should page fault and die now.\n");
            *(int *)(addr - 3 * PGSIZE) = 0xdeadbeef;
            printf("Should not see me!!!!!!!!!!!!!!!!!!\n");
            while (1);
        }
        case TEST_ONE_CORE:
            vcore_request_more(1);
            printf("One core test's core0 is done\n");
            printf("Check to see it's on a worker core.\n");
            while (1);
        case TEST_ASK_FOR_TOO_MANY_CORES:
            vcore_request_more(12);
            printf("Asked for too many is done\n");
            return 0;
        case TEST_INCREMENTAL_CHANGES:
            vcore_request_more(4);
            break;
        default:
            vcore_request_more(5);
        }
        printf("Should see me if you want to relocate core0's context "
               "when moving from RUNNING_S\n");
    }

    // vcore0 only below here
    switch (test) {
    case TEST_YIELD_OUT_OF_ORDER:
        udelay(10000000);
        printf("Core 2 should have yielded, asking for another\n");
        vcore_request_more(5);
        break;
    case TEST_YIELD_0_OUT_OF_ORDER:
        udelay(5000000);
        printf("Core %d yielding\n", vcoreid);
        sys_yield(0);
        printf("Core 0 came back where it left off in RUNNING_M!!!\n");
        break;
    }
    global_tests(vcoreid);
    printf("Vcore %d Done!\n", vcoreid);
    while (1);
    return 0;
}
int
cloudabi_sys_mem_advise(struct thread *td,
    struct cloudabi_sys_mem_advise_args *uap)
{
    struct madvise_args madvise_args = {
        .addr = uap->addr,
        .len = uap->len
    };

    switch (uap->advice) {
    case CLOUDABI_ADVICE_DONTNEED:
        madvise_args.behav = MADV_DONTNEED;
        break;
    case CLOUDABI_ADVICE_NORMAL:
        madvise_args.behav = MADV_NORMAL;
        break;
    case CLOUDABI_ADVICE_RANDOM:
        madvise_args.behav = MADV_RANDOM;
        break;
    case CLOUDABI_ADVICE_SEQUENTIAL:
        madvise_args.behav = MADV_SEQUENTIAL;
        break;
    case CLOUDABI_ADVICE_WILLNEED:
        madvise_args.behav = MADV_WILLNEED;
        break;
    default:
        return (EINVAL);
    }

    return (sys_madvise(td, &madvise_args));
}

int
cloudabi_sys_mem_lock(struct thread *td, struct cloudabi_sys_mem_lock_args *uap)
{
    struct mlock_args mlock_args = {
        .addr = uap->addr,
        .len = uap->len
    };

    return (sys_mlock(td, &mlock_args));
}

int
cloudabi_sys_mem_map(struct thread *td, struct cloudabi_sys_mem_map_args *uap)
{
    struct mmap_args mmap_args = {
        .addr = uap->addr,
        .len = uap->len,
        .fd = uap->fd,
        .pos = uap->off
    };
    int error;

    /* Translate flags. */
    if (uap->flags & CLOUDABI_MAP_ANON)
        mmap_args.flags |= MAP_ANON;
    if (uap->flags & CLOUDABI_MAP_FIXED)
        mmap_args.flags |= MAP_FIXED;
    if (uap->flags & CLOUDABI_MAP_PRIVATE)
        mmap_args.flags |= MAP_PRIVATE;
    if (uap->flags & CLOUDABI_MAP_SHARED)
        mmap_args.flags |= MAP_SHARED;

    /* Translate protection. */
    error = convert_mprot(uap->prot, &mmap_args.prot);
    if (error != 0)
        return (error);

    return (sys_mmap(td, &mmap_args));
}

int
cloudabi_sys_mem_protect(struct thread *td,
    struct cloudabi_sys_mem_protect_args *uap)
{
    struct mprotect_args mprotect_args = {
        .addr = uap->addr,
        .len = uap->len,
    };
    int error;

    /* Translate protection. */
    error = convert_mprot(uap->prot, &mprotect_args.prot);
    if (error != 0)
        return (error);

    return (sys_mprotect(td, &mprotect_args));
}

int
cloudabi_sys_mem_sync(struct thread *td, struct cloudabi_sys_mem_sync_args *uap)
{
    struct msync_args msync_args = {
        .addr = uap->addr,
        .len = uap->len,
    };

    /* Convert flags. */
    switch (uap->flags & (CLOUDABI_MS_ASYNC | CLOUDABI_MS_SYNC)) {
    case CLOUDABI_MS_ASYNC:
        msync_args.flags |= MS_ASYNC;
        break;
    case CLOUDABI_MS_SYNC:
        msync_args.flags |= MS_SYNC;
        break;
    default:
        return (EINVAL);
    }
    if ((uap->flags & CLOUDABI_MS_INVALIDATE) != 0)
        msync_args.flags |= MS_INVALIDATE;

    return (sys_msync(td, &msync_args));
}

int
cloudabi_sys_mem_unlock(struct thread *td,
    struct cloudabi_sys_mem_unlock_args *uap)
{
    struct munlock_args munlock_args = {
        .addr = uap->addr,
        .len = uap->len
    };

    return (sys_munlock(td, &munlock_args));
}

int
cloudabi_sys_mem_unmap(struct thread *td,
    struct cloudabi_sys_mem_unmap_args *uap)
{
    struct munmap_args munmap_args = {
        .addr = uap->addr,
        .len = uap->len
    };

    return (sys_munmap(td, &munmap_args));
}