Example 1
static void mm_commit_phys(mm_context *ctx, struct mm_mapping *mm, uint64_t physaddr)
{
    uint64_t p;
    uintptr_t start, end;

    start = mm->start_addr;
    end = start + mm->length - 1;
    start &= ~(__PAGESIZE - 1);
    end = __PAGEROUND(end);

    /* No backing required because we're mapping all pages now */

    for (p=start; p<end; p+=__PAGESIZE)
    {
        struct phys_page *pagestruct;
        uint64_t phys = physaddr;
        physaddr += __PAGESIZE;
        pagestruct = phys_to_pagestruct(phys);
        pagestruct->offset = p - start;
        mm->object->page_list.add_tail(pagestruct);
        ++mm->object->npages;
    }
    kdebug("******** mm_commit_phys done ********\n");
}
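
The examples in this collection lean on paging helpers that are not shown here. Below is a minimal sketch of what they presumably look like, assuming 4 KiB pages (the 0xfff masks in the original code suggest this); the real kernel headers may differ.

#include <stdint.h>

/* Assumed definitions, not taken from the source tree */
#define __PAGESIZE      4096UL
#define __PAGEROUND(a)  (((uint64_t)(a) + __PAGESIZE - 1) & ~(__PAGESIZE - 1))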
Example 2
ace_buffer_t *ace_buffer_create(ace_connection_t *connection, size_t size)
{
    ace_buffer_t                    *buf;
    struct ace_buf_create_msg       bufcreatemsg;
    struct ace_buf_create_rpy       bufcreaterpy;
    int                             err;

    /* Allocate the client-side buffer structure */
    if ((buf = (ace_buffer_t *)malloc(sizeof(ace_buffer_t))) == NULL)
    {
        return NULL;
    }

    /* Round the size up to the nearest page */
    bufcreatemsg.size = __PAGEROUND(size);

    if ((err = msg_rpc_port(connection->port, ACE_BUF_CREATE,
                            &bufcreatemsg, sizeof(bufcreatemsg),
                            &bufcreaterpy, sizeof(bufcreaterpy))) != 0)
    {
        free(buf);
        errno = -err;
        return NULL;
    }

    buf->size = size;
    buf->pixel_buf = (uint32_t *)bufcreaterpy.vidmem;
    buf->connection = connection;

    return buf;
}
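
A hypothetical usage sketch: ace_connect() and the 640x480 geometry are invented for illustration; only ace_buffer_create() itself comes from the code above.

ace_connection_t *conn = ace_connect("ace");    /* hypothetical helper */
ace_buffer_t *fb;

if (conn && (fb = ace_buffer_create(conn, 640 * 480 * sizeof(uint32_t))) != NULL)
{
    fb->pixel_buf[0] = 0x00ff0000;              /* paint one pixel red */
}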
Example 3
static void mm_uncommit(mm_context *ctx, struct mm_mapping *mm)
{
    uint64_t p;
    uintptr_t start, end;

    start = mm->start_addr;
    end = start + mm->length - 1;

    start &= ~(__PAGESIZE - 1);
    end = __PAGEROUND(end);

    for (p=start; p<end; p+=__PAGESIZE)
    {
        //kdebug("Unmapping %016lx\n", p);
        kpage_unmap(ctx->cr3, (void *)p);
    }
}
Example 4
static void mm_check_uncommit(mm_context *ctx, struct mm_mapping *mm)
{
    uint64_t p;
    uintptr_t start, end;

    /* Debug-only consistency check, currently disabled. Delete this
     * early return to re-enable it.
     */
    return;

    start = mm->start_addr;
    end = start + mm->length - 1;

    start &= ~(__PAGESIZE - 1);
    end = __PAGEROUND(end);

    for (p=start; p<end; p+=__PAGESIZE)
    {
        //kdebug("Unmapping %016lx\n", p);
        if (kpage_get_mapping(ctx->cr3, (void *)p))
        {
            kdebug("FATAL mm_check_uncommit %p\n", p);
            mm_context_dump(ctx);
        }
    }
}
Example 5
static void mm_commit(mm_context *ctx, struct mm_mapping *mm, uint64_t physaddr)
{
    uint64_t p;
    uintptr_t start, end;
    int writable;

    start = mm->start_addr;
    end = start + mm->length - 1;
    start &= ~(__PAGESIZE - 1);
    end = __PAGEROUND(end);
    writable = mm->prot & PM_WRITABLE ? PM_WRITABLE : 0;

    /* No backing required because we're mapping all pages now */
    mm->object->backing = none;

    for (p=start; p<end; p+=__PAGESIZE)
    {
        struct phys_page *pagestruct;

        if (!physaddr)
        {
            pagestruct = kphys_alloc(physmem_state_user);
        }
        else
        {
            pagestruct = phys_to_pagestruct(physaddr);
            physaddr += __PAGESIZE;
        }
        //kdebug("Mapping %016lx to %016lx\n", phys, p);
        pagestruct->offset = p - start;
        mm->object->page_list.add_tail(pagestruct);
        kpage_map_user((void *)p, pagestruct, mm_pgtable_alloc_callback, ctx, ctx->cr3, writable | PM_USER);
        ++mm->object->npages;
    }
}
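
Illustrative calls only (the ctx and mm setup is elided, and the 0xfd000000 address is made up): a physaddr of 0 makes mm_commit allocate fresh frames with kphys_alloc(), while a nonzero physaddr walks an existing physical range such as a framebuffer.

mm_commit(ctx, mm, 0);             /* anonymous: allocate frames as we go */
mm_commit(ctx, mm, 0xfd000000);    /* fixed: map an existing physical range */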
Example 6
void *elf_file_load_ram(int pid, char *pfile) {

    elf64_hdr       *pelf_header;
    elf64_phdr      *prog_hdr;
    void            *seg_addr;
    void            *elf_offset;
    int             ph;
    uintptr_t       start, end;

    pelf_header = (elf64_hdr *)pfile;

    if (strncmp((const char *)pelf_header->e_ident, "\x7f" "ELF", 4)) {
        anvil_syslog(0, "Incorrect ELF identifier\n");
        return NULL;
    }

    if (pelf_header->e_ident[4] != 2) {     /* EI_CLASS: ELFCLASS64 */
        anvil_syslog(0, "Elf file is not 64 bit\n");
        return NULL;
    }

    if (pelf_header->e_type != 2) {         /* ET_EXEC */
        anvil_syslog(0, "Elf file is not executable\n");
        return NULL;
    }

    if (pelf_header->e_phnum > 3) {
        anvil_syslog(0, "Elf file has too many segments\n");
        return NULL;
    }

    //anvil_syslog(0, "Elf OK\n");

    sizeof_phdrs = pelf_header->e_phentsize * pelf_header->e_phnum;

    prog_hdr = (elf64_phdr *)((char *)pelf_header + pelf_header->e_phoff);

    for (ph=0; ph<pelf_header->e_phnum; ++ph) {

        int prot;

        if (prog_hdr->p_type != 1) {        /* PT_LOAD */
            anvil_syslog(0, "Elf file segment is not loadable\n");
            return NULL;
        }

        prot = get_prot(prog_hdr->p_flags);

        /* We found a loadable segment so create a mapping */
        start = prog_hdr->p_vaddr;
        end = start + prog_hdr->p_memsz;

        start &= ~0xfff;
        end = __PAGEROUND(end);

        if (_Mmap(0, 0, end - start, PROT_READ,
                    MAP_ANON, -1, 0, &seg_addr) != 0) {
            anvil_syslog(0, "Out of memory\n");
            return NULL;
        }

        //anvil_syslog(0, "seg_addr = %016lx\n", seg_addr);

        /* Find where in the ELF image the segment lives */
        elf_offset = (void *)(pfile + prog_hdr->p_offset);

        /* The segment is zero filled by the _Mmap system call */
        memcpy((char *)seg_addr + prog_hdr->p_vaddr - start,
                                elf_offset, prog_hdr->p_filesz);

        //anvil_syslog(0, "_Vmm_send: %d %016lx %016lx %018x\n",
        //        pid, start, seg_addr, end - start);
        _Vmm_send(pid, (void *)start, seg_addr, end - start, prot);

        prog_hdr = (elf64_phdr *)
                ((char *)prog_hdr + pelf_header->e_phentsize);
    }

    return (void *)pelf_header->e_entry;
}
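
A hypothetical caller, assuming the flat ELF image has already been placed in memory (by the boot loader, say); the image variable and the error handling are illustrative.

void *entry;

if ((entry = elf_file_load_ram(pid, image)) == NULL)
{
    anvil_syslog(0, "elf_file_load_ram: load failed\n");
}
/* entry holds the ELF e_entry address for the new process */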
Example 7
void *elf_file_load(int pid, int fd) {

    elf64_hdr       elf_header;
    elf64_hdr       *pelf_header;
    elf64_phdr      *pprog_hdr;
    void            *seg_addr;
    int             ph;
    uintptr_t       start, end;
    int             sizeof_phdrs;
    char            *interp_name;
    elf64_dyn       *dyn_seg;
    int             ndyn_items;
    int             interp_fd;
    uintptr_t       entry_offs;
    int             prot;


    interp_name = NULL;
    dyn_seg     = NULL;
    entry_offs  = 0;

    if (read(fd, &elf_header, sizeof(elf_header)) != sizeof(elf_header)) {
        anvil_syslog(0, "Bad ELF header\n");
        while (1);      /* no error path yet; hang where it's visible */
    }

    pelf_header = &elf_header;

    if (strncmp((const char *)pelf_header->e_ident, "\x7f" "ELF", 4)) {
        anvil_syslog(0, "Incorrect ELF identifier\n");
        return NULL;
    }

    if (pelf_header->e_ident[4] != 2) {     /* EI_CLASS: ELFCLASS64 */
        anvil_syslog(0, "Elf file is not 64 bit\n");
        return NULL;
    }

    if (pelf_header->e_type != 2) {         /* ET_EXEC */
        anvil_syslog(0, "Elf file is not executable\n");
        return NULL;
    }

    sizeof_phdrs = pelf_header->e_phentsize * pelf_header->e_phnum;

    pprog_hdr = (elf64_phdr *)alloca(sizeof_phdrs);
    memset(pprog_hdr, 0, sizeof_phdrs);

    if (pread(fd, pprog_hdr, sizeof_phdrs, elf_header.e_phoff) == -1) {
        anvil_syslog(0, "Bad prog_hdr header\n");
        while (1);
    }

    for (ph=0; ph<pelf_header->e_phnum; ++ph) {

        switch (pprog_hdr->p_type) {

            case PT_LOAD:

                prot = get_prot(pprog_hdr->p_flags);

                /* We found a loadable segment so create a mapping */
                start = pprog_hdr->p_vaddr;
                end = start + pprog_hdr->p_memsz;

                start &= ~(__PAGESIZE - 1);
                end = __PAGEROUND(end);

                //anvil_syslog(0, "\nstart=%016lx end=%016lx\n", start, end);

                /* Make a mapping in the address of the exec-svr */
                if (_Mmap(0, 0, end - start, PROT_READ,
                            MAP_ANON, -1, 0, &seg_addr) != 0) {
                    anvil_syslog(0, "Out of memory\n");
                    return NULL;
                }

                /*
                 * seg_addr (from the mmap call) is the address in the
                 * exec-svr where we will load the section
                 */

                /* Load the segment from the file */
                //anvil_syslog(0, "pread to %016lx %016lx\n", pprog_hdr->p_filesz, pprog_hdr->p_offset);
                if (pread(fd, seg_addr + pprog_hdr->p_vaddr - start, pprog_hdr->p_filesz, pprog_hdr->p_offset) == -1) {
                    anvil_syslog(0, "Bad load of segment\n");
                    while (1);
                }

                /* Send it to the new process space */
                _Vmm_send(pid, (void *)start, seg_addr, end - start, prot);

                break;

            case PT_INTERP:
                /*
                 * Just record the name of the interpreter. We'll load it in
                 * a minute
                 */
                //anvil_syslog(0, "PT_INTERP %d\n", pprog_hdr->p_filesz);
                interp_name = alloca(pprog_hdr->p_filesz + 1);
                if (pread(fd, interp_name, pprog_hdr->p_filesz, pprog_hdr->p_offset) == -1) {
                    anvil_syslog(0, "Bad load of interp\n");
                    while (1);
                }
                interp_name[pprog_hdr->p_filesz] = 0;
                //anvil_syslog(0, "PT_INTERP is %s\n", interp_name);
                break;

            case PT_DYNAMIC:
                /* Read the dynamic segment; its tags aren't used yet */
                dyn_seg = alloca(pprog_hdr->p_filesz);
                if (pread(fd, dyn_seg, pprog_hdr->p_filesz, pprog_hdr->p_offset) == -1) {
                    anvil_syslog(0, "Bad load of dyn_seg\n");
                    while (1);
                }
                ndyn_items = pprog_hdr->p_filesz / sizeof(elf64_dyn);
                //anvil_syslog(0, "PT_DYNAMIC %d items\n", ndyn_items);
                /* Now parse the dyn seg */
                while (ndyn_items) {
                    //anvil_syslog(0, " items %d %016lx\n", dyn_seg->d_tag, dyn_seg->d_un.d_ptr);
                    ++dyn_seg;
                    --ndyn_items;
                }
                break;

            case PT_TLS:
//                anvil_syslog(0, "PT_TLS\n");
//                anvil_syslog(0, "p_type %016lx\n",pprog_hdr->p_type);
//                anvil_syslog(0, "p_flags %016lx\n",pprog_hdr->p_flags);
//                anvil_syslog(0, "p_offset %016lx\n",pprog_hdr->p_offset);
//                anvil_syslog(0, "p_vaddr %016lx\n",pprog_hdr->p_vaddr);
//                anvil_syslog(0, "p_paddr %016lx\n",pprog_hdr->p_paddr);
//                anvil_syslog(0, "p_filesz %016lx\n",pprog_hdr->p_filesz);
//                anvil_syslog(0, "p_memsz %016lx\n",pprog_hdr->p_memsz);
//                anvil_syslog(0, "p_align %016lx\n",pprog_hdr->p_align);
                //*ptls_bss_len += pprog_hdr->p_memsz;
                break;

            default:
                anvil_syslog(0, "Unknown Load type %d\n", pprog_hdr->p_type);
            break;
        }

        pprog_hdr = (elf64_phdr *)
                ((char *)pprog_hdr + pelf_header->e_phentsize);
    }

    if (interp_name != NULL) {
        interp_name = "/lib/libc.so";
        //anvil_syslog(0, "Loading interpreter %s\n", interp_name);
        if ((interp_fd = open(interp_name, 0, 0)) == -1) {
            anvil_syslog(0, "Couldn't open interpreter %s\n", interp_name);
            return NULL;
        }

        /*
         * Now load the interpreter into memory. The interpreter should be
         * entirely PIC so we can load it anywhere.
         */

        if (read(interp_fd, &elf_header, sizeof(elf_header)) != sizeof(elf_header)) {
            anvil_syslog(0, "Bad ELF header\n");
            while (1);
        }

        pelf_header = &elf_header;

        if (strncmp((const char *)pelf_header->e_ident, "\x7f" "ELF", 4)) {
            anvil_syslog(0, "Interp: Incorrect ELF identifier\n");
            return NULL;
        }

        if (pelf_header->e_ident[4] != 2) {     /* EI_CLASS: ELFCLASS64 */
            anvil_syslog(0, "Interp: Elf file is not 64 bit\n");
            return NULL;
        }

        if (pelf_header->e_type != 3) {         /* ET_DYN: shared object */
            anvil_syslog(0, "Interp: Elf file is not a so\n");
            return NULL;
        }

        anvil_syslog(0, "Interp OK\n");

        sizeof_phdrs = pelf_header->e_phentsize * pelf_header->e_phnum;

        pprog_hdr = (elf64_phdr *)alloca(sizeof_phdrs);
        memset(pprog_hdr, 0, sizeof_phdrs);

        if (pread(interp_fd, pprog_hdr, sizeof_phdrs, elf_header.e_phoff) == -1) {
            anvil_syslog(0, "Interp: Bad prog_hdr header\n");
            while (1);
        }

        for (ph=0; ph<pelf_header->e_phnum; ++ph) {

            switch (pprog_hdr->p_type) {

                case PT_LOAD:

                    prot = get_prot(pprog_hdr->p_flags);

                    /* We found a loadable segment so create a mapping */
                    start = pprog_hdr->p_vaddr;
                    end = start + pprog_hdr->p_memsz;

                    start &= ~(__PAGESIZE - 1);
                    end = __PAGEROUND(end);

                    //anvil_syslog(0, "\nstart=%016lx end=%016lx\n", start, end);

                    /* Make a mapping in the address of the exec-svr */
                    if (_Mmap(0, 0, end - start, PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_ANON, -1, 0, &seg_addr) != 0) {
                        anvil_syslog(0, "Interp: Out of memory\n");
                        return NULL;
                    }

                    /*
                     * seg_addr (from the mmap call) is the address in the
                     * exec-svr where we will load the section
                     */

                    /* Load the segment from the file */
                    //anvil_syslog(0, "pread to %016lx %016lx\n", pprog_hdr->p_filesz, pprog_hdr->p_offset);
                    if (pread(interp_fd, seg_addr + pprog_hdr->p_vaddr - start, pprog_hdr->p_filesz, pprog_hdr->p_offset) == -1) {
                        anvil_syslog(0, "Interp: Bad load of segment\n");
                        while (1);
                    }

                    /* Send it to the new process space, relocated to the
                     * fixed 0x100000000 interpreter base
                     */
                    _Vmm_send(pid, (void *)(start + 0x100000000), seg_addr, end - start, prot);

                    break;

                default:
                    anvil_syslog(0, "Unhandled program header type %d\n", pprog_hdr->p_type);
                    break;
            }

            pprog_hdr = (elf64_phdr *)
                    ((char *)pprog_hdr + pelf_header->e_phentsize);
        }
        entry_offs += 0x100000000;
        close(interp_fd);
    }

    //anvil_syslog(0, "Start address at %016lx\n", elf_header.e_entry+entry_offs);

    return (void *)(elf_header.e_entry + entry_offs);
}
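
A sketch of the exec path as this code suggests it; the open() flags and error handling are assumptions. When a PT_INTERP is present, the returned entry points into the interpreter, which was loaded at the fixed 0x100000000 base above.

int fd;
void *entry = NULL;

if ((fd = open(path, 0, 0)) != -1)
{
    entry = elf_file_load(pid, fd);
    close(fd);
}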
Example 8
int mm_munmap(mm_context *ctx, void *addr, size_t len) {

    struct mm_mapping   *pmap;
    uintptr_t           start;
    uintptr_t           end;

    kdebug("mm_munmap\n");
    mm_context_dump(ctx);

    /* Start with some basic error checking */
    if (len == 0)
    {
        return EINVAL;
    }

    start = (uintptr_t)addr;
    if (start & (__PAGESIZE-1))
    {
        return EINVAL;
    }

    /* Round end up to the page size */
    end = start + __PAGEROUND(len);
    if (end <= start)
    {
        /* It's wrapped around */
        return EINVAL;
    }

    rwlock_wrlock(&ctx->lock);

    if (start < (uintptr_t)ctx->low_addr)
    {
        rwlock_unlock(&ctx->lock);
        return EINVAL;
    }

    if (end > (uintptr_t)ctx->hi_addr)
    {
        rwlock_unlock(&ctx->lock);
        return EINVAL;
    }

    /* POSIX says we should remove the part of any mapping that contains
     * any address in the range addr to addr+len.
     */
    pmap = first_mapping(ctx);

    while (pmap)
    {
        uintptr_t pmap_end = pmap->start_addr + pmap->length;

        /* There are 5 cases to handle */
        if (start <= pmap->start_addr && end > pmap->start_addr
                                                && end < pmap_end)
        {
            /* 1. munmap overlaps the beginning of the pmap range so we will
             * chop the front off the pmap
             */
            pmap->length    -= end - pmap->start_addr;
            pmap->start_addr = end;
            pmap = next_mapping(ctx, pmap);
        }
        else if (start <= pmap->start_addr && end >= pmap_end)
        {
            /* 2. munmap covers the entire pmap range so we will
             * simply remove the pmap.
             *
             * Move to the next item first
             */
            struct mm_mapping   *del_pmap = pmap;

            pmap = next_mapping(ctx, pmap);
            ctx->mapping_list.rem_item(del_pmap);
            kfree(del_pmap);
        }
        else if (start > pmap->start_addr && start < pmap_end && end >= pmap_end)
        {
            /* 3. munmap overlaps the end of the pmap range so we will chop
             * the end off the pmap
             */
            pmap->length = start - pmap->start_addr;
            pmap = next_mapping(ctx, pmap);
        }
        else if (start > pmap->start_addr && end < pmap_end)
        {
            /* 4. munmap overlaps the middle of the pmap range. In this case we're
             * going to need to split the pmap into 2
             */
            struct mm_mapping   *new_pmap;

            if ((new_pmap = (struct mm_mapping *)kmalloc(sizeof(struct mm_mapping))) == NULL)
            {
                rwlock_unlock(&ctx->lock);
                return ENOMEM;
            }

            /* Copy fd, prot etc. fields */
            memcpy(new_pmap, pmap, sizeof(struct mm_mapping));

            new_pmap->start_addr    = end;
            new_pmap->length        = pmap->start_addr + pmap->length - end;

            /* Now adjust the original pmap */
            pmap->length            = start - pmap->start_addr;

            /* Move to the next mapping */
            pmap = next_mapping(ctx, pmap);

            /* Todo: We need to split the mm_object too. Damn */

            /* Insert the new mapping before the next one */
            ctx->mapping_list.add_before(pmap, new_pmap);
        }
        else
        {
            /* 5. munmap misses the pmap range entirely so move to the
             * next mapping
             */
            pmap = next_mapping(ctx, pmap);
        }
    }

    mm_context_dump(ctx);

    rwlock_unlock(&ctx->lock);

    kdebug("mm_munmap done\n");

    return 0;
}
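
The five overlap cases, sketched with [ ] as the existing pmap and < > as the munmap range:

/*
 * 1. <----[====>------]       chop the front off the pmap
 * 2. <----[===========]-->    remove the pmap entirely
 * 3. [------<====]---->       chop the end off the pmap
 * 4. [---<======>-----]       split the pmap in two
 * 5. [------]  <------>       no overlap; move to the next mapping
 */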
Example 9
int mm_mmap(mm_context *ctx, void *addr, size_t len,
                    int prot, int flags, int fildes, off_t off, void **paddr) {

    struct mm_mapping   *new_map;
    struct mm_mapping   *pmap;
    struct mm_object    *new_obj;
    uintptr_t           start;
    uintptr_t           end;
    uintptr_t           round_addr;
    size_t              round_len;

    /* Do some error checking */
    if (fildes != -1)
    {
        /* Currently not supported */
        return EINVAL;
    }

    if (len == 0)
    {
        return EINVAL;
    }

    /* Round the start address down to a page boundary */
    round_addr = (uintptr_t)addr & ~(__PAGESIZE - 1);
    round_len  = (uintptr_t)addr - round_addr + len;
    /* Now round it up */
    round_len = __PAGEROUND(round_len);

    rwlock_wrlock(&ctx->lock);

    if (!addr)
    {
        /* The user doesn't care so we put it above the anon addr. The thinking
         * here is that if we stay high there is still room for brk/sbrk
         * to work. Many mallocs rely on it.
         */
        start = (uintptr_t)ctx->anon_addr;
    }
    else
    {
        start = round_addr;
    }

    //kdebug("start = %016lx\n", start);
    //kdebug("len = %d(%x)\n", round_len, round_len);

    /* Find where the new object will fit */
    if (flags & MAP_FIXED)
    {
        /* Todo: Remove existing overlapping maps */
        /* This is an absolute mapping */
        end = start + round_len - 1;
        pmap = first_mapping(ctx);

        while (pmap && end >= pmap->start_addr)
        {
            pmap = next_mapping(ctx, pmap);
        }
    }
    else
    {
        /* We will take the first fit above start */
        uintptr_t last_end;

        pmap = first_mapping(ctx);
        last_end = start;

        /* Just find a place where it fits that's above start */
        while (pmap && (pmap->start_addr < start))
        {
            pmap = next_mapping(ctx, pmap);
        }

        /* Now find the first fit */
        while (pmap && pmap->start_addr - last_end < round_len)
        {
            last_end = pmap->start_addr + pmap->length;
            pmap = next_mapping(ctx, pmap);
        }

        start = last_end;
    }

    /* Create an mm_object */
    if ((new_obj = object_new()) == NULL)
    {
        rwlock_unlock(&ctx->lock);
        return ENOMEM;
    }

    /* Create a new mapping */
    if ((new_map = mapping_new(start, round_len, prot, -1, new_obj)) == NULL)
    {
        /* Note: new_obj is leaked here; it should be released as well */
        rwlock_unlock(&ctx->lock);
        return ENOMEM;
    }

    /* Stick new_map before pmap */
    ctx->mapping_list.add_before(pmap, new_map);

    mm_check_uncommit(ctx, pmap);

    /* We support 3 types of mapping */
    if (flags & MAP_PHYS)
    {
        /*
         * 1. Map some physical memory. In this case all the pages will be
         * mapped immediately and backing will be 'none'
         */
        new_obj->backing = none;
        mm_commit_phys(ctx, new_map, off);
    }
    else if (fildes >= 0)
    {
        /*
         * 2. Mapping a file - currently not supported (and unreachable,
         * since fildes != -1 was rejected above)
         */

    }
    else if (flags & MAP_ANON)
    {
        /*
         * 3. Map some anonymous memory. Backing will be 'anon'
         */
        new_obj->backing = anon;
    }

    *paddr = (void *)new_map->start_addr;

    cpu_tlb_flush_global();

    mm_context_dump(ctx);

    rwlock_unlock(&ctx->lock);

    pagestruct_audit();

    return 0;
}
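
Illustrative only: the two mapping kinds that currently work. The PM_WRITABLE prot value and the 0xfd000000 physical address are assumptions based on the code above; file-backed mappings (fildes >= 0) are rejected with EINVAL for now.

void *va;

/* Anonymous memory: backing is 'anon', pages are faulted in later */
mm_mmap(ctx, NULL, 16 * __PAGESIZE, PM_WRITABLE, MAP_ANON, -1, 0, &va);

/* Physical memory: off carries the physical address, pages map eagerly */
mm_mmap(ctx, NULL, 8 * __PAGESIZE, PM_WRITABLE, MAP_PHYS, -1, 0xfd000000, &va);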