Example #1
void exn_page_fault(void)
{
	void *addr;
	struct vma *vma;
	unsigned long error;

	error = get_error_code(current->esp);
	// the CPU reports the faulting address in CR2
	MOV("cr2", addr);

	// address not mapped in process address space
	if (!(vma = vma_get(&current->mm, addr))) {
		segfault(SEGV_MAPERR, error, addr);
		return;
	}
	// page not present (demand paging)
	if (!(error & PGF_PERM)) {
		if (vm_map_page(vma, addr) < 0)
			// FIXME: stall until memory available?
			segfault(SEGV_MAPERR, error, addr);
		return;
	}
	// write permission
	if (error & PGF_WRITE) {
		if (vm_write_perm(vma, addr) < 0)
			segfault(SEGV_ACCERR, error, addr);
		return;
	}
	// read permission
	if (vm_read_perm(vma, addr) < 0)
		segfault(SEGV_ACCERR, error, addr);
}
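The handler keys off two bits of the hardware error code. On x86, bit 0 of the page-fault error code is set for a protection violation and clear for a not-present page, and bit 1 is set when the faulting access was a write, so PGF_PERM and PGF_WRITE plausibly name those architectural bits. A minimal sketch, assuming that mapping (these definitions are not from the original source):

#define PGF_PERM   0x1  // bit 0: 1 = protection violation, 0 = page not present
#define PGF_WRITE  0x2  // bit 1: 1 = faulting access was a write
#define PGF_USER   0x4  // bit 2: 1 = fault raised while in user mode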
// This area is wired by default and does not take page faults.
// The physical pages for this area should already have been allocated;
// this function will not mark them as such. The area created here will not
// be backed by a vm_cache or vm_backing_store.
struct vm_area *map_contiguous_memory(struct vm_address_space *space, unsigned int address,
                                      unsigned int size, enum placement place,
                                      const char *name, unsigned int area_flags,
                                      unsigned int phys_addr)
{
    struct vm_area *area;
    unsigned int page_flags;
    unsigned int offset;

    area_flags |= AREA_WIRED;

    rwlock_lock_write(&space->mut);
    area = create_vm_area(&space->area_map, address, size, place, name, area_flags);
    if (area == 0)
    {
        kprintf("create area failed\n");
        goto error1;
    }

    area->cache = 0;

    page_flags = PAGE_PRESENT;

    // We do not do dirty page tracking on these areas, as this is expected to
    // be device memory. Mark pages writable by default if the area is writable.
    if ((area_flags & AREA_WRITABLE) != 0)
        page_flags |= PAGE_WRITABLE;

    if (area->flags & AREA_EXECUTABLE)
        page_flags |= PAGE_EXECUTABLE;

    if (space == &kernel_address_space)
        page_flags |= PAGE_SUPERVISOR | PAGE_GLOBAL;

    // Map the pages
    for (offset = 0; offset < size; offset += PAGE_SIZE)
    {
        vm_map_page(space->translation_map, area->low_address + offset,
                    (phys_addr + offset) | page_flags);
    }

// Both the success path and the error path converge here to unlock.
error1:
    rwlock_unlock_write(&space->mut);

    return area;
}
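For illustration, a hypothetical call that maps a device frame buffer into the kernel address space. The placement constant, addresses, and size below are placeholders, not values from the original source:

    // Hypothetical usage: map 1 MB of frame buffer memory at physical
    // address 0x10000000. PLACE_EXACT and both addresses are placeholders.
    struct vm_area *fb = map_contiguous_memory(&kernel_address_space,
        0xc0000000, 0x100000, PLACE_EXACT, "frame buffer",
        AREA_WRITABLE, 0x10000000);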
//
// This is always called with the address space lock held, so the area is
// guaranteed not to change. Returns 1 if it successfully satisfied the fault, 0
// if it failed for some reason.
//
static int soft_fault(struct vm_address_space *space, const struct vm_area *area,
                      unsigned int address, int is_store)
{
    int got;
    unsigned int page_flags;
    struct vm_page *source_page;
    struct vm_page *dummy_page = 0;
    unsigned int cache_offset;
    struct vm_cache *cache;
    int old_flags;
    int is_cow_page = 0;
    int size_to_read;

    VM_DEBUG("soft fault va %08x %s\n", address, is_store ? "store" : "load");

    // XXX check area protections and fail if this shouldn't be allowed
    if (is_store && (area->flags & AREA_WRITABLE) == 0)
    {
        kprintf("store to read only area %s @%08x\n", area->name, address);
        return 0;
    }

    // Page-aligned offset of the faulting address within the area's backing cache
    cache_offset = PAGE_ALIGN(address - area->low_address + area->cache_offset);
    old_flags = disable_interrupts();
    lock_vm_cache();
    assert(area->cache);

    // Walk from the area's own cache back through its source caches (the
    // copy-on-write chain), looking for a page that satisfies this offset.
    for (cache = area->cache; cache; cache = cache->source)
    {
        VM_DEBUG("searching in cache %p\n", cache);
        source_page = lookup_cache_page(cache, cache_offset);
        if (source_page)
            break;

        if (cache->file && address - area->low_address < area->cache_length)
        {
            VM_DEBUG("reading page from file\n");

            // Read the page from this cache.
            source_page = vm_allocate_page();

            // Insert the page first so, if a collided fault occurs, it will not
            // load a different page (the vm cache lock protects the busy bit)
            source_page->busy = 1;
            insert_cache_page(cache, cache_offset, source_page);
            unlock_vm_cache();
            restore_interrupts(old_flags);

            if (area->cache_length - cache_offset < PAGE_SIZE)
                size_to_read = area->cache_length - cache_offset;
            else
                size_to_read = PAGE_SIZE;

            got = read_file(cache->file, cache_offset,
                            (void*) PA_TO_VA(page_to_pa(source_page)), size_to_read);
            if (got < 0)
            {
                kprintf("failed to read from file\n");
                dec_page_ref(source_page);
                if (dummy_page != 0)
                {
                    disable_interrupts();
                    lock_vm_cache();
                    remove_cache_page(dummy_page);
                    unlock_vm_cache();
                    restore_interrupts(old_flags);
                    dec_page_ref(dummy_page);
                }

                return 0;
            }

            // For BSS, clear out data past the end of the file
            if (size_to_read < PAGE_SIZE)
            {
                memset((char*) PA_TO_VA(page_to_pa(source_page)) + size_to_read, 0,
                       PAGE_SIZE - size_to_read);
            }

            disable_interrupts();
            lock_vm_cache();
            source_page->busy = 0;
            break;
        }

        // Otherwise scan the next cache
        is_cow_page = 1;

        if (cache == area->cache)
        {
            // Insert a dummy page in the top level cache to catch collided faults.
            dummy_page = vm_allocate_page();
            dummy_page->busy = 1;
            insert_cache_page(cache, cache_offset, dummy_page);
        }
    }

    if (source_page == 0)
    {
        assert(dummy_page != 0);

        VM_DEBUG("source page was not found, use empty page\n");

        // No page found, just use the dummy page
        dummy_page->busy = 0;
        source_page = dummy_page;
    }
    else if (is_cow_page)
    {
        // is_cow_page means source_page belongs to another cache.
        assert(dummy_page != 0);
        if (is_store)
        {
            // The dummy page has the contents of the source page copied into
            // it and will be inserted into the top cache (it's not really a
            // dummy page any more).
            memcpy((void*) PA_TO_VA(page_to_pa(dummy_page)),
                (void*) PA_TO_VA(page_to_pa(source_page)),
                PAGE_SIZE);
            VM_DEBUG("write copy page va %08x dest pa %08x source pa %08x\n",
                address, page_to_pa(dummy_page), page_to_pa(source_page));
            source_page = dummy_page;
            dummy_page->busy = 0;
        }
        else
        {
            // We will map in the read-only page from the source cache.
            // Remove the dummy page from this cache (we do not insert
            // the page into this cache, because we don't own the page).
            remove_cache_page(dummy_page);
            dec_page_ref(dummy_page);

            VM_DEBUG("mapping read-only source page va %08x pa %08x\n", address,
                page_to_pa(source_page));
        }
    }

    assert(source_page != 0);

    // Grab a ref because we are going to map this page
    inc_page_ref(source_page);

    unlock_vm_cache();
    restore_interrupts(old_flags);

    // XXX busy wait for page to finish loading
    while (source_page->busy)
        reschedule();

    if (is_store)
        source_page->dirty = 1; // XXX Locking?

    // It's possible two threads will fault on the same VA and end up mapping
    // the page twice. This is fine, because the code above ensures it will
    // be the same page.
    page_flags = PAGE_PRESENT;

    // If the page is clean, we will mark it not writable. This will fault
    // on the next write, allowing us to update the dirty flag.
    if ((area->flags & AREA_WRITABLE) != 0 && (source_page->dirty || is_store))
        page_flags |= PAGE_WRITABLE;

    if (area->flags & AREA_EXECUTABLE)
        page_flags |= PAGE_EXECUTABLE;

    if (space == &kernel_address_space)
        page_flags |= PAGE_SUPERVISOR | PAGE_GLOBAL;

    vm_map_page(space->translation_map, address, page_to_pa(source_page)
        | page_flags);

    return 1;
}
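soft_fault expects its caller to hold the address space lock and to have already resolved the faulting address to an area. A minimal sketch of such a caller, assuming a lookup_area() helper and read-side rwlock calls that do not appear in the excerpt above:

// Sketch of a fault-handler entry point. lookup_area() and the rwlock
// read-side calls are assumed names, not taken from the original source.
int handle_page_fault(struct vm_address_space *space, unsigned int address,
                      int is_store)
{
    const struct vm_area *area;
    int handled = 0;

    rwlock_lock_read(&space->mut);
    area = lookup_area(&space->area_map, address);  // hypothetical helper
    if (area != 0)
        handled = soft_fault(space, area, address, is_store);

    rwlock_unlock_read(&space->mut);
    return handled;
}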