Example #1
void destroy_area(struct vm_address_space *space, struct vm_area *area)
{
    struct vm_cache *cache;
    unsigned int va;
    unsigned int ptentry;

    rwlock_lock_write(&space->mut);
    // Remember the backing cache so its reference can be dropped after the
    // write lock is released below.
    cache = area->cache;

    // Unmap all pages in this area
    for (va = area->low_address; va < area->high_address; va += PAGE_SIZE)
    {
        ptentry = query_translation_map(space->translation_map, va);
        if ((ptentry & PAGE_PRESENT) != 0)
        {
            VM_DEBUG("destroy_area: decrementing page ref for va %08x pa %08x\n",
                    va, PAGE_ALIGN(ptentry));
            dec_page_ref(pa_to_page(ptentry));
        }
    }

    destroy_vm_area(area);
    rwlock_unlock_write(&space->mut);
    if (cache)
        dec_cache_ref(cache);
}
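For contrast, an operation that only inspects the address space needs just the read side of the lock. The sketch below is hypothetical: it assumes the lock API also provides rwlock_lock_read/rwlock_unlock_read alongside the write-side calls used above, and it reuses query_translation_map from the example.

// Hypothetical read-side helper: look up the page table entry for a virtual
// address. Nothing is modified, so the read lock is sufficient here.
static unsigned int lookup_pte(struct vm_address_space *space, unsigned int va)
{
    unsigned int ptentry;

    rwlock_lock_read(&space->mut);
    ptentry = query_translation_map(space->translation_map, va);
    rwlock_unlock_read(&space->mut);

    return ptentry;
}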
Example #2
/**
 * @brief	This function is not called from anywhere and may be removed.
 */
void neue_wlock(neue_t *neue) {

	if (neue) {
		rwlock_lock_write(&(neue->lock));
		//log_pedantic("%20.li granted write lock", thread_get_thread_id());
	}

	return;
}
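A matching unlock helper would presumably look like the sketch below; both the name neue_unlock and the rwlock_unlock_write call are assumptions and may differ in the actual codebase.

/**
 * @brief	Hypothetical counterpart to neue_wlock(); releases the write lock.
 */
void neue_unlock(neue_t *neue) {

	if (neue) {
		rwlock_unlock_write(&(neue->lock));
		//log_pedantic("%20.li released write lock", thread_get_thread_id());
	}

	return;
}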
Example #3
// This area is wired by default and does not take page faults.
// The pages for this area should already have been allocated: this will not
// mark them as such. The area created by this will not be backed by a
// vm_cache or vm_backing_store.
struct vm_area *map_contiguous_memory(struct vm_address_space *space, unsigned int address,
                                      unsigned int size, enum placement place,
                                      const char *name, unsigned int area_flags,
                                      unsigned int phys_addr)
{
    struct vm_area *area;
    unsigned int page_flags;
    unsigned int offset;

    area_flags |= AREA_WIRED;

    rwlock_lock_write(&space->mut);
    area = create_vm_area(&space->area_map, address, size, place, name, area_flags);
    if (area == 0)
    {
        kprintf("create area failed\n");
        goto error1;
    }

    area->cache = 0;

    page_flags = PAGE_PRESENT;

    // We do not do dirty page tracking on these areas, as this is expected to
    // be device memory. Mark pages writable by default if the area is writable.
    if ((area_flags & AREA_WRITABLE) != 0)
        page_flags |= PAGE_WRITABLE;

    if (area->flags & AREA_EXECUTABLE)
        page_flags |= PAGE_EXECUTABLE;

    if (space == &kernel_address_space)
        page_flags |= PAGE_SUPERVISOR | PAGE_GLOBAL;

    // Map the pages
    for (offset = 0; offset < size; offset += PAGE_SIZE)
    {
        vm_map_page(space->translation_map, area->low_address + offset,
                    (phys_addr + offset) | page_flags);
    }

error1:
    rwlock_unlock_write(&space->mut);

    return area;
}
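As a usage sketch, a driver might map a small block of device registers into the kernel address space like this. Only AREA_WRITABLE, AREA_WIRED, PAGE_SIZE, and kernel_address_space come from the example above; PLACE_EXACT, the function name, and the addresses are illustrative assumptions.

// Hypothetical caller: map a four-page device register window. The physical
// and virtual addresses below are made up for illustration.
struct vm_area *map_device_registers(void)
{
    return map_contiguous_memory(&kernel_address_space,
                                 0xffff0000,        // requested virtual address
                                 4 * PAGE_SIZE,     // size of the register window
                                 PLACE_EXACT,       // assumed placement constant
                                 "device_regs",
                                 AREA_WRITABLE,     // AREA_WIRED is added internally
                                 0x12000000);       // physical address of the device
}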
Example #4
struct vm_area *create_area(struct vm_address_space *space, unsigned int address,
                            unsigned int size, enum placement place,
                            const char *name, unsigned int flags,
                            struct vm_cache *cache, unsigned int cache_offset)
{
    struct vm_area *area;
    unsigned int fault_addr;

    // Anonymous area: create a cache if none is specified.
    if (cache == 0)
        cache = create_vm_cache(0);

    rwlock_lock_write(&space->mut);
    area = create_vm_area(&space->area_map, address, size, place, name, flags);
    if (area == 0)
    {
        kprintf("create area failed\n");
        goto error1;
    }

    area->cache = cache;
    area->cache_offset = cache_offset;
    area->cache_length = size;
    // For wired areas, touch every page now via soft_fault so the region is
    // fully populated and never takes a page fault later.
    if (flags & AREA_WIRED)
    {
        for (fault_addr = area->low_address; fault_addr < area->high_address;
                fault_addr += PAGE_SIZE)
        {
            if (!soft_fault(space, area, fault_addr, 1))
                panic("create_area: soft fault failed");
        }
    }

error1:
    rwlock_unlock_write(&space->mut);

    return area;
}
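A usage sketch: carving out an anonymous, wired, writable region. Passing a null cache lets create_area allocate a fresh vm_cache, and AREA_WIRED makes the loop above pre-fault every page. PLACE_SEARCH_DOWN and the helper name are assumptions for illustration.

// Hypothetical caller: allocate a wired anonymous region whose pages are all
// populated before the function returns.
struct vm_area *alloc_wired_region(struct vm_address_space *space, unsigned int size)
{
    return create_area(space,
                       0,                     // no address hint; let the kernel choose
                       size,
                       PLACE_SEARCH_DOWN,     // assumed placement constant
                       "wired_region",
                       AREA_WIRED | AREA_WRITABLE,
                       0,                     // no cache: an anonymous one is created
                       0);                    // cache offset
}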