Example #1
static dma_addr_t alpha_noop_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
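	/* "noop" DMA mapping: the bus address is simply the page's physical address plus the offset */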
	return page_to_pa(page) + offset;
}
Example #2
int pgt_map_mmap_page(struct task_page_table *table,
		struct page *page, unsigned long user_addr)
{
	/*
	 * For an mmap page we need to map a full PAGE_SIZE worth of PTEs
	 * at a time to simplify later lookups; on ARM, for example, that
	 * means mapping 4M rather than 1M.
	 */
	unsigned long pte;
	struct page *pte_page;

	pte_page = pgt_get_pte_page(table->pde_base, user_addr);
	if (!pte_page) {
		pte_page = pgt_map_new_pde_entry(&table->mmap_list,
				table->pde_base, user_addr);
		if (!pte_page)
			return -ENOMEM;
	}

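	/* locate the PTE slot for user_addr inside this PTE page */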
	pte = pgt_get_pte_entry_addr(page_to_va(pte_page), user_addr);

	/* update the free entry count */
	pte_page->pinfo++;
	mmu_create_pte_entry(pte, page_to_pa(page), user_addr);
	page_set_map_address(page, user_addr);

	return 0;
}
Example #3
void vm_map_page(struct vm_translation_map *map, unsigned int va, unsigned int pa)
{
    int vpindex = va / PAGE_SIZE;
    int pgdindex = vpindex / 1024;
    int pgtindex = vpindex % 1024;
    unsigned int *pgdir;
    unsigned int *pgtbl;
    struct list_node *other_map;
    unsigned int new_pgt;
    int old_flags;

    if (va >= KERNEL_BASE)
    {
        // Map into kernel space
        old_flags = acquire_spinlock_int(&kernel_space_lock);

        // The page tables for kernel space are shared by all page directories.
        // Check the first page directory to see if this is present. If not,
        // allocate a new one and stick it into all page directories.
        pgdir = (unsigned int*) PA_TO_VA(kernel_map.page_dir);
        if ((pgdir[pgdindex] & PAGE_PRESENT) == 0)
        {
            new_pgt = page_to_pa(vm_allocate_page()) | PAGE_PRESENT;
            list_for_each(&map_list, other_map, struct list_node)
            {
                pgdir = (unsigned int*) PA_TO_VA(((struct vm_translation_map*)other_map)->page_dir);
                pgdir[pgdindex] = new_pgt;
            }
        }
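        // ... (rest of the function omitted in this excerpt)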
Example #4
static int pgt_map_normal_memory(struct task_page_table *table,
		struct list_head *mem_list, unsigned long map_base)
{
	struct page *page;
	unsigned long base = map_base;
	unsigned long pte_end = 0, pte = 0;
	struct list_head *list = list_next(mem_list);

	if (!map_base)
		return -EINVAL;

	while (list != mem_list) {
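		/*
		 * Map each page from the list at consecutive virtual
		 * addresses; when the current run of PTE slots is used
		 * up, look up the PTE address for the next chunk.
		 */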
		if (pte == pte_end) {
			pte = pgt_get_mapped_pte_addr(table, map_base);
			if (!pte)
				return -ENOMEM;

			pte_end = min_align(pte + PTES_PER_PDE, PTES_PER_PDE);
		}

		page = list_to_page(list);
		mmu_create_pte_entry(pte, page_to_pa(page), base);
		page_set_map_address(page, base);
		base += PAGE_SIZE;

		pte += sizeof(unsigned long);
		list = list_next(list);
	}

	return 0;
}
Example #5
/* temporary implementation, TBD */
unsigned long
pgt_map_temp_page(struct task_page_table *table, struct page *page)
{
	unsigned long pte_base =
		table->pgt_temp_buffer.tbuf_pte_base;

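	/*
	 * Point the reserved temp-buffer PTE at this page; the mapping
	 * always lives at the fixed KERNEL_TEMP_BUFFER_BASE address.
	 */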
	mmu_create_pte_entry(pte_base, page_to_pa(page),
			KERNEL_TEMP_BUFFER_BASE);

	return KERNEL_TEMP_BUFFER_BASE;
}
Example #6
static int __pgt_map_page(struct task_page_table *table,
		struct page *page, unsigned long user_addr)
{
	unsigned long pte_addr = 0;

	pte_addr = pgt_get_mapped_pte_addr(table, user_addr);
	if (!pte_addr)
		return -ENOMEM;

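	/* write the PTE and record where this page is mapped */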
	mmu_create_pte_entry(pte_addr, page_to_pa(page), user_addr);
	page_set_map_address(page, user_addr);

	return 0;
}
Example #7
struct vm_translation_map *create_translation_map(void)
{
    struct vm_translation_map *map;
    int old_flags;

    map = slab_alloc(&translation_map_slab);
    map->page_dir = page_to_pa(vm_allocate_page());

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    // Copy kernel page tables into new page directory
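    // (the 256 directory entries starting at index 768, which map kernel space)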
    memcpy((unsigned int*) PA_TO_VA(map->page_dir) + 768,
           (unsigned int*) PA_TO_VA(kernel_map.page_dir) + 768,
           256 * sizeof(unsigned int));

    map->asid = next_asid++;
    map->lock = 0;

    list_add_tail(&map_list, (struct list_node*) map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    return map;
}
Example #8
static struct page *
pgt_map_new_pde_entry(struct pte_cache_list *clist,
		unsigned long pde_base,
		unsigned long user_address)
{
	int i;
	struct page *page;
	unsigned long pte_free_base;
	unsigned long pde;
	unsigned long user_base;

	user_base = min_align(user_address, MAP_SIZE_PER_PAGE);
	pde = pgt_get_pde_entry_addr(pde_base, user_base);

	/* allocate new memory for the PTE page table */
	page = alloc_new_pte_page(clist);
	if (!page)
		return NULL;

	pte_free_base = page_to_pa(page);

	/*
	 * A PDE covers 4M, so one 4K page holds the PTE table for a
	 * single PDE entry; on ARM a PDE only covers 1M, so one 4K
	 * page can back four PDE entries.
	 */
	for (i = 0; i < PTE_TABLE_PER_PAGE; i++) {
		mmu_create_pde_entry(pde, pte_free_base, user_base);
		pde += PDE_ENTRY_SIZE;
		pte_free_base += PTE_TABLE_SIZE;
		user_base += PDE_MAP_SIZE;
	}

	page_set_pdata(page, user_base);
	page->pinfo = 0;

	return page;
}
Example #9
//
// This is always called with the address space lock held, so the area is
// guaranteed not to change. Returns 1 if it successfully satisfied the fault, 0
// if it failed for some reason.
//
static int soft_fault(struct vm_address_space *space, const struct vm_area *area,
                      unsigned int address, int is_store)
{
    int got;
    unsigned int page_flags;
    struct vm_page *source_page;
    struct vm_page *dummy_page = 0;
    unsigned int cache_offset;
    struct vm_cache *cache;
    int old_flags;
    int is_cow_page = 0;
    int size_to_read;

    VM_DEBUG("soft fault va %08x %s\n", address, is_store ? "store" : "load");

    // XXX check area protections and fail if this shouldn't be allowed
    if (is_store && (area->flags & AREA_WRITABLE) == 0)
    {
        kprintf("store to read only area %s @%08x\n", area->name, address);
        return 0;
    }

    cache_offset = PAGE_ALIGN(address - area->low_address + area->cache_offset);
    old_flags = disable_interrupts();
    lock_vm_cache();
    assert(area->cache);

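    // Walk the cache chain from the area's own cache back through its source
    // caches, looking for a page that already holds this offset.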
    for (cache = area->cache; cache; cache = cache->source)
    {
        VM_DEBUG("searching in cache %p\n", cache);
        source_page = lookup_cache_page(cache, cache_offset);
        if (source_page)
            break;

        if (cache->file && address - area->low_address < area->cache_length)
        {
            VM_DEBUG("reading page from file\n");

            // Read the page from this cache.
            source_page = vm_allocate_page();

            // Insert the page first so, if a collided fault occurs, it will not
            // load a different page (the vm cache lock protects the busy bit)
            source_page->busy = 1;
            insert_cache_page(cache, cache_offset, source_page);
            unlock_vm_cache();
            restore_interrupts(old_flags);

            if (area->cache_length - cache_offset < PAGE_SIZE)
                size_to_read = area->cache_length - cache_offset;
            else
                size_to_read = PAGE_SIZE;

            got = read_file(cache->file, cache_offset,
                            (void*) PA_TO_VA(page_to_pa(source_page)), size_to_read);
            if (got < 0)
            {
                kprintf("failed to read from file\n");
                dec_page_ref(source_page);
                if (dummy_page != 0)
                {
                    disable_interrupts();
                    lock_vm_cache();
                    remove_cache_page(dummy_page);
                    unlock_vm_cache();
                    restore_interrupts(old_flags);
                    dec_page_ref(dummy_page);
                }

                return 0;
            }

            // For BSS, clear out data past the end of the file
            if (size_to_read < PAGE_SIZE)
            {
                memset((char*) PA_TO_VA(page_to_pa(source_page)) + size_to_read, 0,
                       PAGE_SIZE - size_to_read);
            }

            disable_interrupts();
            lock_vm_cache();
            source_page->busy = 0;
            break;
        }

        // Otherwise scan the next cache
        is_cow_page = 1;

        if (cache == area->cache)
        {
            // Insert a dummy page in the top level cache to catch collided faults.
            dummy_page = vm_allocate_page();
            dummy_page->busy = 1;
            insert_cache_page(cache, cache_offset, dummy_page);
        }
    }

    if (source_page == 0)
    {
        assert(dummy_page != 0);

        VM_DEBUG("source page was not found, use empty page\n");

        // No page found, just use the dummy page
        dummy_page->busy = 0;
        source_page = dummy_page;
    }
    else if (is_cow_page)
    {
        // is_cow_page means source_page belongs to another cache.
        assert(dummy_page != 0);
        if (is_store)
        {
            // The dummy page gets the contents of the source page copied into
            // it and will be inserted into the top cache (it's not really a
            // dummy page any more).
            memcpy((void*) PA_TO_VA(page_to_pa(dummy_page)),
                (void*) PA_TO_VA(page_to_pa(source_page)),
                PAGE_SIZE);
            VM_DEBUG("write copy page va %08x dest pa %08x source pa %08x\n",
                address, page_to_pa(dummy_page), page_to_pa(source_page));
            source_page = dummy_page;
            dummy_page->busy = 0;
        }
        else
        {
            // We will map in the read-only page from the source cache.
            // Remove the dummy page from this cache (we do not insert
            // the page into this cache, because we don't own that page).
            remove_cache_page(dummy_page);
            dec_page_ref(dummy_page);

            VM_DEBUG("mapping read-only source page va %08x pa %08x\n", address,
                page_to_pa(source_page));
        }
    }

    assert(source_page != 0);

    // Grab a ref because we are going to map this page
    inc_page_ref(source_page);

    unlock_vm_cache();
    restore_interrupts(old_flags);

    // XXX busy wait for page to finish loading
    while (source_page->busy)
        reschedule();

    if (is_store)
        source_page->dirty = 1; // XXX Locking?

    // It's possible two threads will fault on the same VA and end up mapping
    // the page twice. This is fine, because the code above ensures it will
    // be the same page.
    page_flags = PAGE_PRESENT;

    // If the page is clean, we will mark it not writable. This will fault
    // on the next write, allowing us to update the dirty flag.
    if ((area->flags & AREA_WRITABLE) != 0 && (source_page->dirty || is_store))
        page_flags |= PAGE_WRITABLE;

    if (area->flags & AREA_EXECUTABLE)
        page_flags |= PAGE_EXECUTABLE;

    if (space == &kernel_address_space)
        page_flags |= PAGE_SUPERVISOR | PAGE_GLOBAL;

    vm_map_page(space->translation_map, address, page_to_pa(source_page)
        | page_flags);

    return 1;
}
Example #10
int init_task_page_table(struct task_page_table *table)
{
	unsigned long base = 0;
	struct page *page;
	struct pgt_temp_buffer *tb = &table->pgt_temp_buffer;

	if (!table)
		return -EINVAL;

	/*
	 * If the page table has already been allocated we only
	 * reinitialize the PDE and PTE page tables below; otherwise
	 * allocate them here first.
	 */
	if (!table->pde_base) {
		memset((char *)table, 0,
			sizeof(struct task_page_table));

		page = alloc_new_pde();
		if (!page) {
			kernel_error("No memory for task PDE\n");
			return -ENOMEM;
		}

		table->pde_base = page_to_va(page);
		table->pde_base_pa = page_to_pa(page);

		/*
		 * init temp buffer
		 */
		tb->tbuf_pte_page = request_pages(1, GFP_PGT);
		if (!tb->tbuf_pte_page) {
			release_pde(table->pde_base);
			return -ENOMEM;
		}

		tb->tbuf_pte_base =
			page_to_va(tb->tbuf_pte_page);
		tb->tbuf_page_nr = PTES_PER_PDE;
	}

	/*
	 * Doing a memset here would cost too much time;
	 * this still needs to be fixed.
	 */
	mmu_copy_kernel_pde(table->pde_base);
	init_pte(table);

	/*
	 * install the temp buffer's PTE page into the new PDE
	 */

	base = pgt_get_pde_entry_addr(table->pde_base,
			KERNEL_TEMP_BUFFER_BASE);

	mmu_create_pde_entry(base,
			page_to_pa(tb->tbuf_pte_page),
			KERNEL_TEMP_BUFFER_BASE);

	table->mmap_current_base = PROCESS_USER_MMAP_BASE;

	return 0;
}