Example #1
/** Initializes page tables.
 *
 * A 1:1 virtual-to-physical mapping is created in the kernel address space.
 * A mapping for the exception vector table is also created.
 */
void page_arch_init(void)
{
	int flags = PAGE_CACHEABLE;
	page_mapping_operations = &pt_mapping_operations;

	page_table_lock(AS_KERNEL, true);
	
	uintptr_t cur;

	/* Kernel identity mapping */
	for (cur = PHYSMEM_START_ADDR;
	    cur < min(config.identity_size, config.physmem_end);
	    cur += FRAME_SIZE)
		page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
	
#ifdef HIGH_EXCEPTION_VECTORS
	/* Create mapping for exception table at high offset */
	uintptr_t ev_frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_NONE);
	page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, ev_frame, flags);
#else
#error "Only high exception vector supported now"
#endif

	page_table_unlock(AS_KERNEL, true);
	
	as_switch(NULL, AS_KERNEL);
	
	boot_page_table_free();
}
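
The loop maps every physical frame cur to the kernel virtual address PA2KA(cur). As a reference point, here is a minimal sketch of how such a translation pair is typically defined, assuming a simple constant offset between the physical and kernel address spaces; the offset value is illustrative, not taken from the snippet:

/* Illustrative only: the offset is port-specific; 0x80000000 is an
 * assumed value, not necessarily this port's. Valid only for addresses
 * inside the identity-mapped range. */
#define KERNEL_BASE_OFFSET  0x80000000UL

#define PA2KA(addr)  (((uintptr_t) (addr)) + KERNEL_BASE_OFFSET)
#define KA2PA(addr)  (((uintptr_t) (addr)) - KERNEL_BASE_OFFSET)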
Example #2
void page_arch_init(void)
{
	if (config.cpu_active > 1) {
		write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
		return;
	}

	uintptr_t cur;
	unsigned int identity_flags =
	    PAGE_GLOBAL | PAGE_CACHEABLE | PAGE_EXEC | PAGE_WRITE | PAGE_READ;
		
	page_mapping_operations = &pt_mapping_operations;
		
	page_table_lock(AS_KERNEL, true);
		
	/*
	 * PA2KA(identity) mapping for all low-memory frames.
	 */
	for (cur = 0; cur < min(config.identity_size, config.physmem_end);
	    cur += FRAME_SIZE)
		page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
		
	page_table_unlock(AS_KERNEL, true);
		
	exc_register(14, "page_fault", true, (iroutine_t) page_fault);
	write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
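
write_cr3() is not shown in the snippet. On the 32-bit x86 port it boils down to a single privileged move into the CR3 register, which both switches the page-table root and flushes non-global TLB entries. A minimal sketch, assuming GCC-style inline assembly; the kernel's actual definition may differ:

static inline void write_cr3(uintptr_t val)
{
	/* Loading CR3 installs the new page-table root and flushes
	 * non-global TLB entries as a side effect. */
	asm volatile ("movl %0, %%cr3" :: "r" (val) : "memory");
}

Note how the early return for secondary CPUs (cpu_active > 1) reuses this: the kernel page table is built once by the bootstrap CPU, and every other CPU only needs to load CR3 to start using it.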
Example #3
/** Queue a kernel non-identity page for deferred unmapping.
 *
 * @param[in] page	Non-identity page to be unmapped in a later batch.
 */
static void km_unmap_deferred(uintptr_t page)
{
	page_table_lock(AS_KERNEL, true);

	if (deferred_pages == DEFERRED_PAGES_MAX) {
		(void) km_flush_deferred();
		deferred_pages = 0;
	}

	deferred_page[deferred_pages++] = page;

	page_table_unlock(AS_KERNEL, true);
}
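
The snippet uses deferred_page[], deferred_pages and km_flush_deferred() without showing them. Hypothetical declarations consistent with the usage above; the capacity and the flush function's return type are assumptions:

/* Assumed batch capacity; the real constant may differ. */
#define DEFERRED_PAGES_MAX  20

static size_t deferred_pages;                        /* pages queued so far */
static uintptr_t deferred_page[DEFERRED_PAGES_MAX];  /* queued page addresses */

/* Flush all queued pages in one batch; the caller resets the counter. */
static size_t km_flush_deferred(void);

Both the counter and the array are only ever touched under page_table_lock(AS_KERNEL, true), which is what makes the unsynchronized static state safe.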
Example #4
static void km_unmap_aligned(uintptr_t vaddr, size_t size)
{
	uintptr_t offs;
	ipl_t ipl;

	ASSERT(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr);
	ASSERT(ALIGN_UP(size, PAGE_SIZE) == size);

	page_table_lock(AS_KERNEL, true);

	ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);

	for (offs = 0; offs < size; offs += PAGE_SIZE)
		page_mapping_remove(AS_KERNEL, vaddr + offs);

	tlb_invalidate_asid(ASID_KERNEL);

	as_invalidate_translation_cache(AS_KERNEL, 0, -1);
	tlb_shootdown_finalize(ipl);
	page_table_unlock(AS_KERNEL, true);

	km_page_free(vaddr, size);
}
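
The lock/shootdown/invalidate/finalize bracket above is the general pattern for removing kernel mappings; here it invalidates the whole kernel ASID because many pages may be affected. For a single page, the same bracket can target just that page. A hypothetical helper following the same API (km_unmap_one is not part of the snippet):

static void km_unmap_one(uintptr_t page)
{
	ipl_t ipl;

	page_table_lock(AS_KERNEL, true);

	/* Announce the shootdown for exactly one page of the kernel ASID. */
	ipl = tlb_shootdown_start(TLB_INVL_PAGES, ASID_KERNEL, page, 1);

	page_mapping_remove(AS_KERNEL, page);

	/* Invalidate only the affected page locally, then finish the
	 * shootdown so other CPUs do the same. */
	tlb_invalidate_pages(ASID_KERNEL, page, 1);
	as_invalidate_translation_cache(AS_KERNEL, page, 1);
	tlb_shootdown_finalize(ipl);

	page_table_unlock(AS_KERNEL, true);
}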
Example #5
static uintptr_t
km_map_aligned(uintptr_t paddr, size_t size, unsigned int flags)
{
	uintptr_t vaddr;
	size_t align;
	uintptr_t offs;

	ASSERT(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr);
	ASSERT(ALIGN_UP(size, FRAME_SIZE) == size);

	/* Enforce natural or at least PAGE_SIZE alignment. */
	align = ispwr2(size) ? size : ((size_t) 1 << (fnzb(size) + 1));
	vaddr = km_page_alloc(size, max(PAGE_SIZE, align));

	page_table_lock(AS_KERNEL, true);
	for (offs = 0; offs < size; offs += PAGE_SIZE) {
		page_mapping_insert(AS_KERNEL, vaddr + offs, paddr + offs,
		    flags);
	}
	page_table_unlock(AS_KERNEL, true);
	
	return vaddr;
}
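
The alignment rule deserves a worked example. Assuming fnzb() yields the index of the most significant set bit, a non-power-of-two size is rounded up to the next power of two:

/* Worked illustration:
 *
 *   size = 0x3000 (12 KiB)  -> not a power of two,
 *       fnzb(0x3000) == 13, align = (size_t) 1 << 14 == 0x4000 (16 KiB);
 *   size = 0x4000 (16 KiB)  -> already a power of two, align = size.
 *
 * max(PAGE_SIZE, align) then guarantees at least page alignment in
 * either case. */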
Example #6
void page_arch_init(void)
{
	int flags = PAGE_CACHEABLE | PAGE_EXEC;
	page_mapping_operations = &pt_mapping_operations;
	
	page_table_lock(AS_KERNEL, true);
	
	/* Kernel identity mapping */
	// FIXME: KA2PA(config.identity_base) + config.identity_size may
	// overflow if identity_size is too big, in which case the loop
	// bound below wraps around and the comparison with physmem_end
	// no longer limits the mapping.
	for (uintptr_t cur = PHYSMEM_START_ADDR;
	    cur < min(KA2PA(config.identity_base) +
	    config.identity_size, config.physmem_end);
	    cur += FRAME_SIZE)
		page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
	
	page_table_unlock(AS_KERNEL, true);
	as_switch(NULL, AS_KERNEL);
	
	/* Switch MMU to new context table */
	asi_u32_write(ASI_MMUREGS, MMU_CONTEXT_TABLE, KA2PA(as_context_table) >> 4);
}
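
One way to address the FIXME is to compute the loop's upper bound with explicit saturation before entering the loop. A sketch using the snippet's names; the clamping logic is an assumption, not existing kernel code:

	uintptr_t base = KA2PA(config.identity_base);
	uintptr_t end;

	if (config.identity_size > UINTPTR_MAX - base)
		end = UINTPTR_MAX;  /* addition would wrap: clamp instead */
	else
		end = base + config.identity_size;

	/* The identity loop then runs for (cur = PHYSMEM_START_ADDR;
	 * cur < min(end, config.physmem_end); cur += FRAME_SIZE). */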