Example #1
void bt_mmu_switch(bt_pgd_t pgd_h) {
	bt_pgd_t pgd = GET_PGD(pgd_h);
	BT_u32 asid = GET_ASID(pgd_h);
	bt_paddr_t phys = (bt_paddr_t) bt_virt_to_phys(pgd);

	// Only reload the translation table base if it actually differs from the current one.
	if(phys != current_user_ttb()) {
		bt_mmu_switch_ttb(phys, asid);
	}
}
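GET_PGD and GET_ASID are assumed here to unpack a single handle that carries both the page-directory pointer and its ASID. A minimal sketch of such macros, assuming the L1 table is 16KB-aligned so the low bits of the handle are free to carry the 8-bit ASID (the real encoding in the source may differ):

#define GET_PGD(h)	((bt_pgd_t) ((BT_u32) (h) & ~0xFF))	// Hypothetical: strip the ASID bits to recover the pointer.
#define GET_ASID(h)	((BT_u32) (h) & 0xFF)			// Hypothetical: low 8 bits carry the ASID.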
Example #2
void bt_mmu_init(struct bt_mmumap *mmumap) {
	BT_CacheInit(&g_ptCache, MMU_L2TBL_SIZE);	// Create a cache of 1K, 1K aligned page tables.
	// Set up proper kernel page tables, so that the super-sections will always be valid and can be
	// copied directly to the process PGDs on creation.

	bt_pgd_t 	pgd 	= (bt_pgd_t) g_MMUTable;
	BT_u32 		index;

	// Walk every 1MB section from the kernel base (0xC0000000 / 0x00100000 = 0xC00) up to the top of the 4GB space.
	for(index = (0xC0000000 / 0x00100000); index < 0x1000; index++) {
		bt_pte_t pte;
		pte = (bt_pte_t) &kernel_pages[index-0xC00];	// One statically allocated L2 table per kernel section.
		memset(pte, 0, MMU_L2TBL_SIZE);

		// Set up all the pages with an identity mapping for this region.
		bt_paddr_t pa = (index * 0x00100000);
		BT_u32 i;
		for(i = 0; i < 0x00100000 / 4096; i++) {
			BT_u32 section = pgd[index];
			BT_u32 flag = 0;
			if(section) {
				// It must be a mapping to memory, therefore use normal kernel-mode caching.
				//	IO mappings will be added later during driver probes.
				flag = MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_SYSTEM;
			}
			pte[i] = (BT_u32) bt_virt_to_phys(pa+(4096 * i)) | flag;
		}

		// The page table is now valid, so the PGD can point to it for this region.
		// If the region is within the coherent pool, configure it as an uncached section instead.
#ifdef BT_CONFIG_MEM_PAGE_COHERENT_POOL
		bt_kernel_params *params = bt_get_kernel_params();
		bt_paddr_t phys = bt_virt_to_phys(index * 0x00100000);
		if(params->coherent && (phys >= params->coherent && phys < params->coherent + BT_SECTION_ALIGN(BT_CONFIG_MEM_PAGE_COHERENT_LENGTH))) {
			pgd[index] = phys | MMU_SECTION | MMU_SECTION_SYSTEM;
		} else {
			pgd[index] = (bt_paddr_t) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
		}
#else
		pgd[index] = (bt_paddr_t) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
#endif
	}

	bt_mmu_flush_tlb();
}
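The magic numbers in the section loop, restated with named constants (the names are illustrative, not from the source):

#define SECTION_SIZE		0x00100000			/* each L1 entry covers 1MB */
#define KERNEL_BASE		0xC0000000			/* kernel virtual base */
#define FIRST_KERNEL_SECTION	(KERNEL_BASE / SECTION_SIZE)	/* 0xC00 = 3072 */
#define NUM_SECTIONS		0x1000				/* 4GB / 1MB = 4096 L1 entries */
#define PAGES_PER_SECTION	(SECTION_SIZE / 4096)		/* 256 4KB pages per L2 table */

So the loop builds 4096 - 3072 = 1024 L2 tables, one per kernel 1MB section, each describing 256 4KB pages.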
Example #3
void BT_kFree(void *p) {
	// The owning cache pointer is stored in the word immediately before the allocation.
	BT_CACHE **tag = (BT_CACHE **) p;
	tag -= 1;

	BT_CACHE *pCache = *tag;
	if(pCache) {
		BT_CacheFree(pCache, tag);
	} else {
		bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag));
	}
}
Example #4
void bt_mmu_terminate(bt_pgd_t pgd_h) {

	int i;
	bt_pgd_t pgd = GET_PGD(pgd_h);
	bt_pte_t pte;

	bt_mmu_flush_tlb();

	// Release all user page tables (PDEs below the 0xC0000000 kernel split).
	for(i = 0; i < PAGE_DIR(0xC0000000); i++) {
		pte = (bt_pte_t) pgd[i];
		if(pte) {
			BT_CacheFree(&g_ptCache, (void *) ((BT_u32) pte & MMU_PTE_ADDRESS));
		}
	}

	bt_page_free(bt_virt_to_phys(pgd), MMU_L1TBL_SIZE);
}
Example #5
void BT_kFree(void *p) {

	if(!p) return;

	struct MEM_TAG *tag = (struct MEM_TAG *) p;
	tag -= 1;

	// The trailing guard magic sits immediately after the user region (tag->size bytes past the header).
	struct MAGIC_TAG *postmem = (struct MAGIC_TAG *) ((BT_u8 *) (tag+1) + tag->size);

	// A corrupted guard before or after the block indicates a heap overrun or a double free.
	if(!verify_tag(&tag->tag_0) || !verify_tag(&tag->tag_1) || !verify_tag(postmem)) {
		BT_kPrint("Kernel Panic - Corrupted FREE");
		while(1) {
			;
		}
	}

	BT_CACHE *pCache = tag->pCache;
	if(pCache) {
		BT_CacheFree(pCache, tag);
	} else {
		bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag), tag->size+sizeof(struct MEM_TAG)+sizeof(struct MAGIC_TAG));
	}
}
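For context, this free path implies a block layout of the form [MEM_TAG header | user data of tag->size bytes | trailing MAGIC_TAG guard], with the returned pointer sitting just past the header. A hedged sketch of what the matching allocation side could look like (example_alloc, allocate_bytes and write_magic are hypothetical names, not part of the source):

void *example_alloc(BT_u32 size) {				// Illustrative only, not the real BT_kMalloc.
	BT_u32 total = size + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG);
	struct MEM_TAG *tag = allocate_bytes(total);		// Hypothetical: slab cache or page allocator.
	if(!tag) return NULL;

	tag->size = size;
	tag->pCache = NULL;					// NULL means the block came from the page allocator.
	write_magic(&tag->tag_0);				// Hypothetical guard initialisers.
	write_magic(&tag->tag_1);
	write_magic((struct MAGIC_TAG *) ((BT_u8 *) (tag + 1) + size));

	return tag + 1;						// BT_kFree() walks back from this pointer.
}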
Example #6
int bt_mmu_map(bt_pgd_t pgd_h, bt_paddr_t pa, bt_vaddr_t va, BT_u32 size, int type) {
	BT_u32 flag = 0;
	bt_pte_t pte;
	bt_paddr_t pg;
	BT_u32 ng = 0;
	bt_pgd_t pgd = GET_PGD(pgd_h);

	if((va + size) < 0xC0000000) {
		ng = MMU_PTE_NG;	// User-space mapping: mark the entries non-global so TLB entries are tagged with the ASID.
	}

	pa = BT_PAGE_TRUNC(pa);		// Ensure correct alignments.
	va = BT_PAGE_TRUNC(va);
	size = BT_PAGE_ALIGN(size);

	switch(type) {				// Build up the ARM MMU flags from BT page types.
	case BT_PAGE_UNMAP:
		flag = 0;
		break;

	case BT_PAGE_READ:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RO);
		break;

	case BT_PAGE_WRITE:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RW);
		break;

	case BT_PAGE_SYSTEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_SYSTEM);
		break;

	case BT_PAGE_IOMEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_SYSTEM);
		break;

	default:
		//do_kernel_panic("bt_mmu_map");
		return -1;
	}

	bt_mmu_flush_tlb();

	while(size > 0) {
		if(pte_present(pgd, va)) {
			pte = virt_to_pte(pgd, va);		// Get the page table from PGD.
		} else {
			// If it's a section or super-section then return an error! (Kernel coherent pool?).
			pg = (bt_paddr_t) BT_CacheAlloc(&g_ptCache);
			if(!pg) {
				return -1;
			}

			memset((void *)pg, 0, MMU_L2TBL_SIZE);
			pte = (bt_pte_t) pg;
			pgd[PAGE_DIR(va)] = (BT_u32) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
		}

		pte[PAGE_TABLE(va)] = (BT_u32) pa | flag | ng;

		pa += BT_PAGE_SIZE;
		va += BT_PAGE_SIZE;
		size -= BT_PAGE_SIZE;
	}

	bt_mmu_flush_tlb();

	return 0;
}
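A usage sketch, mapping one page of device registers as uncached I/O memory (the addresses and the pgd_h handle are illustrative, not from the source):

bt_paddr_t regs_phys = 0x43C00000;		// Hypothetical peripheral base.
bt_vaddr_t regs_virt = 0x40000000;		// Hypothetical user virtual address.
if(bt_mmu_map(pgd_h, regs_phys, regs_virt, BT_PAGE_SIZE, BT_PAGE_IOMEM) < 0) {
	// Mapping failed: either no L2 table could be allocated or the page type was invalid.
}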