Example #1
void *BT_kMalloc(BT_u32 ulSize) {

	void *p;

	if(!ulSize) {
		return NULL;
	}

	ulSize = (ulSize + 3) & 0xFFFFFFFC;	// Round the request up to a 4-byte boundary.

	BT_CACHE *pCache = BT_GetSuitableCache(ulSize+sizeof(struct MEM_TAG)+sizeof(struct MAGIC_TAG));
	if(pCache) {
		p = BT_CacheAlloc(pCache);
	} else {
		bt_paddr_t phys = bt_page_alloc(ulSize+sizeof(struct MEM_TAG)+sizeof(struct MAGIC_TAG));
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	if(!p) {
		return NULL;
	}

	/*
	 *	Before the allocated memory we place a pointer to the pCache.
	 *	This will be 0 in the case of a page allocation!
	 */

	struct MEM_TAG *tag = (struct MEM_TAG *) p;
	tag->pCache = pCache;
	tag->size = ulSize;

	// Guard words on both sides of the user region catch heap corruption.
	set_magic(&tag->tag_0);
	set_magic(&tag->tag_1);
	struct MAGIC_TAG *mempost = (struct MAGIC_TAG *) ((BT_u8 *) (tag + 1) + ulSize);
	set_magic(mempost);

	return ((void *) (tag + 1));
}
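
For context, here is a minimal sketch of the free path this tag layout implies. It is not the project's actual BT_kFree: check_magic is assumed here as the counterpart of set_magic, and BT_CacheFree/bt_page_free are assumed to release a cache block and a page run respectively.

void BT_kFree(void *p) {
	if(!p) {
		return;
	}

	// Step back over the header that BT_kMalloc placed before the user region.
	struct MEM_TAG *tag = (struct MEM_TAG *) p - 1;

	// Verify the guard words on both sides of the user region
	// (check_magic is assumed to be the counterpart of set_magic).
	check_magic(&tag->tag_0);
	check_magic(&tag->tag_1);
	check_magic((struct MAGIC_TAG *) ((BT_u8 *) p + tag->size));

	if(tag->pCache) {
		BT_CacheFree(tag->pCache, tag);		// Block came from a slab cache.
	} else {
		// Block came straight from the page allocator; release the whole
		// region including the header and the trailing guard word.
		bt_page_free(bt_virt_to_phys(tag),
				tag->size + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG));
	}
}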
Example #2
void *BT_kMalloc(BT_u32 ulSize) {

	void *p;

	BT_CACHE *pCache = BT_GetSuitableCache(ulSize + sizeof(BT_CACHE *));
	if(pCache) {
		p = BT_CacheAlloc(pCache);
	} else {
		// Check the physical allocation itself: bt_phys_to_virt(0) need not be NULL.
		bt_paddr_t phys = bt_page_alloc(ulSize + sizeof(BT_CACHE *));
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	if(!p) {
		return NULL;
	}

	/*
	 *	Before the allocated memory we place a pointer to the pCache.
	 *	This will be 0 in the case of a page allocation!
	 */

	BT_CACHE **tag = (BT_CACHE **) p;
	*tag = pCache;

	return ((void *) (tag + 1));
}
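
The free-side sketch for this older layout shows why Example #1's richer header is useful: with only a BT_CACHE pointer stored, the page-allocation path has no record of how much to release. Again hypothetical, with BT_CacheFree assumed as in the sketch above.

void BT_kFree(void *p) {
	BT_CACHE **tag = (BT_CACHE **) p - 1;	// Step back over the stored pointer.

	if(*tag) {
		BT_CacheFree(*tag, tag);	// Cache allocation: the cache knows the block size.
	} else {
		// Page allocation: this layout records no size, so the amount to
		// return to the page allocator must come from somewhere else.
	}
}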
Example #3
int bt_mmu_map(bt_pgd_t pgd_h, bt_paddr_t pa, bt_vaddr_t va, BT_u32 size, int type) {
	BT_u32 flag = 0;
	bt_pte_t pte;
	bt_paddr_t pg;
	BT_u32 ng = 0;
	bt_pgd_t pgd = GET_PGD(pgd_h);

	if((va + size) < 0xC0000000) {		// Entire range lies below the kernel split:
		ng = MMU_PTE_NG;		// mark it non-global (per-process TLB entries).
	}

	pa = BT_PAGE_TRUNC(pa);		// Ensure correct alignments.
	va = BT_PAGE_TRUNC(va);
	size = BT_PAGE_ALIGN(size);

	switch(type) {				// Build up the ARM MMU flags from BT page types.
	case BT_PAGE_UNMAP:
		flag = 0;
		break;

	case BT_PAGE_READ:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RO);
		break;

	case BT_PAGE_WRITE:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RW);
		break;

	case BT_PAGE_SYSTEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_SYSTEM);
		break;

	case BT_PAGE_IOMEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_SYSTEM);
		break;

	default:
		//do_kernel_panic("bt_mmu_map");
		return -1;
	}

	bt_mmu_flush_tlb();

	while(size > 0) {
		if(pte_present(pgd, va)) {
			pte = virt_to_pte(pgd, va);		// Get the page table from PGD.
		} else {
			// If it's a section or super-section then return an error! - (Kernel coherent pool?).
			pg = (bt_paddr_t) BT_CacheAlloc(&g_ptCache);
			if(!pg) {
				return -1;
			}

			memset((void *)pg, 0, MMU_L2TBL_SIZE);
			pte = (bt_pte_t) pg;
			pgd[PAGE_DIR(va)] = (BT_u32) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
		}

		pte[PAGE_TABLE(va)] = (BT_u32) pa | flag | ng;

		pa += BT_PAGE_SIZE;
		va += BT_PAGE_SIZE;
		size -= BT_PAGE_SIZE;
	}

	bt_mmu_flush_tlb();

	return 0;
}
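
A hypothetical caller, for illustration only: the function name, the addresses, and the pgd handle are all made up, and BT_PAGE_IOMEM is chosen because the switch above makes it a present, uncached, unbuffered, system-only mapping.

// Map one page of a (made-up) peripheral's registers for kernel access.
int map_uart_registers(bt_pgd_t pgd) {
	bt_paddr_t pa = 0x10009000;	// Hypothetical device base (physical).
	bt_vaddr_t va = 0xF0009000;	// Hypothetical kernel virtual address.

	// BT_PAGE_IOMEM keeps the mapping uncached and unbuffered, which is
	// what memory-mapped registers need.
	return bt_mmu_map(pgd, pa, va, BT_PAGE_SIZE, BT_PAGE_IOMEM);
}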