/**
 * Free a block previously returned by the kernel allocator.
 *
 * The allocator stores a single back-pointer (BT_CACHE *) immediately
 * before the user pointer: non-NULL means the block came from that slab
 * cache, NULL means it was taken directly from the page allocator.
 *
 * Fix: guard against a NULL argument — the original unconditionally
 * dereferenced the hidden tag word, so BT_kFree(NULL) was undefined
 * behaviour. The richer MEM_TAG variant of BT_kFree in this file already
 * performs this check; this brings the two implementations into line.
 *
 * @param p  pointer returned by BT_kMalloc, or NULL (no-op).
 */
void BT_kFree(void *p) {
	if(!p) {
		return;
	}

	// Step back over the hidden tag word placed before the user data.
	BT_CACHE **tag = (BT_CACHE **) p;
	tag -= 1;

	BT_CACHE *pCache = *tag;
	if(pCache) {
		// Slab-cache allocation: hand the whole tagged block back to its cache.
		BT_CacheFree(pCache, tag);
	} else {
		// Direct page allocation: release the backing physical page(s).
		bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag));
	}
}
/**
 * Allocate a new page directory (MMU L1 translation table) and return its
 * physical address tagged with a freshly assigned ASID in the low bits.
 *
 * The L1 table must be aligned to MMU_L1TBL_SIZE, so the function
 * over-allocates by 2x, picks the first aligned address inside the region,
 * and returns the unused slack on either side to the page allocator.
 *
 * @return  aligned physical address of the directory OR'd with the ASID,
 *          or 0 on allocation failure.
 */
static bt_paddr_t create_pgd(void) {
	bt_paddr_t pg, pgd;

	// Over-allocate by a factor of two so an MMU_L1TBL_SIZE-aligned table
	// is guaranteed to fit somewhere within the region.
	pg = bt_page_alloc(MMU_L1TBL_SIZE * 2);
	if(!pg) {
		return 0;
	}

	// First suitably aligned address within the over-sized region.
	pgd = PGD_ALIGN(pg);

	// Free the unaligned slack before the table (if any) and the leftover
	// tail after it, keeping exactly MMU_L1TBL_SIZE bytes at pgd.
	// (gap + (MMU_L1TBL_SIZE - gap) == MMU_L1TBL_SIZE returned in total.)
	BT_u32 gap = (BT_u32) (pgd - pg);
	if(gap) {
		bt_page_free(pg, gap);
	}

	bt_page_free((pgd + MMU_L1TBL_SIZE), (MMU_L1TBL_SIZE - gap));

	// NOTE(review): g_asid is post-incremented with no wrap-around mask —
	// presumably the ASID counter never exceeds the low alignment bits of
	// pgd; confirm against the architecture's ASID width.
	return pgd | (g_asid++);
}
/**
 * Tear down a user address space: release every user-space page table
 * referenced by the page directory, then release the directory itself.
 *
 * @param pgd_h  page-directory handle (directory address tagged with an
 *               ASID; GET_PGD recovers the usable table pointer).
 */
void bt_mmu_terminate(bt_pgd_t pgd_h) {
	int i;
	bt_pgd_t pgd = GET_PGD(pgd_h);	// strip the handle tag to get the table pointer
	bt_pte_t pte;

	// Invalidate stale translations before the tables disappear.
	bt_mmu_flush_tlb();

	// Release all user page tables — only entries below 0xC0000000
	// (the user portion of the address space) are walked; kernel
	// entries above the split are shared and must not be freed here.
	for(i = 0; i < PAGE_DIR(0xC0000000); i++) {
		pte = (bt_pte_t) pgd[i];
		if(pte) {
			// Mask off the descriptor flag bits to recover the
			// page-table address before returning it to the cache.
			BT_CacheFree(&g_ptCache, (void *) ((BT_u32) pte & MMU_PTE_ADDRESS));
		}
	}

	// Finally release the directory itself (create_pgd keeps exactly
	// MMU_L1TBL_SIZE bytes at the aligned address).
	bt_page_free(bt_virt_to_phys(pgd), MMU_L1TBL_SIZE);
}
void BT_kFree(void *p) { if(!p) return; struct MEM_TAG *tag = (struct MEM_TAG *) p; tag -= 1; struct MAGIC_TAG *postmem = (struct MAGIC_TAG *) ((BT_u8 *) (tag+1) + tag->size); if(!verify_tag(&tag->tag_0) || !verify_tag(&tag->tag_1) || !verify_tag(postmem)) { BT_kPrint("Kernel Panic - Corrupted FREE"); while(1) { ; } } BT_CACHE *pCache = tag->pCache; if(pCache) { BT_CacheFree(pCache, tag); } else { bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag), tag->size+sizeof(struct MEM_TAG)+sizeof(struct MAGIC_TAG)); } }