void BT_kFree(void *p) {
	BT_CACHE **tag = (BT_CACHE **) p;
	tag -= 1;	// Step back to the hidden cache pointer stored in front of the block.

	BT_CACHE *pCache = *tag;
	if(pCache) {
		BT_CacheFree(pCache, tag);	// Small block: hand it back to its slab cache.
	} else {
		bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag));	// Large block: return the pages directly.
	}
}
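/*
 * A minimal sketch (not the project's actual allocator) of the allocation side
 * this simple BT_kFree() pairs with: one BT_CACHE pointer is hidden directly in
 * front of the returned block, holding the owning cache or NULL for a raw page
 * allocation. bt_cache_for_size(), BT_CacheAlloc(), bt_page_alloc() and
 * bt_phys_to_virt() are assumed counterparts of the calls used in the free path.
 */
void *BT_kMalloc(BT_u32 ulSize) {
	BT_u32 total = ulSize + sizeof(BT_CACHE *);			// Room for the hidden tag.

	BT_CACHE **tag;
	BT_CACHE *pCache = bt_cache_for_size(total);		// Hypothetical cache lookup.
	if(pCache) {
		tag = BT_CacheAlloc(pCache);					// Small block from a slab cache.
	} else {
		tag = bt_phys_to_virt(bt_page_alloc(total));	// Large block straight from the page allocator.
	}

	if(!tag) {
		return NULL;
	}

	*tag = pCache;										// NULL here tells BT_kFree() to use bt_page_free().
	return (void *) (tag + 1);							// Hand out the memory just past the tag.
}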
void bt_mmu_terminate(bt_pgd_t pgd_h) {
	int i;
	bt_pgd_t pgd = GET_PGD(pgd_h);
	bt_pte_t pte;

	bt_mmu_flush_tlb();

	// Release all user page tables (every directory entry below the 0xC0000000 kernel split).
	for(i = 0; i < PAGE_DIR(0xC0000000); i++) {
		pte = (bt_pte_t) pgd[i];
		if(pte) {
			BT_CacheFree(&g_ptCache, (void *) ((BT_u32) pte & MMU_PTE_ADDRESS));
		}
	}

	// Finally release the L1 page directory itself.
	bt_page_free(bt_virt_to_phys(pgd), MMU_L1TBL_SIZE);
}
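/*
 * Illustrative only: assumed shapes of the MMU helpers used above, for an
 * ARM-style L1 translation table (4096 entries, 1 MB of virtual space each,
 * kernel mapped from 0xC0000000 upward). The real definitions live in the
 * port's MMU headers and may differ.
 */
#define MMU_L1TBL_SIZE		0x4000					/* 16 KB L1 translation table.                   */
#define MMU_PTE_ADDRESS		0xFFFFFC00				/* Base-address bits of a coarse page-table entry. */
#define PAGE_DIR(virt)		((BT_u32) (virt) >> 20)	/* L1 index covering a virtual address, so        */
													/* PAGE_DIR(0xC0000000) == 3072 user entries.     */
#define GET_PGD(pgd_h)		((bt_pgd_t) (pgd_h))	/* Assumed to unwrap the opaque pgd handle.       */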
void BT_kFree(void *p) {
	if(!p) return;

	struct MEM_TAG *tag = (struct MEM_TAG *) p;
	tag -= 1;	// Step back to the allocation header placed before the block.

	// The trailing guard sits immediately after the user data.
	struct MAGIC_TAG *postmem = (struct MAGIC_TAG *) ((BT_u8 *) (tag + 1) + tag->size);

	// A corrupted guard word on either side means the heap metadata was overwritten.
	if(!verify_tag(&tag->tag_0) || !verify_tag(&tag->tag_1) || !verify_tag(postmem)) {
		BT_kPrint("Kernel Panic - Corrupted FREE");
		while(1) {
			;	// Halt: the heap can no longer be trusted.
		}
	}

	BT_CACHE *pCache = tag->pCache;
	if(pCache) {
		BT_CacheFree(pCache, tag);	// Small block: hand it back to its slab cache.
	} else {
		// Page allocation: free the whole region, header and trailing guard included.
		bt_page_free((BT_PHYS_ADDR) bt_virt_to_phys(tag),
					 tag->size + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG));
	}
}
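/*
 * Hedged sketch of the header layout these guard checks imply. The member names
 * (tag_0, tag_1, size, pCache) come from BT_kFree() above; the MAGIC_TAG contents,
 * the magic value and the helper bodies are assumptions for illustration only.
 *
 *     [ MEM_TAG | user data (size bytes) | MAGIC_TAG ]
 */
struct MAGIC_TAG {
	BT_u32 ulMagic;
};

struct MEM_TAG {
	struct MAGIC_TAG	 tag_0;		// Leading guard.
	BT_u32				 size;		// Size of the user block that follows.
	BT_CACHE			*pCache;	// Owning slab cache, or NULL for a page allocation.
	struct MAGIC_TAG	 tag_1;		// Guard closing the header.
};

#define BT_HEAP_MAGIC	0xDEADBEEF	// Assumed guard pattern.

static void set_tag(struct MAGIC_TAG *tag) {
	tag->ulMagic = BT_HEAP_MAGIC;
}

static int verify_tag(struct MAGIC_TAG *tag) {
	return tag->ulMagic == BT_HEAP_MAGIC;
}

/*
 * The matching allocation path would fill tag->size and tag->pCache, call
 * set_tag() on tag_0, tag_1 and the trailing guard at (BT_u8 *)(tag + 1) + size,
 * then return (void *)(tag + 1); BT_kFree() walks the same layout in reverse.
 */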