/**
 * Grow pCache by one freshly allocated run of pages.
 *
 * Allocates BT_PAGE_ALIGN(pCache->ulObjectSize) bytes from the physical
 * page allocator, maps the run into the kernel virtual window and attaches
 * it to the cache as a new block of objects.
 *
 * @param pCache	Cache to extend.
 * @return BT_ERR_NONE on success, BT_ERR_NO_MEMORY if the page allocator
 *         is exhausted.
 */
static BT_ERROR extend_cache(BT_CACHE *pCache) {
	BT_u32 ulChunkSize = BT_PAGE_ALIGN(pCache->ulObjectSize);

	void *pPhys = (void *) bt_page_alloc(ulChunkSize);
	if(!pPhys) {
		return BT_ERR_NO_MEMORY;
	}

	struct block *pNewBlock = (struct block *) bt_phys_to_virt(pPhys);
	init_attach_block(pCache, pNewBlock, ulChunkSize);

	return BT_ERR_NONE;
}
static bt_paddr_t create_pgd(void) { bt_paddr_t pg, pgd; pg = bt_page_alloc(MMU_L1TBL_SIZE * 2); if(!pg) { return 0; } pgd = PGD_ALIGN(pg); // Here we should free the uneeded (unaligned part), but this requires // some small changes to the page allocator api. BT_u32 gap = (BT_u32) (pgd - pg); if(gap) { bt_page_free(pg, gap); } bt_page_free((pgd + MMU_L1TBL_SIZE), (MMU_L1TBL_SIZE - gap)); return pgd | (g_asid++); }
/**
 * Kernel heap allocator.
 *
 * Rounds the request up to a 4-byte multiple, prepends a MEM_TAG header
 * (owning cache pointer — NULL for a raw page allocation — plus the usable
 * size and two magic guard words) and appends a trailing MAGIC_TAG so the
 * free path can detect overruns. Small requests are served from the best
 * fitting object cache; oversize requests fall back to whole pages.
 *
 * @param ulSize	Requested usable size in bytes.
 * @return Pointer to ulSize (rounded-up) usable bytes, or NULL on a zero
 *         request, arithmetic overflow, or allocation failure.
 */
void *BT_kMalloc(BT_u32 ulSize) {
	void *p;

	if(!ulSize) {
		return NULL;
	}

	/*
	 * Guard the arithmetic below: rounding ulSize up to a 4-byte multiple
	 * and adding the tag/guard headers must not wrap BT_u32, otherwise a
	 * huge request would allocate a tiny buffer and the tag writes below
	 * would corrupt the heap.
	 */
	if(ulSize > 0xFFFFFFFF - (3 + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG))) {
		return NULL;
	}

	ulSize = (ulSize + 3) & 0xFFFFFFFC;	// Round up to a 4-byte boundary.

	BT_u32 ulTotal = ulSize + sizeof(struct MEM_TAG) + sizeof(struct MAGIC_TAG);

	BT_CACHE *pCache = BT_GetSuitableCache(ulTotal);
	if(pCache) {
		p = BT_CacheAlloc(pCache);
	} else {
		/* No cache large enough: fall back to whole pages. */
		bt_paddr_t phys = bt_page_alloc(ulTotal);
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	if(!p) {
		return NULL;
	}

	struct MEM_TAG *tag = (struct MEM_TAG *) p;
	tag->pCache = pCache;
	tag->size = ulSize;
	set_magic(&tag->tag_0);
	set_magic(&tag->tag_1);

	/* Trailing guard word immediately after the usable region. */
	struct MAGIC_TAG *mempost = (struct MAGIC_TAG *) ((BT_u8 *) (tag + 1) + ulSize);
	set_magic(mempost);

	/*
	 * Before the allocated memory we place a pointer to the pCache.
	 * This will be 0 in the case of a page allocation!
	 */
	return ((void *) (tag + 1));
}
/**
 * Kernel heap allocator (pointer-tag variant).
 *
 * Serves the request from the best fitting object cache when one exists,
 * otherwise from whole pages. A BT_CACHE pointer is stored immediately
 * before the returned memory so the free path can tell the two kinds of
 * allocation apart (NULL marks a page allocation).
 *
 * @param ulSize	Requested usable size in bytes.
 * @return Pointer to the usable region, or NULL on allocation failure.
 */
void *BT_kMalloc(BT_u32 ulSize) {
	void *p;

	BT_CACHE *pCache = BT_GetSuitableCache(ulSize + sizeof(BT_CACHE *));
	if(pCache) {
		p = BT_CacheAlloc(pCache);
		if(!p) {	// Cache exhausted: previously unchecked, *tag below would deref NULL.
			return NULL;
		}
	} else {
		/*
		 * No cache large enough: fall back to the page allocator.
		 * Check the physical address BEFORE converting it — the old code
		 * converted first, so a failed allocation (phys 0) became a
		 * non-NULL kernel virtual address that was then written through.
		 */
		bt_paddr_t phys = bt_page_alloc(ulSize + sizeof(BT_CACHE *));
		if(!phys) {
			return NULL;
		}
		p = (void *) bt_phys_to_virt(phys);
	}

	BT_CACHE **tag = (BT_CACHE **) p;
	*tag = pCache;

	/*
	 * Before the allocated memory we place a pointer to the pCache.
	 * This will be 0 in the case of a page allocation!
	 */
	return ((void *) (tag + 1));
}