/*
 * Add an extra memory region to an already-initialized heap.
 *
 * @param mmhead  heap head the region is attached to
 * @param addr    start of the new region (aligned up internally)
 * @param len     length of the region in bytes
 *
 * @return RHINO_SUCCESS on success, or RHINO_MM_POOL_SIZE_ERR when the
 *         usable (aligned) region is too small for the bookkeeping
 *         structures plus one minimum-size block.
 */
kstat_t krhino_add_mm_region(k_mm_head *mmhead, void *addr, size_t len)
{
    void               *orig_addr;
    k_mm_region_info_t *region;
    k_mm_list_t        *firstblk, *nextblk;

    NULL_PARA_CHK(mmhead);
    NULL_PARA_CHK(addr);

    orig_addr = addr;
    addr      = (void *)MM_ALIGN_UP((size_t)addr);

    /* Guard against size_t underflow: if aligning the start address up
     * consumed the whole region, there is nothing left to add.  Without
     * this check "len -= delta" would wrap to a huge value and pass the
     * size validation below. */
    if ((size_t)addr - (size_t)orig_addr >= len) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    len -= (size_t)addr - (size_t)orig_addr;
    len  = MM_ALIGN_DOWN(len);

    /* The region must hold the region-info record, three list heads and
     * at least one minimum-size allocatable block. */
    if (!len ||
        len < sizeof(k_mm_region_info_t) + MMLIST_HEAD_SIZE * 3 + MM_MIN_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    memset(addr, 0, len);

    MM_CRITICAL_ENTER(mmhead);

    firstblk = init_mm_region(addr, len);
    nextblk  = MM_GET_NEXT_BLK(firstblk);

    /* Inserting the area in the list of linked areas */
    region             = (k_mm_region_info_t *)firstblk->mbinfo.buffer;
    region->next       = mmhead->regioninfo;
    mmhead->regioninfo = region;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye   = RHINO_MM_CORRUPT_DYE;
    nextblk->owner = 0;
#endif

#if (K_MM_STATISTIC > 0)
    /* Keep "used_size" consistent: account the block as used here, then
     * the k_mm_free() below decreases it back to its previous value. */
    mmhead->used_size += MM_GET_BLK_SIZE(nextblk);
#endif

    MM_CRITICAL_EXIT(mmhead);

    /* Mark nextblk as free */
    k_mm_free(mmhead, nextblk->mbinfo.buffer);

    return RHINO_SUCCESS;
}
/* Add a region of memory to the heap.
 *
 * heapstart: start address of the region (aligned up internally)
 * heapsize : size of the region in bytes (trimmed down to alignment)
 *
 * Builds two "allocated" guard nodes at each end of the region, places one
 * large free node between them, and registers that free node with the
 * allocator's free list.  Updates the global heap bookkeeping
 * (g_heapstart/g_heapend/g_heapsize and, when multi-region, g_nregions).
 */
void mm_addregion(FAR void *heapstart, size_t heapsize)
{
  FAR struct mm_freenode_s *node;
  uintptr_t heapbase;
  uintptr_t heapend;
#if CONFIG_MM_REGIONS > 1
  /* IDX is a real variable in the multi-region build ... */
  int IDX = g_nregions;
#else
  /* ... and a constant macro (undef'd below) in the single-region build. */
# define IDX 0
#endif

  /* If the MCU handles wide addresses but the memory manager is
   * configured for a small heap, then verify that the caller is not
   * doing something crazy.
   */

#if defined(CONFIG_MM_SMALL) && !defined(CONFIG_SMALL_MEMORY)
  DEBUGASSERT(heapsize <= MMSIZE_MAX+1);
#endif

  /* Adjust the provided heap start and size so that they are both
   * aligned with the MM_MIN_CHUNK size.
   */

  heapbase = MM_ALIGN_UP((uintptr_t)heapstart);
  heapend  = MM_ALIGN_DOWN((uintptr_t)heapstart + (uintptr_t)heapsize);
  heapsize = heapend - heapbase;

  /* NOTE(review): heapsize is size_t but is printed with %u -- fine on
   * 32-bit targets, potentially truncating elsewhere; confirm against the
   * platform's debug printf. */
  mlldbg("Region %d: base=%p size=%u\n", IDX+1, heapstart, heapsize);

  /* Add the size of this region to the total size of the heap */

  g_heapsize += heapsize;

  /* Create two "allocated" guard nodes at the beginning and end of the
   * heap.  These only serve to keep us from allocating outside of the
   * heap.
   *
   * And create one free node between the guard nodes that contains all
   * available memory.
   */

  g_heapstart[IDX]            = (FAR struct mm_allocnode_s *)heapbase;
  g_heapstart[IDX]->size      = SIZEOF_MM_ALLOCNODE;
  g_heapstart[IDX]->preceding = MM_ALLOC_BIT;

  node                        = (FAR struct mm_freenode_s *)(heapbase + SIZEOF_MM_ALLOCNODE);
  node->size                  = heapsize - 2*SIZEOF_MM_ALLOCNODE;
  node->preceding             = SIZEOF_MM_ALLOCNODE;

  g_heapend[IDX]              = (FAR struct mm_allocnode_s *)(heapend - SIZEOF_MM_ALLOCNODE);
  g_heapend[IDX]->size        = SIZEOF_MM_ALLOCNODE;
  /* The end guard's "preceding" encodes the free node's size plus the
   * allocated marker bit. */
  g_heapend[IDX]->preceding   = node->size | MM_ALLOC_BIT;

#undef IDX

#if CONFIG_MM_REGIONS > 1
  g_nregions++;
#endif

  /* Add the single, large free node to the nodelist */

  mm_addfreechunk(node);
}
/*
 * Initialize a heap inside the memory [addr, addr + len).
 *
 * The k_mm_head is placed at the (aligned) start of the region, followed
 * by the first managed block region.  Optionally (RHINO_CONFIG_MM_BLK)
 * a fixed-size block pool is carved out of the new heap.
 *
 * @param ppmmhead  out: receives the initialized heap head
 * @param addr      start of the memory region (aligned up internally)
 * @param len       length of the region in bytes
 *
 * @return RHINO_SUCCESS, or RHINO_MM_POOL_SIZE_ERR when the usable
 *         (aligned) region is too small or exceeds MM_MAX_SIZE.
 */
kstat_t krhino_init_mm_head(k_mm_head **ppmmhead, void *addr, size_t len)
{
    k_mm_list_t *nextblk;
    k_mm_list_t *firstblk;
    k_mm_head   *pmmhead;
    void        *orig_addr;
#if (RHINO_CONFIG_MM_BLK > 0)
    mblk_pool_t *mmblk_pool;
    kstat_t      stat;
#endif

    NULL_PARA_CHK(ppmmhead);
    NULL_PARA_CHK(addr);

    /* Check parameters: addr and len need alignment.
     * 1. the length needs at least RHINO_CONFIG_MM_TLF_BLK_SIZE for the
     *    fixed-size memory block pool;
     * 2. and also at least MIN_FREE_MEMORY_SIZE for user allocations. */
    orig_addr = addr;
    addr      = (void *)MM_ALIGN_UP((size_t)addr);

    /* Guard against size_t underflow: if aligning the start address up
     * consumed the whole region, "len -= delta" would wrap to a huge
     * value and could slip past the range check below. */
    if ((size_t)addr - (size_t)orig_addr >= len) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    len -= (size_t)addr - (size_t)orig_addr;
    len  = MM_ALIGN_DOWN(len);

    if (len == 0 ||
        len < MIN_FREE_MEMORY_SIZE + RHINO_CONFIG_MM_TLF_BLK_SIZE ||
        len > MM_MAX_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    pmmhead = (k_mm_head *)addr;

    /* Zero the memory head */
    memset(pmmhead, 0, sizeof(k_mm_head));

#if (RHINO_CONFIG_MM_REGION_MUTEX > 0)
    krhino_mutex_create(&pmmhead->mm_mutex, "mm_mutex");
#else
    krhino_spin_lock_init(&pmmhead->mm_lock);
#endif

    firstblk = init_mm_region((void *)((size_t)addr + MM_ALIGN_UP(sizeof(k_mm_head))),
                              MM_ALIGN_DOWN(len - sizeof(k_mm_head)));

    pmmhead->regioninfo = (k_mm_region_info_t *)firstblk->mbinfo.buffer;

    nextblk = MM_GET_NEXT_BLK(firstblk);

    *ppmmhead = pmmhead;

    /* Mark it as free and set it to bitmap */
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye   = RHINO_MM_CORRUPT_DYE;
    nextblk->owner = 0;
#endif
    /* Release the free block */
    k_mm_free(pmmhead, nextblk->mbinfo.buffer);

    /* After free, we need to access mmhead and nextblk again */
#if (K_MM_STATISTIC > 0)
    pmmhead->free_size    = MM_GET_BUF_SIZE(nextblk);
    pmmhead->used_size    = len - MM_GET_BUF_SIZE(nextblk);
    pmmhead->maxused_size = pmmhead->used_size;
#endif
    /* Default: no fixed-size block pool */
    pmmhead->fix_pool = NULL;

#if (RHINO_CONFIG_MM_BLK > 0)
    /* note: stats_addsize is performed inside k_mm_alloc */
    mmblk_pool = k_mm_alloc(pmmhead,
                            RHINO_CONFIG_MM_TLF_BLK_SIZE + MM_ALIGN_UP(sizeof(mblk_pool_t)));
    if (mmblk_pool) {
        stat = krhino_mblk_pool_init(mmblk_pool, "fixed_mm_blk",
                                     (void *)((size_t)mmblk_pool + MM_ALIGN_UP(sizeof(mblk_pool_t))),
                                     RHINO_CONFIG_MM_BLK_SIZE,
                                     RHINO_CONFIG_MM_TLF_BLK_SIZE);
        if (stat == RHINO_SUCCESS) {
            pmmhead->fix_pool = mmblk_pool;
#if (K_MM_STATISTIC > 0)
            /* The pool's backing store no longer counts as heap usage. */
            stats_removesize(pmmhead, RHINO_CONFIG_MM_TLF_BLK_SIZE);
#endif
        } else {
            /* note: stats_removesize is performed inside k_mm_free */
            k_mm_free(pmmhead, mmblk_pool);
        }
#if (K_MM_STATISTIC > 0)
        pmmhead->maxused_size = pmmhead->used_size;
#endif
    }
#endif

    return RHINO_SUCCESS;
}