/* Find a free area in the pool that is at least "size" bytes large.
 * Returns NULL if no suitable area exists. */
Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
{
    assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
    size = ALIGN_UP_TO_KILO(size);

    unsigned int index = pool_list_index_with_size(size);
    /* Get the first list index at or above the computed index that is not empty. */
    index = pool_list_get_next_flag(pool, index);
    assert(index <= NUM_FREE_LIST);

    /* No free area left. */
    if(index == NUM_FREE_LIST) return NULL;

    Bidir_List* list = (Bidir_List*)&pool->sized_area_list[index];
    Free_Area* area = (Free_Area*)list->next;

    /* Any area in a non-last bucket is guaranteed to be large enough. */
    if(index != MAX_LIST_INDEX) return area;

    /* The last bucket (MAX_LIST_INDEX) holds areas of mixed sizes,
     * so it must be traversed to find one that fits. */
    while( area != (Free_Area*)list ){
        if(area->size >= size) return area;
        area = (Free_Area*)(area->next);
    }
    return NULL;
}
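/* For illustration only: the helpers used above are assumed to map a
 * kilobyte-aligned size to a segregated free-list bucket, with every
 * oversized request falling into the last bucket (MAX_LIST_INDEX).
 * The sketch below is a hypothetical stand-in, not the actual
 * pool_list_index_with_size() implementation; it is shown only to make
 * that bucketing assumption concrete. */
static inline unsigned int illustrative_list_index_with_size(POINTER_SIZE_INT size)
{
    /* One bucket per KB above the large-object threshold, clamped to the last bucket. */
    unsigned int index = (unsigned int)((size - GC_LOS_OBJ_SIZE_THRESHOLD) >> 10);
    return (index < MAX_LIST_INDEX) ? index : MAX_LIST_INDEX;
}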
void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size)
{
    void* p_result = NULL;
    Free_Area_Pool* pool = lspace->free_pool;

    unsigned int list_hint = pool_list_index_with_size(alloc_size);
    list_hint = pool_list_get_next_flag(pool, list_hint);

    while((!p_result) && (list_hint <= MAX_LIST_INDEX)){
        /* The hint points at one of the fixed-size lists before the last one,
         * so try to take a piece from that list. */
        if(list_hint < MAX_LIST_INDEX){
            p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
            if(p_result){
                memset(p_result, 0, alloc_size);
                /* Atomically accumulate the allocated bytes into last_alloced_size. */
                uint64 vold = lspace->last_alloced_size;
                uint64 vnew = vold + alloc_size;
                while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){
                    vold = lspace->last_alloced_size;
                    vnew = vold + alloc_size;
                }
                return p_result;
            }else{
                /* Nothing fit in this list; advance to the next non-empty one. */
                list_hint ++;
                list_hint = pool_list_get_next_flag(pool, list_hint);
                continue;
            }
        }
        /* The hint is the last (variable-size) list, so try to take a piece from it. */
        else {
            p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
            if(p_result){
                memset(p_result, 0, alloc_size);
                /* Atomically accumulate the allocated bytes into last_alloced_size. */
                uint64 vold = lspace->last_alloced_size;
                uint64 vnew = vold + alloc_size;
                while( vold != port_atomic_cas64(&lspace->last_alloced_size, vnew, vold) ){
                    vold = lspace->last_alloced_size;
                    vnew = vold + alloc_size;
                }
                return p_result;
            }
            else break;
        }
    }
    return p_result;
}
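/* For illustration only: a hypothetical caller of lspace_try_alloc(). A large
 * object allocation slow path would typically try the free pool first and fall
 * back to a collection, then retry once, when the pool cannot satisfy the
 * request. vm_gc_reclaim_heap() is a placeholder name for the collector entry
 * point, not an API defined in this file. */
void* illustrative_lspace_alloc(Lspace* lspace, POINTER_SIZE_INT size)
{
    void* p_obj = lspace_try_alloc(lspace, ALIGN_UP_TO_KILO(size));
    if(p_obj) return p_obj;

    /* Free pool exhausted: reclaim the heap, then retry once. */
    vm_gc_reclaim_heap();   /* placeholder for the real collection trigger */
    return lspace_try_alloc(lspace, ALIGN_UP_TO_KILO(size));
}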