/* Find a free area in the pool large enough to hold `size` bytes.
 *
 * The request is rounded up to KB granularity and mapped to its size-class
 * bucket; the first non-empty bucket at or above that class is consulted.
 * Every bucket below the last one holds only areas of (at least) its class
 * size, so its head entry is always adequate.  The final bucket collects
 * everything larger, so it must be scanned for a big-enough entry.
 *
 * Returns NULL when no adequate area exists in the pool.
 * NOTE(review): no locking here — presumably the caller serializes access
 * or tolerates a stale answer; confirm against call sites.
 */
Free_Area* free_pool_find_size_area(Free_Area_Pool* pool, POINTER_SIZE_INT size)
{
    /* Only large objects are served from this pool. */
    assert(size >= GC_LOS_OBJ_SIZE_THRESHOLD);
    size = ALIGN_UP_TO_KILO(size);

    /* First non-empty bucket whose class is >= the request's class. */
    unsigned int bucket = pool_list_get_next_flag(pool, pool_list_index_with_size(size));
    assert(bucket <= NUM_FREE_LIST);
    if(bucket == NUM_FREE_LIST) return NULL;  /* no free area left */

    Bidir_List* head = (Bidir_List*)&pool->sized_area_list[bucket];

    /* Any non-final bucket's head entry is guaranteed big enough. */
    if(bucket != MAX_LIST_INDEX) return (Free_Area*)head->next;

    /* Last bucket is a mixed bag: walk it until something fits. */
    for(Free_Area* cur = (Free_Area*)head->next;
        cur != (Free_Area*)head;
        cur = (Free_Area*)cur->next){
        if(cur->size >= size) return cur;
    }
    return NULL;
}
/* Carve an `size`-byte piece (KB-aligned) out of some area in the pool's
 * last (largest) size-class list, under that list's lock.
 *
 * The piece is taken from the HIGH end of the chosen area: the returned
 * pointer is free_area + remain_size, and the low part stays behind as the
 * (shrunk) free area.  Depending on what remains:
 *   - remainder still >= GC_LOS_OBJ_SIZE_THRESHOLD and still classifies
 *     into the last list: shrink in place, no relinking needed;
 *   - remainder classifies into a smaller list: unlink under the last
 *     list's lock, then re-add under the target list's lock (the area is
 *     private between the two critical sections, so resizing it unlocked
 *     is safe);
 *   - remainder in [0, threshold): the whole area is consumed — unlink it;
 *     a nonzero sub-threshold tail (>= 1 KB by the KB alignment invariant)
 *     is recorded in the detached area header for later coalescing.
 *
 * Returns NULL if the last list is empty or holds no big-enough area.
 *
 * Fix vs. original: removed the unused local `new_area`.
 */
static void* free_pool_last_list_atomic_take_area_piece(Free_Area_Pool* pool, POINTER_SIZE_INT size)
{
    void* p_result;
    POINTER_SIZE_SINT remain_size = 0;
    POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
    Free_Area* free_area = NULL;
    unsigned int new_list_nr = 0;
    Lockable_Bidir_List* head = &(pool->sized_area_list[MAX_LIST_INDEX]);

    free_pool_lock_nr_list(pool, MAX_LIST_INDEX );

    /* The last list is empty. */
    if(free_pool_nr_list_is_empty(pool, MAX_LIST_INDEX)){
        free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
        return NULL;
    }

    /* The last list is unsorted w.r.t. the request; scan for a fit. */
    free_area = (Free_Area*)(head->next);
    while( free_area != (Free_Area*)head ){
        remain_size = free_area->size - alloc_size;
        if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
            /* Remainder is still a large-object-capable area. */
            new_list_nr = pool_list_index_with_size(remain_size);
            p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
            if(new_list_nr == MAX_LIST_INDEX){
                /* Still belongs to this list: shrink in place. */
                free_area->size = remain_size;
                free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
                return p_result;
            }else{
                /* Migrate the shrunk area to its new size-class list.
                 * It is resized between the two critical sections while
                 * detached from any list, so no one else can see it. */
                free_pool_remove_area(pool, free_area);
                free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
                free_area->size = remain_size;
                free_pool_lock_nr_list(pool, new_list_nr);
                free_pool_add_area(pool, free_area);
                free_pool_unlock_nr_list(pool, new_list_nr);
                return p_result;
            }
        }
        else if(remain_size >= 0) {
            /* Area fits but the leftover (if any) is below the LOS
             * threshold: take the whole area off the list. */
            free_pool_remove_area(pool, free_area);
            free_pool_unlock_nr_list(pool, MAX_LIST_INDEX);
            p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
            if(remain_size > 0){
                /* KB alignment guarantees at least 1 KB of tail. */
                assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
                free_area->size = remain_size;
            }
            return p_result;
        }
        else
            /* Too small for this request; try the next area. */
            free_area = (Free_Area*)free_area->next;
    }

    /* No adequate area in the last list. */
    free_pool_unlock_nr_list(pool, MAX_LIST_INDEX );
    return NULL;
}
/* Carve a `size`-byte piece (KB-aligned) from the head area of the
 * non-final list `list_hint`, under that list's lock.
 *
 * Unlike the last list, every area in a former list is at least its
 * class size, so once the list is known non-empty the head entry is
 * guaranteed to satisfy the request (hence the assert on remain_size).
 * The piece is taken from the HIGH end of the area; the low part stays
 * behind as the shrunk free area.
 *
 * Returns NULL only if another allocator emptied the list first. */
static void* free_pool_former_lists_atomic_take_area_piece(Free_Area_Pool* pool, unsigned int list_hint, POINTER_SIZE_INT size)
{
    Free_Area* free_area;
    void* p_result;
    POINTER_SIZE_SINT remain_size;
    POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
    unsigned int new_list_nr = 0;
    Lockable_Bidir_List* head = &pool->sized_area_list[list_hint];

    /* The last list has its own routine (it must be scanned). */
    assert(list_hint < MAX_LIST_INDEX);

    free_pool_lock_nr_list(pool, list_hint);
    /*Other LOS allocation may race with this one, so check list status here.*/
    if(free_pool_nr_list_is_empty(pool, list_hint)){
        free_pool_unlock_nr_list(pool, list_hint);
        return NULL;
    }
    free_area = (Free_Area*)(head->next);
    /*if the list head is not NULL, it definitely satisfies the request. */
    remain_size = free_area->size - alloc_size;
    assert(remain_size >= 0);
    if( remain_size >= GC_LOS_OBJ_SIZE_THRESHOLD){
        /* Remainder is still large enough to stay in the pool. */
        new_list_nr = pool_list_index_with_size(remain_size);
        p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
        if(new_list_nr == list_hint){
            /* Same size class: shrink in place, no relinking. */
            free_area->size = remain_size;
            free_pool_unlock_nr_list(pool, list_hint);
            return p_result;
        }else{
            /* Migrate to the remainder's size-class list.  The area is
             * resized while detached and unlocked, which is safe because
             * nothing else can reach a detached area. */
            free_pool_remove_area(pool, free_area);
            free_pool_unlock_nr_list(pool, list_hint);
            free_area->size = remain_size;
            free_pool_lock_nr_list(pool, new_list_nr);
            free_pool_add_area(pool, free_area);
            free_pool_unlock_nr_list(pool, new_list_nr);
            return p_result;
        }
    }
    else {
        /* Leftover (if any) is below the LOS threshold: consume the
         * whole area and record any sub-threshold tail in its header. */
        free_pool_remove_area(pool, free_area);
        free_pool_unlock_nr_list(pool, list_hint);
        p_result = (void*)((POINTER_SIZE_INT)free_area + remain_size);
        if(remain_size > 0){
            /* KB alignment guarantees at least 1 KB of tail. */
            assert((remain_size >= KB) && (remain_size < GC_LOS_OBJ_SIZE_THRESHOLD));
            free_area->size = remain_size;
        }
        return p_result;
    }
    /* Unreachable: both branches above return. */
    assert(0);
    return NULL;
}
/* CAS-accumulate `alloc_size` into lspace->last_alloced_size.
 * Retries until the compare-and-swap observes an unchanged old value. */
static void lspace_accumulate_alloced_size(Lspace* lspace, POINTER_SIZE_INT alloc_size)
{
    uint64 old_size = lspace->last_alloced_size;
    uint64 new_size = old_size + alloc_size;
    while( old_size != port_atomic_cas64(&lspace->last_alloced_size, new_size, old_size) ){
        old_size = lspace->last_alloced_size;
        new_size = old_size + alloc_size;
    }
}

/* Try to allocate `alloc_size` bytes from the large-object space's free
 * pool.  Starts at the request's size-class list and walks upward through
 * non-empty lists; each take can race with other allocators, so a failed
 * take simply advances to the next non-empty list.  On success the memory
 * is zeroed and the allocation is added to the space's running total.
 *
 * Returns NULL when no list can satisfy the request.
 *
 * Fix vs. original: the verbatim-duplicated CAS accumulation loop in both
 * success branches is factored into lspace_accumulate_alloced_size(). */
void* lspace_try_alloc(Lspace* lspace, POINTER_SIZE_INT alloc_size){
    void* p_result = NULL;
    Free_Area_Pool* pool = lspace->free_pool;
    unsigned int list_hint = pool_list_index_with_size(alloc_size);
    list_hint = pool_list_get_next_flag(pool, list_hint);

    while((!p_result) && (list_hint <= MAX_LIST_INDEX)){
        /*List hint is not the last list, so look for it in former lists.*/
        if(list_hint < MAX_LIST_INDEX){
            p_result = free_pool_former_lists_atomic_take_area_piece(pool, list_hint, alloc_size);
            if(p_result){
                memset(p_result, 0, alloc_size);
                lspace_accumulate_alloced_size(lspace, alloc_size);
                return p_result;
            }
            /* Raced to empty: advance to the next non-empty list. */
            list_hint ++;
            list_hint = pool_list_get_next_flag(pool, list_hint);
        }
        /*List hint is the last list, so look for it in the last list.*/
        else {
            p_result = free_pool_last_list_atomic_take_area_piece(pool, alloc_size);
            if(p_result){
                memset(p_result, 0, alloc_size);
                lspace_accumulate_alloced_size(lspace, alloc_size);
            }
            /* Success or the last list had nothing: either way, done. */
            return p_result;
        }
    }
    return p_result;
}