void MemPool_FixedArea::compact() {
	
	Uint32 prev_chunk_end_pos=0;
	
	for (Uint32 i=0;i<chunks_allocated;i++) {


		MemChunk &chunk=mem_chunks[ chunk_map[ i ] ];

		/* determine hole size to previous chunk */

		int hole_size=chunk.pos-prev_chunk_end_pos;

		/* if we can compact, do it */
		if (hole_size>0 && !chunk.lock) {
			
			COMPACT_CHUNK(chunk,prev_chunk_end_pos);
			
		}
		
		/* prepare for next one */
		prev_chunk_end_pos=chunk.end();
	}
	
	
}
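/* COMPACT_CHUNK is used by all of the routines in these snippets but is not
   defined here. A minimal sketch of what such a macro could look like,
   assuming the pool keeps its storage in a byte buffer (`mem_area` below is a
   hypothetical member name) and that compacting means moving the chunk's
   bytes to the target position and updating its `pos`: */

#include <cstring> // memmove

// Sketch only: memmove is used because source and destination may overlap.
// A PoolAllocator variant would move aligned(entry.len) bytes instead of len.
#define COMPACT_CHUNK(m_chunk, m_to_pos)                                                  \
	do {                                                                              \
		memmove(&mem_area[(m_to_pos)], &mem_area[(m_chunk).pos], (m_chunk).len);  \
		(m_chunk).pos = (m_to_pos);                                               \
	} while (0)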
Example #2
void PoolAllocator::compact_up(int p_from) {

	uint32_t next_entry_end_pos=pool_size; // - static_area_size;

	for (int i=entry_count-1;i>=p_from;i--) {


		Entry &entry=entry_array[ entry_indices[ i ] ];

		/* determine hole size to the next entry */

		int hole_size=next_entry_end_pos-(entry.pos+aligned(entry.len));

		/* if we can compact, do it */
		if (hole_size>0 && !entry.lock) {

			COMPACT_CHUNK(entry,(next_entry_end_pos-aligned(entry.len)));

		}

		/* prepare for next one */
		next_entry_end_pos=entry.pos;
	}

}
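/* compact_up() rounds each entry's length with aligned(), which is also not
   shown in these snippets. A plausible sketch, assuming the allocator keeps
   its alignment step in an `align` member (both the member name and the exact
   signature are assumptions): */

// Sketch only: rounds a size up to the next multiple of `align`.
uint32_t PoolAllocator::aligned(uint32_t p_size) const {

	uint32_t rem = p_size % align;
	return rem ? p_size + (align - rem) : p_size;
}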
Example #3
void PoolAllocator::compact(int p_up_to) {

	uint32_t prev_entry_end_pos=0;

	if (p_up_to<0)
		p_up_to=entry_count;
	for (int i=0;i<p_up_to;i++) {


		Entry &entry=entry_array[ entry_indices[ i ] ];

		/* determine hole size to previous entry */

		int hole_size=entry.pos-prev_entry_end_pos;

		/* if we can compact, do it */
		if (hole_size>0 && !entry.lock) {

			COMPACT_CHUNK(entry,prev_entry_end_pos);

		}

		/* prepare for next one */
		prev_entry_end_pos=entry_end(entry);
	}


}
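/* entry_end(), used at the bottom of the loop above, is not shown either.
   Given that compact_up() treats entry.pos + aligned(entry.len) as the
   occupied end of an entry, it is presumably the same expression wrapped in a
   helper (the exact signature is an assumption): */

// Sketch only: first position past an entry's aligned payload.
uint32_t PoolAllocator::entry_end(const Entry &p_entry) const {

	return p_entry.pos + aligned(p_entry.len);
}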
bool MemPool_FixedArea::realloc_mem(MemPool_Handle p_handle,Uint32 p_new_amount) {
	
	ERR_FAIL_COND_V( !is_mem_valid(p_handle),true );
	
	MemChunk *chunk=(MemChunk*)get_private( p_handle );
	
	ERR_FAIL_COND_V(chunk->lock,true); //it's locked
	
	ChunkMapPos chunk_map_pos;
	
	ERR_FAIL_COND_V( find_chunk_map(&chunk_map_pos,chunk), true );
	
	Uint32 new_len_request=p_new_amount;
	
	{ /* Try strategies that can resize without touching other blocks first */
		
		Uint32 next_chunk_begin_pos=(chunk_map_pos==(chunks_allocated-1))?mem_area_size:mem_chunks[ chunk_map[ chunk_map_pos+1 ] ].pos;
		
		/* FIRST let's try to see if we can resize without moving any data */
		if ( (chunk->pos+new_len_request)<=next_chunk_begin_pos ) {
			
			chunk->len=new_len_request;
			return false; //nothing else needed to be done
		}
		
		Uint32 prev_chunk_end_pos=(chunk_map_pos==0)?0:mem_chunks[ chunk_map[ chunk_map_pos-1 ] ].end(); //end of the previous chunk, or 0 if this is the first one
		
		/* SECOND, if we can resize without moving the other blocks, try it */
		
		if ( (next_chunk_begin_pos-prev_chunk_end_pos)>=new_len_request ) {
			
			COMPACT_CHUNK(*chunk,prev_chunk_end_pos);
			return false;
			
		}
		
		
	}	
	
	/* If nothing worked, try to move it to the end */
	Uint32 last_block_end_pos=mem_chunks[ chunk_map[ chunks_allocated-1 ] ].end();
	/* Can move to the end? */
	if ( (mem_area_size-last_block_end_pos)<new_len_request ) {
		/* No, can't move, try compacting */
		compact();
		//need to compute this again
		last_block_end_pos=mem_chunks[ chunk_map[ chunks_allocated-1 ] ].end();
		ERR_FAIL_COND_V( (mem_area_size-last_block_end_pos)<new_len_request, true );
		
	}
	
	/* Move it */
	COMPACT_CHUNK(*chunk,last_block_end_pos);
	/* update map: shift the following map entries down one slot and put this chunk's slot last, so chunk_map stays ordered by position */
	ChunkMemPos chunk_mem_pos=chunk_map[ chunk_map_pos ];
	 
	for (Uint32 i=chunk_map_pos;i<(chunks_allocated-1);i++) {
		
		chunk_map[ i ] = chunk_map[ i+1 ];
	}
	
	chunk_map[chunks_allocated-1]=chunk_mem_pos;
		
	chunk->len=new_len_request;		
	
	return false;
}
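/* A hypothetical call site, following the ERR_FAIL_COND_V convention used in
   realloc_mem(): true signals failure, false signals success. `pool`,
   `handle` and `new_size` below are illustrative placeholders, not names from
   the snippets above. */

if (pool.realloc_mem(handle, new_size)) {
	// true: the resize failed (invalid handle, locked chunk, or not enough
	// room even after compact()); the chunk keeps its previous length.
} else {
	// false: the resize succeeded, but the chunk may have been relocated by
	// COMPACT_CHUNK, so any cached raw pointer into it should be refreshed.
}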