Example #1
static void	*__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size)
{
	int		index;
	void		*chunk;
	zbx_uint64_t	chunk_size;

	size = mem_proper_alloc_size(size);

	/* try to find an appropriate chunk in special buckets */

	index = mem_bucket_by_size(size);

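	/* walk up to the first non-empty bucket; the last bucket collects all larger chunks */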
	while (index < MEM_BUCKET_COUNT - 1 && NULL == info->buckets[index])
		index++;

	chunk = info->buckets[index];

	if (index == MEM_BUCKET_COUNT - 1)
	{
		/* otherwise, find a chunk big enough according to first-fit strategy */

		int		counter = 0;
		zbx_uint64_t	skip_min = __UINT64_C(0xffffffffffffffff), skip_max = __UINT64_C(0);
		while (NULL != chunk && CHUNK_SIZE(chunk) < size)
		{
			counter++;
			skip_min = MIN(skip_min, CHUNK_SIZE(chunk));
			skip_max = MAX(skip_max, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		/* don't log errors if malloc can return null in low memory situations */
		if (0 == info->allow_oom)
		{
			if (NULL == chunk)
				zabbix_log(LOG_LEVEL_CRIT, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min " ZBX_FS_UI64
						" skip_max " ZBX_FS_UI64, counter, size, skip_min, skip_max);
			else if (counter >= 100)
				zabbix_log(LOG_LEVEL_DEBUG, "__mem_malloc: skipped %d asked " ZBX_FS_UI64 " skip_min " ZBX_FS_UI64
						" skip_max " ZBX_FS_UI64 " size " ZBX_FS_UI64,
						counter, size, skip_min, skip_max, CHUNK_SIZE(chunk));
		}
	}

	if (NULL == chunk)
		return NULL;

	chunk_size = CHUNK_SIZE(chunk);
	mem_unlink_chunk(info, chunk);

	/* either use the full chunk or split it */

	if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
	{
		info->used_size += chunk_size;
		info->free_size -= chunk_size;

		mem_set_used_chunk_size(chunk, chunk_size);
	}
	else
	{
		void		*new_chunk;
		zbx_uint64_t	new_chunk_size;

		new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
		new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
		mem_set_chunk_size(new_chunk, new_chunk_size);
		mem_link_chunk(info, new_chunk);

		info->used_size += size;
		info->free_size -= chunk_size;
		info->free_size += new_chunk_size;

		mem_set_used_chunk_size(chunk, size);
	}

	return chunk;
}
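The split test near the end of __mem_malloc only keeps the remainder as a separate free chunk if it can still hold its own two size fields plus a minimal payload; otherwise the whole chunk is handed out. Below is a minimal, self-contained sketch of that decision, assuming MEM_SIZE_FIELD is the 8-byte size field stored before and after each chunk and MEM_MIN_ALLOC is the smallest payload worth tracking (the real values live in Zabbix's memalloc.c):

#include <stdint.h>
#include <stdio.h>

#define MEM_SIZE_FIELD	8	/* assumed: size of one chunk size field */
#define MEM_MIN_ALLOC	24	/* assumed: smallest payload worth keeping as a free chunk */

/* returns 1 if a free chunk of chunk_size should be split after carving out */
/* a used chunk of `size' bytes, i.e. if the remainder can still hold its    */
/* own two size fields plus a minimal allocation                             */
static int	should_split(uint64_t chunk_size, uint64_t size)
{
	return chunk_size >= size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC;
}

int	main(void)
{
	printf("%d\n", should_split(128, 64));	/* 1: the 48-byte remainder becomes a new free chunk */
	printf("%d\n", should_split(128, 104));	/* 0: the 8-byte remainder is too small, use the full chunk */
	return 0;
}

Handing out the full chunk in the second case wastes a few bytes but avoids creating an unusably small free chunk.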
Example #2
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	chunk = old - MEM_SIZE_FIELD;
	chunk_size = CHUNK_SIZE(chunk);

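	/* a chunk occupies size field + payload + size field, so the next chunk starts right after */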
	next_chunk = chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD;
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge with next chunk */

			info->used_size -= chunk_size;
			info->used_size += size;
			info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;
			info->free_size -= size + 2 * MEM_SIZE_FIELD;

			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */

			info->used_size -= chunk_size;
			info->used_size += size;
			info->free_size += chunk_size;
			info->free_size -= size + 2 * MEM_SIZE_FIELD;

			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		info->used_size -= chunk_size;
		info->free_size += chunk_size;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		memcpy(new_chunk + MEM_SIZE_FIELD, chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		new_chunk = __mem_malloc(info, size);

		if (NULL != new_chunk)
		{
			memcpy(new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);
		}
		else
		{
			int	index;
			void	*last_chunk;

			index = mem_bucket_by_size(chunk_size);
			last_chunk = info->buckets[index];

			mem_unlink_chunk(info, last_chunk);

			if (chunk != last_chunk)
			{
				/* The chunk was merged with a free space on the left during  */
				/* __mem_free() operation. The left chunk must be restored to */
				/* its previous state to avoid memory leaks.                  */
				/* We can safely ignore if the chunk was merged on the right  */
				/* as it will just increase the size of allocated chunk.      */
				zbx_uint64_t	left_size;

				left_size = chunk - last_chunk - 2 * MEM_SIZE_FIELD;
				mem_set_chunk_size(chunk, CHUNK_SIZE(chunk) - left_size - 2 * MEM_SIZE_FIELD);
				mem_set_chunk_size(last_chunk, left_size);
				mem_link_chunk(info, last_chunk);
			}

			memcpy(chunk + MEM_SIZE_FIELD, tmp, chunk_size);
		}

		zbx_free(tmp);

		return new_chunk;
	}
}
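When growing, the chunk can be extended in place as long as the adjacent chunk is free and, together with the two size fields that merging reclaims, it provides enough room. A small sketch of that test, with MEM_SIZE_FIELD again an assumed stand-in for the real constant:

#include <stdint.h>

#define MEM_SIZE_FIELD	8	/* assumed: size of one chunk size field */

/* returns 1 if a used chunk of chunk_size can grow to `wanted' bytes by     */
/* absorbing the adjacent free chunk; merging also reclaims the two size     */
/* fields that separated the chunks, so they count toward the capacity      */
static int	can_grow_in_place(uint64_t chunk_size, int next_is_free, uint64_t next_chunk_size,
		uint64_t wanted)
{
	if (0 == next_is_free)
		return 0;

	return chunk_size + 2 * MEM_SIZE_FIELD + next_chunk_size >= wanted;
}

If this test fails, a fresh chunk is requested via __mem_malloc(), and only after that does the code fall back to freeing the old chunk and retrying.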
Example #3
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge with next chunk */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one                */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;

		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}
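This later revision adds a guard before the free-then-allocate fallback: it estimates how large the chunk would become once freed and merged with any free neighbours, and returns NULL if even that merged chunk could not satisfy the request, so the fatal THIS_SHOULD_NEVER_HAPPEN branch is only reached on genuine corruption. A rough sketch of that estimate, with the constant as an assumed stand-in:

#include <stdint.h>

#define MEM_SIZE_FIELD	8	/* assumed: size of one chunk size field */

/* upper bound on the chunk size that freeing would produce: the chunk       */
/* itself plus each free neighbour and the size fields reclaimed by merging  */
static uint64_t	merged_capacity(uint64_t chunk_size, int prev_is_free, uint64_t prev_size,
		int next_is_free, uint64_t next_size)
{
	uint64_t	capacity = chunk_size;

	if (0 != prev_is_free)
		capacity += prev_size + 2 * MEM_SIZE_FIELD;

	if (0 != next_is_free)
		capacity += next_size + 2 * MEM_SIZE_FIELD;

	return capacity;	/* the request is refused if it exceeds this value */
}

Compared to Example #2, this pre-check removes the need to un-merge the left chunk after a failed retry.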