/******************************************************************************
 * Reset the allocator: discard all free-list state and rebuild a single     *
 * free chunk spanning the whole usable region (lo_bound .. hi_bound).       *
 * Runs under the allocator lock.                                            *
 ******************************************************************************/
void	zbx_mem_clear(zbx_mem_info_t *info)
{
	const char	*__function_name = "zbx_mem_clear";

	void	*big_chunk;
	int	bucket;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __function_name);

	LOCK_INFO;

	/* drop every existing free-list entry */
	memset(info->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);

	/* re-create the one big free chunk covering all usable space */
	big_chunk = info->lo_bound;
	bucket = mem_bucket_by_size(info->total_size);
	info->buckets[bucket] = big_chunk;

	mem_set_chunk_size(big_chunk, info->total_size);
	mem_set_prev_chunk(big_chunk, NULL);
	mem_set_next_chunk(big_chunk, NULL);

	/* everything is free again */
	info->used_size = 0;
	info->free_size = info->total_size;

	UNLOCK_INFO;

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);
}
/******************************************************************************
 * Create and initialize a shared-memory allocator.                           *
 *                                                                            *
 * info      - [OUT] allocator descriptor (stored inside the segment itself)  *
 * shm_key   - System V shared memory key passed to zbx_shmget()              *
 * lock_name - mutex id, or ZBX_NO_MUTEX to run without locking               *
 * size      - requested segment size in bytes (must lie within               *
 *             [MEM_MIN_SIZE, MEM_MAX_SIZE])                                  *
 * descr     - human-readable description used in log messages (may be NULL)  *
 * param     - name of the configuration parameter behind `size` (may be NULL)*
 * allow_oom - stored in the descriptor; read by __mem_malloc() to decide     *
 *             whether to log allocation failures                             *
 *                                                                            *
 * The descriptor, its bucket array and copies of descr/param are carved out  *
 * of the head of the segment; the remainder becomes one big free chunk.      *
 * Exits the process on any failure.                                          *
 ******************************************************************************/
void	zbx_mem_create(zbx_mem_info_t **info, key_t shm_key, int lock_name, zbx_uint64_t size,
		const char *descr, const char *param, int allow_oom)
{
	const char	*__function_name = "zbx_mem_create";

	int	shm_id, index;
	void	*base;

	/* normalize NULL to a printable placeholder for logging */
	descr = ZBX_NULL2STR(descr);
	param = ZBX_NULL2STR(param);

	zabbix_log(LOG_LEVEL_DEBUG, "In %s() descr:'%s' param:'%s' size:" ZBX_FS_SIZE_T,
			__function_name, descr, param, (zbx_fs_size_t)size);

	/* allocate shared memory */

	/* the chunk layout assumes a 32- or 64-bit pointer size */
	if (4 != ZBX_PTR_SIZE && 8 != ZBX_PTR_SIZE)
	{
		zabbix_log(LOG_LEVEL_CRIT, "failed assumption about pointer size (" ZBX_FS_SIZE_T " not in {4, 8})",
				(zbx_fs_size_t)ZBX_PTR_SIZE);
		exit(FAIL);
	}

	if (!(MEM_MIN_SIZE <= size && size <= MEM_MAX_SIZE))
	{
		zabbix_log(LOG_LEVEL_CRIT, "requested size " ZBX_FS_SIZE_T " not within bounds [" ZBX_FS_UI64
				" <= size <= " ZBX_FS_UI64 "]", (zbx_fs_size_t)size, MEM_MIN_SIZE, MEM_MAX_SIZE);
		exit(FAIL);
	}

	if (-1 == (shm_id = zbx_shmget(shm_key, size)))
	{
		zabbix_log(LOG_LEVEL_CRIT, "cannot allocate shared memory for %s", descr);
		exit(FAIL);
	}

	if ((void *)(-1) == (base = shmat(shm_id, NULL, 0)))
	{
		zabbix_log(LOG_LEVEL_CRIT, "cannot attach shared memory for %s: %s", descr, zbx_strerror(errno));
		exit(FAIL);
	}

	/* allocate zbx_mem_info_t structure, its buckets, and description inside shared memory */

	/* descriptor goes first, 8-byte aligned; `size` tracks the bytes remaining past `base` */
	/* NOTE(review): arithmetic on void * below is a GNU extension, not ISO C */
	*info = ALIGN8(base);
	(*info)->shm_id = shm_id;
	(*info)->orig_size = size;
	size -= (void *)(*info + 1) - base;
	base = (void *)(*info + 1);

	/* bucket array follows the descriptor, pointer-aligned and zeroed */
	(*info)->buckets = ALIGNPTR(base);
	memset((*info)->buckets, 0, MEM_BUCKET_COUNT * ZBX_PTR_SIZE);
	size -= (void *)((*info)->buckets + MEM_BUCKET_COUNT) - base;
	base = (void *)((*info)->buckets + MEM_BUCKET_COUNT);

	/* copy the description and parameter strings into the segment */
	zbx_strlcpy(base, descr, size);
	(*info)->mem_descr = base;
	size -= strlen(descr) + 1;
	base += strlen(descr) + 1;

	zbx_strlcpy(base, param, size);
	(*info)->mem_param = base;
	size -= strlen(param) + 1;
	base += strlen(param) + 1;

	(*info)->allow_oom = allow_oom;

	/* allocate mutex */

	if (ZBX_NO_MUTEX != lock_name)
	{
		(*info)->use_lock = 1;

		if (ZBX_MUTEX_ERROR == zbx_mutex_create_force(&((*info)->mem_lock), lock_name))
		{
			zabbix_log(LOG_LEVEL_CRIT, "cannot create mutex for %s", descr);
			exit(FAIL);
		}
	}
	else
		(*info)->use_lock = 0;

	/* prepare shared memory for further allocation by creating one big chunk */

	/* total_size excludes the two boundary size fields at lo_bound/hi_bound */
	(*info)->lo_bound = ALIGN8(base);
	(*info)->hi_bound = ALIGN8(base + size - 8);

	(*info)->total_size = (zbx_uint64_t)((*info)->hi_bound - (*info)->lo_bound - 2 * MEM_SIZE_FIELD);

	index = mem_bucket_by_size((*info)->total_size);
	(*info)->buckets[index] = (*info)->lo_bound;
	mem_set_chunk_size((*info)->buckets[index], (*info)->total_size);
	mem_set_prev_chunk((*info)->buckets[index], NULL);
	mem_set_next_chunk((*info)->buckets[index], NULL);

	(*info)->used_size = 0;
	(*info)->free_size = (*info)->total_size;

	zabbix_log(LOG_LEVEL_DEBUG, "valid user addresses: [%p, %p] total size: " ZBX_FS_SIZE_T,
			(*info)->lo_bound + MEM_SIZE_FIELD, (*info)->hi_bound - MEM_SIZE_FIELD,
			(zbx_fs_size_t)(*info)->total_size);

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __function_name);
}
/******************************************************************************
 * Return the chunk owning `ptr` to the free lists, coalescing with a free    *
 * previous and/or next neighbour.                                            *
 *                                                                            *
 * ptr points at the user area, MEM_SIZE_FIELD bytes past the chunk start.    *
 *                                                                            *
 * Fix: pointer arithmetic is now done on (char *) instead of (void *) —      *
 * void-pointer arithmetic is a GNU extension and undefined in ISO C; this    *
 * also matches the style of the corrected __mem_realloc() in this file.      *
 ******************************************************************************/
static void	__mem_free(zbx_mem_info_t *info, void *ptr)
{
	void		*chunk;
	void		*prev_chunk, *next_chunk;
	zbx_uint64_t	chunk_size;
	int		prev_free, next_free;

	chunk = (void *)((char *)ptr - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	info->used_size -= chunk_size;
	info->free_size += chunk_size;

	/* see if we can merge with previous and next chunks */

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);

	prev_free = (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD));
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (prev_free && next_free)
	{
		/* three chunks collapse into one: the 4 interior size fields */
		/* become usable space and are credited back to free_size     */
		info->free_size += 4 * MEM_SIZE_FIELD;

		prev_chunk = (void *)((char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD);

		chunk_size += 4 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk) + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, prev_chunk);
		mem_unlink_chunk(info, next_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (prev_free)
	{
		/* merge with the previous chunk; 2 interior size fields reclaimed */
		info->free_size += 2 * MEM_SIZE_FIELD;

		prev_chunk = (void *)((char *)chunk - MEM_SIZE_FIELD - CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) -
				MEM_SIZE_FIELD);

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(prev_chunk);

		mem_unlink_chunk(info, prev_chunk);

		chunk = prev_chunk;
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else if (next_free)
	{
		/* merge with the next chunk; 2 interior size fields reclaimed */
		info->free_size += 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
	else
	{
		/* no mergeable neighbours: link the chunk back as-is */
		mem_set_chunk_size(chunk, chunk_size);
		mem_link_chunk(info, chunk);
	}
}
/******************************************************************************
 * Resize a previously allocated chunk.                                       *
 *                                                                            *
 * old  - pointer to the user area of the allocation being resized            *
 * size - requested new user size in bytes                                    *
 *                                                                            *
 * Returns the (possibly relocated) chunk start on success, or NULL when      *
 * memory is exhausted; on NULL return the old allocation is left intact.     *
 *                                                                            *
 * Fixes relative to the previous revision:                                   *
 *  - when expanding into the following free chunk, the two size fields       *
 *    between the chunks (2 * MEM_SIZE_FIELD) were not credited back to       *
 *    info->free_size, so the free/used accounting drifted on every in-place  *
 *    grow;                                                                   *
 *  - the out-of-memory fallback no longer frees the chunk first and then     *
 *    attempts to undo coalescing by guessing a bucket head from the old      *
 *    chunk size; it now checks up front whether freeing would yield enough   *
 *    space, and only then performs the free + malloc sequence.               *
 ******************************************************************************/
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge the freed tail with the following free chunk */
			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split off the tail as a new free chunk; the split costs */
			/* two new size fields, deducted from free_size            */
			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		/* grow in place by absorbing the following free chunk; the two size */
		/* fields between the chunks become usable space and must be counted */
		/* as free (this 2 * MEM_SIZE_FIELD credit was previously missing)   */
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		/* relocate: copy the user data, then release the old chunk */
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one                */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;	/* even after merging there is not enough room */

		/* preserve the user data while the chunk is temporarily freed */
		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			/* the feasibility check above guaranteed this allocation succeeds */
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}
/******************************************************************************
 * Allocate a chunk with at least `size` usable bytes from the free lists.    *
 *                                                                            *
 * Returns the chunk start (user area begins MEM_SIZE_FIELD bytes in), or     *
 * NULL when no suitable free chunk exists.                                   *
 *                                                                            *
 * Fix: the diagnostic log calls printed zbx_uint64_t values (`size`,         *
 * `skip_min`, `skip_max`, CHUNK_SIZE()) with "%u", a mismatched conversion   *
 * specifier (undefined behavior); they now use ZBX_FS_UI64, consistent with  *
 * the rest of this file.                                                     *
 ******************************************************************************/
static void	*__mem_malloc(zbx_mem_info_t *info, zbx_uint64_t size)
{
	int		index;
	void		*chunk;
	zbx_uint64_t	chunk_size;

	size = mem_proper_alloc_size(size);

	/* try to find an appropriate chunk in special buckets */

	index = mem_bucket_by_size(size);

	while (index < MEM_BUCKET_COUNT - 1 && NULL == info->buckets[index])
		index++;

	chunk = info->buckets[index];

	if (index == MEM_BUCKET_COUNT - 1)
	{
		/* otherwise, find a chunk big enough according to first-fit strategy */

		int		counter = 0;
		zbx_uint64_t	skip_min = __UINT64_C(0xffffffffffffffff), skip_max = __UINT64_C(0);

		while (NULL != chunk && CHUNK_SIZE(chunk) < size)
		{
			counter++;
			skip_min = MIN(skip_min, CHUNK_SIZE(chunk));
			skip_max = MAX(skip_max, CHUNK_SIZE(chunk));
			chunk = mem_get_next_chunk(chunk);
		}

		/* don't log errors if malloc can return null in low memory situations */
		if (0 == info->allow_oom)
		{
			if (NULL == chunk)
			{
				zabbix_log(LOG_LEVEL_CRIT, "__mem_malloc: skipped %d asked " ZBX_FS_UI64
						" skip_min " ZBX_FS_UI64 " skip_max " ZBX_FS_UI64,
						counter, size, skip_min, skip_max);
			}
			else if (counter >= 100)
			{
				zabbix_log(LOG_LEVEL_DEBUG, "__mem_malloc: skipped %d asked " ZBX_FS_UI64
						" skip_min " ZBX_FS_UI64 " skip_max " ZBX_FS_UI64
						" size " ZBX_FS_UI64, counter, size, skip_min, skip_max,
						CHUNK_SIZE(chunk));
			}
		}
	}

	if (NULL == chunk)
		return NULL;

	chunk_size = CHUNK_SIZE(chunk);
	mem_unlink_chunk(info, chunk);

	/* either use the full chunk or split it */

	if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
	{
		/* remainder too small to be a standalone chunk: hand out the whole thing */
		info->used_size += chunk_size;
		info->free_size -= chunk_size;

		mem_set_used_chunk_size(chunk, chunk_size);
	}
	else
	{
		void		*new_chunk;
		zbx_uint64_t	new_chunk_size;

		/* split: the tail becomes a new free chunk, costing two size fields */
		new_chunk = chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD;
		new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
		mem_set_chunk_size(new_chunk, new_chunk_size);
		mem_link_chunk(info, new_chunk);

		info->used_size += size;
		info->free_size -= chunk_size;
		info->free_size += new_chunk_size;

		mem_set_used_chunk_size(chunk, size);
	}

	return chunk;
}
/******************************************************************************
 * Resize a previously allocated chunk.                                       *
 *                                                                            *
 * old  - pointer to the user area of the allocation being resized            *
 * size - requested new user size in bytes (rounded up by                     *
 *        mem_proper_alloc_size())                                            *
 *                                                                            *
 * Returns the (possibly relocated) chunk start on success, or NULL when      *
 * memory is exhausted; on NULL return the old allocation is left untouched.  *
 *                                                                            *
 * Strategy, in order: shrink in place (keeping the chunk if less than 3/4    *
 * would be freed), grow in place into a free following chunk, relocate via   *
 * __mem_malloc(), and finally free-then-reallocate with the user data        *
 * staged in process-local memory — guarded by an upfront space check.        *
 ******************************************************************************/
static void	*__mem_realloc(zbx_mem_info_t *info, void *old, zbx_uint64_t size)
{
	void		*chunk, *new_chunk, *next_chunk;
	zbx_uint64_t	chunk_size, new_chunk_size;
	int		next_free;

	size = mem_proper_alloc_size(size);

	/* the chunk's size field sits MEM_SIZE_FIELD bytes before the user area */
	chunk = (void *)((char *)old - MEM_SIZE_FIELD);
	chunk_size = CHUNK_SIZE(chunk);

	next_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + chunk_size + MEM_SIZE_FIELD);
	next_free = (next_chunk < info->hi_bound && FREE_CHUNK(next_chunk));

	if (size <= chunk_size)
	{
		/* do not reallocate if not much is freed */
		/* we are likely to want more memory again */
		if (size > chunk_size / 4)
			return chunk;

		if (next_free)
		{
			/* merge with next chunk */

			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = CHUNK_SIZE(next_chunk) + (chunk_size - size);

			mem_unlink_chunk(info, next_chunk);

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}
		else
		{
			/* split the current one */

			/* the split creates two new interior size fields, hence the */
			/* extra 2 * MEM_SIZE_FIELD deducted from free_size          */
			info->used_size -= chunk_size - size;
			info->free_size += chunk_size - size - 2 * MEM_SIZE_FIELD;

			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;

			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}

	if (next_free && chunk_size + 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk) >= size)
	{
		/* grow in place by absorbing the following free chunk; the two size */
		/* fields between the chunks become usable space, credited to free   */
		info->used_size -= chunk_size;
		info->free_size += chunk_size + 2 * MEM_SIZE_FIELD;

		chunk_size += 2 * MEM_SIZE_FIELD + CHUNK_SIZE(next_chunk);

		mem_unlink_chunk(info, next_chunk);

		/* either use the full next_chunk or split it */

		if (chunk_size < size + 2 * MEM_SIZE_FIELD + MEM_MIN_ALLOC)
		{
			info->used_size += chunk_size;
			info->free_size -= chunk_size;

			mem_set_used_chunk_size(chunk, chunk_size);
		}
		else
		{
			new_chunk = (void *)((char *)chunk + MEM_SIZE_FIELD + size + MEM_SIZE_FIELD);
			new_chunk_size = chunk_size - size - 2 * MEM_SIZE_FIELD;
			mem_set_chunk_size(new_chunk, new_chunk_size);
			mem_link_chunk(info, new_chunk);

			info->used_size += size;
			info->free_size -= chunk_size;
			info->free_size += new_chunk_size;

			mem_set_used_chunk_size(chunk, size);
		}

		return chunk;
	}
	else if (NULL != (new_chunk = __mem_malloc(info, size)))
	{
		/* relocate: copy the user data, then release the old chunk */
		memcpy((char *)new_chunk + MEM_SIZE_FIELD, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		return new_chunk;
	}
	else
	{
		void	*tmp = NULL;

		/* check if there would be enough space if the current chunk */
		/* would be freed before allocating a new one */
		new_chunk_size = chunk_size;

		if (0 != next_free)
			new_chunk_size += CHUNK_SIZE(next_chunk) + 2 * MEM_SIZE_FIELD;

		if (info->lo_bound < chunk && FREE_CHUNK((char *)chunk - MEM_SIZE_FIELD))
			new_chunk_size += CHUNK_SIZE((char *)chunk - MEM_SIZE_FIELD) + 2 * MEM_SIZE_FIELD;

		if (size > new_chunk_size)
			return NULL;

		/* stage the user data in heap memory while the chunk is freed */
		tmp = zbx_malloc(tmp, chunk_size);

		memcpy(tmp, (char *)chunk + MEM_SIZE_FIELD, chunk_size);

		__mem_free(info, old);

		if (NULL == (new_chunk = __mem_malloc(info, size)))
		{
			/* the space check above guarantees this allocation succeeds */
			THIS_SHOULD_NEVER_HAPPEN;
			exit(EXIT_FAILURE);
		}

		memcpy((char *)new_chunk + MEM_SIZE_FIELD, tmp, chunk_size);

		zbx_free(tmp);

		return new_chunk;
	}
}