/* Free a chunk previously returned by fm_malloc().
 * Forward-coalesces with adjacent free fragments (subject to the
 * large_limit/large_space policy), then re-inserts the result into the
 * free-fragment hash.
 * NOTE: the matching #ifdef DBG_F_MALLOC prototype lies above this chunk;
 * this is the non-debug prototype followed by the shared body. */
void fm_free(struct fm_block* qm, void* p)
#endif
{
	struct fm_frag* f,*n;

#ifdef DBG_F_MALLOC
	LM_DBG("params(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
	/* sanity check: the pointer must lie inside this memory block */
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	/* free(NULL) is a harmless no-op */
	if (p==0) {
		LM_DBG("free(0) called\n");
		return;
	}
	/* step back from the user pointer to the fragment header */
	f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));
#ifdef DBG_F_MALLOC
	LM_DBG("freeing block alloc'ed from %s: %s(%ld)\n", f->file, f->func,
		f->line);
	/* record where the free happened, for double-free diagnostics */
	f->file=file;
	f->func=func;
	f->line=line;
#endif

join:
	/* NOTE(review): coalescing is skipped while large_space exceeds
	 * large_limit -- presumably joining is only worth the cost when large
	 * free space is scarce; confirm intended policy against the header */
	if( qm->large_limit < qm->large_space )
		goto no_join;

	n = FRAG_NEXT(f);
	/* n->prev != 0 marks the next fragment as free, hence mergeable */
	if (((char*)n < (char*)qm->last_frag) && n->prev ) {
		fm_remove_free(qm, n);
		/* join */
		f->size += n->size + FRAG_OVERHEAD;
#if defined(DBG_F_MALLOC) || defined(STATISTICS)
		//qm->real_used -= FRAG_OVERHEAD;
		qm->used += FRAG_OVERHEAD;
#endif
		/* loop: keep absorbing successive free fragments */
		goto join;
	}

no_join:
	fm_insert_free(qm, f);
#if defined(DBG_F_MALLOC) || defined(STATISTICS)
	qm->fragments -= 1;
#endif
	pkg_threshold_check();
}
void *hp_pkg_malloc(struct hp_block *hpb, unsigned long size) { struct hp_frag *frag; unsigned int hash; /* size must be a multiple of ROUNDTO */ size = ROUNDUP(size); /* search for a suitable free frag */ for (hash = GET_HASH(size); hash < HP_HASH_SIZE; hash++) { frag = hpb->free_hash[hash].first; for (; frag; frag = frag->u.nxt_free) if (frag->size >= size) goto found; /* try in a bigger bucket */ } /* out of memory... we have to shut down */ LM_CRIT("not enough memory, please increase the \"-M\" parameter!\n"); abort(); found: hp_frag_detach(hpb, frag); update_stats_pkg_frag_detach(hpb, frag); /* split the fragment if possible */ pkg_frag_split(hpb, frag, size); if (hpb->real_used > hpb->max_real_used) hpb->max_real_used = hpb->real_used; pkg_threshold_check(); return (char *)frag + sizeof *frag; }
void *hp_pkg_realloc(struct hp_block *hpb, void *p, unsigned long size) { struct hp_frag *f; unsigned long diff; unsigned long orig_size; struct hp_frag *next; void *ptr; if (size == 0) { if (p) hp_pkg_free(hpb, p); return NULL; } if (!p) return hp_pkg_malloc(hpb, size); f = FRAG_OF(p); size = ROUNDUP(size); orig_size = f->size; /* shrink operation */ if (orig_size > size) { pkg_frag_split(hpb, f, size); /* grow operation */ } else if (orig_size < size) { diff = size - orig_size; next = FRAG_NEXT(f); /* try to join with a large enough adjacent free fragment */ if (next < hpb->last_frag && next->prev && (next->size + FRAG_OVERHEAD) >= diff) { hp_frag_detach(hpb, next); update_stats_pkg_frag_detach(hpb, next); f->size += next->size + FRAG_OVERHEAD; /* split the result if necessary */ if (f->size > size) pkg_frag_split(hpb, f, size); } else { /* could not join => realloc */ ptr = hp_pkg_malloc(hpb, size); if (ptr) { /* copy, need by libssl */ memcpy(ptr, p, orig_size); hp_pkg_free(hpb, p); } p = ptr; } if (hpb->real_used > hpb->max_real_used) hpb->max_real_used = hpb->real_used; } pkg_threshold_check(); return p; }
/* Resize an fm_malloc()-ed chunk, libc realloc() semantics:
 * size == 0 frees and returns 0; p == 0 behaves as malloc.
 * Growing first tries to absorb the adjacent free fragment; otherwise it
 * falls back to malloc + copy + free (chunk may move).
 * NOTE: the matching #ifdef DBG_F_MALLOC prototype lies above this chunk. */
void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
#endif
{
	struct fm_frag *f;
	unsigned long diff;
	unsigned long orig_size;
	struct fm_frag *n;
	void *ptr;

#ifdef DBG_F_MALLOC
	LM_DBG("params(%p, %p, %lu), called from %s: %s(%d)\n", qm, p, size,
		file, func, line);
	/* sanity check: a non-NULL pointer must lie inside this block */
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	if (size==0) {
		/* realloc(p, 0) == free(p) */
		if (p)
#ifdef DBG_F_MALLOC
			fm_free(qm, p, file, func, line);
#else
			fm_free(qm, p);
#endif
		pkg_threshold_check();
		return 0;
	}
	/* realloc(NULL, size) == malloc(size) */
	if (p==0)
#ifdef DBG_F_MALLOC
		return fm_malloc(qm, size, file, func, line);
#else
		return fm_malloc(qm, size);
#endif
	/* step back to the fragment header */
	f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));
#ifdef DBG_F_MALLOC
	LM_DBG("realloc'ing frag %p alloc'ed from %s: %s(%ld)\n", f, f->file,
		f->func, f->line);
#endif
	size=ROUNDUP(size);
	orig_size=f->size;
	if (f->size > size){
		/* shrink */
#ifdef DBG_F_MALLOC
		LM_DBG("shrinking from %lu to %lu\n", f->size, size);
		fm_split_frag(qm, f, size, file, "frag. from fm_realloc", line);
#else
		fm_split_frag(qm, f, size);
#endif
	}else if (f->size<size){
		/* grow */
#ifdef DBG_F_MALLOC
		LM_DBG("growing from %lu to %lu\n", f->size, size);
#endif
		diff=size-f->size;
		n=FRAG_NEXT(f);
		/* n->prev != 0 means the next fragment is free and can be merged */
		if (((char*)n < (char*)qm->last_frag) && n->prev &&
				((n->size+FRAG_OVERHEAD)>=diff)){
			fm_remove_free(qm,n);
			/* join */
			f->size += n->size + FRAG_OVERHEAD;
#if defined(DBG_F_MALLOC) || defined(STATISTICS)
			//qm->real_used -= FRAG_OVERHEAD;
			qm->used += FRAG_OVERHEAD;
#endif
			/* split it if necessary */
			if (f->size > size){
#ifdef DBG_F_MALLOC
				fm_split_frag(qm, f, size, file, "fragm. from fm_realloc",
					line);
#else
				fm_split_frag(qm, f, size);
#endif
			}
		}else{
			/* could not join => realloc */
#ifdef DBG_F_MALLOC
			ptr=fm_malloc(qm, size, file, func, line);
#else
			ptr = fm_malloc(qm, size);
#endif
			if (ptr) {
				/* copy, need by libssl */
				memcpy(ptr, p, orig_size);
#ifdef DBG_F_MALLOC
				fm_free(qm, p, file, func, line);
#else
				fm_free(qm, p);
#endif
			}
			/* NOTE: on allocation failure p becomes 0; old chunk is kept */
			p = ptr;
		}
	}else{
		/* do nothing */
#ifdef DBG_F_MALLOC
		LM_DBG("doing nothing, same size: %lu - %lu\n", f->size, size);
#endif
	}
#ifdef DBG_F_MALLOC
	LM_DBG("returning %p\n", p);
#endif
#if defined(DBG_F_MALLOC) || defined(STATISTICS)
	if (qm->max_real_used<qm->real_used)
		qm->max_real_used=qm->real_used;
#endif
	pkg_threshold_check();
	return p;
}
void* fm_malloc(struct fm_block* qm, unsigned long size) #endif { struct fm_frag* frag,*n; unsigned int hash; #ifdef DBG_F_MALLOC LM_DBG("params (%p, %lu), called from %s: %s(%d)\n", qm, size, file, func, line); #endif /*size must be a multiple of 8*/ size=ROUNDUP(size); /*search for a suitable free frag*/ for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){ frag=qm->free_hash[hash].first; for( ; frag; frag = frag->u.nxt_free ) if ( frag->size >= size ) goto found; /* try in a bigger bucket */ } /* not found, bad! */ LM_WARN("Not enough free memory, will atempt defragmenation\n"); for( frag = qm->first_frag; (char*)frag < (char*)qm->last_frag; ) { n = FRAG_NEXT(frag); if ( ((char*)n < (char*)qm->last_frag) && n->prev && frag->prev ) { /* detach frag*/ fm_remove_free(qm, frag); do { fm_remove_free(qm, n); frag->size += n->size + FRAG_OVERHEAD; #if defined(DBG_F_MALLOC) || defined(STATISTICS) //qm->real_used -= FRAG_OVERHEAD; qm->used += FRAG_OVERHEAD; #endif if( frag->size >size ) goto solved; n = FRAG_NEXT(frag); } while ( ((char*)n < (char*)qm->last_frag) && n->prev); fm_insert_free(qm,frag); } frag = n; } pkg_threshold_check(); return 0; found: /* we found it!*/ fm_remove_free(qm,frag); /*see if we'll use full frag, or we'll split it in 2*/ #ifdef DBG_F_MALLOC fm_split_frag(qm, frag, size, file, func, line); frag->file=file; frag->func=func; frag->line=line; frag->check=ST_CHECK_PATTERN; LM_DBG("params(%p, %lu), returns address %p \n", qm, size, (char*)frag+sizeof(struct fm_frag)); #else fm_split_frag(qm, frag, size); #endif solved: #if defined(DBG_F_MALLOC) || defined(STATISTICS) if (qm->max_real_used<qm->real_used) qm->max_real_used=qm->real_used; #endif pkg_threshold_check(); return (char*)frag+sizeof(struct fm_frag); }
/* Allocate 'size' bytes from an F_MALLOC block (DBG_MALLOC build family).
 * First-fit over the free-fragment hash; on miss, logs an OOM error and
 * tries a full defragmentation pass before giving up.
 * NOTE: the matching #ifdef DBG_MALLOC prototype lies above this chunk. */
void* fm_malloc(struct fm_block* qm, unsigned long size)
#endif
{
	struct fm_frag* frag,*n;
	unsigned int hash;

#ifdef DBG_MALLOC
	LM_GEN1(memlog, "%s_malloc(%lu), called from %s: %s(%d)\n", qm->name,
		size, file, func, line);
#endif

	/*size must be a multiple of 8*/
	size=ROUNDUP(size);

	/*search for a suitable free frag*/
	for(hash=GET_HASH(size); hash<F_HASH_SIZE; hash++) {
		frag=qm->free_hash[hash].first;
		for( ; frag; frag = frag->u.nxt_free )
			if ( frag->size >= size )
				goto found;
		/* try in a bigger bucket */
	}

	/* not found, bad! */
	/* qm->name[0] == 'p' distinguishes pkg from shm for the hint text */
#if defined(DBG_MALLOC) || defined(STATISTICS)
	LM_ERR(oom_errorf, qm->name, qm->size - qm->real_used,
		qm->name[0] == 'p' ? "M" : "m");
	LM_INFO("attempting defragmentation... (need %lu bytes)\n", size);
#else
	LM_ERR(oom_nostats_errorf, qm->name, qm->name[0] == 'p' ? "M" : "m");
	LM_INFO("attempting defragmentation... (need %lu bytes)\n", size);
#endif

	/* coalesce runs of adjacent free fragments (prev != 0 marks free),
	 * stopping as soon as a merged run exceeds the requested size */
	for( frag = qm->first_frag; (char*)frag < (char*)qm->last_frag;  ) {
		n = FRAG_NEXT(frag);
		if ( ((char*)n < (char*)qm->last_frag) && n->prev && frag->prev ) {
			/* detach frag*/
			fm_remove_free(qm, frag);
			do {
				fm_remove_free(qm, n);
				frag->size += n->size + FRAG_OVERHEAD;
#if defined(DBG_MALLOC) || defined(STATISTICS)
				//qm->real_used -= FRAG_OVERHEAD;
				qm->used += FRAG_OVERHEAD;
#endif
				if( frag->size >size ) {
#ifdef DBG_MALLOC
					/* mark it as "busy" */
					frag->is_free = 0;
#endif
					goto solved;
				}
				n = FRAG_NEXT(frag);
			} while ( ((char*)n < (char*)qm->last_frag) && n->prev);
			/* run ended too small: return it to the free lists */
			fm_insert_free(qm,frag);
		}
		frag = n;
	}

	LM_INFO("unable to alloc a big enough fragment!\n");
	pkg_threshold_check();
	return 0;

found:
	/* we found it!*/
	fm_remove_free(qm,frag);
#ifdef DBG_MALLOC
	/* mark it as "busy" */
	frag->is_free = 0;
#endif

	/*see if we'll use full frag, or we'll split it in 2*/
#ifdef DBG_MALLOC
	fm_split_frag(qm, frag, size, file, func, line);

	frag->file=file;
	frag->func=func;
	frag->line=line;
	frag->check=ST_CHECK_PATTERN;
	LM_GEN1(memlog, "%s_malloc(%lu), returns address %p\n", qm->name, size,
		(char*)frag+sizeof(struct fm_frag));
#else
	fm_split_frag(qm, frag, size);
#endif

solved:
#if defined(DBG_MALLOC) || defined(STATISTICS)
	if (qm->max_real_used<qm->real_used)
		qm->max_real_used=qm->real_used;
	qm->fragments += 1;
#endif

	pkg_threshold_check();
	return (char*)frag+sizeof(struct fm_frag);
}
/* Free a chunk allocated by vqm_malloc().
 * Fixed-size buckets are simply pushed onto their bucket free list; big
 * (kilo) bucket fragments are additionally coalesced with free neighbours
 * and, when the freed fragment borders the unused core, the core is grown
 * back instead of listing the fragment.
 * NOTE: the matching #ifdef DBG_QM_MALLOC prototype lies above this chunk. */
void vqm_free(struct vqm_block* qm, void* p)
#endif
{
	struct vqm_frag *f, *next, *prev, *first_big;
	unsigned char b;

#ifdef DBG_QM_MALLOC
	LM_GEN1(memlog,"params (%p, %p), called from %s: %s(%d)\n", qm, p,
		file, func, line);
	/* sanity check: pointer must lie inside this block's core */
	if (p>(void *)qm->core_end || p<(void*)qm->init_core){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	/* free(NULL) is a no-op */
	if (p==0) {
		LM_WARN("free(0) called\n");
		return;
	}

	/* step back to the fragment header and read its bucket */
	f=(struct vqm_frag*) ((char*)p-sizeof(struct vqm_frag));
	b=f->u.inuse.bucket;
#ifdef DBG_QM_MALLOC
	VQM_DEBUG_FRAG(qm, f);
	if ( ! FRAG_ISUSED(f) ) {
		LM_CRIT("freeing already freed pointer, first freed: %s: %s(%d) "
			"- aborting\n", f->file, f->func, f->line);
		abort();
	}
	if ( b>MAX_BUCKET ) {
		LM_CRIT("fragment with too high bucket nr: "
			"%d, allocated: %s: %s(%d) - aborting\n",
			b, f->file, f->func, f->line);
		abort();
	}
	LM_GEN1(memlog,"freeing %d bucket block alloc'ed from %s: %s(%d)\n",
		f->u.inuse.bucket, f->file, f->func, f->line);
	f->file=file; f->func=func; f->line=line;
	qm->usage[ f->u.inuse.bucket ]--;
#endif

	if (IS_BIGBUCKET(qm,b)) {
		/* big fragments: try to coalesce with the next fragment */
		next=FRAG_NEXT(f);
		if  ((char *)next +sizeof( struct vqm_frag) < qm->core_end) {
			VQM_DEBUG_FRAG(qm, next);
			if (! FRAG_ISUSED(next)) { /* coalesce with next fragment */
				LM_DBG("coalesced with next\n");
				vqm_detach_free(qm, next);
				f->size+=next->size;
				FRAG_END(f)->size=f->size;
			}
		}
		/* only look at prev if some free big fragment precedes f */
		first_big = qm->next_free[b];
		if (first_big &&  f>first_big) {
			prev=FRAG_PREV(f);
			VQM_DEBUG_FRAG(qm, prev);
			if (!FRAG_ISUSED(prev)) { /* coalesce with prev fragment */
				LM_DBG("coalesced with prev\n");
				vqm_detach_free(qm, prev );
				prev->size+=f->size;
				f=prev;
				FRAG_END(f)->size=f->size;
			}
		}
		if ((char *)f==qm->big_chunks) { /* release unused core */
			LM_DBG("big chunk released\n");
			qm->free_core+=f->size;
			qm->big_chunks+=f->size;
			pkg_threshold_check();
			return;
		}
		first_big = qm->next_free[b];
		/* fix reverse link (used only for BIG_BUCKET */
		if (first_big)
			FRAG_END(first_big)->prv_free=f;
		FRAG_END(f)->prv_free=0;
	} else first_big = qm->next_free[b];

	/* push f on the head of its bucket's free list */
	f->u.nxt_free = first_big; /* also clobbers magic */
	qm->next_free[b] = f;
	pkg_threshold_check();
}
void* vqm_malloc(struct vqm_block* qm, unsigned int size) #endif { struct vqm_frag *new_chunk, *f; unsigned char bucket; #ifdef DBG_QM_MALLOC unsigned int demanded_size; LM_GEN1( memlog, "params (%p, %d) called from %s: %s(%d)\n", qm, size, file, func, line); demanded_size = size; #endif new_chunk=0; /* what's the bucket? what's the total size incl. overhead? */ bucket = size2bucket( qm, &size ); if (IS_BIGBUCKET(qm, bucket)) { /* the kilo-bucket uses first-fit */ #ifdef DBG_QM_MALLOC LM_GEN1( memlog, "processing a big fragment\n"); #endif for (f=qm->next_free[bucket] ; f; f=f->u.nxt_free ) if (f->size>=size) { /* first-fit */ new_chunk=f; VQM_DEBUG_FRAG(qm, f); vqm_detach_free(qm,f); break; } } else if ( (new_chunk=qm->next_free[ bucket ]) ) { /*fixed size bucket*/ VQM_DEBUG_FRAG(qm, new_chunk); /*detach it from the head of bucket's free list*/ qm->next_free[ bucket ] = new_chunk->u.nxt_free; } if (!new_chunk) { /* no chunk can be reused; slice one from the core */ new_chunk=MORE_CORE( qm, bucket, size ); if (!new_chunk) { #ifdef DBG_QM_MALLOC LM_GEN1(memlog, "params (%p, %d) called from %s: %s(%d)\n", qm, size, file, func, line); #else LM_DBG("params (%p, %d) called from %s: %s(%d)\n", qm, size); #endif pkg_threshold_check(); return 0; } } new_chunk->u.inuse.magic = FR_USED; new_chunk->u.inuse.bucket=bucket; #ifdef DBG_QM_MALLOC new_chunk->file=file; new_chunk->func=func; new_chunk->line=line; new_chunk->demanded_size=demanded_size; qm->usage[ bucket ]++; LM_GEN1( memlog,"params ( %p, %d ) returns address %p in bucket %d, " "real-size %d\n", qm, demanded_size, (char*)new_chunk+sizeof(struct vqm_frag), bucket, size ); new_chunk->end_check=(char*)new_chunk+ sizeof(struct vqm_frag)+demanded_size; memcpy( new_chunk->end_check, END_CHECK_PATTERN, END_CHECK_PATTERN_LEN ); new_chunk->check=ST_CHECK_PATTERN; #endif pkg_threshold_check(); return (char*)new_chunk+sizeof(struct vqm_frag); }
/* Split fragment 'f' so its payload becomes 'new_size', turning the
 * residue into a new free fragment (re-inserted into the free lists).
 * Returns 0 on success, -1 when the residue is too small to split.
 * NOTE: the matching #ifdef DBG_QM_MALLOC prototype lies above this chunk. */
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;

	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		/* the new free fragment starts right after f's end marker */
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->used-=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
		/* we cannot split this fragment any more */
		return -1;
	}
}

#ifdef DBG_QM_MALLOC
void* qm_malloc(struct qm_block* qm, unsigned long size, const char* file,
	const char* func, unsigned int line)
#else
/* Allocate 'size' bytes from a QM block via qm_find_free();
 * returns the payload pointer or 0 on failure. */
void* qm_malloc(struct qm_block* qm, unsigned long size)
#endif
{
	struct qm_frag* f;
	int hash;
#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;

	list_cntr = 0;
	LM_GEN1( memlog, "params (%p, %lu), called from %s: %s(%d)\n", qm, size,
		file, func, line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	/* quick reject: not enough free space in the whole block */
	if (size>(qm->size-qm->real_used)) {
		pkg_threshold_check();
		return 0;
	}

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		LM_GEN1( memlog, "params (%p, %lu), returns address %p frag. %p "
			"(size=%lu) on %d -th hit\n", qm, size,
			(char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
		pkg_threshold_check();
		return (char*)f+sizeof(struct qm_frag);
	}
	pkg_threshold_check();
	return 0;
}

#ifdef DBG_QM_MALLOC
void qm_free(struct qm_block* qm, void* p, const char* file, const char* func,
	unsigned int line)
#else
/* Free a qm_malloc()-ed chunk, coalescing with both the next and previous
 * fragment when they are free. */
void qm_free(struct qm_block* qm, void* p)
#endif
{
	struct qm_frag* f;
	struct qm_frag* prev;
	struct qm_frag* next;
	unsigned long size;

#ifdef DBG_QM_MALLOC
	LM_GEN1( memlog, "params(%p, %p), called from %s: %s(%d)\n", qm, p,
		file, func, line);
	/* sanity check: pointer must lie inside this memory block */
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	/* free(NULL) is a no-op */
	if (p==0) {
		LM_WARN("free(0) called\n");
		return;
	}
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	if (f->u.is_free){
		LM_CRIT("freeing already freed pointer,"
				" first free: %s: %s(%ld) - aborting\n",
				f->file, f->func, f->line);
		abort();
	}
	LM_GEN1( memlog, "freeing frag. %p alloc'ed from %s: %s(%ld)\n",
		f, f->file, f->func, f->line);
#endif
	size=f->size;
	/* join packets if possible*/
	prev=next=0;
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
		/* join */
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, next);
#endif
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->used+=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}

	if (f > qm->first_frag){
		prev=FRAG_PREV(f);
		/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
			sizeof(struct qm_frag_end))->size);*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, prev);
#endif
		if (prev->u.is_free){
			/*join*/
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->used+=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	f->size=size;
	FRAG_END(f)->size=f->size;
#ifdef DBG_QM_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
	pkg_threshold_check();
}

#ifdef DBG_QM_MALLOC
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
	const char* file, const char* func, unsigned int line)
#else
/* Resize a qm chunk, libc realloc() semantics: size == 0 frees, p == 0
 * behaves as malloc; growing first tries to absorb the next free fragment,
 * falling back to malloc + copy + free. */
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
#endif
{
	struct qm_frag* f;
	unsigned long diff;
	unsigned long orig_size;
	struct qm_frag* n;
	void* ptr;

#ifdef DBG_QM_MALLOC
	LM_GEN1( memlog, "params (%p, %p, %lu), called from %s: %s(%d)\n", qm,
		p, size, file, func, line);
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	if (size==0) {
		/* realloc(p, 0) == free(p) */
		if (p)
#ifdef DBG_QM_MALLOC
			qm_free(qm, p, file, func, line);
#else
			qm_free(qm, p);
#endif
		pkg_threshold_check();
		return 0;
	}
	/* realloc(NULL, size) == malloc(size) */
	if (p==0)
#ifdef DBG_QM_MALLOC
		return qm_malloc(qm, size, file, func, line);
#else
		return qm_malloc(qm, size);
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	LM_GEN1( memlog, "realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
		f, f->file, f->func, f->line);
	if (f->u.is_free){
		LM_CRIT("trying to realloc an already freed "
			"pointer %p , fragment %p -- aborting\n", p, f);
		abort();
	}
#endif
	/* find first acceptable size */
	size=ROUNDUP(size);
	if (f->size > size){
		orig_size=f->size;
		/* shrink */
#ifdef DBG_QM_MALLOC
		LM_GEN1(memlog,"shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
			LM_GEN1(memlog,"shrinked successful\n");
		}
#else
		split_frag(qm, f, size);
#endif
	}else if (f->size < size){
		/* grow */
#ifdef DBG_QM_MALLOC
		LM_GEN1( memlog, "growing from %lu to %lu\n", f->size, size);
#endif
		orig_size=f->size;
		diff=size-f->size;
		n=FRAG_NEXT(f);
		/* try absorbing the adjacent free fragment if big enough */
		if (((char*)n < (char*)qm->last_frag_end) &&
				(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			/* join */
			qm_detach_free(qm, n);
			qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
			f->size+=n->size+FRAG_OVERHEAD;
			qm->used+=FRAG_OVERHEAD;
			FRAG_END(f)->size=f->size;
			/* end checks should be ok */
			/* split it if necessary */
			if (f->size > size ){
#ifdef DBG_QM_MALLOC
				split_frag(qm, f, size, file, "fragm. from qm_realloc",
					line);
#else
				split_frag(qm, f, size);
#endif
			}
		}else{
			/* could not join => realloc */
#ifdef DBG_QM_MALLOC
			ptr=qm_malloc(qm, size, file, func, line);
#else
			ptr=qm_malloc(qm, size);
#endif
			if (ptr) {
				/* copy, need by libssl */
				memcpy(ptr, p, orig_size);
#ifdef DBG_QM_MALLOC
				qm_free(qm, p, file, func, line);
#else
				qm_free(qm, p);
#endif
			}
			p=ptr;
		}
	}else{
		/* do nothing */
#ifdef DBG_QM_MALLOC
		LM_GEN1(memlog,"doing nothing, same size: %lu - %lu\n", f->size,
			size);
#endif
	}
#ifdef DBG_QM_MALLOC
	LM_GEN1(memlog,"returning %p\n", p);
#endif
	pkg_threshold_check();
	return p;
}

/* Dump allocator state to the memdump log: totals, every in-use fragment,
 * and per-hash free-list statistics (with a consistency check against the
 * stored free counters). Diagnostic only; does not modify the block. */
void qm_status(struct qm_block* qm)
{
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;

	LM_GEN1(memdump, "qm_status (%p):\n", qm);
	if (!qm) return;

	LM_GEN1(memdump, " heap size= %lu\n", qm->size);
	LM_GEN1(memdump, " used= %lu, used+overhead=%lu, free=%lu\n",
		qm->used, qm->real_used, qm->size-qm->real_used);
	LM_GEN1(memdump, " max used (+overhead)= %lu\n", qm->max_real_used);

	LM_GEN1(memdump, "dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LM_GEN1(memdump," %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i, (f->u.is_free)?'a':'N', (char*)f+sizeof(struct qm_frag),
				f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LM_GEN1(memdump, "            %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LM_GEN1(memdump, "        start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LM_GEN1(memdump, "dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		for (f=qm->free_hash[h].head.u.nxt_free,j=0;
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
			if (!FRAG_WAS_USED(f)){
				unused++;
#ifdef DBG_QM_MALLOC
				LM_GEN1(memdump, "unused fragm.: hash = %3d, fragment %p,"
					" address %p size %lu, created from %s: %s(%lu)\n",
					h, f, (char*)f+sizeof(struct qm_frag), f->size,
					f->file, f->func, f->line);
#endif
			}
		}

		if (j) LM_GEN1(memdump, "hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		/* cross-check the walked count against the stored counter */
		if (j!=qm->free_hash[h].no){
			LM_CRIT("different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}
	}
	LM_GEN1(memdump, "-----------------------------\n");
}

/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
{
	int r;
	long total_frags;

	total_frags=0;
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	/* sum the free counters of all hash buckets */
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	}
	info->total_frags=total_frags;
}

/* Walk every fragment in the block, running the debug checker on each;
 * returns the number of fragments visited. */
int qm_mem_check(struct qm_block *qm)
{
	struct qm_frag *f;
	int i = 0;

	for (f = qm->first_frag; (char *)f < (char *)qm->last_frag_end;
	     f = FRAG_NEXT(f), i++) {

		qm_debug_frag(qm, f);
	}

	LM_DBG("fragments: %d\n", i);

	return i;
}