Example No. 1
void vqm_status(struct vqm_block* qm)
{
	struct vqm_frag* f;
	unsigned int i,on_list;

	LOG(memlog, "vqm_status (%p):\n", qm);
	if (!qm) return;
	LOG(memlog, " heap size= %d, available: %d\n", 
		qm->core_end-qm->init_core, qm->free_core );
	
	LOG(memlog, "dumping unfreed fragments:\n");
	for (f=(struct vqm_frag*)qm->init_core, i=0;(char*)f<(char*)qm->core;
		f=FRAG_NEXT(f) ,i++) if ( FRAG_ISUSED(f) ) dump_frag(f, i);

	LOG(memlog, "dumping unfreed big fragments:\n");
	for (f=(struct vqm_frag*)qm->big_chunks,i=0;(char*)f<(char*)qm->core_end;
		f=FRAG_NEXT(f) ,i++) if ( FRAG_ISUSED(f) ) dump_frag( f, i );

#ifdef DBG_QM_MALLOC
	DBG("dumping bucket statistics:\n");
	for (i=0; i<=BIG_BUCKET(qm); i++) {
		for(on_list=0, f=qm->next_free[i]; f; f=f->u.nxt_free ) on_list++;
		LOG(L_DBG, "    %3d. bucket: in use: %ld, on free list: %d\n", 
			i, qm->usage[i], on_list );
	}
#endif
	LOG(memlog, "-----------------------------\n");
}
Example No. 2
void hp_pkg_free(struct hp_block *hpb, void *p)
{
	struct hp_frag *f, *next;

	if (!p) {
		LM_WARN("free(0) called\n");
		return;
	}

	f = FRAG_OF(p);

	/*
	 * for private memory, coalesce as many consecutive fragments as possible
	 * The same operation is not performed for shared memory, because:
	 *		- performance penalties introduced by additional locking logic
	 *		- the allocator itself actually favours fragmentation and reusage
	 */
	for (;;) {
		next = FRAG_NEXT(f);
		if (next >= hpb->last_frag || !next->prev)
			break;

		hp_frag_detach(hpb, next);
		update_stats_pkg_frag_detach(hpb, next);

		f->size += next->size + FRAG_OVERHEAD;
		update_stats_pkg_frag_merge(hpb);
	}

	hp_frag_attach(hpb, f);
	update_stats_pkg_frag_attach(hpb, f);
}
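
For orientation, here is a minimal sketch of the pointer arithmetic that FRAG_OF and FRAG_NEXT are assumed to perform in this allocator family: the user pointer sits immediately after a per-fragment header, and the next header starts right after the current payload. The struct and macro names below are illustrative, not the real hp_frag definitions.

/* illustrative layout only, not the actual hp_frag structure */
struct demo_frag {
	unsigned long size;       /* payload size in bytes */
	struct demo_frag *prev;   /* assumed non-NULL while the fragment is on a free list */
};

/* user pointer -> fragment header: step back over the header */
#define DEMO_FRAG_OF(p) \
	((struct demo_frag *)((char *)(p) - sizeof(struct demo_frag)))

/* fragment header -> next fragment header: skip header plus payload */
#define DEMO_FRAG_NEXT(f) \
	((struct demo_frag *)((char *)(f) + sizeof(struct demo_frag) + (f)->size))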
Example No. 3
void set_indexes(int core_index) {

	struct fm_frag* f;
	for (f=shm_block->first_frag; (char*)f<(char*)shm_block->last_frag; f=FRAG_NEXT(f))
		if (!f->is_free)
			f->statistic_index = core_index;
}
Example No. 4
void __shm_frag_split_unsafe(struct hp_block *hpb, struct hp_frag *frag,
							unsigned long size)
{
	unsigned long rest;
	struct hp_frag *n;

#ifdef HP_MALLOC_FAST_STATS
	hpb->free_hash[PEEK_HASH_RR(hpb, frag->size)].total_no--;
	hpb->free_hash[PEEK_HASH_RR(hpb, size)].total_no++;
#endif

	rest = frag->size - size;
	frag->size = size;

	/* split the fragment */
	n = FRAG_NEXT(frag);
	n->size = rest - FRAG_OVERHEAD;

#ifdef HP_MALLOC_FAST_STATS
	hpb->free_hash[PEEK_HASH_RR(hpb, n->size)].total_no++;
#endif

	hp_frag_attach(hpb, n);

	if (stats_are_ready())
		update_stats_shm_frag_attach(n);
	else {
		hpb->used -= n->size;
		hpb->real_used -= n->size;
	}
}
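
To make the split bookkeeping concrete, a small worked example, assuming FRAG_OVERHEAD == 32 purely for illustration (the real value is a build-dependent per-fragment header size):

/*
 * before the split:  frag->size = 256, requested size = 64
 *
 *   rest       = 256 - 64       = 192
 *   frag->size = 64                     (the part kept by the caller)
 *   n          = FRAG_NEXT(frag)        (starts right after frag's 64-byte payload)
 *   n->size    = 192 - 32       = 160   (one extra header is carved out of the rest)
 *
 * 64 + 32 + 160 = 256, so the original payload is fully accounted for,
 * and n goes back onto a free list via hp_frag_attach().
 */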
Example No. 5
 /* size should already be rounded-up */
void __shm_frag_split(struct hp_block *hpb, struct hp_frag *frag,
					 unsigned long size, unsigned int old_hash)
{
	unsigned long rest, hash;
	struct hp_frag *n;

#ifdef HP_MALLOC_FAST_STATS
	hpb->free_hash[PEEK_HASH_RR(hpb, frag->size)].total_no--;
	hpb->free_hash[PEEK_HASH_RR(hpb, size)].total_no++;
#endif

	rest = frag->size - size;
	frag->size = size;

	/* split the fragment */
	n = FRAG_NEXT(frag);
	n->size = rest - FRAG_OVERHEAD;

	/* insert the newly obtained hp_frag in its free list */
	hash = PEEK_HASH_RR(hpb, n->size);

	if (hash != old_hash)
		SHM_LOCK(hash);

	hp_frag_attach(hpb, n);

#ifdef HP_MALLOC_FAST_STATS
	hpb->free_hash[hash].total_no++;
#endif

	if (hash != old_hash)
		SHM_UNLOCK(hash);
}
Example No. 6
	void qm_sums(struct vqm_block* qm)
	{
		struct vqm_frag* f;
		int i;
		mem_counter *root=0,*x;
		lock_get(process_lock);
		if (process_no!=0)
			LOG(memlog, "vqm_sums (%p): PKG[%s]\n", qm,pt[process_no].desc);
		else 
			LOG(memlog, "vqm_sums (%p): PKG[0]/SHM \n",qm);
		if (!qm) return;
	
		LOG(memlog, "summarizing all alloc'ed. fragments:\n");
		
		for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f),i++){
			if (! f->u.is_free){
				x = get_mem_counter(&root,f);
				x->count++;
				x->size+=f->size;
			}
		}
		x = root;
		while(x){
			LOG(memlog, " count=%6d size=%10lu bytes from %s: %s(%ld)\n",
				x->count,x->size,
				x->file, x->func, x->line
				);
			root = x->next;
			free(x);
			x = root;
		}
		LOG(memlog, "-----------------------------\n");
		lock_release(process_lock);
	}
Example No. 7
void hp_stats_core_init(struct hp_block *hp, int core_index)
{
	struct hp_frag *f;

	for (f=hp->first_frag; (char*)f<(char*)hp->last_frag; f=FRAG_NEXT(f))
		if (!frag_is_free(f))
			f->statistic_index = core_index;
}
Example No. 8
#ifdef DBG_F_MALLOC
void fm_free(struct fm_block* qm, void* p, const char* file, const char* func,
				unsigned int line)
#else
void fm_free(struct fm_block* qm, void* p)
#endif
{
	struct fm_frag* f,*n;

	#ifdef DBG_F_MALLOC
	LM_DBG("params(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
	#endif
	if (p==0) {
		LM_DBG("free(0) called\n");
		return;
	}
	f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));

	#ifdef DBG_F_MALLOC
	LM_DBG("freeing block alloc'ed from %s: %s(%ld)\n", f->file, f->func,
			f->line);
	f->file=file;
	f->func=func;
	f->line=line;
	#endif

join:

	if( qm->large_limit < qm->large_space )
		goto no_join;

	n = FRAG_NEXT(f);

	if (((char*)n < (char*)qm->last_frag) &&  n->prev )
	{

		fm_remove_free(qm, n);
		/* join */
		f->size += n->size + FRAG_OVERHEAD;

		#if defined(DBG_F_MALLOC) || defined(STATISTICS)
		//qm->real_used -= FRAG_OVERHEAD;
		qm->used += FRAG_OVERHEAD;
		#endif

		goto join;
	}

no_join:

	fm_insert_free(qm, f);
#if defined(DBG_F_MALLOC) || defined(STATISTICS)
	qm->fragments -= 1;
#endif
	pkg_threshold_check();
}
Example No. 9
void __pkg_frag_split(struct hp_block *hpb, struct hp_frag *frag,
							 unsigned long size)
{
	unsigned long rest;
	struct hp_frag *n;

	rest = frag->size - size;
	frag->size = size;

	/* split the fragment */
	n = FRAG_NEXT(frag);
	n->size = rest - FRAG_OVERHEAD;

	hp_frag_attach(hpb, n);
	update_stats_pkg_frag_attach(hpb, n);
}
Example No. 10
#ifdef DBG_F_MALLOC
void* fm_realloc(struct fm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
#endif
{
	struct fm_frag *f;
	unsigned long diff;
	unsigned long orig_size;
	struct fm_frag *n;
	void *ptr;
	
	
	#ifdef DBG_F_MALLOC
	LM_DBG("params(%p, %p, %lu), called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
	#endif
	if (size==0) {
		if (p)
	#ifdef DBG_F_MALLOC
			fm_free(qm, p, file, func, line);
	#else
			fm_free(qm, p);
	#endif
		pkg_threshold_check();
		return 0;
	}
	if (p==0)
	#ifdef DBG_F_MALLOC
		return fm_malloc(qm, size, file, func, line);
	#else
		return fm_malloc(qm, size);
	#endif
	f=(struct fm_frag*) ((char*)p-sizeof(struct fm_frag));
	#ifdef DBG_F_MALLOC
	LM_DBG("realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
	#endif
	size=ROUNDUP(size);
	orig_size=f->size;
	if (f->size > size){
		/* shrink */
		#ifdef DBG_F_MALLOC
		LM_DBG("shrinking from %lu to %lu\n", f->size, size);
		fm_split_frag(qm, f, size, file, "frag. from fm_realloc", line);
		#else
		fm_split_frag(qm, f, size);
		#endif

	}else if (f->size<size){
		/* grow */
		
		#ifdef DBG_F_MALLOC
		LM_DBG("growing from %lu to %lu\n", f->size, size);
		#endif
		
		diff=size-f->size;
		n=FRAG_NEXT(f);
		
		if (((char*)n < (char*)qm->last_frag) &&  n->prev &&
		 ((n->size+FRAG_OVERHEAD)>=diff)){

			fm_remove_free(qm,n);
			/* join */
			f->size += n->size + FRAG_OVERHEAD;

			#if defined(DBG_F_MALLOC) || defined(STATISTICS)
			//qm->real_used -= FRAG_OVERHEAD;
			qm->used += FRAG_OVERHEAD;
			#endif

			/* split it if necessary */
			if (f->size > size){
				#ifdef DBG_F_MALLOC
				fm_split_frag(qm, f, size, file, "fragm. from fm_realloc",
						line);
				#else
				fm_split_frag(qm, f, size);
				#endif
			}
		}else{
			/* could not join => realloc */
			#ifdef DBG_F_MALLOC
			ptr=fm_malloc(qm, size, file, func, line);
			#else
			ptr = fm_malloc(qm, size);
			#endif
			if (ptr) {
				/* copy, needed by libssl */
				memcpy(ptr, p, orig_size);
				#ifdef DBG_F_MALLOC
				fm_free(qm, p, file, func, line);
				#else
				fm_free(qm, p);
				#endif
			}
			p = ptr;
		}
	}else{
		/* do nothing */
	#ifdef DBG_F_MALLOC
		LM_DBG("doing nothing, same size: %lu - %lu\n", f->size, size);
	#endif
	}
	#ifdef DBG_F_MALLOC
	LM_DBG("returning %p\n", p);
	#endif

	#if defined(DBG_F_MALLOC) || defined(STATISTICS)
	if (qm->max_real_used<qm->real_used)
		qm->max_real_used=qm->real_used;
	#endif

	pkg_threshold_check();
	return p;
}
Example No. 11
#ifdef DBG_F_MALLOC
void* fm_malloc(struct fm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* fm_malloc(struct fm_block* qm, unsigned long size)
#endif
{
	struct fm_frag* frag,*n;
	unsigned int hash;
		
	#ifdef DBG_F_MALLOC
	LM_DBG("params (%p, %lu), called from %s: %s(%d)\n", qm, size, file, func,
			line);
	#endif
	
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	
	/*search for a suitable free frag*/

	for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){
		frag=qm->free_hash[hash].first;
		for( ; frag; frag = frag->u.nxt_free )
			if ( frag->size >= size ) goto found;
		/* try in a bigger bucket */
	}
	/* not found, bad! */

	LM_WARN("Not enough free memory, will atempt defragmenation\n");

	for( frag = qm->first_frag; (char*)frag < (char*)qm->last_frag;  )
	{
		n = FRAG_NEXT(frag);

		if ( ((char*)n < (char*)qm->last_frag) &&  n->prev && frag->prev )
		{
			/* detach frag*/
			fm_remove_free(qm, frag);
			
			do
			{
				fm_remove_free(qm, n);
				frag->size += n->size + FRAG_OVERHEAD;

				#if defined(DBG_F_MALLOC) || defined(STATISTICS)
				//qm->real_used -= FRAG_OVERHEAD;
				qm->used += FRAG_OVERHEAD;
				#endif

				if( frag->size >size )
					goto solved;
				
				n = FRAG_NEXT(frag);
			}
			while
			( ((char*)n < (char*)qm->last_frag) &&  n->prev);

			fm_insert_free(qm,frag);
			
		}

		frag = n;
	}

	pkg_threshold_check();
	return 0;


		

found:
	/* we found it!*/
	
	fm_remove_free(qm,frag);
	
	/*see if we'll use full frag, or we'll split it in 2*/
	
	#ifdef DBG_F_MALLOC
	fm_split_frag(qm, frag, size, file, func, line);

	frag->file=file;
	frag->func=func;
	frag->line=line;
	frag->check=ST_CHECK_PATTERN;
	LM_DBG("params(%p, %lu), returns address %p \n", qm, size,
		(char*)frag+sizeof(struct fm_frag));
	#else
	fm_split_frag(qm, frag, size);
	#endif

solved:

	#if defined(DBG_F_MALLOC) || defined(STATISTICS)
	if (qm->max_real_used<qm->real_used)
		qm->max_real_used=qm->real_used;
	#endif

	pkg_threshold_check();
	return (char*)frag+sizeof(struct fm_frag);
}
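
A simplified sketch of the size rounding and bucket indexing that the free-list scan above relies on: requested sizes are rounded up to a ROUNDTO multiple and, in the small-size range, map directly to bucket size/ROUNDTO (the real GET_HASH additionally folds larger sizes into coarser buckets above the optimize threshold). Constants and macro names here are illustrative assumptions.

/* illustrative, simplified stand-ins for ROUNDUP()/GET_HASH() */
#define DEMO_ROUNDTO     8UL
#define DEMO_ROUNDUP(s)  (((s) + DEMO_ROUNDTO - 1) & ~(DEMO_ROUNDTO - 1))
/* valid only for the linear (small-size) part of the hash */
#define DEMO_GET_HASH(s) ((s) / DEMO_ROUNDTO)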
Example No. 12
#ifdef DBG_F_MALLOC
void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
					unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
					unsigned long size)
#endif
{
	unsigned long rest;
	struct fm_frag* n;

	rest=frag->size-size;
	#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+F_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
	#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
	#endif
		frag->size=size;
		/*split the fragment*/
		n=FRAG_NEXT(frag);
		n->size=rest-FRAG_OVERHEAD;

		/*
		 * The real used memory does not increase, as the frag memory is not
		 * freed from real_used. On the other hand, the used size should
		 * decrease, because the new fragment is not "useful data" - razvanc

		#if defined(DBG_F_MALLOC) || defined(STATISTICS)
		qm->real_used+=FRAG_OVERHEAD;
		#endif

		 */
		#if defined(DBG_F_MALLOC) || defined(STATISTICS)
		qm->used-=FRAG_OVERHEAD;
		#endif

		#ifdef DBG_F_MALLOC
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func="frag. from fm_malloc";
		n->line=line;
		n->check=ST_CHECK_PATTERN;
		#endif
		/* reinsert n in free list*/
		fm_insert_free(qm, n);
	}else{
		/* we cannot split this fragment any more => alloc all of it*/
	}
}



/* init malloc and return a fm_block*/
struct fm_block* fm_malloc_init(char* address, unsigned long size)
{
	char* start;
	char* end;
	struct fm_block* qm;
	unsigned long init_overhead;
	
	/* make address and size multiple of 8*/
	start=(char*)ROUNDUP((unsigned long) address);
	LM_DBG("F_OPTIMIZE=%lu, /ROUNDTO=%lu\n",
			F_MALLOC_OPTIMIZE, F_MALLOC_OPTIMIZE/ROUNDTO);
	LM_DBG("F_HASH_SIZE=%lu, fm_block size=%lu\n",
			F_HASH_SIZE, (long)sizeof(struct fm_block));
	LM_DBG("params (%p, %lu), start=%p\n", address, size, start);

	if (size<(unsigned long)(start-address)) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=(ROUNDUP(sizeof(struct fm_block))+ 2 * FRAG_OVERHEAD);
	
	
	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!!*/
		return 0;
	}
	end=start+size;
	qm=(struct fm_block*)start;
	memset(qm, 0, sizeof(struct fm_block));
	qm->size=size;

	#if defined(DBG_F_MALLOC) || defined(STATISTICS)

	qm->used=size-init_overhead;
	qm->real_used=size;
	qm->max_real_used=init_overhead;
	#endif
	
	qm->first_frag=(struct fm_frag*)(start+ROUNDUP(sizeof(struct fm_block)));
	qm->last_frag=(struct fm_frag*)(end-sizeof(struct fm_frag));
	/* init initial fragment*/
	qm->first_frag->size=size-init_overhead;
	qm->last_frag->size=0;

	qm->last_frag->prev=NULL;
	qm->first_frag->prev=NULL;
	
	#ifdef DBG_F_MALLOC
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag->check=END_CHECK_PATTERN1;
	#endif
	
	/* link initial fragment into the free list*/

	qm->large_space = 0;
	qm->large_limit = qm->size / 100 * F_MALLOC_DEFRAG_PERCENT;

	if( qm->large_limit < F_MALLOC_DEFRAG_LIMIT )
		qm->large_limit = F_MALLOC_DEFRAG_LIMIT;

	fm_insert_free(qm, qm->first_frag);
	
	
	return qm;
}
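
A minimal usage sketch for the initializer above, assuming the non-debug fm_malloc()/fm_free() signatures shown in the other examples; the static arena and request size are illustrative only.

static char demo_arena[1 << 20];	/* 1 MB backing buffer, illustrative */

void fm_demo(void)
{
	struct fm_block *qm = fm_malloc_init(demo_arena, sizeof(demo_arena));
	if (!qm)
		return;				/* buffer too small for the control structures */

	void *p = fm_malloc(qm, 100);		/* rounded up internally to a ROUNDTO multiple */
	if (p)
		fm_free(qm, p);			/* fragment is returned to a free-list bucket */
}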
Example No. 13
void *hp_pkg_realloc(struct hp_block *hpb, void *p, unsigned long size)
{
	struct hp_frag *f;
	unsigned long diff;
	unsigned long orig_size;
	struct hp_frag *next;
	void *ptr;
	
	if (size == 0) {
		if (p)
			hp_pkg_free(hpb, p);

		return NULL;
	}

	if (!p)
		return hp_pkg_malloc(hpb, size);

	f = FRAG_OF(p);

	size = ROUNDUP(size);
	orig_size = f->size;

	/* shrink operation */
	if (orig_size > size) {
		pkg_frag_split(hpb, f, size);

	/* grow operation */
	} else if (orig_size < size) {
		
		diff = size - orig_size;
		next = FRAG_NEXT(f);

		/* try to join with a large enough adjacent free fragment */
		if (next < hpb->last_frag && next->prev &&
		    (next->size + FRAG_OVERHEAD) >= diff) {

			hp_frag_detach(hpb, next);
			update_stats_pkg_frag_detach(hpb, next);

			f->size += next->size + FRAG_OVERHEAD;

			/* split the result if necessary */
			if (f->size > size)
				pkg_frag_split(hpb, f, size);

		} else {
			/* could not join => realloc */
			ptr = hp_pkg_malloc(hpb, size);
			if (ptr) {
				/* copy, needed by libssl */
				memcpy(ptr, p, orig_size);
				hp_pkg_free(hpb, p);
			}
			p = ptr;
		}

		if (hpb->real_used > hpb->max_real_used)
			hpb->max_real_used = hpb->real_used;
	}

	pkg_threshold_check();
	return p;
}
Example No. 14
void fm_status(struct fm_block* qm)
{
    struct fm_frag* f;
    unsigned int i,j;
    unsigned int h;
    int unused;
    unsigned long size;

#ifdef DBG_MALLOC
    mem_dbg_htable_t allocd;
    struct mem_dbg_entry *it;
#endif

    LM_GEN1(memdump, "fm_status (%p):\n", qm);
    if (!qm) return;

    LM_GEN1(memdump, " heap size= %ld\n", qm->size);
#if defined(DBG_MALLOC) || defined(STATISTICS)
    LM_GEN1(memdump, " used= %lu, used+overhead=%lu, free=%lu\n",
            qm->used, qm->real_used, qm->size-qm->used);
    LM_GEN1(memdump, " max used (+overhead)= %lu\n", qm->max_real_used);
#endif

#if defined(DBG_MALLOC)
    dbg_ht_init(allocd);

    for (f=qm->first_frag; (char*)f<(char*)qm->last_frag; f=FRAG_NEXT(f))
        if (!f->is_free)
            if (dbg_ht_update(allocd, f->file, f->func, f->line, f->size) < 0) {
                LM_ERR("Unable to update alloc'ed. memory summary\n");
                dbg_ht_free(allocd);
                return;
            }

    LM_GEN1(memdump, " dumping summary of all alloc'ed. fragments:\n");
    for(i=0; i < DBG_HASH_SIZE; i++) {
        it = allocd[i];
        while (it) {
            LM_GEN1(memdump, " %10lu : %lu x [%s: %s, line %lu]\n",
                    it->size, it->no_fragments, it->file, it->func, it->line);
            it = it->next;
        }
    }

    dbg_ht_free(allocd);
#endif

    LM_GEN1(memdump, "dumping free list:\n");
    for(h=0,i=0,size=0; h<F_HASH_SIZE; h++) {
        unused=0;
        for (f=qm->free_hash[h].first,j=0; f;
                size+=f->size,f=f->u.nxt_free,i++,j++) { }
        if (j) LM_GEN1(memdump,"hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
                           " bucket size: %9lu - %9lu (first %9lu)\n",
                           h, j, unused, UN_HASH(h),
                           ((h<=F_MALLOC_OPTIMIZE/ROUNDTO)?1:2)* UN_HASH(h),
                           qm->free_hash[h].first->size
                          );
        if (j!=qm->free_hash[h].no) {
            LM_CRIT("different free frag. count: %d!=%ld"
                    " for hash %3d\n", j, qm->free_hash[h].no, h);
        }

    }
    LM_GEN1(memdump, "TOTAL: %6d free fragments = %6lu free bytes\n", i, size);
    LM_GEN1(memdump, "TOTAL: %ld large bytes\n", qm->large_space );
    LM_GEN1(memdump, "TOTAL: %u overhead\n", (unsigned int)FRAG_OVERHEAD );
    LM_GEN1(memdump, "-----------------------------\n");
}
Example No. 15
#ifdef DBG_MALLOC
void* fm_malloc(struct fm_block* qm, unsigned long size,
                const char* file, const char* func, unsigned int line)
#else
void* fm_malloc(struct fm_block* qm, unsigned long size)
#endif
{
    struct fm_frag* frag,*n;
    unsigned int hash;

#ifdef DBG_MALLOC
    LM_GEN1(memlog, "%s_malloc(%lu), called from %s: %s(%d)\n", qm->name, size, file, func,
            line);
#endif

    /*size must be a multiple of 8*/
    size=ROUNDUP(size);

    /*search for a suitable free frag*/

    for(hash=GET_HASH(size); hash<F_HASH_SIZE; hash++) {
        frag=qm->free_hash[hash].first;
        for( ; frag; frag = frag->u.nxt_free )
            if ( frag->size >= size ) goto found;
        /* try in a bigger bucket */
    }
    /* not found, bad! */

#if defined(DBG_MALLOC) || defined(STATISTICS)
    LM_ERR(oom_errorf, qm->name, qm->size - qm->real_used,
           qm->name[0] == 'p' ? "M" : "m");
    LM_INFO("attempting defragmentation... (need %lu bytes)\n", size);
#else
    LM_ERR(oom_nostats_errorf, qm->name, qm->name[0] == 'p' ? "M" : "m");
    LM_INFO("attempting defragmentation... (need %lu bytes)\n", size);
#endif

    for( frag = qm->first_frag; (char*)frag < (char*)qm->last_frag;  )
    {
        n = FRAG_NEXT(frag);

        if ( ((char*)n < (char*)qm->last_frag) &&  n->prev && frag->prev )
        {
            /* detach frag*/
            fm_remove_free(qm, frag);

            do
            {
                fm_remove_free(qm, n);
                frag->size += n->size + FRAG_OVERHEAD;

#if defined(DBG_MALLOC) || defined(STATISTICS)
                //qm->real_used -= FRAG_OVERHEAD;
                qm->used += FRAG_OVERHEAD;
#endif

                if( frag->size >size ) {
#ifdef DBG_MALLOC
                    /* mark it as "busy" */
                    frag->is_free = 0;
#endif

                    goto solved;
                }

                n = FRAG_NEXT(frag);
            }
            while
            ( ((char*)n < (char*)qm->last_frag) &&  n->prev);

            fm_insert_free(qm,frag);

        }

        frag = n;
    }

    LM_INFO("unable to alloc a big enough fragment!\n");
    pkg_threshold_check();
    return 0;


found:
    /* we found it!*/

    fm_remove_free(qm,frag);

#ifdef DBG_MALLOC
    /* mark it as "busy" */
    frag->is_free = 0;
#endif

    /*see if we'll use full frag, or we'll split it in 2*/

#ifdef DBG_MALLOC
    fm_split_frag(qm, frag, size, file, func, line);

    frag->file=file;
    frag->func=func;
    frag->line=line;
    frag->check=ST_CHECK_PATTERN;
    LM_GEN1(memlog, "%s_malloc(%lu), returns address %p\n", qm->name, size,
            (char*)frag+sizeof(struct fm_frag));
#else
    fm_split_frag(qm, frag, size);
#endif

solved:

#if defined(DBG_MALLOC) || defined(STATISTICS)
    if (qm->max_real_used<qm->real_used)
        qm->max_real_used=qm->real_used;
    qm->fragments += 1;
#endif

    pkg_threshold_check();
    return (char*)frag+sizeof(struct fm_frag);
}
Example No. 16
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;
	
	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
			/* we cannot split this fragment any more */
		return -1;
	}
}



#ifdef DBG_QM_MALLOC
void* qm_malloc(struct qm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(struct qm_block* qm, unsigned long size)
#endif
{
	struct qm_frag* f;
	int hash;
	
#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;

	list_cntr = 0;
	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				" -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}



#ifdef DBG_QM_MALLOC
void qm_free(struct qm_block* qm, void* p, const char* file, const char* func, 
				unsigned int line)
#else
void qm_free(struct qm_block* qm, void* p)
#endif
{
	struct qm_frag* f;
	struct qm_frag* prev;
	struct qm_frag* next;
	unsigned long size;

#ifdef DBG_QM_MALLOC
	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (p==0) {
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
		return;
	}
	prev=next=0;
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer,"
				" first free: %s: %s(%ld) - aborting\n",
				f->file, f->func, f->line);
		abort();
	}
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

#ifdef QM_JOIN_FREE
	/* join packets if possible*/
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
		/* join */
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, next);
#endif
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}
	
	if (f > qm->first_frag){
		prev=FRAG_PREV(f);
		/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
								sizeof(struct qm_frag_end))->size);*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, prev);
#endif
		if (prev->u.is_free){
			/*join*/
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	f->size=size;
	FRAG_END(f)->size=f->size;
#endif /* QM_JOIN_FREE*/
#ifdef DBG_QM_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
}



#ifdef DBG_QM_MALLOC
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
#endif
{
	struct qm_frag* f;
	unsigned long diff;
	unsigned long orig_size;
	struct qm_frag* n;
	void* ptr;
	
	
#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	
	if (size==0) {
		if (p)
#ifdef DBG_QM_MALLOC
			qm_free(qm, p, file, func, line);
#else
			qm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_QM_MALLOC
		return qm_malloc(qm, size, file, func, line);
#else
		return qm_malloc(qm, size);
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
				"pointer %p , fragment %p -- aborting\n", p, f);
		abort();
	}
#endif
	/* find first acceptable size */
	size=ROUNDUP(size);
	if (f->size > size){
		orig_size=f->size;
		/* shrink */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
		MDBG("qm_realloc : shrinked successful\n");
#else
		if(split_frag(qm, f, size)!=0){
#endif
			/* update used sizes: freed the split frag */
			qm->real_used-=(orig_size-f->size-FRAG_OVERHEAD);
			qm->used-=(orig_size-f->size);
		}
		
	}else if (f->size < size){
		/* grow */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
			orig_size=f->size;
			diff=size-f->size;
			n=FRAG_NEXT(f);
			if (((char*)n < (char*)qm->last_frag_end) && 
					(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
				/* join  */
				qm_detach_free(qm, n);
				qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
				f->size+=n->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				FRAG_END(f)->size=f->size;
				/* end checks should be ok */
				/* split it if necessary */
				if (f->size > size ){
	#ifdef DBG_QM_MALLOC
					split_frag(qm, f, size, file, "fragm. from qm_realloc",
										line);
	#else
					split_frag(qm, f, size);
	#endif
				}
				qm->real_used+=(f->size-orig_size);
				qm->used+=(f->size-orig_size);
			}else{
				/* could not join => realloc */
	#ifdef DBG_QM_MALLOC
				ptr=qm_malloc(qm, size, file, func, line);
	#else
				ptr=qm_malloc(qm, size);
	#endif
				if (ptr){
					/* copy, needed by libssl */
					memcpy(ptr, p, orig_size);
	#ifdef DBG_QM_MALLOC
					qm_free(qm, p, file, func, line);
	#else
					qm_free(qm, p);
	#endif
				}
				p=ptr;
			}
	}else{
		/* do nothing */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
				f->size, size);
#endif
	}
#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc: returning %p\n", p);
#endif
	return p;
}




void qm_status(struct qm_block* qm)
{
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;

	LOG(memlog, "qm_status (%p):\n", qm);
	if (!qm) return;

	LOG(memlog, " heap size= %lu\n", qm->size);
	LOG(memlog, " used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG(memlog, " max used (+overhead)= %lu\n", qm->max_real_used);
	
	LOG(memlog, "dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LOG(memlog, "    %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i, 
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LOG(memlog, "            %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LOG(memlog, "        start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LOG(memlog, "dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
				if (!FRAG_WAS_USED(f)){
					unused++;
#ifdef DBG_QM_MALLOC
					LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
					    h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);
#endif
				}
		}

		if (j) LOG(memlog, "hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}

	}
	LOG(memlog, "-----------------------------\n");
}


/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
{
	int r;
	long total_frags;
	
	total_frags=0;
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	}
	info->total_frags=total_frags;
}

/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(struct qm_block* qm)
{
	return qm->size-qm->real_used;
}

#ifdef DBG_QM_MALLOC

	typedef struct _mem_counter{
		const char *file;
		const char *func;
		unsigned long line;
		
		unsigned long size;
		int count;
		
		struct _mem_counter *next;
	} mem_counter;
	
	mem_counter* get_mem_counter(mem_counter **root,struct qm_frag* f)
	{
		mem_counter *x;
		if (!*root) goto make_new;
		for(x=*root;x;x=x->next)
			if (x->file == f->file && x->func == f->func && x->line == f->line)
				return x;
	make_new:	
		x = malloc(sizeof(mem_counter));
		x->file = f->file;
		x->func = f->func;
		x->line = f->line;
		x->count = 0;
		x->size = 0;
		x->next = *root;
		*root = x;
		return x;
	}
	
	#include "../locking.h"
	#include "../pt.h"
	
	extern gen_lock_t* process_lock;
	extern struct process_table *pt;
	extern int process_no;
	
	void qm_sums(struct qm_block* qm)
	{
		struct qm_frag* f;
		int i;		
		int total_count=0;
		long unsigned int total_size=0;				
		int memlog=L_ERR;
		mem_counter *root=0,*x;
		//lock_get(process_lock);
		if (process_no!=0)
			LOG(memlog, "qm_sums (%p): PKG[%s]\n", qm,pt[process_no].desc);
		else 
			LOG(memlog, "qm_sums (%p): PKG[0]/SHM \n",qm);
		if (!qm) {
			//lock_release(process_lock);
			return;
		}
	
		LOG(memlog, "summarizing all alloc'ed. fragments:\n");
		
		for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f),i++){
			if (! f->u.is_free){
				x = get_mem_counter(&root,f);
				x->count++;
				x->size+=f->size;
			}
		}
		x = root;
		while(x){
			LOG(memlog, " count=%6d size=%10lu bytes from %s: %s(%ld)\n",
				x->count,x->size,
				x->file, x->func, x->line
				);
			total_count+=x->count;total_size+=x->size;				
			root = x->next;
			free(x);
			x = root;
		}
		LOG(memlog, " count=%6d size=%10lu bytes in total\n",total_count,total_size);
		LOG(memlog, "-----------------------------\n");
		//lock_release(process_lock);
	}
Example No. 17
#ifdef DBG_QM_MALLOC
void vqm_free(struct vqm_block* qm, void* p, char* file, char* func,
				unsigned int line)
#else
void vqm_free(struct vqm_block* qm, void* p)
#endif
{
	struct vqm_frag *f, *next, *prev, *first_big;
	unsigned char b;

#ifdef DBG_QM_MALLOC
	DBG("vqm_free(%p, %p), called from %s: %s(%d)\n", 
		qm, p, file, func, line);
	if (p>(void *)qm->core_end || p<(void*)qm->init_core){
		LOG(L_CRIT, "BUG: vqm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (p==0) {
		DBG("WARNING:vqm_free: free(0) called\n");
		return;
	}
	f=(struct  vqm_frag*) ((char*)p-sizeof(struct vqm_frag));
	b=f->u.inuse.bucket;
#ifdef DBG_QM_MALLOC
	VQM_DEBUG_FRAG(qm, f);
	if ( ! FRAG_ISUSED(f) ) {
		LOG(L_CRIT, "BUG: vqm_free: freeing already freed pointer,"
				" first freed: %s: %s(%d) - aborting\n",
				f->file, f->func, f->line);
		abort();
	}
	if ( b>MAX_BUCKET ) {
		LOG(L_CRIT, "BUG: vqm_free: fragment with too high bucket nr: "
				"%d, allocated: %s: %s(%d) - aborting\n",
				b, f->file, f->func, f->line); 
		abort();
	}
	DBG("vqm_free: freeing %d bucket block alloc'ed from %s: %s(%d)\n", 
		f->u.inuse.bucket, f->file, f->func, f->line);
	f->file=file; f->func=func; f->line=line;
	qm->usage[ f->u.inuse.bucket ]--;
#endif
	if (IS_BIGBUCKET(qm,b)) {
		next=FRAG_NEXT(f);
		if  ((char *)next +sizeof( struct vqm_frag) < qm->core_end) {
			VQM_DEBUG_FRAG(qm, next);
			if (! FRAG_ISUSED(next)) { /* coalesce with next fragment */
				DBG("vqm_free: coalesced with next\n");
				vqm_detach_free(qm, next);
				f->size+=next->size;
				FRAG_END(f)->size=f->size;
			}
		}
		first_big = qm->next_free[b];
		if (first_big &&  f>first_big) {
			prev=FRAG_PREV(f);
			VQM_DEBUG_FRAG(qm, prev);
			if (!FRAG_ISUSED(prev)) { /* coalesce with prev fragment */
				DBG("vqm_free: coalesced with prev\n");
				vqm_detach_free(qm, prev );
				prev->size+=f->size;
				f=prev;
				FRAG_END(f)->size=f->size;
			}
		}
		if ((char *)f==qm->big_chunks) { /* release unused core */
			DBG("vqm_free: big chunk released\n");
			qm->free_core+=f->size;
			qm->big_chunks+=f->size;
			return;
		}		
		first_big = qm->next_free[b];
		/* fix reverse link (used only for BIG_BUCKET) */
		if (first_big) FRAG_END(first_big)->prv_free=f;
		FRAG_END(f)->prv_free=0;
	} else first_big = qm->next_free[b];
	f->u.nxt_free = first_big; /* also clobbers magic */
	qm->next_free[b] = f;
}
Example No. 18
/**
 * on-demand memory fragmentation, based on an input pattern file
 */
int hp_mem_warming(struct hp_block *hpb)
{
	struct size_fraction {
		int hash_index;

		double amount;
		unsigned long fragments;

		struct size_fraction *next;
	};

	struct size_fraction *sf, *it, *sorted_sf = NULL;
	FILE *f;
	size_t rc;
	unsigned long roundto, hash_size;
	long long bucket_mem;
	int i, c = 0;
	unsigned int current_frag_size;
	struct hp_frag *big_frag;
	unsigned int optimized_buckets;

	f = fopen(mem_warming_pattern_file, "r");
	if (!f) {
		LM_ERR("failed to open pattern file %s: %d - %s\n",
		        mem_warming_pattern_file, errno, strerror(errno));
		return -1;
	}

	rc = fscanf(f, "%lu %lu\n", &roundto, &hash_size);
	if (rc != 2) {
		LM_ERR("failed to read from %s: bad file format\n",
		        mem_warming_pattern_file);
		goto out;
	}
	rc = 0;

	if (roundto != ROUNDTO || hash_size != HP_HASH_SIZE) {
		LM_ERR("incompatible pattern file data: [HP_HASH_SIZE: %lu-%lu] "
		       "[ROUNDTO: %lu-%lu]\n", hash_size, HP_HASH_SIZE, roundto, ROUNDTO);
		rc = -1;
		goto out;
	}

	/* read bucket usage percentages and sort them by number of fragments */
	for (i = 0; i < HP_LINEAR_HASH_SIZE; i++) {

		sf = malloc(sizeof *sf);
		if (!sf) {
			LM_INFO("malloc failed, skipping shm warming\n");
			rc = -1;
			goto out_free;
		}

		sf->hash_index = i;
		sf->next = NULL;

		if (fscanf(f, "%lf", &sf->amount) != 1) {
			LM_CRIT("%s appears to be corrupt. Please remove it first\n",
			         mem_warming_pattern_file);
			abort();
		}
		
		if (i == 0)
			sf->fragments = 0;
		else
			sf->fragments = sf->amount * hpb->size / (ROUNDTO * i);

		if (!sorted_sf)
			sorted_sf = sf;
		else {
			for (it = sorted_sf;
			     it->next && it->next->fragments > sf->fragments;
				 it = it->next)
				;

			if (it->fragments < sf->fragments) {
				sf->next = sorted_sf;
				sorted_sf = sf;
			} else {
				sf->next = it->next;
				it->next = sf;
			}
		}
	}

	/* only optimize the configured number of buckets */
	optimized_buckets = (float)shm_hash_split_percentage / 100 * HP_LINEAR_HASH_SIZE;

	LM_INFO("Optimizing %u / %lu mem buckets\n", optimized_buckets,
	         HP_LINEAR_HASH_SIZE);

	sf = sorted_sf;
	for (i = 0; i < optimized_buckets; i++) {
		hpb->free_hash[sf->hash_index].is_optimized = 1;
		sf = sf->next;
	}

	big_frag = hpb->first_frag;

	/* populate each free hash bucket with proper number of fragments */
	for (sf = sorted_sf; sf; sf = sf->next) {
		LM_INFO("[%d][%s] fraction: %.12lf total mem: %llu, %lu\n", sf->hash_index,
		         hpb->free_hash[sf->hash_index].is_optimized ? "X" : " ",
				 sf->amount, (unsigned long long) (sf->amount *
				 hpb->size * mem_warming_percentage / 100),
				 ROUNDTO * sf->hash_index);

		current_frag_size = ROUNDTO * sf->hash_index;
		bucket_mem = sf->amount * hpb->size * mem_warming_percentage / 100;

		/* create free fragments worth of 'bucket_mem' memory */
		while (bucket_mem >= FRAG_OVERHEAD + current_frag_size) {
			hp_frag_detach(hpb, big_frag);
			if (stats_are_ready())
				update_stats_shm_frag_detach(big_frag);
			else {
				hpb->used += big_frag->size;
				hpb->real_used += big_frag->size + FRAG_OVERHEAD;
			}

			/* trim-insert operation on the big free fragment */
			shm_frag_split_unsafe(hpb, big_frag, current_frag_size);

			/*
			 * "big_frag" now points to a smaller, free and detached frag.
			 *
			 * With optimized buckets, inserts will be automagically
			 * balanced within their dedicated hashes
			 */
			hp_frag_attach(hpb, big_frag);
			if (stats_are_ready())
				update_stats_shm_frag_attach(big_frag);
			else {
				hpb->used -= big_frag->size;
				hpb->real_used -= big_frag->size + FRAG_OVERHEAD;
			}

			big_frag = FRAG_NEXT(big_frag);

			bucket_mem -= FRAG_OVERHEAD + current_frag_size;

			if (c % 1000000 == 0)
				LM_INFO("%d| %lld %p\n", c, bucket_mem, big_frag);

			c++;
		}
	}

out_free:
	while (sorted_sf) {
		sf = sorted_sf;
		sorted_sf = sorted_sf->next;
		free(sf);
	}

out:
	fclose(f);
	return rc;
}
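
Judging by the fscanf() calls above, the warming pattern file starts with a "ROUNDTO HP_HASH_SIZE" header line, followed by one floating-point usage fraction per linear hash bucket. Below is a minimal sketch that writes a file of that shape with uniform fractions; the function name and all values are illustrative, and the header values must match the allocator's compile-time constants to be accepted.

#include <stdio.h>

int write_demo_pattern(const char *path, unsigned long roundto,
		unsigned long hash_size, unsigned long linear_buckets)
{
	FILE *f = fopen(path, "w");
	unsigned long i;

	if (!f)
		return -1;

	fprintf(f, "%lu %lu\n", roundto, hash_size);	/* must equal ROUNDTO and HP_HASH_SIZE */
	for (i = 0; i < linear_buckets; i++)		/* one fraction per linear bucket */
		fprintf(f, "%lf\n", 1.0 / linear_buckets);

	fclose(f);
	return 0;
}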
Example No. 19
#ifdef DBG_F_MALLOC
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void sfm_split_frag(struct sfm_block* qm, struct sfm_frag* frag,
					unsigned long size)
#endif
{
	unsigned long rest;
	struct sfm_frag* n;
	int bigger_rest;
	
	rest=frag->size-size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+SF_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
		bigger_rest=1;
#else
	if (rest>(FRAG_OVERHEAD+SF_MIN_FRAG_SIZE)){
		bigger_rest=rest>=(size+FRAG_OVERHEAD);
#endif
		frag->size=size;
		/*split the fragment*/
		n=FRAG_NEXT(frag);
		n->size=rest-FRAG_OVERHEAD;
		n->id=pool_id;
		FRAG_CLEAR_USED(n); /* never used */
#ifdef DBG_F_MALLOC
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func="frag. from sfm_malloc";
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		sfm_insert_free(qm, n, bigger_rest);
	}else{
		/* we cannot split this fragment any more => alloc all of it*/
	}
}



/* init malloc and return a sfm_block*/
struct sfm_block* sfm_malloc_init(char* address, unsigned long size, int type)
{
	char* start;
	char* end;
	struct sfm_block* qm;
	unsigned long init_overhead;
	int r;
#ifdef SFM_LOCK_PER_BUCKET
	int i;
#endif
	
	/* make address and size multiple of 8*/
	start=(char*)ROUNDUP((unsigned long) address);
	DBG("sfm_malloc_init: SF_OPTIMIZE=%lu, /SF_ROUNDTO=%lu\n",
			SF_MALLOC_OPTIMIZE, SF_MALLOC_OPTIMIZE/SF_ROUNDTO);
	DBG("sfm_malloc_init: SF_HASH_SIZE=%lu, sfm_block size=%lu\n",
			SF_HASH_SIZE, (long)sizeof(struct sfm_block));
	DBG("sfm_malloc_init(%p, %lu), start=%p\n", address, size, start);

	if (size<(unsigned long)(start-address)) return 0;
	size-=(start-address);
	if (size <(SF_MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	init_overhead=INIT_OVERHEAD;
	
	
	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!!*/
		return 0;
	}
	end=start+size;
	qm=(struct sfm_block*)start;
	memset(qm, 0, sizeof(struct sfm_block));
	qm->size=size;
	qm->type = type;
	size-=init_overhead;
	
	qm->first_frag=(struct sfm_frag*)(start+ROUNDUP(sizeof(struct sfm_block)));
	qm->last_frag=(struct sfm_frag*)(end-sizeof(struct sfm_frag));
	/* init initial fragment*/
	qm->first_frag->size=size;
	qm->first_frag->id=(unsigned long)-1; /* not in a pool */
	qm->last_frag->size=0;
	
#ifdef DBG_F_MALLOC
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag->check=END_CHECK_PATTERN1;
#endif
	
	/* link initial fragment into the free list*/
	
	sfm_insert_free(qm, qm->first_frag, 0);
	sfm_max_hash=GET_HASH(size);
	
	/* init locks */
	if (lock_init(&qm->get_and_split)==0)
		goto error;
#ifdef SFM_ONE_LOCK
	if (lock_init(&qm->lock)==0){
		lock_destroy(&qm->get_and_split);
		goto error;
	}
	for (r=0; r<SFM_POOLS_NO; r++){
		if (lock_init(&qm->pool[r].lock)==0){
			for (;r>0; r--) lock_destroy(&qm->pool[r-1].lock);
			lock_destroy(&qm->lock);
			lock_destroy(&qm->get_and_split);
			goto error;
		}
	}
#elif defined(SFM_LOCK_PER_BUCKET)
	for (r=0; r<SF_HASH_SIZE; r++)
		if (lock_init(&qm->free_hash[r].lock)==0){
			for(;r>0; r--) lock_destroy(&qm->free_hash[r-1].lock);
			lock_destroy(&qm->get_and_split);
			goto error;
		}
	for (i=0; i<SFM_POOLS_NO; i++){
		for (r=0; r<SF_HASH_POOL_SIZE; r++)
			if (lock_init(&qm->pool[i].pool_hash[r].lock)==0){
				for(;r>0; r--) lock_destroy(&qm->pool[i].pool_hash[r].lock);
				for(; i>0; i--){
					for (r=0; r<SF_HASH_POOL_SIZE; r++)
						lock_destroy(&qm->pool[i].pool_hash[r].lock);
				}
				for (r=0; r<SF_HASH_SIZE; r++)
					lock_destroy(&qm->free_hash[r].lock);
				lock_destroy(&qm->get_and_split);
				goto error;
			}
	}
#endif
	qm->is_init=1;
	return qm;
error:
	return 0;
}
Example No. 20
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;
	
	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
			/* we cannot split this fragment any more */
		return -1;
	}
}



#ifdef DBG_QM_MALLOC
void* qm_malloc(void* qmp, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(void* qmp, unsigned long size)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	int hash;
#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;
#endif

	qm = (struct qm_block*)qmp;
	
#ifdef DBG_QM_MALLOC
	list_cntr = 0;
	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*malloc(0) should return a valid pointer according to specs*/
	if(unlikely(size==0)) size=4;
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		qm->ffrags--;
		/* we ignore split return */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				" -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
#ifdef MALLOC_STATS
		if(qm->type==MEM_TYPE_PKG) {
			sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
		}
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}



#ifdef DBG_QM_MALLOC
void qm_free(void* qmp, void* p, const char* file, const char* func, 
				unsigned int line)
#else
void qm_free(void* qmp, void* p)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	unsigned long size;
#ifdef MEM_JOIN_FREE
	struct qm_frag* next;
	struct qm_frag* prev;
#endif /* MEM_JOIN_FREE*/

	qm = (struct qm_block*)qmp;

#ifdef DBG_QM_MALLOC
	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
#endif

	if (p==0) {
#ifdef DBG_QM_MALLOC
		LOG(L_WARN, "WARNING:qm_free: free(0) called from %s: %s(%d)\n", file, func, line);
#else
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
#endif
		return;
	}

#ifdef DBG_QM_MALLOC
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!)"
				" called from %s: %s(%d) - aborting\n", p, file, func, line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
#endif

	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));

#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer (%p),"
				" called from %s: %s(%d), first free %s: %s(%ld) - aborting\n",
				p, file, func, line, f->file, f->func, f->line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	if (unlikely(f->u.is_free)){
		LM_INFO("freeing a free fragment (%p/%p) - ignore\n",
				f, p);
		return;
	}

	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

#ifdef MEM_JOIN_FREE
	if(unlikely(cfg_get(core, core_cfg, mem_join)!=0)) {
		next=prev=0;
		/* mark this fragment as used (might fall into the middle of joined frags)
		  to give us an extra chance of detecting a double free call (if the joined
		  fragment has not yet been reused) */
		f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
		/* join packets if possible*/
		next=FRAG_NEXT(f);
		if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
			/* join next packet */
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, next);
#endif
			qm_detach_free(qm, next);
			size+=next->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
			qm->ffrags--;
		}
	
		if (f > qm->first_frag){
			prev=FRAG_PREV(f);
			/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
								sizeof(struct qm_frag_end))->size);*/
			if (prev->u.is_free){
				/* join prev packet */
#ifdef DBG_QM_MALLOC
				qm_debug_frag(qm, prev);
#endif
				qm_detach_free(qm, prev);
				size+=prev->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
				qm->ffrags--;
				f=prev;
			}
		}
		f->size=size;
		FRAG_END(f)->size=f->size;
	} /* if cfg_core->mem_join */
#endif /* MEM_JOIN_FREE*/
#ifdef DBG_QM_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
#ifdef MALLOC_STATS
	if(qm->type==MEM_TYPE_PKG) {
		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
	}
#endif
}



#ifdef DBG_QM_MALLOC
void* qm_realloc(void* qmp, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_realloc(void* qmp, void* p, unsigned long size)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	unsigned long diff;
	unsigned long orig_size;
	struct qm_frag* n;
	void* ptr;

	qm = (struct qm_block*)qmp;

#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	
	if (size==0) {
		if (p)
#ifdef DBG_QM_MALLOC
			qm_free(qm, p, file, func, line);
#else
			qm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_QM_MALLOC
		return qm_malloc(qm, size, file, func, line);
#else
		return qm_malloc(qm, size);
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
				"pointer %p , fragment %p -- aborting\n", p, f);
		abort();
	}
#endif
	/* find first acceptable size */
	size=ROUNDUP(size);
	if (f->size > size){
		orig_size=f->size;
		/* shrink */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
		MDBG("qm_realloc : shrinked successful\n");
#else
		if(split_frag(qm, f, size)!=0){
#endif
			/* update used sizes: freed the split frag */
			/* split frag already adds FRAG_OVERHEAD for the newly created
			   free frag, so here we only need orig_size-f->size for real used
			 */
			qm->real_used-=(orig_size-f->size);
			qm->used-=(orig_size-f->size);
		}
		
	}else if (f->size < size){
		/* grow */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
			orig_size=f->size;
			diff=size-f->size;
			n=FRAG_NEXT(f);
			if (((char*)n < (char*)qm->last_frag_end) && 
					(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
				/* join  */
				qm_detach_free(qm, n);
				qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
				qm->ffrags--;
				f->size+=n->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				FRAG_END(f)->size=f->size;
				/* end checks should be ok */
				/* split it if necessary */
				if (f->size > size ){
	#ifdef DBG_QM_MALLOC
					split_frag(qm, f, size, file, "fragm. from qm_realloc",
										line);
	#else
					split_frag(qm, f, size);
	#endif
				}
				qm->real_used+=(f->size-orig_size);
				qm->used+=(f->size-orig_size);
			}else{
				/* could not join => realloc */
	#ifdef DBG_QM_MALLOC
				ptr=qm_malloc(qm, size, file, func, line);
	#else
				ptr=qm_malloc(qm, size);
	#endif
				if (ptr){
					/* copy, needed by libssl */
					memcpy(ptr, p, orig_size);
				}
	#ifdef DBG_QM_MALLOC
				qm_free(qm, p, file, func, line);
	#else
				qm_free(qm, p);
	#endif
				p=ptr;
			}
	}else{
		/* do nothing */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
				f->size, size);
#endif
	}
#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc: returning %p\n", p);
#endif
#ifdef MALLOC_STATS
	if(qm->type==MEM_TYPE_PKG) {
		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
	}
#endif
	return p;
}


void qm_check(struct qm_block* qm)
{
	struct qm_frag* f;
	long fcount = 0;
	int memlog;
	
	memlog=cfg_get(core, core_cfg, memlog);
	LOG(memlog, "DEBUG: qm_check()\n");
	f = qm->first_frag;
	while ((char*)f < (char*)qm->last_frag_end) {
		fcount++;
		/* check struct qm_frag */
#ifdef DBG_QM_MALLOC
		if (f->check!=ST_CHECK_PATTERN){
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"beginning overwritten(%lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
					f->check);
			qm_status(qm);
			abort();
		};
#endif
		if ((char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > (char*)qm->first_frag + qm->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"bad size: %lu (frag end: %p > end of block: %p)\n",
				f, (char*)f + sizeof(struct qm_frag), f->size,
				(char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end),
				(char*)qm->first_frag + qm->size);
			qm_status(qm);
			abort();
		}
		/* check struct qm_frag_end */
		if (FRAG_END(f)->size != f->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
				f, (char*)f + sizeof(struct qm_frag),
				f->size, FRAG_END(f)->size);
			qm_status(qm);
			abort();
		}
#ifdef DBG_QM_MALLOC
		if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
			(FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
						" end overwritten(%lx, %lx)!\n",
					f, (char*)f + sizeof(struct qm_frag), 
					FRAG_END(f)->check1, FRAG_END(f)->check2);
			qm_status(qm);
			abort();
		}
#endif
		f = FRAG_NEXT(f);
	}

	LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
}

void qm_status(void* qmp)
{
	struct qm_block* qm;
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;
	int memlog;
	int mem_summary;

	qm = (struct qm_block*)qmp;

	memlog=cfg_get(core, core_cfg, memlog);
	mem_summary=cfg_get(core, core_cfg, mem_summary);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
	if (!qm) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
			qm->size);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"max used (+overhead)= %lu\n", qm->max_real_used);
	
	if (mem_summary & 16) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"   %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i,
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"          %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"         start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
				if (!FRAG_WAS_USED(f)){
					unused++;
#ifdef DBG_QM_MALLOC
					LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
						"unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
					    h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);
#endif
				}
		}

		if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				"hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}

	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"-----------------------------\n");
}


/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(void* qmp, struct mem_info* info)
{
	struct qm_block* qm;

	qm = (struct qm_block*)qmp;

	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	info->total_frags=qm->ffrags;
}


/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(void* qmp)
{
	struct qm_block* qm;

	qm = (struct qm_block*)qmp;

	return qm->size-qm->real_used;
}



#ifdef DBG_QM_MALLOC

typedef struct _mem_counter{
	const char *file;
	const char *func;
	unsigned long line;
	
	unsigned long size;
	int count;
	
	struct _mem_counter *next;
} mem_counter;

static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
{
	mem_counter *x;
	if (!*root) goto make_new;
	for(x=*root;x;x=x->next)
		if (x->file == f->file && x->func == f->func && x->line == f->line)
			return x;
make_new:	
	x = malloc(sizeof(mem_counter));
	if (!x) return 0;
	x->file = f->file;
	x->func = f->func;
	x->line = f->line;
	x->count = 0;
	x->size = 0;
	x->next = *root;
	*root = x;
	return x;
}



void qm_sums(void* qmp)
{
	struct qm_block* qm;
	struct qm_frag* f;
	int i;
	mem_counter *root, *x;
	int memlog;
	
	qm = (struct qm_block*)qmp;

	root=0;
	if (!qm) return;
	
	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"summarizing all alloc'ed. fragments:\n");
	
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
			f=FRAG_NEXT(f),i++){
		if (! f->u.is_free){
			x = get_mem_counter(&root,f);
			if (!x) continue;
			x->count++;
			x->size+=f->size;
		}
	}
	x = root;
	while(x){
		LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
				" count=%6d size=%10lu bytes from %s: %s(%ld)\n",
			x->count,x->size,
			x->file, x->func, x->line
			);
		root = x->next;
		free(x);
		x = root;
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"-----------------------------\n");
}
#else

void qm_sums(void* qm)
{
	return;
}
#endif /* DBG_QM_MALLOC */


/*memory manager core api*/
static char *_qm_mem_name = "q_malloc";

/* PKG - private memory API*/
static char *_qm_pkg_pool = 0;
static struct qm_block *_qm_pkg_block = 0;

/**
 * \brief Destroy memory pool
 */
void qm_malloc_destroy_pkg_manager(void)
{
	if (_qm_pkg_pool) {
		free(_qm_pkg_pool);
		_qm_pkg_pool = 0;
	}
	_qm_pkg_block = 0;
}

/**
 * \brief Init memory pool
 */
int qm_malloc_init_pkg_manager(void)
{
	sr_pkg_api_t ma;
	_qm_pkg_pool = malloc(pkg_mem_size);
	if (_qm_pkg_pool)
		_qm_pkg_block=qm_malloc_init(_qm_pkg_pool, pkg_mem_size, MEM_TYPE_PKG);
	if (_qm_pkg_block==0){
		LOG(L_CRIT, "could not initialize qm memory pool\n");
		fprintf(stderr, "Too much qm pkg memory demanded: %ld bytes\n",
						pkg_mem_size);
		return -1;
	}

	memset(&ma, 0, sizeof(sr_pkg_api_t));
	ma.mname = _qm_mem_name;
	ma.mem_pool = _qm_pkg_pool;
	ma.mem_block = _qm_pkg_block;
	ma.xmalloc = qm_malloc;
	ma.xfree = qm_free;
	ma.xrealloc = qm_realloc;
	ma.xstatus = qm_status;
	ma.xinfo = qm_info;
	ma.xavailable = qm_available;
	ma.xsums = qm_sums;
	ma.xdestroy = qm_malloc_destroy_pkg_manager;

	return pkg_init_api(&ma);
}
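
/*
 * Lifecycle sketch for the private (pkg) pool, assuming a non-debug build
 * (DBG_QM_MALLOC undefined) so qm_malloc()/qm_free() take only the block plus
 * size/pointer. pkg_demo() is a hypothetical name; because _qm_pkg_block is
 * file-static, such a helper would have to live in this translation unit.
 */
static int pkg_demo(void)
{
	void *p;

	if (qm_malloc_init_pkg_manager() < 0)
		return -1;

	p = qm_malloc(_qm_pkg_block, 128);   /* allocate from the private block */
	if (p)
		qm_free(_qm_pkg_block, p);

	qm_status(_qm_pkg_block);            /* dump fragments and free-list stats */
	qm_malloc_destroy_pkg_manager();     /* release the underlying pool */
	return 0;
}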


/* SHM - shared memory API*/
static void *_qm_shm_pool = 0;
static struct qm_block *_qm_shm_block = 0;

/*SHM wrappers to sync the access to memory block*/
#ifdef DBG_QM_MALLOC
void* qm_shm_malloc(void* qmp, unsigned long size,
					const char* file, const char* func, unsigned int line)
{
	void *r;
	shm_lock();
	r = qm_malloc(qmp, size, file, func, line);
	shm_unlock();
	return r;
}
#endif /* DBG_QM_MALLOC */
Exemplo n.º 21
0
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;
	
	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
			/* we cannot split this fragment any more */
		return -1;
	}
}
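
/*
 * Worked example of the split arithmetic above (illustration only; the real
 * FRAG_OVERHEAD is sizeof(struct qm_frag)+sizeof(struct qm_frag_end) and thus
 * build-dependent, 32 bytes is just an assumed value). Splitting a 1024-byte
 * free fragment for an already rounded-up 256-byte request:
 */
#include <stdio.h>

#define EXAMPLE_FRAG_OVERHEAD 32UL    /* assumed header+end size */

static void split_arithmetic_demo(void)
{
	unsigned long frag_size = 1024, new_size = 256;
	unsigned long rest = frag_size - new_size;              /* 768 */
	unsigned long leftover = rest - EXAMPLE_FRAG_OVERHEAD;  /* 736 payload for the new free frag */

	/* the split only happens when 'rest' also covers a new header + end */
	printf("kept=%lu, new free frag payload=%lu, overhead=%lu\n",
			new_size, leftover, EXAMPLE_FRAG_OVERHEAD);
}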



#ifdef DBG_QM_MALLOC
void* qm_malloc(struct qm_block* qm, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(struct qm_block* qm, unsigned long size)
#endif
{
	struct qm_frag* f;
	int hash;
	
#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;

	list_cntr = 0;
	LM_GEN1( memlog, "params (%p, %lu), called from %s: %s(%d)\n",
		qm, size, file, func, line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		/* we ignore split return */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		LM_GEN1( memlog, "params (%p, %lu), returns address %p frag. %p "
			"(size=%lu) on %d -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}
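
/*
 * The "size must be a multiple of 8" step above relies on the allocator's
 * ROUNDUP()/ROUNDTO macros defined elsewhere in this file. A common shape for
 * such a macro, shown purely as an assumption for illustration:
 */
#define EXAMPLE_ROUNDTO    8UL
#define EXAMPLE_ROUNDUP(s) (((s) + EXAMPLE_ROUNDTO - 1) & ~(EXAMPLE_ROUNDTO - 1))
/* EXAMPLE_ROUNDUP(1) == 8, EXAMPLE_ROUNDUP(8) == 8, EXAMPLE_ROUNDUP(13) == 16 */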



#ifdef DBG_QM_MALLOC
void qm_free(struct qm_block* qm, void* p, const char* file, const char* func, 
				unsigned int line)
#else
void qm_free(struct qm_block* qm, void* p)
#endif
{
	struct qm_frag* f;
	unsigned long size;
#ifdef QM_JOIN_FREE
	struct qm_frag* next;
	struct qm_frag* prev;
#endif

#ifdef DBG_QM_MALLOC
	LM_GEN1( memlog, "params(%p, %p), called from %s: %s(%d)\n",
		qm, p, file, func, line);
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	if (p==0) {
		LM_WARN("free(0) called\n");
		return;
	}
#ifdef QM_JOIN_FREE
	prev=next=0;
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	if (f->u.is_free){
		LM_CRIT("freeing already freed pointer,"
				" first free: %s: %s(%ld) - aborting\n",
				f->file, f->func, f->line);
		abort();
	}
	LM_GEN1( memlog, "freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

#ifdef QM_JOIN_FREE
	/* join neighbouring free fragments if possible */
	next=FRAG_NEXT(f);
	if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){
		/* join */
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, next);
#endif
		qm_detach_free(qm, next);
		size+=next->size+FRAG_OVERHEAD;
		qm->real_used-=FRAG_OVERHEAD;
		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
	}
	
	if (f > qm->first_frag){
		prev=FRAG_PREV(f);
		/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
								sizeof(struct qm_frag_end))->size);*/
#ifdef DBG_QM_MALLOC
		qm_debug_frag(qm, prev);
#endif
		if (prev->u.is_free){
			/*join*/
			qm_detach_free(qm, prev);
			size+=prev->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
			f=prev;
		}
	}
	f->size=size;
	FRAG_END(f)->size=f->size;
#endif /* QM_JOIN_FREE*/
#ifdef DBG_QM_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
}
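
/*
 * Coalescing arithmetic behind the QM_JOIN_FREE branch above, as a plain
 * illustration: when a freed fragment is merged with a free neighbour, the
 * neighbour's header becomes payload, mirroring size += next->size +
 * FRAG_OVERHEAD. The overhead value below is an assumption for the example.
 */
static unsigned long joined_size_demo(void)
{
	const unsigned long overhead = 32;      /* assumed FRAG_OVERHEAD */
	unsigned long freed = 200, next_free = 120;

	return freed + next_free + overhead;    /* 352 with the assumed overhead */
}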



#ifdef DBG_QM_MALLOC
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_realloc(struct qm_block* qm, void* p, unsigned long size)
#endif
{
	struct qm_frag* f;
	unsigned long diff;
	unsigned long orig_size;
	struct qm_frag* n;
	void* ptr;
	
	
#ifdef DBG_QM_MALLOC
	LM_GEN1( memlog, "params (%p, %p, %lu), called from %s: %s(%d)\n",
		qm, p, size, file, func, line);
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LM_CRIT("bad pointer %p (out of memory block!) - aborting\n", p);
		abort();
	}
#endif
	
	if (size==0) {
		if (p)
#ifdef DBG_QM_MALLOC
			qm_free(qm, p, file, func, line);
#else
			qm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_QM_MALLOC
		return qm_malloc(qm, size, file, func, line);
#else
		return qm_malloc(qm, size);
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	LM_GEN1( memlog, "realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
	if (f->u.is_free){
		LM_CRIT("trying to realloc an already freed "
				"pointer %p , fragment %p -- aborting\n", p, f);
		abort();
	}
#endif
	/* find first acceptable size */
	size=ROUNDUP(size);
	if (f->size > size){
		orig_size=f->size;
		/* shrink */
#ifdef DBG_QM_MALLOC
		LM_GEN1(memlog,"shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
		LM_GEN1(memlog,"shrinked successful\n");
#else
		if(split_frag(qm, f, size)!=0){
#endif
			/* update used sizes: freed the split-off frag */
			qm->real_used-=(orig_size-f->size-FRAG_OVERHEAD);
			qm->used-=(orig_size-f->size);
		}
		
	}else if (f->size < size){
		/* grow */
#ifdef DBG_QM_MALLOC
		LM_GEN1( memlog, "growing from %lu to %lu\n", f->size, size);
#endif
			orig_size=f->size;
			diff=size-f->size;
			n=FRAG_NEXT(f);
			if (((char*)n < (char*)qm->last_frag_end) && 
					(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
				/* join  */
				qm_detach_free(qm, n);
				qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
				f->size+=n->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				FRAG_END(f)->size=f->size;
				/* end checks should be ok */
				/* split it if necessary */
				if (f->size > size ){
	#ifdef DBG_QM_MALLOC
					split_frag(qm, f, size, file, "fragm. from qm_realloc",
										line);
	#else
					split_frag(qm, f, size);
	#endif
				}
				qm->real_used+=(f->size-orig_size);
				qm->used+=(f->size-orig_size);
			}else{
				/* could not join => realloc */
	#ifdef DBG_QM_MALLOC
				ptr=qm_malloc(qm, size, file, func, line);
	#else
				ptr=qm_malloc(qm, size);
	#endif
				if (ptr) {
					/* copy, needed by libssl */
					memcpy(ptr, p, orig_size);
	#ifdef DBG_QM_MALLOC
					qm_free(qm, p, file, func, line);
	#else
					qm_free(qm, p);
	#endif
				}
				p=ptr;
			}
	}else{
		/* do nothing */
#ifdef DBG_QM_MALLOC
		LM_GEN1(memlog,"doing nothing, same size: %lu - %lu\n", f->size, size);
#endif
	}
#ifdef DBG_QM_MALLOC
	LM_GEN1(memlog,"returning %p\n", p);
#endif
	return p;
}
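
/*
 * Usage sketch for the realloc semantics above (illustration only): assumes a
 * non-debug build so the short signatures apply, the allocator header for
 * struct qm_block, and an already initialized block passed in by the caller;
 * realloc_semantics_demo() is a made-up name.
 */
static void realloc_semantics_demo(struct qm_block *blk)
{
	void *p = qm_malloc(blk, 100);

	if (!p)
		return;
	p = qm_realloc(blk, p, 300);    /* grow: join next free frag, else malloc+copy+free */
	if (p)
		p = qm_realloc(blk, p, 40); /* shrink: split the fragment in place */
	if (p)
		qm_realloc(blk, p, 0);      /* size 0 frees p and returns NULL */
	p = qm_realloc(blk, 0, 64);     /* NULL input behaves like qm_malloc(blk, 64) */
	if (p)
		qm_free(blk, p);
}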




void qm_status(struct qm_block* qm)
{
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;

	LM_GEN1(memdump, "qm_status (%p):\n", qm);
	if (!qm) return;

	LM_GEN1(memdump, " heap size= %lu\n", qm->size);
	LM_GEN1(memdump, " used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LM_GEN1(memdump, " max used (+overhead)= %lu\n", qm->max_real_used);
	
	LM_GEN1(memdump, "dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LM_GEN1(memdump,"    %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i, 
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LM_GEN1(memdump, "            %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LM_GEN1(memdump, "        start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LM_GEN1(memdump, "dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
				if (!FRAG_WAS_USED(f)){
					unused++;
#ifdef DBG_QM_MALLOC
					LM_GEN1(memdump, "unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
					    h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);
#endif
				}
		}

		if (j) LM_GEN1(memdump, "hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		if (j!=qm->free_hash[h].no){
			LM_CRIT("different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}

	}
	LM_GEN1(memdump, "-----------------------------\n");
}


/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(struct qm_block* qm, struct mem_info* info)
{
	int r;
	long total_frags;
	
	total_frags=0;
	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	for(r=0;r<QM_HASH_SIZE; r++){
		total_frags+=qm->free_hash[r].no;
	}
	info->total_frags=total_frags;
}
Exemplo n.º 22
0
#ifdef DBG_F_MALLOC
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size)
#endif
{
	struct sfm_frag *f;
	unsigned long orig_size;
	void *ptr;
#ifndef SFM_REALLOC_REMALLOC
	struct sfm_frag *n;
	struct sfm_frag **pf;
	unsigned long diff;
	unsigned long p_id;
	int hash;
	unsigned long n_size;
	struct sfm_pool * pool;
#endif
	
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (size==0) {
		if (p)
#ifdef DBG_F_MALLOC
			sfm_free(qm, p, file, func, line);
#else
			sfm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_F_MALLOC
		return sfm_malloc(qm, size, file, func, line);
#else
		return sfm_malloc(qm, size);
#endif
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	size=ROUNDUP(size);
	orig_size=f->size;
	if (f->size > size){
		/* shrink */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size);
		sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line);
#else
		sfm_split_frag(qm, f, size);
#endif
	}else if (f->size<size){
		/* grow */
#ifdef DBG_F_MALLOC
		MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
#ifndef SFM_REALLOC_REMALLOC
/* should set a magic value in list head and in push/pop if magic value =>
 * lock and wait */
#error LL_MALLOC realloc not finished yet
		diff=size-f->size;
		n=FRAG_NEXT(f);
		if (((char*)n < (char*)qm->last_frag) && 
				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
			/* join  */
			/* detach n from the free list */
try_again:
			p_id=n->id;
			n_size=n->size;
			if ((unlikely(p_id >=SFM_POOLS_NO))){
				hash=GET_HASH(n_size);
				SFM_MAIN_HASH_LOCK(qm, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){ 
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either 
					 * moved to another pool or has a diff. size */
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					goto try_again;
				}
				pf=&(qm->free_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
				if (*pf==0){
					SFM_MAIN_HASH_UNLOCK(qm, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							    "free " "list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				qm->free_hash[hash].no--;
				SFM_MAIN_HASH_UNLOCK(qm, hash);
				/* join */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
			#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
									"sfm_realloc", line);
			#else
					sfm_split_frag(qm, f, size);
			#endif
				}
			}else{ /* p_id < SFM_POOLS_NO (=> in a pool )*/
				hash=GET_SMALL_HASH(n_size);
				pool=&qm->pool[p_id];
				SFM_POOL_LOCK(pool, hash);
				if (unlikely((n->u.nxt_free==0) ||
							((n->size+FRAG_OVERHEAD)<diff))){
					SFM_POOL_UNLOCK(pool, hash);
					goto not_found;
				}
				if (unlikely((n->id!=p_id) || (n->size!=n_size))){
					/* fragment still free, but changed, either 
					 * moved to another pool or has a diff. size */
					SFM_POOL_UNLOCK(pool, hash);
					goto try_again;
				}
				pf=&(pool->pool_hash[hash].first);
				/* find it */
				for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */
				if (*pf==0){
					SFM_POOL_UNLOCK(pool, hash);
					/* not found, bad! */
					LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in "
							    "free " "list (hash=%d)\n", n, hash);
					/* somebody is in the process of changing it ? */
					goto not_found;
				}
				/* detach */
				*pf=n->u.nxt_free;
				n->u.nxt_free=0; /* mark it immediately as detached */
				pool->pool_hash[hash].no--;
				SFM_POOL_UNLOCK(pool, hash);
				/* join */
				f->size+=n->size+FRAG_OVERHEAD;
				/* split it if necessary */
				if (f->size > size){
			#ifdef DBG_F_MALLOC
					sfm_split_frag(qm, f, size, file, "fragm. from "
									"sfm_realloc", line);
			#else
					sfm_split_frag(qm, f, size);
			#endif
				}
			}
		}else{
not_found:
			/* could not join => realloc */
#else/* SFM_REALLOC_REMALLOC */ 
		{
#endif /* SFM_REALLOC_REMALLOC */
	#ifdef DBG_F_MALLOC
			ptr=sfm_malloc(qm, size, file, func, line);
	#else
			ptr=sfm_malloc(qm, size);
	#endif
			if (ptr){
				/* copy, needed by libssl */
				memcpy(ptr, p, orig_size);
	#ifdef DBG_F_MALLOC
				sfm_free(qm, p, file, func, line);
	#else
				sfm_free(qm, p);
	#endif
			}
			p=ptr;
		}
	}else{