Example No. 1
void shm_event_raise(long used, long size, long perc)
{
	evi_params_p list = 0;

	*event_shm_pending = 1;
	*event_shm_last = perc;

	// event has to be triggered - check for subscribers
	if (!evi_probe_event(EVI_SHM_THRESHOLD_ID)) {
		goto end;
	}

	if (!(list = evi_get_params()))
		goto end;
	if (evi_param_add_int(list, &shm_usage_str, (int *)&perc)) {
		LM_ERR("unable to add usage parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_threshold_str, (int *)&event_shm_threshold)) {
		LM_ERR("unable to add threshold parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_used_str, (int *)&used)) {
		LM_ERR("unable to add used parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_size_str, (int *)&size)) {
		LM_ERR("unable to add size parameter\n");
		goto end;
	}

	/*
	 * event has to be raised without the lock otherwise a deadlock will be
	 * generated by the transport modules, or by the event_route processing
	 */
#ifdef HP_MALLOC
	shm_unlock(0);
#else
	shm_unlock();
#endif

	if (evi_raise_event(EVI_SHM_THRESHOLD_ID, list)) {
		LM_ERR("unable to send shm threshold event\n");
	}

#ifdef HP_MALLOC
	shm_lock(0);
#else
	shm_lock();
#endif

	list = 0;
end:
	if (list)
		evi_free_params(list);
	*event_shm_pending = 0;
}
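
A detail worth noting in example 1: the shared-memory lock is released before evi_raise_event() and re-acquired afterwards, because the transport modules or an event_route handler may call back into the allocator and deadlock on the same lock. A minimal sketch of that unlock-before-callback shape with a plain pthread mutex (report_usage, notify_subscribers and g_lock are illustrative names, not OpenSIPS APIs):

#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long g_used;                 /* protected by g_lock */

/* hypothetical callback that may log, allocate, re-enter the lock, ... */
extern void notify_subscribers(unsigned long used_snapshot);

void report_usage(unsigned long used)
{
    unsigned long snapshot;

    pthread_mutex_lock(&g_lock);
    g_used = used;
    snapshot = g_used;               /* copy out what the callback needs */
    pthread_mutex_unlock(&g_lock);   /* drop the lock: the callback may re-enter */

    notify_subscribers(snapshot);    /* raised without the lock, so no deadlock */

    pthread_mutex_lock(&g_lock);
    /* ... continue updating state that needs the lock ... */
    pthread_mutex_unlock(&g_lock);
}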
Example No. 2
void qm_shm_info(void* qmp, struct mem_info* info)
{
	shm_lock();
	qm_info(qmp, info);
	shm_unlock();
}
Example No. 3
bool
SHMSrc::resize_area()
{
    while ((sizeof(SHMHeader) + shm_area_->buffer_size) > shm_area_len_) {
        size_t new_size = sizeof(SHMHeader) + shm_area_->buffer_size;

        shm_unlock();
        if (munmap(shm_area_, shm_area_len_)) {
            std::cerr << "Could not unmap shared area" << std::endl;
            perror(strerror(errno));
            return false;
        }

        shm_area_ = static_cast<SHMHeader*>(mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
        shm_area_len_ = new_size;

        if (shm_area_ == MAP_FAILED) { // mmap() reports failure as MAP_FAILED, not NULL
            shm_area_ = 0;
            std::cerr << "Could not remap shared area" << std::endl;
            return false;
        }

        shm_lock();
    }
    return true;
}
Example No. 4
bool SHMSink::resize_area(size_t desired_length)
{
    if (desired_length <= shm_area_len_)
        return true;

    shm_unlock();

    if (munmap(shm_area_, shm_area_len_)) {
        ERROR("Could not unmap shared area");
        strErr();
        return false;
    }

    if (ftruncate(fd_, desired_length)) {
        ERROR("Could not resize shared area");
        strErr();
        return false;
    }

    shm_area_ = static_cast<SHMHeader*>(mmap(NULL, desired_length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
    shm_area_len_ = desired_length;

    if (shm_area_ == MAP_FAILED) {
        shm_area_ = 0;
        ERROR("Could not remap shared area");
        return false;
    }

    shm_lock();
    return true;
}
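
Examples 3 and 4 grow a memory-mapped segment by unmapping it, enlarging the backing descriptor with ftruncate(), and mapping it again, checking the new mapping against MAP_FAILED. A reduced sketch of the same sequence with plain POSIX calls, assuming fd came from shm_open() and collapsing error handling to returning -1:

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <stddef.h>

/* Remap *area (currently *len bytes) to new_len bytes backed by fd.
 * Returns 0 on success, -1 on failure (in which case *area is unusable). */
static int remap_shared(int fd, void **area, size_t *len, size_t new_len)
{
    if (new_len <= *len)
        return 0;                         /* already large enough */

    if (munmap(*area, *len))              /* the old mapping must go first */
        return -1;

    if (ftruncate(fd, (off_t)new_len))    /* grow the backing shared object */
        return -1;

    *area = mmap(NULL, new_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (*area == MAP_FAILED) {            /* mmap reports failure as MAP_FAILED */
        *area = NULL;
        return -1;
    }

    *len = new_len;
    return 0;
}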
Example No. 5
void qm_shm_free(void* qmp, void* p, const char* file, const char* func,
				unsigned int line)
{
	shm_lock();
	qm_free(qmp, p, file, func, line);
	shm_unlock();
}
Example No. 6
int shm_callback(int mode, struct kern_ipc_perm *ipc, struct ipc_namespace *ns)
{
    switch(mode) {
        case 0:
            printf("key        shmid      owner      perms      bytes      nattch     status \n");
            break;
        case 1:
            if(id==-1 || id==ipc->id) {
                struct shmid_kernel *shp = shm_lock(ns, ipc->id);
                printf("0x%08x %-10d %-10d %-10o %-10ld %-10ld %-6s %-6s\n",
                    ipc->key,
                    ipc->id,
                    ipc->uid,
                    ipc->mode & 0777,
                    shp->shm_segsz,
                    shp->shm_nattch,
                    ipc->mode & SHM_DEST ? "dest" : " ",
                    ipc->mode & SHM_LOCKED ? "locked" : " ");
                shm_unlock(shp); /* shm_lock() returns the segment locked; release it */
            }
            break;
        case 2:
            break;
    }
    return 1;
}
Example No. 7
unsigned long shm_available_safe()
{
	unsigned long ret;
	shm_lock();
	ret = shm_available();
	shm_unlock();
	return ret;
}
Example No. 8
void* qm_shm_realloc(void* qmp, void* p, unsigned long size)
{
	void *r;
	shm_lock();
	r = qm_realloc(qmp, p, size);
	shm_unlock();
	return r;
}
Example No. 9
unsigned long qm_shm_available(void* qmp)
{
	unsigned long r;
	shm_lock();
	r = qm_available(qmp);
	shm_unlock();
	return r;
}
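
Examples 7 to 9 (and most of the qm_shm_* functions in this listing) are one and the same idiom: take the single shared-memory lock, call the lock-free allocator primitive, release the lock, return the result. A sketch of that wrapper idiom around a hypothetical unsafe_alloc()/unsafe_available() pair, with a pthread mutex standing in for shm_lock(); in a real multi-process setup the mutex would have to live in shared memory and be created with the PTHREAD_PROCESS_SHARED attribute:

#include <pthread.h>
#include <stddef.h>

/* hypothetical single-threaded allocator primitives */
extern void  *unsafe_alloc(size_t size);
extern size_t unsafe_available(void);

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

void *safe_alloc(size_t size)
{
    void *r;
    pthread_mutex_lock(&alloc_lock);     /* serialize access to the allocator */
    r = unsafe_alloc(size);
    pthread_mutex_unlock(&alloc_lock);
    return r;
}

size_t safe_available(void)
{
    size_t r;
    pthread_mutex_lock(&alloc_lock);
    r = unsafe_available();
    pthread_mutex_unlock(&alloc_lock);
    return r;
}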
Example No. 10
void* qm_shm_realloc(void* qmp, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
{
	void *r;
	shm_lock();
	r = qm_realloc(qmp, p, size, file, func, line);
	shm_unlock();
	return r;
}
Example No. 11
inline static void* sh_realloc(void* p, unsigned int size)
{
	void *r;
	shm_lock(); 
	shm_free_unsafe(p);
	r=shm_malloc_unsafe(size);
	shm_unlock();
	return r;
}
Example No. 12
void* qm_shm_resize(void* qmp, void* p, unsigned long size)
{
	void *r;
	shm_lock();
	if(p) qm_free(qmp, p);
	r = qm_malloc(qmp, size);
	shm_unlock();
	return r;
}
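
Note that sh_realloc() in example 11 and qm_shm_resize() above are not realloc() replacements: they free the old block and allocate a new one, so the old contents are lost, whereas qm_shm_realloc() in examples 8 and 10 preserves them. A small sketch of the two behaviours against the same hypothetical unsafe_alloc()/unsafe_free() pair as above (locking omitted for brevity, old_size supplied by the caller):

#include <string.h>
#include <stddef.h>

extern void *unsafe_alloc(size_t size);
extern void  unsafe_free(void *p);

/* "resize": contents are NOT preserved (what sh_realloc/qm_shm_resize do) */
void *resize_discard(void *p, size_t new_size)
{
    if (p)
        unsafe_free(p);
    return unsafe_alloc(new_size);
}

/* realloc-like: copy the old contents into the new block before freeing */
void *resize_preserve(void *p, size_t old_size, size_t new_size)
{
    void *n = unsafe_alloc(new_size);
    if (n && p) {
        memcpy(n, p, old_size < new_size ? old_size : new_size);
        unsafe_free(p);
    }
    return n;
}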
Example No. 13
static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = CURRENT_TIME;
	shp->shm_lprid = current->pid;
	shp->shm_nattch++;
	shm_unlock(id);
}
Example No. 14
static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
Example No. 15
void SHMSrc::render(char *dest, size_t len)
{
    shm_lock();

    while (buffer_gen_ == shm_area_->buffer_gen) {
        shm_unlock();
        std::cerr << "Waiting for next buffer" << std::endl;
        sem_wait(&shm_area_->notification);

        shm_lock();
    }

    if (!resize_area())
        return;

    std::cerr << "Reading from buffer!" << std::endl;
    memcpy(dest, shm_area_->data, len);
    buffer_gen_ = shm_area_->buffer_gen;
    shm_unlock();
}
Example No. 16
/* append a newly received tag from a 200/INVITE to
 * transaction's set; (only safe if called from within
 * a REPLY_LOCK); it returns 1 if such a to tag already
 * exists
 */
inline static int update_totag_set(struct cell *t, struct sip_msg *ok)
{
	struct totag_elem *i, *n;
	str *tag;
	char *s;

	if (!ok->to || !ok->to->parsed) {
		LM_ERR("to not parsed\n");
		return 0;
	}
	tag=&get_to(ok)->tag_value;
	if (!tag->s) {
		LM_DBG("no tag in to\n");
		return 0;
	}

	for (i=t->fwded_totags; i; i=i->next) {
		if (i->tag.len==tag->len
				&& memcmp(i->tag.s, tag->s, tag->len) ==0 ){
			/* to tag already recorded */
#ifdef XL_DEBUG
			LM_CRIT("totag retransmission\n");
#else
			LM_DBG("totag retransmission\n");
#endif
			return 1;
		}
	}
	/* that's a new to-tag -- record it */
#ifndef HP_MALLOC
	shm_lock();
	n=(struct totag_elem*) shm_malloc_unsafe(sizeof(struct totag_elem));
	s=(char *)shm_malloc_unsafe(tag->len);
	shm_unlock();
#else
	n=(struct totag_elem*) shm_malloc(sizeof(struct totag_elem));
	s=(char *)shm_malloc(tag->len);
#endif
	if (!s || !n) {
		LM_ERR("no more share memory \n");
		if (n) shm_free(n);
		if (s) shm_free(s);
		return 0;
	}
	memset(n, 0, sizeof(struct totag_elem));
	memcpy(s, tag->s, tag->len );
	n->tag.s=s;n->tag.len=tag->len;
	n->next=t->fwded_totags;
	t->fwded_totags=n;
	LM_DBG("new totag \n");
	return 0;
}
Example No. 17
void SHMSink::render(const std::vector<unsigned char> &data)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + data.size()))
        return;

    memcpy(shm_area_->data, data.data(), data.size());
    shm_area_->buffer_size = data.size();
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
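
Examples 15 and 17 are the two halves of one hand-off protocol: the sink writes the frame, bumps a generation counter and posts a semaphore; the source loops on sem_wait() until the counter changes, always dropping the lock before sleeping. A compact sketch of that protocol over a fixed-size buffer; struct shared_buf, publish() and consume() are illustrative names, and a real multi-process setup would additionally need process-shared mutex attributes and sem_init(..., 1, 0):

#include <pthread.h>
#include <semaphore.h>
#include <string.h>
#include <stddef.h>

/* A shared header in the spirit of SHMHeader: lock, wake-up semaphore,
 * a generation counter and a payload buffer (fixed size here for brevity). */
struct shared_buf {
    pthread_mutex_t lock;      /* PTHREAD_PROCESS_SHARED in real multi-process use */
    sem_t           notify;    /* posted by the writer on every new frame */
    unsigned        gen;       /* incremented once per published frame */
    size_t          size;
    unsigned char   data[4096];
};

/* writer side: publish a frame and wake the reader (cf. SHMSink::render) */
void publish(struct shared_buf *b, const unsigned char *src, size_t len)
{
    pthread_mutex_lock(&b->lock);
    if (len > sizeof(b->data))
        len = sizeof(b->data);
    memcpy(b->data, src, len);
    b->size = len;
    b->gen++;
    sem_post(&b->notify);
    pthread_mutex_unlock(&b->lock);
}

/* reader side: wait until the generation changes (cf. SHMSrc::render) */
size_t consume(struct shared_buf *b, unsigned *last_gen,
               unsigned char *dst, size_t cap)
{
    size_t len;

    pthread_mutex_lock(&b->lock);
    while (b->gen == *last_gen) {        /* nothing new yet */
        pthread_mutex_unlock(&b->lock);  /* never sleep while holding the lock */
        sem_wait(&b->notify);
        pthread_mutex_lock(&b->lock);
    }
    len = b->size < cap ? b->size : cap;
    memcpy(dst, b->data, len);
    *last_gen = b->gen;
    pthread_mutex_unlock(&b->lock);
    return len;
}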
Example No. 18
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}
Example No. 19
void SHMSink::render_callback(VideoProvider &provider, size_t bytes)
{
    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        return;
    }

    provider.fillBuffer(static_cast<void*>(shm_area_->data));
    shm_area_->buffer_size = bytes;
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
Example No. 20
void free_cell( struct cell* dead_cell )
{
	char *b;
	int i;
	struct sip_msg *rpl;
	struct totag_elem *tt, *foo;

	release_cell_lock( dead_cell );
	shm_lock();

	/* UA Server */
	if ( dead_cell->uas.request )
		sip_msg_free_unsafe( dead_cell->uas.request );
	if ( dead_cell->uas.response.buffer )
		shm_free_unsafe( dead_cell->uas.response.buffer );

	/* completion callback */
	if (dead_cell->cbp) shm_free_unsafe(dead_cell->cbp);

	/* UA Clients */
	for ( i =0 ; i<dead_cell->nr_of_outgoings;  i++ )
	{
		/* retransmission buffer */
		if ( (b=dead_cell->uac[i].request.buffer) )
			shm_free_unsafe( b );
		b=dead_cell->uac[i].local_cancel.buffer;
		if (b!=0 && b!=BUSY_BUFFER)
			shm_free_unsafe( b );
		rpl=dead_cell->uac[i].reply;
		if (rpl && rpl!=FAKED_REPLY) {
			sip_msg_free_unsafe( rpl );
		}
	}

	/* collected to tags */
	tt=dead_cell->fwded_totags;
	while(tt) {
		foo=tt->next;
		shm_free_unsafe(tt->tag.s);
		shm_free_unsafe(tt);
		tt=foo;
	}

	/* the cell's body */
	shm_free_unsafe( dead_cell );

	shm_unlock();
}
Example No. 21
void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}
Example No. 22
File: log.c Project: millken/merry
void copy_buf_to_shm_log_buf(logf_t *_logf)
{
    if(!_logf || _logf->_inner_log_buf_len < 1) {
        return;
    }

    shm_lock(_logf->_shm_log_buf);

    if(*(_logf->log_buf_len) + _logf->_inner_log_buf_len > _logf->log_buf_size) {
        write(_logf->LOG_FD, _logf->log_buf, *(_logf->log_buf_len));
        *(_logf->log_buf_len) = 0;
    }

    memcpy(_logf->log_buf + (*_logf->log_buf_len), _logf->_inner_log_buf, _logf->_inner_log_buf_len);
    *(_logf->log_buf_len) += _logf->_inner_log_buf_len;
    shm_unlock(_logf->_shm_log_buf);

    _logf->_inner_log_buf_len = 0;
}
Example No. 23
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}
Example No. 24
inline static void* sh_realloc(void* p, unsigned int size)
{
	void *r;

#ifndef HP_MALLOC
	shm_lock(); 
	shm_free_unsafe(p);
	r = shm_malloc_unsafe(size);
#else
	shm_free(p);
	r = shm_malloc(size);
#endif

	shm_threshold_check();

#ifndef HP_MALLOC
	shm_unlock(); 
#endif

	return r;
}
Example No. 25
/**
 * @brief Wait until the written TID reaches the specified tid or higher
 * @retval 1 success
 * @retval 0 failure
 * @retval -1 error
 */
int shm_cond_wait( ssm_header *shm_p, SSM_tid tid )
{
	int ret = 0;
	struct timeval now;
	struct timespec tout;
	
	if( tid <= shm_get_tid_top( shm_p ) )
		return 1;
	gettimeofday( &now, NULL );
	tout.tv_sec = now.tv_sec + 1;
	tout.tv_nsec = now.tv_usec * 1000;

	if( !shm_lock( shm_p ) )
		return -1;
	while( tid > shm_get_tid_top( shm_p ) && (ret == 0) )
	{
		ret = pthread_cond_timedwait( &shm_p->cond, &shm_p->mutex, &tout );
	}
	if( !shm_unlock( shm_p ) )
		return -1;
	return ( ret == 0 );
}
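
shm_cond_wait() above shows the standard shape of a timed wait: compute one absolute deadline, then call pthread_cond_timedwait() in a loop while the predicate is still false, rechecking it after every wake-up. A generic sketch of that loop, using clock_gettime() for the deadline and a caller-supplied flag as the predicate (wait_for_flag() is an illustrative helper, not part of SSM):

#include <pthread.h>
#include <time.h>
#include <errno.h>

/* Wait up to timeout_sec seconds for *flag to become nonzero.
 * Returns 1 on success, 0 on timeout, -1 on error. */
int wait_for_flag(pthread_mutex_t *mtx, pthread_cond_t *cond,
                  const int *flag, unsigned timeout_sec)
{
    struct timespec deadline;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);   /* default condvar clock */
    deadline.tv_sec += timeout_sec;

    if (pthread_mutex_lock(mtx))
        return -1;
    while (!*flag && rc == 0)                   /* recheck predicate after wake-up */
        rc = pthread_cond_timedwait(cond, mtx, &deadline);
    if (pthread_mutex_unlock(mtx))
        return -1;

    if (rc == 0)
        return 1;
    return rc == ETIMEDOUT ? 0 : -1;
}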
Example No. 26
struct mi_root *mi_shm_check(struct mi_root *cmd, void *param)
{
#if defined(QM_MALLOC) && defined(DBG_MALLOC)
	struct mi_root *root;
	int ret;

	shm_lock();
	ret = qm_mem_check(shm_block);
	shm_unlock();

	/* any return means success; print the number of fragments now */
	root = init_mi_tree(200, MI_SSTR(MI_OK));

	if (!addf_mi_node_child(&root->node, 0, MI_SSTR("total_fragments"), "%d", ret)) {
		LM_ERR("failed to add MI node\n");
		free_mi_tree(root);
		return NULL;
	}

	return root;
#endif

	return NULL;
}
Example No. 27
void SHMSink::render_frame(VideoFrame& src)
{
    VideoFrame dst;
    VideoScaler scaler;

    const int width = src.getWidth();
    const int height = src.getHeight();
    const int format = VIDEO_PIXFMT_BGRA;
    size_t bytes = dst.getSize(width, height, format);

    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        return;
    }

    dst.setDestination(shm_area_->data, width, height, format);
    scaler.scale(src, dst);

#ifdef DEBUG_FPS
    const std::chrono::time_point<std::chrono::system_clock> currentTime = std::chrono::system_clock::now();
    const std::chrono::duration<double> seconds = currentTime - lastFrameDebug_;
    frameCount_++;
    if (seconds.count() > 1) {
        DEBUG("%s: FPS %f", shm_name_.c_str(), frameCount_ / seconds.count());
        frameCount_ = 0;
        lastFrameDebug_ = currentTime;
    }
#endif

    shm_area_->buffer_size = bytes;
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
Example No. 28
void qm_shm_status(void* qmp)
{
	shm_lock();
	qm_status(qmp);
	shm_unlock();
}
Example No. 29
void qm_shm_sums(void* qmp)
{
	shm_lock();
	qm_sums(qmp);
	shm_unlock();
}
Example No. 30
#ifdef DBG_QM_MALLOC
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size,
				const char* file, const char* func, unsigned int line)
#else
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size)
#endif
{
	unsigned long rest;
	struct qm_frag* n;
	struct qm_frag_end* end;
	
	rest=f->size-new_size;
#ifdef MEM_FRAG_AVOIDANCE
	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
#else
	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
#endif
		f->size=new_size;
		/*split the fragment*/
		end=FRAG_END(f);
		end->size=new_size;
		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
		n->size=rest-FRAG_OVERHEAD;
		FRAG_END(n)->size=n->size;
		FRAG_CLEAR_USED(n); /* never used */
		qm->real_used+=FRAG_OVERHEAD;
#ifdef DBG_QM_MALLOC
		end->check1=END_CHECK_PATTERN1;
		end->check2=END_CHECK_PATTERN2;
		/* frag created by malloc, mark it*/
		n->file=file;
		n->func=func;
		n->line=line;
		n->check=ST_CHECK_PATTERN;
#endif
		/* reinsert n in free list*/
		qm_insert_free(qm, n);
		return 0;
	}else{
			/* we cannot split this fragment any more */
		return -1;
	}
}



#ifdef DBG_QM_MALLOC
void* qm_malloc(void* qmp, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_malloc(void* qmp, unsigned long size)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	int hash;
#ifdef DBG_QM_MALLOC
	unsigned int list_cntr;
#endif

	qm = (struct qm_block*)qmp;
	
#ifdef DBG_QM_MALLOC
	list_cntr = 0;
	MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func,
			line);
#endif
	/*malloc(0) should return a valid pointer according to specs*/
	if(unlikely(size==0)) size=4;
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
	if (size>(qm->size-qm->real_used)) return 0;

	/*search for a suitable free frag*/
#ifdef DBG_QM_MALLOC
	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
#else
	if ((f=qm_find_free(qm, size, &hash))!=0){
#endif
		/* we found it!*/
		/*detach it from the free list*/
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, f);
#endif
		qm_detach_free(qm, f);
		/*mark it as "busy"*/
		f->u.is_free=0;
		qm->free_hash[hash].no--;
		qm->ffrags--;
		/* we ignore split return */
#ifdef DBG_QM_MALLOC
		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
#else
		split_frag(qm, f, size);
#endif
		qm->real_used+=f->size;
		qm->used+=f->size;
		if (qm->max_real_used<qm->real_used)
			qm->max_real_used=qm->real_used;
#ifdef DBG_QM_MALLOC
		f->file=file;
		f->func=func;
		f->line=line;
		f->check=ST_CHECK_PATTERN;
		/*  FRAG_END(f)->check1=END_CHECK_PATTERN1;
			FRAG_END(f)->check2=END_CHECK_PATTERN2;*/
		MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d"
				" -th hit\n",
			 qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr );
#endif
#ifdef MALLOC_STATS
		if(qm->type==MEM_TYPE_PKG) {
			sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
		}
#endif
		return (char*)f+sizeof(struct qm_frag);
	}
	return 0;
}



#ifdef DBG_QM_MALLOC
void qm_free(void* qmp, void* p, const char* file, const char* func, 
				unsigned int line)
#else
void qm_free(void* qmp, void* p)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	unsigned long size;
#ifdef MEM_JOIN_FREE
	struct qm_frag* next;
	struct qm_frag* prev;
#endif /* MEM_JOIN_FREE*/

	qm = (struct qm_block*)qmp;

#ifdef DBG_QM_MALLOC
	MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
#endif

	if (p==0) {
#ifdef DBG_QM_MALLOC
		LOG(L_WARN, "WARNING:qm_free: free(0) called from %s: %s(%d)\n", file, func, line);
#else
		LOG(L_WARN, "WARNING:qm_free: free(0) called\n");
#endif
		return;
	}

#ifdef DBG_QM_MALLOC
	if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!)"
				" called from %s: %s(%d) - aborting\n", p, file, func, line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
#endif

	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));

#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer (%p),"
				" called from %s: %s(%d), first free %s: %s(%ld) - aborting\n",
				p, file, func, line, f->file, f->func, f->line);
		if(likely(cfg_get(core, core_cfg, mem_safety)==0))
			abort();
		else return;
	}
	MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
#endif
	if (unlikely(f->u.is_free)){
		LM_INFO("freeing a free fragment (%p/%p) - ignore\n",
				f, p);
		return;
	}

	size=f->size;
	qm->used-=size;
	qm->real_used-=size;

#ifdef MEM_JOIN_FREE
	if(unlikely(cfg_get(core, core_cfg, mem_join)!=0)) {
		next=prev=0;
		/* mark this fragment as used (might fall into the middle of joined frags)
		  to give us an extra chance of detecting a double free call (if the joined
		  fragment has not yet been reused) */
		f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */
		/* join packets if possible*/
		next=FRAG_NEXT(f);
		if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){
			/* join next packet */
#ifdef DBG_QM_MALLOC
			qm_debug_frag(qm, next);
#endif
			qm_detach_free(qm, next);
			size+=next->size+FRAG_OVERHEAD;
			qm->real_used-=FRAG_OVERHEAD;
			qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
			qm->ffrags--;
		}
	
		if (f > qm->first_frag){
			prev=FRAG_PREV(f);
			/*	(struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f-
								sizeof(struct qm_frag_end))->size);*/
			if (prev->u.is_free){
				/* join prev packet */
#ifdef DBG_QM_MALLOC
				qm_debug_frag(qm, prev);
#endif
				qm_detach_free(qm, prev);
				size+=prev->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
				qm->ffrags--;
				f=prev;
			}
		}
		f->size=size;
		FRAG_END(f)->size=f->size;
	} /* if cfg_core->mem_join */
#endif /* MEM_JOIN_FREE*/
#ifdef DBG_QM_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	qm_insert_free(qm, f);
#ifdef MALLOC_STATS
	if(qm->type==MEM_TYPE_PKG) {
		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
	}
#endif
}



#ifdef DBG_QM_MALLOC
void* qm_realloc(void* qmp, void* p, unsigned long size,
					const char* file, const char* func, unsigned int line)
#else
void* qm_realloc(void* qmp, void* p, unsigned long size)
#endif
{
	struct qm_block* qm;
	struct qm_frag* f;
	unsigned long diff;
	unsigned long orig_size;
	struct qm_frag* n;
	void* ptr;

	qm = (struct qm_block*)qmp;

#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size,
			file, func, line);
	if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){
		LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	
	if (size==0) {
		if (p)
#ifdef DBG_QM_MALLOC
			qm_free(qm, p, file, func, line);
#else
			qm_free(qm, p);
#endif
		return 0;
	}
	if (p==0)
#ifdef DBG_QM_MALLOC
		return qm_malloc(qm, size, file, func, line);
#else
		return qm_malloc(qm, size);
#endif
	f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag));
#ifdef DBG_QM_MALLOC
	qm_debug_frag(qm, f);
	MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n",
			f, f->file, f->func, f->line);
	if (f->u.is_free){
		LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed "
				"pointer %p , fragment %p -- aborting\n", p, f);
		abort();
	}
#endif
	/* find first acceptable size */
	size=ROUNDUP(size);
	if (f->size > size){
		orig_size=f->size;
		/* shrink */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size);
		if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){
		MDBG("qm_realloc : shrinked successful\n");
#else
		if(split_frag(qm, f, size)!=0){
#endif
			/* update used sizes: freed the split frag */
			/* split frag already adds FRAG_OVERHEAD for the newly created
			   free frag, so here we only need orig_size-f->size for real used
			 */
			qm->real_used-=(orig_size-f->size);
			qm->used-=(orig_size-f->size);
		}
		
	}else if (f->size < size){
		/* grow */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size);
#endif
			orig_size=f->size;
			diff=size-f->size;
			n=FRAG_NEXT(f);
			if (((char*)n < (char*)qm->last_frag_end) && 
					(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
				/* join  */
				qm_detach_free(qm, n);
				qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
				qm->ffrags--;
				f->size+=n->size+FRAG_OVERHEAD;
				qm->real_used-=FRAG_OVERHEAD;
				FRAG_END(f)->size=f->size;
				/* end checks should be ok */
				/* split it if necessary */
				if (f->size > size ){
	#ifdef DBG_QM_MALLOC
					split_frag(qm, f, size, file, "fragm. from qm_realloc",
										line);
	#else
					split_frag(qm, f, size);
	#endif
				}
				qm->real_used+=(f->size-orig_size);
				qm->used+=(f->size-orig_size);
			}else{
				/* could not join => realloc */
	#ifdef DBG_QM_MALLOC
				ptr=qm_malloc(qm, size, file, func, line);
	#else
				ptr=qm_malloc(qm, size);
	#endif
				if (ptr){
					/* copy the old contents, needed by libssl */
					memcpy(ptr, p, orig_size);
				}
	#ifdef DBG_QM_MALLOC
				qm_free(qm, p, file, func, line);
	#else
				qm_free(qm, p);
	#endif
				p=ptr;
			}
	}else{
		/* do nothing */
#ifdef DBG_QM_MALLOC
		MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n",
				f->size, size);
#endif
	}
#ifdef DBG_QM_MALLOC
	MDBG("qm_realloc: returning %p\n", p);
#endif
#ifdef MALLOC_STATS
	if(qm->type==MEM_TYPE_PKG) {
		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
	}
#endif
	return p;
}


void qm_check(struct qm_block* qm)
{
	struct qm_frag* f;
	long fcount = 0;
	int memlog;
	
	memlog=cfg_get(core, core_cfg, memlog);
	LOG(memlog, "DEBUG: qm_check()\n");
	f = qm->first_frag;
	while ((char*)f < (char*)qm->last_frag_end) {
		fcount++;
		/* check struct qm_frag */
#ifdef DBG_QM_MALLOC
		if (f->check!=ST_CHECK_PATTERN){
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
					"beginning overwritten(%lx)!\n",
					f, (char*)f + sizeof(struct qm_frag),
					f->check);
			qm_status(qm);
			abort();
		};
#endif
		if ((char*)f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > (char*)qm->first_frag + qm->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"bad size: %lu (frag end: %p > end of block: %p)\n",
				f, (char*)f + sizeof(struct qm_frag) + sizeof(struct qm_frag_end), f->size,
				(char*)f + sizeof(struct qm_frag) + f->size, (char*)qm->first_frag + qm->size);
			qm_status(qm);
			abort();
		}
		/* check struct qm_frag_end */
		if (FRAG_END(f)->size != f->size) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) "
				"size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n",
				f, (char*)f + sizeof(struct qm_frag),
				f->size, FRAG_END(f)->size);
			qm_status(qm);
			abort();
		}
#ifdef DBG_QM_MALLOC
		if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) ||
			(FRAG_END(f)->check2 != END_CHECK_PATTERN2)) {
			LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)"
						" end overwritten(%lx, %lx)!\n",
					f, (char*)f + sizeof(struct qm_frag), 
					FRAG_END(f)->check1, FRAG_END(f)->check2);
			qm_status(qm);
			abort();
		}
#endif
		f = FRAG_NEXT(f);
	}

	LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount);
}

void qm_status(void* qmp)
{
	struct qm_block* qm;
	struct qm_frag* f;
	int i,j;
	int h;
	int unused;
	int memlog;
	int mem_summary;

	qm = (struct qm_block*)qmp;

	memlog=cfg_get(core, core_cfg, memlog);
	mem_summary=cfg_get(core, core_cfg, mem_summary);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm);
	if (!qm) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n",
			qm->size);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"used= %lu, used+overhead=%lu, free=%lu\n",
			qm->used, qm->real_used, qm->size-qm->real_used);
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"max used (+overhead)= %lu\n", qm->max_real_used);
	
	if (mem_summary & 16) return;

	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping all alloc'ed. fragments:\n");
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
			,i++){
		if (! f->u.is_free){
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"   %3d. %c  address=%p frag=%p size=%lu used=%d\n",
				i,
				(f->u.is_free)?'a':'N',
				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
#ifdef DBG_QM_MALLOC
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"          %s from %s: %s(%ld)\n",
				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
			LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
					"         start check=%lx, end check= %lx, %lx\n",
				f->check, FRAG_END(f)->check1, FRAG_END(f)->check2);
#endif
		}
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"dumping free list stats :\n");
	for(h=0,i=0;h<QM_HASH_SIZE;h++){
		unused=0;
		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
				if (!FRAG_WAS_USED(f)){
					unused++;
#ifdef DBG_QM_MALLOC
					LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
						"unused fragm.: hash = %3d, fragment %p,"
						" address %p size %lu, created from %s: %s(%lu)\n",
					    h, f, (char*)f+sizeof(struct qm_frag), f->size,
						f->file, f->func, f->line);
#endif
				}
		}

		if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
				"hash= %3d. fragments no.: %5d, unused: %5d\n"
					"\t\t bucket size: %9lu - %9ld (first %9lu)\n",
					h, j, unused, UN_HASH(h),
					((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
					qm->free_hash[h].head.u.nxt_free->size
				);
		if (j!=qm->free_hash[h].no){
			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
				" for hash %3d\n", j, qm->free_hash[h].no, h);
		}

	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_status: ",
			"-----------------------------\n");
}


/* fills a malloc info structure with info about the block
 * if a parameter is not supported, it will be filled with 0 */
void qm_info(void* qmp, struct mem_info* info)
{
	struct qm_block* qm;

	qm = (struct qm_block*)qmp;

	memset(info,0, sizeof(*info));
	info->total_size=qm->size;
	info->min_frag=MIN_FRAG_SIZE;
	info->free=qm->size-qm->real_used;
	info->used=qm->used;
	info->real_used=qm->real_used;
	info->max_used=qm->max_real_used;
	info->total_frags=qm->ffrags;
}


/* returns how much free memory is available
 * it never returns an error (unlike fm_available) */
unsigned long qm_available(void* qmp)
{
	struct qm_block* qm;

	qm = (struct qm_block*)qmp;

	return qm->size-qm->real_used;
}



#ifdef DBG_QM_MALLOC

typedef struct _mem_counter{
	const char *file;
	const char *func;
	unsigned long line;
	
	unsigned long size;
	int count;
	
	struct _mem_counter *next;
} mem_counter;

static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f)
{
	mem_counter *x;
	if (!*root) goto make_new;
	for(x=*root;x;x=x->next)
		if (x->file == f->file && x->func == f->func && x->line == f->line)
			return x;
make_new:	
	x = malloc(sizeof(mem_counter));
	x->file = f->file;
	x->func = f->func;
	x->line = f->line;
	x->count = 0;
	x->size = 0;
	x->next = *root;
	*root = x;
	return x;
}



void qm_sums(void* qmp)
{
	struct qm_block* qm;
	struct qm_frag* f;
	int i;
	mem_counter *root, *x;
	int memlog;
	
	qm = (struct qm_block*)qmp;

	root=0;
	if (!qm) return;
	
	memlog=cfg_get(core, core_cfg, memlog);
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"summarizing all alloc'ed. fragments:\n");
	
	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;
			f=FRAG_NEXT(f),i++){
		if (! f->u.is_free){
			x = get_mem_counter(&root,f);
			x->count++;
			x->size+=f->size;
		}
	}
	x = root;
	while(x){
		LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
				" count=%6d size=%10lu bytes from %s: %s(%ld)\n",
			x->count,x->size,
			x->file, x->func, x->line
			);
		root = x->next;
		free(x);
		x = root;
	}
	LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ",
			"-----------------------------\n");
}
#else

void qm_sums(void* qm)
{
	return;
}
#endif /* DBG_QM_MALLOC */


/*memory manager core api*/
static char *_qm_mem_name = "q_malloc";

/* PKG - private memory API*/
static char *_qm_pkg_pool = 0;
static struct qm_block *_qm_pkg_block = 0;

/**
 * \brief Destroy memory pool
 */
void qm_malloc_destroy_pkg_manager(void)
{
	if (_qm_pkg_pool) {
		free(_qm_pkg_pool);
		_qm_pkg_pool = 0;
	}
	_qm_pkg_block = 0;
}

/**
 * \brief Init memory pool
 */
int qm_malloc_init_pkg_manager(void)
{
	sr_pkg_api_t ma;
	_qm_pkg_pool = malloc(pkg_mem_size);
	if (_qm_pkg_pool)
		_qm_pkg_block=qm_malloc_init(_qm_pkg_pool, pkg_mem_size, MEM_TYPE_PKG);
	if (_qm_pkg_block==0){
		LOG(L_CRIT, "could not initialize qm memory pool\n");
		fprintf(stderr, "Too much qm pkg memory demanded: %ld bytes\n",
						pkg_mem_size);
		return -1;
	}

	memset(&ma, 0, sizeof(sr_pkg_api_t));
	ma.mname = _qm_mem_name;
	ma.mem_pool = _qm_pkg_pool;
	ma.mem_block = _qm_pkg_block;
	ma.xmalloc = qm_malloc;
	ma.xfree = qm_free;
	ma.xrealloc = qm_realloc;
	ma.xstatus = qm_status;
	ma.xinfo = qm_info;
	ma.xavailable = qm_available;
	ma.xsums = qm_sums;
	ma.xdestroy = qm_malloc_destroy_pkg_manager;

	return pkg_init_api(&ma);
}


/* SHM - shared memory API*/
static void *_qm_shm_pool = 0;
static struct qm_block *_qm_shm_block = 0;

/*SHM wrappers to sync the access to memory block*/
#ifdef DBG_QM_MALLOC
void* qm_shm_malloc(void* qmp, unsigned long size,
					const char* file, const char* func, unsigned int line)
{
	void *r;
	shm_lock();
	r = qm_malloc(qmp, size, file, func, line);
	shm_unlock();
	return r;
}