void sfm_free(struct sfm_block* qm, void* p)
#endif
{
	struct sfm_frag* f;

#ifdef DBG_F_MALLOC
	MDBG("sfm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func,
			line);
	if (p>(void*)qm->last_frag || p<(void*)qm->first_frag){
		LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
	if (unlikely(p==0)) {
		LOG(L_WARN, "WARNING: sfm_free: free(0) called\n");
		return;
	}
	f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag));
#ifdef DBG_F_MALLOC
	MDBG("sfm_free: freeing block alloc'ed from %s: %s(%ld)\n",
			f->file, f->func, f->line);
#endif
#ifdef DBG_F_MALLOC
	f->file=file;
	f->func=func;
	f->line=line;
#endif
	sfm_insert_free(qm, f, 0);
}
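/* Standalone sketch of the pointer-to-header arithmetic sfm_free() relies on:
 * the fragment header sits immediately before the payload returned to the
 * caller. "struct demo_frag" and frag_of() are simplified illustrations, not
 * the real struct sfm_frag layout. */
#include <stdio.h>
#include <stdlib.h>

struct demo_frag {
	unsigned long size;	/* usable payload size */
	int is_free;		/* bookkeeping flag */
};

/* Recover the header from the user pointer, exactly like
 * (char*)p - sizeof(struct sfm_frag) above. */
static struct demo_frag *frag_of(void *p)
{
	return (struct demo_frag *)((char *)p - sizeof(struct demo_frag));
}

int main(void)
{
	char *block = malloc(sizeof(struct demo_frag) + 64);
	struct demo_frag *f;
	void *user;

	if (!block)
		return 1;
	f = (struct demo_frag *)block;
	user = block + sizeof(struct demo_frag);
	f->size = 64;
	f->is_free = 0;
	/* frag_of(user) points back at f */
	printf("size of frag behind %p: %lu\n", user, frag_of(user)->size);
	free(block);
	return 0;
}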
void* _shm_resize( void* p , unsigned int s)
#endif
{
#ifdef VQ_MALLOC
	struct vqm_frag *f;
#endif

	if (p==0) {
		DBG("WARNING:vqm_resize: resize(0) called\n");
		return shm_malloc( s );
	}
#ifdef DBG_QM_MALLOC
#ifdef VQ_MALLOC
	f=(struct vqm_frag*) ((char*)p-sizeof(struct vqm_frag));
	MDBG("_shm_resize(%p, %d), called from %s: %s(%d)\n", p, s, file, func,
			line);
	VQM_DEBUG_FRAG(shm_block, f);
	if (p>(void *)shm_block->core_end || p<(void*)shm_block->init_core){
		LOG(L_CRIT, "BUG: vqm_free: bad pointer %p (out of memory block!) - "
				"aborting\n", p);
		abort();
	}
#endif
#endif
	return sh_realloc( p, s );
}
void mh_send_ba(const struct in6_addr_bundle *addrs, uint8_t status,
		uint8_t flags, uint16_t sequence,
		const struct timespec *lifetime, const uint8_t *key, int iif)
{
	int iovlen = 1;
	struct ip6_mh_binding_ack *ba;
	struct iovec mh_vec[3];	/* MH + refresh advice + auth data option */

	MDBG("status %d\n", status);

	ba = mh_create(mh_vec, IP6_MH_TYPE_BACK);
	if (!ba)
		return;
	ba->ip6mhba_status = status;
	ba->ip6mhba_flags = flags;
	ba->ip6mhba_seqno = htons(sequence);
	ba->ip6mhba_lifetime = htons(lifetime->tv_sec >> 2);

	if (status < IP6_MH_BAS_UNSPECIFIED && !conf.NonVolatileBindingCache) {
		struct timespec refresh;

		tsclear(refresh);
		if (conf.pmgr.use_bradv(addrs->dst, addrs->bind_coa,
					addrs->src, lifetime, &refresh) &&
		    tsbefore(*lifetime, refresh))
			mh_create_opt_refresh_advice(&mh_vec[iovlen++],
						     refresh.tv_sec);
	}
	if (key)
		mh_create_opt_auth_data(&mh_vec[iovlen++]);

	mh_send(addrs, mh_vec, iovlen, key, iif);
	free_iov_data(mh_vec, iovlen);
}
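/* Sketch of the 4-second-unit lifetime encoding used for the BA above
 * (htons(lifetime->tv_sec >> 2)). The helper names are illustrative, not
 * part of the MIPL API. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>

/* seconds -> wire format: units of 4 seconds, network byte order */
static uint16_t ba_lifetime_encode(time_t seconds)
{
	return htons((uint16_t)(seconds >> 2));
}

/* wire format -> seconds */
static time_t ba_lifetime_decode(uint16_t wire)
{
	return (time_t)ntohs(wire) << 2;
}

int main(void)
{
	time_t lt = 420;	/* 7 minutes */
	uint16_t wire = ba_lifetime_encode(lt);

	printf("420s encodes to %u units, decodes to %lds\n",
	       (unsigned)ntohs(wire), (long)ba_lifetime_decode(wire));
	return 0;
}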
void mh_send_be(struct in6_addr *dst, struct in6_addr *hoa,
		struct in6_addr *src, uint8_t status, int iif)
{
	struct ip6_mh_binding_error *be;
	struct iovec iov;
	struct in6_addr_bundle out;

	if (IN6_IS_ADDR_UNSPECIFIED(dst) ||
	    IN6_IS_ADDR_LOOPBACK(dst) ||
	    IN6_IS_ADDR_MULTICAST(dst)) {
		MDBG("Omit BE for non-unicast "
		     "%x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(dst));
		return;
	}
	out.remote_coa = NULL;
	out.local_coa = NULL;

	be = mh_create(&iov, IP6_MH_TYPE_BERROR);
	if (!be)
		return;
	be->ip6mhbe_status = status;
	out.src = src;
	out.dst = dst;
	if (hoa)
		be->ip6mhbe_homeaddr = *hoa;
	mh_send(&out, &iov, 1, NULL, iif);
	free_iov_data(&iov, 1);
}
int igmp6_event_report(struct sk_buff *skb)
{
	struct ifmcaddr6 *ma;
	struct in6_addr *addrp;
	struct inet6_dev *idev;
	struct icmp6hdr *hdr;
#ifdef CONFIG_IPV6_MLD6_DEBUG
	char abuf1[128], abuf2[128];
	unsigned long resptime;

	in6_ntop(&skb->nh.ipv6h->saddr, abuf1);
	in6_ntop(&skb->nh.ipv6h->daddr, abuf2);
	MDBG((KERN_DEBUG
		"igmp6_event_report(skb=%p): saddr=%s, daddr=%s\n",
		skb, abuf1, abuf2));
#endif

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		return 0;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		return -EINVAL;

	hdr = (struct icmp6hdr*) skb->h.raw;

	/* Drop reports with not link local source */
	if (!(ipv6_addr_type(&skb->nh.ipv6h->saddr)&IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

#ifdef CONFIG_IPV6_MLD6_DEBUG
	resptime = ntohs(hdr->icmp6_maxdelay);
#endif

	addrp = (struct in6_addr *) (hdr + 1);

#ifdef CONFIG_IPV6_MLD6_DEBUG
	in6_ntop(addrp, abuf1);
	MDBG3((KERN_DEBUG "igmp6_event_report(): maxdelay=%lu, addr=%s\n",
		resptime, abuf1));
#endif

	if (!ipv6_addr_is_multicast(addrp))
		goto drop;

	idev = in6_dev_get(skb->dev);
	if (idev == NULL)
		return -ENODEV;

	/*
	 *	Cancel the timer for this group
	 */
	read_lock(&idev->lock);
	for (ma = idev->mc_list; ma; ma=ma->next) {
		if (ipv6_addr_cmp(&ma->mca_addr, addrp) == 0) {
			spin_lock(&ma->mca_lock);
			if (del_timer(&ma->mca_timer))
				atomic_dec(&ma->mca_refcnt);
#ifndef CONFIG_IPV6_MLD6_ALL_DONE
			ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
#else
			ma->mca_flags &= ~MAF_TIMER_RUNNING;
#endif
			spin_unlock(&ma->mca_lock);
			break;
		}
	}
	read_unlock(&idev->lock);
	in6_dev_put(idev);
drop:
	return 0;
}
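/* User-space sketch of the two validity checks the report handler above
 * performs (link-local source, multicast group), using the portable
 * IN6_IS_ADDR_* macros instead of the kernel's ipv6_addr_type(). The helper
 * name is illustrative only. */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int mld_report_addrs_ok(const struct in6_addr *src,
			       const struct in6_addr *group)
{
	if (!IN6_IS_ADDR_LINKLOCAL(src))
		return 0;	/* reports must come from a link-local source */
	if (!IN6_IS_ADDR_MULTICAST(group))
		return 0;	/* reported address must be multicast */
	return 1;
}

int main(void)
{
	struct in6_addr src, grp;

	inet_pton(AF_INET6, "fe80::1", &src);
	inet_pton(AF_INET6, "ff02::1:ff00:1", &grp);
	printf("valid report: %d\n", mld_report_addrs_ok(&src, &grp));
	return 0;
}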
void* sfm_realloc(struct sfm_block* qm, void* p, unsigned long size) #endif { struct sfm_frag *f; unsigned long orig_size; void *ptr; #ifndef SFM_REALLOC_REMALLOC struct sfm_frag *n; struct sfm_frag **pf; unsigned long diff; unsigned long p_id; int hash; unsigned long n_size; struct sfm_pool * pool; #endif #ifdef DBG_F_MALLOC MDBG("sfm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size, file, func, line); if ((p)&&(p>(void*)qm->last_frag || p<(void*)qm->first_frag)){ LOG(L_CRIT, "BUG: sfm_free: bad pointer %p (out of memory block!) - " "aborting\n", p); abort(); } #endif if (size==0) { if (p) #ifdef DBG_F_MALLOC sfm_free(qm, p, file, func, line); #else sfm_free(qm, p); #endif return 0; } if (p==0) #ifdef DBG_F_MALLOC return sfm_malloc(qm, size, file, func, line); #else return sfm_malloc(qm, size); #endif f=(struct sfm_frag*) ((char*)p-sizeof(struct sfm_frag)); #ifdef DBG_F_MALLOC MDBG("sfm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n", f, f->file, f->func, f->line); #endif size=ROUNDUP(size); orig_size=f->size; if (f->size > size){ /* shrink */ #ifdef DBG_F_MALLOC MDBG("sfm_realloc: shrinking from %lu to %lu\n", f->size, size); sfm_split_frag(qm, f, size, file, "frag. from sfm_realloc", line); #else sfm_split_frag(qm, f, size); #endif }else if (f->size<size){ /* grow */ #ifdef DBG_F_MALLOC MDBG("sfm_realloc: growing from %lu to %lu\n", f->size, size); #endif #ifndef SFM_REALLOC_REMALLOC /* should set a magic value in list head and in push/pop if magic value => * lock and wait */ #error LL_MALLOC realloc not finished yet diff=size-f->size; n=FRAG_NEXT(f); if (((char*)n < (char*)qm->last_frag) && (n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){ /* join */ /* detach n from the free list */ try_again: p_id=n->id; n_size=n->size; if ((unlikely(p_id >=SFM_POOLS_NO))){ hash=GET_HASH(n_size); SFM_MAIN_HASH_LOCK(qm, hash); if (unlikely((n->u.nxt_free==0) || ((n->size+FRAG_OVERHEAD)<diff))){ SFM_MAIN_HASH_UNLOCK(qm, hash); goto not_found; } if (unlikely((n->id!=p_id) || (n->size!=n_size))){ /* fragment still free, but changed, either * moved to another pool or has a diff. size */ SFM_MAIN_HASH_UNLOCK(qm, hash); goto try_again; } pf=&(qm->free_hash[hash].first); /* find it */ for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */ if (*pf==0){ SFM_MAIN_HASH_UNLOCK(qm, hash); /* not found, bad! */ LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in " "free " "list (hash=%d)\n", n, hash); /* somebody is in the process of changing it ? */ goto not_found; } /* detach */ *pf=n->u.nxt_free; n->u.nxt_free=0; /* mark it immediately as detached */ qm->free_hash[hash].no--; SFM_MAIN_HASH_UNLOCK(qm, hash); /* join */ f->size+=n->size+FRAG_OVERHEAD; /* split it if necessary */ if (f->size > size){ #ifdef DBG_F_MALLOC sfm_split_frag(qm, f, size, file, "fragm. from " "sfm_realloc", line); #else sfm_split_frag(qm, f, size); #endif } }else{ /* p_id < SFM_POOLS_NO (=> in a pool )*/ hash=GET_SMALL_HASH(n_size); pool=&qm->pool[p_id]; SFM_POOL_LOCK(pool, hash); if (unlikely((n->u.nxt_free==0) || ((n->size+FRAG_OVERHEAD)<diff))){ SFM_POOL_UNLOCK(pool, hash); goto not_found; } if (unlikely((n->id!=p_id) || (n->size!=n_size))){ /* fragment still free, but changed, either * moved to another pool or has a diff. size */ SFM_POOL_UNLOCK(pool, hash); goto try_again; } pf=&(pool->pool_hash[hash].first); /* find it */ for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));/*FIXME slow */ if (*pf==0){ SFM_POOL_UNLOCK(pool, hash); /* not found, bad! 
*/ LOG(L_WARN, "WARNING: sfm_realloc: could not find %p in " "free " "list (hash=%d)\n", n, hash); /* somebody is in the process of changing it ? */ goto not_found; } /* detach */ *pf=n->u.nxt_free; n->u.nxt_free=0; /* mark it immediately as detached */ pool->pool_hash[hash].no--; SFM_POOL_UNLOCK(pool, hash); /* join */ f->size+=n->size+FRAG_OVERHEAD; /* split it if necessary */ if (f->size > size){ #ifdef DBG_F_MALLOC sfm_split_frag(qm, f, size, file, "fragm. from " "sfm_realloc", line); #else sfm_split_frag(qm, f, size); #endif } } }else{ not_found: /* could not join => realloc */ #else/* SFM_REALLOC_REMALLOC */ { #endif /* SFM_REALLOC_REMALLOC */ #ifdef DBG_F_MALLOC ptr=sfm_malloc(qm, size, file, func, line); #else ptr=sfm_malloc(qm, size); #endif if (ptr){ /* copy, need by libssl */ memcpy(ptr, p, orig_size); #ifdef DBG_F_MALLOC sfm_free(qm, p, file, func, line); #else sfm_free(qm, p); #endif } p=ptr; } }else{
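/* Minimal sketch of the "could not grow in place => remalloc" fallback used
 * at the end of sfm_realloc() above (and later in qm_realloc()): allocate a
 * new block, copy the old contents, free the old block. demo_realloc_fallback()
 * is an illustrative stand-in that uses malloc/free instead of the allocator's
 * own sfm_malloc()/sfm_free(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *demo_realloc_fallback(void *p, size_t orig_size, size_t new_size)
{
	void *ptr = malloc(new_size);	/* allocator would call sfm_malloc() */

	if (ptr) {
		/* copy the old contents (libssl relies on this behaviour) */
		memcpy(ptr, p, orig_size < new_size ? orig_size : new_size);
		free(p);		/* allocator would call sfm_free() */
	}
	/* on allocation failure the original block is left untouched */
	return ptr;
}

int main(void)
{
	char *p = malloc(16);
	char *q;

	if (!p)
		return 1;
	memcpy(p, "hello", 6);
	q = demo_realloc_fallback(p, 16, 64);	/* grow via copy + free */
	if (q) {
		puts(q);
		free(q);
	} else {
		free(p);
	}
	return 0;
}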
void* sfm_malloc(struct sfm_block* qm, unsigned long size)
#endif
{
	struct sfm_frag* frag;
	int hash;
	unsigned int i;

#ifdef DBG_F_MALLOC
	MDBG("sfm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file,
			func, line);
#endif
	/*size must be a multiple of 8*/
	size=ROUNDUP(size);
/*	if (size>(qm->size-qm->real_used)) return 0; */

	/* check if our pool id is set */
	sfm_fix_pool_id(qm);

	/*search for a suitable free frag*/
	if (likely(size<=SF_POOL_MAX_SIZE)){
		hash=GET_SMALL_HASH(size);
		/* try first in our pool */
#ifdef DBG_F_MALLOC
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size,
										file, func, line))!=0))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size, file, func, line))!=0))
			goto found;
		/* really low mem , try in other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size,
										file, func, line))!=0)
				goto found;
		}
#else
		if (likely((frag=pool_get_frag(qm, &qm->pool[pool_id], hash, size))
					!=0 ))
			goto found;
		/* try in the "main" free hash, go through all the hash */
		if (likely((frag=main_get_frag(qm, hash, size))!=0))
			goto found;
		/* really low mem , try in other pools */
		for (i=(pool_id+1); i< (pool_id+SFM_POOLS_NO); i++){
			if ((frag=pool_get_frag(qm, &qm->pool[i%SFM_POOLS_NO], hash, size))
					!=0 )
				goto found;
		}
#endif
		/* not found, bad! */
		return 0;
	}else{
		hash=GET_BIG_HASH(size);
#ifdef DBG_F_MALLOC
		if ((frag=main_get_frag(qm, hash, size, file, func, line))==0)
			return 0; /* not found, bad! */
#else
		if ((frag=main_get_frag(qm, hash, size))==0)
			return 0; /* not found, bad! */
#endif
	}

found:
	/* we found it!*/
#ifdef DBG_F_MALLOC
	frag->file=file;
	frag->func=func;
	frag->line=line;
	frag->check=ST_CHECK_PATTERN;
	MDBG("sfm_malloc(%p, %lu) returns address %p \n", qm, size,
			(char*)frag+sizeof(struct sfm_frag));
#endif
	FRAG_MARK_USED(frag); /* mark it as used */
	return (char*)frag+sizeof(struct sfm_frag);
}
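/* Sketch of the size rounding and small-bucket selection sfm_malloc() performs
 * before it searches the free lists. The 8-byte granularity and the linear
 * bucket mapping below are simplified assumptions that mirror ROUNDUP() and
 * GET_SMALL_HASH(), not the real SF_MALLOC macros. */
#include <stdio.h>

#define DEMO_ROUNDTO	8UL
#define DEMO_ROUNDUP(s)	(((s) + DEMO_ROUNDTO - 1) & ~(DEMO_ROUNDTO - 1))
/* small requests map linearly onto buckets of ROUNDTO step */
#define DEMO_SMALL_HASH(s)	((unsigned)((s) / DEMO_ROUNDTO))

int main(void)
{
	unsigned long sizes[] = { 1, 8, 13, 250 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long r = DEMO_ROUNDUP(sizes[i]);

		printf("request %4lu -> rounded %4lu -> bucket %u\n",
		       sizes[i], r, DEMO_SMALL_HASH(r));
	}
	return 0;
}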
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size) #endif { unsigned long rest; struct qm_frag* n; struct qm_frag_end* end; rest=f->size-new_size; #ifdef MEM_FRAG_AVOIDANCE if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))|| (rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/ #else if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){ #endif f->size=new_size; /*split the fragment*/ end=FRAG_END(f); end->size=new_size; n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end)); n->size=rest-FRAG_OVERHEAD; FRAG_END(n)->size=n->size; FRAG_CLEAR_USED(n); /* never used */ qm->real_used+=FRAG_OVERHEAD; #ifdef DBG_QM_MALLOC end->check1=END_CHECK_PATTERN1; end->check2=END_CHECK_PATTERN2; /* frag created by malloc, mark it*/ n->file=file; n->func=func; n->line=line; n->check=ST_CHECK_PATTERN; #endif /* reinsert n in free list*/ qm_insert_free(qm, n); return 0; }else{ /* we cannot split this fragment any more */ return -1; } } #ifdef DBG_QM_MALLOC void* qm_malloc(void* qmp, unsigned long size, const char* file, const char* func, unsigned int line) #else void* qm_malloc(void* qmp, unsigned long size) #endif { struct qm_block* qm; struct qm_frag* f; int hash; #ifdef DBG_QM_MALLOC unsigned int list_cntr; #endif qm = (struct qm_block*)qmp; #ifdef DBG_QM_MALLOC list_cntr = 0; MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func, line); #endif /*malloc(0) should return a valid pointer according to specs*/ if(unlikely(size==0)) size=4; /*size must be a multiple of 8*/ size=ROUNDUP(size); if (size>(qm->size-qm->real_used)) return 0; /*search for a suitable free frag*/ #ifdef DBG_QM_MALLOC if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){ #else if ((f=qm_find_free(qm, size, &hash))!=0){ #endif /* we found it!*/ /*detach it from the free list*/ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); #endif qm_detach_free(qm, f); /*mark it as "busy"*/ f->u.is_free=0; qm->free_hash[hash].no--; qm->ffrags--; /* we ignore split return */ #ifdef DBG_QM_MALLOC split_frag(qm, f, size, file, "fragm. from qm_malloc", line); #else split_frag(qm, f, size); #endif qm->real_used+=f->size; qm->used+=f->size; if (qm->max_real_used<qm->real_used) qm->max_real_used=qm->real_used; #ifdef DBG_QM_MALLOC f->file=file; f->func=func; f->line=line; f->check=ST_CHECK_PATTERN; /* FRAG_END(f)->check1=END_CHECK_PATTERN1; FRAG_END(f)->check2=END_CHECK_PATTERN2;*/ MDBG("qm_malloc(%p, %lu) returns address %p frag. 
%p (size=%lu) on %d" " -th hit\n", qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr ); #endif #ifdef MALLOC_STATS if(qm->type==MEM_TYPE_PKG) { sr_event_exec(SREV_PKG_UPDATE_STATS, 0); } #endif return (char*)f+sizeof(struct qm_frag); } return 0; } #ifdef DBG_QM_MALLOC void qm_free(void* qmp, void* p, const char* file, const char* func, unsigned int line) #else void qm_free(void* qmp, void* p) #endif { struct qm_block* qm; struct qm_frag* f; unsigned long size; #ifdef MEM_JOIN_FREE struct qm_frag* next; struct qm_frag* prev; #endif /* MEM_JOIN_FREE*/ qm = (struct qm_block*)qmp; #ifdef DBG_QM_MALLOC MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line); #endif if (p==0) { #ifdef DBG_QM_MALLOC LOG(L_WARN, "WARNING:qm_free: free(0) called from %s: %s(%d)\n", file, func, line); #else LOG(L_WARN, "WARNING:qm_free: free(0) called\n"); #endif return; } #ifdef DBG_QM_MALLOC if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){ LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!)" " called from %s: %s(%d) - aborting\n", p, file, func, line); if(likely(cfg_get(core, core_cfg, mem_safety)==0)) abort(); else return; } #endif f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag)); #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); if (f->u.is_free){ LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer (%p)," " called from %s: %s(%d), first free %s: %s(%ld) - aborting\n", p, file, func, line, f->file, f->func, f->line); if(likely(cfg_get(core, core_cfg, mem_safety)==0)) abort(); else return; } MDBG("qm_free: freeing frag. %p alloc'ed from %s: %s(%ld)\n", f, f->file, f->func, f->line); #endif if (unlikely(f->u.is_free)){ LM_INFO("freeing a free fragment (%p/%p) - ignore\n", f, p); return; } size=f->size; qm->used-=size; qm->real_used-=size; #ifdef MEM_JOIN_FREE if(unlikely(cfg_get(core, core_cfg, mem_join)!=0)) { next=prev=0; /* mark this fragment as used (might fall into the middle of joined frags) to give us an extra chance of detecting a double free call (if the joined fragment has not yet been reused) */ f->u.nxt_free=(void*)0x1L; /* bogus value, just to mark it as free */ /* join packets if possible*/ next=FRAG_NEXT(f); if (((char*)next < (char*)qm->last_frag_end) && (next->u.is_free)){ /* join next packet */ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, next); #endif qm_detach_free(qm, next); size+=next->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */ qm->ffrags--; } if (f > qm->first_frag){ prev=FRAG_PREV(f); /* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f- sizeof(struct qm_frag_end))->size);*/ if (prev->u.is_free){ /* join prev packet */ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, prev); #endif qm_detach_free(qm, prev); size+=prev->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */ qm->ffrags--; f=prev; } } f->size=size; FRAG_END(f)->size=f->size; } /* if cfg_core->mem_join */ #endif /* MEM_JOIN_FREE*/ #ifdef DBG_QM_MALLOC f->file=file; f->func=func; f->line=line; #endif qm_insert_free(qm, f); #ifdef MALLOC_STATS if(qm->type==MEM_TYPE_PKG) { sr_event_exec(SREV_PKG_UPDATE_STATS, 0); } #endif } #ifdef DBG_QM_MALLOC void* qm_realloc(void* qmp, void* p, unsigned long size, const char* file, const char* func, unsigned int line) #else void* qm_realloc(void* qmp, void* p, unsigned long size) #endif { struct qm_block* qm; struct qm_frag* f; unsigned long diff; unsigned long orig_size; struct qm_frag* n; void* ptr; qm = (struct 
qm_block*)qmp; #ifdef DBG_QM_MALLOC MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size, file, func, line); if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){ LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - " "aborting\n", p); abort(); } #endif if (size==0) { if (p) #ifdef DBG_QM_MALLOC qm_free(qm, p, file, func, line); #else qm_free(qm, p); #endif return 0; } if (p==0) #ifdef DBG_QM_MALLOC return qm_malloc(qm, size, file, func, line); #else return qm_malloc(qm, size); #endif f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag)); #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n", f, f->file, f->func, f->line); if (f->u.is_free){ LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed " "pointer %p , fragment %p -- aborting\n", p, f); abort(); } #endif /* find first acceptable size */ size=ROUNDUP(size); if (f->size > size){ orig_size=f->size; /* shrink */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size); if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){ MDBG("qm_realloc : shrinked successful\n"); #else if(split_frag(qm, f, size)!=0){ #endif /* update used sizes: freed the splited frag */ /* split frag already adds FRAG_OVERHEAD for the newly created free frag, so here we only need orig_size-f->size for real used */ qm->real_used-=(orig_size-f->size); qm->used-=(orig_size-f->size); } }else if (f->size < size){ /* grow */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size); #endif orig_size=f->size; diff=size-f->size; n=FRAG_NEXT(f); if (((char*)n < (char*)qm->last_frag_end) && (n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){ /* join */ qm_detach_free(qm, n); qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/ qm->ffrags--; f->size+=n->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; FRAG_END(f)->size=f->size; /* end checks should be ok */ /* split it if necessary */ if (f->size > size ){ #ifdef DBG_QM_MALLOC split_frag(qm, f, size, file, "fragm. from qm_realloc", line); #else split_frag(qm, f, size); #endif } qm->real_used+=(f->size-orig_size); qm->used+=(f->size-orig_size); }else{ /* could not join => realloc */ #ifdef DBG_QM_MALLOC ptr=qm_malloc(qm, size, file, func, line); #else ptr=qm_malloc(qm, size); #endif if (ptr){ /* copy, need by libssl */ memcpy(ptr, p, orig_size); } #ifdef DBG_QM_MALLOC qm_free(qm, p, file, func, line); #else qm_free(qm, p); #endif p=ptr; } }else{ /* do nothing */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n", f->size, size); #endif } #ifdef DBG_QM_MALLOC MDBG("qm_realloc: returning %p\n", p); #endif #ifdef MALLOC_STATS if(qm->type==MEM_TYPE_PKG) { sr_event_exec(SREV_PKG_UPDATE_STATS, 0); } #endif return p; } void qm_check(struct qm_block* qm) { struct qm_frag* f; long fcount = 0; int memlog; memlog=cfg_get(core, core_cfg, memlog); LOG(memlog, "DEBUG: qm_check()\n"); f = qm->first_frag; while ((char*)f < (char*)qm->last_frag_end) { fcount++; /* check struct qm_frag */ #ifdef DBG_QM_MALLOC if (f->check!=ST_CHECK_PATTERN){ LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) " "beginning overwritten(%lx)!\n", f, (char*)f + sizeof(struct qm_frag), f->check); qm_status(qm); abort(); }; #endif if (f + sizeof(struct qm_frag) + f->size + sizeof(struct qm_frag_end) > qm->first_frag + qm->size) { LOG(L_CRIT, "BUG: qm_*: fragm. 
%p (address %p) " "bad size: %lu (frag end: %p > end of block: %p)\n", f, (char*)f + sizeof(struct qm_frag) + sizeof(struct qm_frag_end), f->size, f + sizeof(struct qm_frag) + f->size, qm->first_frag + qm->size); qm_status(qm); abort(); } /* check struct qm_frag_end */ if (FRAG_END(f)->size != f->size) { LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p) " "size in qm_frag and qm_frag_end does not match: frag->size=%lu, frag_end->size=%lu)\n", f, (char*)f + sizeof(struct qm_frag), f->size, FRAG_END(f)->size); qm_status(qm); abort(); } #ifdef DBG_QM_MALLOC if ((FRAG_END(f)->check1 != END_CHECK_PATTERN1) || (FRAG_END(f)->check2 != END_CHECK_PATTERN2)) { LOG(L_CRIT, "BUG: qm_*: fragm. %p (address %p)" " end overwritten(%lx, %lx)!\n", f, (char*)f + sizeof(struct qm_frag), FRAG_END(f)->check1, FRAG_END(f)->check2); qm_status(qm); abort(); } #endif f = FRAG_NEXT(f); } LOG(memlog, "DEBUG: qm_check: %lu fragments OK\n", fcount); } void qm_status(void* qmp) { struct qm_block* qm; struct qm_frag* f; int i,j; int h; int unused; int memlog; int mem_summary; qm = (struct qm_block*)qmp; memlog=cfg_get(core, core_cfg, memlog); mem_summary=cfg_get(core, core_cfg, mem_summary); LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "(%p):\n", qm); if (!qm) return; LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "heap size= %lu\n", qm->size); LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "used= %lu, used+overhead=%lu, free=%lu\n", qm->used, qm->real_used, qm->size-qm->real_used); LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "max used (+overhead)= %lu\n", qm->max_real_used); if (mem_summary & 16) return; LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "dumping all alloc'ed. fragments:\n"); for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f) ,i++){ if (! f->u.is_free){ LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", " %3d. %c address=%p frag=%p size=%lu used=%d\n", i, (f->u.is_free)?'a':'N', (char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f)); #ifdef DBG_QM_MALLOC LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", " %s from %s: %s(%ld)\n", (f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line); LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", " start check=%lx, end check= %lx, %lx\n", f->check, FRAG_END(f)->check1, FRAG_END(f)->check2); #endif } } LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "dumping free list stats :\n"); for(h=0,i=0;h<QM_HASH_SIZE;h++){ unused=0; for (f=qm->free_hash[h].head.u.nxt_free,j=0; f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){ if (!FRAG_WAS_USED(f)){ unused++; #ifdef DBG_QM_MALLOC LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "unused fragm.: hash = %3d, fragment %p," " address %p size %lu, created from %s: %s(%lu)\n", h, f, (char*)f+sizeof(struct qm_frag), f->size, f->file, f->func, f->line); #endif } } if (j) LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "hash= %3d. fragments no.: %5d, unused: %5d\n" "\t\t bucket size: %9lu - %9ld (first %9lu)\n", h, j, unused, UN_HASH(h), ((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h), qm->free_hash[h].head.u.nxt_free->size ); if (j!=qm->free_hash[h].no){ LOG(L_CRIT, "BUG: qm_status: different free frag. 
count: %d!=%lu" " for hash %3d\n", j, qm->free_hash[h].no, h); } } LOG_(DEFAULT_FACILITY, memlog, "qm_status: ", "-----------------------------\n"); } /* fills a malloc info structure with info about the block * if a parameter is not supported, it will be filled with 0 */ void qm_info(void* qmp, struct mem_info* info) { struct qm_block* qm; qm = (struct qm_block*)qmp; memset(info,0, sizeof(*info)); info->total_size=qm->size; info->min_frag=MIN_FRAG_SIZE; info->free=qm->size-qm->real_used; info->used=qm->used; info->real_used=qm->real_used; info->max_used=qm->max_real_used; info->total_frags=qm->ffrags; } /* returns how much free memory is available * it never returns an error (unlike fm_available) */ unsigned long qm_available(void* qmp) { struct qm_block* qm; qm = (struct qm_block*)qmp; return qm->size-qm->real_used; } #ifdef DBG_QM_MALLOC typedef struct _mem_counter{ const char *file; const char *func; unsigned long line; unsigned long size; int count; struct _mem_counter *next; } mem_counter; static mem_counter* get_mem_counter(mem_counter **root, struct qm_frag* f) { mem_counter *x; if (!*root) goto make_new; for(x=*root;x;x=x->next) if (x->file == f->file && x->func == f->func && x->line == f->line) return x; make_new: x = malloc(sizeof(mem_counter)); x->file = f->file; x->func = f->func; x->line = f->line; x->count = 0; x->size = 0; x->next = *root; *root = x; return x; } void qm_sums(void* qmp) { struct qm_block* qm; struct qm_frag* f; int i; mem_counter *root, *x; int memlog; qm = (struct qm_block*)qmp; root=0; if (!qm) return; memlog=cfg_get(core, core_cfg, memlog); LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ", "summarizing all alloc'ed. fragments:\n"); for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end; f=FRAG_NEXT(f),i++){ if (! f->u.is_free){ x = get_mem_counter(&root,f); x->count++; x->size+=f->size; } } x = root; while(x){ LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ", " count=%6d size=%10lu bytes from %s: %s(%ld)\n", x->count,x->size, x->file, x->func, x->line ); root = x->next; free(x); x = root; } LOG_(DEFAULT_FACILITY, memlog, "qm_sums: ", "-----------------------------\n"); } #else void qm_sums(void* qm) { return; } #endif /* DBG_QM_MALLOC */ /*memory manager core api*/ static char *_qm_mem_name = "q_malloc"; /* PKG - private memory API*/ static char *_qm_pkg_pool = 0; static struct qm_block *_qm_pkg_block = 0; /** * \brief Destroy memory pool */ void qm_malloc_destroy_pkg_manager(void) { if (_qm_pkg_pool) { free(_qm_pkg_pool); _qm_pkg_pool = 0; } _qm_pkg_block = 0; } /** * \brief Init memory pool */ int qm_malloc_init_pkg_manager(void) { sr_pkg_api_t ma; _qm_pkg_pool = malloc(pkg_mem_size); if (_qm_pkg_pool) _qm_pkg_block=qm_malloc_init(_qm_pkg_pool, pkg_mem_size, MEM_TYPE_PKG); if (_qm_pkg_block==0){ LOG(L_CRIT, "could not initialize qm memory pool\n"); fprintf(stderr, "Too much qm pkg memory demanded: %ld bytes\n", pkg_mem_size); return -1; } memset(&ma, 0, sizeof(sr_pkg_api_t)); ma.mname = _qm_mem_name; ma.mem_pool = _qm_pkg_pool; ma.mem_block = _qm_pkg_block; ma.xmalloc = qm_malloc; ma.xfree = qm_free; ma.xrealloc = qm_realloc; ma.xstatus = qm_status; ma.xinfo = qm_info; ma.xavailable = qm_available; ma.xsums = qm_sums; ma.xdestroy = qm_malloc_destroy_pkg_manager; return pkg_init_api(&ma); } /* SHM - shared memory API*/ static void *_qm_shm_pool = 0; static struct qm_block *_qm_shm_block = 0; /*SHM wrappers to sync the access to memory block*/ #ifdef DBG_QM_MALLOC void* qm_shm_malloc(void* qmp, unsigned long size, const char* file, const char* func, unsigned 
int line)
{
	void *r;

	shm_lock();
	r = qm_malloc(qmp, size, file, func, line);
	shm_unlock();
	return r;
}
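/* The remaining SHM wrappers follow the same lock/serialize/unlock pattern.
 * A sketch of the non-debug free and realloc variants, assuming the same
 * shm_lock()/shm_unlock() primitives used above; the wrappers in the original
 * file may additionally carry the DBG_QM_MALLOC file/func/line parameters. */
void qm_shm_free(void* qmp, void* p)
{
	shm_lock();		/* serialize access to the shared block */
	qm_free(qmp, p);
	shm_unlock();
}

void* qm_shm_realloc(void* qmp, void* p, unsigned long size)
{
	void *r;

	shm_lock();
	r = qm_realloc(qmp, p, size);
	shm_unlock();
	return r;
}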
/**
 * mh_recv - receive mobility header signaling message
 * @msg: buffer to store message in
 * @addr: packet source address
 * @pkt_info: packet destination and interface
 * @haoaddr: address in home address option (if any)
 * @rtaddr: address in routing header type 2 (if any)
 *
 * Waits for a packet from mobility header @mh_sock.fd socket.
 * Stores information about the packet to @addr, @pkt_info, @haoaddr, and
 * @rtaddr. Packet data (i.e. mobility header) is stored in @msg.
 * Returns length of packet data received, or negative error value on
 * failure.
 **/
ssize_t mh_recv(unsigned char *msg, size_t msglen,
		struct sockaddr_in6 *addr, struct in6_pktinfo *pkt_info,
		struct in6_addr *haoaddr, struct in6_addr *rtaddr)
{
	struct ip6_mh *mh;
	struct msghdr mhdr;
	struct cmsghdr *cmsg;
	struct iovec iov;
	static unsigned char chdr[CMSG_BUF_LEN];
	void *databufp = NULL;
	int sockfd = mh_sock.fd;
	socklen_t hao_len;
	ssize_t len;

	iov.iov_len = msglen;
	iov.iov_base = (unsigned char *)msg;

	mhdr.msg_name = (void *)addr;
	mhdr.msg_namelen = sizeof(struct sockaddr_in6);
	mhdr.msg_iov = &iov;
	mhdr.msg_iovlen = 1;
	mhdr.msg_control = (void *)&chdr;
	mhdr.msg_controllen = CMSG_BUF_LEN;

	if ((len = recvmsg(sockfd, &mhdr, 0)) < 0)
		return -errno;

	memset(haoaddr, 0, sizeof(*haoaddr));
	memset(rtaddr, 0, sizeof(*rtaddr));

	for (cmsg = CMSG_FIRSTHDR(&mhdr); cmsg;
	     cmsg = CMSG_NXTHDR(&mhdr, cmsg)) {
		int ret = 0;

		if (cmsg->cmsg_level != IPPROTO_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			memcpy(pkt_info, CMSG_DATA(cmsg), sizeof(*pkt_info));
			break;
		case IPV6_DSTOPTS:
			ret = inet6_opt_find(CMSG_DATA(cmsg), cmsg->cmsg_len,
					     0, IP6OPT_HOME_ADDRESS,
					     &hao_len, &databufp);
			if (ret >= 0 && databufp != NULL &&
			    hao_len == sizeof(struct in6_addr)) {
				*haoaddr = *(struct in6_addr *) databufp;
			}
			break;
		case IPV6_RTHDR:
			if (inet6_rth_gettype(CMSG_DATA(cmsg)) ==
			    IPV6_RTHDR_TYPE_2) {
				struct in6_addr *seg = NULL;
				/* Kernel already processed routing
				 * header type 2 for us */
				seg = inet6_rth_getaddr(CMSG_DATA(cmsg), 0);
				if (!seg)
					MDBG("Invalid rth\n");
				else
					*rtaddr = *seg;
			}
			break;
		}
	}
	mh = (struct ip6_mh *)msg;

	if (mh->ip6mh_type > IP6_MH_TYPE_MAX) {
		struct in6_addr *src, *dst, *hoa;

		if (!IN6_IS_ADDR_UNSPECIFIED(rtaddr))
			src = rtaddr;
		else
			src = &pkt_info->ipi6_addr;

		if (!IN6_IS_ADDR_UNSPECIFIED(haoaddr)) {
			hoa = &addr->sin6_addr;
			dst = haoaddr;
		} else {
			hoa = NULL;
			dst = &addr->sin6_addr;
		}
		mh_send_be(dst, hoa, src, IP6_MH_BES_UNKNOWN_MH,
			   pkt_info->ipi6_ifindex);
		return -EINVAL;
	}
	/* No need to perform any other validity checks, since kernel
	 * does this for us. */
	return len;
}
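/* Hedged usage sketch for mh_recv(): a minimal receive-and-dispatch loop,
 * assuming the declarations from this file and <netinet/ip6mh.h>. The buffer
 * size and the commented-out handle_bu()/handle_ba() handlers are illustrative
 * assumptions, not part of the daemon's real dispatch code. */
static void mh_demo_loop(void)
{
	unsigned char msg[1500];
	struct sockaddr_in6 addr;
	struct in6_pktinfo pkt_info;
	struct in6_addr hao, rta;
	ssize_t len;

	for (;;) {
		len = mh_recv(msg, sizeof(msg), &addr, &pkt_info, &hao, &rta);
		if (len < 0)
			continue;	/* errors already converted to -errno */

		switch (((struct ip6_mh *)msg)->ip6mh_type) {
		case IP6_MH_TYPE_BU:
			/* handle_bu(msg, len, ...); illustrative handler */
			break;
		case IP6_MH_TYPE_BACK:
			/* handle_ba(msg, len, ...); illustrative handler */
			break;
		default:
			/* unknown types already triggered a Binding Error */
			break;
		}
	}
}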
/** * mh_send - send mobility header message * @addrs: bundle of addresses * @mh_vec: scatter/gather array * @iovlen: array block count * @bind_key: key for calculating binding auth. data * * Sends a mobility header message to @dst with @src source address. * Mobility header is created from the @mh_vec vector array created by * the caller and initialized with mh_create() and mh_create_opt_*() * calls. Padding is done automatically and mobility header length is * set. Binding authorization data is calculated if present. Returns * number of bytes sent on success, otherwise negative error value. **/ int mh_send(const struct in6_addr_bundle *addrs, const struct iovec *mh_vec, int iovlen, const uint8_t *bind_key, int oif) { struct ip6_mh_opt_auth_data lbad; struct sockaddr_in6 daddr; struct iovec iov[2*(IP6_MHOPT_MAX+1)]; struct msghdr msg; struct cmsghdr *cmsg; int cmsglen; struct in6_pktinfo pinfo; int ret = 0, on = 1; struct ip6_mh *mh; int iov_count; socklen_t rthlen = 0; iov_count = mh_try_pad(mh_vec, iov, iovlen); mh = (struct ip6_mh *)iov[0].iov_base; mh->ip6mh_hdrlen = (mh_length(iov, iov_count) >> 3) - 1; /* * We use MH out policy for all address. Then we should update it * to refresh its bundle in kernel to be used with correct * route, IPsec SA and neighbor cache entry for the destination. * IKE daemon does the same thing for rekeying process. */ if (xfrm_cn_policy_mh_out_touch(1) < 0) { MDBG("MH out policy touch failed: BA for " "%x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(addrs->dst)); } MDBG("sending MH type %d\n" "from %x:%x:%x:%x:%x:%x:%x:%x\n" "to %x:%x:%x:%x:%x:%x:%x:%x\n", mh->ip6mh_type, NIP6ADDR(addrs->src), NIP6ADDR(addrs->dst)); if (addrs->local_coa) MDBG("local CoA %x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(addrs->local_coa)); if (addrs->remote_coa) MDBG("remote CoA %x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(addrs->remote_coa)); if (bind_key) { assert(iov_count > 1); struct ip6_mh_opt_auth_data *bauth; struct iovec *biov; struct in6_addr *cn = NULL; MDBG("Adding bind auth data\n"); if (mh->ip6mh_type == IP6_MH_TYPE_BU) cn = addrs->dst; else cn = addrs->src; assert(addrs->bind_coa != NULL && cn != NULL); biov = &iov[iov_count - 1]; bauth = (struct ip6_mh_opt_auth_data *)biov->iov_base; if (bauth->ip6moad_type == IP6_MHOPT_BAUTH) { size_t orig_len = biov->iov_len; MDBG("Adding auth_data\n"); memcpy(&lbad, bauth, sizeof(lbad)); /* temporarily set iov_len to option header * length for auth data calculation */ biov->iov_len -= MIPV6_DIGEST_LEN; biov->iov_base = &lbad; calculate_auth_data(iov, iov_count, addrs->bind_coa, cn, bind_key, lbad.ip6moad_data); biov->iov_len = orig_len; } } memset(&daddr, 0, sizeof(struct sockaddr_in6)); daddr.sin6_family = AF_INET6; daddr.sin6_addr = *addrs->dst; daddr.sin6_port = htons(IPPROTO_MH); memset(&pinfo, 0, sizeof(pinfo)); pinfo.ipi6_addr = *addrs->src; pinfo.ipi6_ifindex = oif; cmsglen = CMSG_SPACE(sizeof(pinfo)); if (addrs->remote_coa != NULL) { rthlen = inet6_rth_space(IPV6_RTHDR_TYPE_2, 1); if (!rthlen) { MDBG("inet6_rth_space error\n"); return -1; } cmsglen += CMSG_SPACE(rthlen); } cmsg = malloc(cmsglen); if (cmsg == NULL) { MDBG("malloc failed\n"); return -ENOMEM; } memset(cmsg, 0, cmsglen); memset(&msg, 0, sizeof(msg)); msg.msg_control = cmsg; msg.msg_controllen = cmsglen; msg.msg_iov = iov; msg.msg_iovlen = iov_count; msg.msg_name = (void *)&daddr; msg.msg_namelen = sizeof(daddr); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(sizeof(pinfo)); cmsg->cmsg_level = IPPROTO_IPV6; cmsg->cmsg_type = IPV6_PKTINFO; memcpy(CMSG_DATA(cmsg), &pinfo, 
sizeof(pinfo)); if (addrs->remote_coa != NULL) { void *rthp; cmsg = CMSG_NXTHDR(&msg, cmsg); if (cmsg == NULL) { free(msg.msg_control); MDBG("internal error\n"); return -2; } cmsg->cmsg_len = CMSG_LEN(rthlen); cmsg->cmsg_level = IPPROTO_IPV6; cmsg->cmsg_type = IPV6_RTHDR; rthp = CMSG_DATA(cmsg); rthp = inet6_rth_init(rthp, rthlen, IPV6_RTHDR_TYPE_2, 1); if (rthp == NULL) { free(msg.msg_control); MDBG("inet6_rth_init error\n"); return -3; } inet6_rth_add(rthp, addrs->remote_coa); rthp = NULL; } pthread_mutex_lock(&mh_sock.send_mutex); setsockopt(mh_sock.fd, IPPROTO_IPV6, IPV6_PKTINFO, &on, sizeof(int)); ret = sendmsg(mh_sock.fd, &msg, 0); if (ret < 0) dbg("sendmsg: %s\n", strerror(errno)); pthread_mutex_unlock(&mh_sock.send_mutex); free(msg.msg_control); return ret; }
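/* Sketch of the ip6mh_hdrlen encoding used in mh_send() above: the total
 * mobility header length (after padding) is a multiple of 8 octets, and the
 * field stores (length / 8) - 1. mh_demo_hdrlen() is an illustrative helper,
 * not the daemon's mh_length(). */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

static unsigned int mh_demo_hdrlen(const struct iovec *vec, int count)
{
	size_t total = 0;
	int i;

	for (i = 0; i < count; i++)	/* sum the scatter/gather vector */
		total += vec[i].iov_len;
	return (unsigned int)((total >> 3) - 1);
}

int main(void)
{
	/* 16-byte MH plus an 8-byte (already padded) option = 24 bytes */
	struct iovec v[2] = { { NULL, 16 }, { NULL, 8 } };

	printf("ip6mh_hdrlen = %u\n", mh_demo_hdrlen(v, 2));	/* prints 2 */
	return 0;
}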
int mh_bu_parse(struct ip6_mh_binding_update *bu, ssize_t len,
		const struct in6_addr_bundle *in_addrs,
		struct in6_addr_bundle *out_addrs,
		struct mh_options *mh_opts, struct timespec *lifetime,
		uint8_t *key)
{
	struct in6_addr *our_addr, *peer_addr, *remote_coa;
	struct ip6_mh_opt_altcoa *alt_coa;
	struct ip6_mh_opt_nonce_index *non_ind;
	struct ip6_mh_opt_auth_data *bauth;
	uint16_t bu_flags;
	int ret;

	MDBG("Binding Update Received\n");

	if (len < sizeof(struct ip6_mh_binding_update) ||
	    mh_opt_parse(&bu->ip6mhbu_hdr, len,
			 sizeof(struct ip6_mh_binding_update), mh_opts) < 0)
		return -1;

	peer_addr = in_addrs->src;
	if (!in6_is_addr_routable_unicast(peer_addr))
		return -1;

	remote_coa = in_addrs->remote_coa;
	if (remote_coa && !IN6_ARE_ADDR_EQUAL(remote_coa, peer_addr))
		out_addrs->remote_coa = remote_coa;
	else
		out_addrs->remote_coa = NULL;

	alt_coa = mh_opt(&bu->ip6mhbu_hdr, mh_opts, IP6_MHOPT_ALTCOA);
	if (alt_coa)
		out_addrs->bind_coa = &alt_coa->ip6moa_addr;
	else
		out_addrs->bind_coa = in_addrs->remote_coa;

	our_addr = in_addrs->dst;

	tsclear(*lifetime);

	if (out_addrs->bind_coa) {
		if (!in6_is_addr_routable_unicast(out_addrs->bind_coa))
			return -1;
		if (!IN6_ARE_ADDR_EQUAL(out_addrs->bind_coa, peer_addr)) {
			/* check that there is no circular reference */
			if (bce_exists(our_addr, out_addrs->bind_coa))
				return -1;
			tssetsec(*lifetime, ntohs(bu->ip6mhbu_lifetime) << 2);
		}
	}
	/* Use Home address of MN for calculating BU and BA auth data
	 * for deregs. */
	if (!out_addrs->bind_coa)
		out_addrs->bind_coa = in_addrs->src;

	bu_flags = bu->ip6mhbu_flags;
	out_addrs->src = in_addrs->dst;
	out_addrs->dst = in_addrs->src;
	out_addrs->local_coa = NULL;

	non_ind = mh_opt(&bu->ip6mhbu_hdr, mh_opts, IP6_MHOPT_NONCEID);

	if (bu_flags & IP6_MH_BU_HOME)
		return non_ind ? -1 : 0;

	if (!non_ind)
		return -1;

	MDBG("src %x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(peer_addr));
	MDBG("coa %x:%x:%x:%x:%x:%x:%x:%x\n", NIP6ADDR(out_addrs->bind_coa));

	if (tsisset(*lifetime))
		ret = rr_cn_calc_Kbm(ntohs(non_ind->ip6moni_home_nonce),
				     ntohs(non_ind->ip6moni_coa_nonce),
				     peer_addr, out_addrs->bind_coa, key);
	else
		/* Only use home nonce and address for dereg. */
		ret = rr_cn_calc_Kbm(ntohs(non_ind->ip6moni_home_nonce), 0,
				     peer_addr, NULL, key);
	if (ret)
		return ret;

	bauth = mh_opt(&bu->ip6mhbu_hdr, mh_opts, IP6_MHOPT_BAUTH);
	if (!bauth)
		return -1;

	/* Authenticator is calculated with MH checksum set to 0 */
	bu->ip6mhbu_hdr.ip6mh_cksum = 0;

	if (mh_verify_auth_data(bu, len, bauth,
				out_addrs->bind_coa, our_addr, key) < 0)
		return -1;

	return IP6_MH_BAS_ACCEPTED;
}
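/* Hedged sketch of how a caller might drive mh_bu_parse() and answer with a
 * Binding Ack via mh_send_ba() above. This is deliberately simplified: the
 * real CN/HA code also checks sequence numbers, handles home registrations
 * (IP6_MH_BU_HOME) separately, and may answer error statuses differently.
 * The handler name and the 20-byte Kbm buffer size are assumptions. */
static void demo_handle_bu(struct ip6_mh_binding_update *bu, ssize_t len,
			   const struct in6_addr_bundle *in, int iif)
{
	struct in6_addr_bundle out;
	struct mh_options opts;
	struct timespec lifetime;
	uint8_t key[20];	/* Kbm buffer; size is an assumption here */
	int status;

	status = mh_bu_parse(bu, len, in, &out, &opts, &lifetime, key);
	if (status < 0)
		return;		/* malformed or unverifiable BU: drop it */

	/* accepted (or an error status): answer with a Binding Ack; the key
	 * was filled in by the parser for RR-based BUs (CN case) */
	mh_send_ba(&out, (uint8_t)status, 0, ntohs(bu->ip6mhbu_seqno),
		   &lifetime, key, iif);
}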
int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned long new_size) #endif { unsigned long rest; struct qm_frag* n; struct qm_frag_end* end; rest=f->size-new_size; #ifdef MEM_FRAG_AVOIDANCE if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))|| (rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/ #else if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){ #endif f->size=new_size; /*split the fragment*/ end=FRAG_END(f); end->size=new_size; n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end)); n->size=rest-FRAG_OVERHEAD; FRAG_END(n)->size=n->size; FRAG_CLEAR_USED(n); /* never used */ qm->real_used+=FRAG_OVERHEAD; #ifdef DBG_QM_MALLOC end->check1=END_CHECK_PATTERN1; end->check2=END_CHECK_PATTERN2; /* frag created by malloc, mark it*/ n->file=file; n->func=func; n->line=line; n->check=ST_CHECK_PATTERN; #endif /* reinsert n in free list*/ qm_insert_free(qm, n); return 0; }else{ /* we cannot split this fragment any more */ return -1; } } #ifdef DBG_QM_MALLOC void* qm_malloc(struct qm_block* qm, unsigned long size, const char* file, const char* func, unsigned int line) #else void* qm_malloc(struct qm_block* qm, unsigned long size) #endif { struct qm_frag* f; int hash; #ifdef DBG_QM_MALLOC unsigned int list_cntr; list_cntr = 0; MDBG("qm_malloc(%p, %lu) called from %s: %s(%d)\n", qm, size, file, func, line); #endif /*size must be a multiple of 8*/ size=ROUNDUP(size); if (size>(qm->size-qm->real_used)) return 0; /*search for a suitable free frag*/ #ifdef DBG_QM_MALLOC if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){ #else if ((f=qm_find_free(qm, size, &hash))!=0){ #endif /* we found it!*/ /*detach it from the free list*/ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); #endif qm_detach_free(qm, f); /*mark it as "busy"*/ f->u.is_free=0; qm->free_hash[hash].no--; /* we ignore split return */ #ifdef DBG_QM_MALLOC split_frag(qm, f, size, file, "fragm. from qm_malloc", line); #else split_frag(qm, f, size); #endif qm->real_used+=f->size; qm->used+=f->size; if (qm->max_real_used<qm->real_used) qm->max_real_used=qm->real_used; #ifdef DBG_QM_MALLOC f->file=file; f->func=func; f->line=line; f->check=ST_CHECK_PATTERN; /* FRAG_END(f)->check1=END_CHECK_PATTERN1; FRAG_END(f)->check2=END_CHECK_PATTERN2;*/ MDBG("qm_malloc(%p, %lu) returns address %p frag. %p (size=%lu) on %d" " -th hit\n", qm, size, (char*)f+sizeof(struct qm_frag), f, f->size, list_cntr ); #endif return (char*)f+sizeof(struct qm_frag); } return 0; } #ifdef DBG_QM_MALLOC void qm_free(struct qm_block* qm, void* p, const char* file, const char* func, unsigned int line) #else void qm_free(struct qm_block* qm, void* p) #endif { struct qm_frag* f; struct qm_frag* prev; struct qm_frag* next; unsigned long size; #ifdef DBG_QM_MALLOC MDBG("qm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line); if (p>(void*)qm->last_frag_end || p<(void*)qm->first_frag){ LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - " "aborting\n", p); abort(); } #endif if (p==0) { LOG(L_WARN, "WARNING:qm_free: free(0) called\n"); return; } prev=next=0; f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag)); #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); if (f->u.is_free){ LOG(L_CRIT, "BUG: qm_free: freeing already freed pointer," " first free: %s: %s(%ld) - aborting\n", f->file, f->func, f->line); abort(); } MDBG("qm_free: freeing frag. 
%p alloc'ed from %s: %s(%ld)\n", f, f->file, f->func, f->line); #endif size=f->size; qm->used-=size; qm->real_used-=size; #ifdef QM_JOIN_FREE /* join packets if possible*/ next=FRAG_NEXT(f); if (((char*)next < (char*)qm->last_frag_end) &&( next->u.is_free)){ /* join */ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, next); #endif qm_detach_free(qm, next); size+=next->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */ } if (f > qm->first_frag){ prev=FRAG_PREV(f); /* (struct qm_frag*)((char*)f - (struct qm_frag_end*)((char*)f- sizeof(struct qm_frag_end))->size);*/ #ifdef DBG_QM_MALLOC qm_debug_frag(qm, prev); #endif if (prev->u.is_free){ /*join*/ qm_detach_free(qm, prev); size+=prev->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */ f=prev; } } f->size=size; FRAG_END(f)->size=f->size; #endif /* QM_JOIN_FREE*/ #ifdef DBG_QM_MALLOC f->file=file; f->func=func; f->line=line; #endif qm_insert_free(qm, f); } #ifdef DBG_QM_MALLOC void* qm_realloc(struct qm_block* qm, void* p, unsigned long size, const char* file, const char* func, unsigned int line) #else void* qm_realloc(struct qm_block* qm, void* p, unsigned long size) #endif { struct qm_frag* f; unsigned long diff; unsigned long orig_size; struct qm_frag* n; void* ptr; #ifdef DBG_QM_MALLOC MDBG("qm_realloc(%p, %p, %lu) called from %s: %s(%d)\n", qm, p, size, file, func, line); if ((p)&&(p>(void*)qm->last_frag_end || p<(void*)qm->first_frag)){ LOG(L_CRIT, "BUG: qm_free: bad pointer %p (out of memory block!) - " "aborting\n", p); abort(); } #endif if (size==0) { if (p) #ifdef DBG_QM_MALLOC qm_free(qm, p, file, func, line); #else qm_free(qm, p); #endif return 0; } if (p==0) #ifdef DBG_QM_MALLOC return qm_malloc(qm, size, file, func, line); #else return qm_malloc(qm, size); #endif f=(struct qm_frag*) ((char*)p-sizeof(struct qm_frag)); #ifdef DBG_QM_MALLOC qm_debug_frag(qm, f); MDBG("qm_realloc: realloc'ing frag %p alloc'ed from %s: %s(%ld)\n", f, f->file, f->func, f->line); if (f->u.is_free){ LOG(L_CRIT, "BUG:qm_realloc: trying to realloc an already freed " "pointer %p , fragment %p -- aborting\n", p, f); abort(); } #endif /* find first acceptable size */ size=ROUNDUP(size); if (f->size > size){ orig_size=f->size; /* shrink */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: shrinking from %lu to %lu\n", f->size, size); if(split_frag(qm, f, size, file, "fragm. from qm_realloc", line)!=0){ MDBG("qm_realloc : shrinked successful\n"); #else if(split_frag(qm, f, size)!=0){ #endif /* update used sizes: freed the spitted frag */ qm->real_used-=(orig_size-f->size-FRAG_OVERHEAD); qm->used-=(orig_size-f->size); } }else if (f->size < size){ /* grow */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: growing from %lu to %lu\n", f->size, size); #endif orig_size=f->size; diff=size-f->size; n=FRAG_NEXT(f); if (((char*)n < (char*)qm->last_frag_end) && (n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){ /* join */ qm_detach_free(qm, n); qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/ f->size+=n->size+FRAG_OVERHEAD; qm->real_used-=FRAG_OVERHEAD; FRAG_END(f)->size=f->size; /* end checks should be ok */ /* split it if necessary */ if (f->size > size ){ #ifdef DBG_QM_MALLOC split_frag(qm, f, size, file, "fragm. 
from qm_realloc", line); #else split_frag(qm, f, size); #endif } qm->real_used+=(f->size-orig_size); qm->used+=(f->size-orig_size); }else{ /* could not join => realloc */ #ifdef DBG_QM_MALLOC ptr=qm_malloc(qm, size, file, func, line); #else ptr=qm_malloc(qm, size); #endif if (ptr){ /* copy, need by libssl */ memcpy(ptr, p, orig_size); #ifdef DBG_QM_MALLOC qm_free(qm, p, file, func, line); #else qm_free(qm, p); #endif } p=ptr; } }else{ /* do nothing */ #ifdef DBG_QM_MALLOC MDBG("qm_realloc: doing nothing, same size: %lu - %lu\n", f->size, size); #endif } #ifdef DBG_QM_MALLOC MDBG("qm_realloc: returning %p\n", p); #endif return p; } void qm_status(struct qm_block* qm) { struct qm_frag* f; int i,j; int h; int unused; LOG(memlog, "qm_status (%p):\n", qm); if (!qm) return; LOG(memlog, " heap size= %lu\n", qm->size); LOG(memlog, " used= %lu, used+overhead=%lu, free=%lu\n", qm->used, qm->real_used, qm->size-qm->real_used); LOG(memlog, " max used (+overhead)= %lu\n", qm->max_real_used); LOG(memlog, "dumping all alloc'ed. fragments:\n"); for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f) ,i++){ if (! f->u.is_free){ LOG(memlog, " %3d. %c address=%p frag=%p size=%lu used=%d\n", i, (f->u.is_free)?'a':'N', (char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f)); #ifdef DBG_QM_MALLOC LOG(memlog, " %s from %s: %s(%ld)\n", (f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line); LOG(memlog, " start check=%lx, end check= %lx, %lx\n", f->check, FRAG_END(f)->check1, FRAG_END(f)->check2); #endif } } LOG(memlog, "dumping free list stats :\n"); for(h=0,i=0;h<QM_HASH_SIZE;h++){ unused=0; for (f=qm->free_hash[h].head.u.nxt_free,j=0; f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){ if (!FRAG_WAS_USED(f)){ unused++; #ifdef DBG_QM_MALLOC LOG(memlog, "unused fragm.: hash = %3d, fragment %p," " address %p size %lu, created from %s: %s(%lu)\n", h, f, (char*)f+sizeof(struct qm_frag), f->size, f->file, f->func, f->line); #endif } } if (j) LOG(memlog, "hash= %3d. fragments no.: %5d, unused: %5d\n" "\t\t bucket size: %9lu - %9ld (first %9lu)\n", h, j, unused, UN_HASH(h), ((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h), qm->free_hash[h].head.u.nxt_free->size ); if (j!=qm->free_hash[h].no){ LOG(L_CRIT, "BUG: qm_status: different free frag. 
count: %d!=%lu" " for hash %3d\n", j, qm->free_hash[h].no, h); } } LOG(memlog, "-----------------------------\n"); } /* fills a malloc info structure with info about the block * if a parameter is not supported, it will be filled with 0 */ void qm_info(struct qm_block* qm, struct mem_info* info) { int r; long total_frags; total_frags=0; memset(info,0, sizeof(*info)); info->total_size=qm->size; info->min_frag=MIN_FRAG_SIZE; info->free=qm->size-qm->real_used; info->used=qm->used; info->real_used=qm->real_used; info->max_used=qm->max_real_used; for(r=0;r<QM_HASH_SIZE; r++){ total_frags+=qm->free_hash[r].no; } info->total_frags=total_frags; } /* returns how much free memory is available * it never returns an error (unlike fm_available) */ unsigned long qm_available(struct qm_block* qm) { return qm->size-qm->real_used; } #ifdef DBG_QM_MALLOC typedef struct _mem_counter{ const char *file; const char *func; unsigned long line; unsigned long size; int count; struct _mem_counter *next; } mem_counter; mem_counter* get_mem_counter(mem_counter **root,struct qm_frag* f) { mem_counter *x; if (!*root) goto make_new; for(x=*root;x;x=x->next) if (x->file == f->file && x->func == f->func && x->line == f->line) return x; make_new: x = malloc(sizeof(mem_counter)); x->file = f->file; x->func = f->func; x->line = f->line; x->count = 0; x->size = 0; x->next = *root; *root = x; return x; } #include "../locking.h" #include "../pt.h" extern gen_lock_t* process_lock; extern struct process_table *pt; extern int process_no; void qm_sums(struct qm_block* qm) { struct qm_frag* f; int i; int total_count=0; long unsigned int total_size=0; int memlog=L_ERR; mem_counter *root=0,*x; //lock_get(process_lock); if (process_no!=0) LOG(memlog, "qm_sums (%p): PKG[%s]\n", qm,pt[process_no].desc); else LOG(memlog, "qm_sums (%p): PKG[0]/SHM \n",qm); if (!qm) { //lock_release(process_lock); return; } LOG(memlog, "summarizing all alloc'ed. fragments:\n"); for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f),i++){ if (! f->u.is_free){ x = get_mem_counter(&root,f); x->count++; x->size+=f->size; } } x = root; while(x){ LOG(memlog, " count=%6d size=%10lu bytes from %s: %s(%ld)\n", x->count,x->size, x->file, x->func, x->line ); total_count+=x->count;total_size+=x->size; root = x->next; free(x); x = root; } LOG(memlog, " count=%6d size=%10lu bytes in total\n",total_count,total_size); LOG(memlog, "-----------------------------\n"); //lock_release(process_lock); }
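/* Standalone sketch of the per-call-site accounting idea behind qm_sums():
 * group live allocations by (file, func, line) and sum their sizes. The types
 * below are simplified stand-ins, not the real qm_frag/mem_counter; like
 * get_mem_counter(), the lookup compares the file/func string pointers, which
 * works because __FILE__/__FUNCTION__ literals are shared per call site. */
#include <stdio.h>
#include <stdlib.h>

struct demo_alloc { const char *file, *func; unsigned long line, size; };
struct demo_sum {
	struct demo_alloc key;
	unsigned long total;
	int count;
	struct demo_sum *next;
};

static struct demo_sum *demo_account(struct demo_sum *root,
				     const struct demo_alloc *a)
{
	struct demo_sum *x;

	for (x = root; x; x = x->next)	/* linear scan, as in get_mem_counter() */
		if (x->key.file == a->file && x->key.func == a->func &&
		    x->key.line == a->line)
			break;
	if (!x) {
		x = calloc(1, sizeof(*x));
		if (!x)
			return root;
		x->key = *a;
		x->next = root;
		root = x;
	}
	x->count++;
	x->total += a->size;
	return root;
}

int main(void)
{
	struct demo_alloc a = { "tm.c", "build_cell", 42, 1024 };
	struct demo_sum *root = NULL;

	root = demo_account(root, &a);
	root = demo_account(root, &a);	/* same call site, counted twice */
	printf("%s: %s(%lu): count=%d size=%lu\n", root->key.file,
	       root->key.func, root->key.line, root->count, root->total);
	free(root);
	return 0;
}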