/* Note that numNameBytes includes the NUL byte! */
/* Allocates and initializes a single MMessageField in one contiguous heap block:
 * the struct itself, then (numItems*itemSize) zeroed data bytes, then a
 * NUL-terminated copy of (fieldName).  Returns NULL if numItems or numNameBytes
 * is zero, if the size arithmetic would wrap a uint32, or if MMalloc() fails.
 */
static MMessageField * AllocMMessageField(const char * fieldName, uint32 numNameBytes, uint32 typeCode, uint32 numItems, uint32 itemSize)
{
   if ((numItems == 0)||(numNameBytes == 0)) return NULL;  /* we don't allow zero-item or unnamed fields! */

   /* Overflow guards (bug fix): a wrapped allocSize would make MMalloc() return an
    * undersized buffer and the memset()/memcpy() below would overrun the heap. */
   if ((itemSize > 0)&&(numItems > 0xFFFFFFFF/itemSize)) return NULL;
   if (numNameBytes > 0xFFFFFFFF-sizeof(MMessageField)) return NULL;

   {
      const uint32 dataSize = numItems*itemSize;
      if (dataSize > 0xFFFFFFFF-sizeof(MMessageField)-numNameBytes) return NULL;

      {
         const uint32 allocSize = sizeof(MMessageField)+dataSize+numNameBytes;
         MMessageField * ret = (MMessageField *) MMalloc(allocSize);
         if (ret)
         {
            ret->prevField = ret->nextField = NULL;
            ret->allocSize = allocSize;
            ret->nameBytes = numNameBytes;
            ret->typeCode  = typeCode;
            ret->itemSize  = itemSize;
            ret->numItems  = numItems;
            ret->data = ((char *)ret) + sizeof(MMessageField);  /* data bytes start immediately after the struct */
            ret->name = ((char *)ret->data)+dataSize;           /* name starts right after the data */
            ret->isFixedSize = ret->isFlattenable = MTrue;
            memset(ret->data, 0, dataSize);
            memcpy((void *)ret->name, fieldName, numNameBytes);
            ((char *)ret->name)[ret->nameBytes-1] = '\0';  /* paranoia: guarantee that the field name is terminated */
         }
         return ret;
      }
   }
}
/* Returns a deep copy of (ri), allocated via MMalloc(mallocfn, ...), with the
 * sipdomain string independently duplicated so the two structs never share
 * (or double-free) the same string.  Returns NULL if (ri) is NULL or the
 * struct allocation fails.
 * NOTE(review): if MStrdup() fails, the copy is returned with a NULL sipdomain
 * and the failure is logged; the matching free function is not visible here,
 * so no rollback is attempted -- confirm callers tolerate a NULL sipdomain. */
CallRealmInfo * RealmInfoDup (CallRealmInfo *ri, int mallocfn)
{
   char fn[] = "RealmInfoDup()";
   CallRealmInfo *realmInfo = NULL;

   if (ri == NULL)
   {
      return NULL;
   }

   realmInfo = (CallRealmInfo *)MMalloc (mallocfn, sizeof (CallRealmInfo));
   if (realmInfo == NULL)
   {
      NETERROR (MSIP, ("%s malloc failed for realminfo", fn));
      return NULL;
   }

   /* Shallow-copy all scalar members first... */
   memcpy(realmInfo, ri, sizeof(CallRealmInfo));

   /* ...then replace the copied sipdomain pointer with our own duplicate. */
   if (ri->sipdomain)
   {
      realmInfo->sipdomain = MStrdup(mallocfn, ri->sipdomain);
      if (realmInfo->sipdomain == NULL)
      {
         /* Bug fix: this failure used to be silent; log it so a partial
          * copy (NULL sipdomain) can be traced back to this point. */
         NETERROR (MSIP, ("%s malloc failed for sipdomain", fn));
      }
   }

   return realmInfo;
}
/* Allocates an MByteBuffer with room for (numBytes) bytes of payload.
 * The struct declaration already contains the first payload byte, hence
 * the -1 in the allocation size.  If (clearBytes) is set, the payload is
 * zero-filled.  Returns NULL on allocation failure. */
MByteBuffer * MBAllocByteBuffer(uint32 numBytes, MBool clearBytes)
{
   MByteBuffer * buf = (MByteBuffer *) MMalloc((sizeof(MByteBuffer)+numBytes)-1);  /* -1 since at least 1 data byte is in the struct */

   if (buf == NULL) return NULL;

   buf->numBytes = numBytes;
   if (clearBytes) memset(&buf->bytes, 0, numBytes);

   return buf;
}
/* Creates a new, empty MMessage with the given 'what' code and no fields.
 * Returns NULL on allocation failure. */
MMessage * MMAllocMessage(uint32 what)
{
   MMessage * msg = (MMessage *) MMalloc(sizeof(MMessage));

   if (msg == NULL) return NULL;

   msg->what       = what;
   msg->numFields  = 0;
   msg->firstField = NULL;
   msg->lastField  = NULL;

   return msg;
}
/* Resizes (oldBuf) -- which must have come from MMalloc(), whose size header is
 * stored in the uint32 immediately preceding the returned pointer -- to
 * (newSize) bytes.  Returns the (possibly new) buffer, NULL when newSize is
 * zero, or NULL on out-of-memory (in which case oldBuf is left untouched,
 * matching standard realloc() semantics). */
void * MRealloc(void * oldBuf, uint32 newSize)
{
   const uint32 oldSize = oldBuf ? (*(((uint32*)oldBuf)-1)) : 0;  /* read the size header MMalloc stashed before the buffer */
   void * newBuf = NULL;

   if (newSize == oldSize) return oldBuf;  /* already the requested size -- nothing to do */

   if (newSize > 0)
   {
      newBuf = MMalloc(newSize);
      if (newBuf == NULL) return NULL;  /* out-of-memory error!  Avoid side effects */
   }

   if ((newBuf)&&(oldBuf)) memcpy(newBuf, oldBuf, (newSize<oldSize)?newSize:oldSize);
   if (oldBuf) MFree(oldBuf);

   return newBuf;
}
/* Makes a deep copy of (cloneMe) and returns it, or NULL on allocation failure.
 * Fixed-size fields are cloned with a single memcpy of the whole allocation;
 * variable-size fields (B_MESSAGE_TYPE / byte-buffer types) copy only the struct
 * header and then clone each pointed-to item individually, rolling back any
 * partial work if a nested clone fails. */
static MMessageField * CloneMMessageField(const MMessageField * cloneMe)
{
   MMessageField * clone = (MMessageField *) MMalloc(cloneMe->allocSize);
   if (clone)
   {
      /* Fixed-size fields: copy struct + data + name in one shot.
       * Variable-size fields: copy only the struct header; items and name are handled below. */
      memcpy(clone, cloneMe, cloneMe->isFixedSize ? cloneMe->allocSize : sizeof(MMessageField));
      clone->prevField = clone->nextField = NULL;
      /* data/name point inside the original allocation; recompute them as the
       * same byte offsets relative to the clone's own allocation. */
      clone->data = ((char *)clone) + (((char *)cloneMe->data)-((char *)cloneMe));
      clone->name = ((const char *)clone) + (cloneMe->name-((const char *)cloneMe));
      if (clone->isFixedSize == MFalse)
      {
         /* Oops, this field has alloced-pointer semantics, so we have to clone all the items too */
         if (clone->typeCode == B_MESSAGE_TYPE)
         {
            MMessage ** dstArray = (MMessage **) clone->data;
            const MMessage ** srcArray = (const MMessage **) cloneMe->data;
            int32 i;
            /* Iterate high-to-low so the rollback loop below knows exactly which
             * slots (j > i) have already been filled in. */
            for (i=cloneMe->numItems-1; i>=0; i--)
            {
               if (srcArray[i])
               {
                  if ((dstArray[i] = MMCloneMessage(srcArray[i])) == NULL)
                  {
                     /* Allocation failure! Roll back previous allocs and fail cleanly */
                     /* (slots may be NULL here; presumably MMFreeMessage(NULL) is a no-op) */
                     int32 j;
                     for (j=cloneMe->numItems-1; j>i; j--) MMFreeMessage(dstArray[j]);
                     MFree(clone);
                     return NULL;
                  }
               }
               else dstArray[i] = NULL;
            }
         }
         else
         {
            /* Same item-by-item deep copy, but for byte-buffer-valued fields. */
            MByteBuffer ** dstArray = (MByteBuffer **) clone->data;
            const MByteBuffer ** srcArray = (const MByteBuffer **) cloneMe->data;
            int32 i;
            for (i=cloneMe->numItems-1; i>=0; i--)
            {
               if (srcArray[i])
               {
                  if ((dstArray[i] = MBCloneByteBuffer(srcArray[i])) == NULL)
                  {
                     /* Allocation failure! Roll back previous allocs and fail cleanly */
                     int32 j;
                     for (j=cloneMe->numItems-1; j>i; j--) MBFreeByteBuffer(dstArray[j]);
                     MFree(clone);
                     return NULL;
                  }
               }
               else dstArray[i] = NULL;
            }
         }
         /* copy the name too, since we didn't do it above */
         memcpy((char *)clone->name, cloneMe->name, clone->nameBytes);
      }
   }
   return clone;
}
/* Default pool allocator: returns a chunk of at least (size) bytes from (pool).
 * Strategy: (1) round the request up to a size bucket, (2) scan the free list
 * for a reasonably-close fit, (3) try to cut the chunk from the tail of the
 * most recently allocated bigblock, and (4) failing all that, allocate a fresh
 * bigblock from the parent pool and cut from it.  Returns 0 on failure, with
 * (stat)/(err) presumably filled in by the parent allocator -- confirm. */
PTR
MM_dfl_alloc( MM_POOL *pool, i4 size, STATUS *stat, CL_ERR_DESC *err )
{
    MM_DFL_BLOCK *bigblock;
    i4 minsize;
    i4 i;

    /* File-scope instrumentation counters; dump pool stats every 100000 requests. */
    alloced++;
    if( !(++requests % 100000) )
        MM_dfl_stat( pool );

    /* Round up to powers of 2, quartered */
    /* This breaks all requests up into approx 30 * 4 buckets */
    /* i.e. candidate sizes are 5<<i, 6<<i, 7<<i, 8<<i for increasing i,
     * starting at i=2 (so the smallest bucket is 20 bytes). */
    for( i = 2; ; i++ )
        if( ( minsize = ( 5 << i ) ) >= size ||
            ( minsize = ( 6 << i ) ) >= size ||
            ( minsize = ( 7 << i ) ) >= size ||
            ( minsize = ( 8 << i ) ) >= size )
            break;

    /* Scan free list for appropriate chunk */
    /* If we find one exactly at minsize, that's our winner, */
    /* but we'll settle for any chunk smaller than bestsize. */
    /* Otherwise, we leave the free list alone and go to the well. */
    {
        MM_BLOCK *chunk = 0;
        MM_BLOCK *p, *pp = 0, *spp = 0;
        i4 bestsize = minsize * 32;   /* accept chunks up to 32x the bucket size */

        /* Best-fit scan; stops early if an exact bucket-size chunk is found
         * (bestsize drops to minsize, terminating the loop condition).
         * spp remembers the predecessor of the current best chunk for unlinking. */
        for( p = pool->free_list; p && bestsize > minsize; pp = p, p = p->next )
            if( p->size >= size && p->size < bestsize )
            {
                spp = pp;
                chunk = p;
                bestsize = p->size;
            }

        if( chunk )
        {
            /* Unlink the chosen chunk from the singly-linked free list. */
            if( spp )
                spp->next = chunk->next;
            else
                pool->free_list = chunk->next;

            /* NOTE(review): the waste (chunk->size - size) is stashed in the
             * now-unused next field, and also tallied in the file-scope
             * wastesiz counter -- confirm readers of next expect this. */
            chunk->next = (MM_BLOCK *)(SCALARP)( chunk->size - size );
            wastesiz += chunk->size - size;
            return (PTR)( chunk + 1 );   /* caller's memory starts just past the header */
        }
    }

    /* Check the last bigblock allocated for room at its tail. */
    /* If the piece there is large enough, we'll cut our chunk */
    /* and leave the rest behind. If the piece is not big enough, */
    /* well make it into a chunk and put it on the free chain. We */
    /* have to do this because we can only cut new chunks from the */
    /* tail of the last allocated bigblock, and we're about to */
    /* allocate a new bigblock. */
    bigblock = (MM_DFL_BLOCK *)pool->alloc_list;

    if( bigblock )
    {
        MM_BLOCK *chunk;
        i4 chunksize;

        /* Candidate chunk begins at the bigblock's high-water mark. */
        chunk = (MM_BLOCK *)((char *)bigblock + bigblock->used );
        chunksize = bigblock->hdr.size - bigblock->used - sizeof( *chunk );

        /* If the remaining room is big enough, cut us off a piece. */
        /* If not, put it on the free list since this bigblock is no */
        /* longer going to be accessible at the head of alloc_list. */
        if( minsize <= chunksize )
        {
            bigblock->used += minsize + sizeof( MM_BLOCK );
            chunk->size = minsize;
            chunk->next = 0;
            return (PTR)( chunk + 1 );
        }
        else if( chunksize > 0 )
        {
            /* Tail remnant too small for this request: turn it into a chunk
             * and recycle it through the pool's own free routine. */
            chunk->size = chunksize;
            chunk->next = 0;
            bigblock->used += chunksize + sizeof( MM_BLOCK );
            MM_dfl_free( pool, (PTR)( chunk + 1 ), err );
        }
    }

    /* Beggar - must get more memory */
    /* Allocate a new bigblock (of at least the size we need) and */
    /* cut our chunk from it. */
    {
        MM_BLOCK *chunk;
        i4 bigsize;

        /* Allocate a large block from the parent. */
        /* Sizing steps are: */
        /* - requested size plus our overhead */
        /* - moved up to requested expand_size */
        /* - rounded up to optimum size for parent */
        bigsize = size + sizeof( MM_DFL_BLOCK );
        bigsize = max( bigsize, pool->expand_size );
        bigsize = MM_ROUND_SIZE( bigsize, pool->parent->service_size,
                                 pool->parent->service_cost );

        bigblock = (MM_DFL_BLOCK *) MMalloc( pool->parent, bigsize, stat, err );

        if( !bigblock )
            return 0;   /* parent allocation failed; stat/err carry the reason */

        /* Initialize the bigblock header and push it onto the pool's alloc
         * list; the head of alloc_list is the only bigblock we cut from. */
        bigblock->used = sizeof( MM_DFL_BLOCK );
        bigblock->hdr.size = bigsize;
        bigblock->hdr.next = pool->alloc_list;
        pool->alloc_list = &bigblock->hdr;

        /* Cut our chunk from the fresh bigblock's tail. */
        chunk = (MM_BLOCK *)((char *)bigblock + bigblock->used );
        bigblock->used += minsize + sizeof( MM_BLOCK );
        chunk->size = minsize;
        chunk->next = 0;
        return (PTR)( chunk + 1 );
    }
}