/**
 * Attempt to allocate an object in this thread-local heap (TLH).
 *
 * If the request does not fit the current TLH, the TLH is refreshed once and
 * the fit is re-tried; on success the allocation pointer is bumped and the
 * allocation description is stamped with the flags/subspace of this TLH.
 *
 * @param env the current environment
 * @param allocDescription describes the allocation request (contiguous byte size, flags)
 * @param shouldCollectOnFailure passed through to refresh(); presumably allows a GC
 *        on refresh failure -- TODO confirm against refresh() semantics
 * @return pointer to the allocated storage, or NULL if the request still does
 *         not fit after a refresh
 */
void *
MM_TLHAllocationSupport::allocateFromTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription, bool shouldCollectOnFailure)
{
	void *memPtr = NULL;

	/* TLH allocation is incompatible with a segregated heap layout */
	Assert_MM_true(!env->getExtensions()->isSegregatedHeap());

	uintptr_t sizeInBytesRequired = allocDescription->getContiguousBytes();

	/* If there's insufficient space, refresh the current TLH */
	if (sizeInBytesRequired > getSize()) {
		refresh(env, allocDescription, shouldCollectOnFailure);
	}

	/* Try to fit the allocate into the current TLH (re-checked because the
	 * refresh above may have failed to produce enough space) */
	if(sizeInBytesRequired <= getSize()) {
		/* bump-pointer allocation: hand out the current alloc pointer and advance it */
		memPtr = (void *)getAlloc();
		setAlloc((void *)((uintptr_t)getAlloc() + sizeInBytesRequired));

#if defined(OMR_GC_TLH_PREFETCH_FTA)
		/* decrement the prefetch free-target-area counter, clamping at zero */
		if (*_pointerToTlhPrefetchFTA < (intptr_t)sizeInBytesRequired) {
			*_pointerToTlhPrefetchFTA = 0;
		} else {
			*_pointerToTlhPrefetchFTA -= (intptr_t)sizeInBytesRequired;
		}
#endif /* OMR_GC_TLH_PREFETCH_FTA */

		/* record where the object came from so the caller can finish initialization */
		allocDescription->setObjectFlags(getObjectFlags());
		allocDescription->setMemorySubSpace((MM_MemorySubSpace *)_tlh->memorySubSpace);
		allocDescription->completedFromTlh();
	}

	return memPtr;
};
/*
 * coalesce - Merge the free block at bp with any free neighbouring blocks.
 *
 * The previous block's status is read from its footer, the next block's from
 * its header. Returns the payload pointer of the (possibly larger) merged
 * free block.
 *
 * BUG FIX: in the "previous free, next allocated" case the original updated
 * the previous block's header and bp's footer but returned the old bp, which
 * points into the middle of the merged block; the merged block starts at the
 * previous block, so bp must be rebased (as in the both-free case).
 */
static void *coalesce(void *bp)
{
    unsigned int prev_allocfield = getAlloc(getBlockFooter(getPrevBlock(bp)));
    unsigned int next_allocfield = getAlloc(getBlockHeader(getNextBlock(bp)));
    unsigned int size = getSize(getBlockHeader(bp));

    /* Case 1: both neighbours allocated -- nothing to merge */
    if(prev_allocfield && next_allocfield)
        return bp;

    if(prev_allocfield && !next_allocfield) {
        /* Case 2: absorb the next block; block still starts at bp.
         * Header is written first so getBlockFooter(bp) sees the new size. */
        size += getSize(getBlockHeader(getNextBlock(bp)));
        put(getBlockHeader(bp),pack(size,0));
        put(getBlockFooter(bp),pack(size,0));
    }
    else if(!prev_allocfield && next_allocfield) {
        /* Case 3: absorb bp into the previous block.
         * getBlockFooter(bp) still uses bp's old header size, so the footer
         * lands at the end of bp -- the correct end of the merged block. */
        size += getSize(getBlockHeader(getPrevBlock(bp)));
        put(getBlockHeader(getPrevBlock(bp)),pack(size,0));
        put(getBlockFooter(bp),pack(size,0));
        bp = getPrevBlock(bp); /* merged block begins at the previous block */
    }
    else {
        /* Case 4: absorb both neighbours into the previous block */
        size += getSize(getBlockHeader(getPrevBlock(bp))) + getSize(getBlockHeader(getNextBlock(bp)));
        put(getBlockHeader(getPrevBlock(bp)),pack(size,0));
        put(getBlockFooter(getNextBlock(bp)),pack(size,0));
        bp = getPrevBlock(bp);
    }
    return bp;
}
/*
 * coalesce - Merge the free block at bp with any free neighbours and insert
 * the result into the appropriate segregated free list.
 *
 * This variant encodes the previous block's allocation status as a bit in
 * the current header (so allocated blocks can omit footers); the previous
 * block's size is therefore only readable via prevBlock() when the prev-alloc
 * bit says the previous block is free -- presumably prevBlock() reads the
 * previous block's footer, which only free blocks carry (TODO confirm).
 *
 * Returns the payload pointer of the merged free block.
 */
static void *coalesce(void *bp){
    size_t prevAllocBit = getPrevAlloc(getHeader(bp));
    size_t nextAllocBit = getAlloc(getHeader(nextBlock(bp)));
    size_t size = getSize(getHeader(bp));
    size_t index;
    size_t prevPAlloc;
    /* both the previous and next block allocated: no merge, but the next
     * block must learn that its predecessor (bp) is now free */
    if (prevAllocBit && nextAllocBit) {
        setPrevFree(getHeader(nextBlock(bp)));
    }
    /* Only previous block is free: absorb bp into it.  The previous block's
     * own prev-alloc bit must be preserved in the new, larger header. */
    else if (!prevAllocBit && nextAllocBit) {
        size += getSize(getHeader(prevBlock(bp)));
        prevPAlloc = getPrevAlloc(getHeader(prevBlock(bp)));
        deleteListNode(prevBlock(bp)); /* unlink before its header is overwritten */
        put(getFooter(bp), size); /* footer placed using bp's old header size */
        bp = prevBlock(bp);
        put(getHeader(bp), pack(size, prevPAlloc, 0));
        setPrevFree(getHeader(nextBlock(bp)));
    }
    /* Only next block is free: absorb it; bp keeps its own prev-alloc bit */
    else if (prevAllocBit && !nextAllocBit) {
        size += getSize(getHeader(nextBlock(bp)));
        deleteListNode(nextBlock(bp));
        put(getHeader(bp), pack(size, prevAllocBit, 0));
        put(getFooter(bp), size); /* footer located via the just-written header */
    }
    /* Both previous and next block are free: absorb both into the previous
     * block.  Both neighbours are unlinked before their boundary tags change. */
    else {
        size += getSize(getHeader(prevBlock(bp))) + getSize(getFooter(nextBlock(bp)));
        prevPAlloc = getPrevAlloc(getHeader(prevBlock(bp)));
        deleteListNode(nextBlock(bp));
        deleteListNode(prevBlock(bp));
        put(getHeader(prevBlock(bp)), pack(size, prevPAlloc, 0));
        put(getFooter(nextBlock(bp)), size);
        bp = prevBlock(bp);
    }
    /* file the merged block in the size class matching its new size */
    index = getIndex(size);
    insertListNode(bp, index);
    return bp;
}
/*
 * find_first_fit - Scan the implicit block list for the first free block
 * whose size is at least `size`.
 *
 * The scan starts at heap_listp and stops at the epilogue header, whose
 * size field is 0.  Returns the payload pointer of the first fitting free
 * block, or NULL if no block fits (or size is 0).
 *
 * BUG FIX: the loop update read getSize(bp) from the payload pointer itself
 * instead of from the block header, unlike the initial read outside the
 * loop; every subsequent size was therefore taken from the wrong word.
 */
static void *find_first_fit(unsigned int size)
{
    unsigned int blocksize;
    char *bp;

    /* size is unsigned, so this rejects exactly size == 0 */
    if(size <= 0)
        return NULL;

    bp = heap_listp;
    blocksize = getSize(getBlockHeader(bp));
    while(blocksize) {
        if(blocksize >= size && !getAlloc(getBlockHeader(bp)))
            return bp;
        bp = getNextBlock(bp);
        blocksize = getSize(getBlockHeader(bp));
    }
    return NULL;
}
void mprof::CompactnessAnalysis::build( const struct MprofRecordAlloc * in_record, std::vector<size_t>::const_iterator in_orderBegin, std::vector<size_t>::const_iterator in_orderEnd ) { std::list<size_t> retryList; size_t lastIndex = *in_orderBegin; for( std::vector<size_t>::const_iterator orderItr = in_orderBegin; orderItr != in_orderEnd; ++orderItr ) { if( addRecord( in_record, *orderItr ) ) { lastIndex = *orderItr; //keep retrying--FIXME: handle realloc better( damn you realloc ) bool progress; do { progress = false; for( std::list<size_t>::iterator retryItr = retryList.begin(); retryItr != retryList.end(); ) { if( addRecord( in_record, *retryItr ) ) { if( olderThan( in_record + *retryItr, in_record + lastIndex ) ) { std::cerr << "Info: mprof::CompactnessAnalysis::build: detected scheduler/timestamp inversion at index '" << *orderItr << "' of " << deltaT( in_record + *retryItr, in_record + lastIndex ) << " microseconds" << std::endl; } lastIndex = *retryItr; retryItr = retryList.erase( retryItr ); progress = true; } else { ++retryItr; } } } while( progress ); } else { if( in_record[ *orderItr ].header.mode == MPROF_MODE_REALLOC ) { //std::cerr << "Info: mprof::CompactnessAnalysis::build: realloc record at index '" << *orderItr << "' may have been paritally processed." << std::endl; } retryList.push_back( *orderItr ); } } for( std::list<size_t>::iterator retryItr = retryList.begin(); retryItr != retryList.end(); retryItr = retryList.erase( retryItr ) ) { size_t address, size; if( getAlloc( in_record + *retryItr, address, size ) ) { std::cerr << "Warning: mprof::CompactnessAnalysis::build: found double alloc at index '" << *retryItr << "' for address '" << std::hex << address << std::dec << "'" << std::endl; } if( getFree( in_record + *retryItr, address ) ) { std::cerr << "Warning: mprof::CompactnessAnalysis::build: found unmatched free at index '" << *retryItr << "' for address '" << std::hex << address << std::dec << "'" << std::endl; } } }
/*
 * findFit - Segregated first-fit search.
 *
 * Starts in the size class that asize maps to and, if that class holds no
 * fitting block, falls through to every larger class in turn.  Each class is
 * a circular doubly linked free list headed at a fixed slot after
 * heap_listp; a scan ends when it wraps back to the list head.
 *
 * Returns the payload pointer of the first free block with size >= asize,
 * or NULL if no class contains one.
 */
static void *findFit(size_t asize){
    char *const firstList = heap_listp + DSIZE;
    char *const lastList = firstList + (LISTNUM-1) * DSIZE;
    char *curList;
    void *blk;

    for (curList = firstList + getIndex(asize) * DSIZE; curList <= lastList; curList += DSIZE){
        /* walk the circular list until we are back at the sentinel head */
        for (blk = nextFree(curList); blk != (void *)curList; blk = nextFree(blk)){
            if ((getSize(getHeader(blk)) >= asize) && !getAlloc(getHeader(blk))){
                return blk;
            }
        }
    }
    return NULL;
}
/**
 * Sample objects from the start of this TLH into the frequent-objects
 * statistics.
 *
 * Walks objects in address order from the TLH base toward the current alloc
 * pointer, stopping once the walk passes a limit set at
 * frequentObjectAllocationSamplingRate percent of the used portion of the
 * TLH.  Does nothing when the allocation interface has no frequent-objects
 * stats attached.
 *
 * @param env the current environment
 */
void
MM_TLHAllocationSupport::updateFrequentObjectsStats(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *extensions = env->getExtensions();
	MM_FrequentObjectsStats* frequentObjectsStats = _objectAllocationInterface->getFrequentObjectsStats();

	if(NULL != frequentObjectsStats){
		/* presumably first and current alloc pointer will point to an object */
		GC_ObjectHeapIteratorAddressOrderedList objectHeapIterator(extensions, (omrobjectptr_t) getBase(), (omrobjectptr_t) getAlloc(), false, false);
		omrobjectptr_t object = NULL;
		/* limit = base + (usedBytes * samplingRate%) -- only the leading
		 * fraction of the TLH is sampled */
		uintptr_t limit = (((uintptr_t) getAlloc() - (uintptr_t) getBase())*extensions->frequentObjectAllocationSamplingRate)/100 + (uintptr_t) getBase();

		while(NULL != (object = objectHeapIterator.nextObject())){
			if( ((uintptr_t) object) > limit){
				break; /* past the sampled prefix of the TLH */
			}
			frequentObjectsStats->update(env, object);
		}
	}
}
/* checkheap: Returns 0 if no errors were found,
 * otherwise returns the error
 *
 * Consistency checker for the segregated-list allocator.  It verifies, in
 * order:
 *   - the alignment padding word at the bottom of the heap is zero
 *   - the prologue block is marked allocated
 *   - every block is inside the heap, aligned, and (for free blocks) has a
 *     matching header/footer and no free predecessor (i.e. coalescing ran)
 *   - every segregated free list only contains in-heap nodes with
 *     consistent next/prev links
 *   - the number of free blocks seen walking the heap equals the number
 *     seen walking the free lists
 *
 * NOTE(review): despite the comment, all failures are only printed; the
 * function always returns 0.
 */
int mm_checkheap(int verbose) {
    char *firstList = heap_listp + DSIZE;
    char *lastList = firstList + (LISTNUM-1) * DSIZE;
    char *curBlock = heap_listp + DSIZE;
    size_t size = getSize(getHeader(curBlock));
    int freeCountHeap = 0;  /* free blocks found by scanning the heap */
    int freeCountList = 0;  /* free blocks found by walking the free lists */
    char *nextFreePtr;
    char *list;
    char *ptr;
    int s,a;
    size_t prevAlloc;
    verbose = verbose;  /* self-assignment silences the unused-parameter warning */

    /* check alignment padding*/
    if (get(mem_heap_lo()) != 0) {
        printf("Alignment padding error!\n");
    }
    /* check prologue */
    if (!getAlloc(getHeader(heap_listp+DSIZE))) {
        printf("Prologue error!\n");
    }

    /* Check the heap -- the epilogue header has size 0 and ends the scan */
    while (size != 0){
        // Check whether the block is in heap
        if (!inHeap(curBlock)){
            dbg_printf("Block %p is not in the heap\n", curBlock);
        }
        // Check each blk's address alignment
        if (!aligned(curBlock)) {
            dbg_printf("Block %p is not aligned.\n", curBlock);
            exit(0);
        }
        if (!getAlloc(getHeader(curBlock))){
            freeCountHeap ++;
            // Check each blk's header and footer (only free blocks carry footers)
            s = (getSize(getHeader(curBlock)) == getSize(getFooter(curBlock)));
            a = (getAlloc(getHeader(curBlock)) == getAlloc(getFooter(curBlock)));
            if (!s || !a) {
                dbg_printf("Header&Footer does not match in %p\n", curBlock);
            }
            // Check coalescing: two adjacent free blocks mean coalesce() was missed
            prevAlloc = getPrevAlloc(getHeader(curBlock));
            if (!prevAlloc){
                dbg_printf("Coalescing error! \n");
            }
        }
        curBlock = nextBlock(curBlock);
        size = getSize(getHeader(curBlock));
    }

    /* Check the free list -- each list is circular and headed at a fixed slot */
    for (list = firstList; list != (char *)lastList + DSIZE; list = (char *)list + DSIZE){
        for (ptr = nextFree(list); ptr != list; ptr = nextFree(ptr)){
            // Check the free list node is in heap
            if (!inHeap(ptr)){
                dbg_printf("list node %p is not in the heap\n", ptr);
            }
            freeCountList ++;
            nextFreePtr = nextFree(ptr);
            // Check all next/prev pointers are consistent
            if (prevFree(nextFreePtr) != (ptr)){
                dbg_printf("next/prev pointers is not consistent!\n");
            }
        }
    }

    // Check free blocks by iterating thru every blk and traversing
    // free list by pointers and see if they match.
    if (freeCountHeap != freeCountList){
        dbg_printf("Free block count does not match!\n");
    }
    return 0;
}
/**
 * Report clearing of a full allocation cache.
 *
 * Fires the private CACHE_CLEARED hook, passing the default memory subspace
 * of the current memory space together with this TLH's base, alloc, and top
 * pointers.
 *
 * @param env the current environment
 */
void
MM_TLHAllocationSupport::reportClearCache(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase *ext = env->getExtensions();
	MM_MemorySubSpace *defaultSubSpace = env->getMemorySpace()->getDefaultMemorySubSpace();

	TRIGGER_J9HOOK_MM_PRIVATE_CACHE_CLEARED(ext->privateHookInterface, _omrVMThread, defaultSubSpace, getBase(), getAlloc(), getTop());
};
/**
 * Apply one allocation record (which may encode a free, an alloc, or --
 * via realloc -- both) to the analysis state.
 *
 * allocationMap maps each address to the history of (allocIndex, freeIndex)
 * pairs for that address; nullIndex as a freeIndex marks a live allocation.
 * A record that arrives out of order (a free for an address not currently
 * live, or an alloc for an address already live) is rejected so the caller
 * can retry it later.  For a realloc whose free half applied but whose
 * alloc half did not, the free half is rolled back before returning.
 *
 * @param in_record base pointer to the record array
 * @param in_index index of the record to apply
 * @return true when the record was fully applied, false when it must be retried
 */
bool
mprof::CompactnessAnalysis::addRecord( const struct MprofRecordAlloc * in_record, const size_t in_index )
{
	const struct MprofRecordAlloc * const record = in_record + in_index;
	uint64_t size;
	uint64_t address;
	std::map< uint64_t, std::list< std::pair<uint64_t, uint64_t> > >::iterator mapItr;
	bool ret = false;
	bool freed = false;
	/* do/while(false): `break` jumps to the rollback/commit logic below */
	do
	{
		/* free half (present for free and realloc records) */
		if( getFree( record, address ) && address )
		{
			mapItr = allocationMap.find( address );
			//detect out-of-order record: address unknown or not currently live
			if( mapItr == allocationMap.end() || mapItr->second.empty() || mapItr->second.back().second != nullIndex )
			{
				//std::cerr << "delaying free @ " << std::hex << address << std::dec << " index " << in_index << std::endl;
				break;
			}
			else
			{
				/* close the live allocation and look up its original size */
				mapItr->second.back().second = in_index;
				getAlloc( in_record + mapItr->second.back().first, address, size );
				//adjust client size
				clientSize -= size;
				//mark vmMap
				refPages( address, size, false );
				freed = true; /* remember for rollback if the alloc half fails */
			}
		}
		/* alloc half (present for alloc and realloc records) */
		if( getAlloc( record, address, size ) && address )
		{
			mapItr = allocationMap.find( address );
			//detect out-of-order record
			if( mapItr == allocationMap.end() )
			{
				/* first allocation ever seen at this address */
				allocationMap[ address ] = std::list< std::pair<uint64_t, uint64_t> >();
				mapItr = allocationMap.find( address );
			}
			else if( mapItr->second.back().second == nullIndex )
			{
				/* address is still live: the matching free has not arrived yet */
				//std::cerr << "delaying alloc @ " << std::hex << address << std::dec << " index " << in_index << std::endl;
				break;
			}
			mapItr->second.push_back( std::pair<uint64_t, uint64_t>( in_index, nullIndex ) );
			//adjust client size
			clientSize += size;
			if( clientSize > clientHighWaterMark )
			{
				clientHighWaterMark = clientSize;
			}
			//mark vmMap
			refPages( address, size, true );
		}
		ret = true;
	} while( false );
	if( ret )
	{
		setCompactness( clientSize );
	}
	else if( freed )
	{
		/* realloc whose alloc half was rejected: undo the free half so the
		 * whole record can be retried atomically later */
		//std::cerr << "correcting realloc " << in_index << std::endl;
		getFree( record, address );
		mapItr = allocationMap.find( address );
		mapItr->second.back().second = nullIndex;
		getAlloc( in_record + mapItr->second.back().first, address, size );
		//adjust client size
		clientSize += size;
		//mark vmMap
		refPages( address, size, true );
	}
	return ret;
}