Example #1
static void* find_fit(size_t size) //find a fitting free block (first fit across the segregated lists)
{
#ifdef __DEBUG__
	fprintf(stderr, "find fitting place - size : %d\n", size);
#endif
	void* bp;
	void* s;
	size_t sizeIndex = size;
	for(s = getSegBySize(size); ; s = getSegBySize(sizeIndex))  //increase seg size
	{
		sizeIndex = getNextSize(sizeIndex);
		for(bp = s; ; bp = getNextNode(bp)) //iterate list
		{
			if(bp==NULL) break;
#ifdef __DEBUG__
			fprintf(stderr, "searching : %u / allocated? : %u / size? : %u\n", getBlockHeader(bp), isAllocated(getBlockHeader(bp)), getSize(getBlockHeader(bp)));
#endif
			if(!isAllocated(getBlockHeader(bp)) && size <= getSize(getBlockHeader(bp)))
			{
				return bp;
			}
			if(isTail(bp)) break;
		}
		if(s==seg_inf) break;
	}
	return NULL;
}
void mm_free(void *bp)
{
	unsigned int size = getSize(getBlockHeader(bp));
	
	put(getBlockHeader(bp),pack(size,0));
	put(getBlockFooter(bp),pack(size,0));
	coalesce(bp);
}
static void *coalesce(void *bp)
{
	unsigned int prev_allocfield = getAlloc(getBlockFooter(getPrevBlock(bp)));
	unsigned int next_allocfield = getAlloc(getBlockHeader(getNextBlock(bp)));
	unsigned int size = getSize(getBlockHeader(bp));

	if(prev_allocfield && next_allocfield)
		return bp;
	
	if(prev_allocfield && !next_allocfield)
	{
		size += getSize(getBlockHeader(getNextBlock(bp)));
		put(getBlockHeader(bp),pack(size,0));
		put(getBlockFooter(bp),pack(size,0));
	}
	else if(!prev_allocfield && next_allocfield)
	{
		size += getSize(getBlockHeader(getPrevBlock(bp)));
		put(getBlockHeader(getPrevBlock(bp)),pack(size,0));
		put(getBlockFooter(bp),pack(size,0));
		bp = getPrevBlock(bp); /* merged block now starts at the previous block */
	}
	else 
	{
		size += getSize(getBlockHeader(getPrevBlock(bp))) + getSize(getBlockHeader(getNextBlock(bp)));
		put(getBlockHeader(getPrevBlock(bp)),pack(size,0));
		put(getBlockFooter(getNextBlock(bp)),pack(size,0));
		bp = getPrevBlock(bp);
	}
	return bp;
}
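These mm examples lean on a small set of header/footer helpers in the style of the CS:APP textbook macros (put/pack/getSize here; later examples use set/setMask in the same roles, and some use lowercase wsize). A minimal sketch of plausible definitions follows; the names match the calls above, but the bodies are assumptions and are not taken from the original sources.

/* Sketch of assumed CS:APP-style helpers; each block has a one-word header and
 * footer holding its size with the allocated bit in the low bit. */
#define WSIZE 4                 /* word / header / footer size (bytes) */
#define DSIZE 8                 /* double-word alignment (bytes)       */

#define pack(size, alloc)   ((size) | (alloc))              /* size + alloc bit */
#define put(p, val)         (*(unsigned int *)(p) = (val))  /* write a word     */
#define get(p)              (*(unsigned int *)(p))          /* read a word      */

#define getSize(hdr)        (get(hdr) & ~0x7)   /* size lives in the upper bits */
#define getAlloc(hdr)       (get(hdr) & 0x1)    /* allocated bit                */
#define isAllocated(hdr)    getAlloc(hdr)

#define getBlockHeader(bp)  ((char *)(bp) - WSIZE)
#define getBlockFooter(bp)  ((char *)(bp) + getSize(getBlockHeader(bp)) - DSIZE)
#define getNextBlock(bp)    ((char *)(bp) + getSize(getBlockHeader(bp)))
#define getPrevBlock(bp)    ((char *)(bp) - getSize((char *)(bp) - DSIZE))
/* free-list helpers (push_back, erase, getSegBySize, ...) are specific to each
 * example and are omitted here */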
Example #4
inline void *place(void *bp, size_t aSize) //alloc!
{
#ifdef __DEBUG__
	printf("placing on %u, size %u\n",getBlockHeader(bp), aSize);
#endif
	erase(getSize(getBlockHeader(bp)), bp); //remove the block from its free list
	size_t cSize = getSize(getBlockHeader(bp));
	if((cSize - aSize) >= 2*DSIZE)
	{
		if(aSize>=100) { //huge block to right
			set(getBlockHeader(bp), setMask(cSize - aSize, 0));
			set(getBlockFooter(bp), setMask(cSize - aSize, 0));
			bp = getNextBlock(bp);
			set(getBlockHeader(bp), setMask(aSize, 1));
			set(getBlockFooter(bp), setMask(aSize, 1));
			push_back(cSize-aSize, getPrevBlock(bp));
			return bp;
		} else { //tiny block to left
			set(getBlockHeader(bp), setMask(aSize, 1));
			set(getBlockFooter(bp), setMask(aSize, 1));
			bp = getNextBlock(bp);
			set(getBlockHeader(bp), setMask(cSize - aSize, 0));
			set(getBlockFooter(bp), setMask(cSize - aSize, 0));
			push_back(cSize-aSize, bp);
			return getPrevBlock(bp);
		}
	}
	else
	{
		set(getBlockHeader(bp), setMask(cSize, 1));
		set(getBlockFooter(bp), setMask(cSize, 1));
	}
	return bp;
}
Example #5
/*
 * mm_free - Free a block, put it back on its free list, and coalesce with neighbors.
 */
void mm_free(void *ptr)
{
#ifdef __DEBUG__
	printf("Trying to free...\n");
#endif
	if(!isAllocated(getBlockHeader(ptr))) return; 
#ifdef __DEBUG__
	fprintf(stderr, "freeing : %u / allocated? : %u / size? : %u\n",getBlockHeader(ptr), isAllocated(getBlockHeader(ptr)), getSize(getBlockHeader(ptr)));
#endif
	size_t size = getSize(getBlockHeader(ptr));
	set(getBlockHeader(ptr), setMask(size, 0));
	set(getBlockFooter(ptr), setMask(size, 0));
	push_back(size, ptr); //new empty block,
	coalesce(ptr); //and coalesce!
}
static void *extend_heap(unsigned int words)
{
	char *bp;
	unsigned int size;

	/* allocate an even number of words to maintain alignment */
	size = (words % 2)?(words+1)*wsize:words*wsize;
	if((bp = (char*)mem_sbrk(size)) == (void*)-1) /* mem_sbrk return the old mem_brk */
		return NULL;
	/* initialize free block header/footer and epilogue header */
	put(getBlockHeader(bp),pack(size,0));
	put(getBlockFooter(bp),pack(size,0));
	put(getBlockHeader(getNextBlock(bp)),pack(0,1)); /* new epilogue header */
	return coalesce(bp); 
}
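extend_heap above assumes the heap already ends in an epilogue header (a zero-size allocated header) put there by the initializer; the new free block overwrites it and a fresh epilogue is written at the new end. A minimal mm_init sketch in the same style (heap_listp and CHUNKSIZE are assumed names, not taken from the original code):

/* Assumed CS:APP-style initializer: alignment padding, a prologue block, and
 * the initial epilogue header, followed by one heap extension. */
static char *heap_listp;

int mm_init(void)
{
	if((heap_listp = mem_sbrk(4*wsize)) == (void*)-1)
		return -1;
	put(heap_listp, 0);                            /* alignment padding */
	put(heap_listp + 1*wsize, pack(2*wsize, 1));   /* prologue header   */
	put(heap_listp + 2*wsize, pack(2*wsize, 1));   /* prologue footer   */
	put(heap_listp + 3*wsize, pack(0, 1));         /* epilogue header   */
	heap_listp += 2*wsize;
	/* grow the empty heap with an initial free block */
	if(extend_heap(CHUNKSIZE/wsize) == NULL)
		return -1;
	return 0;
}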
/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old stuff over, and mark the old storage as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    const u4* pLen = getBlockHeader(mem);
    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, *pLen);

    /* handle size reduction case */
    if (*pLen >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, *pLen);
    dvmLinearFree(classLoader, mem);

    return newMem;
}
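A hypothetical round trip through the Dalvik linear-allocator API excerpted here; the sizes and the assumption that ENFORCE_READ_ONLY is disabled are illustrative only, not taken from the original sources.

/* Hypothetical usage sketch (assumes ENFORCE_READ_ONLY is off so the block
 * stays writable, and that <string.h> is included). */
static void linearAllocExample(Object* classLoader)
{
    char* buf = (char*) dvmLinearAlloc(classLoader, 64);
    memcpy(buf, "hello", 6);

    /* growing past the old length allocates fresh storage, copies the old
     * contents, and frees the old block (see dvmLinearRealloc above)     */
    buf = (char*) dvmLinearRealloc(classLoader, buf, 256);

    /* shrinking (or keeping the size) returns the same pointer unchanged */
    buf = (char*) dvmLinearRealloc(classLoader, buf, 16);

    dvmLinearFree(classLoader, buf);
}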
Example #8
static void *extend_heap(size_t words) //extend heap
{
	char *bp;
	size_t size;
	size = (words % 2) ? (words + 1)*WSIZE : words * WSIZE; 
#ifdef __DEBUG__
	fprintf(stderr, "Extending heap by %u...\n",size);
	printf("Heap size now : %u\n",mem_heapsize());
#endif
	if((long)(bp = mem_sbrk(size)) == -1) return NULL; //mem_sbrk: request more space from the memory system
	set(getBlockHeader(bp), setMask(size, 0));
	set(getBlockFooter(bp), setMask(size, 0));
	set(getBlockHeader(getNextBlock(bp)), setMask(0, 1));
	push_back(size, bp); //empty block to the list
	void* result = coalesce(bp); //coalesces!
//	push_back(getSize(getBlockHeader(result)), result);
	return result;
}
static void *find_first_fit(unsigned int size)
{
	unsigned int blocksize;
	char *bp;

	if(size <= 0)
		return NULL;

	bp = heap_listp;
	blocksize = getSize(getBlockHeader(bp));
	while(blocksize)
	{
		if(blocksize >= size && !getAlloc(getBlockHeader(bp)))
			return bp;
		bp = getNextBlock(bp);
		blocksize = getSize(getBlockHeader(bp));
	}
	return NULL;
}
static void place(void *bp, unsigned int size)
{
	char *splitblock_addr;
	unsigned int splitblock_size;
	unsigned int blocksize;

	blocksize = getSize(getBlockHeader(bp));
	/* if the block size == size, just set the block header/footer alloc field */
	/* if the block size > size, split the old block */
	if(blocksize > size)
	{
		/* initialize the header and footer of the split-off (trailing) block */
		splitblock_addr = (char*)bp + size;
		splitblock_size = blocksize - size;
		put(getBlockHeader(splitblock_addr),pack(splitblock_size,0));
		put(getBlockFooter(splitblock_addr),pack(splitblock_size,0));
	}
	/* initialize the allocated block header and footer */
	put(getBlockHeader(bp),pack(size,1));
	put(getBlockFooter(bp),pack(size,1));
}
Example #11
/* 
 * mm_malloc - Adjust the requested size for alignment and overhead, search the
 *     free lists for a fit, and extend the heap if no fitting block exists.
 */
void *mm_malloc(size_t size)
{
#ifdef __DEBUG__
	printf("Trying to allocate...\n");
#endif
	size_t adjSize;
	size_t extSize;
	void* bp;
	if(size == 0) return NULL;
	if(size<=DSIZE) adjSize = 3*DSIZE;
	else adjSize = DSIZE * ((size + 2*(DSIZE) + (DSIZE)-1) / DSIZE); //adjust size to align
	if((bp = find_fit(adjSize)) != NULL) { //is there any good place?
#ifdef __DEBUG__
		fprintf(stderr, "alloc ty 1 : %u\n",getBlockHeader(bp));
#endif
		return place(bp, adjSize);
	}
	extSize = MAX(adjSize, CHUNKSIZE); //no fit found: extend the heap to get more space
	if((bp = extend_heap(extSize/WSIZE)) == NULL) return NULL;
#ifdef __DEBUG__
	fprintf(stderr, "alloc ty 2 : %u\n",getBlockHeader(bp));
#endif
	return place(bp, adjSize);
}
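To make the size adjustment concrete, here is the rounding worked out for a few request sizes, assuming the usual WSIZE = 4 and DSIZE = 8:

/* size <= DSIZE:  adjSize = 3*DSIZE = 24 bytes
 * size = 20:      adjSize = DSIZE * ((20 + 2*DSIZE + DSIZE-1) / DSIZE)
 *                         = 8 * ((20 + 16 + 7) / 8) = 8 * 5 = 40 bytes
 * size = 100:     adjSize = 8 * ((100 + 16 + 7) / 8) = 8 * 15 = 120 bytes
 */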
Example #12
/*
 * mm_realloc - Implemented simply in terms of mm_malloc and mm_free
 */
void *mm_realloc(void *ptr, size_t size)
{
#ifdef __DEBUG__
	printf("Trying to realloc...\n");
#endif
	void *oldptr = ptr;
	void *newptr;
	size_t copySize;
	if(size == 0) { //realloc to zero bytes frees the block
		mm_free(oldptr);
		return NULL;
	}
	if(ptr == NULL) { //realloc of NULL is just malloc
		return mm_malloc(size);
	}
	newptr = mm_malloc(size); //alloc new mem
	if(newptr == NULL) return NULL;
	copySize = getSize(getBlockHeader(ptr));
	if(size < copySize) copySize = size;
	memcpy(newptr, oldptr, copySize); //copy
	mm_free(oldptr); //free old mem
	return newptr;
}
Example #13
/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object *classLoader, void *mem) {
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    /* make sure we have the right region */
    assert(mem >= (void *) getHeader(classLoader)->mapAddr &&
           mem < (void *) (getHeader(classLoader)->mapAddr +
                           getHeader(classLoader)->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4 *pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}
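dvmLinearFree above, and updatePages further down, treat the block's length word as a length plus status flags in the top bits. One plausible set of flag definitions, shown here as an assumption rather than copied from the Dalvik sources:

/* Assumed layout of the per-allocation length word: the low bits hold the
 * length, the top bits are status flags.  Values are illustrative. */
#define LENGTHFLAG_FREE  0x80000000   /* block has been dvmLinearFree'd        */
#define LENGTHFLAG_RW    0x40000000   /* block is currently mapped read-write  */
#define LENGTHFLAG_MASK  (~(LENGTHFLAG_FREE | LENGTHFLAG_RW))  /* length bits  */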
Example #14
void* half_alloc( U32 bytes ){
	
	block_t block;
	block_t newblock;
	U8 bucket;
	U8 newbucket;
	U8 i;
	U16 v_addr;
	
	// Request 4 more bytes for the header part
	bytes += 4;
	
	// Find a block in the smallest possible non-empty bucket
	i = bucketIndex( (bytes - 1) >> 5 ) + 1;	// Calculate smallest bucket that could fit
	i += __clz(__rbit(bucketOccupancy >> i));	// Get next non-empty bucket
	
	// Stop if we don't have blocks large enough anymore :(
	if (i > 10) {
		return NULL;
	}
	
	// Store bucket info safely
	bucket = i;
	v_addr = ramses[i];
	
	// Get block header info
	getBlockHeader(&block, v_addr);
	
	// Remove block from bucket linked list
	if (block.v_addr == block.next_buck) {
		// Current block is alone in bucket
		
		clearOccupancy(bucket);
		
	} else {
		// Current block is not alone in bucket
		
		// block.next.prev := block.prev		(with _buck)
		setPrevBuck(block.next_buck, block.prev_buck);
		
		// block.prev.next := block.next		(with _buck)
		setNextBuck(block.prev_buck, block.next_buck);
		
		// Change linked list buckethead (http://www.youtube.com/watch?v=lkeXE6FOf6s)
		ramses[bucket] = block.next_buck;
	}
	
	// Mark block as allocated
	memory[block.v_addr*BLOCK_SIZE] |= 0x0002;
	
	
	if ((block.size*BLOCK_BYTES - bytes >= 32)) {
		// Block needs to be split into 2 blocks
		// (block will be returned, newblock stays in free memory)
	
		// Set new block size (block.size - ceil(bytes/BLOCK_BYTES))
		newblock.size = block.size - ((bytes-1)/BLOCK_BYTES + 1);
		
		// Reduce block size
		block.size -= newblock.size;
		
		// Locate new block
		newblock.v_addr = block.v_addr + block.size;
		
		
		// Put new block into memory linked list
		newblock.prev_mem = block.v_addr		;
		newblock.next_mem = block.next_mem 	;
		
		// block.next.prev := newblock.v_addr			(with _mem)
		setPrevMem(block.next_mem, newblock.v_addr);
		
		block.next_mem 		= newblock.v_addr	;
		
		// Save new next_mem & size to current block
		// (Do not save block.prev_mem! Could overwrite critical data.)
		memory[block.v_addr*BLOCK_SIZE] = (memory[block.v_addr*BLOCK_SIZE]
			& ~(LAST10 << 12 | LAST10 << 2))
			| (block.next_mem << 12) | ((block.size-1) << 2);
		
		
		// Find bucket number for new block
		newbucket = bucketIndex(newblock.size);
		
		// If applicable, append new block to existing head of bucket
		if (isOccupied(newbucket)) {
			// newblock.next := ramses[newbucket].next					(with _buck)
			newblock.next_buck = getNextBuck(ramses[newbucket]);
			
			// newblock.prev := ramses[newbucket].v_addr				(with _buck)
			newblock.prev_buck = ramses[newbucket];
			
			// ramses[newbucket].next.prev := newblock.v_addr		(with _buck)
			setPrevBuck(getNextBuck(ramses[newbucket]), newblock.v_addr);
			
			// ramses[newbucket].next := newblock.v_addr				(with _buck)
			setNextBuck(ramses[newbucket], newblock.v_addr);
			
		} else {
			// Link new block to itself
			newblock.next_buck = newblock.prev_buck = newblock.v_addr;
			
			// Set new block as head of its bucket
			ramses[newbucket] = newblock.v_addr;
			setOccupancy(newbucket);
		}
		
		// Save new block & set as unallocated
		memory[newblock.v_addr*BLOCK_SIZE] = (newblock.prev_mem << 22) | (newblock.next_mem << 12) | ((newblock.size-1) << 2);
		memory[newblock.v_addr*BLOCK_SIZE+1] = (newblock.prev_buck << 22) | (newblock.next_buck << 12);
	}
	
	// Return pointer to current block
	return (void*)(memory + block.v_addr*BLOCK_SIZE + 1);
		
}
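half_alloc above (and half_free below) pack each block's header into two 32-bit words. A decoding sketch of what getBlockHeader might look like, inferred from the packing expressions in the code; the struct layout, LAST10, and the extern declarations are assumptions.

/* Assumed decode of the two header words written above: 10-bit neighbor links,
 * size-1 in bits 2..11 of word 0, allocated flag at 0x0002.  U16/U32 are the
 * fixed-width integer types used above; BLOCK_SIZE is the block size in 32-bit
 * words and comes from the allocator's configuration. */
#define LAST10 0x3FF

extern U32 memory[];   /* assumed: 32-bit word array backing the pool */

typedef struct {
	U16 v_addr;     /* block address, in blocks                    */
	U16 prev_mem;   /* previous block in address order             */
	U16 next_mem;   /* next block in address order                 */
	U16 prev_buck;  /* previous block in its bucket's free list    */
	U16 next_buck;  /* next block in its bucket's free list        */
	U16 size;       /* block size, in blocks                       */
} block_t;

void getBlockHeader(block_t *block, U16 v_addr)
{
	U32 w0 = memory[v_addr*BLOCK_SIZE];      /* mem links, size, flags */
	U32 w1 = memory[v_addr*BLOCK_SIZE + 1];  /* bucket links (only meaningful
	                                            while the block is free)     */
	block->v_addr    = v_addr;
	block->prev_mem  = (w0 >> 22) & LAST10;
	block->next_mem  = (w0 >> 12) & LAST10;
	block->size      = ((w0 >> 2) & LAST10) + 1;
	block->prev_buck = (w1 >> 22) & LAST10;
	block->next_buck = (w1 >> 12) & LAST10;
}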
/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / PAGESIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / PAGESIZE;
    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages.  We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    LOGW("Double RO on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                LOGE("Can't make page %d any less writable\n", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RO\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                LOGE("Can't make page %d any more writable\n", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RW\n", i);
                cc = mprotect(pHdr->mapAddr + PAGESIZE * i, PAGESIZE,
                        PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    LOGW("Double RW on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}
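The dvmLinearSetReadOnly / dvmLinearSetReadWrite calls that appear in dvmLinearFree and dvmLinearRealloc are thin wrappers around updatePages; a sketch of what they plausibly look like (assumed, not copied from the Dalvik sources):

/* Assumed wrappers: direction < 0 drops a write reference (towards read-only),
 * direction > 0 adds one (towards read-write).  See updatePages above. */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
    updatePages(classLoader, mem, -1);
}

void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
    updatePages(classLoader, mem, 1);
}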
Example #16
void half_free( void * pointer_void ) {
	
	U8* pointer = (U8*) pointer_void;
	U8 bucket;
	U8 bucket2;
	U16 v_addr;
	
	// The working block (which will contain all merged blocks in the end)
	block_t block;
	
	// Neighboring block
	block_t block2;
	
	// Go left towards beginning of header block
	pointer -= 4;
	
	// Find if block is in our memory pool
	if ((int)pointer - (int)memory < 0 || (int)pointer - (int)memory >= 32768) {
		
		// We're not doing this
		return;
	}
	
	v_addr = ((int)pointer - (int)memory)/BLOCK_BYTES;
	
	// Get block header info
	getBlockHeader(&block, v_addr);
	
	// If there is a free block to the left:
	if ( (block.v_addr != 0) && !isAllocated(block.prev_mem) ) {
		// Get left block header info
		getBlockHeader(&block2, block.prev_mem);
		
		// Add left block size to working block size
		block.size += block2.size;
		
		// Working block's address takes left block's address
		block.v_addr = block.prev_mem;
		
		// Working block's prev_mem takes left block's prev_mem
		block.prev_mem = block2.prev_mem;
		
		// block.next.prev := block.v_addr	(with _mem)
		setPrevMem(block.next_mem, block.v_addr);
		
		
		// Remove left block from its bucket
		bucket2 = bucketIndex(block2.size);
		if ( block2.v_addr == block2.next_buck ){
			// Left block was alone in its bucket
			clearOccupancy(bucket2);
			
		} else {
			setPrevBuck(block2.next_buck, block2.prev_buck);	// block2.next.prev := block2.prev	(with _buck)
			setNextBuck(block2.prev_buck, block2.next_buck);	// block2.prev.next := block2.next	(with _buck)
			
			// if bucket.head == block2, advance the head
			if( ramses[bucket2] == block2.v_addr){
				// bucket.head := block2.next				(with _buck)
				ramses[bucket2] = block2.next_buck;
			}
		}
	}
	
	// If there is a free block to the right:
	if ( (block.next_mem != 0) && !isAllocated(block.next_mem) ) {
		
		// Get right block info
		getBlockHeader(&block2, block.next_mem);
		
		// Add right block size to working block size
		block.size += block2.size;
		
		// Set working block's next_mem to right block's next_mem
		block.next_mem = block2.next_mem;
		
		// block.next.prev := block.v_addr			(with _mem)
		setPrevMem(block.next_mem, block.v_addr);
		
		// Remove right block from its bucket
		bucket2 = bucketIndex(block2.size);
		if ( block2.v_addr == block2.next_buck ){
			// Right block was alone in its bucket
			clearOccupancy(bucket2);
			
		} else {
			// block2.next.prev := block2.prev		(with _buck)
			setPrevBuck(block2.next_buck, block2.prev_buck);
			
			// block2.prev.next := block2.next		(with _buck)
			setNextBuck(block2.prev_buck, block2.next_buck);
			
			// if bucket.head == block2, advance the head
			if( ramses[bucket2] == block2.v_addr){
				// bucket.head := block2.next				(with _buck)
				ramses[bucket2] = block2.next_buck;
			}
		}
	}
	
	// Find bucket for working block
	bucket = bucketIndex(block.size);
	
	// If bucket non-empty, insert working block into bucket by appending to existing head
	// Otherwise, link working block to itself & set as head of bucket
	if (isOccupied(bucket)) {
		// block.next := ramses[bucket].next					(with _buck)
		block.next_buck = getNextBuck(ramses[bucket]);
		
		// block.prev := ramses[bucket].v_addr				(with _buck)
		block.prev_buck = ramses[bucket];
		
		// ramses[bucket].next.prev := block.v_addr		(with _buck)
		setPrevBuck(getNextBuck(ramses[bucket]), block.v_addr);
		
		// ramses[bucket].next := block.v_addr				(with _buck)
		setNextBuck(ramses[bucket], block.v_addr);
	} else {
		// Link working block to itself
		block.next_buck = block.prev_buck = block.v_addr;
		
		// Set working block as head of its bucket
		ramses[bucket] = block.v_addr;
		setOccupancy(bucket);
	}
	
	// Save working block
	memory[block.v_addr*BLOCK_SIZE]			= (block.prev_mem  << 22) | (block.next_mem  << 12) | ((block.size-1) << 2);
	memory[block.v_addr*BLOCK_SIZE + 1]	= (block.prev_buck << 22) | (block.next_buck << 12);
	
	return;
}
Example #17
static void buildBlockHeaders() {

    info("pass 1 -- walk all blocks and build headers ...");

    size_t nbBlocks = 0;
    size_t baseOffset = 0;
    size_t earlyMissCnt = 0;
    uint8_t buf[8+gHeaderSize];
    const auto sz = sizeof(buf);
    const auto startTime = usecs();
    const auto oneMeg = 1024 * 1024;

    for(const auto &map : mapVec) {

        startMap(0);

        while(1) {

            auto nbRead = read(map.fd, buf, sz);
            if(nbRead<(signed)sz) {
                break;
            }

            startBlock((uint8_t*)0);

            uint8_t *hash = 0;
            Block *prevBlock = 0;
            size_t blockSize = 0;

            getBlockHeader(blockSize, prevBlock, hash, earlyMissCnt, buf);
            if(unlikely(0==hash)) {
                break;
            }

            auto where = lseek(map.fd, (blockSize + 8) - sz, SEEK_CUR);
            auto blockOffset = where - blockSize;
            if(where<0) {
                break;
            }

            auto block = Block::alloc();
            block->init(hash, &map, blockSize, prevBlock, blockOffset);
            gBlockMap[hash] = block;
            endBlock((uint8_t*)0);
            ++nbBlocks;
        }
        baseOffset += map.size;

        auto now = usecs();
        auto elapsed = now - startTime;
        auto bytesPerSec = baseOffset / (elapsed*1e-6);
        auto bytesLeft = gChainSize - baseOffset;
        auto secsLeft = bytesLeft / bytesPerSec;
        fprintf(
            stderr,
            "%.2f%% (%.2f/%.2f Gigs) -- %6d blocks -- %.2f Megs/sec -- ETA %.0f secs -- ELAPSED %.0f secs            \r",
            (100.0*baseOffset)/gChainSize,
            baseOffset/(1000.0*oneMeg),
            gChainSize/(1000.0*oneMeg),
            (int)nbBlocks,
            bytesPerSec*1e-6,
            secsLeft,
            elapsed*1e-6
        );
        fflush(stderr);

        endMap(0);
    }

    if(0==nbBlocks) {
        warning("found no blocks - giving up");
        exit(1);
    }

    char msg[128];
    msg[0] = 0;
    if(0<earlyMissCnt) {
        sprintf(msg, ", %d early link misses", (int)earlyMissCnt);
    }

    auto elapsed = 1e-6*(usecs() - startTime);
    info(
        "pass 1 -- took %.0f secs, %6d blocks, %.2f Gigs, %.2f Megs/secs %s                                            ",
        elapsed,
        (int)nbBlocks,
        (gChainSize * 1e-9),
        (gChainSize * 1e-6) / elapsed,
        msg
    );
}
Example #18
static void *coalesce(void *bp)
{
#ifdef __DEBUG__
	fprintf(stderr, "Coalescing...\n");
#endif
	size_t p = isAllocated(getBlockHeader(getPrevBlock(bp))); //is the physically previous block allocated?
	size_t n = isAllocated(getBlockHeader(getNextBlock(bp))); //is the physically next block allocated?
	size_t size = getSize(getBlockHeader(bp)); //get block size
	if(p && n) { //no coalescing
		return bp;
	}
	else if(!p && n) //previous block is empty!
	{
#ifdef __DEBUG__
		fprintf(stderr, "Merging %u(size : %u) and %u(size : %u)...\n", getPrevBlock(bp), getSize(getBlockHeader(getPrevBlock(bp))), bp, getSize(getBlockHeader(bp)));
#endif
		erase(size, bp); //delete current block
		erase(getSize(getBlockHeader(getPrevBlock(bp))), getPrevBlock(bp)); //delete previous block
		size += getSize(getBlockHeader(getPrevBlock(bp)));
		set(getBlockFooter(bp), setMask(size, 0));
		set(getBlockHeader(getPrevBlock(bp)), setMask(size, 0));
		push_back(size, getPrevBlock(bp)); //add summed block
		return getPrevBlock(bp);
	}
	else if(p && !n) //next block is empty!
	{
#ifdef __DEBUG__
		fprintf(stderr, "Merging %u(size : %u) and %u(size : %u)...\n", bp, getSize(getBlockHeader(bp)), getNextBlock(bp), getSize(getBlockHeader(getNextBlock(bp))));
#endif
		erase(size, bp); //delete current
		erase(getSize(getBlockHeader(getNextBlock(bp))), getNextBlock(bp)); //delete next
		size += getSize(getBlockHeader(getNextBlock(bp)));
		set(getBlockHeader(bp), setMask(size, 0));
		set(getBlockFooter(bp), setMask(size, 0));
		push_back(size, bp); //add summed block
		return bp;
	}
	else //both block are empty!
	{
#ifdef __DEBUG__
		fprintf(stderr, "Merging %u(size : %u), %u(size : %u) and %u(size : %u)...\n", getPrevBlock(bp), getSize(getBlockHeader(getPrevBlock(bp))), bp, getSize(getBlockHeader(bp)), getNextBlock(bp), getSize(getBlockHeader(getNextBlock(bp))));
#endif
		erase(size, bp); //delete every adjacent block
		erase(getSize(getBlockHeader(getPrevBlock(bp))), getPrevBlock(bp));
		erase(getSize(getBlockHeader(getNextBlock(bp))), getNextBlock(bp));
		size += getSize(getBlockHeader(getNextBlock(bp))) + getSize(getBlockHeader(getPrevBlock(bp)));
		set(getBlockHeader(getPrevBlock(bp)), setMask(size, 0));
		set(getBlockFooter(getNextBlock(bp)), setMask(size, 0));
		push_back(size, getPrevBlock(bp)); //sum up
		return getPrevBlock(bp);
	}
}