Example #1
/**********************************************************
 * coalesce (Immediate Coalescing)
 * Covers the 4 cases discussed in the text:
 * - both neighbours are allocated
 * - the next block is available for coalescing
 * - the previous block is available for coalescing
 * - both neighbours are available for coalescing
 * Depending on the case, the neighbouring free blocks are removed
 * from the free list and the merged block is re-inserted.
 **********************************************************/
void *coalesce(void *bp) {

    size_t prev_alloc = GET_ALLOC(FTRP(PREV_BLKP(bp)));
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(bp)));
    size_t size = GET_SIZE(HDRP(bp));

    if (prev_alloc && next_alloc) { /* Case 1 */
        // no merging needed; just add this block to the free list
        insertToFreeList(bp);
        return bp;
    } else if (prev_alloc && !next_alloc) { /* Case 2 */
        // remove the next block from the free list before merging
        removeFromFreeList(NEXT_BLKP(bp));

        size += GET_SIZE(HDRP(NEXT_BLKP(bp)));
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size, 0));

        // add the coalesced block to the free list
        insertToFreeList(bp);

        return (bp);
    } else if (!prev_alloc && next_alloc) { /* Case 3 */
        // remove the previous block from the free list before merging
        removeFromFreeList(PREV_BLKP(bp));

        size += GET_SIZE(HDRP(PREV_BLKP(bp)));
        PUT(FTRP(bp), PACK(size, 0));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));

        // add the coalesced block to the free list
        insertToFreeList(PREV_BLKP(bp));

        return (PREV_BLKP(bp));
    } else { /* Case 4 */
        // remove both neighbouring blocks from the free list before merging
        removeFromFreeList(NEXT_BLKP(bp));
        removeFromFreeList(PREV_BLKP(bp));

        size += GET_SIZE(HDRP(PREV_BLKP(bp))) +
                GET_SIZE(FTRP(NEXT_BLKP(bp)));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        PUT(FTRP(NEXT_BLKP(bp)), PACK(size, 0));

        // add the coalesced block to the free list
        insertToFreeList(PREV_BLKP(bp));

        return (PREV_BLKP(bp));
    }
}
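The example calls insertToFreeList and removeFromFreeList without showing them. Below is a minimal sketch of what they might look like, assuming an explicit doubly linked free list whose prev/next pointers live in each free block's payload; the node layout, head variable, and LIFO insertion policy are assumptions for illustration, not taken from the original code.

/* Hypothetical explicit free list: each free block's payload holds
 * prev/next pointers, and blocks are inserted LIFO at the head. */
#include <stddef.h>

typedef struct free_node {
    struct free_node *prev;
    struct free_node *next;
} free_node;

static free_node *free_list_head = NULL;

static void insertToFreeList(void *bp) {
    free_node *node = (free_node *)bp;
    node->prev = NULL;
    node->next = free_list_head;
    if (free_list_head != NULL)
        free_list_head->prev = node;
    free_list_head = node;
}

static void removeFromFreeList(void *bp) {
    free_node *node = (free_node *)bp;
    if (node->prev != NULL)
        node->prev->next = node->next;
    else
        free_list_head = node->next;
    if (node->next != NULL)
        node->next->prev = node->prev;
}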
Example #2
/* Split the given free block if it is larger than the required size. */
void* splitBlock(void* bp, size_t asize) {
    size_t totalsize = GET_SIZE(HDRP(bp));
    size_t delta = totalsize - asize;
    // If the remainder is at least the minimum block size, we can split.
    // Both the block size and the requested size are 8-byte aligned,
    // so delta is 8-byte aligned as well.

    if (delta >= MIN_BLOCK_SIZE) {
        //split
        PUT(HDRP(bp), PACK(asize, 0));
        PUT(FTRP(bp), PACK(asize, 0)); //footer
        // write the header/footer for the remainder and insert it into the free list
        PUT(HDRP(NEXT_BLKP(bp)), PACK(delta, 0));
        PUT(FTRP(NEXT_BLKP(bp)), PACK(delta, 0));
        insertToFreeList(NEXT_BLKP(bp));

        return bp;
    }
    return bp;
}
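One plausible way splitBlock could be used by a place step is sketched below. This is not part of the original example; it assumes the CS:APP-style macros already used above (GET_SIZE, HDRP, FTRP, PUT, PACK) and the free-list helpers sketched after Example #1.

/* Hypothetical place(): carve asize bytes out of free block bp.
 * splitBlock leaves both halves marked free, so the caller removes
 * bp from the free list first and marks the carved block allocated. */
static void place(void *bp, size_t asize) {
    removeFromFreeList(bp);            /* bp is about to become allocated */
    bp = splitBlock(bp, asize);        /* pushes any remainder back       */
    size_t size = GET_SIZE(HDRP(bp));
    PUT(HDRP(bp), PACK(size, 1));      /* mark header as allocated        */
    PUT(FTRP(bp), PACK(size, 1));      /* mark footer as allocated        */
}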
Example #3
///
/// Given a pointer to persistent memory, mark the location free and
/// add it to the free list. 
///    
void PArena::freeMem(void *ptr, bool should_log)
{
#ifdef _FORCE_FAIL
    fail_program();
#endif
    Lock();

    if (!PMallocUtil::is_ptr_allocated(ptr))
    {
        fprintf(stderr, "[Atlas-pheap] assert: %p %ld %ld\n",
                (size_t *)ptr, *((size_t *)ptr),
                *(size_t *)((char *)ptr+sizeof(size_t)));
        assert(PMallocUtil::is_ptr_allocated(ptr) &&
               "free called on unallocated memory");
    }

    char *mem = (char*)PMallocUtil::ptr2mem(ptr);
    assert(doesRangeCheck(mem, *(reinterpret_cast<size_t*>(mem))) &&
           "Attempt to free memory outside of arena range!");
    
#ifndef _DISABLE_ALLOC_LOGGING
    if (should_log) nvm_log_free(mem + sizeof(size_t));
#endif
    
    *(size_t*)(mem + sizeof(size_t)) = false;
    NVM_FLUSH(mem + sizeof(size_t));
    
    insertToFreeList(PMallocUtil::get_bin_number(
                         PMallocUtil::get_requested_alloc_size_from_ptr(ptr)), 
                     PMallocUtil::ptr2mem(ptr));

    decrementActualAllocedStats(
        PMallocUtil::get_actual_alloc_size(
            PMallocUtil::get_requested_alloc_size_from_ptr(ptr)));
    
    Unlock();
}
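For orientation, the pointer arithmetic above suggests that each chunk carries two size_t metadata words (the requested size, then an allocated flag) in front of the user-visible pointer. The helpers below restate that reading in plain code; they are an inference from the example, not Atlas's actual PMallocUtil definitions.

// Assumed chunk layout (an inference from the example, not Atlas source):
//
//   mem --> [ requested size ][ allocated flag ][ user data ... ]
//           |---- size_t ----||---- size_t ----|
//   ptr (handed to the user) = mem + 2 * sizeof(size_t)
#include <cstddef>

inline void *mem_to_ptr(char *mem) {
    return mem + 2 * sizeof(size_t);
}

inline char *ptr_to_mem(void *ptr) {
    return static_cast<char*>(ptr) - 2 * sizeof(size_t);
}

inline bool mem_is_allocated(const char *mem) {
    return *reinterpret_cast<const size_t*>(mem + sizeof(size_t)) != 0;
}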
Example #4
///
/// Given a size, allocate memory using the bump pointer. If it
/// reaches the end of the arena, return null.
///    
void *PArena::allocMem(
    size_t sz, bool does_need_cache_line_alignment, bool does_need_logging)
{
#ifdef _FORCE_FAIL
    fail_program();
#endif
    // lock already acquired
    size_t alloc_sz = PMallocUtil::get_actual_alloc_size(sz);
    char *curr_alloc_addr_c = static_cast<char*>(CurrAllocAddr_);
    intptr_t curr_alloc_addr_i = reinterpret_cast<intptr_t>(CurrAllocAddr_);
    if (does_need_cache_line_alignment)
    {
        intptr_t cache_line = curr_alloc_addr_i &
            PMallocUtil::get_cache_line_mask();
        intptr_t next_cache_line = cache_line +
            PMallocUtil::get_cache_line_size();
        if (reinterpret_cast<intptr_t>(
                curr_alloc_addr_c + PMallocUtil::get_metadata_size()) !=
            next_cache_line)
        {
            if (reinterpret_cast<char*>(next_cache_line) - 
                PMallocUtil::get_metadata_size() + alloc_sz >
                static_cast<char*>(EndAddr_))
                return nullptr;
            
            intptr_t diff = next_cache_line - curr_alloc_addr_i -
                PMallocUtil::get_metadata_size();
            assert(diff >= static_cast<intptr_t>(
                       PMallocUtil::get_smallest_actual_alloc_size()) &&
                   "Insufficient space for metadata!");

            *(static_cast<size_t*>(CurrAllocAddr_)) =
                diff - PMallocUtil::get_metadata_size();

            // No need to log the following since it is not user visible

            // Mark it free
            *(reinterpret_cast<size_t*>(
                  curr_alloc_addr_c + sizeof(size_t))) = false; 

            // The above metadata updates are to the same cache line
            NVM_FLUSH(curr_alloc_addr_c);

            insertToFreeList(PMallocUtil::get_bin_number(
                                 diff - PMallocUtil::get_metadata_size()), 
                             curr_alloc_addr_c);
            CurrAllocAddr_ = reinterpret_cast<void*>(
                next_cache_line - PMallocUtil::get_metadata_size());
            NVM_FLUSH(&CurrAllocAddr_);
            curr_alloc_addr_c = static_cast<char*>(CurrAllocAddr_);
        }
    }
    if ((curr_alloc_addr_c + alloc_sz - 1) < static_cast<char*>(EndAddr_))
    {
        *(static_cast<size_t*>(CurrAllocAddr_)) = sz;

#ifndef _DISABLE_ALLOC_LOGGING
        if (does_need_logging) nvm_log_alloc(
            curr_alloc_addr_c + sizeof(size_t));
#endif
        
        *(reinterpret_cast<size_t*>(
              curr_alloc_addr_c + sizeof(size_t))) = true;

        // The above metadata updates are to the same cache line
        NVM_FLUSH(curr_alloc_addr_c);

        // If we fail somewhere above, the above memory will be
        // considered unallocated because CurrAllocAddr_ is not yet set.
        CurrAllocAddr_ = static_cast<void*>(curr_alloc_addr_c + alloc_sz);
        NVM_FLUSH(&CurrAllocAddr_);

        // If we fail here or later, the above memory may be leaked.

        if (does_need_cache_line_alignment)
            assert(PMallocUtil::is_cache_line_aligned(
                       curr_alloc_addr_c + PMallocUtil::get_metadata_size()));

        incrementActualAllocedStats(alloc_sz);
        
        return static_cast<void*>(
            curr_alloc_addr_c + PMallocUtil::get_metadata_size());
    }
    return nullptr;
}
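The alignment branch above is the least obvious part: when the caller wants a cache-line-aligned payload, the gap between the current bump pointer and the next cache line is turned into a small free chunk so that the next payload starts exactly on the line boundary. The standalone sketch below shows that padding arithmetic, assuming a 64-byte cache line and a metadata size of two words (both assumptions for illustration).

#include <cstddef>
#include <cstdint>

constexpr std::uintptr_t kCacheLineSize = 64;                        // assumption
constexpr std::size_t    kMetadataSize  = 2 * sizeof(std::size_t);   // assumption

// Returns the total size of the padding chunk (metadata included) that
// must be carved out so that curr + padding + kMetadataSize lands on the
// start of the next cache line; this mirrors `diff` in allocMem above.
// (The branch is only taken when curr + kMetadataSize is not already on
// a line boundary.)
std::size_t padding_chunk_size(std::uintptr_t curr) {
    std::uintptr_t line_start = curr & ~(kCacheLineSize - 1);
    std::uintptr_t next_line  = line_start + kCacheLineSize;
    return static_cast<std::size_t>(next_line - curr - kMetadataSize);
}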
Example #5
///
/// Given a size, traverse the arena from the start while looking for
/// a free chunk of memory that can satisfy the allocation
/// request. The arena free list is updated as the traversal is
/// performed.
///    
void *PArena::allocFromUpdatedFreeList(
    size_t sz, bool does_need_cache_line_alignment, bool does_need_logging)
{
#ifdef _FORCE_FAIL
    fail_program();
#endif
    // TODO: add support for cache line alignment
    if (does_need_cache_line_alignment) return nullptr;

    size_t actual_sz = PMallocUtil::get_actual_alloc_size(sz);
    char *mem = static_cast<char*>(StartAddr_);
    
    while (mem < (char*)CurrAllocAddr_)
    {
        size_t mem_sz = PMallocUtil::get_requested_alloc_size_from_mem(mem);
        size_t actual_mem_sz = PMallocUtil::get_actual_alloc_size(mem_sz);
        if (!PMallocUtil::is_mem_allocated(mem))
        {
            if (actual_sz > actual_mem_sz)
            {
                // This address may be in the free list already. But the
                // implementation ensures no duplicates are added.
                insertToFreeList(
                    PMallocUtil::get_bin_number(
                        *(reinterpret_cast<size_t*>(mem))),
                    mem);
                mem += actual_mem_sz;
                continue;
            }

            void *carved_mem = nullptr;
            if (actual_sz + PMallocUtil::get_smallest_actual_alloc_size() <=
                actual_mem_sz)
                carved_mem = carveExtraMem(mem, actual_sz, actual_mem_sz);
            else assert(actual_sz == actual_mem_sz);

            // If we fail here, the above carving does not take effect
            
            *(reinterpret_cast<size_t*>(mem)) = sz;

            // If we fail here or anywhere above, no memory is leaked

#ifndef _DISABLE_ALLOC_LOGGING
            if (does_need_logging) nvm_log_alloc(mem + sizeof(size_t));
#endif
            
            *(reinterpret_cast<size_t*>(mem + sizeof(size_t))) = true;

            // The above metadata updates are to the same cache line
            NVM_FLUSH(mem);

            // If we fail here, the above allocated memory may be leaked
            deleteFromFreeList(PMallocUtil::get_bin_number(mem_sz), mem);
            
            if (carved_mem)
                insertToFreeList(
                    PMallocUtil::get_bin_number(
                        *(reinterpret_cast<size_t*>(carved_mem))),
                    carved_mem);

            incrementActualAllocedStats(actual_sz);
            
            return static_cast<void*>(mem + PMallocUtil::get_metadata_size());
        }
        mem += actual_mem_sz;
    }
    return nullptr;
}
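carveExtraMem is not shown in these examples. From its call sites (the leftover chunk's first word is later read as its requested size, and the chunk is treated as free), its behaviour can be sketched roughly as below. This is an inference for illustration only, not the actual Atlas implementation.

// Hypothetical carveExtraMemSketch: split the tail of a free chunk at
// `mem` into its own free chunk. Layout and sizes are inferred from the
// call sites above (an assumption, not Atlas source).
void *carveExtraMemSketch(char *mem, size_t actual_sz, size_t actual_mem_sz) {
    char *carved = mem + actual_sz;
    // Leftover requested size = leftover bytes minus the leftover's own metadata.
    *reinterpret_cast<size_t*>(carved) =
        actual_mem_sz - actual_sz - PMallocUtil::get_metadata_size();
    // Mark the leftover chunk as not allocated.
    *reinterpret_cast<size_t*>(carved + sizeof(size_t)) = 0;
    NVM_FLUSH(carved);   // persist the leftover's metadata
    return carved;
}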
Example #6
///
/// Given a size, allocate memory from the arena free list, if
/// possible.
///    
void *PArena::allocFromFreeList(
    size_t sz, bool does_need_cache_line_alignment, bool does_need_logging)
{
#ifdef _FORCE_FAIL
    fail_program();
#endif
    // TODO: add support for cache line alignment
    if (does_need_cache_line_alignment) return nullptr;
    
    if (FreeList_->empty()) return nullptr;

    size_t actual_sz = PMallocUtil::get_actual_alloc_size(sz);
    uint32_t bin_number = PMallocUtil::get_bin_number(sz);
    while (bin_number < kMaxFreeCategory_ + 1)
    {
        FreeList::iterator ci = FreeList_->find(bin_number);
        // Look in the existing bin. We don't look for additional memory
        // that may have been freed. We will do that later.
        if (ci != FreeList_->end())
        {
            MemMap & mem_map = ci->second;
            MemMap::iterator mem_ci_end = mem_map.end();
            for (MemMap::iterator mem_ci = mem_map.begin();
                 mem_ci != mem_ci_end; ++mem_ci)
            {
                char *mem = static_cast<char*>(mem_ci->first);
                assert(!PMallocUtil::is_mem_allocated(mem) &&
                       "Location in free list is marked allocated!");

                void *carved_mem = nullptr;
                if (bin_number == kMaxFreeCategory_)
                {
                    // carve out the extra memory if possible
                    size_t actual_free_sz = PMallocUtil::get_actual_alloc_size(
                        *(reinterpret_cast<size_t*>(mem)));

                    if (actual_sz > actual_free_sz) // cannot satisfy
                        continue;
                    else if (actual_sz +
                             PMallocUtil::get_smallest_actual_alloc_size() <=
                             actual_free_sz)
                        carved_mem = carveExtraMem(
                            mem, actual_sz, actual_free_sz);
                    else assert(actual_sz == actual_free_sz);
                }
                // If we fail here, the above carving does not take effect

                *(reinterpret_cast<size_t*>(mem)) = sz;

                // If we fail here or anywhere above, no memory is leaked

#ifndef _DISABLE_ALLOC_LOGGING
                if (does_need_logging) nvm_log_alloc(mem + sizeof(size_t));
#endif
                
                *(reinterpret_cast<size_t*>(mem + sizeof(size_t))) = true;

                // The above metadata updates are to the same cache line
                NVM_FLUSH(mem);
                
                // If we fail here, the above allocated memory may be leaked

                mem_map.erase(mem_ci);

                if (carved_mem) insertToFreeList(
                    PMallocUtil::get_bin_number(
                        *(static_cast<size_t*>(carved_mem))),
                    carved_mem);

                incrementActualAllocedStats(actual_sz);
                
                return static_cast<void*>(mem +
                                          PMallocUtil::get_metadata_size());
            }
        }
        bin_number = PMallocUtil::get_next_bin_number(bin_number);
    }
    return nullptr;
}
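For reference, the container shapes implied by the iterator code above (FreeList_ maps a bin number to a MemMap, and a MemMap maps a free chunk's address to a value the loop never inspects) could be declared roughly as follows; the exact typedefs are an assumption, not Atlas's actual definitions.

// Plausible free-list container shapes, inferred from how FreeList_ and
// MemMap are iterated above.
#include <cstdint>
#include <map>

using MemMap   = std::map<void*, bool>;       // free chunk address -> unused flag
using FreeList = std::map<uint32_t, MemMap>;  // bin number -> free chunks in that bin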