Example No. 1
static void
base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
    void *addr, size_t size) {
	if (extent_bsize_get(extent) > 0) {
		/*
		 * Compute the index for the largest size class that does not
		 * exceed extent's size.
		 */
		szind_t index_floor =
		    sz_size2index(extent_bsize_get(extent) + 1) - 1;
		extent_heap_insert(&base->avail[index_floor], extent);
	}

	if (config_stats) {
		base->allocated += size;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation. Adjust n_thp similarly when
		 * metadata_thp is enabled.
		 */
		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
		    PAGE_CEILING((uintptr_t)addr - gap_size);
		assert(base->allocated <= base->resident);
		assert(base->resident <= base->mapped);
		if (metadata_thp_madvise() && (opt_metadata_thp ==
		    metadata_thp_always || base->auto_thp_switched)) {
			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
			    LG_HUGEPAGE;
			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
		}
	}
}
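The stats path above grows base->resident only by the page boundaries actually crossed by the new allocation, including its leading alignment gap. Below is a minimal standalone sketch of that arithmetic, assuming a 4 KiB page and a locally defined round-up macro rather than jemalloc's PAGE_CEILING; all names here are illustrative.

/* Standalone sketch of the resident-page accounting above; the macro and the
 * 4 KiB page size are assumptions, not jemalloc's actual definitions. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE      ((uintptr_t)4096)
#define SKETCH_PAGE_MASK (SKETCH_PAGE - 1)
#define SKETCH_PAGE_CEILING(a) (((a) + SKETCH_PAGE_MASK) & ~SKETCH_PAGE_MASK)

int main(void) {
	uintptr_t addr = 0x1000f00;  /* new allocation, not page-aligned */
	size_t gap_size = 0x100;     /* alignment gap preceding it */
	size_t size = 0x2000;        /* bytes handed out */

	/* Bytes of newly resident pages: every page boundary crossed between
	 * the start of the gap and the end of the allocation adds one page. */
	size_t resident_delta = SKETCH_PAGE_CEILING(addr + size) -
	    SKETCH_PAGE_CEILING(addr - gap_size);
	printf("resident grows by %zu bytes (%zu pages)\n",
	    resident_delta, (size_t)(resident_delta / SKETCH_PAGE));
	return 0;
}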
Example No. 2
/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
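base_alloc first pads the request to a whole cache line so that two metadata allocations can never share a line. A small sketch of that rounding follows, assuming a 64-byte cache line and an illustrative macro name in place of jemalloc's CACHELINE_CEILING.

/* Sketch of the cacheline round-up used above; the 64-byte line size and the
 * macro name are assumptions for illustration. */
#include <stdio.h>

#define SKETCH_CACHELINE      ((size_t)64)
#define SKETCH_CACHELINE_MASK (SKETCH_CACHELINE - 1)
#define SKETCH_CACHELINE_CEILING(s) \
    (((s) + SKETCH_CACHELINE_MASK) & ~SKETCH_CACHELINE_MASK)

int main(void) {
	size_t sizes[] = {1, 63, 64, 65, 200};
	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* Each request is padded to a whole cache line so two base
		 * allocations never share a line (no false sharing). */
		printf("%zu -> %zu\n", sizes[i],
		    SKETCH_CACHELINE_CEILING(sizes[i]));
	}
	return 0;
}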
Example No. 3
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	base_extent_node_init(node, addr, csize);
	return (node);
}
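When base_chunk_alloc has no spare node, it carves the extent_node_t header out of the front of the chunk it just mapped and accounts the header as metadata. Below is a standalone sketch of that carve, using malloc as a stand-in for the chunk mapping; the macro and struct names are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_CACHELINE_CEILING(s) (((s) + (size_t)63) & ~(size_t)63)
#define SKETCH_PAGE_CEILING(s)      (((s) + (size_t)4095) & ~(size_t)4095)

struct sketch_node {
	void *addr;
	size_t size;
};

int main(void) {
	size_t csize = (size_t)1 << 16;          /* pretend this is the new chunk */
	void *addr = malloc(csize);              /* stand-in for the mapped chunk */
	if (addr == NULL) {
		return 1;
	}

	/* Carve a cacheline-aligned node header off the front of the chunk. */
	size_t nsize = SKETCH_CACHELINE_CEILING(sizeof(struct sketch_node));
	struct sketch_node *node = (struct sketch_node *)addr;
	addr = (void *)((uintptr_t)addr + nsize);
	csize -= nsize;

	/* The remainder is what the node describes; the header itself counts
	 * as nsize allocated bytes and PAGE_CEILING(nsize) resident bytes. */
	node->addr = addr;
	node->size = csize;
	printf("header: %zu bytes, resident: %zu bytes, usable: %zu bytes\n",
	    nsize, SKETCH_PAGE_CEILING(nsize), csize);

	free(node);                              /* node is the original pointer */
	return 0;
}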
Example No. 4
bool
pages_purge_lazy(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (!pages_can_purge_lazy) {
		return true;
	}
	if (!pages_can_purge_lazy_runtime) {
		/*
		 * Built with lazy purge enabled, but detected it was not
		 * supported on the current system.
		 */
		return true;
	}

#ifdef _WIN32
	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
	return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
	return (madvise(addr, size,
#  ifdef MADV_FREE
	    MADV_FREE
#  else
	    JEMALLOC_MADV_FREE
#  endif
	    ) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
	not_reached();
#endif
}
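On Linux the lazy path above amounts to madvise(MADV_FREE): the kernel may reclaim the pages whenever it likes, and until it does their contents remain readable. A standalone sketch under that assumption (the mapping size and fill pattern are illustrative):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
	size_t size = (size_t)1 << 20;
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return 1;
	}
	memset(addr, 0xa5, size);                /* dirty the pages */

#ifdef MADV_FREE
	/* Lazy purge: a nonzero return means the purge request failed. */
	if (madvise(addr, size, MADV_FREE) != 0) {
		perror("madvise(MADV_FREE)");
	}
#endif
	munmap(addr, size);
	return 0;
}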
Example No. 5
static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (os_overcommits) {
		return true;
	}

#ifdef _WIN32
	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
	{
		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
		    -1, 0);
		if (result == MAP_FAILED) {
			return true;
		}
		if (result != addr) {
			/*
			 * We succeeded in mapping memory, but not in the right
			 * place.
			 */
			os_pages_unmap(result, size);
			return true;
		}
		return false;
	}
#endif
}
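On the non-Windows path, commit and decommit are both a re-mmap of the same range over itself with MAP_FIXED, toggling between PROT_NONE and PROT_READ|PROT_WRITE. The Linux sketch below illustrates that idea; the hard-coded MAP_PRIVATE | MAP_ANONYMOUS flags stand in for jemalloc's mmap_flags, and the function name is illustrative.

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* Returns true on failure, mirroring the convention of pages_commit_impl. */
static bool
sketch_commit(void *addr, size_t size, bool commit) {
	int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
	void *result = mmap(addr, size, prot,
	    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	return result == MAP_FAILED || result != addr;
}

int main(void) {
	size_t size = (size_t)1 << 20;
	/* Reserve a decommitted (PROT_NONE) region first. */
	void *addr = mmap(NULL, size, PROT_NONE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return 1;
	}
	printf("commit failed:   %d\n", sketch_commit(addr, size, true));
	printf("decommit failed: %d\n", sketch_commit(addr, size, false));
	munmap(addr, size);
	return 0;
}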
Example No. 6
void
pages_unmap(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	os_pages_unmap(addr, size);
}
Example No. 7
bool
pages_dodump(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
	return madvise(addr, size, MADV_DODUMP) != 0;
#else
	return false;
#endif
}
Example No. 8
bool
pages_purge_forced(void *addr, size_t size) {
	assert(PAGE_ADDR2BASE(addr) == addr);
	assert(PAGE_CEILING(size) == size);

	if (!pages_can_purge_forced) {
		return true;
	}

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
	return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
	/* Try to overlay a new demand-zeroed mapping. */
	return pages_commit(addr, size);
#else
	not_reached();
#endif
}
Example No. 9
/* Iterate over a valid jemalloc chunk, calling callback for each large
 * allocation run, and calling je_iterate_small for each small allocation run */
static void je_iterate_chunk(arena_chunk_t *chunk,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
  size_t pageind;

  pageind = map_bias;

  while (pageind < chunk_npages) {
    size_t mapbits;
    size_t size;

    mapbits = arena_mapbits_get(chunk, pageind);
    if (!arena_mapbits_allocated_get(chunk, pageind)) {
      /* Unallocated run */
      size = arena_mapbits_unallocated_size_get(chunk, pageind);
    } else if (arena_mapbits_large_get(chunk, pageind)) {
      /* Large allocation run */
      void *rpages;

      size = arena_mapbits_large_size_get(chunk, pageind);
      rpages = arena_miscelm_to_rpages(arena_miscelm_get(chunk, pageind));
      callback((uintptr_t)rpages, size, arg);
    } else {
      /* Run of small allocations */
      szind_t binind;
      arena_run_t *run;

      assert(arena_mapbits_small_runind_get(chunk, pageind) == pageind);
      binind = arena_mapbits_binind_get(chunk, pageind);
      run = &arena_miscelm_get(chunk, pageind)->run;
      assert(run->binind == binind);
      size = arena_bin_info[binind].run_size;

      je_iterate_small(run, callback, arg);
    }
    assert(size == PAGE_CEILING(size));
    assert(size > 0);
    pageind += size >> LG_PAGE;
  }

}
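The iterator above reports each run through a callback of the form void (*)(uintptr_t, size_t, void *). Below is a sketch of one such callback that tallies run count and total bytes; the accumulator struct and the simulated calls in main are illustrative assumptions, not part of the jemalloc sources.

#include <stdint.h>
#include <stdio.h>

struct alloc_tally {
	size_t count;
	size_t bytes;
};

/* Matches the callback signature expected by je_iterate_chunk above. */
static void
tally_cb(uintptr_t ptr, size_t size, void *arg) {
	struct alloc_tally *tally = (struct alloc_tally *)arg;
	(void)ptr;
	tally->count++;
	tally->bytes += size;
}

int main(void) {
	struct alloc_tally tally = {0, 0};
	/* Simulate two runs being reported by the iterator; in real use this
	 * would be: je_iterate_chunk(chunk, tally_cb, &tally); */
	tally_cb(0x1000, 4096, &tally);
	tally_cb(0x3000, 8192, &tally);
	printf("%zu runs, %zu bytes\n", tally.count, tally.bytes);
	return 0;
}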
Example No. 10
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
    bool ret;
    unsigned i;
    off_t off;
    void *vaddr;
    size_t cumsize, voff;
    size_t sizes[nfds];

    malloc_mutex_lock(&swap_mtx);

    /* Get file sizes. */
    for (i = 0, cumsize = 0; i < nfds; i++) {
        off = lseek(fds[i], 0, SEEK_END);
        if (off == ((off_t)-1)) {
            ret = true;
            goto RETURN;
        }
        if (PAGE_CEILING(off) != off) {
            /* Truncate to a multiple of the page size. */
            off &= ~PAGE_MASK;
            if (ftruncate(fds[i], off) != 0) {
                ret = true;
                goto RETURN;
            }
        }
        sizes[i] = off;
        if (cumsize + off < cumsize) {
            /*
             * Cumulative file size is greater than the total
             * address space.  Bail out while it's still obvious
             * what the problem is.
             */
            ret = true;
            goto RETURN;
        }
        cumsize += off;
    }

    /* Round down to a multiple of the chunk size. */
    cumsize &= ~chunksize_mask;
    if (cumsize == 0) {
        ret = true;
        goto RETURN;
    }

    /*
     * Allocate a chunk-aligned region of anonymous memory, which will
     * be the final location for the memory-mapped files.
     */
    vaddr = chunk_alloc_mmap_noreserve(cumsize);
    if (vaddr == NULL) {
        ret = true;
        goto RETURN;
    }

    /* Overlay the files onto the anonymous mapping. */
    for (i = 0, voff = 0; i < nfds; i++) {
        void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
        if (addr == MAP_FAILED) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write(
                "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
            if (munmap(vaddr, voff) == -1) {
                buferror(errno, buf, sizeof(buf));
                malloc_write("<jemalloc>: Error in munmap(): ");
                malloc_write(buf);
                malloc_write("\n");
            }
            ret = true;
            goto RETURN;
        }
        assert(addr == (void *)((uintptr_t)vaddr + voff));

        /*
         * Tell the kernel that the mapping will be accessed randomly,
         * and that it should not gratuitously sync pages to the
         * filesystem.
         */
#ifdef MADV_RANDOM
        madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
        madvise(addr, sizes[i], MADV_NOSYNC);
#endif

        voff += sizes[i];
    }

    swap_prezeroed = prezeroed;
    swap_base = vaddr;
    swap_end = swap_base;
    swap_max = (void *)((uintptr_t)vaddr + cumsize);

    /* Copy the fds array for mallctl purposes. */
    swap_fds = (int *)base_alloc(nfds * sizeof(int));
    if (swap_fds == NULL) {
        ret = true;
        goto RETURN;
    }
    memcpy(swap_fds, fds, nfds * sizeof(int));
    swap_nfds = nfds;

#ifdef JEMALLOC_STATS
    swap_avail = cumsize;
#endif

    swap_enabled = true;

    ret = false;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
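The core of chunk_swap_enable is reserving a contiguous anonymous region first and then overlaying each file onto it with MAP_FIXED. The Linux sketch below shows that overlay with a single temporary file; the file creation, size, and advice flags are illustrative assumptions.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
	size_t fsize = 4096 * 4;

	/* Create a page-multiple-sized backing file. */
	char path[] = "/tmp/swap_sketch_XXXXXX";
	int fd = mkstemp(path);
	if (fd == -1 || ftruncate(fd, (off_t)fsize) != 0) {
		return 1;
	}
	unlink(path);

	/* Reserve the destination region anonymously first. */
	void *vaddr = mmap(NULL, fsize, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (vaddr == MAP_FAILED) {
		return 1;
	}

	/* Overlay the file onto the reserved region at a fixed address. */
	void *addr = mmap(vaddr, fsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_FIXED, fd, 0);
	if (addr == MAP_FAILED) {
		return 1;
	}

#ifdef MADV_RANDOM
	/* Hint random access, as the example above does. */
	madvise(addr, fsize, MADV_RANDOM);
#endif

	munmap(addr, fsize);
	close(fd);
	return 0;
}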