// Example no. 1
// 0
// Allocate at least inReqSize usable bytes from the pool.
//
// Strategy: best-fit search over power-of-two-ish size bins (mBins). Small
// requests try an exact-fit bin first; larger requests scan a bin's free
// list. If no existing free chunk fits, a fresh area is obtained via
// NewArea() and either used whole or split. Throws std::runtime_error when
// the pool is fixed-size (mAreaMoreSize == 0) and exhausted.
//
// Returns: pointer to the user-visible payload (candidate->ToPtr()), or 0
// if NewArea() fails on a growable pool.
void* AllocPool::Alloc(size_t inReqSize)
{
#ifdef DISABLE_MEMORY_POOLS
	// Pool bypass for debugging: defer straight to the system allocator.
	// NOTE: when this is defined, everything below is dead code.
	return malloc(inReqSize);
#endif

	// OK it has a lot of gotos, but these remove a whole lot of common code
	// that was obfuscating the original version of this function.
	// So here I am choosing the OnceAndOnlyOnce principle over the caveats on gotos.
	// The gotos only jump forward and only to the exit paths of the function

	// The old bin block scheme has been replaced by 4 x 32 bit words so that each bin has a bit
	// and the next bin is found using a count leading zeroes instruction. Much faster.
	// Also now each bin's flag can be kept accurate. This simplifies the searching code quite a bit.

	// Also fwiw, changed 'victim' in the original code to 'candidate'. 'victim' just bothered me.


	AllocChunkPtr 	candidate;        /* inspected/selected chunk */
	size_t			candidate_size;   /* its size */
	AllocChunkPtr 	remainder;        /* remainder from a split */
	int32			remainder_size;   /* its size */
	AllocAreaPtr	area;
	size_t			areaSize;

	// RequestToSize presumably rounds the request up to include the chunk
	// header and alignment — TODO confirm against its definition.
	size_t size = RequestToSize(inReqSize);
	int index = BinIndex(size);
	assert(index < 128); // 4 x 32-bit bin words => at most 128 bins
	AllocChunkPtr bin = mBins + index;

	check_pool(); // debug-only consistency check (presumably compiled out in release)

	/* Check for exact match in a bin */
	if (index < kMaxSmallBin) { /* Faster version for small requests */
		/* No traversal or size check necessary for small bins.  */
		// Small bins hold a single size class, so any chunk in the bin fits
		// exactly; take the last one (Prev of the bin's list head).
		candidate = bin->Prev();

		/* Also scan the next one, since it would have a remainder < kMinAllocSize */
		if (candidate == bin) candidate = (++bin)->Prev();
		// candidate == bin means the circular free list is empty.
		if (candidate != bin) {
			candidate_size = candidate->Size();
			goto found_exact_fit;
		}

		index += 2; /* Set for bin scan below. We've already scanned 2 bins. */
	} else {
		// Large bins hold a range of sizes: walk the list looking for an
		// exact fit; a chunk big enough to split is deferred to the scan
		// below (hence the --index so this bin is revisited).
		for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {

			candidate_size = candidate->Size();
			remainder_size = (int)(candidate_size - size);
			if (remainder_size >= (int32)kMinAllocSize) { /* too big */
				--index; /* adjust to rescan below after checking last remainder */
				break;
			} else if (remainder_size >= 0) { /* exact fit */
				goto found_exact_fit;
			}
		}
		++index;
	}

	// Scan upward through non-empty bins (NextFullBin uses the bin bitmap)
	// for the first chunk big enough to satisfy the request, splitting off
	// the remainder when it is at least kMinAllocSize.
	for(; (index = NextFullBin(index)) >= 0; ++index) {
		bin = mBins + index;

		/* Find and use first big enough chunk ... */
		for (candidate = bin->Prev(); candidate != bin; candidate = candidate->Prev()) {
			candidate_size = candidate->Size();
			remainder_size = (int)(candidate_size - size);
			if (remainder_size >= (int32)kMinAllocSize) { /* split */
				UnlinkFree(candidate);
				goto found_bigger_fit;
			} else if (remainder_size >= 0) goto found_exact_fit;
		}
	}
	check_pool();

	// No free chunk fits: grow the pool (or fail if it cannot grow).
	if (mAreaMoreSize == 0) { /* pool has a non-growable area */
		if (mAreas != NULL /* fixed size area exhausted */
				|| size > mAreaInitSize)  /* too big anyway */
			goto found_nothing;
		areaSize = mAreaInitSize;
		goto split_new_area;
	}

	if (size > mAreaMoreSize) {
		// Oversized request: give it a dedicated area, used whole.
		areaSize = size;
		goto whole_new_area;
	} else {
		areaSize = mAreaMoreSize;
		goto split_new_area;
	}

	// exit paths:
	found_nothing:
		//ipostbuf("alloc failed. size: %d\n", inReqSize);
		throw std::runtime_error("alloc failed, increase server's memory allocation (e.g. via ServerOptions)");

	whole_new_area:
		//ipostbuf("whole_new_area\n");
		area = NewArea(areaSize);
		if (!area) return 0;
		candidate = &area->mChunk;
		candidate_size = candidate->Size();
		goto return_chunk;

	split_new_area:
		//ipostbuf("split_new_area\n");
		area = NewArea(areaSize);
		if (!area) return 0;
		candidate = &area->mChunk;
		candidate_size = candidate->Size();
		// NOTE(review): remainder_size is int32 computed from size_t
		// arithmetic — presumably areaSize always fits in 31 bits here;
		// verify for very large pools.
		remainder_size = (int)(areaSize - size);
		//	FALL THROUGH
	found_bigger_fit:
		//ipostbuf("found_bigger_fit\n");
		// Split: the tail of the candidate becomes a new free chunk that is
		// linked back into its bin; the head (candidate_size - remainder_size
		// bytes) is returned to the caller.
		remainder = candidate->ChunkAtOffset(size);
		remainder->SetSizeFree(remainder_size);
		candidate_size -= remainder_size;
		LinkFree(remainder);
		goto return_chunk;

	found_exact_fit:
			check_pool();
		UnlinkFree(candidate);
		//	FALL THROUGH
	return_chunk:

		// Mark the chunk in-use and hand back its payload pointer.
		candidate->SetSizeInUse(candidate_size);
			check_malloced_chunk(candidate, candidate_size);
			check_pool();
			garbage_fill(candidate); // debug aid: scribble over uninitialized payload
		return candidate->ToPtr();
}
// Example no. 2
// 0
//This function is equal to mspace_malloc
//replacing PREACTION with 0 and POSTACTION with nothing
void* mspace_malloc_lockless(mspace msp, size_t bytes)
{
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
    if (!0){//PREACTION(ms)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }