Example #1
Bool BufferCommit(Buffer buffer, Addr p, Size size)
{
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(!BufferIsReady(buffer));

  /* <design/collection#.flip>. */
  /* .commit.before: If a flip occurs before this point, when the */
  /* pool reads "initAtFlip" it will point below the object, so it */
  /* will be trashed and the commit must fail when trip is called.  */
  AVER(p == buffer->ap_s.init);
  AVER(AddrAdd(buffer->ap_s.init, size) == buffer->ap_s.alloc);

  /* .commit.update: Atomically update the init pointer to declare */
  /* that the object is initialized (though it may be invalid if a */
  /* flip occurred). */
  buffer->ap_s.init = buffer->ap_s.alloc;

  /* .improve.memory-barrier: Memory barrier here on the DEC Alpha */
  /* (and other relaxed memory order architectures). */
  /* .commit.after: If a flip occurs at this point, the pool will */
  /* see "initAtFlip" above the object, which is valid, so it will */
  /* be collected.  The commit must succeed when trip is called.  */
  /* The pointer "p" will have been fixed up. */
  /* TODO: Check the above assertion and explain why it is so. */
  /* .commit.trip: Trip the buffer if a flip has occurred. */
  if (buffer->ap_s.limit == (Addr)0)
    return BufferTrip(buffer, p, size);

  /* No flip occurred, so succeed. */

  return TRUE;
}
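
For context, this is the pool-side half of the allocation point protocol. A minimal sketch of the client-side idiom it supports, using the public mps_reserve/mps_commit interface (ap and size are assumed to be in scope; my_init is an illustrative placeholder, not an MPS name):

mps_addr_t p;
do {
  mps_res_t res = mps_reserve(&p, ap, size);  /* size must be aligned */
  if (res != MPS_RES_OK)
    break;                  /* allocation failed; handle out of line */
  my_init(p, size);         /* initialize the object before committing */
} while (!mps_commit(ap, p, size));  /* FALSE if a flip tripped the buffer */

If a flip invalidates the reserved object, commit fails through the .commit.trip path and the loop re-reserves and re-initializes.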
Example #2
Bool MPMCheck(void)
{
    CHECKL(sizeof(Word) * CHAR_BIT == MPS_WORD_WIDTH);
    CHECKL((Word)1 << MPS_WORD_SHIFT == MPS_WORD_WIDTH);
    CHECKL(AlignCheck(MPS_PF_ALIGN));
    /* Check that trace ids will fit in the TraceId type. */
    CHECKL(TraceLIMIT <= UINT_MAX);
    /* Check that there are enough bits in */
    /* a TraceSet to store all possible trace ids. */
    CHECKL(sizeof(TraceSet) * CHAR_BIT >= TraceLIMIT);

    CHECKL(SizeAlignUp(0, 2048) == 0);
    CHECKL(!SizeIsAligned(64, (unsigned) -1));
    CHECKL(SizeIsAligned(0, 32));
    CHECKL(SizeAlignUp(1024, 16) == 1024);
    /* .prime: 31051 is prime */
    CHECKL(SizeIsAligned(SizeAlignUp(31051, 256), 256));
    CHECKL(SizeIsAligned(SizeAlignUp(31051, 512), 512));
    CHECKL(!SizeIsAligned(31051, 1024));
    CHECKL(!SizeIsP2(0));
    CHECKL(SizeIsP2(128));
    CHECKL(SizeLog2((Size)1) == 0);
    CHECKL(SizeLog2((Size)256) == 8);
    CHECKL(SizeLog2((Size)65536) == 16);
    CHECKL(SizeLog2((Size)131072) == 17);

    /* .check.writef: We check that various types will fit in a Word; */
    /* see .writef.check.  Don't need to check WriteFS or WriteFF as they */
    /* should not be cast to Word. */
    CHECKL(sizeof(WriteFA) <= sizeof(Word));
    CHECKL(sizeof(WriteFP) <= sizeof(Word));
    CHECKL(sizeof(WriteFW) <= sizeof(Word)); /* Should be trivial */
    CHECKL(sizeof(WriteFU) <= sizeof(Word));
    CHECKL(sizeof(WriteFB) <= sizeof(Word));
    CHECKL(sizeof(WriteFC) <= sizeof(Word));
    /* .check.write.double: See .write.double.check */
    {
        int e, DBL_EXP_DIG = 1;
        for (e = DBL_MAX_10_EXP; e > 0; e /= 10)
            DBL_EXP_DIG++;
        CHECKL(DBL_EXP_DIG < DBL_DIG);
        CHECKL(-(DBL_MIN_10_EXP) <= DBL_MAX_10_EXP);
    }

    return TRUE;
}
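
The alignment identities exercised above rest on standard power-of-two mask arithmetic. A self-contained sketch of plausible definitions consistent with these checks (not the MPS sources; align is assumed to be a power of two):

#include <assert.h>
#include <stddef.h>

static size_t size_align_up(size_t size, size_t align)
{ return (size + align - 1) & ~(align - 1); }

static int size_is_aligned(size_t size, size_t align)
{ return (size & (align - 1)) == 0; }

static int size_is_p2(size_t size)
{ return size > 0 && (size & (size - 1)) == 0; }

int main(void)
{
  assert(size_align_up(0, 2048) == 0);
  assert(size_align_up(1024, 16) == 1024);
  assert(size_is_aligned(size_align_up(31051, 256), 256)); /* 31051 is prime */
  assert(!size_is_aligned(31051, 1024));
  assert(!size_is_p2(0));
  assert(size_is_p2(128));
  return 0;
}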
Example #3
/* MVFFAddSeg -- Allocates a new segment from the arena
 *
 * Allocates a new segment from the arena (with the given
 * withReservoirPermit flag) of at least the specified size.  The
 * specified size should be pool-aligned.  Adds it to the free list.
 */
static Res MVFFAddSeg(Seg *segReturn,
                      MVFF mvff, Size size, Bool withReservoirPermit)
{
  Pool pool;
  Arena arena;
  Size segSize;
  Seg seg;
  Res res;
  Align align;
  Addr base, limit;

  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVERT(Bool, withReservoirPermit);

  pool = MVFF2Pool(mvff);
  arena = PoolArena(pool);
  align = ArenaAlign(arena);

  AVER(SizeIsAligned(size, PoolAlignment(pool)));

  /* Use extendBy unless it's too small (see */
  /* <design/poolmvff/#design.seg-size>). */
  if (size <= mvff->extendBy)
    segSize = mvff->extendBy;
  else
    segSize = size;

  segSize = SizeAlignUp(segSize, align);

  res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                 withReservoirPermit, argsNone);
  if (res != ResOK) {
    /* try again for a seg just large enough for object */
    /* see <design/poolmvff/#design.seg-fail> */
    segSize = SizeAlignUp(size, align);
    res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                   withReservoirPermit, argsNone);
    if (res != ResOK) {
      return res;
    }
  }

  mvff->total += segSize;
  base = SegBase(seg);
  limit = AddrAdd(base, segSize);
  DebugPoolFreeSplat(pool, base, limit);
  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  AVER(base <= SegBase(seg));
  if (mvff->minSegSize > segSize) mvff->minSegSize = segSize;

  /* Don't call MVFFFreeSegs; that would be silly. */

  *segReturn = seg;
  return ResOK;
}
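
The sizing policy here (round the request up to a preferred extension size, fall back to an exact fit if the arena refuses) is a pattern worth isolating. A plain-C sketch under assumed stand-ins; EXTEND_BY, ALIGN and arena_alloc are illustrative, not MPS names:

#include <stddef.h>
#include <stdlib.h>

#define EXTEND_BY (64 * 1024)
#define ALIGN ((size_t)4096)

static size_t align_up(size_t size)
{ return (size + ALIGN - 1) & ~(ALIGN - 1); }

static void *arena_alloc(size_t size)
{ return malloc(size); }  /* stand-in for SegAlloc */

void *pool_grow(size_t size)
{
  size_t segSize = size <= EXTEND_BY ? EXTEND_BY : size;
  void *seg = arena_alloc(align_up(segSize));
  if (seg == NULL)
    seg = arena_alloc(align_up(size));  /* retry with just enough */
  return seg;  /* NULL only if the exact-fit request also failed */
}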
Example #4
Res BufferFill(Addr *pReturn, Buffer buffer, Size size)
{
  Res res;
  Pool pool;
  Addr base, limit, next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer));

  pool = BufferPool(buffer);

  /* If we're here because the buffer was trapped, then we attempt */
  /* the allocation here. */
  if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }

    /* .fill.logged: If the buffer is logged then we leave it logged. */
    next = AddrAdd(buffer->ap_s.alloc, size);
    if (next > (Addr)buffer->ap_s.alloc &&
        next <= (Addr)buffer->poolLimit) {
      buffer->ap_s.alloc = next;
      if (buffer->mode & BufferModeLOGGED) {
        EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
      }
      *pReturn = buffer->ap_s.init;
      return ResOK;
    }
  }

  /* There really isn't enough room for the allocation now. */
  AVER(AddrAdd(buffer->ap_s.alloc, size) > buffer->poolLimit ||
       AddrAdd(buffer->ap_s.alloc, size) < (Addr)buffer->ap_s.alloc);

  BufferDetach(buffer, pool);

  /* Ask the pool for some memory. */
  res = Method(Pool, pool, bufferFill)(&base, &limit, pool, buffer, size);
  if (res != ResOK)
    return res;

  /* Set up the buffer to point at the memory given by the pool */
  /* and do the allocation that was requested by the client. */
  BufferAttach(buffer, base, limit, base, size);

  if (buffer->mode & BufferModeLOGGED) {
    EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
  }

  *pReturn = base;
  return res;
}
Example #5
Bool ArenaGrainSizeCheck(Size size)
{
  CHECKL(size > 0);
  /* <design/arena/#req.attr.block.align.min> */
  CHECKL(SizeIsAligned(size, MPS_PF_ALIGN));
  /* Grain size must be a power of 2 for the tract lookup and the
   * zones to work. */
  CHECKL(SizeIsP2(size));

  return TRUE;
}
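
The power-of-two requirement buys cheap address arithmetic: with a grain of 1 << shift, mapping an address to its grain index is a subtract and a shift rather than a division. A minimal sketch (base, shift and the function name are illustrative, not the MPS internals):

#include <stddef.h>
#include <stdint.h>

static size_t grain_index(uintptr_t addr, uintptr_t base, unsigned shift)
{
  return (size_t)((addr - base) >> shift);  /* addr in [base, limit) */
}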
Example #6
static Bool EPVMSaveCheck(EPVMSave save)
{
  CHECKS(EPVMSave, save);
  CHECKU(EPVM, save->epvm);
  CHECKL(save->level <= save->epvm->maxSaveLevel);
  CHECKL(save->size <= PoolManagedSize(EPVM2Pool(save->epvm)));
  if (save->level > save->epvm->saveLevel) /* nothing at this level */
    CHECKL(save->size == 0);
  CHECKL(SizeIsAligned(save->size,
                       ArenaAlign(PoolArena(EPVM2Pool(save->epvm)))));
  CHECKL(BoolCheck(save->smallStringSeg));
  CHECKL(BoolCheck(save->smallObjectSeg));
  CHECKL(RingCheck(&save->segRing));

  return TRUE;
}
Example #7
/* MVFFBufferFill -- Fill the buffer
 *
 * Fill it with the largest block we can find.
 */
static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
                          Pool pool, Buffer buffer, Size size,
                          Bool withReservoirPermit)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;
  Seg seg = NULL;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(pool)));
  AVERT(Bool, withReservoirPermit);

  /* Optimistically delete the largest block, hoping it is big enough; */
  /* if it turns out to be too small, give it back. */
  foundBlock = CBSFindLargest(&base, &limit, CBSOfMVFF(mvff),
                              CBSFindDeleteENTIRE);
  if (foundBlock && AddrOffset(base, limit) < size) {
    foundBlock = FALSE;
    res = CBSInsert(CBSOfMVFF(mvff), base, limit);
    AVER(res == ResOK);
  }
  if (!foundBlock) {
    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = CBSFindLargest(&base, &limit, CBSOfMVFF(mvff),
                                CBSFindDeleteENTIRE);
    AVER(foundBlock); /* We will find the new segment. */
  }

  AVER(AddrOffset(base, limit) >= size);
  mvff->free -= AddrOffset(base, limit);

  *baseReturn = base;
  *limitReturn = limit;
  return ResOK;
}
Example #8
/* MVFFFindFirstFree -- Finds the first (or last) suitable free block
 *
 * Finds a free block of the given (pool aligned) size, according
 * to a first (or last) fit policy controlled by the MVFF fields
 * firstFit, slotHigh (for whether to allocate the top or bottom
 * portion of a larger block).
 *
 * Will return FALSE if the free list has no large enough block.
 * In particular, will not attempt to allocate a new segment.
 */
static Bool MVFFFindFirstFree(Addr *baseReturn, Addr *limitReturn,
                              MVFF mvff, Size size)
{
  Bool foundBlock;
  CBSFindDelete findDelete;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(MVFF2Pool(mvff))));

  findDelete = mvff->slotHigh ? CBSFindDeleteHIGH : CBSFindDeleteLOW;

  foundBlock =
    (mvff->firstFit ? CBSFindFirst : CBSFindLast)
      (baseReturn, limitReturn, CBSOfMVFF(mvff), size, findDelete);

  if (foundBlock)
    mvff->free -= size;

  return foundBlock;
}
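
The dispatch idiom above, a ternary over two functions of identical type applied immediately, keeps the first-fit/last-fit policy choice out of the search logic. A self-contained sketch of the same pattern with trivial stand-in finders:

#include <stdio.h>

static int find_first(int size) { printf("first fit, %d\n", size); return 1; }
static int find_last(int size)  { printf("last fit, %d\n", size);  return 1; }

static int find(int firstFit, int size)
{
  /* Same shape as the CBSFindFirst/CBSFindLast dispatch above. */
  return (firstFit ? find_first : find_last)(size);
}

int main(void)
{
  find(1, 64);
  find(0, 64);
  return 0;
}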
Example #9
/* MVFFFindFirstFree -- Finds the first (or last) suitable free block
 *
 * Finds a free block of the given (pool aligned) size, according
 * to a first (or last) fit policy controlled by the MVFF fields
 * firstFit, slotHigh (for whether to allocate the top or bottom
 * portion of a larger block).
 *
 * Will return FALSE if the free list has no large enough block.
 * In particular, will not attempt to allocate a new segment.
 */
static Bool MVFFFindFirstFree(Addr *baseReturn, Addr *limitReturn,
                              MVFF mvff, Size size)
{
  Bool foundBlock;
  FindDelete findDelete;
  RangeStruct range, oldRange;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(MVFF2Pool(mvff))));

  FreelistFlushToCBS(FreelistOfMVFF(mvff), CBSOfMVFF(mvff));

  findDelete = mvff->slotHigh ? FindDeleteHIGH : FindDeleteLOW;

  foundBlock =
    (mvff->firstFit ? CBSFindFirst : CBSFindLast)
    (&range, &oldRange, CBSOfMVFF(mvff), size, findDelete);

  if (!foundBlock) {
    /* Failed to find a block in the CBS: try the emergency free list
     * as well. */
    foundBlock =
      (mvff->firstFit ? FreelistFindFirst : FreelistFindLast)
      (&range, &oldRange, FreelistOfMVFF(mvff), size, findDelete);
  }

  if (foundBlock) {
    *baseReturn = RangeBase(&range);
    *limitReturn = RangeLimit(&range);
    mvff->free -= size;
  }

  return foundBlock;
}
Example #10
/* MVFFBufferFill -- Fill the buffer
 *
 * Fill it with the largest block we can find.
 */
static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
                          Pool pool, Buffer buffer, Size size,
                          Bool withReservoirPermit)
{
  Res res;
  MVFF mvff;
  RangeStruct range, oldRange;
  Bool found;
  Seg seg = NULL;
  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(pool)));
  AVERT(Bool, withReservoirPermit);

  found = MVFFFindLargest(&range, &oldRange, mvff, size, FindDeleteENTIRE);
  if (!found) {
    /* Add a new segment to the free list and try again. */
    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    found = MVFFFindLargest(&range, &oldRange, mvff, size, FindDeleteENTIRE);
  }
  AVER(found);

  AVER(RangeSize(&range) >= size);
  mvff->free -= RangeSize(&range);

  *baseReturn = RangeBase(&range);
  *limitReturn = RangeLimit(&range);
  return ResOK;
}
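
Both versions of MVFFBufferFill share one control flow: take the largest free block, and if none is big enough, grow the pool once and retry, after which the search cannot fail. A hedged sketch of that flow; find_largest and add_segment are stand-ins for the MVFFFindLargest and MVFFAddSeg roles, with implementations elided:

#include <assert.h>
#include <stddef.h>

extern int find_largest(size_t size, void **baseOut, size_t *lenOut);
extern int add_segment(size_t size);  /* 0 on success */

int fill(size_t size, void **baseOut, size_t *lenOut)
{
  if (!find_largest(size, baseOut, lenOut)) {
    if (add_segment(size) != 0)
      return -1;                         /* propagate allocation failure */
    if (!find_largest(size, baseOut, lenOut))
      assert(0);                         /* the new segment must be found */
  }
  assert(*lenOut >= size);
  return 0;
}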
Example #11
Res BufferReserve(Addr *pReturn, Buffer buffer, Size size)
{
  Addr next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer)); /* <design/check/#.common> */

  /* Is there enough room in the unallocated portion of the buffer to */
  /* satisfy the request?  If so, just increase the alloc marker and */
  /* return a pointer to the area below it. */
  next = AddrAdd(buffer->ap_s.alloc, size);
  if (next > (Addr)buffer->ap_s.alloc &&
      next <= (Addr)buffer->ap_s.limit) {
    buffer->ap_s.alloc = next;
    *pReturn = buffer->ap_s.init;
    return ResOK;
  }

  /* If the buffer can't accommodate the request, call "fill". */
  return BufferFill(pReturn, buffer, size);
}
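
The two-sided test (next > alloc to reject address wrap-around, next <= limit to check room) is the heart of an overflow-safe inline bump allocator. A self-contained sketch of the same guard using uintptr_t arithmetic in place of MPS's Addr operations:

#include <stddef.h>
#include <stdint.h>

typedef struct { uintptr_t alloc; uintptr_t limit; } ap_t;  /* illustrative */

void *bump_reserve(ap_t *ap, size_t size)
{
  uintptr_t next = ap->alloc + size;
  /* next > alloc rejects arithmetic wrap; next <= limit checks room. */
  if (next > ap->alloc && next <= ap->limit) {
    void *p = (void *)ap->alloc;
    ap->alloc = next;
    return p;
  }
  return NULL;  /* caller must refill, as BufferFill does above */
}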
Example #12
ATTRIBUTE_UNUSED
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD(VM, VMChunkVM(vmchunk));
  CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk))));
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
     makes sure they're where they're expected to be (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages)) <=
         vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages)) <=
         vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  
  return TRUE;
}
Example #13
static Res MVFFInit(Pool pool, ArgList args)
{
  Size extendBy = MVFF_EXTEND_BY_DEFAULT;
  Size avgSize = MVFF_AVG_SIZE_DEFAULT;
  Size align = MVFF_ALIGN_DEFAULT;
  Bool slotHigh = MVFF_SLOT_HIGH_DEFAULT;
  Bool arenaHigh = MVFF_ARENA_HIGH_DEFAULT;
  Bool firstFit = MVFF_FIRST_FIT_DEFAULT;
  MVFF mvff;
  Arena arena;
  Res res;
  void *p;
  ArgStruct arg;

  AVERT(Pool, pool);
  arena = PoolArena(pool);

  /* .arg: class-specific additional arguments; see */
  /* <design/poolmvff/#method.init> */
  /* .arg.check: we do the same checks here and in MVFFCheck */
  /* except for arenaHigh, which is stored only in the segPref. */
  
  if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY))
    extendBy = arg.val.size;
  
  if (ArgPick(&arg, args, MPS_KEY_MEAN_SIZE))
    avgSize = arg.val.size;
  
  if (ArgPick(&arg, args, MPS_KEY_ALIGN))
    align = arg.val.align;

  if (ArgPick(&arg, args, MPS_KEY_MVFF_SLOT_HIGH))
    slotHigh = arg.val.b;
  
  if (ArgPick(&arg, args, MPS_KEY_MVFF_ARENA_HIGH))
    arenaHigh = arg.val.b;
  
  if (ArgPick(&arg, args, MPS_KEY_MVFF_FIRST_FIT))
    firstFit = arg.val.b;

  AVER(extendBy > 0);           /* .arg.check */
  AVER(avgSize > 0);            /* .arg.check */
  AVER(avgSize <= extendBy);    /* .arg.check */
  AVER(SizeIsAligned(align, MPS_PF_ALIGN));
  AVERT(Bool, slotHigh);
  AVERT(Bool, arenaHigh);
  AVERT(Bool, firstFit);

  mvff = Pool2MVFF(pool);

  mvff->extendBy = extendBy;
  if (extendBy < ArenaAlign(arena))
    mvff->minSegSize = ArenaAlign(arena);
  else
    mvff->minSegSize = extendBy;
  mvff->avgSize = avgSize;
  pool->alignment = align;
  mvff->slotHigh = slotHigh;
  mvff->firstFit = firstFit;

  res = ControlAlloc(&p, arena, sizeof(SegPrefStruct), FALSE);
  if (res != ResOK)
    return res;

  mvff->segPref = (SegPref)p;
  SegPrefInit(mvff->segPref);
  SegPrefExpress(mvff->segPref, arenaHigh ? SegPrefHigh : SegPrefLow, NULL);

  mvff->total = 0;
  mvff->free = 0;

  res = FreelistInit(FreelistOfMVFF(mvff), align);
  if (res != ResOK)
    goto failInit;

  res = CBSInit(CBSOfMVFF(mvff), arena, (void *)mvff, align,
                /* fastFind */ TRUE, /* zoned */ FALSE, args);
  if (res != ResOK)
    goto failInit;

  mvff->sig = MVFFSig;
  AVERT(MVFF, mvff);
  EVENT8(PoolInitMVFF, pool, arena, extendBy, avgSize, align,
         BOOL(slotHigh), BOOL(arenaHigh), BOOL(firstFit));
  return ResOK;

failInit:
  ControlFree(arena, p, sizeof(SegPrefStruct));
  return res;
}
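
For reference, the keyword arguments picked up here correspond to the public creation interface. A sketch of creating an MVFF pool with them (headers mps.h and mpscmvff.h; the arena and the particular values are illustrative):

mps_pool_t pool;
mps_res_t res;
MPS_ARGS_BEGIN(args) {
  MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, 64 * 1024);
  MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, 32);
  MPS_ARGS_ADD(args, MPS_KEY_ALIGN, 8);
  MPS_ARGS_ADD(args, MPS_KEY_MVFF_SLOT_HIGH, 0);
  MPS_ARGS_ADD(args, MPS_KEY_MVFF_ARENA_HIGH, 0);
  MPS_ARGS_ADD(args, MPS_KEY_MVFF_FIRST_FIT, 1);
  res = mps_pool_create_k(&pool, arena, mps_class_mvff(), args);
} MPS_ARGS_END(args);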
Example #14
Bool BufferTrip(Buffer buffer, Addr p, Size size)
{
  Pool pool;

  AVERT(Buffer, buffer);
  AVER(p != 0);
  AVER(size > 0);
  AVER(SizeIsAligned(size, buffer->alignment));

  /* The limit field should be zero, because that's how trip gets */
  /* called.  See .commit.trip. */
  AVER(buffer->ap_s.limit == (Addr)0);
  /* Of course we should be trapped. */
  AVER(BufferIsTrapped(buffer));

  /* The init and alloc fields should be equal at this point, because */
  /* the step .commit.update has happened. */
  AVER(buffer->ap_s.init == buffer->ap_s.alloc);

  /* The p parameter points at the base address of the allocated */
  /* block, the end of which should now coincide with the init and */
  /* alloc fields. */
  /* Note that we don't _really_ care about p too much.  We don't */
  /* do anything else with it apart from these checks.  (In particular, */
  /* it seems like the algorithms could be modified to cope with the */
  /* case of the object having been copied between Commit updating */
  /* init and testing limit.) */
  AVER(AddrAdd(p, size) == buffer->ap_s.init);

  pool = BufferPool(buffer);

  AVER(PoolHasAddr(pool, p));

  /* .trip.unflip: If the flip occurred before commit set "init" */
  /* to "alloc" (see .commit.before) then the object is invalid */
  /* (won't've been scanned) so undo the allocation and fail commit. */
  /* Otherwise (see .commit.after) the object is valid (will've been */
  /* scanned) so commit can simply succeed. */
  if ((buffer->mode & BufferModeFLIPPED)
      && buffer->ap_s.init != buffer->initAtFlip) {
    /* Reset just enough state for Reserve/Fill to work. */
    /* The buffer is left trapped and we leave the untrapping */
    /* for the next reserve (which goes out of line to Fill */
    /* (.fill.unflip) because the buffer is still trapped) */
    buffer->ap_s.init = p;
    buffer->ap_s.alloc = p;
    return FALSE;
  }

  /* Emit event including class if logged */
  if (buffer->mode & BufferModeLOGGED) {
    Bool b;
    Format format;
    Addr clientClass;

    b = PoolFormat(&format, buffer->pool);
    if (b) {
      clientClass = format->klass(p);
    } else {
      clientClass = (Addr)0;
    }
    EVENT4(BufferCommit, buffer, p, size, clientClass);
  }
  return TRUE;
}
Example #15
Res SACCreate(SAC *sacReturn, Pool pool, Count classesCount,
              SACClasses classes)
{
  void *p;
  SAC sac;
  Res res;
  Index i, j;
  Index middleIndex;  /* index of the size in the middle */
  Size prevSize;
  unsigned totalFreq = 0;
  mps_sac_t esac;

  AVER(sacReturn != NULL);
  AVERT(Pool, pool);
  AVER(classesCount > 0);
  /* In this cache type, there is no upper limit on classesCount. */
  prevSize = sizeof(Addr) - 1; /* must be large enough for freelist link */
  /* @@@@ It would be better to dynamically adjust the smallest class */
  /* to be large enough, but that gets complicated, if you have to */
  /* merge classes because of the adjustment. */
  for (i = 0; i < classesCount; ++i) {
    AVER(classes[i]._block_size > 0);
    AVER(SizeIsAligned(classes[i]._block_size, PoolAlignment(pool)));
    AVER(prevSize < classes[i]._block_size);
    prevSize = classes[i]._block_size;
    /* no restrictions on count */
    /* no restrictions on frequency */
  }

  /* Calculate frequency scale */
  for (i = 0; i < classesCount; ++i) {
    unsigned oldFreq = totalFreq;
    totalFreq += classes[i]._frequency;
    AVER(oldFreq <= totalFreq); /* check for overflow */
    UNUSED(oldFreq); /* <code/mpm.c#check.unused> */
  }

  /* Find middle one */
  totalFreq /= 2;
  for (i = 0; i < classesCount; ++i) {
    if (totalFreq < classes[i]._frequency) break;
    totalFreq -= classes[i]._frequency;
  }
  if (totalFreq <= classes[i]._frequency / 2)
    middleIndex = i;
  else
    middleIndex = i + 1; /* there must exist another class at i+1 */

  /* Allocate SAC */
  res = ControlAlloc(&p, PoolArena(pool), sacSize(middleIndex, classesCount),
                     FALSE);
  if(res != ResOK)
    goto failSACAlloc;
  sac = p;

  /* Move classes in place */
  /* It's important this matches SACFind. */
  esac = ExternalSACOfSAC(sac);
  for (j = middleIndex + 1, i = 0; j < classesCount; ++j, i += 2) {
    esac->_freelists[i]._size = classes[j]._block_size;
    esac->_freelists[i]._count = 0;
    esac->_freelists[i]._count_max = classes[j]._cached_count;
    esac->_freelists[i]._blocks = NULL;
  }
  esac->_freelists[i]._size = SizeMAX;
  esac->_freelists[i]._count = 0;
  esac->_freelists[i]._count_max = 0;
  esac->_freelists[i]._blocks = NULL;
  for (j = middleIndex, i = 1; j > 0; --j, i += 2) {
    esac->_freelists[i]._size = classes[j-1]._block_size;
    esac->_freelists[i]._count = 0;
    esac->_freelists[i]._count_max = classes[j]._cached_count;
    esac->_freelists[i]._blocks = NULL;
  }
  esac->_freelists[i]._size = 0;
  esac->_freelists[i]._count = 0;
  esac->_freelists[i]._count_max = classes[j]._cached_count;
  esac->_freelists[i]._blocks = NULL;

  /* finish init */
  esac->_trapped = FALSE;
  esac->_middle = classes[middleIndex]._block_size;
  sac->pool = pool;
  sac->classesCount = classesCount;
  sac->middleIndex = middleIndex;
  sac->sig = SACSig;
  AVERT(SAC, sac);
  *sacReturn = sac;
  return ResOK;

failSACAlloc:
  return res;
}
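
The interleaved freelist layout built by the two loops is easier to see in miniature. Classes above the middle go to even slots (ascending), the middle class and below to odd slots (descending), each chain closed by a sentinel. The apparent asymmetry in the second loop (_size from classes[j-1], _count_max from classes[j]) is consistent with a downward search that stops at the first slot whose stored size is below the request: the stored size acts as an exclusive lower bound for the class the slot actually caches. A self-contained sketch that prints the slot-to-class mapping for illustrative values (5 classes, middleIndex 2):

#include <stdio.h>

int main(void)
{
  unsigned classesCount = 5, middleIndex = 2, i, j;

  /* Mirrors the two placement loops in SACCreate above. */
  for (j = middleIndex + 1, i = 0; j < classesCount; ++j, i += 2)
    printf("slot %u: class %u (bound = size of class %u)\n", i, j, j);
  printf("slot %u: sentinel (bound = SizeMAX, never caches)\n", i);
  for (j = middleIndex, i = 1; j > 0; --j, i += 2)
    printf("slot %u: class %u (bound = size of class %u)\n", i, j, j - 1);
  printf("slot %u: class %u (bound = 0)\n", i, j);
  return 0;
}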