Example #1
File: pool.c Project: bhanug/mps
Res PoolAlloc(Addr *pReturn, Pool pool, Size size)
{
  Res res;

  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVER(size > 0);

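  /* Dispatch to the pool class's alloc method. */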
  res = Method(Pool, pool, alloc)(pReturn, pool, size);
  if (res != ResOK)
    return res;
  /* Make sure that the allocated address was in the pool's memory. */
  /* .hasaddr.critical: The PoolHasAddr check is expensive, and in */
  /* allocation-bound programs this is on the critical path. */
  AVER_CRITICAL(PoolHasAddr(pool, *pReturn));
  /* All allocations should be aligned to the pool's alignment */
  AVER_CRITICAL(AddrIsAligned(*pReturn, pool->alignment));

  /* All PoolAllocs should advance the allocation clock, so we count */
  /* it all in the fillMutatorSize field. */
  ArenaGlobals(PoolArena(pool))->fillMutatorSize += size;

  EVENT3(PoolAlloc, pool, *pReturn, size);

  return ResOK;
}
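A minimal caller sketch for the function above (the allocateBlock wrapper
and the 64-byte size are illustrative assumptions; only the Res convention
comes from the listing):

static Res allocateBlock(Pool pool)
{
  Addr block;
  Res res;

  res = PoolAlloc(&block, pool, 64);
  if (res != ResOK)
    return res;                /* propagate the allocation failure */
  /* On success, block lies within the pool and is aligned to the */
  /* pool's alignment, as the AVER_CRITICAL checks above guarantee. */
  return ResOK;
}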
Example #2
File: poolmfs.c Project: CarterTsai/clasp
void MFSExtend(Pool pool, Addr base, Size size)
{
  MFS mfs;
  Tract tract;
  Word i, unitsPerExtent;
  Size unitSize;
  Header header = NULL;

  AVERT(Pool, pool);
  mfs = PoolPoolMFS(pool);
  AVERT(MFS, mfs);
  AVER(size == mfs->extendBy);

  /* Ensure that the memory we're adding belongs to this pool.  This is
     automatic if it was allocated using ArenaAlloc, but if the memory is
     being inserted from elsewhere then it must have been set up correctly. */
  AVER(PoolHasAddr(pool, base));
  
  /* .tract.chain: chain first tracts through TractP(tract) */
  tract = TractOfBaseAddr(PoolArena(pool), base);

  AVER(TractPool(tract) == pool);

  TractSetP(tract, (void *)mfs->tractList);
  mfs->tractList = tract;

  /* Update accounting */
  mfs->total += size;
  mfs->free += size;

  /* Sew together all the new empty units in the region, working down */
  /* from the top so that they are in ascending order of address on the */
  /* free list. */

  unitSize = mfs->unitSize;
  unitsPerExtent = size/unitSize;
  AVER(unitsPerExtent > 0);

#define SUB(b, s, i)    ((Header)AddrAdd(b, (s)*(i)))

  for (i = 0; i < unitsPerExtent; ++i) {
    header = SUB(base, unitSize, unitsPerExtent - i - 1);
    AVER(AddrIsAligned(header, pool->alignment));
    AVER(AddrAdd((Addr)header, unitSize) <= AddrAdd(base, size));
    header->next = mfs->freeList;
    mfs->freeList = header;
  }

#undef SUB
}
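The loop above pushes units starting from the highest address, so the
resulting free list ends up in ascending address order. A standalone
sketch of the same chaining pattern, using plain C types instead of the
MPS ones:

#include <stddef.h>

typedef struct UnitStruct {
  struct UnitStruct *next;
} UnitStruct, *Unit;

/* Push units from the top of the region downwards; traversing the */
/* resulting list then visits units in ascending order of address. */
static Unit chainUnits(char *base, size_t unitSize, size_t units)
{
  Unit freeList = NULL;
  size_t i;

  for (i = 0; i < units; ++i) {
    Unit u = (Unit)(base + unitSize * (units - i - 1));
    u->next = freeList;
    freeList = u;
  }
  return freeList;  /* head is the lowest-addressed unit */
}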
Example #3
void SACEmpty(SAC sac, Addr p, Size size)
{
  Index i;
  Size blockSize;
  mps_sac_t esac;
  
  AVERT(SAC, sac);
  AVER(p != NULL);
  AVER(PoolHasAddr(sac->pool, p));
  AVER(size > 0);
  esac = ExternalSACOfSAC(sac);

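  /* Find the size class (and its block size) for this request. */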
  sacFind(&i, &blockSize, sac, size);
  /* Check it's full (in the future, there will be other cases). */
  AVER(esac->_freelists[i]._count
       == esac->_freelists[i]._count_max);

  /* Adjust size for the overlarge class. */
  if (blockSize == SizeMAX)
    /* see .align */
    blockSize = SizeAlignUp(size, PoolAlignment(sac->pool));
  if (esac->_freelists[i]._count_max > 0) {
    Count blockCount;

    /* Flush 2/3 of the cache for this class. */
    /* Computed as count - count/3, so that the rounding works out right. */
    blockCount = esac->_freelists[i]._count;
    blockCount -= esac->_freelists[i]._count / 3;
    sacClassFlush(sac, i, blockSize, (blockCount > 0) ? blockCount : 1);
    /* Leave the current one in the cache. */
    esac->_freelists[i]._count += 1;
    /* @@@@ ignoring shields for now */
    *ADDR_PTR(Addr, p) = esac->_freelists[i]._blocks;
    esac->_freelists[i]._blocks = p;
  } else {
    /* Free even the current one. */
    PoolFree(sac->pool, p, blockSize);
  }
}
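The count - count/3 computation above keeps roughly a third of the blocks
cached while the flushed share absorbs the rounding, so even small counts
flush something. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
  unsigned count;

  for (count = 1; count <= 6; ++count) {
    unsigned keep = count / 3;      /* rounds down, so the kept share is small */
    unsigned flush = count - keep;  /* the flushed share rounds up */
    printf("count=%u keep=%u flush=%u\n", count, keep, flush);
  }
  return 0;
}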
Example #4
File: buffer.c Project: Ravenbrook/mps
Bool BufferTrip(Buffer buffer, Addr p, Size size)
{
  Pool pool;

  AVERT(Buffer, buffer);
  AVER(p != 0);
  AVER(size > 0);
  AVER(SizeIsAligned(size, buffer->alignment));

  /* The limit field should be zero, because that's how trip gets */
  /* called.  See .commit.trip. */
  AVER(buffer->ap_s.limit == 0);
  /* Of course we should be trapped. */
  AVER(BufferIsTrapped(buffer));

  /* The init and alloc fields should be equal at this point, because */
  /* the step .commit.update has happened. */
  AVER(buffer->ap_s.init == buffer->ap_s.alloc);

  /* The p parameter points at the base address of the allocated */
  /* block, the end of which should now coincide with the init and */
  /* alloc fields. */
  /* Note that we don't _really_ care about p too much.  We don't */
  /* do anything else with it apart from these checks. (in particular */
  /* it seems like the algorithms could be modified to cope with the */
  /* case of the object having been copied between Commit updating init */
  /* and testing limit) */
  AVER(AddrAdd(p, size) == buffer->ap_s.init);

  pool = BufferPool(buffer);

  AVER(PoolHasAddr(pool, p));

  /* .trip.unflip: If the flip occurred before commit set "init" */
  /* to "alloc" (see .commit.before) then the object is invalid */
  /* (won't've been scanned) so undo the allocation and fail commit. */
  /* Otherwise (see .commit.after) the object is valid (will've been */
  /* scanned) so commit can simply succeed. */
  if ((buffer->mode & BufferModeFLIPPED)
      && buffer->ap_s.init != buffer->initAtFlip) {
    /* Reset just enough state for Reserve/Fill to work. */
    /* The buffer is left trapped and we leave the untrapping */
    /* for the next reserve (which goes out of line to Fill */
    /* (.fill.unflip) because the buffer is still trapped) */
    buffer->ap_s.init = p;
    buffer->ap_s.alloc = p;
    return FALSE;
  }

  /* Emit event including class if logged */
  if (buffer->mode & BufferModeLOGGED) {
    Bool b;
    Format format;
    Addr clientClass;

    b = PoolFormat(&format, buffer->pool);
    if (b) {
      clientClass = format->klass(p);
    } else {
      clientClass = (Addr)0;
    }
    EVENT4(BufferCommit, buffer, p, size, clientClass);
  }
  return TRUE;
}
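When BufferTrip returns FALSE the corresponding commit fails, so the
caller is expected to retry. A hedged sketch of the usual reserve/commit
loop (BufferReserve, BufferCommit and the initializeObject helper are
assumptions about the caller, not taken from the listing):

static Res allocateObject(Buffer buffer, Size size)
{
  Addr p;
  Res res;

  do {
    res = BufferReserve(&p, buffer, size);
    if (res != ResOK)
      return res;
    initializeObject(p, size);  /* hypothetical client initializer */
    /* Commit fails if the buffer tripped during a flip; the object */
    /* is then invalid and must be initialized afresh. */
  } while (!BufferCommit(buffer, p, size));
  return ResOK;
}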