/* PoolAlloc -- allocate a block of at least size bytes from a pool
 *
 * pReturn: receives the address of the allocated block.
 * Delegates to the pool class's alloc method, then checks and accounts
 * for the result.  Returns ResOK on success, otherwise the class
 * method's failure code.
 */
Res PoolAlloc(Addr *pReturn, Pool pool, Size size)
{
  Res result;

  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVER(size > 0);

  /* Delegate the allocation to the pool class. */
  result = Method(Pool, pool, alloc)(pReturn, pool, size);
  if (result != ResOK)
    return result;

  /* Make sure that the allocated address was in the pool's memory. */
  /* .hasaddr.critical: The PoolHasAddr check is expensive, and in */
  /* allocation-bound programs this is on the critical path. */
  AVER_CRITICAL(PoolHasAddr(pool, *pReturn));

  /* All allocations should be aligned to the pool's alignment. */
  AVER_CRITICAL(AddrIsAligned(*pReturn, pool->alignment));

  /* All PoolAllocs should advance the allocation clock, so we count */
  /* it all in the fillMutatorSize field. */
  ArenaGlobals(PoolArena(pool))->fillMutatorSize += size;

  EVENT3(PoolAlloc, pool, *pReturn, size);

  return ResOK;
}
/* mps_arena_collect -- run a full collection, blocking until complete
 *
 * Enters the arena lock for the duration of the call.
 */
mps_res_t mps_arena_collect(mps_arena_t arena)
{
  Res result;

  ArenaEnter(arena);
  result = ArenaCollect(ArenaGlobals(arena), TraceStartWhyCLIENTFULL_BLOCK);
  ArenaLeave(arena);

  return result;
}
/* mps_arena_start_collect -- kick off a full collection to proceed
 * incrementally, returning without waiting for it to finish.
 *
 * Enters the arena lock for the duration of the call.
 */
mps_res_t mps_arena_start_collect(mps_arena_t arena)
{
  Res result;

  ArenaEnter(arena);
  result = ArenaStartCollect(ArenaGlobals(arena),
                             TraceStartWhyCLIENTFULL_INCREMENTAL);
  ArenaLeave(arena);

  return result;
}
/* mps_arena_roots_walk -- apply a stepper function to the arena's roots
 *
 * f is called for each root; p and s are opaque client closures passed
 * through unchanged.  The arena must be parked (.assume.parked).
 */
void mps_arena_roots_walk(mps_arena_t mps_arena, mps_roots_stepper_t f,
                          void *p, size_t s)
{
  Arena arena = (Arena)mps_arena;
  Res walkRes;

  ArenaEnter(arena);

  AVER(FUNCHECK(f));
  /* p and s are arbitrary closures, hence can't be checked */

  /* .assume.parked: clamped with no traces in progress. */
  AVER(ArenaGlobals(arena)->clamped);
  AVER(arena->busyTraces == TraceSetEMPTY);

  walkRes = ArenaRootsWalk(ArenaGlobals(arena), f, p, s);
  AVER(walkRes == ResOK);

  ArenaLeave(arena);
}
/* mps_arena_step -- offer the MPS some idle time in which to work
 *
 * interval is the time offered; multiplier scales the work that may be
 * started (see ArenaStep).  Returns whether any work was done.
 */
mps_bool_t mps_arena_step(mps_arena_t arena, double interval,
                          double multiplier)
{
  Bool didWork;

  ArenaEnter(arena);
  didWork = ArenaStep(ArenaGlobals(arena), interval, multiplier);
  ArenaLeave(arena);

  return didWork;
}
/* BufferAttach -- point a reset buffer at a region of memory
 *
 * base..limit is the region being attached; init marks the boundary
 * between initialized data and free space (base <= init <= limit), and
 * size is the amount allocated immediately, so alloc = init + size.
 * Updates the per-buffer and per-arena fill accounting and runs the
 * buffer class's attach method.
 */
void BufferAttach(Buffer buffer, Addr base, Addr limit, Addr init, Size size)
{
  Size filled;

  AVERT(Buffer, buffer);
  AVER(BufferIsReset(buffer));
  AVER(AddrAdd(base, size) <= limit);
  AVER(base <= init);
  AVER(init <= limit);

  /* Set up the buffer to point at the supplied region */
  buffer->mode |= BufferModeATTACHED;
  buffer->base = base;
  buffer->ap_s.init = init;
  buffer->ap_s.alloc = AddrAdd(init, size);
  /* only set limit if not logged */
  if ((buffer->mode & BufferModeLOGGED) == 0) {
    buffer->ap_s.limit = limit;
  } else {
    /* Logged buffers keep ap_s.limit at zero. */
    AVER(buffer->ap_s.limit == (Addr)0);
  }
  AVER(buffer->initAtFlip == (Addr)0);
  buffer->poolLimit = limit;

  /* Account the whole of init..limit as filled. */
  filled = AddrOffset(init, limit);
  buffer->fillSize += filled;
  if (buffer->isMutator) {
    if (base != init) { /* see <design/buffer#.count.alloc.how> */
      /* base..init was pre-allocated; subtract it so it is not
         counted twice in the mutator allocation total. */
      Size prealloc = AddrOffset(base, init);
      ArenaGlobals(buffer->arena)->allocMutatorSize -= prealloc;
    }
    ArenaGlobals(buffer->arena)->fillMutatorSize += filled;
  } else {
    ArenaGlobals(buffer->arena)->fillInternalSize += filled;
  }

  /* run any class-specific attachment method */
  Method(Buffer, buffer, attach)(buffer, base, limit, init, size);

  AVERT(Buffer, buffer);
  EVENT4(BufferFill, buffer, size, base, filled);
}
/* BufferDetach -- detach a buffer from its region of memory
 *
 * Notifies the owning pool and the buffer class, accounts for the
 * unused (spare) space being returned, then resets all the buffer's
 * region fields.  No-op if the buffer is already reset.
 */
void BufferDetach(Buffer buffer, Pool pool)
{
  AVERT(Buffer, buffer);
  AVER(BufferIsReady(buffer));

  if (!BufferIsReset(buffer)) {
    Addr init, limit;
    Size spare;

    buffer->mode |= BufferModeTRANSITION;

    /* Ask the owning pool to do whatever it needs to before the */
    /* buffer is detached (e.g. copy buffer state into pool state). */
    Method(Pool, pool, bufferEmpty)(pool, buffer);

    /* run any class-specific detachment method */
    Method(Buffer, buffer, detach)(buffer);

    /* init..limit is the space left over; count it as emptied. */
    init = BufferGetInit(buffer);
    limit = BufferLimit(buffer);
    spare = AddrOffset(init, limit);
    buffer->emptySize += spare;
    if (buffer->isMutator) {
      ArenaGlobals(buffer->arena)->emptyMutatorSize += spare;
      /* base..init stays allocated to the mutator; add it to the
         mutator allocation total. */
      ArenaGlobals(buffer->arena)->allocMutatorSize +=
        AddrOffset(buffer->base, init);
    } else {
      ArenaGlobals(buffer->arena)->emptyInternalSize += spare;
    }

    /* Reset the buffer. */
    buffer->base = (Addr)0;
    buffer->initAtFlip = (Addr)0;
    buffer->ap_s.init = (mps_addr_t)0;
    buffer->ap_s.alloc = (mps_addr_t)0;
    buffer->ap_s.limit = (mps_addr_t)0;
    buffer->poolLimit = (Addr)0;
    buffer->mode &=
      ~(BufferModeATTACHED|BufferModeFLIPPED|BufferModeTRANSITION);

    EVENT2(BufferEmpty, buffer, spare);
  }
}
/* BufferAbsInit -- initialize the abstract (base class) part of a buffer
 *
 * buffer: memory for the buffer structure, not yet initialized.
 * pool: the owning pool; the buffer is appended to its buffer ring and
 *   given the next serial number from it.
 * isMutator: whether the buffer belongs to the mutator, which selects
 *   which arena accounting totals it contributes to (see BufferAttach).
 * args: keyword argument list (checked but otherwise unused here).
 *
 * Always returns ResOK.
 */
static Res BufferAbsInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args)
{
  Arena arena;

  AVER(buffer != NULL);
  AVERT(Pool, pool);
  AVER(BoolCheck(isMutator));
  AVERT(ArgList, args);

  /* Superclass init */
  InstInit(CouldBeA(Inst, buffer));

  arena = PoolArena(pool);

  /* Initialize the buffer.  See <code/mpmst.h> for a definition of
     the structure.  sig and serial comes later .init.sig-serial */
  buffer->arena = arena;
  buffer->pool = pool;
  RingInit(&buffer->poolRing);
  buffer->isMutator = isMutator;
  /* Pick up the arena-wide logging setting; logged buffers keep
     ap_s.limit at zero (see BufferAttach). */
  if (ArenaGlobals(arena)->bufferLogging) {
    buffer->mode = BufferModeLOGGED;
  } else {
    buffer->mode = 0;
  }
  buffer->fillSize = 0.0;
  buffer->emptySize = 0.0;
  buffer->alignment = PoolAlignment(pool);
  buffer->base = (Addr)0;
  buffer->initAtFlip = (Addr)0;
  /* In the next three assignments we really mean zero, not NULL,
     because the bit pattern is compared.  It's pretty unlikely we'll
     encounter a platform where this makes a difference. */
  buffer->ap_s.init = (mps_addr_t)0;
  buffer->ap_s.alloc = (mps_addr_t)0;
  buffer->ap_s.limit = (mps_addr_t)0;
  buffer->poolLimit = (Addr)0;
  buffer->rampCount = 0;

  /* .init.sig-serial: Now the vanilla stuff is initialized, sign the
     buffer and give it a serial number.  It can then be safely
     checked in subclass methods. */
  buffer->serial = pool->bufferSerial; /* .trans.mod */
  ++pool->bufferSerial;
  SetClassOfPoly(buffer, CLASS(Buffer));
  buffer->sig = BufferSig;
  AVERT(Buffer, buffer);

  /* Attach the initialized buffer to the pool. */
  RingAppend(&pool->bufferRing, &buffer->poolRing);

  EVENT3(BufferInit, buffer, pool, BOOLOF(buffer->isMutator));

  return ResOK;
}
/* test_step -- step the arena once and record timing statistics
 *
 * Offers the arena 0.1s of idle time with the given multiplier and
 * folds the elapsed wall-clock time into the "step" or "no step"
 * totals depending on whether any work was done.
 */
static void test_step(mps_arena_t arena, double multiplier)
{
  double elapsed = my_clock();
  mps_bool_t stepped = mps_arena_step(arena, 0.1, multiplier);

  /* Stepping must leave the arena clamped. */
  cdie(ArenaGlobals(arena)->clamped, "arena was unclamped");

  elapsed = time_since(elapsed);
  if (stepped) {
    if (elapsed > max_step_time)
      max_step_time = elapsed;
    step_time += elapsed;
    ++steps;
  } else {
    if (elapsed > max_no_step_time)
      max_no_step_time = elapsed;
    no_step_time += elapsed;
    ++no_steps;
  }
}
/* PoolCheck -- validate the invariants of a pool structure
 *
 * Returns TRUE when every check passes (see the CHECK* macros for
 * what happens on failure).
 */
Bool PoolCheck(Pool pool)
{
  PoolClass klass;

  /* Checks ordered as per struct decl in <code/mpmst.h#pool> */
  CHECKS(Pool, pool);
  CHECKC(AbstractPool, pool);
  /* Break modularity for checking efficiency */
  CHECKL(pool->serial < ArenaGlobals(pool->arena)->poolSerial);
  klass = ClassOfPoly(Pool, pool);
  CHECKD(PoolClass, klass);
  CHECKU(Arena, pool->arena);
  CHECKD_NOSIG(Ring, &pool->arenaRing);
  CHECKD_NOSIG(Ring, &pool->bufferRing);
  /* Cannot check pool->bufferSerial */
  CHECKD_NOSIG(Ring, &pool->segRing);
  CHECKL(AlignCheck(pool->alignment));
  /* Normally pool->format iff PoolHasAttr(pool, AttrFMT), but during
     pool initialization the class may not yet be set. */
  CHECKL(!PoolHasAttr(pool, AttrFMT) || pool->format != NULL);
  return TRUE;
}
/* mps_arena_unsafe_restore_protection -- restore arena protection
 * (the counterpart of mps_arena_unsafe_expose_remember_protection).
 */
void mps_arena_unsafe_restore_protection(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaRestoreProtection(ArenaGlobals(arena));
  ArenaLeave(arena);
}
/* mps_arena_unsafe_expose_remember_protection -- expose the arena,
 * asking ArenaExposeRemember (flag 1) to remember the protection so
 * that mps_arena_unsafe_restore_protection can reinstate it.
 */
void mps_arena_unsafe_expose_remember_protection(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaExposeRemember(ArenaGlobals(arena), 1);
  ArenaLeave(arena);
}
/* mps_arena_expose -- expose the arena without remembering protection
 * (ArenaExposeRemember is called with the remember flag clear).
 */
void mps_arena_expose(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaExposeRemember(ArenaGlobals(arena), 0);
  ArenaLeave(arena);
}
/* mps_arena_park -- park the arena (see ArenaPark). */
void mps_arena_park(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaPark(ArenaGlobals(arena));
  ArenaLeave(arena);
}
/* mps_arena_release -- release the arena (see ArenaRelease). */
void mps_arena_release(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaRelease(ArenaGlobals(arena));
  ArenaLeave(arena);
}
/* mps_arena_clamp -- clamp the arena (see ArenaClamp). */
void mps_arena_clamp(mps_arena_t arena)
{
  ArenaEnter(arena);
  ArenaClamp(ArenaGlobals(arena));
  ArenaLeave(arena);
}