/* MFSCheck -- validate the invariants of an MFS (manual fixed small)
 * pool instance.
 *
 * Returns TRUE if every check passes; the CHECK* macros fault on the
 * first violated invariant.  (Fix: the original checked
 * CHECKC(MFSPool, mfs) twice; the redundant duplicate is removed.)
 */
Bool MFSCheck(MFS mfs)
{
  Arena arena;

  CHECKS(MFS, mfs);
  CHECKC(MFSPool, mfs);
  CHECKD(Pool, MFSPool(mfs));
  CHECKL(mfs->unitSize >= UNIT_MIN);
  CHECKL(mfs->extendBy >= UNIT_MIN);
  CHECKL(BoolCheck(mfs->extendSelf));
  arena = PoolArena(MFSPool(mfs));
  /* Extension requests must be in whole arena grains. */
  CHECKL(SizeIsArenaGrains(mfs->extendBy, arena));
  /* unitSize is the caller-requested size rounded up to the pool
     alignment. */
  CHECKL(SizeAlignUp(mfs->unroundedUnitSize, PoolAlignment(MFSPool(mfs)))
         == mfs->unitSize);
  if (mfs->tractList != NULL) {
    CHECKD_NOSIG(Tract, mfs->tractList);
  }
  /* Accounting: free space never exceeds total, and the allocated
     remainder is a whole number of units. */
  CHECKL(mfs->free <= mfs->total);
  CHECKL((mfs->total - mfs->free) % mfs->unitSize == 0);
  return TRUE;
}
int main(void) { const char *filedir = "dirfile"; const char *format = "dirfile/format"; int error, r = 0; #ifdef GD_NO_C99_API const double cdividend[2] = {33.3, 44.4}; #else const double complex cdividend = 33.3 + _Complex_I * 44.4; #endif gd_entry_t e; DIRFILE *D; rmdirfile(); D = gd_open(filedir, GD_RDWR | GD_CREAT | GD_VERBOSE); gd_add_crecip(D, "new", "in", cdividend, 0); error = gd_error(D); /* check */ gd_entry(D, "new", &e); if (gd_error(D)) r = 1; else { CHECKI(e.field_type, GD_RECIP_ENTRY); CHECKS(e.in_fields[0], "in"); CHECKI(e.comp_scal, 1); CHECKC(e.EN(recip,cdividend), cdividend); CHECKI(e.fragment_index, 0); gd_free_entry_strings(&e); } gd_close(D); unlink(format); rmdir(filedir); CHECKI(error, GD_E_OK); return r; }
/* PoolCheck -- validate the invariants of a generic pool instance.
 *
 * Returns TRUE if every check passes; the CHECK* macros fault on the
 * first violated invariant.
 */
Bool PoolCheck(Pool pool)
{
  PoolClass klass;

  /* Checks ordered as per struct decl in <code/mpmst.h#pool> */
  CHECKS(Pool, pool);
  CHECKC(AbstractPool, pool);
  /* Break modularity for checking efficiency */
  CHECKL(pool->serial < ArenaGlobals(pool->arena)->poolSerial);
  klass = ClassOfPoly(Pool, pool);
  CHECKD(PoolClass, klass);
  /* CHECKU: arena is owned elsewhere, so only an unsigned (shallow)
     check is done here. */
  CHECKU(Arena, pool->arena);
  CHECKD_NOSIG(Ring, &pool->arenaRing);
  CHECKD_NOSIG(Ring, &pool->bufferRing);
  /* Cannot check pool->bufferSerial */
  CHECKD_NOSIG(Ring, &pool->segRing);
  CHECKL(AlignCheck(pool->alignment));
  /* Normally pool->format iff PoolHasAttr(pool, AttrFMT), but during
     pool initialization the class may not yet be set. */
  CHECKL(!PoolHasAttr(pool, AttrFMT) || pool->format != NULL);
  return TRUE;
}
/* BufferCheck -- validate the invariants of an allocation buffer.
 *
 * A buffer is in exactly one of three states: in transition (nothing
 * checkable), reset/detached (all region fields must be zero), or
 * attached to a region (ordering and alignment invariants must hold).
 * Returns TRUE if every check passes; the CHECK* macros fault on the
 * first violated invariant.
 */
Bool BufferCheck(Buffer buffer)
{
  CHECKS(Buffer, buffer);
  CHECKC(Buffer, buffer);
  CHECKL(buffer->serial < buffer->pool->bufferSerial); /* .trans.mod */
  CHECKU(Arena, buffer->arena);
  CHECKU(Pool, buffer->pool);
  CHECKL(buffer->arena == buffer->pool->arena);
  CHECKD_NOSIG(Ring, &buffer->poolRing);
  CHECKL(BoolCheck(buffer->isMutator));
  /* Fill/empty accounting is monotone: emptied never exceeds filled. */
  CHECKL(buffer->fillSize >= 0.0);
  CHECKL(buffer->emptySize >= 0.0);
  CHECKL(buffer->emptySize <= buffer->fillSize);
  /* The buffer inherits its alignment from the owning pool. */
  CHECKL(buffer->alignment == buffer->pool->alignment);
  CHECKL(AlignCheck(buffer->alignment));

  /* If any of the buffer's fields indicate that it is reset, make */
  /* sure it is really reset. Otherwise, check various properties */
  /* of the non-reset fields. */
  if (buffer->mode & BufferModeTRANSITION) {
    /* nothing to check */
  } else if ((buffer->mode & BufferModeATTACHED) == 0
             || buffer->base == (Addr)0
             || buffer->ap_s.init == (Addr)0
             || buffer->ap_s.alloc == (Addr)0
             || buffer->poolLimit == (Addr)0) {
    /* Reset state: every region field must be zero together. */
    CHECKL((buffer->mode & BufferModeATTACHED) == 0);
    CHECKL(buffer->base == (Addr)0);
    CHECKL(buffer->initAtFlip == (Addr)0);
    CHECKL(buffer->ap_s.init == (Addr)0);
    CHECKL(buffer->ap_s.alloc == (Addr)0);
    CHECKL(buffer->ap_s.limit == (Addr)0);
    /* Nothing reliable to check for lightweight frame state */
    CHECKL(buffer->poolLimit == (Addr)0);
  } else {
    /* The buffer is attached to a region of memory. */
    /* Check consistency. */
    CHECKL(buffer->mode & BufferModeATTACHED);

    /* These fields should obey the ordering */
    /* base <= init <= alloc <= poolLimit */
    CHECKL((mps_addr_t)buffer->base <= buffer->ap_s.init);
    CHECKL(buffer->ap_s.init <= buffer->ap_s.alloc);
    CHECKL(buffer->ap_s.alloc <= (mps_addr_t)buffer->poolLimit);

    /* Check that the fields are aligned to the buffer alignment. */
    CHECKL(AddrIsAligned(buffer->base, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->initAtFlip, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.init, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.alloc, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.limit, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->poolLimit, buffer->alignment));

    /* If the buffer isn't trapped then "limit" should be the limit */
    /* set by the owning pool. Otherwise, "init" is either at the */
    /* same place it was at flip (.commit.before) or has been set */
    /* to "alloc" (.commit.after). Also, when the buffer is */
    /* flipped, initAtFlip should hold the init at flip, which is */
    /* between the base and current init. Otherwise, initAtFlip */
    /* is kept at zero to avoid misuse (see */
    /* request.dylan.170429.sol.zero_). */
    /* .. _request.dylan.170429.sol.zero: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170429 */
    if (BufferIsTrapped(buffer)) {
      /* .check.use-trapped: This checking function uses BufferIsTrapped, */
      /* So BufferIsTrapped can't do checking as that would cause an */
      /* infinite loop. */
      if (buffer->mode & BufferModeFLIPPED) {
        CHECKL(buffer->ap_s.init == buffer->initAtFlip
               || buffer->ap_s.init == buffer->ap_s.alloc);
        CHECKL(buffer->base <= buffer->initAtFlip);
        CHECKL(buffer->initAtFlip <= (Addr)buffer->ap_s.init);
      }
      /* Nothing special to check in the logged mode. */
    } else {
      CHECKL(buffer->initAtFlip == (Addr)0);
    }
  }
  return TRUE;
}