Bits CPU_Core_Normal_Run(void) {
    while (CPU_Cycles-- > 0) {
        LOADIP;
        core.opcode_index = cpu.code.big*0x200;
        core.prefixes = cpu.code.big;
        core.ea_table = &EATable[cpu.code.big*256];
        BaseDS = SegBase(ds);
        BaseSS = SegBase(ss);
        core.base_val_ds = ds;
restart_opcode:
        // lastOpcode = core.opcode_index+Fetchb();
        // switch (lastOpcode)
        switch (core.opcode_index+Fetchb()) {
        #include "core_normal/prefix_none.h"
        #include "core_normal/prefix_0f.h"
        #include "core_normal/prefix_66.h"
        #include "core_normal/prefix_66_0f.h"
        default:
        illegal_opcode:
            CPU_Exception(6, 0);
            continue;
        }
        SAVEIP;
    }
    FillFlags();
    return CBRET_NONE;
decode_end:
    SAVEIP;
    FillFlags();
    return CBRET_NONE;
}
/* MVFFAddSeg -- Allocates a new segment from the arena
 *
 * Allocates a new segment from the arena (with the given
 * withReservoirPermit flag) of at least the specified size.  The
 * specified size should be pool-aligned.  Adds it to the free list.
 */
static Res MVFFAddSeg(Seg *segReturn,
                      MVFF mvff, Size size, Bool withReservoirPermit)
{
  Pool pool;
  Arena arena;
  Size segSize;
  Seg seg;
  Res res;
  Align align;
  Addr base, limit;

  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVERT(Bool, withReservoirPermit);

  pool = MVFF2Pool(mvff);
  arena = PoolArena(pool);
  align = ArenaAlign(arena);

  AVER(SizeIsAligned(size, PoolAlignment(pool)));

  /* Use extendBy unless it's too small (see */
  /* <design/poolmvff/#design.seg-size>). */
  if (size <= mvff->extendBy)
    segSize = mvff->extendBy;
  else
    segSize = size;

  segSize = SizeAlignUp(segSize, align);

  res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                 withReservoirPermit, argsNone);
  if (res != ResOK) {
    /* try again for a seg just large enough for object */
    /* see <design/poolmvff/#design.seg-fail> */
    segSize = SizeAlignUp(size, align);
    res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                   withReservoirPermit, argsNone);
    if (res != ResOK) {
      return res;
    }
  }

  mvff->total += segSize;

  base = SegBase(seg);
  limit = AddrAdd(base, segSize);
  DebugPoolFreeSplat(pool, base, limit);
  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  AVER(base <= SegBase(seg));
  if (mvff->minSegSize > segSize)
    mvff->minSegSize = segSize;

  /* Don't call MVFFFreeSegs; that would be silly. */

  *segReturn = seg;
  return ResOK;
}
static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  Index baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);

  if (amsseg->allocTableInUse) {
    Index limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
    return BTIsResRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    return amsseg->firstFree <= baseIndex;
  }
}
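/* Illustrative sketch (not from the original source): PoolIndexOfAddr is
 * used above but not defined in this listing.  Assuming a grain is
 * PoolAlignment(pool) bytes, it presumably maps an address within a
 * segment to a grain index roughly as follows.  The helper name below is
 * hypothetical. */
static Index ExamplePoolIndexOfAddr(Addr segbase, Pool pool, Addr p)
{
  /* offset of p from the segment base, measured in pool grains */
  return AddrOffset(segbase, p) / PoolAlignment(pool);
}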
static Compare shieldQueueEntryCompare(void *left, void *right, void *closure)
{
  Seg segA = left, segB = right;

  /* These checks are not critical in a hot build, but slow down cool
     builds quite a bit, so just check the signatures. */
  AVER(TESTT(Seg, segA));
  AVER(TESTT(Seg, segB));
  UNUSED(closure);

  return shieldAddrCompare(SegBase(segA), SegBase(segB));
}
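/* Illustrative sketch (not from the original source): shieldAddrCompare
 * is called above but not defined in this listing.  It presumably
 * performs a three-way address comparison yielding the Compare values
 * consumed by the QuickSort call in shieldFlushEntries below.  The
 * helper name is hypothetical. */
static Compare ExampleShieldAddrCompare(Addr left, Addr right)
{
  if (left < right)
    return CompareLESS;
  else if (left == right)
    return CompareEQUAL;
  else
    return CompareGREATER;
}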
Bits CPU_Core_Simple_Run(void) {
    while (CPU_Cycles-->0) {
        LOADIP;
        core.opcode_index=cpu.code.big*0x200;
        core.prefixes=cpu.code.big;
        core.ea_table=&EATable[cpu.code.big*256];
        BaseDS=SegBase(ds);
        BaseSS=SegBase(ss);
        core.base_val_ds=ds;
#if C_DEBUG
#if C_HEAVY_DEBUG
        if (DEBUG_HeavyIsBreakpoint()) {
            FillFlags();
            return debugCallback;
        };
#endif
#endif
        cycle_count++;
restart_opcode:
        switch (core.opcode_index+Fetchb()) {
        #include "core_normal/prefix_none.h"
        #include "core_normal/prefix_0f.h"
        #include "core_normal/prefix_66.h"
        #include "core_normal/prefix_66_0f.h"
        default:
        illegal_opcode:
#if C_DEBUG
            {
                Bitu len=(GETIP-reg_eip);
                LOADIP;
                if (len>16) len=16;
                char tempcode[16*2+1];char * writecode=tempcode;
                for (;len>0;len--) {
                    sprintf(writecode,"%02X",mem_readb(core.cseip++));
                    writecode+=2;
                }
                LOG(LOG_CPU,LOG_NORMAL)("Illegal/Unhandled opcode %s",tempcode);
            }
#endif
            CPU_Exception(6,0);
            continue;
        }
        SAVEIP;
    }
    FillFlags();
    return CBRET_NONE;
decode_end:
    SAVEIP;
    FillFlags();
    return CBRET_NONE;
}
/* MVFFFreeSegs -- Free segments from given range
 *
 * Given a free range, attempts to find entire segments within
 * it, and returns them to the arena, updating total size counter.
 *
 * This is usually called immediately after MVFFAddToFreeList.
 * It is not combined with MVFFAddToFreeList because the latter
 * is also called when new segments are added under MVFFAlloc.
 */
static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
{
  Seg seg = NULL;       /* suppress "may be used uninitialized" */
  Arena arena;
  Bool b;
  Addr segLimit;  /* limit of the current segment when iterating */
  Addr segBase;   /* base of the current segment when iterating */
  Res res;

  AVERT(MVFF, mvff);
  AVER(base < limit);
  /* Could profitably AVER that the given range is free, */
  /* but the CBS doesn't provide that facility. */

  if (AddrOffset(base, limit) < mvff->minSegSize)
    return; /* not large enough for entire segments */

  arena = PoolArena(MVFF2Pool(mvff));
  b = SegOfAddr(&seg, arena, base);
  AVER(b);

  segBase = SegBase(seg);
  segLimit = SegLimit(seg);

  while(segLimit <= limit) { /* segment ends in range */
    if (segBase >= base) { /* segment starts in range */
      /* Must remove from free list first, in case free list */
      /* is using inline data structures. */
      res = CBSDelete(CBSOfMVFF(mvff), segBase, segLimit);
      AVER(res == ResOK);
      mvff->free -= AddrOffset(segBase, segLimit);
      mvff->total -= AddrOffset(segBase, segLimit);
      SegFree(seg);
    }

    /* Avoid calling SegNext if the next segment would fail */
    /* the loop test, mainly because there might not be a */
    /* next segment. */
    if (segLimit == limit) /* segment ends at end of range */
      break;

    b = SegNext(&seg, arena, segBase);
    AVER(b);
    segBase = SegBase(seg);
    segLimit = SegLimit(seg);
  }

  return;
}
static void segBufAttach(Buffer buffer, Addr base, Addr limit,
                         Addr init, Size size)
{
  SegBuf segbuf = MustBeA(SegBuf, buffer);
  Seg seg = NULL;       /* suppress "may be used uninitialized" */
  Arena arena;
  Bool found;

  /* Other parameters are consistency checked in BufferAttach */
  UNUSED(init);
  UNUSED(size);

  arena = BufferArena(buffer);
  found = SegOfAddr(&seg, arena, base);
  AVER(found);
  AVER(segbuf->seg == NULL);
  AVER(!SegHasBuffer(seg));
  AVER(SegBase(seg) <= base);
  AVER(limit <= SegLimit(seg));

  /* attach the buffer to the segment */
  SegSetBuffer(seg, buffer);
  segbuf->seg = seg;

  AVERT(SegBuf, segbuf);
}
void ArenaRestoreProtection(Globals globals)
{
  Ring node, next;
  Arena arena;

  arena = GlobalsArena(globals);

  RING_FOR(node, GlobalsRememberedSummaryRing(globals), next) {
    RememberedSummaryBlock block =
      RING_ELT(RememberedSummaryBlock, globalRing, node);
    size_t i;

    for(i = 0; i < RememberedSummaryBLOCK; ++ i) {
      Seg seg;
      Bool b;

      if(block->the[i].base == (Addr)0) {
        AVER(block->the[i].summary == RefSetUNIV);
        continue;
      }
      b = SegOfAddr(&seg, arena, block->the[i].base);
      if(b && SegBase(seg) == block->the[i].base) {
        AVER(IsA(GCSeg, seg));
        SegSetSummary(seg, block->the[i].summary);
      } else {
        /* Either seg has gone or moved, both of which are */
        /* client errors. */
        NOTREACHED;
      }
    }
  }
}
static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size,
                     Bool withReservoirPermit, DebugInfo info)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(aReturn != NULL);
  AVER(size > 0);
  AVER(BoolCheck(withReservoirPermit));
  UNUSED(info);

  size = SizeAlignUp(size, PoolAlignment(pool));

  foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);
  if (!foundBlock) {
    Seg seg;

    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);

    /* We know that the found range must intersect the new segment. */
    /* In particular, it doesn't necessarily lie entirely within it. */
    /* The next three AVERs test for intersection of two intervals. */
    AVER(base >= SegBase(seg) || limit <= SegLimit(seg));
    AVER(base < SegLimit(seg));
    AVER(SegBase(seg) < limit);

    /* We also know that the found range is no larger than the segment. */
    AVER(SegSize(seg) >= AddrOffset(base, limit));
  }
  AVER(foundBlock);
  AVER(AddrOffset(base, limit) == size);

  *aReturn = base;

  return ResOK;
}
static void shieldSync(Shield shield, Seg seg)
{
  SHIELD_AVERT_CRITICAL(Seg, seg);

  if (!SegIsSynced(seg)) {
    shieldSetPM(shield, seg, SegSM(seg));
    ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
  }
}
Res PoolAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
{
  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVERT(Seg, seg);
  AVER(pool == SegPool(seg));
  AVER(SegBase(seg) <= addr);
  AVER(addr < SegLimit(seg));
  return Method(Pool, pool, addrObject)(pReturn, pool, seg, addr);
}
void BufferReassignSeg(Buffer buffer, Seg seg)
{
  AVERT(Buffer, buffer);
  AVERT(Seg, seg);
  AVER(!BufferIsReset(buffer));
  AVER(BufferBase(buffer) >= SegBase(seg));
  AVER(BufferLimit(buffer) <= SegLimit(seg));
  AVER(BufferPool(buffer) == SegPool(seg));
  Method(Buffer, buffer, reassignSeg)(buffer, seg);
}
static void shieldSync(Arena arena, Seg seg)
{
  AVERT(Arena, arena);
  AVERT(Seg, seg);

  if (SegPM(seg) != SegSM(seg)) {
    ProtSet(SegBase(seg), SegLimit(seg), SegSM(seg));
    SegSetPM(seg, SegSM(seg));  /* inv.prot.shield */
  }
}
static void shieldProtLower(Shield shield, Seg seg, AccessSet mode)
{
  /* <design/trace/#fix.noaver> */
  SHIELD_AVERT_CRITICAL(Seg, seg);
  AVERT_CRITICAL(AccessSet, mode);

  if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY) {
    shieldSetPM(shield, seg, BS_DIFF(SegPM(seg), mode));
    ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
  }
}
Res PoolAccess(Pool pool, Seg seg, Addr addr,
               AccessSet mode, MutatorFaultContext context)
{
  AVERT(Pool, pool);
  AVERT(Seg, seg);
  AVER(SegBase(seg) <= addr);
  AVER(addr < SegLimit(seg));
  AVERT(AccessSet, mode);
  /* Can't check MutatorFaultContext as there is no check method */

  return Method(Pool, pool, access)(pool, seg, addr, mode, context);
}
/* This ensures actual prot mode does not include mode */
static void protLower(Arena arena, Seg seg, AccessSet mode)
{
  /* <design/trace/#fix.noaver> */
  AVERT_CRITICAL(Arena, arena);
  UNUSED(arena);
  AVERT_CRITICAL(Seg, seg);

  if (SegPM(seg) & mode) {
    SegSetPM(seg, SegPM(seg) & ~mode);
    ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
  }
}
static void shieldFlushEntries(Shield shield)
{
  Addr base = NULL, limit;
  AccessSet mode;
  Index i;

  if (shield->length == 0) {
    AVER(shield->queue == NULL);
    return;
  }

  QuickSort((void *)shield->queue, shield->limit,
            shieldQueueEntryCompare, UNUSED_POINTER,
            &shield->sortStruct);

  mode = AccessSetEMPTY;
  limit = NULL;
  for (i = 0; i < shield->limit; ++i) {
    Seg seg = shieldDequeue(shield, i);
    if (!SegIsSynced(seg)) {
      shieldSetPM(shield, seg, SegSM(seg));
      if (SegSM(seg) != mode || SegBase(seg) != limit) {
        if (base != NULL) {
          AVER(base < limit);
          ProtSet(base, limit, mode);
        }
        base = SegBase(seg);
        mode = SegSM(seg);
      }
      limit = SegLimit(seg);
    }
  }
  if (base != NULL) {
    AVER(base < limit);
    ProtSet(base, limit, mode);
  }

  shieldQueueReset(shield);
}
/* AMSUnallocateRange -- set a range to be unallocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to amsSegBufferEmpty.
 */
static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count unallocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's allocated */
    AVER(BTIsSetRange(amsseg->allocTable, baseIndex, limitIndex));
    BTResRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's allocated */
    AVER(limitIndex <= amsseg->firstFree);
    if (limitIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = baseIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTResRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  unallocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->bufferedGrains >= unallocatedGrains);
  amsseg->freeGrains += unallocatedGrains;
  amsseg->bufferedGrains -= unallocatedGrains;
  PoolGenAccountForEmpty(ams->pgen, 0,
                         PoolGrainsSize(AMSPool(ams), unallocatedGrains),
                         FALSE);
}
/* AMSAllocateRange -- set a range to be allocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to AMSUnallocateRange.
 */
static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count allocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's not allocated */
    AVER(BTIsResRange(amsseg->allocTable, baseIndex, limitIndex));
    BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's not allocated */
    AVER(baseIndex >= amsseg->firstFree);
    if (baseIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = limitIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  allocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->freeGrains >= allocatedGrains);
  amsseg->freeGrains -= allocatedGrains;
  amsseg->bufferedGrains += allocatedGrains;
  PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit));
}
Res SegAlloc(Seg *segReturn, SegClass klass, LocusPref pref,
             Size size, Pool pool, ArgList args)
{
  Res res;
  Arena arena;
  Seg seg;
  Addr base;
  void *p;

  AVER(segReturn != NULL);
  AVERT(SegClass, klass);
  AVERT(LocusPref, pref);
  AVER(size > (Size)0);
  AVERT(Pool, pool);

  arena = PoolArena(pool);
  AVERT(Arena, arena);
  AVER(SizeIsArenaGrains(size, arena));

  /* allocate the memory from the arena */
  res = ArenaAlloc(&base, pref, size, pool);
  if (res != ResOK)
    goto failArena;

  /* allocate the segment object from the control pool */
  res = ControlAlloc(&p, arena, klass->size);
  if (res != ResOK)
    goto failControl;
  seg = p;

  res = SegInit(seg, klass, pool, base, size, args);
  if (res != ResOK)
    goto failInit;

  EVENT5(SegAlloc, arena, seg, SegBase(seg), size, pool);
  *segReturn = seg;
  return ResOK;

failInit:
  ControlFree(arena, seg, klass->size);
failControl:
  ArenaFree(base, size, pool);
failArena:
  EVENT3(SegAllocFail, arena, size, pool);
  return res;
}
void SegFree(Seg seg)
{
  Arena arena;
  Pool pool;
  Addr base;
  Size size, structSize;

  AVERT(Seg, seg);
  pool = SegPool(seg);
  AVERT(Pool, pool);
  arena = PoolArena(pool);
  AVERT(Arena, arena);
  base = SegBase(seg);
  size = SegSize(seg);
  structSize = ClassOfPoly(Seg, seg)->size;

  SegFinish(seg);
  ControlFree(arena, seg, structSize);
  ArenaFree(base, size, pool);

  EVENT2(SegFree, arena, seg);
}
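/* Illustrative sketch (not from the original source): SegAlloc and
 * SegFree form a simple round trip.  This assumes the caller supplies a
 * size that satisfies SizeIsArenaGrains, and it reuses SegClassGet() and
 * argsNone from the listing above; the function name is hypothetical and
 * error handling is minimal. */
static Res ExampleSegRoundTrip(Pool pool, LocusPref pref, Size size)
{
  Seg seg;
  Res res;

  res = SegAlloc(&seg, SegClassGet(), pref, size, pool, argsNone);
  if (res != ResOK)
    return res;

  /* The segment's memory is the half-open range */
  /* [SegBase(seg), SegLimit(seg)). */

  SegFree(seg);
  return ResOK;
}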
/* ArenaExposeRemember -- park arena and then lift all protection
   barriers.  Parameter 'remember' specifies whether to remember the
   protection state or not (for later restoration with
   ArenaRestoreProtection). */
void ArenaExposeRemember(Globals globals, Bool remember)
{
  Seg seg;
  Arena arena;

  AVERT(Globals, globals);
  AVERT(Bool, remember);

  ArenaPark(globals);

  arena = GlobalsArena(globals);
  if(SegFirst(&seg, arena)) {
    Addr base;
    do {
      base = SegBase(seg);
      if (IsA(GCSeg, seg)) {
        if(remember) {
          RefSet summary;

          summary = SegSummary(seg);
          if(summary != RefSetUNIV) {
            Res res = arenaRememberSummaryOne(globals, base, summary);
            if(res != ResOK) {
              /* If we got an error then stop trying to remember any
                 protections. */
              remember = 0;
            }
          }
        }
        SegSetSummary(seg, RefSetUNIV);
        AVER(SegSM(seg) == AccessSetEMPTY);
      }
    } while(SegNext(&seg, arena, seg));
  }
}
Bits CPU_Core286_Normal_Run(void) {
    if (CPU_Cycles <= 0)
        return CBRET_NONE;

    while (CPU_Cycles-->0) {
        LOADIP;
        core.prefixes=0;
        core.opcode_index=0;
        core.ea_table=&EATable[0];
        BaseDS=SegBase(ds);
        BaseSS=SegBase(ss);
        core.base_val_ds=ds;
#if C_DEBUG
#if C_HEAVY_DEBUG
        if (DEBUG_HeavyIsBreakpoint()) {
            FillFlags();
            return (Bits)debugCallback;
        };
#endif
#endif
        cycle_count++;
restart_opcode:
        switch (core.opcode_index+Fetchb()) {
        #include "core_normal/prefix_none.h"
        #include "core_normal/prefix_0f.h"
        default:
        illegal_opcode:
#if C_DEBUG
            {
                bool ignore=false;
                Bitu len=(GETIP-reg_eip);
                LOADIP;
                if (len>16) len=16;
                char tempcode[16*2+1];char * writecode=tempcode;
                if (ignore_opcode_63 && mem_readb(core.cseip) == 0x63)
                    ignore = true;
                for (;len>0;len--) {
                    sprintf(writecode,"%02X",mem_readb(core.cseip++));
                    writecode+=2;
                }
                if (!ignore)
                    LOG(LOG_CPU,LOG_NORMAL)("Illegal/Unhandled opcode %s",tempcode);
            }
#endif
            CPU_Exception(6,0);
            continue;
        gp_fault:
            CPU_Exception(EXCEPTION_GP,0);
            continue;
        }
        SAVEIP;
    }
    FillFlags();
    return CBRET_NONE;

    /* 8086/286 multiple-prefix interrupt bug emulation.
     * If a multi-prefix instruction is interrupted, only the last prefix
     * is restored when the instruction restarts.
     * See also [https://www.pcjs.org/pubs/pc/reference/intel/8086/]
     * and [https://www.youtube.com/watch?v=6FC-tcwMBnU] */
prefix_out:
    SAVEIP_PREFIX;
    FillFlags();
    return CBRET_NONE;
decode_end:
    SAVEIP;
    FillFlags();
    return CBRET_NONE;
}
/* MVFFFreeSegs -- Free segments from given range
 *
 * Given a free range, attempts to find entire segments within
 * it, and returns them to the arena, updating total size counter.
 *
 * This is usually called immediately after MVFFAddToFreeList.
 * It is not combined with MVFFAddToFreeList because the latter
 * is also called when new segments are added under MVFFAlloc.
 */
static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
{
  Seg seg = NULL;       /* suppress "may be used uninitialized" */
  Arena arena;
  Bool b;
  Addr segLimit;  /* limit of the current segment when iterating */
  Addr segBase;   /* base of the current segment when iterating */
  Res res;

  AVERT(MVFF, mvff);
  AVER(base < limit);
  /* Could profitably AVER that the given range is free, */
  /* but the CBS doesn't provide that facility. */

  if (AddrOffset(base, limit) < mvff->minSegSize)
    return; /* not large enough for entire segments */

  arena = PoolArena(MVFF2Pool(mvff));
  b = SegOfAddr(&seg, arena, base);
  AVER(b);

  segBase = SegBase(seg);
  segLimit = SegLimit(seg);

  while(segLimit <= limit) { /* segment ends in range */
    if (segBase >= base) { /* segment starts in range */
      RangeStruct range, oldRange;
      RangeInit(&range, segBase, segLimit);

      res = CBSDelete(&oldRange, CBSOfMVFF(mvff), &range);
      if (res == ResOK) {
        mvff->free -= RangeSize(&range);
      } else if (ResIsAllocFailure(res)) {
        /* CBS ran out of memory for splay nodes, which must mean that
         * there were fragments on both sides: see
         * <design/cbs/#function.cbs.delete.fail>.  Handle this by
         * deleting the whole of oldRange (which requires no
         * allocation) and re-inserting the fragments. */
        RangeStruct oldRange2;
        res = CBSDelete(&oldRange2, CBSOfMVFF(mvff), &oldRange);
        AVER(res == ResOK);
        AVER(RangesEqual(&oldRange2, &oldRange));
        mvff->free -= RangeSize(&oldRange);
        AVER(RangeBase(&oldRange) != segBase);
        {
          Addr leftBase = RangeBase(&oldRange);
          Addr leftLimit = segBase;
          res = MVFFAddToFreeList(&leftBase, &leftLimit, mvff);
        }
        AVER(RangeLimit(&oldRange) != segLimit);
        {
          Addr rightBase = segLimit;
          Addr rightLimit = RangeLimit(&oldRange);
          res = MVFFAddToFreeList(&rightBase, &rightLimit, mvff);
        }
      } else if (res == ResFAIL) {
        /* Not found in the CBS: must be found in the Freelist. */
        res = FreelistDelete(&oldRange, FreelistOfMVFF(mvff), &range);
        AVER(res == ResOK);
        mvff->free -= RangeSize(&range);
      }

      AVER(res == ResOK);
      AVER(RangesNest(&oldRange, &range));

      /* Can't free the segment earlier, because if it was on the
       * Freelist rather than the CBS then it likely contains data
       * that needs to be read in order to update the Freelist. */
      SegFree(seg);
      mvff->total -= RangeSize(&range);
    }

    /* Avoid calling SegNext if the next segment would fail */
    /* the loop test, mainly because there might not be a */
    /* next segment. */
    if (segLimit == limit) /* segment ends at end of range */
      break;

    b = SegFindAboveAddr(&seg, arena, segBase);
    AVER(b);
    segBase = SegBase(seg);
    segLimit = SegLimit(seg);
  }

  return;
}
Bits CPU_Core_Prefetch_Run(void) {
    bool invalidate_pq=false;
    while (CPU_Cycles-->0) {
        if (invalidate_pq) {
            pq_valid=false;
            invalidate_pq=false;
        }

        LOADIP;
        core.opcode_index=cpu.code.big*0x200;
        core.prefixes=cpu.code.big;
        core.ea_table=&EATable[cpu.code.big*256];
        BaseDS=SegBase(ds);
        BaseSS=SegBase(ss);
        core.base_val_ds=ds;
#if C_DEBUG
#if C_HEAVY_DEBUG
        if (DEBUG_HeavyIsBreakpoint()) {
            FillFlags();
            return debugCallback;
        };
#endif
        cycle_count++;
#endif
restart_opcode:
        Bit8u next_opcode=Fetchb();
        invalidate_pq=false;
        if (core.opcode_index&OPCODE_0F)
            invalidate_pq=true;
        else switch (next_opcode) {
            case 0x70: case 0x71: case 0x72: case 0x73:
            case 0x74: case 0x75: case 0x76: case 0x77:
            case 0x78: case 0x79: case 0x7a: case 0x7b:
            case 0x7c: case 0x7d: case 0x7e: case 0x7f: // jcc
            case 0x9a:                                  // call
            case 0xc2: case 0xc3:                       // retn
            case 0xc8:                                  // enter
            case 0xc9:                                  // leave
            case 0xca: case 0xcb:                       // retf
            case 0xcc:                                  // int3
            case 0xcd:                                  // int
            case 0xce:                                  // into
            case 0xcf:                                  // iret
            case 0xe0:                                  // loopnz
            case 0xe1:                                  // loopz
            case 0xe2:                                  // loop
            case 0xe3:                                  // jcxz
            case 0xe8:                                  // call
            case 0xe9: case 0xea: case 0xeb:            // jmp
            case 0xff:
                invalidate_pq=true;
                break;
            default:
                break;
        }
        switch (core.opcode_index+next_opcode) {
        #include "core_normal/prefix_none.h"
        #include "core_normal/prefix_0f.h"
        #include "core_normal/prefix_66.h"
        #include "core_normal/prefix_66_0f.h"
        default:
        illegal_opcode:
#if C_DEBUG
            {
                bool ignore=false;
                Bitu len=(GETIP-reg_eip);
                LOADIP;
                if (len>16) len=16;
                char tempcode[16*2+1];char * writecode=tempcode;
                if (ignore_opcode_63 && mem_readb(core.cseip) == 0x63)
                    ignore = true;
                for (;len>0;len--) {
                    sprintf(writecode,"%02X",mem_readb(core.cseip++));
                    writecode+=2;
                }
                if (!ignore)
                    LOG(LOG_CPU,LOG_NORMAL)("Illegal/Unhandled opcode %s",tempcode);
            }
#endif
            CPU_Exception(6,0);
            invalidate_pq=true;
            continue;
        }
        SAVEIP;
    }
    FillFlags();
    return CBRET_NONE;
decode_end:
    SAVEIP;
    FillFlags();
    return CBRET_NONE;
}
/* AMSTBufferFill -- the pool class buffer fill method
 *
 * Calls the next method, but possibly splits or merges the chosen
 * segment.
 *
 * .merge: A merge is performed when the next method returns the
 * entire segment, that segment had previously been split from the
 * segment below, and the segment below is appropriately similar
 * (i.e. not already attached to a buffer and similarly coloured).
 *
 * .split: If we're not merging, a split is performed if the next method
 * returns the entire segment and yet the lower half of the segment would
 * meet the request.
 */
static Res AMSTBufferFill(Addr *baseReturn, Addr *limitReturn,
                          Pool pool, Buffer buffer, Size size)
{
  Addr base, limit;
  Arena arena;
  AMS ams;
  AMST amst;
  Bool b;
  Seg seg;
  AMSTSeg amstseg;
  Res res;

  AVERT(Pool, pool);
  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  /* other parameters are checked by next method */
  arena = PoolArena(pool);
  ams = PoolAMS(pool);
  amst = PoolAMST(pool);

  /* call next method */
  res = NextMethod(Pool, AMSTPool, bufferFill)(&base, &limit,
                                               pool, buffer, size);
  if (res != ResOK)
    return res;

  b = SegOfAddr(&seg, arena, base);
  AVER(b);
  amstseg = Seg2AMSTSeg(seg);

  if (SegLimit(seg) == limit && SegBase(seg) == base) {
    if (amstseg->prev != NULL) {
      Seg segLo = AMSTSeg2Seg(amstseg->prev);
      if (!SegHasBuffer(segLo)
          && SegGrey(segLo) == SegGrey(seg)
          && SegWhite(segLo) == SegWhite(seg)) {
        /* .merge */
        Seg mergedSeg;
        Res mres;

        AMSUnallocateRange(ams, seg, base, limit);
        mres = SegMerge(&mergedSeg, segLo, seg);
        if (ResOK == mres) { /* successful merge */
          AMSAllocateRange(ams, mergedSeg, base, limit);
          /* leave range as-is */
        } else {             /* failed to merge */
          AVER(amst->failSegs); /* deliberate fails only */
          AMSAllocateRange(ams, seg, base, limit);
        }
      }
    } else {
      Size half = SegSize(seg) / 2;
      if (half >= size && SizeIsArenaGrains(half, arena)) {
        /* .split */
        Addr mid = AddrAdd(base, half);
        Seg segLo, segHi;
        Res sres;

        AMSUnallocateRange(ams, seg, mid, limit);
        sres = SegSplit(&segLo, &segHi, seg, mid);
        if (ResOK == sres) { /* successful split */
          limit = mid;  /* range is lower segment */
        } else {             /* failed to split */
          AVER(amst->failSegs); /* deliberate fails only */
          AMSAllocateRange(ams, seg, mid, limit);
        }
      }
    }
  }

  *baseReturn = base;
  *limitReturn = limit;
  return ResOK;
}
Bits CPU_Core_Normal_Run(void) {
    while (CPU_Cycles-->0) {
        LOADIP;
        dosbox_check_nonrecursive_pf_cs = SegValue(cs);
        dosbox_check_nonrecursive_pf_eip = reg_eip;
        core.opcode_index=cpu.code.big*0x200;
        core.prefixes=cpu.code.big;
        core.ea_table=&EATable[cpu.code.big*256];
        BaseDS=SegBase(ds);
        BaseSS=SegBase(ss);
        core.base_val_ds=ds;
#if C_DEBUG
#if C_HEAVY_DEBUG
        if (DEBUG_HeavyIsBreakpoint()) {
            FillFlags();
            return debugCallback;
        };
#endif
#endif
        cycle_count++;
restart_opcode:
        switch (core.opcode_index+Fetchb()) {
        #include "core_normal/prefix_none.h"
        #include "core_normal/prefix_0f.h"
        #include "core_normal/prefix_66.h"
        #include "core_normal/prefix_66_0f.h"
        default:
        illegal_opcode:
#if C_DEBUG
            {
                bool ignore=false;
                Bitu len=(GETIP-reg_eip);
                LOADIP;
                if (len>16) len=16;
                char tempcode[16*2+1];char * writecode=tempcode;
                if (ignore_opcode_63 && mem_readb(core.cseip) == 0x63)
                    ignore = true;
                for (;len>0;len--) {
                    sprintf(writecode,"%02X",mem_readb(core.cseip++));
                    writecode+=2;
                }
                if (!ignore)
                    LOG(LOG_CPU,LOG_NORMAL)("Illegal/Unhandled opcode %s",tempcode);
            }
#endif
            CPU_Exception(6,0);
            continue;
        gp_fault:
            LOG_MSG("Segment limit violation");
            CPU_Exception(EXCEPTION_GP,0);
            continue;
        }
        SAVEIP;
    }
    FillFlags();
    return CBRET_NONE;
decode_end:
    SAVEIP;
    FillFlags();
    return CBRET_NONE;
}