/* Generate code for an op-assign (e.g. +=, -=, *=, /=) whose operands live
 * in XMM registers: evaluate the rvalue e2 into an XMM register, combine it
 * into e1, and store the result back unless e1 is itself a register variable.
 * e        - the op-assign expression node
 * pretregs - in: desired result registers; updated by fixresult()
 * Returns the generated code list.
 */
code *xmmopass(elem *e,regm_t *pretregs)
{   elem *e1 = e->E1;
    elem *e2 = e->E2;
    tym_t ty1 = tybasic(e1->Ety);
    unsigned sz1 = tysize[ty1];
    // Evaluate the rvalue into an XMM reg disjoint from the requested result
    // regs so the later load of e1 can't clobber it.
    regm_t rretregs = XMMREGS & ~*pretregs;
    if (!rretregs)
        rretregs = XMMREGS;

    code *cr = codelem(e2,&rretregs,FALSE); // eval right leaf
    unsigned rreg = findreg(rretregs);

    code cs;
    code *cl,*cg;

    regm_t retregs;
    unsigned reg;
    bool regvar = FALSE;
    if (config.flags4 & CFG4optimized)
    {
        // Be careful of cases like (x = x+x+x). We cannot evaluate in
        // x if x is in a register.
        unsigned varreg;
        regm_t varregm;
        if (isregvar(e1,&varregm,&varreg) &&    // if lvalue is register variable
            doinreg(e1->EV.sp.Vsym,e2)          // and we can compute directly into it
           )
        {   regvar = TRUE;
            retregs = varregm;
            reg = varreg;               // evaluate directly in target register
            cl = NULL;
            cg = getregs(retregs);      // destroy these regs
        }
    }

    if (!regvar)
    {
        // lvalue is in memory: form its EA and load it into a fresh XMM reg.
        cl = getlvalue(&cs,e1,rretregs);        // get EA
        retregs = *pretregs & XMMREGS & ~rretregs;
        if (!retregs)
            retregs = XMMREGS & ~rretregs;
        cg = allocreg(&retregs,&reg,ty1);
        cs.Iop = xmmload(ty1);                  // MOVSD xmm,xmm_m64
        code_newreg(&cs,reg - XMM0);
        cg = gen(cg,&cs);
    }

    // The arithmetic instruction itself: op reg,rreg (both XMM).
    unsigned op = xmmoperator(e1->Ety, e->Eoper);
    code *co = gen2(CNIL,op,modregxrmx(3,reg-XMM0,rreg-XMM0));

    if (!regvar)
    {
        // Store the result back through the same EA used for the load.
        cs.Iop = xmmstore(ty1);         // reverse operand order of MOVS[SD]
        gen(co,&cs);
    }

    if (e1->Ecount ||                   // if lvalue is a CSE or
        regvar)                         // rvalue can't be a CSE
    {
        cl = cat(cl,getregs_imm(retregs));      // necessary if both lvalue and
                                                // rvalue are CSEs (since a reg
                                                // can hold only one e at a time)
        cssave(e1,retregs,EOP(e1));             // if lvalue is a CSE
    }
    co = cat(co,fixresult(e,retregs,pretregs));
    freenode(e1);
    return cat4(cr,cl,cg,co);
}
// Cast the cell on top of the stack to an object and push the result.
void emitCastObject(HTS& env) {
  auto const cell = popC(env);
  auto const obj = gen(env, ConvCellToObj, cell);
  push(env, obj);
}
// Bump a runtime statistics counter, but only when stats collection is on.
void emitIncStat(HTS& env, int32_t counter, int32_t value) {
  if (Stats::enabled()) {
    gen(env, IncStat, cns(env, counter), cns(env, value), cns(env, false));
  }
}
// Clone the object on top of the stack; punt if the top value isn't an object.
void emitClone(HTS& env) {
  if (!topC(env)->isA(Type::Obj)) PUNT(Clone-NonObj);
  auto const original = popC(env);
  auto const copy = gen(env, Clone, original);
  push(env, copy);
  gen(env, DecRef, original);
}
// Convert the cell on top of the stack to a double, pushing the result
// and releasing the original value.
void emitCastDouble(HTS& env) {
  auto const cell = popC(env);
  auto const dbl = gen(env, ConvCellToDbl, cell);
  push(env, dbl);
  gen(env, DecRef, cell);
}
// Return a uniformly distributed random float in [0, 1).
//
// Fixes relative to the previous version:
//  - The mt19937 engine is constructed and seeded once per thread instead of
//    on every call; re-seeding from std::random_device each call is expensive
//    and weakens the statistical quality of successive draws.
//  - The distribution is parameterized on float directly; the old
//    std::uniform_real_distribution<> produced a double that was silently
//    narrowed on return.
float FFBP::getRand ()
{
    static thread_local std::mt19937 engine{std::random_device{}()};
    std::uniform_real_distribution<float> dis(0.0f, 1.0f);
    return dis(engine);
}
// Push $this (with an incref) after verifying the current context has one.
void emitThis(HTS& env) {
  auto const ctx = gen(env, LdCtx, fp(env));
  checkThis(env, ctx);
  pushIncRef(env, gen(env, CastCtxThis, ctx));
}
// Materialize the constant "none" value.
SSATmp* TraceBuilder::genDefNone() {
  auto const data = ConstData(0);
  return gen(DefConst, Type::None, data);
}
// Produce the address of local slot `id`, typed as a pointer to the
// local's currently tracked type.
SSATmp* TraceBuilder::genLdLocAddr(uint32_t id) {
  auto const ptrType = getLocalType(id).ptr();
  return gen(LdLocAddr, ptrType, LocalId(id), getFp());
}
// Materialize the constant null value.
SSATmp* TraceBuilder::genDefNull() {
  auto const data = ConstData(0);
  return gen(DefConst, Type::Null, data);
}
// Materialize a constant pointer to the canonical null variant.
// NOTE(review): the function name says "Uninit" but the constant is typed
// Type::PtrToInitNull and points at init_null_variant — confirm this
// name/type mismatch is intentional (compare with any sibling
// genPtrToInitNull in this class).
SSATmp* TraceBuilder::genPtrToUninit() { return gen(DefConst, Type::PtrToInitNull, ConstData(&init_null_variant)); }
// Materialize the constant Uninit value.
SSATmp* TraceBuilder::genDefUninit() {
  auto const data = ConstData(0);
  return gen(DefConst, Type::Uninit, data);
}
// Logical negation of a Bool value: flip the low bit with xor,
// then normalize the result back to Bool.
SSATmp* TraceBuilder::genNot(SSATmp* src) {
  assert(src->type() == Type::Bool);
  auto const flipped = gen(OpXor, src, cns(1));
  return gen(ConvCellToBool, flipped);
}
/* Generate code for a vector (SIMD intrinsic) expression node.
 * e should look like one of:
 *      vector
 *        |
 *      param
 *      /   \
 *   param   op2
 *   /   \
 *  op    op1
 * The op constant selects the XMM instruction; op1/op2 are its operands and
 * an optional trailing param supplies an imm8.
 */
code *cdvector(elem *e, regm_t *pretregs)
{
    if (!config.fpxmmregs)
    {
        printf("SIMD operations not supported on this platform\n");
        exit(1);
    }

    // Flatten the OPparam tree into a flat array of operand elems.
    unsigned n = el_nparams(e->E1);
    elem **params = (elem **)malloc(n * sizeof(elem *));
    assert(params);
    elem **tmp = params;
    el_paramArray(&tmp, e->E1);

#if 0
    printf("cdvector()\n");
    for (int i = 0; i < n; i++)
    {
        printf("[%d]: ", i);
        elem_print(params[i]);
    }
#endif

    if (*pretregs == 0)
    {   /* Evaluate for side effects only */
        code *c = CNIL;
        for (int i = 0; i < n; i++)
        {
            c = cat(c, codelem(params[i], pretregs, FALSE));
            *pretregs = 0;      // in case they got set
        }
        return c;
    }

    assert(n >= 2 && n <= 4);
    elem *eop = params[0];      // opcode constant
    elem *op1 = params[1];      // first operand
    elem *op2 = NULL;           // optional second operand
    tym_t ty2 = 0;
    if (n >= 3)
    {
        op2 = params[2];
        ty2 = tybasic(op2->Ety);
    }

    unsigned op = el_tolong(eop);
#ifdef DEBUG
    assert(!isXMMstore(op));
#endif
    tym_t ty1 = tybasic(op1->Ety);
    unsigned sz1 = tysize[ty1];
//    assert(sz1 == 16);       // float or double

    regm_t retregs;
    code *c;
    code *cr, *cg, *co;
    if (n == 3 && ty2 == TYuchar && op2->Eoper == OPconst)
    {   // Handle: op xmm,imm8
        // Immediate shifts use a /r extension field instead of a reg operand;
        // remap the opcode to the 0F 71/72/73 group encoding.
        retregs = *pretregs & XMMREGS;
        if (!retregs)
            retregs = XMMREGS;
        c = codelem(op1,&retregs,FALSE); // eval left leaf
        unsigned reg = findreg(retregs);
        int r;
        switch (op)
        {
            case PSLLD:  r = 6; op = 0x660F72; break;
            case PSLLQ:  r = 6; op = 0x660F73; break;
            case PSLLW:  r = 6; op = 0x660F71; break;
            case PSRAD:  r = 4; op = 0x660F72; break;
            case PSRAW:  r = 4; op = 0x660F71; break;
            case PSRLD:  r = 2; op = 0x660F72; break;
            case PSRLQ:  r = 2; op = 0x660F73; break;
            case PSRLW:  r = 2; op = 0x660F71; break;
            case PSRLDQ: r = 3; op = 0x660F73; break;
            case PSLLDQ: r = 7; op = 0x660F73; break;
            default:
                printf("op = x%x\n", op);
                assert(0);
                break;
        }
        cr = CNIL;
        cg = getregs(retregs);
        co = genc2(CNIL,op,modregrmx(3,r,reg-XMM0), el_tolong(op2));
    }
    else if (n == 2)
    {   /* Handle: op xmm,mem
         * where xmm is written only, not read
         */
        code cs;
        if ((op1->Eoper == OPind && !op1->Ecount) || op1->Eoper == OPvar)
        {
            c = getlvalue(&cs, op1, RMload);    // get addressing mode
        }
        else
        {
            // Operand isn't directly addressable: evaluate it into an XMM
            // reg and build a reg-form ModRM by hand.
            regm_t rretregs = XMMREGS;
            c = codelem(op1, &rretregs, FALSE);
            unsigned rreg = findreg(rretregs) - XMM0;
            cs.Irm = modregrm(3,0,rreg & 7);
            cs.Iflags = 0;
            cs.Irex = 0;
            if (rreg & 8)
                cs.Irex |= REX_B;
        }
        retregs = *pretregs & XMMREGS;
        if (!retregs)
            retregs = XMMREGS;
        unsigned reg;
        cr = CNIL;
        cg = allocreg(&retregs, &reg, e->Ety);
        code_newreg(&cs, reg - XMM0);
        cs.Iop = op;
        co = gen(CNIL,&cs);
    }
    else if (n == 3 || n == 4)
    {   /* Handle:
         *      op xmm,mem        // n = 3
         *      op xmm,mem,imm8   // n = 4
         * Both xmm and mem are operands, evaluate xmm first.
         */
        code cs;
        retregs = *pretregs & XMMREGS;
        if (!retregs)
            retregs = XMMREGS;
        c = codelem(op1,&retregs,FALSE); // eval left leaf
        unsigned reg = findreg(retregs);

        if ((op2->Eoper == OPind && !op2->Ecount) || op2->Eoper == OPvar)
        {
            cr = getlvalue(&cs, op2, RMload | retregs); // get addressing mode
        }
        else
        {
            // scodelem keeps retregs live while evaluating op2.
            unsigned rretregs = XMMREGS & ~retregs;
            cr = scodelem(op2, &rretregs, retregs, TRUE);
            unsigned rreg = findreg(rretregs) - XMM0;
            cs.Irm = modregrm(3,0,rreg & 7);
            cs.Iflags = 0;
            cs.Irex = 0;
            if (rreg & 8)
                cs.Irex |= REX_B;
        }

        cg = getregs(retregs);
        if (n == 4)
        {
            // Only these opcodes take a trailing imm8 operand.
            switch (op)
            {
                case CMPPD:   case CMPSS:   case CMPSD:   case CMPPS:
                case PSHUFD:  case PSHUFHW: case PSHUFLW:
                case BLENDPD: case BLENDPS: case DPPD:    case DPPS:
                case MPSADBW: case PBLENDW:
                case ROUNDPD: case ROUNDPS: case ROUNDSD: case ROUNDSS:
                case SHUFPD:  case SHUFPS:
                    break;
                default:
                    printf("op = x%x\n", op);
                    assert(0);
                    break;
            }
            elem *imm8 = params[3];
            cs.IFL2 = FLconst;
            cs.IEV2.Vsize_t = el_tolong(imm8);
        }
        code_newreg(&cs, reg - XMM0);
        cs.Iop = op;
        co = gen(CNIL,&cs);
    }
    else
        assert(0);
    co = cat(co,fixresult(e,retregs,pretregs));
    free(params);
    freenode(e);
    return cat4(c,cr,cg,co);
}
// When $this is known to be available, an LdCtx can be strength-reduced
// to an LdThis; otherwise leave the instruction alone.
SSATmp* TraceBuilder::preOptimizeLdCtx(IRInstruction* inst) {
  if (!isThisAvailable()) return nullptr;
  return gen(LdThis, m_fpValue);
}
// Round-trip serialization test for std::valarray over several element
// types (POD, internal/external serialize, internal/external split).
// Each iteration fills fresh valarrays with random data, serializes them
// with OArchive, deserializes with IArchive, and checks size and contents.
// NOTE: the order of random_value<>(gen) calls determines the data; keep
// the fill order stable if modifying this test.
template <class IArchive, class OArchive>
inline void test_valarray()
{
    std::random_device rd;
    std::mt19937 gen(rd());

    for (int ii = 0; ii<100; ++ii)
    {
        // Build source ("o_") containers with random contents.
        std::valarray<int> o_podvalarray(100);
        for (auto & elem : o_podvalarray)
            elem = random_value<int>(gen);

        std::valarray<StructInternalSerialize> o_iservalarray(100);
        for (auto & elem : o_iservalarray)
            elem = StructInternalSerialize(random_value<int>(gen), random_value<int>(gen));

        std::valarray<StructInternalSplit> o_isplvalarray(100);
        for (auto & elem : o_isplvalarray)
            elem = StructInternalSplit(random_value<int>(gen), random_value<int>(gen));

        std::valarray<StructExternalSerialize> o_eservalarray(100);
        for (auto & elem : o_eservalarray)
            elem = StructExternalSerialize(random_value<int>(gen), random_value<int>(gen));

        std::valarray<StructExternalSplit> o_esplvalarray(100);
        for (auto & elem : o_esplvalarray)
            elem = StructExternalSplit(random_value<int>(gen), random_value<int>(gen));

        // Serialize everything into an in-memory stream.
        std::ostringstream os;
        {
            OArchive oar(os);
            oar(o_podvalarray);
            oar(o_iservalarray);
            oar(o_isplvalarray);
            oar(o_eservalarray);
            oar(o_esplvalarray);
        }

        // Deserialize into fresh ("i_") containers.
        std::valarray<int> i_podvalarray;
        std::valarray<StructInternalSerialize> i_iservalarray;
        std::valarray<StructInternalSplit> i_isplvalarray;
        std::valarray<StructExternalSerialize> i_eservalarray;
        std::valarray<StructExternalSplit> i_esplvalarray;

        std::istringstream is(os.str());
        {
            IArchive iar(is);
            iar(i_podvalarray);
            iar(i_iservalarray);
            iar(i_isplvalarray);
            iar(i_eservalarray);
            iar(i_esplvalarray);
        }

        // Sizes must survive the round trip...
        CHECK_EQ(i_podvalarray.size(), o_podvalarray.size());
        CHECK_EQ(i_iservalarray.size(), o_iservalarray.size());
        CHECK_EQ(i_isplvalarray.size(), o_isplvalarray.size());
        CHECK_EQ(i_eservalarray.size(), o_eservalarray.size());
        CHECK_EQ(i_esplvalarray.size(), o_esplvalarray.size());

        // ...as must element-by-element contents.
        check_collection(i_podvalarray, o_podvalarray);
        check_collection(i_iservalarray, o_iservalarray);
        check_collection(i_isplvalarray, o_isplvalarray);
        check_collection(i_eservalarray, o_eservalarray);
        check_collection(i_esplvalarray, o_esplvalarray);
    }
}
// Worker body for the distributed-lock stress test: repeatedly try to take
// the shared DistributedLock under a per-thread simulated clock skew,
// detecting concurrent increments of `count` (which would indicate the lock
// failed to provide mutual exclusion). Results are reported via `result`.
// NOTE(review): uses member state (this->name, lock, count, keepGoing) —
// this is a method of the enclosing test command class.
void runThread(ConnectionString& hostConn, unsigned threadId, unsigned seed, BSONObj& cmdObj, BSONObjBuilder& result)
{
    stringstream ss;
    ss << "thread-" << threadId;
    setThreadName(ss.str().c_str());

    // Lock name
    string lockName = string_field(cmdObj, "lockName", this->name + "_lock");

    // Range of clock skew in diff threads
    int skewRange = (int) number_field(cmdObj, "skewRange", 1);

    // How long to wait with the lock
    int threadWait = (int) number_field(cmdObj, "threadWait", 30);
    if(threadWait <= 0) threadWait = 1;

    // Max amount of time (ms) a thread waits before checking the lock again
    int threadSleep = (int) number_field(cmdObj, "threadSleep", 30);
    if(threadSleep <= 0) threadSleep = 1;

    // (Legacy) how long until the lock is forced in mins, measured locally
    int takeoverMins = (int) number_field(cmdObj, "takeoverMins", 0);

    // How long until the lock is forced in ms, only compared locally
    unsigned long long takeoverMS = (unsigned long long) number_field(cmdObj, "takeoverMS", 0);

    // Whether or not we should hang some threads
    int hangThreads = (int) number_field(cmdObj, "hangThreads", 0);

    // Per-thread deterministic RNG seeded from the caller-provided seed.
    boost::mt19937 gen((boost::mt19937::result_type) seed);
    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSkew(gen, boost::uniform_int<>(0, skewRange));
    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomWait(gen, boost::uniform_int<>(1, threadWait));
    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSleep(gen, boost::uniform_int<>(1, threadSleep));

    int skew = 0;
    bool legacy = (takeoverMins > 0);
    if (!lock.get()) {

        // Pick a skew, but the first two threads skew the whole range
        if(threadId == 0)
            skew = -skewRange / 2;
        else if(threadId == 1)
            skew = skewRange / 2;
        else skew = randomSkew() - (skewRange / 2);

        // Skew this thread
        jsTimeVirtualThreadSkew( skew );

        log() << "Initializing lock with skew of " << skew << " for thread " << threadId << endl;

        // First thread to get here creates the shared lock object.
        lock.reset(new DistributedLock(hostConn, lockName, legacy ? (unsigned long long)takeoverMins : takeoverMS, true, legacy));

        log() << "Skewed time " << jsTime() << " for thread " << threadId << endl
              << " max wait (with lock: " << threadWait << ", after lock: " << threadSleep << ")" << endl
              << " takeover in " << (legacy ? (unsigned long long)takeoverMins : takeoverMS)
              << (legacy ? " (mins local)" : "(ms remote)") << endl;
    }

    DistributedLock* myLock = lock.get();

    bool errors = false;
    while (keepGoing) {
        try {
            if (myLock->lock_try("Testing distributed lock with skew.")) {

                log() << "**** Locked for thread " << threadId << endl;

                // Increment the shared counter, sleep while holding the lock,
                // then verify nobody else incremented it in the meantime.
                count++;
                int before = count;
                int sleep = randomWait();
                sleepmillis(sleep);
                int after = count;

                if(after != before) {
                    errors = true;
                    log() << "**** !Bad increment while sleeping with lock for: " << sleep << "ms" << endl;
                    break;
                }

                // Unlock only half the time...
                if(hangThreads == 0 || threadId % hangThreads != 0) {
                    log() << "**** Unlocking for thread " << threadId << endl;
                    myLock->unlock();
                }
                else {
                    log() << "**** Not unlocking for thread " << threadId << endl;
                    DistributedLock::killPinger( *myLock );
                    // We're simulating a crashed process...
                    break;
                }
            }
        }
        catch( LockException& e ) {
            log() << "*** !Could not try distributed lock." << m_caused_by(e) << endl;
            break;
        }

        sleepmillis(randomSleep());
    }

    // Report per-thread outcome back to the command result document.
    result << "errors" << errors
           << "skew" << skew
           << "takeover" << (long long) (legacy ? takeoverMS : (unsigned long long)takeoverMins)
           << "localTimeout" << (takeoverMS > 0);
}
SimpleOID(const std::string& other) { boost::uuids::string_generator gen; tag = gen(other); }
// Unset local `id`: overwrite the slot with Uninit, then release the
// previous value (read first so it can be DecRef'd after the store).
void emitUnsetL(HTS& env, int32_t id) {
  auto const oldVal = ldLoc(env, id, makeExit(env), DataTypeCountness);
  stLocRaw(env, id, fp(env), cns(env, Type::Uninit));
  gen(env, DecRef, oldVal);
}
/// @fn operator= /// @brief Copies the value of high and low from the RHS. Deep copy SelfType& operator=(std::string const &in_other) { boost::uuids::string_generator gen; tag = gen(in_other); return *this; }
// Verify the current frame context actually carries a $this.
void emitCheckThis(HTS& env) {
  checkThis(env, gen(env, LdCtx, fp(env)));
}
// Every new Entity receives a freshly generated random UUID.
Entity::Entity() {
    boost::uuids::random_generator makeUuid;
    ID = makeUuid();
}
// Replace the class reference on top of the stack with its name string.
void emitNameA(HTS& env) {
  auto const cls = popA(env);
  push(env, gen(env, LdClsName, cls));
}
// If tracked state proves $this is available, replace LdCtx with LdThis.
SSATmp* TraceBuilder::preOptimizeLdCtx(IRInstruction* inst) {
  if (!m_state.thisAvailable()) return nullptr;
  return gen(LdThis, m_state.fp());
}
// Convert the cell on top of the stack to an int, pushing the result
// and releasing the original value.
void emitCastInt(HTS& env) {
  auto const cell = popC(env);
  auto const asInt = gen(env, ConvCellToInt, cell);
  push(env, asInt);
  gen(env, DecRef, cell);
}
// Run the per-instruction optimization pipeline: preOptimize (state-based
// rewrites), copy propagation, simplification, then CSE. Returns a
// replacement SSATmp* if the instruction can be replaced, or nullptr if
// the (possibly mutated) instruction should be emitted as-is.
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst, const folly::Optional<IdomVector>& idoms) {
  // Since some of these optimizations inspect tracked state, we don't
  // perform any of them on non-main traces.
  if (m_savedTraces.size() > 0) return nullptr;

  // Debug-only indentation tracking for nested optimization traces.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "optimizing {}{}\n", indent(), inst->toString());

  // First pass of tracebuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, " {}preOptimize returned: {}\n", indent(), preOpt->inst()->toString());
    return preOpt;
  }
  if (inst->op() == Nop) return nullptr;

  // copy propagation on inst source operands
  copyProp(inst);

  SSATmp* result = nullptr;

  if (m_enableSimplification) {
    result = m_simplifier.simplify(inst);
    if (result) {
      inst = result->inst();
      if (inst->producesReference(0)) {
        // This effectively prevents CSE from kicking in below, which
        // would replace the instruction with an IncRef. That is
        // correct if the simplifier morphed the instruction, but it's
        // incorrect if the simplifier returned one of original
        // instruction sources. We currently have no way to
        // distinguish the two cases, so we prevent CSE completely for
        // now.
        return result;
      }
    }
  }

  if (m_state.enableCse() && inst->canCSE()) {
    SSATmp* cseResult = m_state.cseLookup(inst, idoms);
    if (cseResult) {
      // Found a dominating instruction that can be used instead of inst
      FTRACE(1, " {}cse found: {}\n", indent(), cseResult->inst()->toString());
      assert(!inst->consumesReferences());
      if (inst->producesReference(0)) {
        // Replace with an IncRef
        FTRACE(1, " {}cse of refcount-producing instruction\n", indent());
        gen(IncRef, cseResult);
      }
      return cseResult;
    }
  }

  return result;
}
// Convert the cell on top of the stack to a string, pushing the result
// and releasing the original value.
void emitCastString(HTS& env) {
  auto const cell = popC(env);
  auto const str = gen(env, ConvCellToStr, cell);
  push(env, str);
  gen(env, DecRef, cell);
}
// Black-market turn action: if the player can act and has more than 1 gold,
// buy a random number of random goods (spice/fabric/jewels), clamped to the
// free space on the cart. Returns true when the action completed, false when
// the player lacks food, gold, or cart space.
//
// NOTE(review): the first clamp condition looks inverted — when the random
// quantity is LESS than the free cart space the code clamps to free space;
// presumably the intent was the opposite. Also note each randomQuatity(gen)
// call draws a fresh value, so the per-item totals are independent of
// quatityAdded. Confirm before changing.
bool BlackMarket::action(Player &player)const{
    if(player.canAct()){
        if (player.getGold()>1) {
            std::random_device rd;
            std::mt19937 gen(rd());
            std::uniform_int_distribution<> randomItem(1,3);     // which good: 1=spice, 2=fabric, 3=jewels
            std::uniform_int_distribution<> randomQuatity(0,5);  // how many units
            int quatityAdded;
            if(randomQuatity(gen)<(player.getCart()-player.getInventory())){
                quatityAdded = (player.getCart()-player.getInventory()); // not enough room on cart
            }else{
                quatityAdded = randomQuatity(gen);
            }
            for(int i = 0; i < quatityAdded; i++){
                switch (randomItem(gen)) {
                    // Braces around each case open a scope for the case-local
                    // variables declared below.
                    {case 1:
                        int totalSpices = randomQuatity(gen);
                        int spicesGained;
                        if((player.getCart()-player.getInventory())>totalSpices){
                            spicesGained = totalSpices;
                        }else{
                            spicesGained = player.getCart()-player.getInventory();
                            if (spicesGained==0) {
                                return false;//not enough space on cart
                            }
                        }
                        player.setSpice(player.getSpice()+spicesGained);
                        player.setGold(player.getGold()-1);
                        player.setInventory(player.getInventory()+spicesGained);
                        player.setTurn(player.getTurn()+1);
                        player.eat();
                        break;
                    }
                    {case 2:
                        int totalFabrics = randomQuatity(gen);
                        int fabricesGained;
                        if((player.getCart()-player.getInventory())>totalFabrics){
                            fabricesGained = totalFabrics;
                        }else{
                            fabricesGained = player.getCart()-player.getInventory();
                            if (fabricesGained==0) {
                                return false;//not enough space on cart
                            }
                        }
                        player.setFabric(player.getFabric()+fabricesGained);
                        player.setGold(player.getGold()-1);
                        player.setInventory(player.getInventory()+fabricesGained);
                        player.setTurn(player.getTurn()+1);
                        player.eat();
                        break;
                    }
                    {case 3:
                        int totalJewelry = randomQuatity(gen);
                        int jewelryGained;
                        if((player.getCart()-player.getInventory())>totalJewelry){
                            jewelryGained = totalJewelry;
                        }else{
                            jewelryGained = player.getCart()-player.getInventory();
                            if (jewelryGained==0) {
                                return false;//not enough space on cart
                            }
                        }
                        player.setJewels(player.getJewels()+jewelryGained);
                        player.setGold(player.getGold()-1);
                        player.setInventory(player.getInventory()+jewelryGained);
                        player.setTurn(player.getTurn()+1);
                        player.eat();
                        break;
                    }
                    default:
                        break;
                }
            }
            return true;
        }
        return false;// not enough gold
    }
    return false;//not enough food
}
/* Alpha-beta search to the given depth over the engine's global board
   state. Returns a score from the side-to-move's point of view within
   the (alpha, beta) window; fills in the principal variation arrays
   (pv, pv_length) and the history heuristic table as a side effect. */
int search(int alpha, int beta, int depth)
{
    int i, j, x;
    BOOL c, f;

    /* we're as deep as we want to be; call quiesce() to get
       a reasonable score and return it. */
    if (!depth)
        return quiesce(alpha,beta);
    ++nodes;

    /* do some housekeeping every 1024 nodes */
    if ((nodes & 1023) == 0)
        checkup();

    pv_length[ply] = ply;

    /* if this isn't the root of the search tree (where we have
       to pick a move and can't simply return 0) then check to
       see if the position is a repeat. if so, we can assume that
       this line is a draw and return 0. */
    if (ply && reps())
        return 0;

    /* are we too deep? */
    if (ply >= MAX_PLY - 1)
        return eval();
    if (hply >= HIST_STACK - 1)
        return eval();

    /* are we in check? if so, we want to search deeper */
    c = in_check(side);
    if (c)
        ++depth;
    gen();   /* generate pseudo-legal moves into gen_dat for this ply */
    if (follow_pv)  /* are we following the PV? */
        sort_pv();
    f = FALSE;  /* tracks whether any legal move was found */

    /* loop through the moves */
    for (i = first_move[ply]; i < first_move[ply + 1]; ++i) {
        sort(i);
        if (!makemove(gen_dat[i].m.b))
            continue;           /* illegal (leaves king in check) */
        f = TRUE;
        x = -search(-beta, -alpha, depth - 1);  /* negamax recursion */
        takeback();
        if (x > alpha) {

            /* this move caused a cutoff, so increase the history
               value so it gets ordered high next time we can
               search it */
            history[(int)gen_dat[i].m.b.from][(int)gen_dat[i].m.b.to] += depth;
            if (x >= beta)
                return beta;    /* fail-hard beta cutoff */
            alpha = x;

            /* update the PV */
            pv[ply][ply] = gen_dat[i].m;
            for (j = ply + 1; j < pv_length[ply + 1]; ++j)
                pv[ply][j] = pv[ply + 1][j];
            pv_length[ply] = pv_length[ply + 1];
        }
    }

    /* no legal moves? then we're in checkmate or stalemate */
    if (!f) {
        if (c)
            return -10000 + ply;   /* mate score, preferring shorter mates */
        else
            return 0;
    }

    /* fifty move draw rule */
    if (fifty >= 100)
        return 0;
    return alpha;
}
/* Generate code for a simple assignment (or a given store opcode `op`)
 * whose value lives in an XMM register: evaluate e2 into an XMM reg and
 * store it into the lvalue e1, handling the (*p++ = ...) / (*p-- = ...)
 * register-pointer special case.
 * NOTE(review): the Lp: label has no visible goto in this function —
 * confirm it is intentional/leftover before removing.
 */
code *xmmeq(elem *e, unsigned op, elem *e1, elem *e2,regm_t *pretregs)
{
    tym_t tymll;
    unsigned reg;
    int i;
    code *cl,*cr,*c,cs;         // note: cs is a code struct, not a pointer
    elem *e11;
    bool regvar;                /* TRUE means evaluate into register variable */
    regm_t varregm;
    unsigned varreg;
    targ_int postinc;

    //printf("xmmeq(e1 = %p, e2 = %p, *pretregs = %s)\n", e1, e2, regm_str(*pretregs));
    int e2oper = e2->Eoper;
    tym_t tyml = tybasic(e1->Ety);              /* type of lvalue */
    regm_t retregs = *pretregs;

    if (!(retregs & XMMREGS))
        retregs = XMMREGS;              // pick any XMM reg

    cs.Iop = (op == OPeq) ? xmmstore(tyml) : op;
    regvar = FALSE;
    varregm = 0;
    if (config.flags4 & CFG4optimized)
    {
        // Be careful of cases like (x = x+x+x). We cannot evaluate in
        // x if x is in a register.
        if (isregvar(e1,&varregm,&varreg) &&    // if lvalue is register variable
            doinreg(e1->EV.sp.Vsym,e2)          // and we can compute directly into it
           )
        {   regvar = TRUE;
            retregs = varregm;
            reg = varreg;       /* evaluate directly in target register */
        }
    }
    if (*pretregs & mPSW && !EOP(e1))   // if evaluating e1 couldn't change flags
    {   // Be careful that this lines up with jmpopcode()
        retregs |= mPSW;
        *pretregs &= ~mPSW;
    }
    cr = scodelem(e2,&retregs,0,TRUE);  // get rvalue

    // Look for special case of (*p++ = ...), where p is a register variable
    if (e1->Eoper == OPind &&
        ((e11 = e1->E1)->Eoper == OPpostinc || e11->Eoper == OPpostdec) &&
        e11->E1->Eoper == OPvar &&
        e11->E1->EV.sp.Vsym->Sfl == FLreg
       )
    {
        postinc = e11->E2->EV.Vint;
        if (e11->Eoper == OPpostdec)
            postinc = -postinc;
        cl = getlvalue(&cs,e11,RMstore | retregs);
        freenode(e11->E2);
    }
    else
    {   postinc = 0;
        cl = getlvalue(&cs,e1,RMstore | retregs);       // get lvalue (cl == CNIL if regvar)
    }

    c = getregs_imm(varregm);

    reg = findreg(retregs & XMMREGS);
    cs.Irm |= modregrm(0,(reg - XMM0) & 7,0);
    if ((reg - XMM0) & 8)
        cs.Irex |= REX_R;

    // Do not generate mov from register onto itself
    if (!(regvar && reg == XMM0 + ((cs.Irm & 7) | (cs.Irex & REX_B ? 8 : 0))))
        c = gen(c,&cs);         // MOV EA+offset,reg

    if (e1->Ecount ||                   // if lvalue is a CSE or
        regvar)                         // rvalue can't be a CSE
    {
        c = cat(c,getregs_imm(retregs));        // necessary if both lvalue and
                                                // rvalue are CSEs (since a reg
                                                // can hold only one e at a time)
        cssave(e1,retregs,EOP(e1));             // if lvalue is a CSE
    }
    c = cat4(cr,cl,c,fixresult(e,retregs,pretregs));
Lp:
    if (postinc)
    {
        // Apply the deferred pointer increment/decrement for *p++ / *p--.
        int reg = findreg(idxregm(&cs));
        if (*pretregs & mPSW)
        {   // Use LEA to avoid touching the flags
            unsigned rm = cs.Irm & 7;
            if (cs.Irex & REX_B)
                rm |= 8;
            c = genc1(c,0x8D,buildModregrm(2,reg,rm),FLconst,postinc);
            if (tysize(e11->E1->Ety) == 8)
                code_orrex(c, REX_W);
        }
        else if (I64)
        {
            c = genc2(c,0x81,modregrmx(3,0,reg),postinc);
            if (tysize(e11->E1->Ety) == 8)
                code_orrex(c, REX_W);
        }
        else
        {
            if (postinc == 1)
                c = gen1(c,0x40 + reg);         // INC reg
            else if (postinc == -(targ_int)1)
                c = gen1(c,0x48 + reg);         // DEC reg
            else
            {
                c = genc2(c,0x81,modregrm(3,0,reg),postinc);
            }
        }
    }
    freenode(e1);
    return c;
}