// Emit the common prologue for an InterpOne: call the per-opcode interpreter
// helper for the bytecode at this instruction's offset, passing (fp, sp,
// pcOff) in the first three argument registers.
void CodeGenerator::cgInterpOneCommon(IRInstruction* inst) {
  auto fpReg = x2a(curOpd(inst->src(0)).reg());
  auto spReg = x2a(curOpd(inst->src(1)).reg());
  auto pcOff = inst->extra<InterpOneData>()->bcOff;

  // Select the helper from the opcode byte at pcOff in the current unit.
  auto opc = *(curFunc()->unit()->at(pcOff));
  auto* interpOneHelper = interpOneEntryPoints[opc];

  // This means push x30 (the link register) first, then x29. This mimics the
  // x64 stack frame: return address higher in memory than saved FP.
  m_as.   Push   (x30, x29);

  // TODO(2966997): this really should be saving caller-save registers and
  // basically doing everything else that cgCallHelper does. This only works
  // now because no caller-saved registers are live.
  m_as.   Mov    (rHostCallReg, reinterpret_cast<uint64_t>(interpOneHelper));
  m_as.   Mov    (argReg(0), fpReg);
  m_as.   Mov    (argReg(1), spReg);
  m_as.   Mov    (argReg(2), pcOff);

  // Note that sync points for HostCalls have to be recorded at the *start* of
  // the instruction.
  recordHostCallSyncPoint(m_as, m_as.frontier());
  m_as.   HostCall(3);

  // Pop in the mirror order of the Push above.
  m_as.   Pop    (x29, x30);
}
// Spill the given source values to their stack slots, skipping stores the
// simplifier proved unnecessary, then compute the new stack pointer into the
// destination register. Each cell's data is stored first, then its type byte.
void CodeGenerator::cgSpillStack(IRInstruction* inst) {
  // TODO(2966414): so much of this logic could be shared. The opcode itself
  // should probably be broken up.
  SSATmp* dst             = inst->dst();
  SSATmp* sp              = inst->src(0);
  auto const spDeficit    = inst->src(1)->getValInt();
  auto const spillVals    = inst->srcs().subpiece(2);
  auto const numSpillSrcs = spillVals.size();
  auto const dstReg       = x2a(curOpd(dst).reg());
  auto const spReg        = x2a(curOpd(sp).reg());
  auto const spillCells   = spillValueCells(inst);

  // Net SP movement in bytes; each spilled value lands at a Cell-sized slot
  // above this adjustment.
  int64_t adjustment = (spDeficit - spillCells) * sizeof(Cell);
  for (uint32_t i = 0; i < numSpillSrcs; ++i) {
    const int64_t offset = i * sizeof(Cell) + adjustment;
    auto* val = spillVals[i];
    if (val->type() == Type::None) {
      // The simplifier detected that this store was redundant.
      continue;
    }
    // XXX this is a cut-down version of cgStore.
    if (val->isConst()) {
      // Constants have no register; materialize into the scratch register.
      m_as. Mov (rAsm, val->getValBits());
      m_as. Str (rAsm, spReg[offset]);
    } else {
      auto reg = x2a(curOpd(val).reg());
      m_as. Str (reg, spReg[offset]);
    }
    // Store the TypedValue's type tag as a single byte.
    m_as. Mov (rAsm, val->type().toDataType());
    m_as. Strb (rAsm.W(), spReg[offset + TVOFF(m_type)]);
  }
  emitRegGetsRegPlusImm(m_as, dstReg, spReg, adjustment);
}
// Load a raw memory slot relative to a Continuation's ActRec pointer.
// The slot kind is a compile-time constant; the load width follows the
// slot's declared size (byte and dword loads zero-extend into W).
void CodeGenerator::cgLdContArRaw(IRInstruction* inst) {
  auto const dstReg    = x2a(curOpd(inst->dst()).reg());
  auto const contArReg = x2a(curOpd(inst->src(0)).reg());
  auto const slotKind  = inst->src(1)->getValInt();
  auto const& slot     = RawMemSlot::Get(RawMemSlot::Kind(slotKind));
  auto const offset    = slot.offset() - c_Continuation::getArOffset(curFunc());

  switch (slot.size()) {
    case sz::byte:  m_as.  Ldrb (dstReg.W(), contArReg[offset]); break;
    case sz::dword: m_as.  Ldr  (dstReg.W(), contArReg[offset]); break;
    case sz::qword: m_as.  Ldr  (dstReg,     contArReg[offset]); break;
    default:        not_implemented();
  }
}
// Materialize a compile-time constant into the destination register.
// A dest with no assigned register means the value is unused; emit nothing.
void CodeGenerator::cgLdConst(IRInstruction* inst) {
  auto const dstReg = x2a(curOpd(inst->dst()).reg());
  if (!dstReg.IsValid()) return;
  m_as.  Mov  (dstReg, inst->extra<LdConst>()->as<uintptr_t>());
}
// Clang -verify Sema test: each expected-error annotation must match a
// diagnostic emitted on exactly that line — do not edit those comments.
void f(Y y, int *ip, float *fp) {
  X x1 = y; // expected-error{{no matching constructor for initialization of 'X'}}
  X x2 = 0;
  X x3 = ip;
  X x4 = fp; // expected-error{{no viable conversion}}

  X x2a(0); // expected-error{{call to constructor of 'X' is ambiguous}}
  X x3a(ip);
  X x4a(fp);
}
// Emit an InterpOne: call the shared interpreter-helper sequence, then
// adjust the returned stack pointer by the net cells this bytecode
// pushed/popped.
void CodeGenerator::cgInterpOne(IRInstruction* inst) {
  cgInterpOneCommon(inst);

  auto const& extra    = *inst->extra<InterpOne>();
  auto const  newSpReg = x2a(curOpd(inst->dst()).reg());
  auto const  spDelta  = cellsToBytes(extra.cellsPopped - extra.cellsPushed);
  emitRegGetsRegPlusImm(m_as, newSpReg, newSpReg, spDelta);
}
// Call the per-opcode interpreter helper for the bytecode at this
// instruction's offset, passing (fp, sp, pcOff) in the first three argument
// registers. Register assignments come from m_regs.
// NOTE(review): no HostCall sync point is recorded before the call here, and
// x29 is pushed before x30 (the reverse of the curOpd-based variant in this
// file) — confirm both are intentional for this revision.
void CodeGenerator::cgInterpOneCommon(IRInstruction* inst) {
  auto fpReg = x2a(m_regs[inst->src(0)].reg());
  auto spReg = x2a(m_regs[inst->src(1)].reg());
  auto pcOff = inst->extra<InterpOneData>()->bcOff;

  // Select the helper from the opcode byte at pcOff.
  auto opc = *(curFunc()->unit()->at(pcOff));
  auto* interpOneHelper = interpOneEntryPoints[opc];

  m_as.   Push   (x29, x30);

  // TODO(2966997): this really should be saving caller-save registers and
  // basically doing everything else that cgCallHelper does. This only works
  // now because no caller-saved registers are live.
  m_as.   Mov    (rHostCallReg, reinterpret_cast<uint64_t>(interpOneHelper));
  m_as.   Mov    (argReg(0), fpReg);
  m_as.   Mov    (argReg(1), spReg);
  m_as.   Mov    (argReg(2), pcOff);
  m_as.   HostCall(3);

  // Pop in the mirror order of the Push above.
  m_as.   Pop    (x30, x29);
}
// Guard that a local variable's cell has the expected type. On mismatch,
// jump back to the fallback translation for the current bytecode offset.
void CodeGenerator::cgGuardLoc(IRInstruction* inst) {
  auto const rFP     = x2a(m_regs[inst->src(0)].reg());
  auto const baseOff = localOffset(inst->extra<GuardLoc>()->locId);
  emitTypeTest(
    inst->typeParam(),
    rFP[baseOff + TVOFF(m_type)],
    rFP[baseOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
      auto const destSR = m_tx64->getSrcRec(destSK);
      // cc is the "type matches" condition, so branch on its negation.
      destSR->emitFallbackJump(this->m_mainCode, ccNegate(cc));
    });
}
// Guard that a stack cell has the expected type. On mismatch, jump back to
// the fallback translation for the current bytecode offset.
void CodeGenerator::cgGuardStk(IRInstruction* inst) {
  auto const rSP      = x2a(curOpd(inst->src(0)).reg());
  auto const baseOff  = cellsToBytes(inst->extra<GuardStk>()->offset);
  auto const typeAddr = rSP[baseOff + TVOFF(m_type)];
  auto const dataAddr = rSP[baseOff + TVOFF(m_data)];
  emitTypeTest(
    inst->typeParam(), typeAddr, dataAddr,
    [&] (ConditionCode cc) {
      // cc is the "type matches" condition, so branch on its negation.
      auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
      auto const destSR = m_tx64->getSrcRec(destSK);
      destSR->emitFallbackJump(this->m_mainCode, ccNegate(cc));
    });
}
// Type-check a stack cell; a failed check side-exits to the instruction's
// 'taken' bytecode offset instead of the fallback translation.
void CodeGenerator::cgSideExitGuardStk(IRInstruction* inst) {
  auto const spReg   = x2a(curOpd(inst->src(0)).reg());
  auto const extra   = inst->extra<SideExitGuardStk>();
  auto const slotOff = cellsToBytes(extra->checkedSlot);
  emitTypeTest(
    inst->typeParam(),
    spReg[slotOff + TVOFF(m_type)],
    spReg[slotOff + TVOFF(m_data)],
    [&] (ConditionCode cc) {
      // cc is the "type matches" condition, so bind the exit on its negation.
      auto const sk = SrcKey(curFunc(), extra->taken);
      emitBindSideExit(this->m_mainCode, this->m_stubsCode, sk, ccNegate(cc));
    }
  );
}
// Load from addr + offset. The offset is either a constant RawMemSlot kind
// (load width follows the slot's size) or a runtime register value (qword
// loads only).
void CodeGenerator::cgLdRaw(IRInstruction* inst) {
  auto const srcAddr = inst->src(0);
  auto const srcOff  = inst->src(1);
  auto const dstReg  = x2a(curOpd(inst->dst()).reg());
  auto const addrReg = x2a(curOpd(srcAddr).reg());

  if (srcAddr->isConst()) {
    not_implemented();
  }

  if (!srcOff->isConst()) {
    // Runtime offset: only full-width loads are supported here.
    auto const offsetReg = x2a(curOpd(srcOff).reg());
    assert(inst->dst()->type().nativeSize() == sz::qword);
    m_as.  Ldr  (dstReg, addrReg[offsetReg]);
    return;
  }

  auto const& slot = RawMemSlot::Get(RawMemSlot::Kind(srcOff->getValInt()));
  auto const offs  = slot.offset();
  switch (slot.size()) {
    case sz::qword: m_as.  Ldr  (dstReg, addrReg[offs]);     break;
    case sz::dword: m_as.  Ldr  (dstReg.W(), addrReg[offs]); break;
    // Ldrb zero-extends
    case sz::byte:  m_as.  Ldrb (dstReg.W(), addrReg[offs]); break;
    default:        not_reached();
  }
}
// FileCheck test: verifies the IR emitted for implicitly-defined copy
// constructors. The CHECK lines are FileCheck directives — do not edit.
void test(X2 x2, X3 x3, X5 x5) {
  // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X2* %this, %struct.X2*) unnamed_addr
  // CHECK: call void @_ZN2X2C2ERKS_({{.*}}) nounwind
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X2 x2a(x2);
  // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X3* %this, %struct.X3*) unnamed_addr
  // CHECK: call void @_ZN2X3C2ERKS_({{.*}}) nounwind
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X3 x3a(x3);
  // CHECK: define linkonce_odr void @_ZN2X5C1ERS_({{.*}}) unnamed_addr
  // CHECK-NOT: call void @__cxa_call_unexpected
  // CHECK: ret void
  X5 x5a(x5);
}
// FileCheck test: verifies the IR emitted for implicitly-defined copy
// constructors (newer clang output: dereferenceable attrs, attribute groups).
// The CHECK lines are FileCheck directives — do not edit.
void test(X2 x2, X3 x3, X5 x5) {
  // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_(%struct.X2* %this, %struct.X2* dereferenceable({{[0-9]+}})) unnamed_addr
  // CHECK: call void @_ZN2X2C2ERKS_({{.*}}) [[NUW:#[0-9]+]]
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X2 x2a(x2);
  // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_(%struct.X3* %this, %struct.X3* dereferenceable({{[0-9]+}})) unnamed_addr
  // CHECK: call void @_ZN2X3C2ERKS_({{.*}}) [[NUW]]
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X3 x3a(x3);
  // CHECK: define linkonce_odr void @_ZN2X5C1ERS_({{.*}}) unnamed_addr
  // CHECK-NOT: call void @__cxa_call_unexpected
  // CHECK: ret void
  X5 x5a(x5);
}
// FileCheck test: verifies the IR emitted for implicitly-defined copy
// constructors (signature-only define matches). The CHECK lines are
// FileCheck directives — do not edit.
void test(X2 x2, X3 x3, X5 x5) {
  // CHECK: define linkonce_odr void @_ZN2X2C1ERKS_
  // CHECK: call void @_ZN2X2C2ERKS_({{.*}}) nounwind
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X2 x2a(x2);
  // CHECK: define linkonce_odr void @_ZN2X3C1ERKS_
  // CHECK: call void @_ZN2X3C2ERKS_({{.*}}) nounwind
  // CHECK-NEXT: ret void
  // CHECK-NEXT: }
  X3 x3a(x3);
  // CHECK: define linkonce_odr void @_ZN2X5C1ERS_
  // CHECK-NOT: call void @__cxa_call_unexpected
  // CHECK: ret void
  X5 x5a(x5);
}
// Guard the by-ref/by-value signature of a callee: compare a 64-bit window of
// the function's reffiness bits against (mask64, vals64) and fall back to the
// interpreter translation on mismatch. firstBitNum selects which 64-bit word
// of the bitvector is tested; parameters beyond nParams are handled by the
// AttrVariadicByRef check. Register sources come from m_regs.
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp = inst->src(3);
  SSATmp* vals64Tmp = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(m_regs[funcPtrTmp].reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(m_regs[nParamsTmp].reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(m_regs[mask64Tmp].reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(m_regs[vals64Tmp].reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  // Emits the actual bit test; cond is the "guard failed" condition the
  // fallback jump is taken on.
  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;
    if (firstBitNum == 0) {
      // First word lives inline in the Func.
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      // Later words are reached through the shared data pointer.
      // (bitsOff -= sizeof(uint64_t): offset is relative to the shared
      // struct — assumes the first word is not stored there; confirm.)
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    if (vals64 == 0 || (mask64 & (mask64 - 1)) == 0) {
      // If vals64 is zero, or we're testing a single
      // bit, we can get away with a single test,
      // rather than mask-and-compare
      m_as.    Ldr  (rAsm2, bitsPtrReg[bitsOff]);
      if (mask64Reg.IsValid()) {
        m_as.  Tst  (rAsm2, mask64Reg);
      } else {
        // TST immediates must be encodable as ARM logical immediates.
        assert(vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize));
        m_as.  Tst  (rAsm2, mask64);
      }
      // When expecting the bit set, failure is "equal to zero".
      if (vals64) cond = CC_E;
    } else {
      auto bitsValReg = rAsm;
      m_as.    Ldr  (bitsValReg, bitsPtrReg[bitsOff]);
      // bitsPtrReg aliases bitsValReg (both rAsm); poison it in debug builds
      // so later accidental uses are caught.
      if (debug) bitsPtrReg = Register();

      // bitsValReg <- bitsValReg & mask64
      // NB: these 'And' ops don't set flags. They don't need to.
      if (mask64Reg.IsValid()) {
        m_as.  And  (bitsValReg, bitsValReg, mask64Reg);
      } else {
        // There are restrictions on the immediates that can be encoded into
        // logical ops. If the mask doesn't meet those restrictions, we have to
        // load it into a register first.
        if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
          m_as.And  (bitsValReg, bitsValReg, mask64);
        } else {
          m_as.Mov  (rAsm2, mask64);
          m_as.And  (bitsValReg, bitsValReg, rAsm2);
        }
      }

      // If bitsValReg != vals64, then goto Exit
      if (vals64Reg.IsValid()) {
        m_as.  Cmp  (bitsValReg, vals64Reg);
      } else {
        m_as.  Cmp  (bitsValReg, vals64);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp  (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isnt mask64, there's no possibility of
      // a match
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
        // If not special builtin...
        m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
        m_as.  Tst  (rAsm, AttrVariadicByRef);
        destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
      });
    }
  }
}
// Guard the by-ref/by-value signature of a callee: load a 64-bit window of
// the function's reffiness bits, mask with mask64, compare against vals64,
// and fall back to the interpreter translation on mismatch. firstBitNum
// selects which 64-bit word is tested; parameters beyond nParams are handled
// by the AttrVariadicByRef check. Register sources come from curOpd.
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp = inst->src(0);
  SSATmp* nParamsTmp = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp = inst->src(3);
  SSATmp* vals64Tmp = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = x2a(curOpd(funcPtrTmp).reg());
  assert(funcPtrReg.IsValid());

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = x2a(curOpd(nParamsTmp).reg());
  assert(nParamsReg.IsValid() || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = x2a(curOpd(mask64Tmp).reg());
  assert(mask64Reg.IsValid() || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();
  assert(mask64);

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = x2a(curOpd(vals64Tmp).reg());
  assert(vals64Reg.IsValid() || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_unit.bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  // Emits the load/mask/compare; cond (CC_NE) is the "guard failed"
  // condition the fallback jump is taken on.
  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = rAsm;
    if (firstBitNum == 0) {
      // First word lives inline in the Func.
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      // Later words are reached through the shared data pointer.
      // (bitsOff -= sizeof(uint64_t): offset is relative to the shared
      // struct — assumes the first word is not stored there; confirm.)
      m_as.    Ldr  (bitsPtrReg, funcPtrReg[Func::sharedOff()]);
      bitsOff -= sizeof(uint64_t);
    }

    // Don't need the bits pointer after this point
    auto bitsReg = rAsm;
    // Load the bits
    m_as.    Ldr  (bitsReg, bitsPtrReg[bitsOff]);

    // Mask the bits. There are restrictions on what can be encoded as an
    // immediate in ARM's logical instructions, and if they're not met, we'll
    // have to use a register.
    if (vixl::Assembler::IsImmLogical(mask64, vixl::kXRegSize)) {
      m_as.  And  (bitsReg, bitsReg, mask64);
    } else {
      if (mask64Reg.IsValid()) {
        m_as.And  (bitsReg, bitsReg, mask64Reg);
      } else {
        m_as.Mov  (rAsm2, mask64);
        m_as.And  (bitsReg, bitsReg, rAsm2);
      }
    }

    // Now do the compare. There are also restrictions on immediates in
    // arithmetic instructions (of which Cmp is one; it's just a subtract that
    // sets flags), so same deal as with the mask immediate above.
    if (vixl::Assembler::IsImmArithmetic(vals64)) {
      m_as.  Cmp  (bitsReg, vals64);
    } else {
      if (vals64Reg.IsValid()) {
        m_as.Cmp  (bitsReg, vals64Reg);
      } else {
        m_as.Mov  (rAsm2, vals64);
        m_as.Cmp  (bitsReg, rAsm2);
      }
    }
    destSR->emitFallbackJump(m_mainCode, cond);
  };

  if (firstBitNum == 0) {
    assert(!nParamsReg.IsValid());
    // This is the first 64 bits. No need to check
    // nParams.
    thenBody();
  } else {
    assert(nParamsReg.IsValid());
    // Check number of args...
    m_as.    Cmp  (nParamsReg, firstBitNum);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isnt mask64, there's no possibility of
      // a match
      destSR->emitFallbackJump(m_mainCode, CC_LE);
      thenBody();
    } else {
      ifThenElse(m_as, vixl::gt, thenBody, /* else */ [&] {
        // If not special builtin...
        m_as.  Ldr  (rAsm, funcPtrReg[Func::attrsOff()]);
        m_as.  Tst  (rAsm, AttrVariadicByRef);
        destSR->emitFallbackJump(m_mainCode, vals64 ? CC_Z : CC_NZ);
      });
    }
  }
}
// Map a 64-bit virtual register to its vixl X (64-bit) register view.
vixl::Register X(Vreg64 r) {
  return x2a(PhysReg(r.asReg()));
}
// Map an 8-bit virtual register to the W (32-bit) view of its vixl register.
vixl::Register W(Vreg8 r) {
  return x2a(PhysReg(r.asReg())).W();
}
// Compute the address of a stack cell (base + cell offset) into the
// destination register. Address arithmetic only — no load is emitted.
void CodeGenerator::cgLdStackAddr(IRInstruction* inst) {
  auto const dstReg  = x2a(curOpd(inst->dst()).reg());
  auto const baseReg = x2a(curOpd(inst->src(0)).reg());
  emitRegGetsRegPlusImm(m_as, dstReg, baseReg,
                        cellsToBytes(inst->extra<LdStackAddr>()->offset));
}
// Load the Func* field out of an ActRec located at base + constant offset.
void CodeGenerator::cgLdARFuncPtr(IRInstruction* inst) {
  auto const dst     = x2a(curOpd(inst->dst()).reg());
  auto const base    = x2a(curOpd(inst->src(0)).reg());
  auto const funcOff = inst->src(1)->getValInt() + AROFF(m_func);
  m_as.  Ldr  (dst, base[funcOff]);
}
// Copy the tracked frame and stack pointers into the ABI-designated VM
// registers (rVmFp/rVmSp) before leaving translated code.
void CodeGenerator::cgSyncABIRegs(IRInstruction* inst) {
  auto const fpSrc = x2a(curOpd(inst->src(0)).reg());
  auto const spSrc = x2a(curOpd(inst->src(1)).reg());
  emitRegGetsRegPlusImm(m_as, rVmFp, fpSrc, 0);
  emitRegGetsRegPlusImm(m_as, rVmSp, spSrc, 0);
}