//------------------------------------------------------------------------
// ContainCheckIndir: Determine whether operands of an indir should be contained.
//
// Arguments:
//    indirNode - The indirection node of interest
//
// Notes:
//    This is called for both store and load indirections.
//
// Return Value:
//    None.
//
void Lowering::ContainCheckIndir(GenTreeIndir* indirNode)
{
    // If this is the rhs of a block copy it will be handled when we handle the store.
    if (indirNode->TypeGet() == TYP_STRUCT)
    {
        return;
    }

#ifdef FEATURE_SIMD
    // If indirTree is of TYP_SIMD12, don't mark addr as contained
    // so that it always gets computed into a register. This means the
    // codegen side logic doesn't need to handle all possible
    // addr expressions that could be contained.
    //
    // TODO-ARM64-CQ: handle other addr mode expressions that could be marked
    // as contained.
    if (indirNode->TypeGet() == TYP_SIMD12)
    {
        return;
    }
#endif // FEATURE_SIMD

    GenTree* addr          = indirNode->Addr();
    bool     makeContained = true;
    if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr))
    {
        GenTreeAddrMode* lea   = addr->AsAddrMode();
        GenTree*         base  = lea->Base();
        GenTree*         index = lea->Index();
        int              cns   = lea->Offset();

#ifdef _TARGET_ARM_
        // ARM floating-point load/store doesn't support a form similar to integer
        // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
        // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
        if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
        {
            if (indirNode->OperGet() == GT_STOREIND)
            {
                if (varTypeIsFloating(indirNode->AsStoreInd()->Data()))
                {
                    makeContained = false;
                }
            }
            else if (indirNode->OperGet() == GT_IND)
            {
                if (varTypeIsFloating(indirNode))
                {
                    makeContained = false;
                }
            }
        }
#endif

        if (makeContained)
        {
            MakeSrcContained(indirNode, addr);
        }
    }
}
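
// Illustrative sketch (not from the original source): how the ARM restriction
// above plays out for a float store. Given the tree
//
//     GT_STOREIND(float)
//       +-- GT_LEA(base=r0, index=r1, scale=4)   <- address
//       +-- GT_LCL_VAR(float)                    <- data
//
// the LEA would normally be contained so the store folds into one instruction,
// but vstr has no register-offset form on ARM, so the LEA stays uncontained
// and the address is materialized first:
//
//     add  r2, r0, r1, LSL #2   ; address computed into a register
//     vstr s0, [r2]             ; only [Rbase + imm] is encodable
//
// An integer store of the same shape would instead contain the LEA and emit
// a single "str r3, [r0, r1, LSL #2]".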
//------------------------------------------------------------------------
// TreeNodeInfoInitGCWriteBarrier: Set the register requirements for a
//                                 store requiring a GC write barrier.
//
// Arguments:
//    tree - The GT_STOREIND node requiring a write barrier
//
void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
{
    GenTreePtr dst  = tree;
    GenTreePtr addr = tree->gtOp.gtOp1;
    GenTreePtr src  = tree->gtOp.gtOp2;

    if (addr->OperGet() == GT_LEA)
    {
        // In the case where we are doing a helper assignment, if the dst
        // is an indir through an lea, we need to actually instantiate the
        // lea in a register.
        GenTreeAddrMode* lea = addr->AsAddrMode();

        short leaSrcCount = 0;
        if (lea->Base() != nullptr)
        {
            leaSrcCount++;
        }
        if (lea->Index() != nullptr)
        {
            leaSrcCount++;
        }
        lea->gtLsraInfo.srcCount = leaSrcCount;
        lea->gtLsraInfo.dstCount = 1;
    }

#if NOGC_WRITE_BARRIERS
    NYI_ARM("NOGC_WRITE_BARRIERS");

    // For the NOGC JIT Helper calls:
    //
    // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
    // the 'src'  goes into x15 (REG_WRITE_BARRIER)
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
#else
    // For the standard JIT Helper calls:
    // op1 goes into REG_ARG_0 and
    // op2 goes into REG_ARG_1
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
#endif // NOGC_WRITE_BARRIERS

    // Both addr and src must reside in a register, which they should since we haven't set
    // either of them as contained.
    assert(addr->gtLsraInfo.dstCount == 1);
    assert(src->gtLsraInfo.dstCount == 1);
}
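
// Illustrative sketch (not from the original source): for a GC store such as
// "obj.field = ref" that is lowered to a write-barrier helper call, the address
// and value are pinned to the fixed registers chosen above, e.g. on ARM64 with
// NOGC_WRITE_BARRIERS:
//
//     mov x14, <addr>    ; REG_WRITE_BARRIER_DST_BYREF
//     mov x15, <ref>     ; REG_WRITE_BARRIER
//     bl  CORINFO_HELP_ASSIGN_REF
//
// With the standard helper, the operands land in REG_ARG_0/REG_ARG_1 instead.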
//------------------------------------------------------------------------
// TreeNodeInfoInitIndir: Specify register requirements for address expression
//                        of an indirection operation.
//
// Arguments:
//    indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
//
void Lowering::TreeNodeInfoInitIndir(GenTreeIndir* indirTree)
{
    ContainCheckIndir(indirTree);

    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
    // it has no register requirements.
    if (indirTree->TypeGet() == TYP_STRUCT)
    {
        return;
    }

    TreeNodeInfo* info    = &(indirTree->gtLsraInfo);
    bool          isStore = (indirTree->gtOper == GT_STOREIND);
    info->srcCount        = GetIndirSourceCount(indirTree);

    GenTree* addr  = indirTree->Addr();
    GenTree* index = nullptr;
    unsigned cns   = 0;

#ifdef _TARGET_ARM_
    // Unaligned loads/stores of floating point values must go through integer register(s) first.
    if (indirTree->gtFlags & GTF_IND_UNALIGNED)
    {
        var_types type = TYP_UNDEF;
        if (indirTree->OperGet() == GT_STOREIND)
        {
            type = indirTree->AsStoreInd()->Data()->TypeGet();
        }
        else if (indirTree->OperGet() == GT_IND)
        {
            type = indirTree->TypeGet();
        }

        if (type == TYP_FLOAT)
        {
            info->internalIntCount = 1;
        }
        else if (type == TYP_DOUBLE)
        {
            info->internalIntCount = 2;
        }
    }
#endif

    if (addr->isContained())
    {
        assert(addr->OperGet() == GT_LEA);
        GenTreeAddrMode* lea = addr->AsAddrMode();
        index                = lea->Index();
        cns                  = lea->gtOffset;

        // On ARM we may need a single internal register
        // (when both conditions are true then we still only need a single internal register)
        if ((index != nullptr) && (cns != 0))
        {
            // ARM does not support both Index and offset so we need an internal register
            info->internalIntCount++;
        }
        else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
        {
            // This offset can't be contained in the ldr/str instruction, so we need an internal register
            info->internalIntCount++;
        }
    }
}
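
// Illustrative sketch (not from the original source): why a contained LEA with
// both an index and a nonzero offset needs an internal register. For an address
// such as x0 + x1*8 + 0x20, the load/store encodings accept either a register
// offset or an immediate offset, but not both, so the reserved register holds
// the partial sum (x3 here is just a stand-in for whatever temp is allocated):
//
//     add x3, x0, #0x20          ; internal register
//     ldr x2, [x3, x1, LSL #3]
//
// The same applies when the immediate alone is out of encodable range for the
// access size.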
//------------------------------------------------------------------------
// TreeNodeInfoInitIndir: Specify register requirements for address expression
//                        of an indirection operation.
//
// Arguments:
//    indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
//
void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
{
    assert(indirTree->OperIsIndir());

    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
    // it has no register requirements.
    if (indirTree->TypeGet() == TYP_STRUCT)
    {
        return;
    }

    GenTreePtr    addr = indirTree->gtGetOp1();
    TreeNodeInfo* info = &(indirTree->gtLsraInfo);

    GenTreePtr base  = nullptr;
    GenTreePtr index = nullptr;
    unsigned   cns   = 0;
    unsigned   mul;
    bool       rev;
    bool       modifiedSources = false;
    bool       makeContained   = true;

    if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
    {
        GenTreeAddrMode* lea = addr->AsAddrMode();
        base                 = lea->Base();
        index                = lea->Index();
        cns                  = lea->gtOffset;

#ifdef _TARGET_ARM_
        // ARM floating-point load/store doesn't support a form similar to integer
        // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
        // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
        if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
        {
            if (indirTree->OperGet() == GT_STOREIND)
            {
                if (varTypeIsFloating(indirTree->AsStoreInd()->Data()))
                {
                    makeContained = false;
                }
            }
            else if (indirTree->OperGet() == GT_IND)
            {
                if (varTypeIsFloating(indirTree))
                {
                    makeContained = false;
                }
            }
        }
#endif

        if (makeContained)
        {
            m_lsra->clearOperandCounts(addr);
            addr->SetContained();

            // The srcCount is decremented because addr is now "contained";
            // we then account for the base and index below, if they are non-null.
            info->srcCount--;
        }
    }
    else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
             !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
    {
        // An addressing mode will be constructed that may cause some
        // nodes to not need a register, and cause others' lifetimes to be extended
        // to the GT_IND or even its parent if it's an assignment.

        assert(base != addr);
        m_lsra->clearOperandCounts(addr);
        addr->SetContained();

        // Traverse the computation below GT_IND to find the operands
        // for the addressing mode, marking the various constants and
        // intermediate results as not consuming/producing.
        // If the traversal were more complex, we might consider using
        // a traversal function, but the addressing mode is only made
        // up of simple arithmetic operators, and the code generator
        // only traverses one leg of each node.

        bool       foundBase  = (base == nullptr);
        bool       foundIndex = (index == nullptr);
        GenTreePtr nextChild  = nullptr;
        for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
        {
            nextChild      = nullptr;
            GenTreePtr op1 = child->gtOp.gtOp1;
            GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;

            if (op1 == base)
            {
                foundBase = true;
            }
            else if (op1 == index)
            {
                foundIndex = true;
            }
            else
            {
                m_lsra->clearOperandCounts(op1);
                op1->SetContained();
                if (!op1->OperIsLeaf())
                {
                    nextChild = op1;
                }
            }

            if (op2 != nullptr)
            {
                if (op2 == base)
                {
                    foundBase = true;
                }
                else if (op2 == index)
                {
                    foundIndex = true;
                }
                else
                {
                    m_lsra->clearOperandCounts(op2);
                    op2->SetContained();
                    if (!op2->OperIsLeaf())
                    {
                        assert(nextChild == nullptr);
                        nextChild = op2;
                    }
                }
            }
        }
        assert(foundBase && foundIndex);
        info->srcCount--; // it gets incremented below.
    }
    else if (addr->gtOper == GT_ARR_ELEM)
    {
        // The GT_ARR_ELEM consumes all the indices and produces the offset.
        // The array object lives until the mem access.
        // We also consume the target register in which the address is
        // computed.
        info->srcCount++;
        assert(addr->gtLsraInfo.srcCount >= 2);
        addr->gtLsraInfo.srcCount -= 1;
    }
    else
    {
        // it is nothing but a plain indir
        info->srcCount--; // base gets added in below
        base = addr;
    }

    if (!makeContained)
    {
        return;
    }

    if (base != nullptr)
    {
        info->srcCount++;
    }
    if (index != nullptr && !modifiedSources)
    {
        info->srcCount++;
    }

    // On ARM we may need a single internal register
    // (when both conditions are true then we still only need a single internal register)
    if ((index != nullptr) && (cns != 0))
    {
        // ARM does not support both Index and offset so we need an internal register
        info->internalIntCount = 1;
    }
    else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
    {
        // This offset can't be contained in the ldr/str instruction, so we need an internal register
        info->internalIntCount = 1;
    }
}
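
// Illustrative sketch (not from the original source): what the genCreateAddrMode
// path above does for an address computed as
//
//     GT_ADD
//       +-- GT_LCL_VAR  x0        (base)
//       +-- GT_LSH
//             +-- GT_LCL_VAR x1   (index)
//             +-- GT_CNS_INT 3    (scale)
//
// genCreateAddrMode recognizes base + index*8; the GT_ADD (the addr itself) is
// set contained up front, and the traversal then contains the GT_LSH and
// GT_CNS_INT (no registers, no operand counts), while the base and index
// locals stay live up to the indirection, which can emit a single
// "ldr x2, [x0, x1, LSL #3]".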
//------------------------------------------------------------------------
// BuildNode: Build the RefPositions for a node
//
// Arguments:
//    treeNode - the node of interest
//
// Return Value:
//    The number of sources consumed by this node.
//
// Notes:
// Preconditions:
//    LSRA has been initialized.
//
// Postconditions:
//    RefPositions have been built for all the register defs and uses required
//    for this node.
//
int LinearScan::BuildNode(GenTree* tree)
{
    assert(!tree->isContained());
    Interval* prefSrcInterval = nullptr;
    int       srcCount;
    int       dstCount      = 0;
    regMaskTP dstCandidates = RBM_NONE;
    regMaskTP killMask      = RBM_NONE;
    bool      isLocalDefUse = false;

    // Reset the build-related members of LinearScan.
    clearBuildState();

    RegisterType registerType = TypeGet(tree);

    // Set the default dstCount. This may be modified below.
    if (tree->IsValue())
    {
        dstCount = 1;
        if (tree->IsUnusedValue())
        {
            isLocalDefUse = true;
        }
    }
    else
    {
        dstCount = 0;
    }

    switch (tree->OperGet())
    {
        default:
            srcCount = BuildSimple(tree);
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
        {
            // We handle tracked variables differently from non-tracked ones. If it is tracked,
            // we will simply add a use of the tracked variable at its parent/consumer.
            // Otherwise, for a use we need to actually add the appropriate references for loading
            // or storing the variable.
            //
            // A tracked variable won't actually get used until the appropriate ancestor tree node
            // is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument
            // to a call or an orphaned dead node.
            //
            LclVarDsc* const varDsc = &compiler->lvaTable[tree->AsLclVarCommon()->gtLclNum];
            if (isCandidateVar(varDsc))
            {
                INDEBUG(dumpNodeInfo(tree, dstCandidates, 0, 1));
                return 0;
            }
            srcCount = 0;
#ifdef FEATURE_SIMD
            // Need an additional register to read upper 4 bytes of Vector3.
            if (tree->TypeGet() == TYP_SIMD12)
            {
                // We need an internal register different from targetReg in which 'tree' produces its result
                // because both targetReg and internal reg will be in use at the same time.
                buildInternalFloatRegisterDefForNode(tree, allSIMDRegs());
                setInternalRegsDelayFree = true;
                buildInternalRegisterUses();
            }
#endif
            BuildDef(tree);
        }
        break;

        case GT_STORE_LCL_FLD:
        case GT_STORE_LCL_VAR:
            assert(dstCount == 0);
            srcCount = BuildStoreLoc(tree->AsLclVarCommon());
            break;

        case GT_FIELD_LIST:
            // These should always be contained. We don't correctly allocate or
            // generate code for a non-contained GT_FIELD_LIST.
            noway_assert(!"Non-contained GT_FIELD_LIST");
            srcCount = 0;
            break;

        case GT_LIST:
        case GT_ARGPLACE:
        case GT_NO_OP:
        case GT_START_NONGC:
        case GT_PROF_HOOK:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_START_PREEMPTGC:
            // This kills GC refs in callee save regs
            srcCount = 0;
            assert(dstCount == 0);
            BuildDefsWithKills(tree, 0, RBM_NONE, RBM_NONE);
            break;

        case GT_CNS_DBL:
        {
            GenTreeDblCon* dblConst   = tree->AsDblCon();
            double         constValue = dblConst->gtDblCon.gtDconVal;

            if (emitter::emitIns_valid_imm_for_fmov(constValue))
            {
                // Directly encode constant to instructions.
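                // Illustrative example (not from the original source): constants
                // expressible as a small fraction times a small power of two,
                // such as 1.0, 0.5, or -2.0, fit the fmov immediate encoding and
                // need no memory access:
                //
                //     fmov d0, #1.0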
            }
            else
            {
                // Reserve int to load constant from memory (IF_LARGELDC)
                buildInternalIntRegisterDefForNode(tree);
                buildInternalRegisterUses();
            }
        }
            __fallthrough;

        case GT_CNS_INT:
        {
            srcCount = 0;
            assert(dstCount == 1);
            RefPosition* def               = BuildDef(tree);
            def->getInterval()->isConstant = true;
        }
        break;

        case GT_BOX:
        case GT_COMMA:
        case GT_QMARK:
        case GT_COLON:
            srcCount = 0;
            assert(dstCount == 0);
            unreached();
            break;

        case GT_RETURN:
            srcCount = BuildReturn(tree);
            break;

        case GT_RETFILT:
            assert(dstCount == 0);
            if (tree->TypeGet() == TYP_VOID)
            {
                srcCount = 0;
            }
            else
            {
                assert(tree->TypeGet() == TYP_INT);
                srcCount = 1;
                BuildUse(tree->gtGetOp1(), RBM_INTRET);
            }
            break;

        case GT_NOP:
            // A GT_NOP is a passthrough if it is void or has a child, but it must
            // be considered to produce a dummy value if it has a type and no child.
            srcCount = 0;
            if (tree->TypeGet() != TYP_VOID && tree->gtGetOp1() == nullptr)
            {
                assert(dstCount == 1);
                BuildDef(tree);
            }
            else
            {
                assert(dstCount == 0);
            }
            break;

        case GT_JTRUE:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_JMP:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_SWITCH:
            // This should never occur since switch nodes must not be visible at this
            // point in the JIT.
            srcCount = 0;
            noway_assert(!"Switch must be lowered at this point");
            break;

        case GT_JMPTABLE:
            srcCount = 0;
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_SWITCH_TABLE:
            buildInternalIntRegisterDefForNode(tree);
            srcCount = BuildBinaryUses(tree->AsOp());
            assert(dstCount == 0);
            break;

        case GT_ASG:
            noway_assert(!"We should never hit any assignment operator in lowering");
            srcCount = 0;
            break;

        case GT_ADD:
        case GT_SUB:
            if (varTypeIsFloating(tree->TypeGet()))
            {
                // overflow operations aren't supported on float/double types.
                assert(!tree->gtOverflow());

                // No implicit conversions at this stage as the expectation is that
                // everything is made explicit by adding casts.
                assert(tree->gtGetOp1()->TypeGet() == tree->gtGetOp2()->TypeGet());
            }

            __fallthrough;

        case GT_AND:
        case GT_OR:
        case GT_XOR:
        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROR:
            srcCount = BuildBinaryUses(tree->AsOp());
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_RETURNTRAP:
            // This just turns into a compare of its child with an int
            // + a conditional call.
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            assert(dstCount == 0);
            killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
            BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
            break;

        case GT_MOD:
        case GT_UMOD:
            NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
            assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
            srcCount = 0;
            break;

        case GT_MUL:
            if (tree->gtOverflow())
            {
                // Need a register different from target reg to check for overflow.
                buildInternalIntRegisterDefForNode(tree);
                setInternalRegsDelayFree = true;
            }
            __fallthrough;

        case GT_DIV:
        case GT_MULHI:
        case GT_UDIV:
        {
            srcCount = BuildBinaryUses(tree->AsOp());
            buildInternalRegisterUses();
            assert(dstCount == 1);
            BuildDef(tree);
        }
        break;

        case GT_INTRINSIC:
        {
            noway_assert((tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Ceiling) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Floor) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt));

            // Both the operand and its result must be of the same floating point type.
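            // Illustrative example (not from the original source):
            // CORINFO_INTRINSIC_Sqrt on a TYP_DOUBLE operand yields a TYP_DOUBLE
            // result and maps to a single "fsqrt d0, d1" on ARM64, so one use and
            // one def suffice.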
            GenTree* op1 = tree->gtGetOp1();

            assert(varTypeIsFloating(op1));
            assert(op1->TypeGet() == tree->TypeGet());

            BuildUse(op1);
            srcCount = 1;
            assert(dstCount == 1);
            BuildDef(tree);
        }
        break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            srcCount = BuildSIMD(tree->AsSIMD());
            break;
#endif // FEATURE_SIMD

#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            srcCount = BuildHWIntrinsic(tree->AsHWIntrinsic());
            break;
#endif // FEATURE_HW_INTRINSICS

        case GT_CAST:
            assert(dstCount == 1);
            srcCount = BuildCast(tree->AsCast());
            break;

        case GT_NEG:
        case GT_NOT:
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_JCMP:
            srcCount = BuildCmp(tree);
            break;

        case GT_CKFINITE:
            srcCount = 1;
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            BuildUse(tree->gtGetOp1());
            BuildDef(tree);
            buildInternalRegisterUses();
            break;

        case GT_CMPXCHG:
        {
            GenTreeCmpXchg* cmpXchgNode = tree->AsCmpXchg();
            srcCount                    = cmpXchgNode->gtOpComparand->isContained() ? 2 : 3;
            assert(dstCount == 1);

            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // ARMv8 exclusives require a single internal register.
                buildInternalIntRegisterDefForNode(tree);
            }

            // For ARMv8 exclusives the lifetime of the addr and data must be extended because
            // they may be used multiple times during retries.
            // For ARMv8.1 atomic cas the lifetime of the addr and data must be extended to prevent
            // them being reused as the target register, which must be destroyed early.
            RefPosition* locationUse = BuildUse(tree->gtCmpXchg.gtOpLocation);
            setDelayFree(locationUse);
            RefPosition* valueUse = BuildUse(tree->gtCmpXchg.gtOpValue);
            setDelayFree(valueUse);
            if (!cmpXchgNode->gtOpComparand->isContained())
            {
                RefPosition* comparandUse = BuildUse(tree->gtCmpXchg.gtOpComparand);

                // For ARMv8 exclusives the lifetime of the comparand must be extended because
                // it may be used multiple times during retries.
                if (!compiler->compSupports(InstructionSet_Atomics))
                {
                    setDelayFree(comparandUse);
                }
            }

            // Internals may not collide with target.
            setInternalRegsDelayFree = true;
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_LOCKADD:
        case GT_XADD:
        case GT_XCHG:
        {
            assert(dstCount == ((tree->TypeGet() == TYP_VOID) ? 0 : 1));
            srcCount = tree->gtGetOp2()->isContained() ? 1 : 2;

            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // GT_XCHG requires a single internal register; the others require two.
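                // Illustrative sketch (not from the original source): without
                // ARMv8.1 atomics, GT_XADD expands to an exclusive-monitor retry
                // loop, which re-reads its operands on each iteration (hence the
                // delay-free constraints below):
                //
                // retry:
                //     ldaxr w2, [x0]        ; load-exclusive
                //     add   w3, w2, w1      ; uses the internal registers
                //     stlxr w4, w3, [x0]    ; store-exclusive
                //     cbnz  w4, retry       ; retry if the monitor was lost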
                buildInternalIntRegisterDefForNode(tree);
                if (tree->OperGet() != GT_XCHG)
                {
                    buildInternalIntRegisterDefForNode(tree);
                }
            }

            assert(!tree->gtGetOp1()->isContained());
            RefPosition* op1Use = BuildUse(tree->gtGetOp1());
            RefPosition* op2Use = nullptr;
            if (!tree->gtGetOp2()->isContained())
            {
                op2Use = BuildUse(tree->gtGetOp2());
            }

            // For ARMv8 exclusives the lifetime of the addr and data must be extended because
            // they may be used multiple times during retries.
            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // Internals may not collide with target.
                if (dstCount == 1)
                {
                    setDelayFree(op1Use);
                    if (op2Use != nullptr)
                    {
                        setDelayFree(op2Use);
                    }
                    setInternalRegsDelayFree = true;
                }
                buildInternalRegisterUses();
            }
            if (dstCount == 1)
            {
                BuildDef(tree);
            }
        }
        break;

#if FEATURE_ARG_SPLIT
        case GT_PUTARG_SPLIT:
            srcCount = BuildPutArgSplit(tree->AsPutArgSplit());
            dstCount = tree->AsPutArgSplit()->gtNumRegs;
            break;
#endif // FEATURE_ARG_SPLIT

        case GT_PUTARG_STK:
            srcCount = BuildPutArgStk(tree->AsPutArgStk());
            break;

        case GT_PUTARG_REG:
            srcCount = BuildPutArgReg(tree->AsUnOp());
            break;

        case GT_CALL:
            srcCount = BuildCall(tree->AsCall());
            if (tree->AsCall()->HasMultiRegRetVal())
            {
                dstCount = tree->AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
            }
            break;

        case GT_ADDR:
        {
            // For a GT_ADDR, the child node should not be evaluated into a register.
            GenTree* child = tree->gtGetOp1();
            assert(!isCandidateLocalRef(child));
            assert(child->isContained());
            assert(dstCount == 1);
            srcCount = 0;
            BuildDef(tree);
        }
        break;

        case GT_BLK:
        case GT_DYN_BLK:
            // These should all be eliminated prior to Lowering.
            assert(!"Non-store block node in Lowering");
            srcCount = 0;
            break;

        case GT_STORE_BLK:
        case GT_STORE_OBJ:
        case GT_STORE_DYN_BLK:
            srcCount = BuildBlockStore(tree->AsBlk());
            break;

        case GT_INIT_VAL:
            // Always a passthrough of its child's value.
            assert(!"INIT_VAL should always be contained");
            srcCount = 0;
            break;

        case GT_LCLHEAP:
        {
            assert(dstCount == 1);

            // Need a variable number of temp regs (see genLclHeap() in the ARM64 codegen).
            // Here '-' means don't care.
            //
            //  Size?                     Init Memory?    # temp regs
            //   0                        -               0
            //   const and <=6 ptr words  -               0
            //   const and <PageSize      No              0
            //   >6 ptr words             Yes             0
            //   Non-const                Yes             0
            //   Non-const                No              2
            //
            GenTree* size = tree->gtGetOp1();
            if (size->IsCnsIntOrI())
            {
                assert(size->isContained());
                srcCount = 0;

                size_t sizeVal = size->gtIntCon.gtIconVal;

                if (sizeVal != 0)
                {
                    // Compute the amount of memory to properly STACK_ALIGN.
                    // Note: The GenTree node is not updated here as it is cheap to recompute the stack-aligned size.
                    // This should also help in debugging as we can examine the original size specified with
                    // localloc.
                    sizeVal         = AlignUp(sizeVal, STACK_ALIGN);
                    size_t stpCount = sizeVal / (REGSIZE_BYTES * 2);

                    // For small allocations up to 4 'stp' instructions (i.e. 16 to 64 bytes of localloc)
                    //
                    if (stpCount <= 4)
                    {
                        // Need no internal registers
                    }
                    else if (!compiler->info.compInitMem)
                    {
                        // No need to initialize allocated stack space.
                        if (sizeVal < compiler->eeGetPageSize())
                        {
                            // Need no internal registers
                        }
                        else
                        {
                            // We need two registers: regCnt and RegTmp
                            buildInternalIntRegisterDefForNode(tree);
                            buildInternalIntRegisterDefForNode(tree);
                        }
                    }
                }
            }
            else
            {
                srcCount = 1;
                if (!compiler->info.compInitMem)
                {
                    buildInternalIntRegisterDefForNode(tree);
                    buildInternalIntRegisterDefForNode(tree);
                }
            }

            if (!size->isContained())
            {
                BuildUse(size);
            }
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
        case GT_SIMD_CHK:
#endif // FEATURE_SIMD
        {
            GenTreeBoundsChk* node = tree->AsBoundsChk();
            // Consumes arrLen & index - has no result.
            assert(dstCount == 0);
            srcCount = BuildOperandUses(node->gtIndex);
            srcCount += BuildOperandUses(node->gtArrLen);
        }
        break;

        case GT_ARR_ELEM:
            // These must have been lowered to GT_ARR_INDEX
            noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_ARR_INDEX:
        {
            srcCount = 2;
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            setInternalRegsDelayFree = true;

            // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
            // times while the result is being computed.
            RefPosition* arrObjUse = BuildUse(tree->AsArrIndex()->ArrObj());
            setDelayFree(arrObjUse);
            BuildUse(tree->AsArrIndex()->IndexExpr());
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_ARR_OFFSET:
            // This consumes the offset, if any, the arrObj and the effective index,
            // and produces the flattened offset for this dimension.
            srcCount = 2;
            if (!tree->gtArrOffs.gtOffset->isContained())
            {
                BuildUse(tree->AsArrOffs()->gtOffset);
                srcCount++;
            }
            BuildUse(tree->AsArrOffs()->gtIndex);
            BuildUse(tree->AsArrOffs()->gtArrObj);
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

        case GT_LEA:
        {
            GenTreeAddrMode* lea = tree->AsAddrMode();

            GenTree* base  = lea->Base();
            GenTree* index = lea->Index();
            int      cns   = lea->Offset();

            // This LEA is instantiating an address, so we set up the srcCount here.
            srcCount = 0;
            if (base != nullptr)
            {
                srcCount++;
                BuildUse(base);
            }
            if (index != nullptr)
            {
                srcCount++;
                BuildUse(index);
            }
            assert(dstCount == 1);

            // On ARM64 we may need a single internal register
            // (when both conditions are true then we still only need a single internal register)
            if ((index != nullptr) && (cns != 0))
            {
                // ARM64 does not support both Index and offset so we need an internal register
                buildInternalIntRegisterDefForNode(tree);
            }
            else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
            {
                // This offset can't be contained in the add instruction, so we need an internal register
                buildInternalIntRegisterDefForNode(tree);
            }
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_STOREIND:
        {
            assert(dstCount == 0);

            if (compiler->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(tree))
            {
                srcCount = BuildGCWriteBarrier(tree);
                break;
            }

            srcCount = BuildIndir(tree->AsIndir());
            if (!tree->gtGetOp2()->isContained())
            {
                BuildUse(tree->gtGetOp2());
                srcCount++;
            }
        }
        break;

        case GT_NULLCHECK:
            // Unlike ARM, ARM64 implements NULLCHECK as a load to REG_ZR, so no internal register
            // is required, and it is not a localDefUse.
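            // Illustrative example (not from the original source): a null check
            // on x0 can be emitted as
            //     ldr wzr, [x0]
            // which faults if x0 is null while discarding the loaded value.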
            assert(dstCount == 0);
            assert(!tree->gtGetOp1()->isContained());
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            break;

        case GT_IND:
            assert(dstCount == 1);
            srcCount = BuildIndir(tree->AsIndir());
            break;

        case GT_CATCH_ARG:
            srcCount = 0;
            assert(dstCount == 1);
            BuildDef(tree, RBM_EXCEPTION_OBJECT);
            break;

        case GT_CLS_VAR:
            srcCount = 0;
            // GT_CLS_VAR, by the time we reach the backend, must always
            // be a pure use.
            // It will produce a result of the type of the node, and use
            // an internal register for the address.
            assert(dstCount == 1);
            assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG)) == 0);
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

        case GT_INDEX_ADDR:
            assert(dstCount == 1);
            srcCount = BuildBinaryUses(tree->AsOp());
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

    } // end switch (tree->OperGet())

    if (tree->IsUnusedValue() && (dstCount != 0))
    {
        isLocalDefUse = true;
    }

    // We need to be sure that we've set srcCount and dstCount appropriately.
    assert((dstCount < 2) || tree->IsMultiRegCall());
    assert(isLocalDefUse == (tree->IsValue() && tree->IsUnusedValue()));
    assert(!tree->IsUnusedValue() || (dstCount != 0));
    assert(dstCount == tree->GetRegisterDstCount());
    INDEBUG(dumpNodeInfo(tree, dstCandidates, srcCount, dstCount));
    return srcCount;
}
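
// Illustrative sketch (not from the original source): how BuildNode's contract
// plays out for a simple integer GT_ADD with two register operands. BuildNode
// reports srcCount == 2 (one use RefPosition per operand via BuildBinaryUses)
// and dstCount == 1 (one def RefPosition from BuildDef), satisfying the
// closing asserts above. If one operand were a contained immediate, the result
// would instead be srcCount == 1, since contained nodes get no RefPositions of
// their own.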