//------------------------------------------------------------------------ // ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) { assert(storeLoc->OperIsLocalStore()); GenTree* op1 = storeLoc->gtGetOp1(); #ifdef FEATURE_SIMD if (varTypeIsSIMD(storeLoc)) { if (op1->IsIntegralConst(0)) { // For an InitBlk we want op1 to be contained MakeSrcContained(storeLoc, op1); } return; } #endif // FEATURE_SIMD // If the source is a containable immediate, make it contained, unless it is // an int-size or larger store of zero to memory, because we can generate smaller code // by zeroing a register and then storing it. if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(storeLoc))) { MakeSrcContained(storeLoc, op1); } #ifdef _TARGET_ARM_ else if (op1->OperGet() == GT_LONG) { MakeSrcContained(storeLoc, op1); } #endif // _TARGET_ARM_ }
//------------------------------------------------------------------------ // TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node // // Arguments: // argNode - a GT_PUTARG_STK node // // Return Value: // None. // // Notes: // Set the child node(s) to be contained when we have a multireg arg // void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info) { assert(argNode->gtOper == GT_PUTARG_STK); GenTreePtr putArgChild = argNode->gtOp.gtOp1; // Initialize 'argNode' as not contained, as this is both the default case // and how MakeSrcContained expects to find things setup. // argNode->gtLsraInfo.srcCount = 1; argNode->gtLsraInfo.dstCount = 0; // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST), if so it must be a multireg pass-by-value struct if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST)) { // We will use store instructions that each write a register sized value if (putArgChild->OperGet() == GT_FIELD_LIST) { // We consume all of the items in the GT_FIELD_LIST argNode->gtLsraInfo.srcCount = info->numSlots; putArgChild->SetContained(); } else { #ifdef _TARGET_ARM64_ // We could use a ldp/stp sequence so we need two internal registers argNode->gtLsraInfo.internalIntCount = 2; #else // _TARGET_ARM_ // We could use a ldr/str sequence so we need a internal register argNode->gtLsraInfo.internalIntCount = 1; #endif // _TARGET_ARM_ if (putArgChild->OperGet() == GT_OBJ) { GenTreePtr objChild = putArgChild->gtOp.gtOp1; if (objChild->OperGet() == GT_LCL_VAR_ADDR) { // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR // as one contained operation // MakeSrcContained(putArgChild, objChild); } } // We will generate all of the code for the GT_PUTARG_STK and it's child node // as one contained operation // MakeSrcContained(argNode, putArgChild); } } else { // We must not have a multi-reg struct assert(info->numSlots == 1); } }
//---------------------------------------------------------------------------------------------- // ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node. // // Arguments: // simdNode - The SIMD intrinsic node. // void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode) { switch (simdNode->gtSIMDIntrinsicID) { GenTree* op1; GenTree* op2; case SIMDIntrinsicInit: op1 = simdNode->gtOp.gtOp1; if (op1->IsIntegralConst(0)) { MakeSrcContained(simdNode, op1); } break; case SIMDIntrinsicInitArray: // We have an array and an index, which may be contained. CheckImmedAndMakeContained(simdNode, simdNode->gtGetOp2()); break; case SIMDIntrinsicOpEquality: case SIMDIntrinsicOpInEquality: // TODO-ARM64-CQ Support containing 0 break; case SIMDIntrinsicGetItem: { // This implements get_Item method. The sources are: // - the source SIMD struct // - index (which element to get) // The result is baseType of SIMD struct. op1 = simdNode->gtOp.gtOp1; op2 = simdNode->gtOp.gtOp2; // If the index is a constant, mark it as contained. if (op2->IsCnsIntOrI()) { MakeSrcContained(simdNode, op2); } if (IsContainableMemoryOp(op1)) { MakeSrcContained(simdNode, op1); if (op1->OperGet() == GT_IND) { op1->AsIndir()->Addr()->ClearContained(); } } break; } default: break; } }
//---------------------------------------------------------------------------------------------- // ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node. // // Arguments: // node - The hardware intrinsic node. // void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node) { NamedIntrinsic intrinsicID = node->gtHWIntrinsicId; GenTreeArgList* argList = nullptr; GenTree* op1 = node->gtOp.gtOp1; GenTree* op2 = node->gtOp.gtOp2; if (op1->OperIs(GT_LIST)) { argList = op1->AsArgList(); op1 = argList->Current(); op2 = argList->Rest()->Current(); } switch (HWIntrinsicInfo::lookup(node->gtHWIntrinsicId).form) { case HWIntrinsicInfo::SimdExtractOp: if (op2->IsCnsIntOrI()) { MakeSrcContained(node, op2); } break; case HWIntrinsicInfo::SimdInsertOp: if (op2->IsCnsIntOrI()) { MakeSrcContained(node, op2); GenTree* op3 = argList->Rest()->Rest()->Current(); // In the HW intrinsics C# API there is no direct way to specify a vector element to element mov // VX[a] = VY[b] // In C# this would naturally be expressed by // Insert(VX, a, Extract(VY, b)) // If both a & b are immediate constants contain the extract/getItem so that we can emit // the single instruction mov Vx[a], Vy[b] if (op3->OperIs(GT_HWIntrinsic) && (op3->AsHWIntrinsic()->gtHWIntrinsicId == NI_ARM64_SIMD_GetItem)) { ContainCheckHWIntrinsic(op3->AsHWIntrinsic()); if (op3->gtOp.gtOp2->isContained()) { MakeSrcContained(node, op3); } } } break; default: break; } }
//------------------------------------------------------------------------ // ContainCheckIndir: Determine whether operands of an indir should be contained. // // Arguments: // indirNode - The indirection node of interest // // Notes: // This is called for both store and load indirections. // // Return Value: // None. // void Lowering::ContainCheckIndir(GenTreeIndir* indirNode) { // If this is the rhs of a block copy it will be handled when we handle the store. if (indirNode->TypeGet() == TYP_STRUCT) { return; } #ifdef FEATURE_SIMD // If indirTree is of TYP_SIMD12, don't mark addr as contained // so that it always get computed to a register. This would // mean codegen side logic doesn't need to handle all possible // addr expressions that could be contained. // // TODO-ARM64-CQ: handle other addr mode expressions that could be marked // as contained. if (indirNode->TypeGet() == TYP_SIMD12) { return; } #endif // FEATURE_SIMD GenTree* addr = indirNode->Addr(); bool makeContained = true; if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr)) { GenTreeAddrMode* lea = addr->AsAddrMode(); GenTree* base = lea->Base(); GenTree* index = lea->Index(); int cns = lea->Offset(); #ifdef _TARGET_ARM_ // ARM floating-point load/store doesn't support a form similar to integer // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm. if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns)) { if (indirNode->OperGet() == GT_STOREIND) { if (varTypeIsFloating(indirNode->AsStoreInd()->Data())) { makeContained = false; } } else if (indirNode->OperGet() == GT_IND) { if (varTypeIsFloating(indirNode)) { makeContained = false; } } } #endif if (makeContained) { MakeSrcContained(indirNode, addr); } } }
//------------------------------------------------------------------------ // ContainCheckShiftRotate: Determine whether a mul op's operands should be contained. // // Arguments: // node - the node we care about // void Lowering::ContainCheckShiftRotate(GenTreeOp* node) { GenTree* shiftBy = node->gtOp2; assert(node->OperIsShiftOrRotate()); #ifdef _TARGET_ARM_ GenTree* source = node->gtOp1; if (node->OperIs(GT_LSH_HI, GT_RSH_LO)) { assert(source->OperGet() == GT_LONG); MakeSrcContained(node, source); } #endif // _TARGET_ARM_ if (shiftBy->IsCnsIntOrI()) { MakeSrcContained(node, shiftBy); } }
//------------------------------------------------------------------------ // ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckStoreIndir(GenTreeIndir* node) { #ifdef _TARGET_ARM64_ GenTree* src = node->gtOp.gtOp2; if (!varTypeIsFloating(src->TypeGet()) && src->IsIntegralConst(0)) { // an integer zero for 'src' can be contained. MakeSrcContained(node, src); } #endif // _TARGET_ARM64_ ContainCheckIndir(node); }
//------------------------------------------------------------------------ // ContainCheckCast: determine whether the source of a CAST node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckCast(GenTreeCast* node) { #ifdef _TARGET_ARM_ GenTree* castOp = node->CastOp(); var_types castToType = node->CastToType(); var_types srcType = castOp->TypeGet(); if (varTypeIsLong(castOp)) { assert(castOp->OperGet() == GT_LONG); MakeSrcContained(node, castOp); } #endif // _TARGET_ARM_ }
//------------------------------------------------------------------------ // ContainCheckIndir: Determine whether operands of an indir should be contained. // // Arguments: // indirNode - The indirection node of interest // // Notes: // This is called for both store and load indirections. // // Return Value: // None. // void Lowering::ContainCheckIndir(GenTreeIndir* indirNode) { // If this is the rhs of a block copy it will be handled when we handle the store. if (indirNode->TypeGet() == TYP_STRUCT) { return; } GenTree* addr = indirNode->Addr(); bool makeContained = true; if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr)) { GenTreeAddrMode* lea = addr->AsAddrMode(); GenTree* base = lea->Base(); GenTree* index = lea->Index(); int cns = lea->Offset(); #ifdef _TARGET_ARM_ // ARM floating-point load/store doesn't support a form similar to integer // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm. if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns)) { if (indirNode->OperGet() == GT_STOREIND) { if (varTypeIsFloating(indirNode->AsStoreInd()->Data())) { makeContained = false; } } else if (indirNode->OperGet() == GT_IND) { if (varTypeIsFloating(indirNode)) { makeContained = false; } } } #endif if (makeContained) { MakeSrcContained(indirNode, addr); } } }
//------------------------------------------------------------------------ // TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate. // // Arguments: // tree - The node of interest // // Return Value: // None. // void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree) { TreeNodeInfo* info = &(tree->gtLsraInfo); LinearScan* l = m_lsra; info->srcCount = 2; info->dstCount = 1; GenTreePtr shiftBy = tree->gtOp.gtOp2; GenTreePtr source = tree->gtOp.gtOp1; if (shiftBy->IsCnsIntOrI()) { MakeSrcContained(tree, shiftBy); } #ifdef _TARGET_ARM_ // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that // we can have a three operand form. Increment the srcCount. if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO) { assert(source->OperGet() == GT_LONG); source->SetContained(); info->srcCount++; if (tree->OperGet() == GT_LSH_HI) { GenTreePtr sourceLo = source->gtOp.gtOp1; sourceLo->gtLsraInfo.isDelayFree = true; } else { GenTreePtr sourceHi = source->gtOp.gtOp2; sourceHi->gtLsraInfo.isDelayFree = true; } source->gtLsraInfo.hasDelayFreeSrc = true; info->hasDelayFreeSrc = true; } #endif // _TARGET_ARM_ }
//------------------------------------------------------------------------ // TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store. // // Arguments: // blkNode - The block store node of interest // // Return Value: // None. // void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); unsigned size = blkNode->gtBlkSize; GenTree* source = blkNode->Data(); LinearScan* l = m_lsra; Compiler* compiler = comp; // Sources are dest address and initVal or source. // We may require an additional source or temp register for the size. blkNode->gtLsraInfo.srcCount = 2; blkNode->gtLsraInfo.dstCount = 0; GenTreePtr srcAddrOrFill = nullptr; bool isInitBlk = blkNode->OperIsInitBlkOp(); if (!isInitBlk) { // CopyObj or CopyBlk if (source->gtOper == GT_IND) { srcAddrOrFill = blkNode->Data()->gtGetOp1(); // We're effectively setting source as contained, but can't call MakeSrcContained, because the // "inheritance" of the srcCount is to a child not a parent - it would "just work" but could be misleading. // If srcAddr is already non-contained, we don't need to change it. if (srcAddrOrFill->gtLsraInfo.getDstCount() == 0) { srcAddrOrFill->gtLsraInfo.setDstCount(1); srcAddrOrFill->gtLsraInfo.setSrcCount(source->gtLsraInfo.srcCount); } m_lsra->clearOperandCounts(source); source->SetContained(); source->AsIndir()->Addr()->ClearContained(); } else if (!source->IsMultiRegCall() && !source->OperIsSIMD()) { assert(source->IsLocal()); MakeSrcContained(blkNode, source); blkNode->gtLsraInfo.srcCount--; } } if (isInitBlk) { GenTreePtr initVal = source; if (initVal->OperIsInitVal()) { initVal->SetContained(); initVal = initVal->gtGetOp1(); } srcAddrOrFill = initVal; if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll) { // TODO-ARM-CQ: Currently we generate a helper call for every // initblk we encounter. Later on we should implement loop unrolling // code sequences to improve CQ. // For reference see the code in lsraxarch.cpp. 
NYI_ARM("initblk loop unrolling is currently not implemented."); #ifdef _TARGET_ARM64_ // No additional temporaries required ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF; if (fill == 0) { MakeSrcContained(blkNode, source); blkNode->gtLsraInfo.srcCount--; } #endif // _TARGET_ARM64_ } else { assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper); // The helper follows the regular ABI. dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0); initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1); if (size != 0) { // Reserve a temp register for the block size argument. blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2); blkNode->gtLsraInfo.internalIntCount = 1; } else { // The block size argument is a third argument to GT_STORE_DYN_BLK noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK); blkNode->gtLsraInfo.setSrcCount(3); GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize; sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2); } } } else { // CopyObj or CopyBlk // Sources are src and dest and size if not constant. if (blkNode->OperGet() == GT_STORE_OBJ) { // CopyObj // We don't need to materialize the struct size but we still need // a temporary register to perform the sequence of loads and stores. blkNode->gtLsraInfo.internalIntCount = 1; if (size >= 2 * REGSIZE_BYTES) { // We will use ldp/stp to reduce code size and improve performance // so we need to reserve an extra internal register blkNode->gtLsraInfo.internalIntCount++; } // We can't use the special Write Barrier registers, so exclude them from the mask regMaskTP internalIntCandidates = RBM_ALLINT & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF); blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates); // If we have a dest address we want it in RBM_WRITE_BARRIER_DST_BYREF. dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_DST_BYREF); // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF. 
// Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF, // which is killed by a StoreObj (and thus needn't be reserved). if (srcAddrOrFill != nullptr) { srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_SRC_BYREF); } } else { // CopyBlk short internalIntCount = 0; regMaskTP internalIntCandidates = RBM_NONE; if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll) { // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented. // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size // we should unroll the loop to improve CQ. // For reference see the code in lsraxarch.cpp. NYI_ARM("cpblk loop unrolling is currently not implemented."); #ifdef _TARGET_ARM64_ internalIntCount = 1; internalIntCandidates = RBM_ALLINT; if (size >= 2 * REGSIZE_BYTES) { // We will use ldp/stp to reduce code size and improve performance // so we need to reserve an extra internal register internalIntCount++; } #endif // _TARGET_ARM64_ } else { assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper); dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0); // The srcAddr goes in arg1. if (srcAddrOrFill != nullptr) { srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1); } if (size != 0) { // Reserve a temp register for the block size argument. internalIntCandidates |= RBM_ARG_2; internalIntCount++; } else { // The block size argument is a third argument to GT_STORE_DYN_BLK noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK); blkNode->gtLsraInfo.setSrcCount(3); GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize; blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2); } } if (internalIntCount != 0) { blkNode->gtLsraInfo.internalIntCount = internalIntCount; blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates); } } } }
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node
//
// Arguments:
//    argNode - a GT_PUTARG_SPLIT node
//    info    - the TreeNodeInfo of the call consuming this argument
//    argInfo - the arg table entry (supplies numRegs, numSlots, regNum)
//
// Return Value:
//    None.
//
// Notes:
//    Set the child node(s) to be contained
//
void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode, TreeNodeInfo& info, fgArgTabEntryPtr argInfo)
{
    assert(argNode->gtOper == GT_PUTARG_SPLIT);

    GenTreePtr putArgChild = argNode->gtOp.gtOp1;

    // Registers for split argument corresponds to source
    argNode->gtLsraInfo.dstCount = argInfo->numRegs;
    info.srcCount += argInfo->numRegs;

    // Build the mask of consecutive argument registers the split arg occupies.
    regNumber argReg  = argInfo->regNum;
    regMaskTP argMask = RBM_NONE;
    for (unsigned i = 0; i < argInfo->numRegs; i++)
    {
        argMask |= genRegMask((regNumber)((unsigned)argReg + i));
    }
    argNode->gtLsraInfo.setDstCandidates(m_lsra, argMask);

    if (putArgChild->OperGet() == GT_FIELD_LIST)
    {
        // Generated code:
        // 1. Consume all of the items in the GT_FIELD_LIST (source)
        // 2. Store to target slot and move to target registers (destination) from source
        //
        argNode->gtLsraInfo.srcCount = argInfo->numRegs + argInfo->numSlots;

        // To avoid redundant moves, have the argument operand computed in the
        // register in which the argument is passed to the call.
        GenTreeFieldList* fieldListPtr = putArgChild->AsFieldList();
        for (unsigned idx = 0; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), idx++)
        {
            // Only the first numRegs fields go in registers; the rest go to stack slots.
            if (idx < argInfo->numRegs)
            {
                GenTreePtr node = fieldListPtr->gtGetOp1();
                node->gtLsraInfo.setSrcCandidates(m_lsra, genRegMask((regNumber)((unsigned)argReg + idx)));
            }
        }
        putArgChild->SetContained();
    }
    else
    {
        assert(putArgChild->TypeGet() == TYP_STRUCT);
        assert(putArgChild->OperGet() == GT_OBJ);

        // We could use a ldr/str sequence so we need a internal register
        argNode->gtLsraInfo.srcCount         = 1;
        argNode->gtLsraInfo.internalIntCount = 1;
        // The internal register must not alias any of the argument registers.
        regMaskTP internalMask = RBM_ALLINT & ~argMask;
        argNode->gtLsraInfo.setInternalCandidates(m_lsra, internalMask);

        GenTreePtr objChild = putArgChild->gtOp.gtOp1;
        if (objChild->OperGet() == GT_LCL_VAR_ADDR)
        {
            // We will generate all of the code for the GT_PUTARG_SPLIT, the GT_OBJ and the GT_LCL_VAR_ADDR
            // as one contained operation
            //
            MakeSrcContained(putArgChild, objChild);
            putArgChild->gtLsraInfo.srcCount--;
        }
        // The split node inherits the (possibly reduced) source count of its contained child.
        argNode->gtLsraInfo.srcCount = putArgChild->gtLsraInfo.srcCount;
        MakeSrcContained(argNode, putArgChild);
    }
}
//------------------------------------------------------------------------
// LowerBlockStore: Set block store type
//
// Arguments:
//    blkNode       - The block store node of interest
//
// Return Value:
//    None.
//
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
    GenTree*  dstAddr  = blkNode->Addr();
    unsigned  size     = blkNode->gtBlkSize;
    GenTree*  source   = blkNode->Data();
    Compiler* compiler = comp;

    // Sources are dest address and initVal or source.
    GenTree* srcAddrOrFill = nullptr;
    bool     isInitBlk     = blkNode->OperIsInitBlkOp();

    if (!isInitBlk)
    {
        // CopyObj or CopyBlk
        // A STORE_OBJ with no GC pointers (or marked GC-unsafe) can be treated
        // as a plain STORE_BLK.
        if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
        {
            blkNode->SetOper(GT_STORE_BLK);
        }
        if (source->gtOper == GT_IND)
        {
            srcAddrOrFill = blkNode->Data()->gtGetOp1();
        }
    }

    if (isInitBlk)
    {
        GenTree* initVal = source;
        if (initVal->OperIsInitVal())
        {
            initVal->SetContained();
            initVal = initVal->gtGetOp1();
        }
        srcAddrOrFill = initVal;

#ifdef _TARGET_ARM64_
        if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
        {
            // TODO-ARM-CQ: Currently we generate a helper call for every
            // initblk we encounter.  Later on we should implement loop unrolling
            // code sequences to improve CQ.
            // For reference see the code in LowerXArch.cpp.
            NYI_ARM("initblk loop unrolling is currently not implemented.");

            // The fill value of an initblk is interpreted to hold a
            // value of (unsigned int8) however a constant of any size
            // may practically reside on the evaluation stack. So extract
            // the lower byte out of the initVal constant and replicate
            // it to a larger constant whose size is sufficient to support
            // the largest width store of the desired inline expansion.
            ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
            if (fill == 0)
            {
                // A zero fill value can be contained entirely.
                MakeSrcContained(blkNode, source);
            }
            else if (size < REGSIZE_BYTES)
            {
                initVal->gtIntCon.gtIconVal = 0x01010101 * fill;
            }
            else
            {
                initVal->gtIntCon.gtIconVal = 0x0101010101010101LL * fill;
                initVal->gtType             = TYP_LONG;
            }
            blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
        }
        else
#endif // _TARGET_ARM64_
        {
            blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
        }
    }
    else
    {
        // CopyObj or CopyBlk
        // Sources are src and dest and size if not constant.
        if (blkNode->OperGet() == GT_STORE_OBJ)
        {
            // CopyObj
            GenTreeObj* objNode = blkNode->AsObj();
            unsigned    slots   = objNode->gtSlots;

#ifdef DEBUG
            // CpObj must always have at least one GC-Pointer as a member.
            assert(objNode->gtGcPtrCount > 0);
            assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);

            CORINFO_CLASS_HANDLE clsHnd    = objNode->gtClass;
            size_t               classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
            size_t               blkSize   = roundUp(classSize, TARGET_POINTER_SIZE);

            // Currently, the EE always round up a class data structure so
            // we are not handling the case where we have a non multiple of pointer sized
            // struct. This behavior may change in the future so in order to keeps things correct
            // let's assert it just to be safe. Going forward we should simply
            // handle this case.
            assert(classSize == blkSize);
            assert((blkSize / TARGET_POINTER_SIZE) == slots);
            assert(objNode->HasGCPtr());
#endif

            blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
        }
        else // CopyBlk
        {
            // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
            // we should unroll the loop to improve CQ.
            // For reference see the code in lowerxarch.cpp.
            if ((size != 0) && (size <= CPBLK_UNROLL_LIMIT))
            {
                blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
            }
            else
            {
                // In case we have a constant integer this means we went beyond
                // CPBLK_UNROLL_LIMIT bytes of size, still we should never have the case of
                // any GC-Pointers in the src struct.
                blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
            }
        }

        // CopyObj or CopyBlk
        if (source->gtOper == GT_IND)
        {
            // Contain the source indirection; its address still needs a register
            // unless it is a local address.
            MakeSrcContained(blkNode, source);
            GenTree* addr = source->AsIndir()->Addr();
            if (!addr->OperIsLocalAddr())
            {
                addr->ClearContained();
            }
        }
        else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
        {
            assert(source->IsLocal());
            MakeSrcContained(blkNode, source);
        }
    }
}