void Rationalizer::RewriteAssignment(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeOp* assignment = use.Def()->AsOp();
    assert(assignment->OperGet() == GT_ASG);

    GenTree* location = assignment->gtGetOp1();
    GenTree* value    = assignment->gtGetOp2();

    genTreeOps locationOp = location->OperGet();

    if (assignment->OperIsBlkOp())
    {
#ifdef FEATURE_SIMD
        if (varTypeIsSIMD(location) && assignment->OperIsInitBlkOp())
        {
            if (location->OperGet() == GT_LCL_VAR)
            {
                var_types simdType = location->TypeGet();
                GenTree*  initVal  = assignment->gtOp.gtOp2;
                var_types baseType = comp->getBaseTypeOfSIMDLocal(location);
                if (baseType != TYP_UNKNOWN)
                {
                    GenTreeSIMD* simdTree = new (comp, GT_SIMD)
                        GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType));
                    assignment->gtOp.gtOp2 = simdTree;
                    value                  = simdTree;
                    initVal->gtNext        = simdTree;
                    simdTree->gtPrev       = initVal;

                    simdTree->gtNext = location;
                    location->gtPrev = simdTree;
                }
            }
        }
#endif // FEATURE_SIMD
        if ((location->TypeGet() == TYP_STRUCT) && !assignment->IsPhiDefn() && !value->IsMultiRegCall())
        {
            if ((location->OperGet() == GT_LCL_VAR))
            {
                // We need to construct a block node for the location.
                // Modify lcl to be the address form.
                location->SetOper(addrForm(locationOp));
                LclVarDsc* varDsc = &(comp->lvaTable[location->AsLclVarCommon()->gtLclNum]);
                location->gtType  = TYP_BYREF;

                GenTreeBlk*  storeBlk = nullptr;
                unsigned int size     = varDsc->lvExactSize;

                if (varDsc->lvStructGcCount != 0)
                {
                    CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                    GenTreeObj*          objNode   = comp->gtNewObjNode(structHnd, location)->AsObj();
                    unsigned int slots = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);

                    objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots);
                    objNode->ChangeOper(GT_STORE_OBJ);
                    objNode->SetData(value);
                    comp->fgMorphUnsafeBlk(objNode);
                    storeBlk = objNode;
                }
                else
                {
                    storeBlk = new (comp, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, size);
                }
                storeBlk->gtFlags |= (GTF_REVERSE_OPS | GTF_ASG);
                storeBlk->gtFlags |= ((location->gtFlags | value->gtFlags) & GTF_ALL_EFFECT);

                GenTree* insertionPoint = location->gtNext;
                BlockRange().InsertBefore(insertionPoint, storeBlk);
                use.ReplaceWith(comp, storeBlk);
                BlockRange().Remove(assignment);
                JITDUMP("After transforming local struct assignment into a block op:\n");
                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
                return;
            }
            else
            {
                assert(location->OperIsBlk());
            }
        }
    }

    switch (locationOp)
    {
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_REG_VAR:
        case GT_PHI_ARG:
            RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp);
            BlockRange().Remove(location);
            break;

        case GT_IND:
        {
            GenTreeStoreInd* store =
                new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value);

            copyFlags(store, assignment, GTF_ALL_EFFECT);
            copyFlags(store, location, GTF_IND_FLAGS);

            if (assignment->IsReverseOp())
            {
                store->gtFlags |= GTF_REVERSE_OPS;
            }

            // TODO: JIT dump

            // Remove the GT_IND node and replace the assignment node with the store
            BlockRange().Remove(location);
            BlockRange().InsertBefore(assignment, store);
            use.ReplaceWith(comp, store);
            BlockRange().Remove(assignment);
        }
        break;

        case GT_CLS_VAR:
        {
            location->SetOper(GT_CLS_VAR_ADDR);
            location->gtType = TYP_BYREF;

            assignment->SetOper(GT_STOREIND);

            // TODO: JIT dump
        }
        break;

        case GT_BLK:
        case GT_OBJ:
        case GT_DYN_BLK:
        {
            assert(varTypeIsStruct(location));
            GenTreeBlk* storeBlk = location->AsBlk();
            genTreeOps  storeOper;
            switch (location->gtOper)
            {
                case GT_BLK:
                    storeOper = GT_STORE_BLK;
                    break;
                case GT_OBJ:
                    storeOper = GT_STORE_OBJ;
                    break;
                case GT_DYN_BLK:
                    storeOper = GT_STORE_DYN_BLK;
                    break;
                default:
                    unreached();
            }
            JITDUMP("Rewriting GT_ASG(%s(X), Y) to %s(X,Y):\n", GenTree::NodeName(location->gtOper),
                    GenTree::NodeName(storeOper));
            storeBlk->SetOperRaw(storeOper);
            storeBlk->gtFlags &= ~GTF_DONT_CSE;
            storeBlk->gtFlags |= (assignment->gtFlags &
                                  (GTF_ALL_EFFECT | GTF_REVERSE_OPS | GTF_BLK_VOLATILE | GTF_BLK_UNALIGNED | GTF_DONT_CSE));
            storeBlk->gtBlk.Data() = value;

            // Replace the assignment node with the store
            use.ReplaceWith(comp, storeBlk);
            BlockRange().Remove(assignment);
            DISPTREERANGE(BlockRange(), use.Def());
            JITDUMP("\n");
        }
        break;

        default:
            unreached();
            break;
    }
}
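// As a schematic summary of the assignment rewrites above (the shapes are illustrative;
// the actual JIT dump format differs): GT_ASG(GT_IND(addr), val) becomes GT_STOREIND(addr, val),
// and GT_ASG(GT_LCL_VAR, val) becomes a GT_STORE_LCL_VAR of val.

// Rewrite a single node during the post-order LIR rationalization walk: remove stray list
// nodes from the execution order, rewrite GT_ASG/GT_ADDR/GT_COMMA/etc. into their LIR forms,
// and clean up unused top-level local reads.
//
// Arguments:
//    useEdge     - the use edge pointing to the node to rewrite
//    parentStack - the stack of ancestor nodes; the node's user, if any, is at Index(1)
//
// Return Value:
//    Compiler::WALK_CONTINUE
//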
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack)
{
    assert(useEdge != nullptr);

    GenTree* node = *useEdge;
    assert(node != nullptr);

#ifdef DEBUG
    const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0;
#endif

    // First, remove any preceding list nodes, which are not otherwise visited by the tree walk.
    //
    // NOTE: GT_FIELD_LIST head nodes, and GT_LIST nodes used by phi nodes will in fact be visited.
    for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
         prev = node->gtPrev)
    {
        BlockRange().Remove(prev);
    }

    // In addition, remove the current node if it is a GT_LIST node that is not an aggregate.
    if (node->OperIsAnyList())
    {
        GenTreeArgList* list = node->AsArgList();
        if (!list->OperIsFieldListHead())
        {
            BlockRange().Remove(list);
        }
        return Compiler::WALK_CONTINUE;
    }

    LIR::Use use;
    if (parentStack.Height() < 2)
    {
        use = LIR::Use::GetDummyUse(BlockRange(), *useEdge);
    }
    else
    {
        use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1));
    }

    assert(node == use.Def());
    switch (node->OperGet())
    {
        case GT_ASG:
            RewriteAssignment(use);
            break;

        case GT_BOX:
            // GT_BOX at this level just passes through so get rid of it
            use.ReplaceWith(comp, node->gtGetOp1());
            BlockRange().Remove(node);
            break;

        case GT_ADDR:
            RewriteAddress(use);
            break;

        case GT_IND:
            // Clear the `GTF_IND_ASG_LHS` flag, which overlaps with `GTF_IND_REQ_ADDR_IN_REG`.
            node->gtFlags &= ~GTF_IND_ASG_LHS;
            break;

        case GT_NOP:
            // fgMorph sometimes inserts NOP nodes between defs and uses
            // supposedly 'to prevent constant folding'. In this case, remove the
            // NOP.
            if (node->gtGetOp1() != nullptr)
            {
                use.ReplaceWith(comp, node->gtGetOp1());
                BlockRange().Remove(node);
            }
            break;

        case GT_COMMA:
        {
            GenTree* op1 = node->gtGetOp1();
            if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
            {
                // The LHS has no side effects. Remove it.
                bool               isClosed    = false;
                unsigned           sideEffects = 0;
                LIR::ReadOnlyRange lhsRange    = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects);

                // None of the transforms performed herein violate tree order, so these
                // should always be true.
                assert(isClosed);
                assert((sideEffects & GTF_ALL_EFFECT) == 0);

                BlockRange().Delete(comp, m_block, std::move(lhsRange));
            }

            GenTree* replacement = node->gtGetOp2();
            if (!use.IsDummyUse())
            {
                use.ReplaceWith(comp, replacement);
            }
            else
            {
                // This is a top-level comma. If the RHS has no side effects we can remove
                // it as well.
                if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0)
                {
                    bool               isClosed    = false;
                    unsigned           sideEffects = 0;
                    LIR::ReadOnlyRange rhsRange    = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects);

                    // None of the transforms performed herein violate tree order, so these
                    // should always be true.
                    assert(isClosed);
                    assert((sideEffects & GTF_ALL_EFFECT) == 0);

                    BlockRange().Delete(comp, m_block, std::move(rhsRange));
                }
            }

            BlockRange().Remove(node);
        }
        break;

        case GT_ARGPLACE:
            // Remove argplace and list nodes from the execution order.
            //
            // TODO: remove phi args and phi nodes as well?
            BlockRange().Remove(node);
            break;

#ifdef _TARGET_XARCH_
        case GT_CLS_VAR:
        {
            // Class vars that are the target of an assignment will get rewritten into
            // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is
            // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would
            // otherwise be generated would also be picked up by RewriteAssignment--but
            // skipping the rewrite here saves an allocation and a bit of extra work.
            const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node);
            if (!isLHSOfAssignment)
            {
                GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node);

                node->SetOper(GT_CLS_VAR_ADDR);
                node->gtType = TYP_BYREF;

                BlockRange().InsertAfter(node, ind);
                use.ReplaceWith(comp, ind);

                // TODO: JIT dump
            }
        }
        break;
#endif // _TARGET_XARCH_

        case GT_INTRINSIC:
            // Non-target intrinsics should have already been rewritten back into user calls.
            assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId));
            break;

#ifdef FEATURE_SIMD
        case GT_BLK:
        case GT_OBJ:
        {
            // TODO-1stClassStructs: These should have been transformed to GT_INDs, but in order
            // to preserve existing behavior, we will keep this as a block node if this is the
            // lhs of a block assignment, and either:
            // - It is a "generic" TYP_STRUCT assignment, OR
            // - It is an initblk, OR
            // - Neither the lhs nor rhs are known to be of SIMD type.
            GenTree* parent  = use.User();
            bool     keepBlk = false;
            if ((parent->OperGet() == GT_ASG) && (node == parent->gtGetOp1()))
            {
                if ((node->TypeGet() == TYP_STRUCT) || parent->OperIsInitBlkOp())
                {
                    keepBlk = true;
                }
                else if (!comp->isAddrOfSIMDType(node->AsBlk()->Addr()))
                {
                    GenTree* dataSrc = parent->gtGetOp2();
                    if (!dataSrc->IsLocal() && (dataSrc->OperGet() != GT_SIMD))
                    {
                        noway_assert(dataSrc->OperIsIndir());
                        keepBlk = !comp->isAddrOfSIMDType(dataSrc->AsIndir()->Addr());
                    }
                }
            }
            RewriteSIMDOperand(use, keepBlk);
        }
        break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            // TODO-1stClassStructs: Eliminate this.
            FixupIfSIMDLocal(node->AsLclVarCommon());
            break;

        case GT_SIMD:
        {
            noway_assert(comp->featureSIMD);
            GenTreeSIMD* simdNode = node->AsSIMD();
            unsigned     simdSize = simdNode->gtSIMDSize;
            var_types    simdType = comp->getSIMDTypeForSize(simdSize);

            // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
            // structs that are passed or returned in a different register type than their enregistered
            // type(s).
            if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
            {
                // This happens when it is consumed by a GT_RET_EXPR.
                // It can only be a Vector2f or Vector2i.
                assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
                simdNode->gtType = TYP_SIMD8;
            }

            // Certain SIMD trees require rationalizing.
            if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray)
            {
                // Rewrite this as an explicit load.
                JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
                unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
                GenTree*     address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
                                                                      baseTypeSize, offsetof(CORINFO_Array, u1Elems));
                GenTree*     ind = comp->gtNewOperNode(GT_IND, simdType, address);

                BlockRange().InsertBefore(simdNode, address, ind);
                use.ReplaceWith(comp, ind);
                BlockRange().Remove(simdNode);

                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
            }
            else
            {
                // This code depends on the fact that NONE of the SIMD intrinsics take vector operands
                // of a different width. If that assumption changes, we will EITHER have to make these type
                // transformations during importation, and plumb the types all the way through the JIT,
                // OR add a lot of special handling here.
                GenTree* op1 = simdNode->gtGetOp1();
                if (op1 != nullptr && op1->gtType == TYP_STRUCT)
                {
                    op1->gtType = simdType;
                }

                GenTree* op2 = simdNode->gtGetOp2();
                if (op2 != nullptr && op2->gtType == TYP_STRUCT)
                {
                    op2->gtType = simdType;
                }
            }
        }
        break;
#endif // FEATURE_SIMD

        default:
            // JCC nodes should not be present in HIR.
            assert(node->OperGet() != GT_JCC);
            break;
    }

    // Do some extra processing on top-level nodes to remove unused local reads.
    if (use.IsDummyUse() && node->OperIsLocalRead())
    {
        assert((node->gtFlags & GTF_ALL_EFFECT) == 0);

        comp->lvaDecRefCnts(node);
        BlockRange().Remove(node);
    }

    assert(isLateArg == ((use.Def()->gtFlags & GTF_LATE_ARG) != 0));

    return Compiler::WALK_CONTINUE;
}
// Transform a CopyBlk involving SIMD vectors into a st.lclVar or stind of a SIMD type.
// The transformation is done if either the src or the dst is known to be a SIMD vector.
//
// Arguments:
//    use - the LIR::Use of the GT_COPYBLK node to rewrite
//
// Return Value:
//    None.
//
// If either the source or the dst are known to be SIMD (a lclVar or SIMD intrinsic),
// get the simdType (TYP_DOUBLE or a SIMD type for SSE2) from the size of the SIMD node.
//
// For the source:
// - If it is a SIMD intrinsic or a lvSIMDType lclVar, change the node type to simdType.
// - Otherwise, add a GT_IND of simdType.
// For the dst:
// - If it is a lclVar of a SIMD type, change the node type to simdType.
// - Otherwise, change it to a GT_STORE_IND of simdType.
//
// (A schematic example of this rewrite follows the function below.)
//
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
void Rationalizer::RewriteCopyBlk(LIR::Use& use)
{
#ifdef FEATURE_SIMD
    // No need to transform non-SIMD nodes, if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    // See if this is a SIMD copyBlk
    GenTreeCpBlk* cpBlk   = use.Def()->AsCpBlk();
    GenTreePtr    dstAddr = cpBlk->Dest();
    GenTree*      srcAddr = cpBlk->Source();

    const bool srcIsSIMDAddr = comp->isAddrOfSIMDType(srcAddr);
    const bool dstIsSIMDAddr = comp->isAddrOfSIMDType(dstAddr);

    // Do not transform if neither src nor dst is known to be a SIMD type.
    // If the src tree's type is something we cannot reason about but the dst is known to be
    // of a SIMD type, we will treat the src tree as a SIMD type as well, and vice versa.
    if (!srcIsSIMDAddr && !dstIsSIMDAddr)
    {
        return;
    }

    // At this point it is known to be a copyblk of SIMD vectors and we can
    // start transforming the original tree. Prior to this point do not perform
    // any modifications to the original tree.
    JITDUMP("\nRewriting SIMD CopyBlk\n");
    DISPTREERANGE(BlockRange(), cpBlk);

    // Only a few sizes are currently supported: 8 bytes, 12 bytes, 16 bytes, or the vector register length.
    GenTreeIntConCommon* sizeNode = cpBlk->Size()->AsIntConCommon();
    var_types            simdType = comp->getSIMDTypeForSize((unsigned int)sizeNode->IconValue());

    // Remove 'size' from execution order
    BlockRange().Remove(sizeNode);

    // Is the destination a lclVar which is not an arg?
    // If so, we can turn it into a st.lclVar; otherwise, turn it into a stind.
    GenTree*   simdDst = nullptr;
    genTreeOps oper    = GT_NONE;
    if (dstIsSIMDAddr && dstAddr->OperIsLocalAddr())
    {
        simdDst         = dstAddr;
        simdDst->gtType = simdType;
        oper            = GT_STORE_LCL_VAR;

        // For structs that are padded (e.g. Vector3f, Vector3i), the morpher will have marked them
        // as GTF_VAR_USEASG. Unmark them.
        simdDst->gtFlags &= ~(GTF_VAR_USEASG);
    }
    else
    {
        // Address of a non-local var
        simdDst = dstAddr;
        oper    = GT_STOREIND;
    }

    GenTree* simdSrc = nullptr;
    if ((srcAddr->OperGet() == GT_ADDR) && varTypeIsSIMD(srcAddr->gtGetOp1()))
    {
        // Get rid of parent node of GT_ADDR(..) if its child happens to be of a SIMD type.
        BlockRange().Remove(srcAddr);
        simdSrc = srcAddr->gtGetOp1();
    }
    else if (srcIsSIMDAddr && srcAddr->OperIsLocalAddr())
    {
        // If the source has been rewritten into a local addr node, rewrite it back into a
        // local var node.
        simdSrc = srcAddr;
        simdSrc->SetOper(loadForm(srcAddr->OperGet()));
    }
    else
    {
        // Since the destination is known to be a SIMD type, the src must be a SIMD type too,
        // though we cannot figure it out easily enough. Transform src into
        // GT_IND(src) of simdType.
        GenTree* indir = comp->gtNewOperNode(GT_IND, simdType, srcAddr);
        BlockRange().InsertAfter(srcAddr, indir);

        cpBlk->gtGetOp1()->gtOp.gtOp2 = indir;
        simdSrc                       = indir;
    }
    simdSrc->gtType = simdType;

    // Change the cpblk to either a st.lclVar or st.ind.
    // At this point we are manipulating the cpblk node with knowledge of its internals
    // (i.e. op1 is the GT_LIST holding the dst & src addresses, and op2 is the size node).
    // This logic might need to be changed if we ever restructure the cpblk node.
    assert(simdDst != nullptr);
    assert(simdSrc != nullptr);

    GenTree* newNode = nullptr;
    if (oper == GT_STORE_LCL_VAR)
    {
        newNode = simdDst;
        newNode->SetOper(oper);

        GenTreeLclVar* store = newNode->AsLclVar();
        store->gtOp1         = simdSrc;
        store->gtType        = simdType;
        store->gtFlags |= ((simdSrc->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);

        BlockRange().Remove(simdDst);
        BlockRange().InsertAfter(simdSrc, store);
    }
    else
    {
        assert(oper == GT_STOREIND);

        newNode = cpBlk->gtGetOp1();
        newNode->SetOper(oper);

        GenTreeStoreInd* storeInd = newNode->AsStoreInd();
        storeInd->gtType          = simdType;
        storeInd->gtFlags |= ((simdSrc->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);
        storeInd->gtOp1 = simdDst;
        storeInd->gtOp2 = simdSrc;

        BlockRange().InsertBefore(cpBlk, storeInd);
    }

    use.ReplaceWith(comp, newNode);
    BlockRange().Remove(cpBlk);

    JITDUMP("After rewriting SIMD CopyBlk:\n");
    DISPTREERANGE(BlockRange(), use.Def());
    JITDUMP("\n");
#endif // FEATURE_SIMD
}
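// A rough sketch of the RewriteCopyBlk transformation above (the node shapes and local number
// are illustrative, not actual JIT dump output): a 16-byte copyblk whose destination is the
// address of a SIMD local V02 becomes a direct SIMD-typed store,
//
//     GT_STORE_LCL_VAR<simd16> V02
//         GT_IND<simd16>
//             srcAddr
//
// while a copyblk to a non-local destination becomes GT_STOREIND<simd16>(dstAddr, src) instead.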
// Rewrite an InitBlk involving a SIMD vector into a st.lclVar of a SIMD type.
//
// Arguments:
//    use - the LIR::Use of the GT_INITBLK node to rewrite
//
// Return Value:
//    None.
//
// (A schematic example of this rewrite follows the function below.)
//
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
void Rationalizer::RewriteInitBlk(LIR::Use& use)
{
#ifdef FEATURE_SIMD
    // No lowering is needed for non-SIMD nodes, so early out if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    // See if this is a SIMD initBlk that needs to be changed to a simple st.lclVar.
    GenTreeInitBlk* initBlk = use.Def()->AsInitBlk();

    // Is the dstAddr the address of a SIMD-typed lclVar?
    GenTree* dstAddr = initBlk->Dest();
    if (!comp->isAddrOfSIMDType(dstAddr) || !dstAddr->OperIsLocalAddr())
    {
        return;
    }

    unsigned lclNum = dstAddr->AsLclVarCommon()->gtLclNum;
    if (!comp->lvaTable[lclNum].lvSIMDType)
    {
        return;
    }

    var_types            baseType      = comp->lvaTable[lclNum].lvBaseType;
    CORINFO_CLASS_HANDLE typeHnd       = comp->lvaTable[lclNum].lvVerTypeInfo.GetClassHandle();
    unsigned             simdLocalSize = comp->getSIMDTypeSizeInBytes(typeHnd);

    JITDUMP("Rewriting SIMD InitBlk\n");
    DISPTREERANGE(BlockRange(), initBlk);

    assert((dstAddr->gtFlags & GTF_VAR_USEASG) == 0);

    // There are currently only three sizes supported: 8 bytes, 16 bytes, or the vector register length.
    GenTreeIntConCommon* sizeNode = initBlk->Size()->AsIntConCommon();
    unsigned int         size     = (unsigned int)roundUp(sizeNode->IconValue(), TARGET_POINTER_SIZE);

    var_types simdType = comp->getSIMDTypeForSize(size);
    assert(roundUp(simdLocalSize, TARGET_POINTER_SIZE) == size);

    GenTree*     initVal  = initBlk->InitVal();
    GenTreeSIMD* simdNode = new (comp, GT_SIMD)
        GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, (unsigned)sizeNode->IconValue());

    dstAddr->SetOper(GT_STORE_LCL_VAR);
    GenTreeLclVar* store = dstAddr->AsLclVar();
    store->gtType        = simdType;
    store->gtOp.gtOp1    = simdNode;
    store->gtFlags |= ((simdNode->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);

    BlockRange().Remove(store);

    // Insert the new nodes into the block
    BlockRange().InsertAfter(initVal, simdNode, store);
    use.ReplaceWith(comp, store);

    // Remove the old size and GT_INITBLK nodes.
    BlockRange().Remove(sizeNode);
    BlockRange().Remove(initBlk);

    JITDUMP("After rewriting SIMD InitBlk:\n");
    DISPTREERANGE(BlockRange(), use.Def());
    JITDUMP("\n");
#endif // FEATURE_SIMD
}
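// A rough sketch of the RewriteInitBlk transformation above (node shapes and local number are
// illustrative, not actual JIT dump output): an initblk whose destination is the address of a
// 16-byte SIMD local V03, with init value 0 and size 16, becomes
//
//     GT_STORE_LCL_VAR<simd16> V03
//         GT_SIMD<simd16> SIMDIntrinsicInit
//             GT_CNS_INT 0
//
// i.e. the block init is expressed as a store of a SIMDIntrinsicInit node to the local.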