void Rationalizer::RewriteAddress(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeUnOp* address = use.Def()->AsUnOp();
    assert(address->OperGet() == GT_ADDR);

    GenTree*   location   = address->gtGetOp1();
    genTreeOps locationOp = location->OperGet();

    if (location->IsLocal())
    {
        // We are changing the child from GT_LCL_VAR to GT_LCL_VAR_ADDR.
        // Therefore gtType of the child needs to be changed to TYP_BYREF.
#ifdef DEBUG
        if (locationOp == GT_LCL_VAR)
        {
            JITDUMP("Rewriting GT_ADDR(GT_LCL_VAR) to GT_LCL_VAR_ADDR:\n");
        }
        else
        {
            assert(locationOp == GT_LCL_FLD);
            JITDUMP("Rewriting GT_ADDR(GT_LCL_FLD) to GT_LCL_FLD_ADDR:\n");
        }
#endif // DEBUG

        location->SetOper(addrForm(locationOp));
        location->gtType = TYP_BYREF;
        copyFlags(location, address, GTF_ALL_EFFECT);

        use.ReplaceWith(comp, location);
        BlockRange().Remove(address);
    }
    else if (locationOp == GT_CLS_VAR)
    {
        location->SetOper(GT_CLS_VAR_ADDR);
        location->gtType = TYP_BYREF;
        copyFlags(location, address, GTF_ALL_EFFECT);

        use.ReplaceWith(comp, location);
        BlockRange().Remove(address);

        JITDUMP("Rewriting GT_ADDR(GT_CLS_VAR) to GT_CLS_VAR_ADDR:\n");
    }
    else if (location->OperIsIndir())
    {
        use.ReplaceWith(comp, location->gtGetOp1());
        BlockRange().Remove(location);
        BlockRange().Remove(address);

        JITDUMP("Rewriting GT_ADDR(GT_IND(X)) to X:\n");
    }

    DISPTREERANGE(BlockRange(), use.Def());
    JITDUMP("\n");
}
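// For reference, a sketch of the oper mapping that the addrForm/loadForm helpers
// used above are assumed to implement. The actual helpers are defined elsewhere
// in this file; the commented-out version below only illustrates the
// GT_LCL_VAR <-> GT_LCL_VAR_ADDR correspondence that RewriteAddress relies on,
// and is not the authoritative implementation:
//
//     genTreeOps addrForm(genTreeOps loadForm)
//     {
//         switch (loadForm)
//         {
//             case GT_LCL_VAR:
//                 return GT_LCL_VAR_ADDR;
//             case GT_LCL_FLD:
//                 return GT_LCL_FLD_ADDR;
//             default:
//                 noway_assert(!"not a data load opcode\n");
//                 return GT_NONE;
//         }
//     }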
void Rationalizer::RewriteAssignment(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeOp* assignment = use.Def()->AsOp();
    assert(assignment->OperGet() == GT_ASG);

    GenTree* location = assignment->gtGetOp1();
    GenTree* value    = assignment->gtGetOp2();

    genTreeOps locationOp = location->OperGet();
    switch (locationOp)
    {
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_REG_VAR:
        case GT_PHI_ARG:
            RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp);
            BlockRange().Remove(location);
            break;

        case GT_IND:
        {
            GenTreeStoreInd* store =
                new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value);

            copyFlags(store, assignment, GTF_ALL_EFFECT);
            copyFlags(store, location, GTF_IND_FLAGS);

            if (assignment->IsReverseOp())
            {
                store->gtFlags |= GTF_REVERSE_OPS;
            }

            // TODO: JIT dump

            // Remove the GT_IND node and replace the assignment node with the store
            BlockRange().Remove(location);
            BlockRange().InsertBefore(assignment, store);
            use.ReplaceWith(comp, store);
            BlockRange().Remove(assignment);
        }
        break;

        case GT_CLS_VAR:
        {
            location->SetOper(GT_CLS_VAR_ADDR);
            location->gtType = TYP_BYREF;

            assignment->SetOper(GT_STOREIND);

            // TODO: JIT dump
        }
        break;

        default:
            unreached();
            break;
    }
}
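// Schematically (a sketch, not an exact JITDUMP), the GT_IND and GT_CLS_VAR cases
// above perform the rewrites
//
//     ASG(IND(addr), value)    ==>  STOREIND(addr, value)
//     ASG(CLS_VAR, value)      ==>  STOREIND(CLS_VAR_ADDR, value)
//
// with the store node inheriting the assignment's effect flags and taking the
// assignment's place both in the use edge and in the block's execution order.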
// Rewrite a SIMD indirection as GT_IND(GT_LEA(obj.op1)), or as a simple
// lclVar if possible.
//
// Arguments:
//    use      - A use reference for a block node
//    keepBlk  - True if this should remain a block node if it is not a lclVar
//
// Return Value:
//    None.
//
// TODO-1stClassStructs: These should be eliminated earlier, once we can handle
// lclVars in all the places that used to have GT_OBJ.
//
void Rationalizer::RewriteSIMDOperand(LIR::Use& use, bool keepBlk)
{
#ifdef FEATURE_SIMD
    // No lowering is needed for non-SIMD nodes, so early out if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    GenTree* tree = use.Def();
    if (!tree->OperIsIndir())
    {
        return;
    }
    var_types simdType = tree->TypeGet();

    if (!varTypeIsSIMD(simdType))
    {
        return;
    }

    // If we have GT_IND(GT_LCL_VAR_ADDR) and the GT_LCL_VAR_ADDR is TYP_BYREF/TYP_I_IMPL,
    // and the var is a SIMD type, replace the expression by GT_LCL_VAR.
    GenTree* addr = tree->AsIndir()->Addr();
    if (addr->OperIsLocalAddr() && comp->isAddrOfSIMDType(addr))
    {
        BlockRange().Remove(tree);

        addr->SetOper(loadForm(addr->OperGet()));
        addr->gtType = simdType;
        use.ReplaceWith(comp, addr);
    }
    else if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_SIMD))
    {
        // If we have GT_IND(GT_ADDR(GT_SIMD)), remove the GT_IND(GT_ADDR()), leaving just the GT_SIMD.
        BlockRange().Remove(tree);
        BlockRange().Remove(addr);

        use.ReplaceWith(comp, addr->gtGetOp1());
    }
    else if (!keepBlk)
    {
        tree->SetOper(GT_IND);
        tree->gtType = simdType;
    }
#endif // FEATURE_SIMD
}
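// Schematically (a sketch, not an exact JITDUMP), the two SIMD patterns handled
// above are
//
//     IND<simd>(LCL_VAR_ADDR V00)    ==>  LCL_VAR<simd> V00
//     IND<simd>(ADDR(SIMD ...))      ==>  SIMD ...
//
// and any other SIMD indirection becomes a plain GT_IND of the SIMD type unless
// keepBlk asks for the block form to be preserved.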
//------------------------------------------------------------------------
// DecomposeLclVar: Decompose GT_LCL_VAR.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeLclVar(LIR::Use& use)
{
    assert(use.IsInitialized());
    assert(use.Def()->OperGet() == GT_LCL_VAR);

    GenTree*   tree   = use.Def();
    unsigned   varNum = tree->AsLclVarCommon()->gtLclNum;
    LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
    m_compiler->lvaDecRefCnts(tree);

    GenTree* loResult = tree;
    loResult->gtType  = TYP_INT;

    GenTree* hiResult = m_compiler->gtNewLclLNode(varNum, TYP_INT);
    hiResult->CopyCosts(loResult);
    BlockRange().InsertAfter(loResult, hiResult);

    if (varDsc->lvPromoted)
    {
        assert(varDsc->lvFieldCnt == 2);
        unsigned loVarNum = varDsc->lvFieldLclStart;
        unsigned hiVarNum = loVarNum + 1;
        loResult->AsLclVarCommon()->SetLclNum(loVarNum);
        hiResult->AsLclVarCommon()->SetLclNum(hiVarNum);
    }
    else
    {
        noway_assert(varDsc->lvLRACandidate == false);

        loResult->SetOper(GT_LCL_FLD);
        loResult->AsLclFld()->gtLclOffs  = 0;
        loResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();

        hiResult->SetOper(GT_LCL_FLD);
        hiResult->AsLclFld()->gtLclOffs  = 4;
        hiResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
    }

    m_compiler->lvaIncRefCnts(loResult);
    m_compiler->lvaIncRefCnts(hiResult);

    return FinalizeDecomposition(use, loResult, hiResult);
}
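// As a worked example (with a hypothetical long-typed local V02 on a 32-bit
// target), a single GT_LCL_VAR read
//
//     LCL_VAR<long> V02
//
// decomposes, for an unpromoted local, into a GT_LONG of two TYP_INT halves:
//
//     t0 = LCL_FLD<int> V02 [+0]   // low 32 bits
//     t1 = LCL_FLD<int> V02 [+4]   // high 32 bits
//     GT_LONG(t0, t1)
//
// or, when V02 is promoted into two int fields, into LCL_VARs of those field
// locals. FinalizeDecomposition is what links the halves under the GT_LONG.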
//------------------------------------------------------------------------
// DecomposeLclVar: Decompose GT_LCL_VAR.
//
// Arguments:
//    ppTree - the tree to decompose
//    data   - tree walk context
//
// Return Value:
//    None.
//
void DecomposeLongs::DecomposeLclVar(GenTree** ppTree, Compiler::fgWalkData* data)
{
    assert(ppTree != nullptr);
    assert(*ppTree != nullptr);
    assert(data != nullptr);
    assert((*ppTree)->OperGet() == GT_LCL_VAR);

    GenTree*   tree   = *ppTree;
    unsigned   varNum = tree->AsLclVarCommon()->gtLclNum;
    LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
    m_compiler->lvaDecRefCnts(tree);

    GenTree* loResult = tree;
    loResult->gtType  = TYP_INT;

    GenTree* hiResult = m_compiler->gtNewLclLNode(varNum, TYP_INT);

    if (varDsc->lvPromoted)
    {
        assert(varDsc->lvFieldCnt == 2);
        unsigned loVarNum = varDsc->lvFieldLclStart;
        unsigned hiVarNum = loVarNum + 1;
        loResult->AsLclVarCommon()->SetLclNum(loVarNum);
        hiResult->AsLclVarCommon()->SetLclNum(hiVarNum);
    }
    else
    {
        noway_assert(varDsc->lvLRACandidate == false);

        loResult->SetOper(GT_LCL_FLD);
        loResult->AsLclFld()->gtLclOffs  = 0;
        loResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();

        hiResult->SetOper(GT_LCL_FLD);
        hiResult->AsLclFld()->gtLclOffs  = 4;
        hiResult->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
    }

    m_compiler->lvaIncRefCnts(loResult);
    m_compiler->lvaIncRefCnts(hiResult);

    FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
// Rewrite a SIMD indirection as GT_IND(GT_LEA(obj.op1)), or as a simple
// lclVar if possible.
//
// Arguments:
//    use      - A use reference for a block node
//    keepBlk  - True if this should remain a block node if it is not a lclVar
//
// Return Value:
//    None.
//
// TODO-1stClassStructs: These should be eliminated earlier, once we can handle
// lclVars in all the places that used to have GT_OBJ.
//
void Rationalizer::RewriteSIMDOperand(LIR::Use& use, bool keepBlk)
{
#ifdef FEATURE_SIMD
    // No lowering is needed for non-SIMD nodes, so early out if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    GenTree* tree = use.Def();
    if (!tree->OperIsIndir())
    {
        return;
    }
    var_types simdType = tree->TypeGet();

    if (!varTypeIsSIMD(simdType))
    {
        return;
    }

    // If the operand is a GT_ADDR(GT_LCL_VAR) and the LclVar is known to be of simdType,
    // replace the obj by a GT_LCL_VAR.
    GenTree* addr = tree->AsIndir()->Addr();
    if (addr->OperIsLocalAddr() && comp->isAddrOfSIMDType(addr))
    {
        BlockRange().Remove(tree);

        addr->SetOper(loadForm(addr->OperGet()));
        addr->gtType = simdType;
        use.ReplaceWith(comp, addr);
    }
    else if (!keepBlk)
    {
        tree->SetOper(GT_IND);
        tree->gtType = simdType;
    }
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// DecomposeArith: Decompose GT_ADD, GT_SUB, GT_OR, GT_XOR, GT_AND.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeArith(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTree*   tree = use.Def();
    genTreeOps oper = tree->OperGet();

    assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND));

    GenTree* op1 = tree->gtGetOp1();
    GenTree* op2 = tree->gtGetOp2();

    // Both operands must have already been decomposed into GT_LONG operators.
    noway_assert((op1->OperGet() == GT_LONG) && (op2->OperGet() == GT_LONG));

    // Capture the lo and hi halves of op1 and op2.
    GenTree* loOp1 = op1->gtGetOp1();
    GenTree* hiOp1 = op1->gtGetOp2();
    GenTree* loOp2 = op2->gtGetOp1();
    GenTree* hiOp2 = op2->gtGetOp2();

    // Now, remove op1 and op2 from the node list.
    Range().Remove(op1);
    Range().Remove(op2);

    // We will reuse "tree" for the loResult, which will now be of TYP_INT, and its operands
    // will be the lo halves of op1 and op2 from above.
    GenTree* loResult = tree;
    loResult->SetOper(GetLoOper(oper));
    loResult->gtType     = TYP_INT;
    loResult->gtOp.gtOp1 = loOp1;
    loResult->gtOp.gtOp2 = loOp2;

    GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2);
    Range().InsertAfter(loResult, hiResult);

    if ((oper == GT_ADD) || (oper == GT_SUB))
    {
        if (loResult->gtOverflow())
        {
            hiResult->gtFlags |= GTF_OVERFLOW;
            loResult->gtFlags &= ~GTF_OVERFLOW;
        }
        if (loResult->gtFlags & GTF_UNSIGNED)
        {
            hiResult->gtFlags |= GTF_UNSIGNED;
        }
    }

    return FinalizeDecomposition(use, loResult, hiResult);
}
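// For example (a sketch of the transformation, with invented operand names),
// a 64-bit addition on a 32-bit target
//
//     ADD<long>(GT_LONG(a_lo, a_hi), GT_LONG(b_lo, b_hi))
//
// decomposes into an add of the low halves that produces the carry and an
// add-with-carry of the high halves:
//
//     lo = ADD_LO<int>(a_lo, b_lo)
//     hi = ADD_HI<int>(a_hi, b_hi)   // consumes the carry set by ADD_LO
//     GT_LONG(lo, hi)
//
// GetLoOper/GetHiOper provide the oper pairing (e.g. GT_ADD -> GT_ADD_LO /
// GT_ADD_HI), and FinalizeDecomposition wraps the halves in the GT_LONG.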
// Rewrite GT_OBJ of SIMD Vector as GT_IND(GT_LEA(obj.op1)) of a SIMD type.
//
// Arguments:
//    use - the LIR::Use of the GT_OBJ node
//
// Return Value:
//    None.
//
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
void Rationalizer::RewriteObj(LIR::Use& use)
{
#ifdef FEATURE_SIMD
    GenTreeObj* obj = use.Def()->AsObj();

// For UNIX struct passing, we can have Obj nodes for arguments.
// For other cases, we should never see a non-SIMD type here.
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
    if (!varTypeIsSIMD(obj))
    {
        return;
    }
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING

    // Should come here only if featureSIMD is enabled
    noway_assert(comp->featureSIMD);
    // We should only call this with a SIMD type.
    noway_assert(varTypeIsSIMD(obj));
    var_types simdType = obj->TypeGet();

    // If the operand of obj is a GT_ADDR(GT_LCL_VAR) and the LclVar is known to be a SIMD type,
    // replace obj by GT_LCL_VAR.
    GenTree* srcAddr = obj->gtGetOp1();
    if (srcAddr->OperIsLocalAddr() && comp->isAddrOfSIMDType(srcAddr))
    {
        BlockRange().Remove(obj);

        srcAddr->SetOper(loadForm(srcAddr->OperGet()));
        srcAddr->gtType = simdType;
        use.ReplaceWith(comp, srcAddr);
    }
    else
    {
        obj->SetOper(GT_IND);
        obj->gtType = simdType;
    }
#else
    // We should never reach here without FEATURE_SIMD.
    assert(!"Unexpected obj during rationalization\n");
    unreached();
#endif
}
//------------------------------------------------------------------------
// DecomposeArith: Decompose GT_ADD, GT_SUB, GT_OR, GT_XOR, GT_AND.
//
// Arguments:
//    ppTree - the tree to decompose
//    data   - tree walk context
//
// Return Value:
//    None.
//
void DecomposeLongs::DecomposeArith(GenTree** ppTree, Compiler::fgWalkData* data)
{
    assert(ppTree != nullptr);
    assert(*ppTree != nullptr);
    assert(data != nullptr);
    assert(m_compiler->compCurStmt != nullptr);

    GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt();
    GenTree*     tree    = *ppTree;
    genTreeOps   oper    = tree->OperGet();

    assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND));

    NYI_IF((tree->gtFlags & GTF_REVERSE_OPS) != 0, "Binary operator with GTF_REVERSE_OPS");

    GenTree* op1 = tree->gtGetOp1();
    GenTree* op2 = tree->gtGetOp2();

    // Both operands must have already been decomposed into GT_LONG operators.
    noway_assert((op1->OperGet() == GT_LONG) && (op2->OperGet() == GT_LONG));

    // Capture the lo and hi halves of op1 and op2.
    GenTree* loOp1 = op1->gtGetOp1();
    GenTree* hiOp1 = op1->gtGetOp2();
    GenTree* loOp2 = op2->gtGetOp1();
    GenTree* hiOp2 = op2->gtGetOp2();

    // We don't have support to decompose a TYP_LONG node that already has a child that has
    // been decomposed into parts, where the high part depends on the value generated by the
    // low part (via the flags register). For example, if we have:
    //    +(gt_long(+(lo3, lo4), +Hi(hi3, hi4)), gt_long(lo2, hi2))
    // We would decompose it here to:
    //    gt_long(+(+(lo3, lo4), lo2), +Hi(+Hi(hi3, hi4), hi2))
    // But this would generate incorrect code, because the "+Hi(hi3, hi4)" code generation
    // needs to immediately follow the "+(lo3, lo4)" part. Also, if this node is one that
    // requires a unique high operator, and the child nodes are not simple locals (e.g.,
    // they are decomposed nodes), then we also can't decompose the node, as we aren't
    // guaranteed the high and low parts will be executed immediately after each other.
    NYI_IF(hiOp1->OperIsHigh() || hiOp2->OperIsHigh() ||
               (GenTree::OperIsHigh(GetHiOper(oper)) &&
                (!loOp1->OperIsLeaf() || !hiOp1->OperIsLeaf() || !loOp2->OperIsLeaf() || !hiOp2->OperIsLeaf())),
           "Can't decompose expression tree TYP_LONG node");

    // Now, remove op1 and op2 from the node list.
    m_compiler->fgSnipNode(curStmt, op1);
    m_compiler->fgSnipNode(curStmt, op2);

    // We will reuse "tree" for the loResult, which will now be of TYP_INT, and its operands
    // will be the lo halves of op1 and op2 from above.
    GenTree* loResult = tree;
    loResult->SetOper(GetLoOper(loResult->OperGet()));
    loResult->gtType     = TYP_INT;
    loResult->gtOp.gtOp1 = loOp1;
    loResult->gtOp.gtOp2 = loOp2;

    // The various halves will be correctly threaded internally. We simply need to
    // relink them into the proper order, i.e. loOp1 is followed by loOp2, and then
    // the loResult node.
    // (This rethreading, and that below, are where we need to address the reverse ops case).
    // The current order is (after snipping op1 and op2):
    //    ... loOp1-> ... hiOp1->loOp2First ... loOp2->hiOp2First ... hiOp2
    // The order we want is:
    //    ... loOp1->loOp2First ... loOp2->loResult
    //    ... hiOp1->hiOp2First ... hiOp2->hiResult
    // i.e. we swap hiOp1 and loOp2, and create (for now) separate loResult and hiResult trees.
    GenTree* loOp2First = hiOp1->gtNext;
    GenTree* hiOp2First = loOp2->gtNext;

    // First, we will NYI if both hiOp1 and loOp2 have side effects.
    NYI_IF(((loOp2->gtFlags & GTF_ALL_EFFECT) != 0) && ((hiOp1->gtFlags & GTF_ALL_EFFECT) != 0),
           "Binary long operator with non-reorderable sub expressions");

    // Now, we reorder the loOps and the loResult.
    loOp1->gtNext      = loOp2First;
    loOp2First->gtPrev = loOp1;
    loOp2->gtNext      = loResult;
    loResult->gtPrev   = loOp2;

    // Next, reorder the hiOps and the hiResult.
    GenTree* hiResult  = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2);
    hiOp1->gtNext      = hiOp2First;
    hiOp2First->gtPrev = hiOp1;
    hiOp2->gtNext      = hiResult;
    hiResult->gtPrev   = hiOp2;

    if ((oper == GT_ADD) || (oper == GT_SUB))
    {
        if (loResult->gtOverflow())
        {
            hiResult->gtFlags |= GTF_OVERFLOW;
            loResult->gtFlags &= ~GTF_OVERFLOW;
        }
        if (loResult->gtFlags & GTF_UNSIGNED)
        {
            hiResult->gtFlags |= GTF_UNSIGNED;
        }
    }

    FinalizeDecomposition(ppTree, data, loResult, hiResult);
}
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack)
{
    assert(useEdge != nullptr);

    GenTree* node = *useEdge;
    assert(node != nullptr);

#ifdef DEBUG
    const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0;
#endif

    // First, remove any preceding list nodes, which are not otherwise visited by the tree walk.
    //
    // NOTE: GT_FIELD_LIST head nodes, and GT_LIST nodes used by phi nodes will in fact be visited.
    for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
         prev = node->gtPrev)
    {
        BlockRange().Remove(prev);
    }

    // In addition, remove the current node if it is a GT_LIST node that is not an aggregate.
    if (node->OperIsAnyList())
    {
        GenTreeArgList* list = node->AsArgList();
        if (!list->OperIsFieldListHead())
        {
            BlockRange().Remove(list);
        }
        return Compiler::WALK_CONTINUE;
    }

    LIR::Use use;
    if (parentStack.Height() < 2)
    {
        use = LIR::Use::GetDummyUse(BlockRange(), *useEdge);
    }
    else
    {
        use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1));
    }

    assert(node == use.Def());
    switch (node->OperGet())
    {
        case GT_ASG:
            RewriteAssignment(use);
            break;

        case GT_BOX:
            // GT_BOX at this level just passes through so get rid of it
            use.ReplaceWith(comp, node->gtGetOp1());
            BlockRange().Remove(node);
            break;

        case GT_ADDR:
            RewriteAddress(use);
            break;

        case GT_IND:
            // Clear the `GTF_IND_ASG_LHS` flag, which overlaps with `GTF_IND_REQ_ADDR_IN_REG`.
            node->gtFlags &= ~GTF_IND_ASG_LHS;

            if (varTypeIsSIMD(node))
            {
                RewriteSIMDOperand(use, false);
            }
            else
            {
                // Due to promotion of structs containing fields of type struct with a
                // single scalar type field, we could potentially see IR nodes of the
                // form GT_IND(GT_ADD(lclvarAddr, 0)) where 0 is an offset representing
                // a field-seq. These get folded here.
                //
                // TODO: This code can be removed once JIT implements recursive struct
                // promotion instead of lying about the type of struct field as the type
                // of its single scalar field.
                GenTree* addr = node->AsIndir()->Addr();
                if (addr->OperGet() == GT_ADD && addr->gtGetOp1()->OperGet() == GT_LCL_VAR_ADDR &&
                    addr->gtGetOp2()->IsIntegralConst(0))
                {
                    GenTreeLclVarCommon* lclVarNode = addr->gtGetOp1()->AsLclVarCommon();
                    unsigned             lclNum     = lclVarNode->GetLclNum();
                    LclVarDsc*           varDsc     = comp->lvaTable + lclNum;
                    if (node->TypeGet() == varDsc->TypeGet())
                    {
                        JITDUMP("Rewriting GT_IND(GT_ADD(LCL_VAR_ADDR,0)) to LCL_VAR\n");
                        lclVarNode->SetOper(GT_LCL_VAR);
                        lclVarNode->gtType = node->TypeGet();
                        use.ReplaceWith(comp, lclVarNode);
                        BlockRange().Remove(addr);
                        BlockRange().Remove(addr->gtGetOp2());
                        BlockRange().Remove(node);
                    }
                }
            }
            break;

        case GT_NOP:
            // fgMorph sometimes inserts NOP nodes between defs and uses
            // supposedly 'to prevent constant folding'. In this case, remove the
            // NOP.
            if (node->gtGetOp1() != nullptr)
            {
                use.ReplaceWith(comp, node->gtGetOp1());
                BlockRange().Remove(node);
            }
            break;

        case GT_COMMA:
        {
            GenTree* op1 = node->gtGetOp1();
            if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
            {
                // The LHS has no side effects. Remove it.
                bool               isClosed    = false;
                unsigned           sideEffects = 0;
                LIR::ReadOnlyRange lhsRange    = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects);

                // None of the transforms performed herein violate tree order, so these
                // should always be true.
                assert(isClosed);
                assert((sideEffects & GTF_ALL_EFFECT) == 0);

                BlockRange().Delete(comp, m_block, std::move(lhsRange));
            }

            GenTree* replacement = node->gtGetOp2();
            if (!use.IsDummyUse())
            {
                use.ReplaceWith(comp, replacement);
            }
            else
            {
                // This is a top-level comma. If the RHS has no side effects we can remove
                // it as well.
                if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0)
                {
                    bool               isClosed    = false;
                    unsigned           sideEffects = 0;
                    LIR::ReadOnlyRange rhsRange    = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects);

                    // None of the transforms performed herein violate tree order, so these
                    // should always be true.
                    assert(isClosed);
                    assert((sideEffects & GTF_ALL_EFFECT) == 0);

                    BlockRange().Delete(comp, m_block, std::move(rhsRange));
                }
            }

            BlockRange().Remove(node);
        }
        break;

        case GT_ARGPLACE:
            // Remove argplace and list nodes from the execution order.
            //
            // TODO: remove phi args and phi nodes as well?
            BlockRange().Remove(node);
            break;

#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_)
        case GT_CLS_VAR:
        {
            // Class vars that are the target of an assignment will get rewritten into
            // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is
            // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would
            // otherwise be generated would also be picked up by RewriteAssignment--but
            // skipping the rewrite here saves an allocation and a bit of extra work.
            const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node);
            if (!isLHSOfAssignment)
            {
                GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node);

                node->SetOper(GT_CLS_VAR_ADDR);
                node->gtType = TYP_BYREF;

                BlockRange().InsertAfter(node, ind);
                use.ReplaceWith(comp, ind);

                // TODO: JIT dump
            }
        }
        break;
#endif // _TARGET_XARCH_ || _TARGET_ARM_

        case GT_INTRINSIC:
            // Non-target intrinsics should have already been rewritten back into user calls.
            assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId));
            break;

#ifdef FEATURE_SIMD
        case GT_BLK:
        case GT_OBJ:
        {
            // TODO-1stClassStructs: These should have been transformed to GT_INDs, but in order
            // to preserve existing behavior, we will keep this as a block node if this is the
            // lhs of a block assignment, and either:
            // - It is a "generic" TYP_STRUCT assignment, OR
            // - It is an initblk, OR
            // - Neither the lhs nor the rhs are known to be of SIMD type.
            GenTree* parent  = use.User();
            bool     keepBlk = false;
            if ((parent->OperGet() == GT_ASG) && (node == parent->gtGetOp1()))
            {
                if ((node->TypeGet() == TYP_STRUCT) || parent->OperIsInitBlkOp())
                {
                    keepBlk = true;
                }
                else if (!comp->isAddrOfSIMDType(node->AsBlk()->Addr()))
                {
                    GenTree* dataSrc = parent->gtGetOp2();
                    if (!dataSrc->IsLocal() && (dataSrc->OperGet() != GT_SIMD))
                    {
                        noway_assert(dataSrc->OperIsIndir());
                        keepBlk = !comp->isAddrOfSIMDType(dataSrc->AsIndir()->Addr());
                    }
                }
            }
            RewriteSIMDOperand(use, keepBlk);
        }
        break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            // TODO-1stClassStructs: Eliminate this.
            FixupIfSIMDLocal(node->AsLclVarCommon());
            break;

        case GT_SIMD:
        {
            noway_assert(comp->featureSIMD);
            GenTreeSIMD* simdNode = node->AsSIMD();
            unsigned     simdSize = simdNode->gtSIMDSize;
            var_types    simdType = comp->getSIMDTypeForSize(simdSize);

            // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
            // structs that are passed or returned in a different register type than their enregistered
            // type(s).
            if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
            {
                // This happens when it is consumed by a GT_RET_EXPR.
                // It can only be a Vector2f or Vector2i.
                assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
                simdNode->gtType = TYP_SIMD8;
            }

            // Certain SIMD trees require rationalizing.
            if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray)
            {
                // Rewrite this as an explicit load.
                JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
                unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
                GenTree*     address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
                                                                      baseTypeSize, offsetof(CORINFO_Array, u1Elems));
                GenTree*     ind     = comp->gtNewOperNode(GT_IND, simdType, address);

                BlockRange().InsertBefore(simdNode, address, ind);
                use.ReplaceWith(comp, ind);
                BlockRange().Remove(simdNode);

                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
            }
            else
            {
                // This code depends on the fact that NONE of the SIMD intrinsics take vector operands
                // of a different width. If that assumption changes, we will EITHER have to make these type
                // transformations during importation, and plumb the types all the way through the JIT,
                // OR add a lot of special handling here.
                GenTree* op1 = simdNode->gtGetOp1();
                if (op1 != nullptr && op1->gtType == TYP_STRUCT)
                {
                    op1->gtType = simdType;
                }

                GenTree* op2 = simdNode->gtGetOp2IfPresent();
                if (op2 != nullptr && op2->gtType == TYP_STRUCT)
                {
                    op2->gtType = simdType;
                }
            }
        }
        break;
#endif // FEATURE_SIMD

        default:
            // JCC nodes should not be present in HIR.
            assert(node->OperGet() != GT_JCC);
            break;
    }

    // Do some extra processing on top-level nodes to remove unused local reads.
    if (node->OperIsLocalRead())
    {
        if (use.IsDummyUse())
        {
            comp->lvaDecRefCnts(node);
            BlockRange().Remove(node);
        }
        else
        {
            // Local reads are side-effect-free; clear any flags leftover from frontend transformations.
            node->gtFlags &= ~GTF_ALL_EFFECT;
        }
    }

    assert(isLateArg == ((use.Def()->gtFlags & GTF_LATE_ARG) != 0));

    return Compiler::WALK_CONTINUE;
}
void Rationalizer::RewriteAssignment(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeOp* assignment = use.Def()->AsOp();
    assert(assignment->OperGet() == GT_ASG);

    GenTree* location = assignment->gtGetOp1();
    GenTree* value    = assignment->gtGetOp2();

    genTreeOps locationOp = location->OperGet();

    if (assignment->OperIsBlkOp())
    {
#ifdef FEATURE_SIMD
        if (varTypeIsSIMD(location) && assignment->OperIsInitBlkOp())
        {
            if (location->OperGet() == GT_LCL_VAR)
            {
                var_types simdType = location->TypeGet();
                GenTree*  initVal  = assignment->gtOp.gtOp2;
                var_types baseType = comp->getBaseTypeOfSIMDLocal(location);
                if (baseType != TYP_UNKNOWN)
                {
                    GenTreeSIMD* simdTree = new (comp, GT_SIMD)
                        GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType));
                    assignment->gtOp.gtOp2 = simdTree;
                    value                  = simdTree;
                    initVal->gtNext        = simdTree;
                    simdTree->gtPrev       = initVal;

                    simdTree->gtNext = location;
                    location->gtPrev = simdTree;
                }
            }
        }
#endif // FEATURE_SIMD
        if ((location->TypeGet() == TYP_STRUCT) && !assignment->IsPhiDefn() && !value->IsMultiRegCall())
        {
            if ((location->OperGet() == GT_LCL_VAR))
            {
                // We need to construct a block node for the location.
                // Modify lcl to be the address form.
                location->SetOper(addrForm(locationOp));
                LclVarDsc* varDsc = &(comp->lvaTable[location->AsLclVarCommon()->gtLclNum]);
                location->gtType  = TYP_BYREF;

                GenTreeBlk*  storeBlk = nullptr;
                unsigned int size     = varDsc->lvExactSize;

                if (varDsc->lvStructGcCount != 0)
                {
                    CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                    GenTreeObj*          objNode   = comp->gtNewObjNode(structHnd, location)->AsObj();
                    unsigned int         slots = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);

                    objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots);
                    objNode->ChangeOper(GT_STORE_OBJ);
                    objNode->SetData(value);
                    comp->fgMorphUnsafeBlk(objNode);
                    storeBlk = objNode;
                }
                else
                {
                    storeBlk = new (comp, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, size);
                }
                storeBlk->gtFlags |= (GTF_REVERSE_OPS | GTF_ASG);
                storeBlk->gtFlags |= ((location->gtFlags | value->gtFlags) & GTF_ALL_EFFECT);

                GenTree* insertionPoint = location->gtNext;
                BlockRange().InsertBefore(insertionPoint, storeBlk);
                use.ReplaceWith(comp, storeBlk);
                BlockRange().Remove(assignment);
                JITDUMP("After transforming local struct assignment into a block op:\n");
                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
                return;
            }
            else
            {
                assert(location->OperIsBlk());
            }
        }
    }

    switch (locationOp)
    {
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_REG_VAR:
        case GT_PHI_ARG:
            RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp);
            BlockRange().Remove(location);
            break;

        case GT_IND:
        {
            GenTreeStoreInd* store =
                new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value);

            copyFlags(store, assignment, GTF_ALL_EFFECT);
            copyFlags(store, location, GTF_IND_FLAGS);

            if (assignment->IsReverseOp())
            {
                store->gtFlags |= GTF_REVERSE_OPS;
            }

            // TODO: JIT dump

            // Remove the GT_IND node and replace the assignment node with the store
            BlockRange().Remove(location);
            BlockRange().InsertBefore(assignment, store);
            use.ReplaceWith(comp, store);
            BlockRange().Remove(assignment);
        }
        break;

        case GT_CLS_VAR:
        {
            location->SetOper(GT_CLS_VAR_ADDR);
            location->gtType = TYP_BYREF;

            assignment->SetOper(GT_STOREIND);

            // TODO: JIT dump
        }
        break;

        case GT_BLK:
        case GT_OBJ:
        case GT_DYN_BLK:
        {
            assert(varTypeIsStruct(location));
            GenTreeBlk* storeBlk = location->AsBlk();
            genTreeOps  storeOper;
            switch (location->gtOper)
            {
                case GT_BLK:
                    storeOper = GT_STORE_BLK;
                    break;
                case GT_OBJ:
                    storeOper = GT_STORE_OBJ;
                    break;
                case GT_DYN_BLK:
                    storeOper = GT_STORE_DYN_BLK;
                    break;
                default:
                    unreached();
            }
            JITDUMP("Rewriting GT_ASG(%s(X), Y) to %s(X,Y):\n", GenTree::NodeName(location->gtOper),
                    GenTree::NodeName(storeOper));
            storeBlk->SetOperRaw(storeOper);
            storeBlk->gtFlags &= ~GTF_DONT_CSE;
            storeBlk->gtFlags |=
                (assignment->gtFlags & (GTF_ALL_EFFECT | GTF_REVERSE_OPS | GTF_BLK_VOLATILE | GTF_BLK_UNALIGNED |
                                        GTF_DONT_CSE));
            storeBlk->gtBlk.Data() = value;

            // Replace the assignment node with the store
            use.ReplaceWith(comp, storeBlk);
            BlockRange().Remove(assignment);
            DISPTREERANGE(BlockRange(), use.Def());
            JITDUMP("\n");
        }
        break;

        default:
            unreached();
            break;
    }
}
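// Schematically (a sketch, not an exact JITDUMP), the struct-typed local case
// above turns
//
//     ASG(LCL_VAR<struct> V01, value)
//
// into a block store through the local's address,
//
//     STORE_OBJ(LCL_VAR_ADDR V01, value)   // layout contains GC pointers
//     STORE_BLK(LCL_VAR_ADDR V01, value)   // no GC pointers
//
// while the GT_BLK/GT_OBJ/GT_DYN_BLK cases simply retarget the existing block
// node to its store form (GT_STORE_BLK/GT_STORE_OBJ/GT_STORE_DYN_BLK).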
//------------------------------------------------------------------------
// DecomposeArith: Decompose GT_ADD, GT_SUB, GT_OR, GT_XOR, GT_AND.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeArith(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTree*   tree = use.Def();
    genTreeOps oper = tree->OperGet();

    assert((oper == GT_ADD) || (oper == GT_SUB) || (oper == GT_OR) || (oper == GT_XOR) || (oper == GT_AND));

    GenTree* op1 = tree->gtGetOp1();
    GenTree* op2 = tree->gtGetOp2();

    // Both operands must have already been decomposed into GT_LONG operators.
    noway_assert((op1->OperGet() == GT_LONG) && (op2->OperGet() == GT_LONG));

    // Capture the lo and hi halves of op1 and op2.
    GenTree* loOp1 = op1->gtGetOp1();
    GenTree* hiOp1 = op1->gtGetOp2();
    GenTree* loOp2 = op2->gtGetOp1();
    GenTree* hiOp2 = op2->gtGetOp2();

    // We don't have support to decompose a TYP_LONG node that already has a child that has
    // been decomposed into parts, where the high part depends on the value generated by the
    // low part (via the flags register). For example, if we have:
    //    +(gt_long(+(lo3, lo4), +Hi(hi3, hi4)), gt_long(lo2, hi2))
    // We would decompose it here to:
    //    gt_long(+(+(lo3, lo4), lo2), +Hi(+Hi(hi3, hi4), hi2))
    // But this would generate incorrect code, because the "+Hi(hi3, hi4)" code generation
    // needs to immediately follow the "+(lo3, lo4)" part. Also, if this node is one that
    // requires a unique high operator, and the child nodes are not simple locals (e.g.,
    // they are decomposed nodes), then we also can't decompose the node, as we aren't
    // guaranteed the high and low parts will be executed immediately after each other.
    NYI_IF(hiOp1->OperIsHigh() || hiOp2->OperIsHigh() ||
               (GenTree::OperIsHigh(GetHiOper(oper)) &&
                (!loOp1->OperIsLeaf() || !hiOp1->OperIsLeaf() || !loOp2->OperIsLeaf() || !hiOp2->OperIsLeaf())),
           "Can't decompose expression tree TYP_LONG node");

    // Now, remove op1 and op2 from the node list.
    BlockRange().Remove(op1);
    BlockRange().Remove(op2);

    // We will reuse "tree" for the loResult, which will now be of TYP_INT, and its operands
    // will be the lo halves of op1 and op2 from above.
    GenTree* loResult = tree;
    loResult->SetOper(GetLoOper(loResult->OperGet()));
    loResult->gtType     = TYP_INT;
    loResult->gtOp.gtOp1 = loOp1;
    loResult->gtOp.gtOp2 = loOp2;

    GenTree* hiResult = new (m_compiler, oper) GenTreeOp(GetHiOper(oper), TYP_INT, hiOp1, hiOp2);
    hiResult->CopyCosts(loResult);
    BlockRange().InsertAfter(loResult, hiResult);

    if ((oper == GT_ADD) || (oper == GT_SUB))
    {
        if (loResult->gtOverflow())
        {
            hiResult->gtFlags |= GTF_OVERFLOW;
            loResult->gtFlags &= ~GTF_OVERFLOW;
        }
        if (loResult->gtFlags & GTF_UNSIGNED)
        {
            hiResult->gtFlags |= GTF_UNSIGNED;
        }
    }

    return FinalizeDecomposition(use, loResult, hiResult);
}
//------------------------------------------------------------------------
// DecomposeStoreLclVar: Decompose GT_STORE_LCL_VAR.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeStoreLclVar(LIR::Use& use)
{
    assert(use.IsInitialized());
    assert(use.Def()->OperGet() == GT_STORE_LCL_VAR);

    GenTree* tree = use.Def();
    GenTree* rhs  = tree->gtGetOp1();
    if ((rhs->OperGet() == GT_PHI) || (rhs->OperGet() == GT_CALL) ||
        ((rhs->OperGet() == GT_MUL_LONG) && (rhs->gtFlags & GTF_MUL_64RSLT) != 0))
    {
        // GT_CALLs are not decomposed, so will not be converted to GT_LONG
        // GT_STORE_LCL_VAR = GT_CALL are handled in genMultiRegCallStoreToLocal
        // GT_MULs are not decomposed, so will not be converted to GT_LONG
        return tree->gtNext;
    }

    noway_assert(rhs->OperGet() == GT_LONG);

    unsigned   varNum = tree->AsLclVarCommon()->gtLclNum;
    LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
    m_compiler->lvaDecRefCnts(tree);

    GenTree* loRhs   = rhs->gtGetOp1();
    GenTree* hiRhs   = rhs->gtGetOp2();
    GenTree* hiStore = m_compiler->gtNewLclLNode(varNum, TYP_INT);

    if (varDsc->lvPromoted)
    {
        assert(varDsc->lvFieldCnt == 2);

        unsigned loVarNum = varDsc->lvFieldLclStart;
        unsigned hiVarNum = loVarNum + 1;
        tree->AsLclVarCommon()->SetLclNum(loVarNum);
        hiStore->SetOper(GT_STORE_LCL_VAR);
        hiStore->AsLclVarCommon()->SetLclNum(hiVarNum);
    }
    else
    {
        noway_assert(varDsc->lvLRACandidate == false);

        tree->SetOper(GT_STORE_LCL_FLD);
        tree->AsLclFld()->gtLclOffs  = 0;
        tree->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();

        hiStore->SetOper(GT_STORE_LCL_FLD);
        hiStore->AsLclFld()->gtLclOffs  = 4;
        hiStore->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
    }

    // 'tree' is going to steal the loRhs node for itself, so we need to remove the
    // GT_LONG node from the threading.
    Range().Remove(rhs);

    tree->gtOp.gtOp1 = loRhs;
    tree->gtType     = TYP_INT;

    hiStore->gtOp.gtOp1 = hiRhs;
    hiStore->gtFlags |= GTF_VAR_DEF;

    m_compiler->lvaIncRefCnts(tree);
    m_compiler->lvaIncRefCnts(hiStore);

    Range().InsertAfter(tree, hiStore);

    return hiStore->gtNext;
}
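// As a worked example (with a hypothetical long-typed local V02 on a 32-bit
// target), the store
//
//     STORE_LCL_VAR<long> V02 = GT_LONG(lo, hi)
//
// decomposes into two TYP_INT stores, with the GT_LONG removed from the
// threading:
//
//     STORE_LCL_FLD<int> V02 [+0] = lo
//     STORE_LCL_FLD<int> V02 [+4] = hi
//
// (or into two STORE_LCL_VARs of the promoted field locals when V02 is
// promoted).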
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack)
{
    assert(useEdge != nullptr);

    GenTree* node = *useEdge;
    assert(node != nullptr);

#ifdef DEBUG
    const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0;
#endif

    // First, remove any preceding GT_LIST nodes, which are not otherwise visited by the tree walk.
    //
    // NOTE: GT_LIST nodes that are used by block ops and phi nodes will in fact be visited.
    for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperGet() == GT_LIST; prev = node->gtPrev)
    {
        BlockRange().Remove(prev);
    }

    // In addition, remove the current node if it is a GT_LIST node.
    if ((*useEdge)->OperGet() == GT_LIST)
    {
        BlockRange().Remove(*useEdge);
        return Compiler::WALK_CONTINUE;
    }

    LIR::Use use;
    if (parentStack.Height() < 2)
    {
        use = LIR::Use::GetDummyUse(BlockRange(), *useEdge);
    }
    else
    {
        use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1));
    }

    assert(node == use.Def());
    switch (node->OperGet())
    {
        case GT_ASG:
            RewriteAssignment(use);
            break;

        case GT_BOX:
            // GT_BOX at this level just passes through so get rid of it
            use.ReplaceWith(comp, node->gtGetOp1());
            BlockRange().Remove(node);
            break;

        case GT_ADDR:
            RewriteAddress(use);
            break;

        case GT_NOP:
            // fgMorph sometimes inserts NOP nodes between defs and uses
            // supposedly 'to prevent constant folding'. In this case, remove the
            // NOP.
            if (node->gtGetOp1() != nullptr)
            {
                use.ReplaceWith(comp, node->gtGetOp1());
                BlockRange().Remove(node);
            }
            break;

        case GT_COMMA:
        {
            GenTree* op1 = node->gtGetOp1();
            if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
            {
                // The LHS has no side effects. Remove it.
                bool               isClosed    = false;
                unsigned           sideEffects = 0;
                LIR::ReadOnlyRange lhsRange    = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects);

                // None of the transforms performed herein violate tree order, so these
                // should always be true.
                assert(isClosed);
                assert((sideEffects & GTF_ALL_EFFECT) == 0);

                BlockRange().Delete(comp, m_block, std::move(lhsRange));
            }

            GenTree* replacement = node->gtGetOp2();
            if (!use.IsDummyUse())
            {
                use.ReplaceWith(comp, replacement);
            }
            else
            {
                // This is a top-level comma. If the RHS has no side effects we can remove
                // it as well.
                if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0)
                {
                    bool               isClosed    = false;
                    unsigned           sideEffects = 0;
                    LIR::ReadOnlyRange rhsRange    = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects);

                    // None of the transforms performed herein violate tree order, so these
                    // should always be true.
                    assert(isClosed);
                    assert((sideEffects & GTF_ALL_EFFECT) == 0);

                    BlockRange().Delete(comp, m_block, std::move(rhsRange));
                }
            }

            BlockRange().Remove(node);
        }
        break;

        case GT_ARGPLACE:
            // Remove argplace and list nodes from the execution order.
            //
            // TODO: remove phi args and phi nodes as well?
            BlockRange().Remove(node);
            break;

#ifdef _TARGET_XARCH_
        case GT_CLS_VAR:
        {
            // Class vars that are the target of an assignment will get rewritten into
            // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is
            // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would
            // otherwise be generated would also be picked up by RewriteAssignment--but
            // skipping the rewrite here saves an allocation and a bit of extra work.
            const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node);
            if (!isLHSOfAssignment)
            {
                GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node);

                node->SetOper(GT_CLS_VAR_ADDR);
                node->gtType = TYP_BYREF;

                BlockRange().InsertAfter(node, ind);
                use.ReplaceWith(comp, ind);

                // TODO: JIT dump
            }
        }
        break;
#endif // _TARGET_XARCH_

        case GT_INTRINSIC:
            // Non-target intrinsics should have already been rewritten back into user calls.
            assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId));
            break;

#ifdef FEATURE_SIMD
        case GT_INITBLK:
            RewriteInitBlk(use);
            break;

        case GT_COPYBLK:
            RewriteCopyBlk(use);
            break;

        case GT_OBJ:
            RewriteObj(use);
            break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            // TODO-1stClassStructs: Eliminate this.
            FixupIfSIMDLocal(node->AsLclVarCommon());
            break;

        case GT_STOREIND:
        case GT_IND:
            if (node->gtType == TYP_STRUCT)
            {
                GenTree* addr = node->AsIndir()->Addr();
                assert(addr->TypeGet() == TYP_BYREF);

                if (addr->OperIsLocal())
                {
                    LclVarDsc* varDsc = &(comp->lvaTable[addr->AsLclVarCommon()->gtLclNum]);
                    assert(varDsc->lvSIMDType);
                    unsigned simdSize = (unsigned int)roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE);
                    node->gtType      = comp->getSIMDTypeForSize(simdSize);
                }
#if DEBUG
                else
                {
                    // If the address is not a local var, assert that the user of this IND is an ADDR node.
                    assert((use.User()->OperGet() == GT_ADDR) || use.User()->OperIsLocalAddr());
                }
#endif
            }
            break;

        case GT_SIMD:
        {
            noway_assert(comp->featureSIMD);
            GenTreeSIMD* simdNode = node->AsSIMD();
            unsigned     simdSize = simdNode->gtSIMDSize;
            var_types    simdType = comp->getSIMDTypeForSize(simdSize);

            // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
            // structs that are passed or returned in a different register type than their enregistered
            // type(s).
            if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
            {
                // This happens when it is consumed by a GT_RET_EXPR.
                // It can only be a Vector2f or Vector2i.
                assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
                simdNode->gtType = TYP_SIMD8;
            }
            else if (simdNode->gtType == TYP_STRUCT || varTypeIsSIMD(simdNode))
            {
                node->gtType = simdType;
            }

            // Certain SIMD trees require rationalizing.
            if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray)
            {
                // Rewrite this as an explicit load.
                JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
                unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
                GenTree*     address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
                                                                      baseTypeSize, offsetof(CORINFO_Array, u1Elems));
                GenTree*     ind     = comp->gtNewOperNode(GT_IND, simdType, address);

                BlockRange().InsertBefore(simdNode, address, ind);
                use.ReplaceWith(comp, ind);
                BlockRange().Remove(simdNode);

                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
            }
            else
            {
                // This code depends on the fact that NONE of the SIMD intrinsics take vector operands
                // of a different width. If that assumption changes, we will EITHER have to make these type
                // transformations during importation, and plumb the types all the way through the JIT,
                // OR add a lot of special handling here.
                GenTree* op1 = simdNode->gtGetOp1();
                if (op1 != nullptr && op1->gtType == TYP_STRUCT)
                {
                    op1->gtType = simdType;
                }

                GenTree* op2 = simdNode->gtGetOp2();
                if (op2 != nullptr && op2->gtType == TYP_STRUCT)
                {
                    op2->gtType = simdType;
                }
            }
        }
        break;
#endif // FEATURE_SIMD

        default:
            break;
    }

    // Do some extra processing on top-level nodes to remove unused local reads.
    if (use.IsDummyUse() && node->OperIsLocalRead())
    {
        assert((node->gtFlags & GTF_ALL_EFFECT) == 0);

        comp->lvaDecRefCnts(node);
        BlockRange().Remove(node);
    }

    assert(isLateArg == ((node->gtFlags & GTF_LATE_ARG) != 0));

    return Compiler::WALK_CONTINUE;
}
// Transform a CopyBlk involving SIMD vectors into a stlcl.var or stind of a SIMD type.
// The transformation is done if either the src or the dst is known to be a SIMD vector.
//
// Arguments:
//    use - the LIR::Use of the GT_COPYBLK node
//
// Return Value:
//    None.
//
// If either the source or the dst is known to be SIMD (a lclVar or SIMD intrinsic),
// get the simdType (TYP_DOUBLE or a SIMD type for SSE2) from the size of the SIMD node.
//
// For the source:
// - If it is a SIMD intrinsic or a lvSIMDType lclVar, change the node type to simdType.
// - Otherwise, add a GT_IND of simdType.
// For the dst:
// - If it is a lclVar of a SIMD type, change the node type to simdType.
// - Otherwise, change it to a GT_STORE_IND of simdType.
//
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
void Rationalizer::RewriteCopyBlk(LIR::Use& use)
{
#ifdef FEATURE_SIMD
    // No need to transform non-SIMD nodes, if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    // See if this is a SIMD copyBlk
    GenTreeCpBlk* cpBlk   = use.Def()->AsCpBlk();
    GenTreePtr    dstAddr = cpBlk->Dest();
    GenTree*      srcAddr = cpBlk->Source();

    const bool srcIsSIMDAddr = comp->isAddrOfSIMDType(srcAddr);
    const bool dstIsSIMDAddr = comp->isAddrOfSIMDType(dstAddr);

    // Do not transform if neither the src nor the dst is known to be a SIMD type.
    // If the src tree type is something we cannot reason about but the dst is known
    // to be of a SIMD type, we will treat the src tree as a SIMD type and vice versa.
    if (!srcIsSIMDAddr && !dstIsSIMDAddr)
    {
        return;
    }

    // At this point it is known to be a copyblk of SIMD vectors and we can
    // start transforming the original tree. Prior to this point do not perform
    // any modifications to the original tree.
    JITDUMP("\nRewriting SIMD CopyBlk\n");
    DISPTREERANGE(BlockRange(), cpBlk);

    // The currently supported sizes are 8 bytes, 12 bytes, 16 bytes, or the vector register length.
    GenTreeIntConCommon* sizeNode = cpBlk->Size()->AsIntConCommon();
    var_types            simdType = comp->getSIMDTypeForSize((unsigned int)sizeNode->IconValue());

    // Remove 'size' from execution order
    BlockRange().Remove(sizeNode);

    // Is the destination a lclVar which is not an arg?
    // If yes then we can turn it into a stlcl.var, otherwise turn it into a stind.
    GenTree*   simdDst = nullptr;
    genTreeOps oper    = GT_NONE;
    if (dstIsSIMDAddr && dstAddr->OperIsLocalAddr())
    {
        simdDst         = dstAddr;
        simdDst->gtType = simdType;
        oper            = GT_STORE_LCL_VAR;

        // For structs that are padded (e.g. Vector3f, Vector3i), the morpher will have marked them
        // as GTF_VAR_USEASG. Unmark them.
        simdDst->gtFlags &= ~(GTF_VAR_USEASG);
    }
    else
    {
        // Address of a non-local var
        simdDst = dstAddr;
        oper    = GT_STOREIND;
    }

    GenTree* simdSrc = nullptr;
    if ((srcAddr->OperGet() == GT_ADDR) && varTypeIsSIMD(srcAddr->gtGetOp1()))
    {
        // Get rid of the GT_ADDR(..) node if its child happens to be of a SIMD type.
        BlockRange().Remove(srcAddr);
        simdSrc = srcAddr->gtGetOp1();
    }
    else if (srcIsSIMDAddr && srcAddr->OperIsLocalAddr())
    {
        // If the source has been rewritten into a local addr node, rewrite it back into a
        // local var node.
        simdSrc = srcAddr;
        simdSrc->SetOper(loadForm(srcAddr->OperGet()));
    }
    else
    {
        // Since the destination is known to be a SIMD type, the src must be a SIMD type too,
        // though we cannot figure it out easily enough. Transform src into
        // GT_IND(src) of simdType.
        GenTree* indir = comp->gtNewOperNode(GT_IND, simdType, srcAddr);
        BlockRange().InsertAfter(srcAddr, indir);

        cpBlk->gtGetOp1()->gtOp.gtOp2 = indir;
        simdSrc                       = indir;
    }
    simdSrc->gtType = simdType;

    // Change cpblk to either a st.lclvar or st.ind.
    // At this point we are manipulating the cpblk node with the knowledge of
    // its internals (i.e. op1 is a GT_LIST holding the dst & src addresses, and op2 is the size node).
    // This logic might need to be changed if we ever restructure the cpblk node.
    assert(simdDst != nullptr);
    assert(simdSrc != nullptr);

    GenTree* newNode = nullptr;
    if (oper == GT_STORE_LCL_VAR)
    {
        newNode = simdDst;
        newNode->SetOper(oper);

        GenTreeLclVar* store = newNode->AsLclVar();
        store->gtOp1         = simdSrc;
        store->gtType        = simdType;
        store->gtFlags |= ((simdSrc->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);

        BlockRange().Remove(simdDst);
        BlockRange().InsertAfter(simdSrc, store);
    }
    else
    {
        assert(oper == GT_STOREIND);

        newNode = cpBlk->gtGetOp1();
        newNode->SetOper(oper);

        GenTreeStoreInd* storeInd = newNode->AsStoreInd();
        storeInd->gtType          = simdType;
        storeInd->gtFlags |= ((simdSrc->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);
        storeInd->gtOp1 = simdDst;
        storeInd->gtOp2 = simdSrc;

        BlockRange().InsertBefore(cpBlk, storeInd);
    }

    use.ReplaceWith(comp, newNode);
    BlockRange().Remove(cpBlk);

    JITDUMP("After rewriting SIMD CopyBlk:\n");
    DISPTREERANGE(BlockRange(), use.Def());
    JITDUMP("\n");
#endif // FEATURE_SIMD
}
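// Schematically (a sketch, not an exact JITDUMP), a 16-byte SIMD copyblk
//
//     COPYBLK(dst = LCL_VAR_ADDR V01, src = addr, size = 16)
//
// becomes either a local store or an indirect store of the SIMD type:
//
//     STORE_LCL_VAR<simd16> V01 = IND<simd16>(addr)   // dst is a local
//     STOREIND<simd16>(dstAddr, src)                  // dst is not a local
//
// with the source rewritten back to a LCL_VAR or GT_SIMD node when its SIMD
// type can be recovered from the address.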
// Rewrite an InitBlk involving a SIMD vector into a stlcl.var of a SIMD type.
//
// Arguments:
//    use - the LIR::Use of the GT_INITBLK node
//
// Return Value:
//    None.
//
// TODO-Cleanup: Once SIMD types are plumbed through the frontend, this will no longer
// be required.
//
void Rationalizer::RewriteInitBlk(LIR::Use& use)
{
#ifdef FEATURE_SIMD
    // No lowering is needed for non-SIMD nodes, so early out if featureSIMD is not enabled.
    if (!comp->featureSIMD)
    {
        return;
    }

    // See if this is a SIMD initBlk that needs to be changed to a simple st.lclVar.
    GenTreeInitBlk* initBlk = use.Def()->AsInitBlk();

    // Is the dstAddr the address of a SIMD-typed lclVar?
    GenTree* dstAddr = initBlk->Dest();
    if (!comp->isAddrOfSIMDType(dstAddr) || !dstAddr->OperIsLocalAddr())
    {
        return;
    }

    unsigned lclNum = dstAddr->AsLclVarCommon()->gtLclNum;
    if (!comp->lvaTable[lclNum].lvSIMDType)
    {
        return;
    }

    var_types            baseType      = comp->lvaTable[lclNum].lvBaseType;
    CORINFO_CLASS_HANDLE typeHnd       = comp->lvaTable[lclNum].lvVerTypeInfo.GetClassHandle();
    unsigned             simdLocalSize = comp->getSIMDTypeSizeInBytes(typeHnd);

    JITDUMP("Rewriting SIMD InitBlk\n");
    DISPTREERANGE(BlockRange(), initBlk);

    assert((dstAddr->gtFlags & GTF_VAR_USEASG) == 0);

    // There are currently only three sizes supported: 8 bytes, 16 bytes, or the vector register length.
    GenTreeIntConCommon* sizeNode = initBlk->Size()->AsIntConCommon();
    unsigned int         size     = (unsigned int)roundUp(sizeNode->IconValue(), TARGET_POINTER_SIZE);
    var_types            simdType = comp->getSIMDTypeForSize(size);
    assert(roundUp(simdLocalSize, TARGET_POINTER_SIZE) == size);

    GenTree*     initVal  = initBlk->InitVal();
    GenTreeSIMD* simdNode = new (comp, GT_SIMD)
        GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, (unsigned)sizeNode->IconValue());

    dstAddr->SetOper(GT_STORE_LCL_VAR);
    GenTreeLclVar* store = dstAddr->AsLclVar();
    store->gtType        = simdType;
    store->gtOp.gtOp1    = simdNode;
    store->gtFlags |= ((simdNode->gtFlags & GTF_ALL_EFFECT) | GTF_ASG);
    BlockRange().Remove(store);

    // Insert the new nodes into the block
    BlockRange().InsertAfter(initVal, simdNode, store);
    use.ReplaceWith(comp, store);

    // Remove the old size and GT_INITBLK nodes.
    BlockRange().Remove(sizeNode);
    BlockRange().Remove(initBlk);

    JITDUMP("After rewriting SIMD InitBlk:\n");
    DISPTREERANGE(BlockRange(), use.Def());
    JITDUMP("\n");
#endif // FEATURE_SIMD
}
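// Schematically (a sketch, not an exact JITDUMP), a SIMD initblk such as
//
//     INITBLK(dst = LCL_VAR_ADDR V01, initVal, size = 16)
//
// is rewritten into an intrinsic init stored directly to the local:
//
//     STORE_LCL_VAR<simd16> V01 = SIMD<SIMDIntrinsicInit>(initVal)
//
// which lets the value live in a vector register instead of going through a
// block operation.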
//------------------------------------------------------------------------
// DecomposeStoreLclVar: Decompose GT_STORE_LCL_VAR.
//
// Arguments:
//    ppTree - the tree to decompose
//    data   - tree walk context
//
// Return Value:
//    None.
//
void DecomposeLongs::DecomposeStoreLclVar(GenTree** ppTree, Compiler::fgWalkData* data)
{
    assert(ppTree != nullptr);
    assert(*ppTree != nullptr);
    assert(data != nullptr);
    assert((*ppTree)->OperGet() == GT_STORE_LCL_VAR);
    assert(m_compiler->compCurStmt != nullptr);

    GenTreeStmt* curStmt  = m_compiler->compCurStmt->AsStmt();
    GenTree*     tree     = *ppTree;
    GenTree*     nextTree = tree->gtNext;
    GenTree*     rhs      = tree->gtGetOp1();

    if ((rhs->OperGet() == GT_PHI) || (rhs->OperGet() == GT_CALL))
    {
        // GT_CALLs are not decomposed, so will not be converted to GT_LONG
        // GT_STORE_LCL_VAR = GT_CALL are handled in genMultiRegCallStoreToLocal
        return;
    }

    noway_assert(rhs->OperGet() == GT_LONG);

    unsigned   varNum = tree->AsLclVarCommon()->gtLclNum;
    LclVarDsc* varDsc = m_compiler->lvaTable + varNum;
    m_compiler->lvaDecRefCnts(tree);

    GenTree* loRhs   = rhs->gtGetOp1();
    GenTree* hiRhs   = rhs->gtGetOp2();
    GenTree* hiStore = m_compiler->gtNewLclLNode(varNum, TYP_INT);

    if (varDsc->lvPromoted)
    {
        assert(varDsc->lvFieldCnt == 2);

        unsigned loVarNum = varDsc->lvFieldLclStart;
        unsigned hiVarNum = loVarNum + 1;
        tree->AsLclVarCommon()->SetLclNum(loVarNum);
        hiStore->SetOper(GT_STORE_LCL_VAR);
        hiStore->AsLclVarCommon()->SetLclNum(hiVarNum);
    }
    else
    {
        noway_assert(varDsc->lvLRACandidate == false);

        tree->SetOper(GT_STORE_LCL_FLD);
        tree->AsLclFld()->gtLclOffs  = 0;
        tree->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();

        hiStore->SetOper(GT_STORE_LCL_FLD);
        hiStore->AsLclFld()->gtLclOffs  = 4;
        hiStore->AsLclFld()->gtFieldSeq = FieldSeqStore::NotAField();
    }

    tree->gtOp.gtOp1 = loRhs;
    tree->gtType     = TYP_INT;

    loRhs->gtNext = tree;
    tree->gtPrev  = loRhs;

    hiStore->gtOp.gtOp1 = hiRhs;
    hiStore->CopyCosts(tree);
    hiStore->gtFlags |= GTF_VAR_DEF;

    m_compiler->lvaIncRefCnts(tree);
    m_compiler->lvaIncRefCnts(hiStore);

    tree->gtNext    = hiRhs;
    hiRhs->gtPrev   = tree;
    hiRhs->gtNext   = hiStore;
    hiStore->gtPrev = hiRhs;
    hiStore->gtNext = nextTree;
    if (nextTree != nullptr)
    {
        nextTree->gtPrev = hiStore;
    }
    nextTree = hiRhs;

    bool isEmbeddedStmt = !curStmt->gtStmtIsTopLevel();
    if (!isEmbeddedStmt)
    {
        tree->gtNext  = nullptr;
        hiRhs->gtPrev = nullptr;
    }

    InsertNodeAsStmt(hiStore);
}
void Rationalizer::RewriteAssignment(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeOp* assignment = use.Def()->AsOp();
    assert(assignment->OperGet() == GT_ASG);

    GenTree* location = assignment->gtGetOp1();
    GenTree* value    = assignment->gtGetOp2();

    genTreeOps locationOp = location->OperGet();

#ifdef FEATURE_SIMD
    if (varTypeIsSIMD(location) && assignment->OperIsInitBlkOp())
    {
        if (location->OperGet() == GT_LCL_VAR)
        {
            var_types simdType = location->TypeGet();
            GenTree*  initVal  = assignment->gtOp.gtOp2;
            var_types baseType = comp->getBaseTypeOfSIMDLocal(location);
            if (baseType != TYP_UNKNOWN)
            {
                GenTreeSIMD* simdTree = new (comp, GT_SIMD)
                    GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType));
                assignment->gtOp.gtOp2 = simdTree;
                value                  = simdTree;
                initVal->gtNext        = simdTree;
                simdTree->gtPrev       = initVal;

                simdTree->gtNext = location;
                location->gtPrev = simdTree;
            }
        }
        else
        {
            assert(location->OperIsBlk());
        }
    }
#endif // FEATURE_SIMD

    switch (locationOp)
    {
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_REG_VAR:
        case GT_PHI_ARG:
            RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp);
            BlockRange().Remove(location);
            break;

        case GT_IND:
        {
            GenTreeStoreInd* store =
                new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value);

            copyFlags(store, assignment, GTF_ALL_EFFECT);
            copyFlags(store, location, GTF_IND_FLAGS);

            if (assignment->IsReverseOp())
            {
                store->gtFlags |= GTF_REVERSE_OPS;
            }

            // TODO: JIT dump

            // Remove the GT_IND node and replace the assignment node with the store
            BlockRange().Remove(location);
            BlockRange().InsertBefore(assignment, store);
            use.ReplaceWith(comp, store);
            BlockRange().Remove(assignment);
        }
        break;

        case GT_CLS_VAR:
        {
            location->SetOper(GT_CLS_VAR_ADDR);
            location->gtType = TYP_BYREF;

            assignment->SetOper(GT_STOREIND);

            // TODO: JIT dump
        }
        break;

        case GT_BLK:
        case GT_OBJ:
        case GT_DYN_BLK:
        {
            assert(varTypeIsStruct(location));
            GenTreeBlk* storeBlk = location->AsBlk();
            genTreeOps  storeOper;
            switch (location->gtOper)
            {
                case GT_BLK:
                    storeOper = GT_STORE_BLK;
                    break;
                case GT_OBJ:
                    storeOper = GT_STORE_OBJ;
                    break;
                case GT_DYN_BLK:
                    storeOper = GT_STORE_DYN_BLK;
                    break;
                default:
                    unreached();
            }
            JITDUMP("Rewriting GT_ASG(%s(X), Y) to %s(X,Y):\n", GenTree::NodeName(location->gtOper),
                    GenTree::NodeName(storeOper));
            storeBlk->SetOperRaw(storeOper);
            storeBlk->gtFlags &= ~GTF_DONT_CSE;
            storeBlk->gtFlags |=
                (assignment->gtFlags & (GTF_ALL_EFFECT | GTF_REVERSE_OPS | GTF_BLK_VOLATILE | GTF_BLK_UNALIGNED |
                                        GTF_BLK_INIT | GTF_DONT_CSE));
            storeBlk->gtBlk.Data() = value;

            // Replace the assignment node with the store
            use.ReplaceWith(comp, storeBlk);
            BlockRange().Remove(assignment);
            DISPTREERANGE(BlockRange(), use.Def());
            JITDUMP("\n");
        }
        break;

        default:
            unreached();
            break;
    }
}