//--------------------------------------------------------------------------------------------------
// ToGenTree - Convert a "condition" into a gentree node.
//
// Arguments:
//      comp    Compiler instance to allocate trees
//
// Return Values:
//      Returns the gen tree representation for the conditional operator on lhs and rhs trees
//
GenTreePtr LC_Condition::ToGenTree(Compiler* comp)
{
    GenTree* op1Tree = op1.ToGenTree(comp);
    GenTree* op2Tree = op2.ToGenTree(comp);
    assert(genTypeSize(genActualType(op1Tree->TypeGet())) == genTypeSize(genActualType(op2Tree->TypeGet())));
    return comp->gtNewOperNode(oper, TYP_INT, op1Tree, op2Tree);
}
//------------------------------------------------------------------------
// LowerStoreLoc: Lower a store of a lclVar
//
// Arguments:
//    storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Notes:
//    This involves:
//    - Widening operations of unsigneds.
//
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
    // Try to widen the ops if they are going into a local var.
    GenTree* op1 = storeLoc->gtGetOp1();
    if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
    {
        GenTreeIntCon* con    = op1->AsIntCon();
        ssize_t        ival   = con->gtIconVal;
        unsigned       varNum = storeLoc->gtLclNum;
        LclVarDsc*     varDsc = comp->lvaTable + varNum;

        if (varDsc->lvIsSIMDType())
        {
            noway_assert(storeLoc->gtType != TYP_STRUCT);
        }
        unsigned size = genTypeSize(storeLoc);
        // If we are storing a constant into a local variable
        // we extend the size of the store here
        if ((size < 4) && !varTypeIsStruct(varDsc))
        {
            if (!varTypeIsUnsigned(varDsc))
            {
                if (genTypeSize(storeLoc) == 1)
                {
                    if ((ival & 0x7f) != ival)
                    {
                        ival = ival | 0xffffff00;
                    }
                }
                else
                {
                    assert(genTypeSize(storeLoc) == 2);
                    if ((ival & 0x7fff) != ival)
                    {
                        ival = ival | 0xffff0000;
                    }
                }
            }

            // A local stack slot is at least 4 bytes in size, regardless of
            // what the local var is typed as, so auto-promote it here
            // unless it is a field of a promoted struct
            // TODO-CQ: if the field is promoted shouldn't we also be able to do this?
            if (!varDsc->lvIsStructField)
            {
                storeLoc->gtType = TYP_INT;
                con->SetIconValue(ival);
            }
        }
    }
    if (storeLoc->OperIs(GT_STORE_LCL_FLD))
    {
        // We should only encounter this for lclVars that are lvDoNotEnregister.
        verifyLclFldDoNotEnregister(storeLoc->gtLclNum);
    }
    ContainCheckStoreLoc(storeLoc);
}
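// A minimal standalone sketch of the constant-widening rule above (WidenSmallStoreConst is a
// hypothetical helper, not a JIT API): when a 1- or 2-byte signed local gets a constant store
// that is widened to a 4-byte slot, the constant is sign-extended with the same masks the
// lowering code uses.
#include <cassert>
#include <cstdint>

static int64_t WidenSmallStoreConst(int64_t ival, unsigned storeSize)
{
    if (storeSize == 1)
    {
        if ((ival & 0x7f) != ival)
        {
            ival = ival | 0xffffff00; // bit 7 (or higher bits) set: sign-extend the byte
        }
    }
    else
    {
        assert(storeSize == 2);
        if ((ival & 0x7fff) != ival)
        {
            ival = ival | 0xffff0000; // sign-extend the 16-bit value
        }
    }
    return ival;
}

// For example, storing (char)-1 (0xFF) into a widened TYP_INT slot yields 0xFFFFFFFF in the
// low 32 bits, while 0x7F stays 0x7F: WidenSmallStoreConst(0xFF, 1) == 0xFFFFFFFF and
// WidenSmallStoreConst(0x7F, 1) == 0x7F.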
//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
//    tree - the node to lower
//
// Return Value:
//    None.
//
void Lowering::LowerRotate(GenTree* tree)
{
    if (tree->OperGet() == GT_ROL)
    {
        // There is no ROL instruction on ARM. Convert ROL into ROR.
        GenTree* rotatedValue        = tree->gtOp.gtOp1;
        unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
        GenTree* rotateLeftIndexNode = tree->gtOp.gtOp2;

        if (rotateLeftIndexNode->IsCnsIntOrI())
        {
            ssize_t rotateLeftIndex                 = rotateLeftIndexNode->gtIntCon.gtIconVal;
            ssize_t rotateRightIndex                = rotatedValueBitSize - rotateLeftIndex;
            rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
        }
        else
        {
            GenTree* tmp =
                comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
            BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
            tree->gtOp.gtOp2 = tmp;
        }
        tree->ChangeOper(GT_ROR);
    }
    ContainCheckShiftRotate(tree->AsOp());
}
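// Standalone sanity check for the rewrite above (Rotl32/Rotr32 are hypothetical helpers, not
// JIT APIs): rotating left by a constant n is the same as rotating right by bitSize - n, which
// is exactly the index substitution LowerRotate performs for a constant rotate count.
#include <cassert>
#include <cstdint>

static uint32_t Rotl32(uint32_t x, unsigned n) { return (x << (n & 31)) | (x >> ((32 - n) & 31)); }
static uint32_t Rotr32(uint32_t x, unsigned n) { return (x >> (n & 31)) | (x << ((32 - n) & 31)); }

static void CheckRotateIdentity()
{
    const uint32_t value = 0x12345678u;
    for (unsigned n = 1; n < 32; n++)
    {
        assert(Rotl32(value, n) == Rotr32(value, 32 - n));
    }
    // For a non-constant count, LowerRotate negates the index instead; -n and (32 - n)
    // agree modulo 32, so the rotated result is the same.
}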
//------------------------------------------------------------------------
// DecomposeInd: Decompose GT_IND.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeInd(LIR::Use& use)
{
    GenTree* indLow = use.Def();

    LIR::Use address(Range(), &indLow->gtOp.gtOp1, indLow);
    address.ReplaceWithLclVar(m_compiler, m_blockWeight);
    JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n");
    DISPTREERANGE(Range(), address.Def());

    // Change the type of lower ind.
    indLow->gtType = TYP_INT;

    // Create tree of ind(addr+4)
    GenTreePtr addrBase     = indLow->gtGetOp1();
    GenTreePtr addrBaseHigh = new (m_compiler, GT_LCL_VAR)
        GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
    GenTreePtr addrHigh =
        new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
    GenTreePtr indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr);

    m_compiler->lvaIncRefCnts(addrBaseHigh);

    Range().InsertAfter(indLow, addrBaseHigh, addrHigh, indHigh);

    return FinalizeDecomposition(use, indLow, indHigh);
}
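// A little-endian illustration of what the decomposition above produces (standalone sketch,
// not JIT code): one 64-bit load becomes a 32-bit load of the low half at the original
// address and a 32-bit load of the high half at address + 4.
#include <cassert>
#include <cstdint>
#include <cstring>

static void CheckLongLoadDecomposition()
{
    const uint64_t value = 0x2A0A3C8037C05E7Dull;
    uint8_t        memory[8];
    std::memcpy(memory, &value, sizeof(value));

    uint32_t lo;
    uint32_t hi;
    std::memcpy(&lo, memory, 4);     // ind(addr)     -> low 32 bits
    std::memcpy(&hi, memory + 4, 4); // ind(addr + 4) -> high 32 bits

    assert(lo == 0x37C05E7Du);
    assert(hi == 0x2A0A3C80u);
    assert((((uint64_t)hi << 32) | lo) == value); // the two halves recombine into the original
}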
//------------------------------------------------------------------------
// DecomposeInd: Decompose GT_IND.
//
// Arguments:
//    ppTree - pointer to the tree to decompose
//    data   - fgWalkData for the tree walk
//
// Return Value:
//    None.
//
void DecomposeLongs::DecomposeInd(GenTree** ppTree, Compiler::fgWalkData* data)
{
    GenTreePtr   indLow   = *ppTree;
    GenTreeStmt* addrStmt = CreateTemporary(&indLow->gtOp.gtOp1);
    JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n");
    DISPTREE(addrStmt);

    // Change the type of lower ind.
    indLow->gtType = TYP_INT;

    // Create tree of ind(addr+4)
    GenTreePtr addrBase     = indLow->gtGetOp1();
    GenTreePtr addrBaseHigh = new (m_compiler, GT_LCL_VAR)
        GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
    GenTreePtr addrHigh =
        new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
    GenTreePtr indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr);

    // Connect linear links
    SimpleLinkNodeAfter(addrBaseHigh, addrHigh);
    SimpleLinkNodeAfter(addrHigh, indHigh);

    FinalizeDecomposition(ppTree, data, indLow, indHigh);
}
//------------------------------------------------------------------------ // DecomposeStoreInd: Decompose GT_STOREIND. // // Arguments: // tree - the tree to decompose // // Return Value: // None. // void DecomposeLongs::DecomposeStoreInd(GenTree** ppTree, Compiler::fgWalkData* data) { assert(ppTree != nullptr); assert(*ppTree != nullptr); assert(data != nullptr); assert((*ppTree)->OperGet() == GT_STOREIND); assert(m_compiler->compCurStmt != nullptr); GenTree* tree = *ppTree; assert(tree->gtOp.gtOp2->OperGet() == GT_LONG); GenTreeStmt* curStmt = m_compiler->compCurStmt->AsStmt(); bool isEmbeddedStmt = !curStmt->gtStmtIsTopLevel(); // Example input trees (a nested embedded statement case) // // <linkBegin Node> // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld long V01 arg1 u:2[+8] Fseq[i] $380 // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | /--* lclVar int V22 rat0 $380 // | | { | | +--* lclVar int V23 rat1 // | | { | +--* gt_long long // | | { \--* storeIndir long // | +--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* lclVar ref V02 tmp0 u:3 $280 // | +--* const int 8 $4a // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // <linkEndNode> // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}) GenTree* linkBegin = m_compiler->fgGetFirstNode(tree)->gtPrev; GenTree* linkEnd = tree->gtNext; GenTree* gtLong = tree->gtOp.gtOp2; // Save address to a temp. It is used in storeIndLow and storeIndHigh trees. GenTreeStmt* addrStmt = CreateTemporary(&tree->gtOp.gtOp1); JITDUMP("[DecomposeStoreInd]: Saving address tree to a temp var:\n"); DISPTREE(addrStmt); if (!gtLong->gtOp.gtOp1->OperIsLeaf()) { GenTreeStmt* dataLowStmt = CreateTemporary(>Long->gtOp.gtOp1); JITDUMP("[DecomposeStoreInd]: Saving low data tree to a temp var:\n"); DISPTREE(dataLowStmt); } if (!gtLong->gtOp.gtOp2->OperIsLeaf()) { GenTreeStmt* dataHighStmt = CreateTemporary(>Long->gtOp.gtOp2); JITDUMP("[DecomposeStoreInd]: Saving high data tree to a temp var:\n"); DISPTREE(dataHighStmt); } // Example trees after embedded statements for address and data are added. // This example saves all address and data trees into temp variables // to show how those embedded statements are created. // // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { \--* st.lclVar byref V24 rat2 // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar byref V24 rat2 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld long V01 arg1 u:2[+8] Fseq[i] $380380 // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclVar int V22 rat0 $380 // | | { | | { \--* st.lclVar int V25 rat3 // | | { | | /--* lclVar int V25 rat3 // | | { | | | { * stmtExpr void (embedded) (IL ???... ???) 
// | | { | | | { | /--* lclVar int V23 rat1 // | | { | | | { \--* st.lclVar int V26 rat4 // | | { | | +--* lclVar int V26 rat4 // | | { | +--* gt_long long // | | { \--* storeIndir long // | +--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* lclVar ref V02 tmp0 u:3 $280 // | +--* const int 8 $4a // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}) GenTree* addrBase = tree->gtOp.gtOp1; GenTree* dataHigh = gtLong->gtOp.gtOp2; GenTree* dataLow = gtLong->gtOp.gtOp1; GenTree* storeIndLow = tree; // Rewrite storeIndLow tree to save only lower 32-bit data. // // | | { | /--* lclVar byref V24 rat2 (address) // ... // | | { | +--* lclVar int V25 rat3 (lower 32-bit data) // | | { | { * stmtExpr void (embedded) (IL ???... ???) // | | { | { | /--* lclVar int V23 rat1 // | | { | { \--* st.lclVar int V26 rat4 // | | { \--* storeIndir int // // (editor brace matching compensation: }}}}}}}}}) m_compiler->fgSnipNode(curStmt, gtLong); m_compiler->fgSnipNode(curStmt, dataHigh); storeIndLow->gtOp.gtOp2 = dataLow; storeIndLow->gtType = TYP_INT; // Construct storeIndHigh tree // // | | { *stmtExpr void (embedded)(IL ? ? ? ... ? ? ? ) // | | { | / --* lclVar int V26 rat4 // | | { | | / --* lclVar byref V24 rat2 // | | { | +--* lea(b + 4) ref // | | { \--* storeIndir int // // (editor brace matching compensation: }}}}}) GenTree* addrBaseHigh = new(m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET); GenTree* addrHigh = new(m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* storeIndHigh = new(m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh); storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK)); storeIndHigh->gtFlags |= GTF_REVERSE_OPS; storeIndHigh->CopyCosts(storeIndLow); // Internal links of storeIndHigh tree dataHigh->gtPrev = nullptr; dataHigh->gtNext = nullptr; SimpleLinkNodeAfter(dataHigh, addrBaseHigh); SimpleLinkNodeAfter(addrBaseHigh, addrHigh); SimpleLinkNodeAfter(addrHigh, storeIndHigh); // External links of storeIndHigh tree //dataHigh->gtPrev = nullptr; if (isEmbeddedStmt) { // If storeIndTree is an embedded statement, connect storeIndLow // and dataHigh storeIndLow->gtNext = dataHigh; dataHigh->gtPrev = storeIndLow; } storeIndHigh->gtNext = linkEnd; if (linkEnd != nullptr) { linkEnd->gtPrev = storeIndHigh; } InsertNodeAsStmt(storeIndHigh); // Example final output // // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { \--* st.lclVar byref V24 rat2 // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar byref V24 rat2 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld int V01 arg1 u:2[+8] Fseq[i] $380 // | | { | | { | +--* lclFld int V01 arg1 [+12] // | | { | | { | /--* gt_long long // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclVar int V22 rat0 $380 // | | { | | { \--* st.lclVar int V25 rat3 // | | { | +--* lclVar int V25 rat3 // | | { | { * stmtExpr void (embedded) (IL ???... 
???) // | | { | { | /--* lclVar int V23 rat1 // | | { | { \--* st.lclVar int V26 rat4 // | | { \--* storeIndir int // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar int V26 rat4 // | | { | | /--* lclVar byref V24 rat2 // | | { | +--* lea(b+4) ref // | | { \--* storeIndir int // | | /--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* putarg_stk [+0x00] ref // | | /--* lclVar ref V02 tmp0 u:3 $280 // | +--* putarg_reg ref // | | /--* const int 8 $4a // | +--* putarg_reg int // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}) }
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack) { assert(useEdge != nullptr); GenTree* node = *useEdge; assert(node != nullptr); #ifdef DEBUG const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0; #endif // First, remove any preceeding list nodes, which are not otherwise visited by the tree walk. // // NOTE: GT_FIELD_LIST head nodes, and GT_LIST nodes used by phi nodes will in fact be visited. for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead()); prev = node->gtPrev) { BlockRange().Remove(prev); } // In addition, remove the current node if it is a GT_LIST node that is not an aggregate. if (node->OperIsAnyList()) { GenTreeArgList* list = node->AsArgList(); if (!list->OperIsFieldListHead()) { BlockRange().Remove(list); } return Compiler::WALK_CONTINUE; } LIR::Use use; if (parentStack.Height() < 2) { use = LIR::Use::GetDummyUse(BlockRange(), *useEdge); } else { use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1)); } assert(node == use.Def()); switch (node->OperGet()) { case GT_ASG: RewriteAssignment(use); break; case GT_BOX: // GT_BOX at this level just passes through so get rid of it use.ReplaceWith(comp, node->gtGetOp1()); BlockRange().Remove(node); break; case GT_ADDR: RewriteAddress(use); break; case GT_IND: // Clear the `GTF_IND_ASG_LHS` flag, which overlaps with `GTF_IND_REQ_ADDR_IN_REG`. node->gtFlags &= ~GTF_IND_ASG_LHS; if (varTypeIsSIMD(node)) { RewriteSIMDOperand(use, false); } else { // Due to promotion of structs containing fields of type struct with a // single scalar type field, we could potentially see IR nodes of the // form GT_IND(GT_ADD(lclvarAddr, 0)) where 0 is an offset representing // a field-seq. These get folded here. // // TODO: This code can be removed once JIT implements recursive struct // promotion instead of lying about the type of struct field as the type // of its single scalar field. GenTree* addr = node->AsIndir()->Addr(); if (addr->OperGet() == GT_ADD && addr->gtGetOp1()->OperGet() == GT_LCL_VAR_ADDR && addr->gtGetOp2()->IsIntegralConst(0)) { GenTreeLclVarCommon* lclVarNode = addr->gtGetOp1()->AsLclVarCommon(); unsigned lclNum = lclVarNode->GetLclNum(); LclVarDsc* varDsc = comp->lvaTable + lclNum; if (node->TypeGet() == varDsc->TypeGet()) { JITDUMP("Rewriting GT_IND(GT_ADD(LCL_VAR_ADDR,0)) to LCL_VAR\n"); lclVarNode->SetOper(GT_LCL_VAR); lclVarNode->gtType = node->TypeGet(); use.ReplaceWith(comp, lclVarNode); BlockRange().Remove(addr); BlockRange().Remove(addr->gtGetOp2()); BlockRange().Remove(node); } } } break; case GT_NOP: // fgMorph sometimes inserts NOP nodes between defs and uses // supposedly 'to prevent constant folding'. In this case, remove the // NOP. if (node->gtGetOp1() != nullptr) { use.ReplaceWith(comp, node->gtGetOp1()); BlockRange().Remove(node); } break; case GT_COMMA: { GenTree* op1 = node->gtGetOp1(); if ((op1->gtFlags & GTF_ALL_EFFECT) == 0) { // The LHS has no side effects. Remove it. bool isClosed = false; unsigned sideEffects = 0; LIR::ReadOnlyRange lhsRange = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects); // None of the transforms performed herein violate tree order, so these // should always be true. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); BlockRange().Delete(comp, m_block, std::move(lhsRange)); } GenTree* replacement = node->gtGetOp2(); if (!use.IsDummyUse()) { use.ReplaceWith(comp, replacement); } else { // This is a top-level comma. 
If the RHS has no side effects we can remove // it as well. if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0) { bool isClosed = false; unsigned sideEffects = 0; LIR::ReadOnlyRange rhsRange = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects); // None of the transforms performed herein violate tree order, so these // should always be true. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); BlockRange().Delete(comp, m_block, std::move(rhsRange)); } } BlockRange().Remove(node); } break; case GT_ARGPLACE: // Remove argplace and list nodes from the execution order. // // TODO: remove phi args and phi nodes as well? BlockRange().Remove(node); break; #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_) case GT_CLS_VAR: { // Class vars that are the target of an assignment will get rewritten into // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would // otherwise be generated would also be picked up by RewriteAssignment--but // skipping the rewrite here saves an allocation and a bit of extra work. const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node); if (!isLHSOfAssignment) { GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node); node->SetOper(GT_CLS_VAR_ADDR); node->gtType = TYP_BYREF; BlockRange().InsertAfter(node, ind); use.ReplaceWith(comp, ind); // TODO: JIT dump } } break; #endif // _TARGET_XARCH_ case GT_INTRINSIC: // Non-target intrinsics should have already been rewritten back into user calls. assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId)); break; #ifdef FEATURE_SIMD case GT_BLK: case GT_OBJ: { // TODO-1stClassStructs: These should have been transformed to GT_INDs, but in order // to preserve existing behavior, we will keep this as a block node if this is the // lhs of a block assignment, and either: // - It is a "generic" TYP_STRUCT assignment, OR // - It is an initblk, OR // - Neither the lhs or rhs are known to be of SIMD type. GenTree* parent = use.User(); bool keepBlk = false; if ((parent->OperGet() == GT_ASG) && (node == parent->gtGetOp1())) { if ((node->TypeGet() == TYP_STRUCT) || parent->OperIsInitBlkOp()) { keepBlk = true; } else if (!comp->isAddrOfSIMDType(node->AsBlk()->Addr())) { GenTree* dataSrc = parent->gtGetOp2(); if (!dataSrc->IsLocal() && (dataSrc->OperGet() != GT_SIMD)) { noway_assert(dataSrc->OperIsIndir()); keepBlk = !comp->isAddrOfSIMDType(dataSrc->AsIndir()->Addr()); } } } RewriteSIMDOperand(use, keepBlk); } break; case GT_LCL_FLD: case GT_STORE_LCL_FLD: // TODO-1stClassStructs: Eliminate this. FixupIfSIMDLocal(node->AsLclVarCommon()); break; case GT_SIMD: { noway_assert(comp->featureSIMD); GenTreeSIMD* simdNode = node->AsSIMD(); unsigned simdSize = simdNode->gtSIMDSize; var_types simdType = comp->getSIMDTypeForSize(simdSize); // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted // structs that are passed or returned in a different register type than their enregistered // type(s). if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE) { // This happens when it is consumed by a GT_RET_EXPR. // It can only be a Vector2f or Vector2i. assert(genTypeSize(simdNode->gtSIMDBaseType) == 4); simdNode->gtType = TYP_SIMD8; } // Certain SIMD trees require rationalizing. if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray) { // Rewrite this as an explicit load. 
JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n"); unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType); GenTree* address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2, baseTypeSize, offsetof(CORINFO_Array, u1Elems)); GenTree* ind = comp->gtNewOperNode(GT_IND, simdType, address); BlockRange().InsertBefore(simdNode, address, ind); use.ReplaceWith(comp, ind); BlockRange().Remove(simdNode); DISPTREERANGE(BlockRange(), use.Def()); JITDUMP("\n"); } else { // This code depends on the fact that NONE of the SIMD intrinsics take vector operands // of a different width. If that assumption changes, we will EITHER have to make these type // transformations during importation, and plumb the types all the way through the JIT, // OR add a lot of special handling here. GenTree* op1 = simdNode->gtGetOp1(); if (op1 != nullptr && op1->gtType == TYP_STRUCT) { op1->gtType = simdType; } GenTree* op2 = simdNode->gtGetOp2IfPresent(); if (op2 != nullptr && op2->gtType == TYP_STRUCT) { op2->gtType = simdType; } } } break; #endif // FEATURE_SIMD default: // JCC nodes should not be present in HIR. assert(node->OperGet() != GT_JCC); break; } // Do some extra processing on top-level nodes to remove unused local reads. if (node->OperIsLocalRead()) { if (use.IsDummyUse()) { comp->lvaDecRefCnts(node); BlockRange().Remove(node); } else { // Local reads are side-effect-free; clear any flags leftover from frontend transformations. node->gtFlags &= ~GTF_ALL_EFFECT; } } assert(isLateArg == ((use.Def()->gtFlags & GTF_LATE_ARG) != 0)); return Compiler::WALK_CONTINUE; }
void Rationalizer::RewriteAssignment(LIR::Use& use)
{
    assert(use.IsInitialized());

    GenTreeOp* assignment = use.Def()->AsOp();
    assert(assignment->OperGet() == GT_ASG);

    GenTree* location = assignment->gtGetOp1();
    GenTree* value    = assignment->gtGetOp2();

    genTreeOps locationOp = location->OperGet();

    if (assignment->OperIsBlkOp())
    {
#ifdef FEATURE_SIMD
        if (varTypeIsSIMD(location) && assignment->OperIsInitBlkOp())
        {
            if (location->OperGet() == GT_LCL_VAR)
            {
                var_types simdType = location->TypeGet();
                GenTree*  initVal  = assignment->gtOp.gtOp2;
                var_types baseType = comp->getBaseTypeOfSIMDLocal(location);
                if (baseType != TYP_UNKNOWN)
                {
                    GenTreeSIMD* simdTree = new (comp, GT_SIMD)
                        GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType));
                    assignment->gtOp.gtOp2 = simdTree;
                    value                  = simdTree;
                    initVal->gtNext        = simdTree;
                    simdTree->gtPrev       = initVal;

                    simdTree->gtNext = location;
                    location->gtPrev = simdTree;
                }
            }
        }
#endif // FEATURE_SIMD
        if ((location->TypeGet() == TYP_STRUCT) && !assignment->IsPhiDefn() && !value->IsMultiRegCall())
        {
            if ((location->OperGet() == GT_LCL_VAR))
            {
                // We need to construct a block node for the location.
                // Modify lcl to be the address form.
                location->SetOper(addrForm(locationOp));
                LclVarDsc* varDsc     = &(comp->lvaTable[location->AsLclVarCommon()->gtLclNum]);
                location->gtType      = TYP_BYREF;
                GenTreeBlk*  storeBlk = nullptr;
                unsigned int size     = varDsc->lvExactSize;

                if (varDsc->lvStructGcCount != 0)
                {
                    CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                    GenTreeObj*          objNode   = comp->gtNewObjNode(structHnd, location)->AsObj();
                    unsigned int         slots = (unsigned)(roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE);

                    objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots);
                    objNode->ChangeOper(GT_STORE_OBJ);
                    objNode->SetData(value);
                    comp->fgMorphUnsafeBlk(objNode);
                    storeBlk = objNode;
                }
                else
                {
                    storeBlk = new (comp, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, size);
                }
                storeBlk->gtFlags |= (GTF_REVERSE_OPS | GTF_ASG);
                storeBlk->gtFlags |= ((location->gtFlags | value->gtFlags) & GTF_ALL_EFFECT);

                GenTree* insertionPoint = location->gtNext;
                BlockRange().InsertBefore(insertionPoint, storeBlk);
                use.ReplaceWith(comp, storeBlk);
                BlockRange().Remove(assignment);
                JITDUMP("After transforming local struct assignment into a block op:\n");
                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
                return;
            }
            else
            {
                assert(location->OperIsBlk());
            }
        }
    }

    switch (locationOp)
    {
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_REG_VAR:
        case GT_PHI_ARG:
            RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp);
            BlockRange().Remove(location);
            break;

        case GT_IND:
        {
            GenTreeStoreInd* store =
                new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value);

            copyFlags(store, assignment, GTF_ALL_EFFECT);
            copyFlags(store, location, GTF_IND_FLAGS);

            if (assignment->IsReverseOp())
            {
                store->gtFlags |= GTF_REVERSE_OPS;
            }

            // TODO: JIT dump

            // Remove the GT_IND node and replace the assignment node with the store
            BlockRange().Remove(location);
            BlockRange().InsertBefore(assignment, store);
            use.ReplaceWith(comp, store);
            BlockRange().Remove(assignment);
        }
        break;

        case GT_CLS_VAR:
        {
            location->SetOper(GT_CLS_VAR_ADDR);
            location->gtType = TYP_BYREF;

            assignment->SetOper(GT_STOREIND);

            // TODO: JIT dump
        }
        break;

        case GT_BLK:
        case GT_OBJ:
        case GT_DYN_BLK:
        {
            assert(varTypeIsStruct(location));
            GenTreeBlk* storeBlk = location->AsBlk();
            genTreeOps  storeOper;
            switch (location->gtOper)
            {
                case GT_BLK:
                    storeOper = GT_STORE_BLK;
                    break;
                case GT_OBJ:
                    storeOper = GT_STORE_OBJ;
                    break;
                case GT_DYN_BLK:
                    storeOper = GT_STORE_DYN_BLK;
                    break;
                default:
                    unreached();
            }
            JITDUMP("Rewriting GT_ASG(%s(X), Y) to %s(X,Y):\n", GenTree::NodeName(location->gtOper),
                    GenTree::NodeName(storeOper));
            storeBlk->SetOperRaw(storeOper);
            storeBlk->gtFlags &= ~GTF_DONT_CSE;
            storeBlk->gtFlags |= (assignment->gtFlags & (GTF_ALL_EFFECT | GTF_REVERSE_OPS | GTF_BLK_VOLATILE |
                                                         GTF_BLK_UNALIGNED | GTF_DONT_CSE));
            storeBlk->gtBlk.Data() = value;

            // Replace the assignment node with the store
            use.ReplaceWith(comp, storeBlk);
            BlockRange().Remove(assignment);
            DISPTREERANGE(BlockRange(), use.Def());
            JITDUMP("\n");
        }
        break;

        default:
            unreached();
            break;
    }
}
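// Standalone illustration of the GC slot count used when the struct store above becomes a
// GT_STORE_OBJ (StructPointerSlots is a hypothetical helper; 8-byte pointers assumed): the
// number of pointer-sized slots is roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE.
#include <cassert>

static unsigned StructPointerSlots(unsigned structSize)
{
    const unsigned pointerSize = 8; // TARGET_POINTER_SIZE on a 64-bit target
    return (structSize + pointerSize - 1) / pointerSize;
}

// e.g. a 12-byte struct with GC references covers StructPointerSlots(12) == 2 slots, and a
// 24-byte struct covers 3.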
void Rationalizer::RewriteAssignment(LIR::Use& use) { assert(use.IsInitialized()); GenTreeOp* assignment = use.Def()->AsOp(); assert(assignment->OperGet() == GT_ASG); GenTree* location = assignment->gtGetOp1(); GenTree* value = assignment->gtGetOp2(); genTreeOps locationOp = location->OperGet(); #ifdef FEATURE_SIMD if (varTypeIsSIMD(location) && assignment->OperIsInitBlkOp()) { if (location->OperGet() == GT_LCL_VAR) { var_types simdType = location->TypeGet(); GenTree* initVal = assignment->gtOp.gtOp2; var_types baseType = comp->getBaseTypeOfSIMDLocal(location); if (baseType != TYP_UNKNOWN) { GenTreeSIMD* simdTree = new (comp, GT_SIMD) GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType)); assignment->gtOp.gtOp2 = simdTree; value = simdTree; initVal->gtNext = simdTree; simdTree->gtPrev = initVal; simdTree->gtNext = location; location->gtPrev = simdTree; } } else { assert(location->OperIsBlk()); } } #endif // FEATURE_SIMD switch (locationOp) { case GT_LCL_VAR: case GT_LCL_FLD: case GT_REG_VAR: case GT_PHI_ARG: RewriteAssignmentIntoStoreLclCore(assignment, location, value, locationOp); BlockRange().Remove(location); break; case GT_IND: { GenTreeStoreInd* store = new (comp, GT_STOREIND) GenTreeStoreInd(location->TypeGet(), location->gtGetOp1(), value); copyFlags(store, assignment, GTF_ALL_EFFECT); copyFlags(store, location, GTF_IND_FLAGS); if (assignment->IsReverseOp()) { store->gtFlags |= GTF_REVERSE_OPS; } // TODO: JIT dump // Remove the GT_IND node and replace the assignment node with the store BlockRange().Remove(location); BlockRange().InsertBefore(assignment, store); use.ReplaceWith(comp, store); BlockRange().Remove(assignment); } break; case GT_CLS_VAR: { location->SetOper(GT_CLS_VAR_ADDR); location->gtType = TYP_BYREF; assignment->SetOper(GT_STOREIND); // TODO: JIT dump } break; case GT_BLK: case GT_OBJ: case GT_DYN_BLK: { assert(varTypeIsStruct(location)); GenTreeBlk* storeBlk = location->AsBlk(); genTreeOps storeOper; switch (location->gtOper) { case GT_BLK: storeOper = GT_STORE_BLK; break; case GT_OBJ: storeOper = GT_STORE_OBJ; break; case GT_DYN_BLK: storeOper = GT_STORE_DYN_BLK; break; default: unreached(); } JITDUMP("Rewriting GT_ASG(%s(X), Y) to %s(X,Y):\n", GenTree::NodeName(location->gtOper), GenTree::NodeName(storeOper)); storeBlk->SetOperRaw(storeOper); storeBlk->gtFlags &= ~GTF_DONT_CSE; storeBlk->gtFlags |= (assignment->gtFlags & (GTF_ALL_EFFECT | GTF_REVERSE_OPS | GTF_BLK_VOLATILE | GTF_BLK_UNALIGNED | GTF_BLK_INIT | GTF_DONT_CSE)); storeBlk->gtBlk.Data() = value; // Replace the assignment node with the store use.ReplaceWith(comp, storeBlk); BlockRange().Remove(assignment); DISPTREERANGE(BlockRange(), use.Def()); JITDUMP("\n"); } break; default: unreached(); break; } }
//------------------------------------------------------------------------
// DecomposeStoreInd: Decompose GT_STOREIND.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeStoreInd(LIR::Use& use)
{
    assert(use.IsInitialized());
    assert(use.Def()->OperGet() == GT_STOREIND);

    GenTree* tree = use.Def();

    assert(tree->gtOp.gtOp2->OperGet() == GT_LONG);

    // Example input (address expression omitted):
    //
    //  t51 = const     int    0x37C05E7D
    // t154 = const     int    0x2A0A3C80
    //        /--*  t51    int
    //        +--*  t154   int
    // t155 = *gt_long         long
    //        /--*  t52    byref
    //        +--*  t155   long
    //        *  storeIndir long

    GenTree* gtLong = tree->gtOp.gtOp2;

    // Save address to a temp. It is used in storeIndLow and storeIndHigh trees.
    LIR::Use address(Range(), &tree->gtOp.gtOp1, tree);
    address.ReplaceWithLclVar(m_compiler, m_blockWeight);
    JITDUMP("[DecomposeStoreInd]: Saving address tree to a temp var:\n");
    DISPTREERANGE(Range(), address.Def());

    if (!gtLong->gtOp.gtOp1->OperIsLeaf())
    {
        LIR::Use op1(Range(), &gtLong->gtOp.gtOp1, gtLong);
        op1.ReplaceWithLclVar(m_compiler, m_blockWeight);
        JITDUMP("[DecomposeStoreInd]: Saving low data tree to a temp var:\n");
        DISPTREERANGE(Range(), op1.Def());
    }

    if (!gtLong->gtOp.gtOp2->OperIsLeaf())
    {
        LIR::Use op2(Range(), &gtLong->gtOp.gtOp2, gtLong);
        op2.ReplaceWithLclVar(m_compiler, m_blockWeight);
        JITDUMP("[DecomposeStoreInd]: Saving high data tree to a temp var:\n");
        DISPTREERANGE(Range(), op2.Def());
    }

    GenTree* addrBase    = tree->gtOp.gtOp1;
    GenTree* dataHigh    = gtLong->gtOp.gtOp2;
    GenTree* dataLow     = gtLong->gtOp.gtOp1;
    GenTree* storeIndLow = tree;

    Range().Remove(gtLong);
    Range().Remove(dataHigh);
    storeIndLow->gtOp.gtOp2 = dataLow;
    storeIndLow->gtType     = TYP_INT;

    GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR)
        GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
    GenTree* addrHigh =
        new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
    GenTree* storeIndHigh = new (m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh);
    storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK));
    storeIndHigh->gtFlags |= GTF_REVERSE_OPS;

    m_compiler->lvaIncRefCnts(addrBaseHigh);

    Range().InsertAfter(storeIndLow, dataHigh, addrBaseHigh, addrHigh, storeIndHigh);

    return storeIndHigh;

    // Example final output:
    //
    //        /--*  t52    byref
    //        *  st.lclVar byref  V07 rat0
    // t158 = lclVar    byref  V07 rat0
    //  t51 = const     int    0x37C05E7D
    //        /--*  t158   byref
    //        +--*  t51    int
    //        *  storeIndir int
    // t154 = const     int    0x2A0A3C80
    // t159 = lclVar    byref  V07 rat0
    //        /--*  t159   byref
    // t160 = *  lea(b + 4)     ref
    //        /--*  t154   int
    //        +--*  t160   ref
    //        *  storeIndir int
}
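// Little-endian illustration of the final shape above (standalone sketch, not JIT code): the
// two storeIndir int nodes write the halves of the original long, the low half at the address
// and the high half at address + 4, using the same constants as the example dump.
#include <cassert>
#include <cstdint>
#include <cstring>

static void CheckLongStoreDecomposition()
{
    uint8_t        memory[8] = {};
    const uint32_t dataLow   = 0x37C05E7Du;
    const uint32_t dataHigh  = 0x2A0A3C80u;

    std::memcpy(memory, &dataLow, 4);      // storeIndir int at addr
    std::memcpy(memory + 4, &dataHigh, 4); // storeIndir int at lea(addr + 4)

    uint64_t stored;
    std::memcpy(&stored, memory, 8);
    assert(stored == 0x2A0A3C8037C05E7Dull); // equivalent to the original 64-bit store
}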
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack) { assert(useEdge != nullptr); GenTree* node = *useEdge; assert(node != nullptr); #ifdef DEBUG const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0; #endif // First, remove any preceeding GT_LIST nodes, which are not otherwise visited by the tree walk. // // NOTE: GT_LIST nodes that are used by block ops and phi nodes will in fact be visited. for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperGet() == GT_LIST; prev = node->gtPrev) { BlockRange().Remove(prev); } // In addition, remove the current node if it is a GT_LIST node. if ((*useEdge)->OperGet() == GT_LIST) { BlockRange().Remove(*useEdge); return Compiler::WALK_CONTINUE; } LIR::Use use; if (parentStack.Height() < 2) { use = LIR::Use::GetDummyUse(BlockRange(), *useEdge); } else { use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1)); } assert(node == use.Def()); switch (node->OperGet()) { case GT_ASG: RewriteAssignment(use); break; case GT_BOX: // GT_BOX at this level just passes through so get rid of it use.ReplaceWith(comp, node->gtGetOp1()); BlockRange().Remove(node); break; case GT_ADDR: RewriteAddress(use); break; case GT_NOP: // fgMorph sometimes inserts NOP nodes between defs and uses // supposedly 'to prevent constant folding'. In this case, remove the // NOP. if (node->gtGetOp1() != nullptr) { use.ReplaceWith(comp, node->gtGetOp1()); BlockRange().Remove(node); } break; case GT_COMMA: { GenTree* op1 = node->gtGetOp1(); if ((op1->gtFlags & GTF_ALL_EFFECT) == 0) { // The LHS has no side effects. Remove it. bool isClosed = false; unsigned sideEffects = 0; LIR::ReadOnlyRange lhsRange = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects); // None of the transforms performed herein violate tree order, so these // should always be true. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); BlockRange().Delete(comp, m_block, std::move(lhsRange)); } GenTree* replacement = node->gtGetOp2(); if (!use.IsDummyUse()) { use.ReplaceWith(comp, replacement); } else { // This is a top-level comma. If the RHS has no side effects we can remove // it as well. if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0) { bool isClosed = false; unsigned sideEffects = 0; LIR::ReadOnlyRange rhsRange = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects); // None of the transforms performed herein violate tree order, so these // should always be true. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); BlockRange().Delete(comp, m_block, std::move(rhsRange)); } } BlockRange().Remove(node); } break; case GT_ARGPLACE: // Remove argplace and list nodes from the execution order. // // TODO: remove phi args and phi nodes as well? BlockRange().Remove(node); break; #ifdef _TARGET_XARCH_ case GT_CLS_VAR: { // Class vars that are the target of an assignment will get rewritten into // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would // otherwise be generated would also be picked up by RewriteAssignment--but // skipping the rewrite here saves an allocation and a bit of extra work. 
const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node); if (!isLHSOfAssignment) { GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node); node->SetOper(GT_CLS_VAR_ADDR); node->gtType = TYP_BYREF; BlockRange().InsertAfter(node, ind); use.ReplaceWith(comp, ind); // TODO: JIT dump } } break; #endif // _TARGET_XARCH_ case GT_INTRINSIC: // Non-target intrinsics should have already been rewritten back into user calls. assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId)); break; #ifdef FEATURE_SIMD case GT_INITBLK: RewriteInitBlk(use); break; case GT_COPYBLK: RewriteCopyBlk(use); break; case GT_OBJ: RewriteObj(use); break; case GT_LCL_FLD: case GT_STORE_LCL_FLD: // TODO-1stClassStructs: Eliminate this. FixupIfSIMDLocal(node->AsLclVarCommon()); break; case GT_STOREIND: case GT_IND: if (node->gtType == TYP_STRUCT) { GenTree* addr = node->AsIndir()->Addr(); assert(addr->TypeGet() == TYP_BYREF); if (addr->OperIsLocal()) { LclVarDsc* varDsc = &(comp->lvaTable[addr->AsLclVarCommon()->gtLclNum]); assert(varDsc->lvSIMDType); unsigned simdSize = (unsigned int)roundUp(varDsc->lvExactSize, TARGET_POINTER_SIZE); node->gtType = comp->getSIMDTypeForSize(simdSize); } #if DEBUG else { // If the address is not a local var, assert that the user of this IND is an ADDR node. assert((use.User()->OperGet() == GT_ADDR) || use.User()->OperIsLocalAddr()); } #endif } break; case GT_SIMD: { noway_assert(comp->featureSIMD); GenTreeSIMD* simdNode = node->AsSIMD(); unsigned simdSize = simdNode->gtSIMDSize; var_types simdType = comp->getSIMDTypeForSize(simdSize); // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted // structs that are passed or returned in a different register type than their enregistered // type(s). if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE) { // This happens when it is consumed by a GT_RET_EXPR. // It can only be a Vector2f or Vector2i. assert(genTypeSize(simdNode->gtSIMDBaseType) == 4); simdNode->gtType = TYP_SIMD8; } else if (simdNode->gtType == TYP_STRUCT || varTypeIsSIMD(simdNode)) { node->gtType = simdType; } // Certain SIMD trees require rationalizing. if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray) { // Rewrite this as an explicit load. JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n"); unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType); GenTree* address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2, baseTypeSize, offsetof(CORINFO_Array, u1Elems)); GenTree* ind = comp->gtNewOperNode(GT_IND, simdType, address); BlockRange().InsertBefore(simdNode, address, ind); use.ReplaceWith(comp, ind); BlockRange().Remove(simdNode); DISPTREERANGE(BlockRange(), use.Def()); JITDUMP("\n"); } else { // This code depends on the fact that NONE of the SIMD intrinsics take vector operands // of a different width. If that assumption changes, we will EITHER have to make these type // transformations during importation, and plumb the types all the way through the JIT, // OR add a lot of special handling here. GenTree* op1 = simdNode->gtGetOp1(); if (op1 != nullptr && op1->gtType == TYP_STRUCT) { op1->gtType = simdType; } GenTree* op2 = simdNode->gtGetOp2(); if (op2 != nullptr && op2->gtType == TYP_STRUCT) { op2->gtType = simdType; } } } break; #endif // FEATURE_SIMD default: break; } // Do some extra processing on top-level nodes to remove unused local reads. 
if (use.IsDummyUse() && node->OperIsLocalRead()) { assert((node->gtFlags & GTF_ALL_EFFECT) == 0); comp->lvaDecRefCnts(node); BlockRange().Remove(node); } assert(isLateArg == ((node->gtFlags & GTF_LATE_ARG) != 0)); return Compiler::WALK_CONTINUE; }
//------------------------------------------------------------------------ // BuildSIMD: Set the NodeInfo for a GT_SIMD tree. // // Arguments: // tree - The GT_SIMD node of interest // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildSIMD(GenTreeSIMD* simdTree) { int srcCount = 0; // Only SIMDIntrinsicInit can be contained if (simdTree->isContained()) { assert(simdTree->gtSIMDIntrinsicID == SIMDIntrinsicInit); } int dstCount = simdTree->IsValue() ? 1 : 0; assert(dstCount == 1); bool buildUses = true; GenTree* op1 = simdTree->gtGetOp1(); GenTree* op2 = simdTree->gtGetOp2(); switch (simdTree->gtSIMDIntrinsicID) { case SIMDIntrinsicInit: case SIMDIntrinsicCast: case SIMDIntrinsicSqrt: case SIMDIntrinsicAbs: case SIMDIntrinsicConvertToSingle: case SIMDIntrinsicConvertToInt32: case SIMDIntrinsicConvertToDouble: case SIMDIntrinsicConvertToInt64: case SIMDIntrinsicWidenLo: case SIMDIntrinsicWidenHi: // No special handling required. break; case SIMDIntrinsicGetItem: { op1 = simdTree->gtGetOp1(); op2 = simdTree->gtGetOp2(); // We have an object and an index, either of which may be contained. bool setOp2DelayFree = false; if (!op2->IsCnsIntOrI() && (!op1->isContained() || op1->OperIsLocal())) { // If the index is not a constant and the object is not contained or is a local // we will need a general purpose register to calculate the address // internal register must not clobber input index // TODO-Cleanup: An internal register will never clobber a source; this code actually // ensures that the index (op2) doesn't interfere with the target. buildInternalIntRegisterDefForNode(simdTree); setOp2DelayFree = true; } srcCount += BuildOperandUses(op1); if (!op2->isContained()) { RefPosition* op2Use = BuildUse(op2); if (setOp2DelayFree) { setDelayFree(op2Use); } srcCount++; } if (!op2->IsCnsIntOrI() && (!op1->isContained())) { // If vector is not already in memory (contained) and the index is not a constant, // we will use the SIMD temp location to store the vector. compiler->getSIMDInitTempVarNum(); } buildUses = false; } break; case SIMDIntrinsicAdd: case SIMDIntrinsicSub: case SIMDIntrinsicMul: case SIMDIntrinsicDiv: case SIMDIntrinsicBitwiseAnd: case SIMDIntrinsicBitwiseAndNot: case SIMDIntrinsicBitwiseOr: case SIMDIntrinsicBitwiseXor: case SIMDIntrinsicMin: case SIMDIntrinsicMax: case SIMDIntrinsicEqual: case SIMDIntrinsicLessThan: case SIMDIntrinsicGreaterThan: case SIMDIntrinsicLessThanOrEqual: case SIMDIntrinsicGreaterThanOrEqual: // No special handling required. break; case SIMDIntrinsicSetX: case SIMDIntrinsicSetY: case SIMDIntrinsicSetZ: case SIMDIntrinsicSetW: case SIMDIntrinsicNarrow: { // Op1 will write to dst before Op2 is free BuildUse(op1); RefPosition* op2Use = BuildUse(op2); setDelayFree(op2Use); srcCount = 2; buildUses = false; break; } case SIMDIntrinsicInitN: { var_types baseType = simdTree->gtSIMDBaseType; srcCount = (short)(simdTree->gtSIMDSize / genTypeSize(baseType)); if (varTypeIsFloating(simdTree->gtSIMDBaseType)) { // Need an internal register to stitch together all the values into a single vector in a SIMD reg. 
buildInternalFloatRegisterDefForNode(simdTree); } int initCount = 0; for (GenTree* list = op1; list != nullptr; list = list->gtGetOp2()) { assert(list->OperGet() == GT_LIST); GenTree* listItem = list->gtGetOp1(); assert(listItem->TypeGet() == baseType); assert(!listItem->isContained()); BuildUse(listItem); initCount++; } assert(initCount == srcCount); buildUses = false; break; } case SIMDIntrinsicInitArray: // We have an array and an index, which may be contained. break; case SIMDIntrinsicOpEquality: case SIMDIntrinsicOpInEquality: buildInternalFloatRegisterDefForNode(simdTree); break; case SIMDIntrinsicDotProduct: buildInternalFloatRegisterDefForNode(simdTree); break; case SIMDIntrinsicSelect: // TODO-ARM64-CQ Allow lowering to see SIMDIntrinsicSelect so we can generate BSL VC, VA, VB // bsl target register must be VC. Reserve a temp in case we need to shuffle things. // This will require a different approach, as GenTreeSIMD has only two operands. assert(!"SIMDIntrinsicSelect not yet supported"); buildInternalFloatRegisterDefForNode(simdTree); break; case SIMDIntrinsicInitArrayX: case SIMDIntrinsicInitFixed: case SIMDIntrinsicCopyToArray: case SIMDIntrinsicCopyToArrayX: case SIMDIntrinsicNone: case SIMDIntrinsicGetCount: case SIMDIntrinsicGetOne: case SIMDIntrinsicGetZero: case SIMDIntrinsicGetAllOnes: case SIMDIntrinsicGetX: case SIMDIntrinsicGetY: case SIMDIntrinsicGetZ: case SIMDIntrinsicGetW: case SIMDIntrinsicInstEquals: case SIMDIntrinsicHWAccel: case SIMDIntrinsicWiden: case SIMDIntrinsicInvalid: assert(!"These intrinsics should not be seen during register allocation"); __fallthrough; default: noway_assert(!"Unimplemented SIMD node type."); unreached(); } if (buildUses) { assert(!op1->OperIs(GT_LIST)); assert(srcCount == 0); srcCount = BuildOperandUses(op1); if ((op2 != nullptr) && !op2->isContained()) { srcCount += BuildOperandUses(op2); } } assert(internalCount <= MaxInternalCount); buildInternalRegisterUses(); if (dstCount == 1) { BuildDef(simdTree); } else { assert(dstCount == 0); } return srcCount; }
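// Standalone note on the SIMDIntrinsicInitN operand count above (SimdInitNSourceCount is a
// hypothetical helper): the number of scalar sources is the vector size divided by the base
// type size, one BuildUse per GT_LIST item.
#include <cassert>

static unsigned SimdInitNSourceCount(unsigned simdSize, unsigned baseTypeSize)
{
    return simdSize / baseTypeSize; // mirrors simdTree->gtSIMDSize / genTypeSize(baseType)
}

// e.g. initializing a 16-byte vector from float elements consumes SimdInitNSourceCount(16, 4) == 4 operands.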
//------------------------------------------------------------------------ // DecomposeStoreInd: Decompose GT_STOREIND. // // Arguments: // use - the LIR::Use object for the def that needs to be decomposed. // // Return Value: // The next node to process. // // TODO-LIR: replace comments below that use embedded statements with ones that do not. GenTree* DecomposeLongs::DecomposeStoreInd(LIR::Use& use) { assert(use.IsInitialized()); assert(use.Def()->OperGet() == GT_STOREIND); GenTree* tree = use.Def(); assert(tree->gtOp.gtOp2->OperGet() == GT_LONG); // Example input trees (a nested embedded statement case) // // <linkBegin Node> // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld long V01 arg1 u:2[+8] Fseq[i] $380 // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | /--* lclVar int V22 rat0 $380 // | | { | | +--* lclVar int V23 rat1 // | | { | +--* gt_long long // | | { \--* storeIndir long // | +--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* lclVar ref V02 tmp0 u:3 $280 // | +--* const int 8 $4a // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // <linkEndNode> // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}) GenTree* gtLong = tree->gtOp.gtOp2; unsigned blockWeight = m_block->getBBWeight(m_compiler); // Save address to a temp. It is used in storeIndLow and storeIndHigh trees. LIR::Use address(BlockRange(), &tree->gtOp.gtOp1, tree); address.ReplaceWithLclVar(m_compiler, blockWeight); JITDUMP("[DecomposeStoreInd]: Saving address tree to a temp var:\n"); DISPTREERANGE(BlockRange(), address.Def()); if (!gtLong->gtOp.gtOp1->OperIsLeaf()) { LIR::Use op1(BlockRange(), >Long->gtOp.gtOp1, gtLong); op1.ReplaceWithLclVar(m_compiler, blockWeight); JITDUMP("[DecomposeStoreInd]: Saving low data tree to a temp var:\n"); DISPTREERANGE(BlockRange(), op1.Def()); } if (!gtLong->gtOp.gtOp2->OperIsLeaf()) { LIR::Use op2(BlockRange(), >Long->gtOp.gtOp2, gtLong); op2.ReplaceWithLclVar(m_compiler, blockWeight); JITDUMP("[DecomposeStoreInd]: Saving high data tree to a temp var:\n"); DISPTREERANGE(BlockRange(), op2.Def()); } // Example trees after embedded statements for address and data are added. // This example saves all address and data trees into temp variables // to show how those embedded statements are created. // // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { \--* st.lclVar byref V24 rat2 // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar byref V24 rat2 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld long V01 arg1 u:2[+8] Fseq[i] $380380 // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) 
// | | { | | { | /--* lclVar int V22 rat0 $380 // | | { | | { \--* st.lclVar int V25 rat3 // | | { | | /--* lclVar int V25 rat3 // | | { | | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | | { | /--* lclVar int V23 rat1 // | | { | | | { \--* st.lclVar int V26 rat4 // | | { | | +--* lclVar int V26 rat4 // | | { | +--* gt_long long // | | { \--* storeIndir long // | +--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* lclVar ref V02 tmp0 u:3 $280 // | +--* const int 8 $4a // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}) GenTree* addrBase = tree->gtOp.gtOp1; GenTree* dataHigh = gtLong->gtOp.gtOp2; GenTree* dataLow = gtLong->gtOp.gtOp1; GenTree* storeIndLow = tree; // Rewrite storeIndLow tree to save only lower 32-bit data. // // | | { | /--* lclVar byref V24 rat2 (address) // ... // | | { | +--* lclVar int V25 rat3 (lower 32-bit data) // | | { | { * stmtExpr void (embedded) (IL ???... ???) // | | { | { | /--* lclVar int V23 rat1 // | | { | { \--* st.lclVar int V26 rat4 // | | { \--* storeIndir int // // (editor brace matching compensation: }}}}}}}}}) BlockRange().Remove(gtLong); BlockRange().Remove(dataHigh); storeIndLow->gtOp.gtOp2 = dataLow; storeIndLow->gtType = TYP_INT; // Construct storeIndHigh tree // // | | { *stmtExpr void (embedded)(IL ? ? ? ... ? ? ? ) // | | { | / --* lclVar int V26 rat4 // | | { | | / --* lclVar byref V24 rat2 // | | { | +--* lea(b + 4) ref // | | { \--* storeIndir int // // (editor brace matching compensation: }}}}}) GenTree* addrBaseHigh = new (m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET); GenTree* addrHigh = new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT)); GenTree* storeIndHigh = new (m_compiler, GT_STOREIND) GenTreeStoreInd(TYP_INT, addrHigh, dataHigh); storeIndHigh->gtFlags = (storeIndLow->gtFlags & (GTF_ALL_EFFECT | GTF_LIVENESS_MASK)); storeIndHigh->gtFlags |= GTF_REVERSE_OPS; m_compiler->gtPrepareCost(storeIndHigh); BlockRange().InsertAfter(storeIndLow, dataHigh, addrBaseHigh, addrHigh, storeIndHigh); return storeIndHigh; // Example final output // // * stmtExpr void (top level) (IL ???... ???) // | /--* argPlace ref $280 // | +--* argPlace int $4a // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar ref V11 tmp9 u:3 $21c // | | { | +--* const int 4 $44 // | | { | /--* + byref $2c8 // | | { \--* st.lclVar byref V24 rat2 // | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | /--* lclVar byref V24 rat2 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclFld int V01 arg1 u:2[+8] Fseq[i] $380 // | | { | | { | +--* lclFld int V01 arg1 [+12] // | | { | | { | /--* gt_long long // | | { | | { \--* st.lclVar long (P) V21 cse8 // | | { | | { \--* int V21.hi (offs=0x00) -> V22 rat0 // | | { | | { \--* int V21.hi (offs=0x04) -> V23 rat1 // | | { | | { * stmtExpr void (embedded) (IL ???... ???) // | | { | | { | /--* lclVar int V22 rat0 $380 // | | { | | { \--* st.lclVar int V25 rat3 // | | { | +--* lclVar int V25 rat3 // | | { | { * stmtExpr void (embedded) (IL ???... ???) // | | { | { | /--* lclVar int V23 rat1 // | | { | { \--* st.lclVar int V26 rat4 // | | { \--* storeIndir int // | | { * stmtExpr void (embedded) (IL ???... ???) 
// | | { | /--* lclVar int V26 rat4 // | | { | | /--* lclVar byref V24 rat2 // | | { | +--* lea(b+4) ref // | | { \--* storeIndir int // | | /--* lclVar ref V11 tmp9 u:3 (last use) $21c // | +--* putarg_stk [+0x00] ref // | | /--* lclVar ref V02 tmp0 u:3 $280 // | +--* putarg_reg ref // | | /--* const int 8 $4a // | +--* putarg_reg int // \--* call help void HELPER.CORINFO_HELP_ARRADDR_ST $205 // // (editor brace matching compensation: }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}) }