Example 1
// Given a lclvar use, try to find the lclvar's defining assignment and its containing block.
GenTreeOp* RangeCheck::GetSsaDefAsg(GenTreeLclVarCommon* lclUse, BasicBlock** asgBlock)
{
    unsigned ssaNum = lclUse->GetSsaNum();

    if (ssaNum == SsaConfig::RESERVED_SSA_NUM)
    {
        return nullptr;
    }

    LclSsaVarDsc* ssaData = m_pCompiler->lvaTable[lclUse->GetLclNum()].GetPerSsaData(ssaNum);
    GenTree*      lclDef  = ssaData->m_defLoc.m_tree;

    if (lclDef == nullptr)
    {
        return nullptr;
    }

    // We have the def node but we also need the assignment node to get its source.
    // gtGetParent can be used to get the assignment node, but it's rather expensive
    // and not strictly necessary here: there shouldn't be any other node between
    // the assignment node and its destination node.
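    //
    // For illustration (a sketch of the assumed linear order, e.g. for "x = y + z"):
    //
    //     LCL_VAR y -> LCL_VAR z -> ADD -> LCL_VAR x (def) -> ASG
    //
    // so following gtNext from the def node should land on the GT_ASG node.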
    GenTree* asg = lclDef->gtNext;

    if ((asg == nullptr) || !asg->OperIs(GT_ASG) || (asg->gtGetOp1() != lclDef))
    {
        return nullptr;
    }

#ifdef DEBUG
    Location* loc = GetDef(lclUse);
    assert(loc->parent == asg);
    assert(loc->block == ssaData->m_defLoc.m_blk);
#endif

    *asgBlock = ssaData->m_defLoc.m_blk;
    return asg->AsOp();
}
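
A minimal usage sketch (hypothetical caller; the names lclUse, asg, and asgBlock are illustrative and not part of the function above). The returned GT_ASG node's second operand is the value assigned to the local:

    // Hedged sketch, assuming a GenTreeLclVarCommon* lclUse is in hand.
    BasicBlock* asgBlock = nullptr;
    GenTreeOp*  asg      = GetSsaDefAsg(lclUse, &asgBlock);
    if (asg != nullptr)
    {
        GenTree* value = asg->gtGetOp2(); // the assigned value (RHS)
        // ... inspect 'value' and 'asgBlock' to derive range information ...
    }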
Example 2
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack)
{
    assert(useEdge != nullptr);

    GenTree* node = *useEdge;
    assert(node != nullptr);

#ifdef DEBUG
    const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0;
#endif

    // First, remove any preceding list nodes, which are not otherwise visited by the tree walk.
    //
    // NOTE: GT_FIELD_LIST head nodes and GT_LIST nodes used by phi nodes will in fact be visited.
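    //
    // Sketch of the assumed threading (illustrative only): interior list nodes are
    // linked immediately before the node that consumes them, e.g.
    //
    //     ... -> argA -> LIST -> argB -> LIST -> node
    //
    // so the loop below unlinks each such list node that precedes 'node'.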
    for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
         prev          = node->gtPrev)
    {
        prev->gtFlags &= ~GTF_REVERSE_OPS;
        BlockRange().Remove(prev);
    }

    // Now clear the REVERSE_OPS flag on the current node.
    node->gtFlags &= ~GTF_REVERSE_OPS;

    // In addition, remove the current node if it is a list node that is not a GT_FIELD_LIST head.
    if (node->OperIsAnyList())
    {
        GenTreeArgList* list = node->AsArgList();
        if (!list->OperIsFieldListHead())
        {
            BlockRange().Remove(list);
        }
        return Compiler::WALK_CONTINUE;
    }

    LIR::Use use;
    if (parentStack.Height() < 2)
    {
        use = LIR::Use::GetDummyUse(BlockRange(), *useEdge);
    }
    else
    {
        use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1));
    }

    assert(node == use.Def());
    switch (node->OperGet())
    {
        case GT_ASG:
            RewriteAssignment(use);
            break;

        case GT_BOX:
            // GT_BOX at this level just passes through, so get rid of it.
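            // Sketch: a use such as CALL(..., BOX(x), ...) simply becomes
            // CALL(..., x, ...); the BOX node is then unlinked from the block.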
            use.ReplaceWith(comp, node->gtGetOp1());
            BlockRange().Remove(node);
            break;

        case GT_ADDR:
            RewriteAddress(use);
            break;

        case GT_IND:
            // Clear the `GTF_IND_ASG_LHS` flag, which overlaps with `GTF_IND_REQ_ADDR_IN_REG`.
            node->gtFlags &= ~GTF_IND_ASG_LHS;

            if (varTypeIsSIMD(node))
            {
                RewriteSIMDOperand(use, false);
            }
            else
            {
                // Due to promotion of structs containing fields of type struct with a
                // single scalar type field, we could potentially see IR nodes of the
                // form GT_IND(GT_ADD(lclvarAddr, 0)) where 0 is an offset representing
                // a field-seq. These get folded here.
                //
                // TODO: This code can be removed once JIT implements recursive struct
                // promotion instead of lying about the type of struct field as the type
                // of its single scalar field.
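                // Sketch of the fold performed below:
                //
                //     GT_IND(GT_ADD(GT_LCL_VAR_ADDR<lcl>, 0))  =>  GT_LCL_VAR<lcl>
                //
                // applied only when the IND's type matches the local's type.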
                GenTree* addr = node->AsIndir()->Addr();
                if (addr->OperGet() == GT_ADD && addr->gtGetOp1()->OperGet() == GT_LCL_VAR_ADDR &&
                    addr->gtGetOp2()->IsIntegralConst(0))
                {
                    GenTreeLclVarCommon* lclVarNode = addr->gtGetOp1()->AsLclVarCommon();
                    unsigned             lclNum     = lclVarNode->GetLclNum();
                    LclVarDsc*           varDsc     = comp->lvaTable + lclNum;
                    if (node->TypeGet() == varDsc->TypeGet())
                    {
                        JITDUMP("Rewriting GT_IND(GT_ADD(LCL_VAR_ADDR,0)) to LCL_VAR\n");
                        lclVarNode->SetOper(GT_LCL_VAR);
                        lclVarNode->gtType = node->TypeGet();
                        use.ReplaceWith(comp, lclVarNode);
                        BlockRange().Remove(addr);
                        BlockRange().Remove(addr->gtGetOp2());
                        BlockRange().Remove(node);
                    }
                }
            }
            break;

        case GT_NOP:
            // fgMorph sometimes inserts NOP nodes between defs and uses
            // supposedly 'to prevent constant folding'. In this case, remove the
            // NOP.
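            // Sketch: a unary NOP(x) in a use position is replaced by x below,
            // while a NOP with no operand (a true no-op) is left in place.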
            if (node->gtGetOp1() != nullptr)
            {
                use.ReplaceWith(comp, node->gtGetOp1());
                BlockRange().Remove(node);
            }
            break;

        case GT_COMMA:
        {
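            // GT_COMMA(op1, op2) evaluates op1 purely for side effects and yields
            // op2's value. In linear IR the comma node itself carries no semantics,
            // so it is removed and each operand is kept or deleted based on its
            // side effects.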
            GenTree*           op1         = node->gtGetOp1();
            bool               isClosed    = false;
            unsigned           sideEffects = 0;
            LIR::ReadOnlyRange lhsRange    = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects);

            if ((sideEffects & GTF_ALL_EFFECT) == 0)
            {
                // The LHS has no side effects. Remove it.
                // None of the transforms performed herein violate tree order, so isClosed
                // should always be true.
                assert(isClosed);

                BlockRange().Delete(comp, m_block, std::move(lhsRange));
            }
            else if (op1->IsValue())
            {
                op1->SetUnusedValue();
            }

            BlockRange().Remove(node);

            GenTree* replacement = node->gtGetOp2();
            if (!use.IsDummyUse())
            {
                use.ReplaceWith(comp, replacement);
                node = replacement;
            }
            else
            {
                // This is a top-level comma. If the RHS has no side effects, we can remove
                // it as well.
                bool               isClosed    = false;
                unsigned           sideEffects = 0;
                LIR::ReadOnlyRange rhsRange    = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects);

                if ((sideEffects & GTF_ALL_EFFECT) == 0)
                {
                    // None of the transforms performed herein violate tree order, so isClosed
                    // should always be true.
                    assert(isClosed);

                    BlockRange().Delete(comp, m_block, std::move(rhsRange));
                }
                else
                {
                    node = replacement;
                }
            }
        }
        break;

        case GT_ARGPLACE:
            // Remove argplace and list nodes from the execution order.
            //
            // TODO: remove phi args and phi nodes as well?
            BlockRange().Remove(node);
            break;

#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_)
        case GT_CLS_VAR:
        {
            // Class vars that are the target of an assignment will get rewritten into
            // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is
            // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would
            // otherwise be generated would also be picked up by RewriteAssignment--but
            // skipping the rewrite here saves an allocation and a bit of extra work.
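            // Sketch: a non-LHS use  t = CLS_VAR  is rewritten below into
            //
            //     CLS_VAR_ADDR (TYP_BYREF)  followed by  t = IND(CLS_VAR_ADDR)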
            const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node);
            if (!isLHSOfAssignment)
            {
                GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node);

                node->SetOper(GT_CLS_VAR_ADDR);
                node->gtType = TYP_BYREF;

                BlockRange().InsertAfter(node, ind);
                use.ReplaceWith(comp, ind);

                // TODO: JIT dump
            }
        }
        break;
#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM_)

        case GT_INTRINSIC:
            // Non-target intrinsics should have already been rewritten back into user calls.
            assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId));
            break;

#ifdef FEATURE_SIMD
        case GT_BLK:
        case GT_OBJ:
        {
            // TODO-1stClassStructs: These should have been transformed to GT_INDs, but in order
            // to preserve existing behavior, we will keep this as a block node if this is the
            // lhs of a block assignment, and either:
            // - It is a "generic" TYP_STRUCT assignment, OR
            // - It is an initblk, OR
            // - Neither the lhs nor the rhs is known to be of SIMD type.

            GenTree* parent  = use.User();
            bool     keepBlk = false;
            if ((parent->OperGet() == GT_ASG) && (node == parent->gtGetOp1()))
            {
                if ((node->TypeGet() == TYP_STRUCT) || parent->OperIsInitBlkOp())
                {
                    keepBlk = true;
                }
                else if (!comp->isAddrOfSIMDType(node->AsBlk()->Addr()))
                {
                    GenTree* dataSrc = parent->gtGetOp2();
                    if (!dataSrc->IsLocal() && (dataSrc->OperGet() != GT_SIMD))
                    {
                        noway_assert(dataSrc->OperIsIndir());
                        keepBlk = !comp->isAddrOfSIMDType(dataSrc->AsIndir()->Addr());
                    }
                }
            }
            RewriteSIMDOperand(use, keepBlk);
        }
        break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            // TODO-1stClassStructs: Eliminate this.
            FixupIfSIMDLocal(node->AsLclVarCommon());
            break;

        case GT_SIMD:
        {
            noway_assert(comp->featureSIMD);
            GenTreeSIMD* simdNode = node->AsSIMD();
            unsigned     simdSize = simdNode->gtSIMDSize;
            var_types    simdType = comp->getSIMDTypeForSize(simdSize);

            // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
            // structs that are passed or returned in a different register type than their enregistered
            // type(s).
            if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
            {
                // This happens when it is consumed by a GT_RET_EXPR.
                // It can only be a Vector2f or Vector2i.
                assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
                simdNode->gtType = TYP_SIMD8;
            }
            // Certain SIMD trees require rationalizing.
            if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInitArray)
            {
                // Rewrite this as an explicit load.
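                // Sketch of the resulting shape:
                //
                //     GT_SIMD(InitArray, arr, startIdx)
                //       =>  IND<simdType>(LEA(arr, startIdx, scale = baseTypeSize,
                //                             offset = offsetof(CORINFO_Array, u1Elems)))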
                JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
                unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
                GenTree*     address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
                                                                      baseTypeSize, offsetof(CORINFO_Array, u1Elems));
                GenTree* ind = comp->gtNewOperNode(GT_IND, simdType, address);

                BlockRange().InsertBefore(simdNode, address, ind);
                use.ReplaceWith(comp, ind);
                BlockRange().Remove(simdNode);

                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
            }
            else
            {
                // This code depends on the fact that NONE of the SIMD intrinsics take vector operands
                // of a different width.  If that assumption changes, we will EITHER have to make these type
                // transformations during importation, and plumb the types all the way through the JIT,
                // OR add a lot of special handling here.
                GenTree* op1 = simdNode->gtGetOp1();
                if (op1 != nullptr && op1->gtType == TYP_STRUCT)
                {
                    op1->gtType = simdType;
                }

                GenTree* op2 = simdNode->gtGetOp2IfPresent();
                if (op2 != nullptr && op2->gtType == TYP_STRUCT)
                {
                    op2->gtType = simdType;
                }
            }
        }
        break;
#endif // FEATURE_SIMD

        default:
            // CMP, SETCC and JCC nodes should not be present in HIR.
            assert(!node->OperIs(GT_CMP, GT_SETCC, GT_JCC));
            break;
    }

    // Do some extra processing on top-level nodes to remove unused local reads.
    if (node->OperIsLocalRead())
    {
        if (use.IsDummyUse())
        {
            comp->lvaDecRefCnts(node);
            BlockRange().Remove(node);
        }
        else
        {
            // Local reads are side-effect-free; clear any flags leftover from frontend transformations.
            node->gtFlags &= ~GTF_ALL_EFFECT;
        }
    }
    else
    {
        if (!node->OperIsStore())
        {
            // Clear the GTF_ASG flag for all nodes but stores
            node->gtFlags &= ~GTF_ASG;
        }

        if (!node->IsCall())
        {
            // Clear the GTF_CALL flag for all nodes but calls
            node->gtFlags &= ~GTF_CALL;
        }

        if (node->IsValue() && use.IsDummyUse())
        {
            node->SetUnusedValue();
        }

        if (node->TypeGet() == TYP_LONG)
        {
            comp->compLongUsed = true;
        }
    }

    assert(isLateArg == ((use.Def()->gtFlags & GTF_LATE_ARG) != 0));

    return Compiler::WALK_CONTINUE;
}