Example #1
bool RangeCheck::IsBinOpMonotonicallyIncreasing(GenTreePtr op1, GenTreePtr op2, genTreeOps oper, SearchPath* path)
{
    JITDUMP("[RangeCheck::IsBinOpMonotonicallyIncreasing] %p, %p\n", dspPtr(op1), dspPtr(op2));
    // Check if we have a var + const.
    if (op2->OperGet() == GT_LCL_VAR)
    {
        jitstd::swap(op1, op2);
    }
    if (op1->OperGet() != GT_LCL_VAR)
    {
        JITDUMP("Not monotonic because op1 is not lclVar.\n");
        return false;
    }
    switch (op2->OperGet())
    {
    case GT_LCL_VAR:
        return IsMonotonicallyIncreasing(op1, path) && 
            IsMonotonicallyIncreasing(op2, path);

    case GT_CNS_INT:
        return oper == GT_ADD && op2->AsIntConCommon()->IconValue() >= 0 &&
            IsMonotonicallyIncreasing(op1, path);

    default:
        JITDUMP("Not monotonic because expression is not recognized.\n");
        return false;
    }
}
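
For context, here is a minimal standalone sketch of the rule the function above enforces: under GT_ADD, "local + non-negative constant" counts as monotonically increasing, and anything unrecognized is rejected. This is plain C++, not CoreCLR code; Node and Kind are hypothetical stand-ins for GenTree and genTreeOps.

#include <cstdio>

enum Kind { LclVar, CnsInt, Add };
struct Node { Kind kind; long icon; };  // icon mirrors IconValue()

// Mirrors the var+const case above: a local on the left, a non-negative
// constant on the right, combined by an add.
bool IsBinOpMonotonic(const Node& op1, const Node& op2, Kind oper)
{
    if (op1.kind != LclVar)
        return false;
    return oper == Add && op2.kind == CnsInt && op2.icon >= 0;
}

int main()
{
    Node v{LclVar, 0}, c{CnsInt, 1}, n{CnsInt, -1};
    printf("%d %d\n", IsBinOpMonotonic(v, c, Add),  // 1: i + 1 is monotonic
                      IsBinOpMonotonic(v, n, Add)); // 0: i + (-1) is not
    return 0;
}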
Example #2
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node
//
// Arguments:
//    argNode - a GT_PUTARG_STK node
//
// Return Value:
//    None.
//
// Notes:
//    Set the child node(s) to be contained when we have a multireg arg
//
void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
{
    assert(argNode->gtOper == GT_PUTARG_STK);

    GenTreePtr putArgChild = argNode->gtOp.gtOp1;

    // Initialize 'argNode' as not contained, as this is both the default case
    //  and how MakeSrcContained expects to find things set up.
    //
    argNode->gtLsraInfo.srcCount = 1;
    argNode->gtLsraInfo.dstCount = 0;

    // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST)? If so, it must be a multireg pass-by-value struct.
    if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST))
    {
        // We will use store instructions that each write a register sized value

        if (putArgChild->OperGet() == GT_FIELD_LIST)
        {
            // We consume all of the items in the GT_FIELD_LIST
            argNode->gtLsraInfo.srcCount = info->numSlots;
            putArgChild->SetContained();
        }
        else
        {
#ifdef _TARGET_ARM64_
            // We could use an ldp/stp sequence, so we need two internal registers
            argNode->gtLsraInfo.internalIntCount = 2;
#else  // _TARGET_ARM_
            // We could use an ldr/str sequence, so we need an internal register
            argNode->gtLsraInfo.internalIntCount = 1;
#endif // _TARGET_ARM_

            if (putArgChild->OperGet() == GT_OBJ)
            {
                GenTreePtr objChild = putArgChild->gtOp.gtOp1;
                if (objChild->OperGet() == GT_LCL_VAR_ADDR)
                {
                    // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
                    // as one contained operation
                    //
                    MakeSrcContained(putArgChild, objChild);
                }
            }

            // We will generate all of the code for the GT_PUTARG_STK and its child node
            // as one contained operation
            //
            MakeSrcContained(argNode, putArgChild);
        }
    }
    else
    {
        // We must not have a multi-reg struct
        assert(info->numSlots == 1);
    }
}
Example #3
void CodeGen::genCodeForTreeFloat(GenTreePtr     tree,
                                  RegSet::RegisterPreference *pref)
{
    genTreeOps      oper;
    unsigned        kind;

    assert(tree);
    assert(tree->gtOper != GT_STMT);

    // What kind of node do we have?
    oper = tree->OperGet();
    kind = tree->OperKind();

    if  (kind & GTK_CONST)
    {
        genFloatConst(tree, pref);
    }
    else if (kind & GTK_LEAF)
    {
        genFloatLeaf(tree, pref);
    }
    else if (kind & GTK_SMPOP)
    {
        genFloatSimple(tree, pref);
    }
    else
    {
        assert(oper == GT_CALL);
        genCodeForCall(tree, true);
    }       
}
Example #4
//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
//    tree - the node to lower
//
// Return Value:
//    None.
//
void Lowering::LowerRotate(GenTreePtr tree)
{
    if (tree->OperGet() == GT_ROL)
    {
        // There is no ROL instruction on ARM. Convert ROL into ROR.
        GenTreePtr rotatedValue        = tree->gtOp.gtOp1;
        unsigned   rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
        GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;

        if (rotateLeftIndexNode->IsCnsIntOrI())
        {
            ssize_t rotateLeftIndex                 = rotateLeftIndexNode->gtIntCon.gtIconVal;
            ssize_t rotateRightIndex                = rotatedValueBitSize - rotateLeftIndex;
            rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
        }
        else
        {
            GenTreePtr tmp =
                comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
            BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
            tree->gtOp.gtOp2 = tmp;
        }
        tree->ChangeOper(GT_ROR);
    }
    ContainCheckShiftRotate(tree->AsOp());
}
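
The rewrite relies on the identity rol(x, k) == ror(x, bitSize - k); the gtIconVal update above computes this directly for constant rotate counts, and the GT_NEG does the same for variable counts since rotates are taken modulo the bit size. A quick standalone check of the identity, in plain C++ rather than CoreCLR code:

#include <cstdint>
#include <cstdio>

static uint32_t rol32(uint32_t x, unsigned k) { k &= 31; return (x << k) | (x >> ((32 - k) & 31)); }
static uint32_t ror32(uint32_t x, unsigned k) { k &= 31; return (x >> k) | (x << ((32 - k) & 31)); }

int main()
{
    uint32_t x = 0x12345678;
    for (unsigned k = 0; k < 32; k++)
    {
        if (rol32(x, k) != ror32(x, 32 - k))  // 32 - 0 masks back to 0, so k = 0 holds too
        {
            printf("mismatch at k=%u\n", k);
            return 1;
        }
    }
    printf("rol(x, k) == ror(x, 32 - k) for all k\n");
    return 0;
}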
Example #5
GenTreePtr CodeGen::genMakeAddressableFloat(GenTreePtr tree, 
                                            regMaskTP * regMaskIntPtr, 
                                            regMaskTP * regMaskFltPtr, 
                                            bool bCollapseConstantDoubles)
{    
    *regMaskIntPtr = *regMaskFltPtr = 0;

    switch (tree->OperGet())
    {    

    case GT_LCL_VAR:
        genMarkLclVar(tree);
        __fallthrough;

    case GT_REG_VAR:
    case GT_LCL_FLD:
    case GT_CLS_VAR:
        return tree;

    case GT_IND:
        // Try to make the address directly addressable

        if  (genMakeIndAddrMode(tree->gtOp.gtOp1,
                                tree,
                                false,
                                RBM_ALLFLOAT,
                                RegSet::KEEP_REG,
                                regMaskIntPtr,
                                false))
        {
            genUpdateLife(tree);
            return tree;
        }
        else
        {
            GenTreePtr addr = tree;
            tree = tree->gtOp.gtOp1;
            genCodeForTree(tree, 0);
            regSet.rsMarkRegUsed(tree, addr);

            *regMaskIntPtr = genRegMask(tree->gtRegNum);                        
            return addr;
        }

    default:
        genCodeForTreeFloat(tree);
        regSet.SetUsedRegFloat(tree, true);

        // update mask
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());

        return tree;
    }    
}
Example #6
void CodeGen::genKeepAddressableFloat(GenTreePtr tree, regMaskTP *  regMaskIntPtr, regMaskTP *  regMaskFltPtr)
{
    regMaskTP regMaskInt, regMaskFlt;

    regMaskInt = *regMaskIntPtr;
    regMaskFlt = *regMaskFltPtr;

    *regMaskIntPtr = *regMaskFltPtr = 0;
        
    switch (tree->OperGet())
    {
    case GT_REG_VAR:
        // If register has been spilled, unspill it
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
        }        
        break;

    case GT_CNS_DBL:
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(tree);
        }
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
        break;

    case GT_LCL_FLD:
    case GT_LCL_VAR:
    case GT_CLS_VAR:
        break;

    case GT_IND:
        if (regMaskFlt == RBM_NONE)
        {
            *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
            *regMaskFltPtr = 0;
            return;
        } 
        __fallthrough;

    default:
        *regMaskIntPtr = 0;
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(tree);
        }
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
        break;
    }
}
Example #7
void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
{
    GenTreePtr dst  = tree;
    GenTreePtr addr = tree->gtOp.gtOp1;
    GenTreePtr src  = tree->gtOp.gtOp2;

    if (addr->OperGet() == GT_LEA)
    {
        // In the case where we are doing a helper assignment, if the dst
        // is an indir through an lea, we need to actually instantiate the
        // lea in a register
        GenTreeAddrMode* lea = addr->AsAddrMode();

        short leaSrcCount = 0;
        if (lea->Base() != nullptr)
        {
            leaSrcCount++;
        }
        if (lea->Index() != nullptr)
        {
            leaSrcCount++;
        }
        lea->gtLsraInfo.srcCount = leaSrcCount;
        lea->gtLsraInfo.dstCount = 1;
    }

#if NOGC_WRITE_BARRIERS
    NYI_ARM("NOGC_WRITE_BARRIERS");

    // For the NOGC JIT Helper calls
    //
    // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
    // the 'src'  goes into x15 (REG_WRITE_BARRIER)
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
#else
    // For the standard JIT Helper calls
    // op1 goes into REG_ARG_0 and
    // op2 goes into REG_ARG_1
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
#endif // NOGC_WRITE_BARRIERS

    // Both src and dst must reside in a register, which they should since we haven't set
    // either of them as contained.
    assert(addr->gtLsraInfo.dstCount == 1);
    assert(src->gtLsraInfo.dstCount == 1);
}
Example #8
//------------------------------------------------------------------------
// ContainCheckCast: determine whether the source of a CAST node should be contained.
//
// Arguments:
//    node - pointer to the node
//
void Lowering::ContainCheckCast(GenTreeCast* node)
{
#ifdef _TARGET_ARM_
    GenTreePtr castOp     = node->CastOp();
    var_types  castToType = node->CastToType();
    var_types  srcType    = castOp->TypeGet();

    if (varTypeIsLong(castOp))
    {
        assert(castOp->OperGet() == GT_LONG);
        MakeSrcContained(node, castOp);
    }
#endif // _TARGET_ARM_
}
Example #9
//------------------------------------------------------------------------
// TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate.
//
// Arguments:
//    tree      - The node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
{
    TreeNodeInfo* info = &(tree->gtLsraInfo);
    LinearScan*   l    = m_lsra;

    info->srcCount = 2;
    info->dstCount = 1;

    GenTreePtr shiftBy = tree->gtOp.gtOp2;
    GenTreePtr source  = tree->gtOp.gtOp1;
    if (shiftBy->IsCnsIntOrI())
    {
        MakeSrcContained(tree, shiftBy);
    }

#ifdef _TARGET_ARM_

    // The first operand of a GT_LSH_HI or GT_RSH_LO oper is a GT_LONG so that
    // we can have a three-operand form. Increment the srcCount.
    if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO)
    {
        assert(source->OperGet() == GT_LONG);
        source->SetContained();

        info->srcCount++;

        if (tree->OperGet() == GT_LSH_HI)
        {
            GenTreePtr sourceLo              = source->gtOp.gtOp1;
            sourceLo->gtLsraInfo.isDelayFree = true;
        }
        else
        {
            GenTreePtr sourceHi              = source->gtOp.gtOp2;
            sourceHi->gtLsraInfo.isDelayFree = true;
        }

        source->gtLsraInfo.hasDelayFreeSrc = true;
        info->hasDelayFreeSrc              = true;
    }

#endif // _TARGET_ARM_
}
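
To see why the GT_LSH_HI case consumes both halves of the GT_LONG (and why the low half is marked delay-free), note that the new high word of a 64-bit left shift mixes in bits shifted out of the low word. A standalone sketch in plain C++, not CoreCLR code:

#include <cstdint>
#include <cstdio>

// New high word of a 64-bit left shift by n (0 < n < 32): the high half is
// shifted, and the bits that fall out of the low half are ORed in.
static uint32_t LshHi(uint32_t lo, uint32_t hi, unsigned n)
{
    return (hi << n) | (lo >> (32 - n));
}

int main()
{
    uint64_t v = 0x11223344AABBCCDDull;
    unsigned n = 8;
    uint32_t expected = (uint32_t)((v << n) >> 32);
    printf("%08X %08X\n", LshHi((uint32_t)v, (uint32_t)(v >> 32), n), expected);  // both print 223344AA
    return 0;
}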
Example #10
//------------------------------------------------------------------------
// ContainCheckShiftRotate: Determine whether the operands of a shift or rotate should be contained.
//
// Arguments:
//    node - the node we care about
//
void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
{
    GenTreePtr shiftBy = node->gtOp2;

#ifdef _TARGET_ARM_
    GenTreePtr source = node->gtOp1;
    if (node->OperIs(GT_LSH_HI, GT_RSH_LO))
    {
        assert(source->OperGet() == GT_LONG);
        MakeSrcContained(node, source);
    }
#else  // !_TARGET_ARM_
    assert(node->OperIsShiftOrRotate());
#endif // !_TARGET_ARM_

    if (shiftBy->IsCnsIntOrI())
    {
        MakeSrcContained(node, shiftBy);
    }
}
Example #11
/**************************************************************************************
 *
 * Perform copy propagation on a given tree: as we walk the graph, if the tree is a
 * local variable, look up all currently live definitions and check whether any of
 * those definitions shares the same value number. If so, we can make the replacement.
 *
 */
void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree, LclNumToGenTreePtrStack* curSsaName)
{
    // TODO-Review: EH successor/predecessor iteration seems broken.
    if (block->bbCatchTyp == BBCT_FINALLY || block->bbCatchTyp == BBCT_FAULT)
    {
        return;
    }

    // If not local nothing to do.
    if (!tree->IsLocal())
    {
        return;
    }
    if (tree->OperGet() == GT_PHI_ARG || tree->OperGet() == GT_LCL_FLD)
    {
        return;
    }

    // Propagate only on uses.
    if (tree->gtFlags & GTF_VAR_DEF)
    {
        return;
    }
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    // Skip address exposed variables.
    if (fgExcludeFromSsa(lclNum))
    {
        return;
    }

    assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);

    for (LclNumToGenTreePtrStack::KeyIterator iter = curSsaName->Begin(); !iter.Equal(curSsaName->End()); ++iter)
    {
        unsigned newLclNum = iter.Get();

        GenTreePtr op = iter.GetValue()->Index(0);

        // Nothing to do if same.
        if (lclNum == newLclNum)
        {
            continue;
        }

        // Skip variables with assignments embedded in the statement (i.e., with a comma),
        // because we are not currently updating their SSA names as live in the copy-prop pass of the stmt.
        if (VarSetOps::IsMember(this, optCopyPropKillSet, lvaTable[newLclNum].lvVarIndex))
        {
            continue;
        }

        if (op->gtFlags & GTF_VAR_CAST)
        {
            continue;
        }
        if (gsShadowVarInfo != nullptr && lvaTable[newLclNum].lvIsParam &&
            gsShadowVarInfo[newLclNum].shadowCopy == lclNum)
        {
            continue;
        }
        ValueNum opVN = GetUseAsgDefVNOrTreeVN(op);
        if (opVN == ValueNumStore::NoVN)
        {
            continue;
        }
        if (op->TypeGet() != tree->TypeGet())
        {
            continue;
        }
        if (opVN != tree->gtVNPair.GetConservative())
        {
            continue;
        }
        if (optCopyProp_LclVarScore(&lvaTable[lclNum], &lvaTable[newLclNum], true) <= 0)
        {
            continue;
        }
        // Check whether the newLclNum is live before being substituted. Otherwise, we could end
        // up in a situation where there must've been a phi node that got pruned because the variable
        // is not live anymore. For example,
        //  if
        //     x0 = 1
        //  else
        //     x1 = 2
        //  print(c) <-- x is not live here. Let's say 'c' shares the value number with "x0."
        //
        // If we simply substituted 'c' with "x0", we would be wrong. Ideally, there would be a phi
        // node x2 = phi(x0, x1) which can then be used to substitute 'c' with. But because of pruning
        // there would be no such phi node. To solve this we'll check if 'x' is live, before replacing
        // 'c' with 'x.'
        if (!lvaTable[newLclNum].lvVerTypeInfo.IsThisPtr())
        {
            if (lvaTable[newLclNum].lvAddrExposed)
            {
                continue;
            }

            // We compute liveness only on tracked variables. So skip untracked locals.
            if (!lvaTable[newLclNum].lvTracked)
            {
                continue;
            }

            // Because of this dependence on live variable analysis, CopyProp phase is immediately
            // after Liveness, SSA and VN.
            if (!VarSetOps::IsMember(this, compCurLife, lvaTable[newLclNum].lvVarIndex))
            {
                continue;
            }
        }
        unsigned newSsaNum = SsaConfig::RESERVED_SSA_NUM;
        if (op->gtFlags & GTF_VAR_DEF)
        {
            newSsaNum = GetSsaNumForLocalVarDef(op);
        }
        else // parameters, this pointer etc.
        {
            newSsaNum = op->AsLclVarCommon()->GetSsaNum();
        }

        if (newSsaNum == SsaConfig::RESERVED_SSA_NUM)
        {
            continue;
        }

#ifdef DEBUG
        if (verbose)
        {
            JITDUMP("VN based copy assertion for ");
            printTreeID(tree);
            printf(" V%02d @%08X by ", lclNum, tree->GetVN(VNK_Conservative));
            printTreeID(op);
            printf(" V%02d @%08X.\n", newLclNum, op->GetVN(VNK_Conservative));
            gtDispTree(tree, nullptr, nullptr, true);
        }
#endif

        lvaTable[lclNum].decRefCnts(block->getBBWeight(this), this);
        lvaTable[newLclNum].incRefCnts(block->getBBWeight(this), this);
        tree->gtLclVarCommon.SetLclNum(newLclNum);
        tree->AsLclVarCommon()->SetSsaNum(newSsaNum);
#ifdef DEBUG
        if (verbose)
        {
            printf("copy propagated to:\n");
            gtDispTree(tree, nullptr, nullptr, true);
        }
#endif
        break;
    }
    return;
}
Example #12
/*****************************************************************************
 * gsMarkPtrsAndAssignGroups
 * Walk a tree looking for assignment groups, variables whose value is used
 * in a *p store or use, and variables passed to calls.  This info is then used
 * to determine which parameters are vulnerable.
 * This function carries state to know if it is under an assign node, call node
 * or indirection node.  It starts a new tree walk for its subtrees when the state
 * changes.
 */
Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr *pTree, fgWalkData *data)
{
    struct MarkPtrsInfo *pState = (MarkPtrsInfo *)data->pCallbackData;
    struct MarkPtrsInfo newState = *pState;
    Compiler *comp = data->compiler;
    GenTreePtr tree = *pTree;
    ShadowParamVarInfo *shadowVarInfo = pState->comp->gsShadowVarInfo;
    assert(shadowVarInfo);
    bool fIsBlk = false;
    unsigned lclNum;

    assert(!pState->isAssignSrc || pState->lvAssignDef != (unsigned)-1);

    if (pState->skipNextNode)
    {
        pState->skipNextNode = false;
        return WALK_CONTINUE;
    }

    switch (tree->OperGet())
    {
    // Indirections - look for *p uses and defs
    case GT_INITBLK:
    case GT_COPYOBJ:
    case GT_COPYBLK:
        fIsBlk = true;
        // fallthrough
    case GT_IND:
    case GT_LDOBJ:
    case GT_ARR_ELEM:
    case GT_ARR_INDEX:
    case GT_ARR_OFFSET:
    case GT_FIELD:

        newState.isUnderIndir = true;
        {
            if (fIsBlk)
            {
                // Blk nodes have implicit indirections.
                comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);

                if (tree->OperGet() == GT_INITBLK)
                {
                    newState.isUnderIndir = false;
                }
                comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
            else
            {
                newState.skipNextNode = true;  // Don't have to worry about which kind of node we're dealing with
                comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
        }

        return WALK_SKIP_SUBTREES;

    // local vars and param uses
    case GT_LCL_VAR:
    case GT_LCL_FLD:
        lclNum = tree->gtLclVarCommon.gtLclNum;

        if (pState->isUnderIndir)
        {
            // The variable is being dereferenced for a read or a write.
            comp->lvaTable[lclNum].lvIsPtr = 1;
        }

        if (pState->isAssignSrc)
        {
            //
            // Add lvAssignDef and lclNum to a common assign group
            if (shadowVarInfo[pState->lvAssignDef].assignGroup)
            {
                if (shadowVarInfo[lclNum].assignGroup)
                {
                    // OR both bit vectors
                    shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectOr(shadowVarInfo[lclNum].assignGroup);
                }
                else
                {
                    shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectSet(lclNum);
                }
            
                // Point both to the same bit vector
                shadowVarInfo[lclNum].assignGroup = shadowVarInfo[pState->lvAssignDef].assignGroup;
            }
            else if (shadowVarInfo[lclNum].assignGroup)
            {
                shadowVarInfo[lclNum].assignGroup->bitVectSet(pState->lvAssignDef);
            
                // Point both to the same bit vector
                shadowVarInfo[pState->lvAssignDef].assignGroup = shadowVarInfo[lclNum].assignGroup;
            }
            else
            {
                FixedBitVect *bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);

                // (shadowVarInfo[pState->lvAssignDef] == NULL && shadowVarInfo[lclNum] == NULL);
                // Neither of them has an assign group yet.  Make a new one.
                shadowVarInfo[pState->lvAssignDef].assignGroup = bv;
                shadowVarInfo[lclNum].assignGroup = bv;
                bv->bitVectSet(pState->lvAssignDef);
                bv->bitVectSet(lclNum);
            }

        }
        return WALK_CONTINUE;
    
    // Calls - Mark arg variables
    case GT_CALL:

        newState.isUnderIndir = false;
        newState.isAssignSrc = false;
        {
            if (tree->gtCall.gtCallObjp)
            {
                newState.isUnderIndir = true;
                comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void *)&newState);
            }

            for (GenTreeArgList* args = tree->gtCall.gtCallArgs; args; args = args->Rest())
            {
                comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
            for (GenTreeArgList* args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
            {
                comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
            }

            if (tree->gtCall.gtCallType == CT_INDIRECT)
            {
                newState.isUnderIndir = true;

                // A function pointer is treated like a write-through pointer since
                // it controls what code gets executed, and so indirectly can cause
                // a write to memory.
                comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
        }
        return WALK_SKIP_SUBTREES;


    case GT_ADDR:
        newState.isUnderIndir = false;
        // We'll assume p in "**p = " can be vulnerable because by changing 'p', someone
        // could control where **p stores to.
        {
            comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
        }
        return WALK_SKIP_SUBTREES;


    default:
        // Assignments - track assign groups and *p defs.
        if (tree->OperIsAssignment())
        {
            bool isLocVar;
            bool isLocFld;

            // Walk dst side
            comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            
            // Now handle src side
            isLocVar = tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR;
            isLocFld = tree->gtOp.gtOp1->OperGet() == GT_LCL_FLD;

            if ((isLocVar || isLocFld) && tree->gtOp.gtOp2)
            {
                lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
                newState.lvAssignDef = lclNum;
                newState.isAssignSrc = true;
            }

            comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);

            return WALK_SKIP_SUBTREES;
        }
    }

    return WALK_CONTINUE;
}
Example #13
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node
//
// Arguments:
//    argNode - a GT_PUTARG_SPLIT node
//
// Return Value:
//    None.
//
// Notes:
//    Set the child node(s) to be contained
//
void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode, TreeNodeInfo& info, fgArgTabEntryPtr argInfo)
{
    assert(argNode->gtOper == GT_PUTARG_SPLIT);

    GenTreePtr putArgChild = argNode->gtOp.gtOp1;

    // Registers for a split argument correspond to the source
    argNode->gtLsraInfo.dstCount = argInfo->numRegs;
    info.srcCount += argInfo->numRegs;

    regNumber argReg  = argInfo->regNum;
    regMaskTP argMask = RBM_NONE;
    for (unsigned i = 0; i < argInfo->numRegs; i++)
    {
        argMask |= genRegMask((regNumber)((unsigned)argReg + i));
    }
    argNode->gtLsraInfo.setDstCandidates(m_lsra, argMask);

    if (putArgChild->OperGet() == GT_FIELD_LIST)
    {
        // Generated code:
        // 1. Consume all of the items in the GT_FIELD_LIST (source)
        // 2. Store to target slot and move to target registers (destination) from source
        //
        argNode->gtLsraInfo.srcCount = argInfo->numRegs + argInfo->numSlots;

        // To avoid redundant moves, have the argument operand computed in the
        // register in which the argument is passed to the call.
        GenTreeFieldList* fieldListPtr = putArgChild->AsFieldList();
        for (unsigned idx = 0; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), idx++)
        {
            if (idx < argInfo->numRegs)
            {
                GenTreePtr node = fieldListPtr->gtGetOp1();
                node->gtLsraInfo.setSrcCandidates(m_lsra, genRegMask((regNumber)((unsigned)argReg + idx)));
            }
        }

        putArgChild->SetContained();
    }
    else
    {
        assert(putArgChild->TypeGet() == TYP_STRUCT);
        assert(putArgChild->OperGet() == GT_OBJ);

        // We could use an ldr/str sequence, so we need an internal register
        argNode->gtLsraInfo.srcCount         = 1;
        argNode->gtLsraInfo.internalIntCount = 1;
        regMaskTP internalMask               = RBM_ALLINT & ~argMask;
        argNode->gtLsraInfo.setInternalCandidates(m_lsra, internalMask);

        GenTreePtr objChild = putArgChild->gtOp.gtOp1;
        if (objChild->OperGet() == GT_LCL_VAR_ADDR)
        {
            // We will generate all of the code for the GT_PUTARG_SPLIT, the GT_OBJ and the GT_LCL_VAR_ADDR
            // as one contained operation
            //
            MakeSrcContained(putArgChild, objChild);
            putArgChild->gtLsraInfo.srcCount--;
        }
        argNode->gtLsraInfo.srcCount = putArgChild->gtLsraInfo.srcCount;
        MakeSrcContained(argNode, putArgChild);
    }
}
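
The destination mask built above is just a contiguous run of argInfo->numRegs register bits starting at argReg. A minimal sketch of that loop in plain C++ (not CoreCLR code; genRegMask is modelled here as a single bit shift):

#include <cstdio>

// Bits [firstReg, firstReg + numRegs) set, mirroring the
// argMask |= genRegMask((regNumber)((unsigned)argReg + i)) loop above.
static unsigned ContiguousRegMask(unsigned firstReg, unsigned numRegs)
{
    unsigned mask = 0;
    for (unsigned i = 0; i < numRegs; i++)
    {
        mask |= 1u << (firstReg + i);
    }
    return mask;
}

int main()
{
    printf("0x%X\n", ContiguousRegMask(0, 3));  // 0x7: r0, r1 and r2
    return 0;
}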
Example #14
//------------------------------------------------------------------------
// TreeNodeInfoInitCall: Set the NodeInfo for a call.
//
// Arguments:
//    call - The call node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
    TreeNodeInfo*   info              = &(call->gtLsraInfo);
    LinearScan*     l                 = m_lsra;
    Compiler*       compiler          = comp;
    bool            hasMultiRegRetVal = false;
    ReturnTypeDesc* retTypeDesc       = nullptr;

    info->srcCount = 0;
    if (call->TypeGet() != TYP_VOID)
    {
        hasMultiRegRetVal = call->HasMultiRegRetVal();
        if (hasMultiRegRetVal)
        {
            // dst count = number of registers in which the value is returned by call
            retTypeDesc    = call->GetReturnTypeDesc();
            info->dstCount = retTypeDesc->GetReturnRegCount();
        }
        else
        {
            info->dstCount = 1;
        }
    }
    else
    {
        info->dstCount = 0;
    }

    GenTree* ctrlExpr = call->gtControlExpr;
    if (call->gtCallType == CT_INDIRECT)
    {
        // either gtControlExpr != null or gtCallAddr != null.
        // Both cannot be non-null at the same time.
        assert(ctrlExpr == nullptr);
        assert(call->gtCallAddr != nullptr);
        ctrlExpr = call->gtCallAddr;
    }

    // set reg requirements on call target represented as control sequence.
    if (ctrlExpr != nullptr)
    {
        // we should never see a gtControlExpr whose type is void.
        assert(ctrlExpr->TypeGet() != TYP_VOID);

        info->srcCount++;

        // In the case of a fast tail call implemented as a jmp, make sure that gtControlExpr is
        // computed into a register.
        if (call->IsFastTailCall())
        {
            NYI_ARM("tail call");

#ifdef _TARGET_ARM64_
            // Fast tail call - make sure that call target is always computed in IP0
            // so that epilog sequence can generate "br xip0" to achieve fast tail call.
            ctrlExpr->gtLsraInfo.setSrcCandidates(l, genRegMask(REG_IP0));
#endif // _TARGET_ARM64_
        }
    }
#ifdef _TARGET_ARM_
    else
    {
        info->internalIntCount = 1;
    }
#endif // _TARGET_ARM_

    RegisterType registerType = call->TypeGet();

// Set destination candidates for return value of the call.

#ifdef _TARGET_ARM_
    if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
    {
        // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
        // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
        info->setDstCandidates(l, RBM_PINVOKE_TCB);
    }
    else
#endif // _TARGET_ARM_
        if (hasMultiRegRetVal)
    {
        assert(retTypeDesc != nullptr);
        info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
    }
    else if (varTypeIsFloating(registerType))
    {
        info->setDstCandidates(l, RBM_FLOATRET);
    }
    else if (registerType == TYP_LONG)
    {
        info->setDstCandidates(l, RBM_LNGRET);
    }
    else
    {
        info->setDstCandidates(l, RBM_INTRET);
    }

    // If there is an explicit this pointer, we don't want that node to produce anything
    // as it is redundant
    if (call->gtCallObjp != nullptr)
    {
        GenTreePtr thisPtrNode = call->gtCallObjp;

        if (thisPtrNode->gtOper == GT_PUTARG_REG)
        {
            l->clearOperandCounts(thisPtrNode);
            thisPtrNode->SetContained();
            l->clearDstCount(thisPtrNode->gtOp.gtOp1);
        }
        else
        {
            l->clearDstCount(thisPtrNode);
        }
    }

    // First, count reg args
    bool callHasFloatRegArgs = false;

    for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
    {
        assert(list->OperIsList());

        GenTreePtr argNode = list->Current();

        fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
        assert(curArgTabEntry);

        if (curArgTabEntry->regNum == REG_STK)
        {
            // late arg that is not passed in a register
            assert(argNode->gtOper == GT_PUTARG_STK);

            TreeNodeInfoInitPutArgStk(argNode->AsPutArgStk(), curArgTabEntry);
            continue;
        }

        // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct
        if (argNode->OperGet() == GT_FIELD_LIST)
        {
            argNode->SetContained();

            // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs)
            regNumber argReg = curArgTabEntry->regNum;
            for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
            {
                TreeNodeInfoInitPutArgReg(entry->Current()->AsUnOp(), argReg, *info, false, &callHasFloatRegArgs);

                // Update argReg for the next putarg_reg (if any)
                argReg = genRegArgNext(argReg);

#if defined(_TARGET_ARM_)
                // A double register is modelled as an even-numbered single one
                if (entry->Current()->TypeGet() == TYP_DOUBLE)
                {
                    argReg = genRegArgNext(argReg);
                }
#endif // _TARGET_ARM_
            }
        }
#ifdef _TARGET_ARM_
        else if (argNode->OperGet() == GT_PUTARG_SPLIT)
        {
            fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
            TreeNodeInfoInitPutArgSplit(argNode->AsPutArgSplit(), *info, curArgTabEntry);
        }
#endif
        else
        {
            TreeNodeInfoInitPutArgReg(argNode->AsUnOp(), curArgTabEntry->regNum, *info, false, &callHasFloatRegArgs);
        }
    }

    // Now, count stack args
    // Note that these need to be computed into a register, but then
    // they're just stored to the stack - so the reg doesn't
    // need to remain live until the call.  In fact, it must not
    // because the code generator doesn't actually consider it live,
    // so it can't be spilled.

    GenTreePtr args = call->gtCallArgs;
    while (args)
    {
        GenTreePtr arg = args->gtOp.gtOp1;

        // Skip arguments that have been moved to the Late Arg list
        if (!(args->gtFlags & GTF_LATE_ARG))
        {
            if (arg->gtOper == GT_PUTARG_STK)
            {
                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                assert(curArgTabEntry);

                assert(curArgTabEntry->regNum == REG_STK);

                TreeNodeInfoInitPutArgStk(arg->AsPutArgStk(), curArgTabEntry);
            }
#ifdef _TARGET_ARM_
            else if (arg->OperGet() == GT_PUTARG_SPLIT)
            {
                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                TreeNodeInfoInitPutArgSplit(arg->AsPutArgSplit(), *info, curArgTabEntry);
            }
#endif
            else
            {
                TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
                if (argInfo->dstCount != 0)
                {
                    argInfo->isLocalDefUse = true;
                }

                argInfo->dstCount = 0;
            }
        }
        args = args->gtOp.gtOp2;
    }

    // If it is a fast tail call, it is already preferenced to use IP0.
    // Therefore, there is no need to set src candidates on the call target again.
    if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
    {
        NYI_ARM("float reg varargs");

        // Don't assign the call target to any of the argument registers because
        // we will use them to also pass floating point arguments as required
        // by Arm64 ABI.
        ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
    }

#ifdef _TARGET_ARM_

    if (call->NeedsNullCheck())
    {
        info->internalIntCount++;
    }

#endif // _TARGET_ARM_
}
Example #15
void CodeGen::genLoadFloat(GenTreePtr tree, regNumber reg)
{
    if (tree->IsRegVar())
    {
        // If it has been spilled, unspill it.
        LclVarDsc * varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
        if (varDsc->lvSpilled)
        {
            UnspillFloat(varDsc);
        }

        inst_RV_RV(ins_FloatCopy(tree->TypeGet()), reg, tree->gtRegNum, tree->TypeGet());
    }
    else
    {
        bool unalignedLoad = false;
        switch (tree->OperGet())
        {
        case GT_IND:
        case GT_CLS_VAR:
            if  (tree->gtFlags & GTF_IND_UNALIGNED)
                unalignedLoad = true;
            break;
        case GT_LCL_FLD:
            // Check for a misalignment on a Floating Point field
            //
            if (varTypeIsFloating(tree->TypeGet()))
            {                
                if ((tree->gtLclFld.gtLclOffs % emitTypeSize(tree->TypeGet())) != 0)
                {
                    unalignedLoad = true;
                }
            }
            break;
        default:
            break;
        }

        if (unalignedLoad)
        {
            // Make the target addressable
            //
            regMaskTP   addrReg = genMakeAddressable(tree, 0, RegSet::KEEP_REG, true);
            regSet.rsLockUsedReg(addrReg);  // Must prevent regSet.rsGrabReg from choosing an addrReg

            var_types loadType = tree->TypeGet();
            assert(loadType == TYP_DOUBLE || loadType == TYP_FLOAT);

            // Unaligned Floating-Point Loads must be loaded into integer register(s)
            // and then moved over to the Floating-Point register
            regNumber  intRegLo    = regSet.rsGrabReg(RBM_ALLINT);
            regNumber  intRegHi    = REG_NA;
            regMaskTP  tmpLockMask = genRegMask(intRegLo);

            if (loadType == TYP_DOUBLE)
            {
                intRegHi     = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(intRegLo));
                tmpLockMask |= genRegMask(intRegHi);
            }
            
            regSet.rsLockReg(tmpLockMask);     // Temporarily lock the intRegs 
            tree->gtType = TYP_INT;     // Temporarily change the type to TYP_INT

            inst_RV_TT(ins_Load(TYP_INT), intRegLo, tree);
            regTracker.rsTrackRegTrash(intRegLo);

            if (loadType == TYP_DOUBLE)
            {
                inst_RV_TT(ins_Load(TYP_INT), intRegHi, tree, 4);
                regTracker.rsTrackRegTrash(intRegHi);
            }

            tree->gtType = loadType;    // Change the type back to the floating point type
            regSet.rsUnlockReg(tmpLockMask);   // Unlock the intRegs

            // move the integer register(s) over to the FP register
            //
            if  (loadType == TYP_DOUBLE)
                getEmitter()->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, reg, intRegLo, intRegHi);
            else 
                getEmitter()->emitIns_R_R(INS_vmov_i2f, EA_4BYTE, reg, intRegLo);

            // Free up anything that was tied up by genMakeAddressable
            //
            regSet.rsUnlockUsedReg(addrReg);
            genDoneAddressable(tree, addrReg, RegSet::KEEP_REG);
        }
        else
        {
            inst_RV_TT(ins_FloatLoad(tree->TypeGet()), reg, tree);
        }
        if (((tree->OperGet() == GT_CLS_VAR) || (tree->OperGet() == GT_IND)) && 
            (tree->gtFlags & GTF_IND_VOLATILE))
        {
            // Emit a memory barrier instruction after the load 
            instGen_MemoryBarrier();
        }
    }
}
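
The unaligned path above loads the value as one or two integer words and reassembles it in a floating-point register via vmov. A standalone sketch of the same idea in portable C++ (not CoreCLR code; it assumes a little-endian target, as on ARM here):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Load a possibly misaligned double as two 32-bit halves, then reassemble,
// mirroring the two ldr instructions plus INS_vmov_i2d above.
static double LoadUnalignedDouble(const unsigned char* p)
{
    uint32_t lo, hi;
    memcpy(&lo, p, 4);      // like "ldr intRegLo, [addr]"
    memcpy(&hi, p + 4, 4);  // like "ldr intRegHi, [addr+4]"
    uint64_t bits = ((uint64_t)hi << 32) | lo;  // little-endian half order
    double d;
    memcpy(&d, &bits, 8);
    return d;
}

int main()
{
    unsigned char buf[16] = {0};
    double v = 3.5;
    memcpy(buf + 1, &v, 8);  // deliberately misalign by one byte
    printf("%f\n", LoadUnalignedDouble(buf + 1));  // prints 3.500000
    return 0;
}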
Example #16
bool RangeCheck::IsMonotonicallyIncreasing(GenTreePtr expr, SearchPath* path)
{
    JITDUMP("[RangeCheck::IsMonotonicallyIncreasing] %p\n", dspPtr(expr));
    if (path->Lookup(expr))
    {
        return true;
    }

    // Add hashtable entry for expr.
    path->Set(expr, NULL);

    // Remove hashtable entry for expr when we exit the present scope.
    auto code = [&] { path->Remove(expr); };
    jitstd::utility::scoped_code<decltype(code)> finally(code);

    // If the rhs expr is constant, then it is not part of the dependency
    // loop which has to increase monotonically.
    ValueNum vn = expr->gtVNPair.GetConservative();
    if (m_pCompiler->vnStore->IsVNConstant(vn))
    {
        return true;
    }
    // If the rhs expr is local, then try to find the def of the local.
    else if (expr->IsLocal())
    {
        Location* loc = GetDef(expr);
        if (loc == nullptr)
        {
            return false;
        }
        GenTreePtr asg = loc->parent;
        assert(asg->OperKind() & GTK_ASGOP);
        switch (asg->OperGet())
        {
        case GT_ASG:
            return IsMonotonicallyIncreasing(asg->gtGetOp2(), path);

        case GT_ASG_ADD:
            return IsBinOpMonotonicallyIncreasing(asg->gtGetOp1(), asg->gtGetOp2(), GT_ADD, path);
        }
        JITDUMP("Unknown local definition type\n");
        return false;
    }
    else if (expr->OperGet() == GT_ADD)
    {
        return IsBinOpMonotonicallyIncreasing(expr->gtGetOp1(), expr->gtGetOp2(), GT_ADD, path);
    }
    else if (expr->OperGet() == GT_PHI)
    {
        for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList();
                args != nullptr; args = args->Rest())
        {
            // If the arg is already in the path, skip.
            if (path->Lookup(args->Current()))
            {
                continue;
            }
            if (!IsMonotonicallyIncreasing(args->Current(), path))
            {
                JITDUMP("Phi argument not monotonic\n");
                return false;
            }
        }
        return true;
    }
    JITDUMP("Unknown tree type\n");
    return false;
}
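
The SearchPath bookkeeping above is a cycle guard: when the walk revisits an expression already on the current path (for example a def like i = i + 1 that depends on itself), it optimistically answers true instead of recursing forever. A minimal sketch of that pattern in plain C++ (not CoreCLR code; Expr and the std::set stand in for GenTree and SearchPath):

#include <cstdio>
#include <set>

struct Expr
{
    int         id;
    const Expr* dep;       // what this expression's definition depends on
    bool        monotonic; // does the local step preserve monotonicity?
};

static bool IsMonotonic(const Expr* e, std::set<int>& path)
{
    if (path.count(e->id))
        return true;        // revisit on the current path: optimistic answer
    path.insert(e->id);
    bool ok = e->monotonic && (e->dep == nullptr || IsMonotonic(e->dep, path));
    path.erase(e->id);      // like the scoped_code removal above
    return ok;
}

int main()
{
    Expr a{1, nullptr, true};
    a.dep = &a;             // i defined in terms of itself, as in i = i + 1
    std::set<int> path;
    printf("%d\n", IsMonotonic(&a, path));  // 1: the cycle does not diverge
    return 0;
}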
Example #17
void CodeGen::genFloatArith (GenTreePtr tree,
                             RegSet::RegisterPreference *tgtPref)
{
    var_types       type    = tree->TypeGet();
    genTreeOps      oper    = tree->OperGet();
    GenTreePtr      op1     = tree->gtGetOp1();
    GenTreePtr      op2     = tree->gtGetOp2();

    regNumber       tgtReg;
    unsigned        varNum;
    LclVarDsc   *   varDsc;
    VARSET_TP       varBit;

    assert(oper == GT_ADD ||
           oper == GT_SUB ||
           oper == GT_MUL ||
           oper == GT_DIV);

    RegSet::RegisterPreference defaultPref(RBM_ALLFLOAT, RBM_NONE);
    if (tgtPref == NULL)
    {
        tgtPref = &defaultPref;
    }

    // Is the op2 (RHS) more complex than op1 (LHS)?
    //
    if  (tree->gtFlags & GTF_REVERSE_OPS)
    {
        regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op1->gtRsvdRegs);
        RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);

        // Evaluate op2 into a floating point register 
        //
        genCodeForTreeFloat(op2, &pref);
        regSet.SetUsedRegFloat(op2, true);

        // Evaluate op1 into any floating point register 
        //
        genCodeForTreeFloat(op1);
        regSet.SetUsedRegFloat(op1, true);

        regNumber  op1Reg  = op1->gtRegNum;
        regMaskTP  op1Mask = genRegMaskFloat(op1Reg, type);

        // Fix 388445 ARM JitStress WP7
        regSet.rsLockUsedReg(op1Mask);
        genRecoverReg(op2, RBM_ALLFLOAT, RegSet::KEEP_REG);
        noway_assert(op2->gtFlags & GTF_REG_VAL);
        regSet.rsUnlockUsedReg(op1Mask);

        regSet.SetUsedRegFloat(op1, false);
        regSet.SetUsedRegFloat(op2, false);
    }
    else
    {
        regMaskTP bestRegs = regSet.rsNarrowHint(RBM_ALLFLOAT, ~op2->gtRsvdRegs);
        RegSet::RegisterPreference pref(RBM_ALLFLOAT, bestRegs);

        // Evaluate op1 into a floating point register 
        //
        genCodeForTreeFloat(op1, &pref);
        regSet.SetUsedRegFloat(op1, true);

        // Evaluate op2 into any floating point register 
        //
        genCodeForTreeFloat(op2);
        regSet.SetUsedRegFloat(op2, true);
        
        regNumber  op2Reg  = op2->gtRegNum;
        regMaskTP  op2Mask = genRegMaskFloat(op2Reg, type);

        // Fix 388445 ARM JitStress WP7
        regSet.rsLockUsedReg(op2Mask);
        genRecoverReg(op1, RBM_ALLFLOAT, RegSet::KEEP_REG);
        noway_assert(op1->gtFlags & GTF_REG_VAL);
        regSet.rsUnlockUsedReg(op2Mask);

        regSet.SetUsedRegFloat(op2, false); 
        regSet.SetUsedRegFloat(op1, false);
    }

    tgtReg = regSet.PickRegFloat(type, tgtPref, true);

    noway_assert(op1->gtFlags & GTF_REG_VAL);
    noway_assert(op2->gtFlags & GTF_REG_VAL);

    inst_RV_RV_RV(ins_MathOp(oper, type), tgtReg, op1->gtRegNum, op2->gtRegNum, emitActualTypeSize(type));

    genCodeForTreeFloat_DONE(tree, tgtReg);
}
Example #18
void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreePtr stmt, GenTreePtr treeParent)
{
    // Check if we are dealing with a bounds check node.
    if (treeParent->OperGet() != GT_COMMA)
    {
        return;
    }

    // If we are not looking at array bounds check, bail.
    GenTreePtr tree = treeParent->gtOp.gtOp1;
    if (tree->gtOper != GT_ARR_BOUNDS_CHECK)
    {
        return;
    }

    GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
    m_pCurBndsChk = bndsChk;
    GenTreePtr treeIndex = bndsChk->gtIndex;

    // Take care of constant index first, like a[2], for example.
    ValueNum idxVn = treeIndex->gtVNPair.GetConservative();
    ValueNum arrLenVn = bndsChk->gtArrLen->gtVNPair.GetConservative();
    int arrSize = GetArrLength(arrLenVn);
    JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
    if (m_pCompiler->vnStore->IsVNConstant(idxVn) && arrSize > 0)
    {
        ssize_t idxVal = -1;
        unsigned iconFlags = 0;
        if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
        {
            return;
        }

        JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn VN%X sz:%d>.\n", idxVal, arrLenVn, arrSize);
        if (arrSize > 0 && idxVal < arrSize && idxVal >= 0)
        {
            JITDUMP("Removing range check\n");
            m_pCompiler->optRemoveRangeCheck(treeParent, stmt, true, GTF_ASG, true /* force remove */);
            return;
        }
    }

    GetRangeMap()->RemoveAll();
    GetOverflowMap()->RemoveAll();

    // Get the range for this index.
    SearchPath* path = new (m_pCompiler->getAllocator()) SearchPath(m_pCompiler->getAllocator());

    Range range = GetRange(block, stmt, treeIndex, path, false DEBUGARG(0));

    // If upper or lower limit is found to be unknown (top), or it was found to
    // be unknown because of over budget or a deep search, then return early.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        // Note: If we had stack depth too deep in the GetRange call, we'd be
        // too deep even in the DoesOverflow call. So return early.
        return;
    }

    if (DoesOverflow(block, stmt, treeIndex, path))
    {
        JITDUMP("Method determined to overflow.\n");
        return;
    }

    JITDUMP("Range value %s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
    path->RemoveAll();
    Widen(block, stmt, treeIndex, path, &range);

    // If upper or lower limit is unknown, then return.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        return;
    }

    // Is the range between the lower and upper bound values?
    if (BetweenBounds(range, 0, bndsChk->gtArrLen))
    {
        JITDUMP("[RangeCheck::OptimizeRangeCheck] Between bounds\n");
        m_pCompiler->optRemoveRangeCheck(treeParent, stmt, true, GTF_ASG, true /* force remove */);
    }
    return;
}
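
The constant-index early-out above reduces to a pure range test: a known index idxVal can drop its bounds check when 0 <= idxVal < arrSize. A minimal sketch of just that predicate in plain C++ (not CoreCLR code):

#include <cstdio>

// Mirrors the "idxVal < arrSize && idxVal >= 0" test above for an array
// whose length is known to be arrSize.
static bool CanRemoveConstantIndexCheck(long idxVal, int arrSize)
{
    return arrSize > 0 && idxVal >= 0 && idxVal < arrSize;
}

int main()
{
    printf("%d %d %d\n",
           CanRemoveConstantIndexCheck(2, 10),   // 1: a[2] with length 10
           CanRemoveConstantIndexCheck(-1, 10),  // 0: negative index
           CanRemoveConstantIndexCheck(2, 0));   // 0: length not known positive
    return 0;
}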
Example #19
File: gcinfo.cpp  Project: ROOTU/coreclr
GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr)
{
    GCInfo::WriteBarrierForm result = GCInfo::WBF_BarrierUnknown; // Default case, we have no information.

    // If we store through an int to a GC_REF field, we'll assume that it needs to use a checked barrier.
    if (tgtAddr->TypeGet() == TYP_I_IMPL)
    {
        return GCInfo::WBF_BarrierChecked; // Why isn't this GCInfo::WBF_BarrierUnknown?
    }

    // Otherwise...
    assert(tgtAddr->TypeGet() == TYP_BYREF);
    bool simplifiedExpr = true;
    while (simplifiedExpr)
    {
        simplifiedExpr = false;

        tgtAddr = tgtAddr->gtSkipReloadOrCopy();

        while (tgtAddr->OperGet() == GT_ADDR && tgtAddr->gtOp.gtOp1->OperGet() == GT_IND)
        {
            tgtAddr        = tgtAddr->gtOp.gtOp1->gtOp.gtOp1;
            simplifiedExpr = true;
            assert(tgtAddr->TypeGet() == TYP_BYREF);
        }
        // For additions, one of the operands is a byref or a ref (and the other is not).  Follow this down to its
        // source.
        while (tgtAddr->OperGet() == GT_ADD || tgtAddr->OperGet() == GT_LEA)
        {
            if (tgtAddr->OperGet() == GT_ADD)
            {
                if (tgtAddr->gtOp.gtOp1->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp1->TypeGet() == TYP_REF)
                {
                    assert(!(tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF));
                    tgtAddr        = tgtAddr->gtOp.gtOp1;
                    simplifiedExpr = true;
                }
                else if (tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF)
                {
                    tgtAddr        = tgtAddr->gtOp.gtOp2;
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int. For example:
                    //        const     int    0
                    //    +         byref
                    //        lclVar    int    V06 loc5  // this is a local declared "valuetype VType*"
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
            else
            {
                // Must be an LEA (i.e., an AddrMode)
                assert(tgtAddr->OperGet() == GT_LEA);
                tgtAddr = tgtAddr->AsAddrMode()->Base();
                if (tgtAddr->TypeGet() == TYP_BYREF || tgtAddr->TypeGet() == TYP_REF)
                {
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int.
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
        }
    }
    if (tgtAddr->IsLocalAddrExpr() != nullptr)
    {
        // No need for a GC barrier when writing to a local variable.
        return GCInfo::WBF_NoBarrier;
    }
    if (tgtAddr->OperGet() == GT_LCL_VAR || tgtAddr->OperGet() == GT_REG_VAR)
    {
        unsigned lclNum = 0;
        if (tgtAddr->gtOper == GT_LCL_VAR)
        {
            lclNum = tgtAddr->gtLclVar.gtLclNum;
        }
        else
        {
            assert(tgtAddr->gtOper == GT_REG_VAR);
            lclNum = tgtAddr->gtRegVar.gtLclNum;
        }

        LclVarDsc* varDsc = &compiler->lvaTable[lclNum];

        // Instead of marking the LclVar with 'lvStackByref', consider decomposing
        // the Value Number given to this LclVar to see if it was created using a
        // GT_ADDR(GT_LCLVAR) or a GT_ADD(GT_ADDR(GT_LCLVAR), Constant).

        // We may have an internal compiler temp created in fgMorphCopyBlock() that we know
        // points at one of our stack local variables; it will have lvStackByref set to true.
        //
        if (varDsc->lvStackByref)
        {
            assert(varDsc->TypeGet() == TYP_BYREF);
            return GCInfo::WBF_NoBarrier;
        }

        // We don't eliminate for inlined methods, where we (can) know where the "retBuff" points.
        if (!compiler->compIsForInlining() && lclNum == compiler->info.compRetBuffArg)
        {
            assert(compiler->info.compRetType == TYP_STRUCT); // Else shouldn't have a ret buff.

            // Are we assured that the ret buff pointer points into the stack of a caller?
            if (compiler->info.compRetBuffDefStack)
            {
#if 0
                // This is an optional debugging mode.  If the #if 0 above is changed to #if 1,
                // every barrier we remove for stores to GC ref fields of a retbuff use a special
                // helper that asserts that the target is not in the heap.
#ifdef DEBUG
                return WBF_NoBarrier_CheckNotHeapInDebug;
#else
                return WBF_NoBarrier;
#endif
#else  // 0
                return GCInfo::WBF_NoBarrier;
#endif // 0
            }
        }
    }
    if (tgtAddr->TypeGet() == TYP_REF)
    {
        return GCInfo::WBF_BarrierUnchecked;
    }
    // Otherwise, we have no information.
    return GCInfo::WBF_BarrierUnknown;
}
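
The ADD/LEA loop above works because at most one operand of such an address carries the byref/ref; the walk just follows that operand down to its source. A toy model of that chase in plain C++ (not CoreCLR code; N and Ty are hypothetical stand-ins for GenTree and its types):

#include <cstdio>

enum Ty { Int, Byref };
struct N { Ty ty; N* op1; N* op2; };  // op1/op2 are null for leaves

// Follow the single byref operand of each interior (add-like) node down
// to its source, mirroring the while loop over GT_ADD above.
static N* ChaseByref(N* n)
{
    while (n->op1 != nullptr)
    {
        if (n->op1->ty == Byref)
            n = n->op1;
        else if (n->op2->ty == Byref)
            n = n->op2;
        else
            return nullptr;  // neither side is a byref: a native int, no info
    }
    return n;
}

int main()
{
    N base{Byref, nullptr, nullptr};
    N ofs{Int, nullptr, nullptr};
    N add{Byref, &base, &ofs};
    printf("%d\n", ChaseByref(&add) == &base);  // 1: chased down to the base
    return 0;
}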
Example #20
//------------------------------------------------------------------------
// TreeNodeInfoInitIndir: Specify register requirements for address expression
//                       of an indirection operation.
//
// Arguments:
//    indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
//
void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
{
    assert(indirTree->OperIsIndir());
    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
    // it has no register requirements.
    if (indirTree->TypeGet() == TYP_STRUCT)
    {
        return;
    }

    GenTreePtr    addr = indirTree->gtGetOp1();
    TreeNodeInfo* info = &(indirTree->gtLsraInfo);

    GenTreePtr base  = nullptr;
    GenTreePtr index = nullptr;
    unsigned   cns   = 0;
    unsigned   mul;
    bool       rev;
    bool       modifiedSources = false;
    bool       makeContained   = true;

    if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
    {
        GenTreeAddrMode* lea = addr->AsAddrMode();
        base                 = lea->Base();
        index                = lea->Index();
        cns                  = lea->gtOffset;

#ifdef _TARGET_ARM_
        // ARM floating-point load/store doesn't support a form similar to integer
        // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
        // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
        if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
        {
            if (indirTree->OperGet() == GT_STOREIND)
            {
                if (varTypeIsFloating(indirTree->AsStoreInd()->Data()))
                {
                    makeContained = false;
                }
            }
            else if (indirTree->OperGet() == GT_IND)
            {
                if (varTypeIsFloating(indirTree))
                {
                    makeContained = false;
                }
            }
        }
#endif

        if (makeContained)
        {
            m_lsra->clearOperandCounts(addr);
            addr->SetContained();
            // The srcCount is decremented because addr is now "contained",
            // then we account for the base and index below, if they are non-null.
            info->srcCount--;
        }
    }
    else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
             !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
    {
        // An addressing mode will be constructed that may cause some
        // nodes to not need a register, and cause others' lifetimes to be extended
        // to the GT_IND or even its parent if it's an assignment

        assert(base != addr);
        m_lsra->clearOperandCounts(addr);
        addr->SetContained();

        // Traverse the computation below GT_IND to find the operands
        // for the addressing mode, marking the various constants and
        // intermediate results as not consuming/producing.
        // If the traversal were more complex, we might consider using
        // a traversal function, but the addressing mode is only made
        // up of simple arithmetic operators, and the code generator
        // only traverses one leg of each node.

        bool       foundBase  = (base == nullptr);
        bool       foundIndex = (index == nullptr);
        GenTreePtr nextChild  = nullptr;
        for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
        {
            nextChild      = nullptr;
            GenTreePtr op1 = child->gtOp.gtOp1;
            GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;

            if (op1 == base)
            {
                foundBase = true;
            }
            else if (op1 == index)
            {
                foundIndex = true;
            }
            else
            {
                m_lsra->clearOperandCounts(op1);
                op1->SetContained();
                if (!op1->OperIsLeaf())
                {
                    nextChild = op1;
                }
            }

            if (op2 != nullptr)
            {
                if (op2 == base)
                {
                    foundBase = true;
                }
                else if (op2 == index)
                {
                    foundIndex = true;
                }
                else
                {
                    m_lsra->clearOperandCounts(op2);
                    op2->SetContained();
                    if (!op2->OperIsLeaf())
                    {
                        assert(nextChild == nullptr);
                        nextChild = op2;
                    }
                }
            }
        }
        assert(foundBase && foundIndex);
        info->srcCount--; // it gets incremented below.
    }
    else if (addr->gtOper == GT_ARR_ELEM)
    {
        // The GT_ARR_ELEM consumes all the indices and produces the offset.
        // The array object lives until the mem access.
        // We also consume the target register to which the address is
        // computed

        info->srcCount++;
        assert(addr->gtLsraInfo.srcCount >= 2);
        addr->gtLsraInfo.srcCount -= 1;
    }
    else
    {
        // it is nothing but a plain indir
        info->srcCount--; // base gets added in below
        base = addr;
    }

    if (!makeContained)
    {
        return;
    }

    if (base != nullptr)
    {
        info->srcCount++;
    }
    if (index != nullptr && !modifiedSources)
    {
        info->srcCount++;
    }

    // On ARM we may need a single internal register
    // (when both conditions below are true, we still only need a single internal register)
    if ((index != nullptr) && (cns != 0))
    {
        // ARM does not support both Index and offset so we need an internal register
        info->internalIntCount = 1;
    }
    else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
    {
        // This offset can't be contained in the ldr/str instruction, so we need an internal register
        info->internalIntCount = 1;
    }
}