Example #1
0
void RangeCheck::Widen(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree, SearchPath* path, Range* pRange)
{
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        printf("[RangeCheck::Widen] BB%02d, \n", block->bbNum);
        Compiler::printTreeID(tree);
        printf("\n");
    }
#endif // DEBUG

    Range& range = *pRange;

    // Try to deduce the lower bound, if it is not known already.
    if (range.LowerLimit().IsDependent() || range.LowerLimit().IsUnknown())
    {
        // To determine the lower bound, ask if the loop increases monotonically.
        bool increasing = IsMonotonicallyIncreasing(tree, path);
        JITDUMP("IsMonotonicallyIncreasing %d", increasing);
        if (increasing)
        {
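            // The index only grows, so drop the cached ranges and recompute this one
            // with the monotonic flag set (the 'true' argument to GetRange).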
            GetRangeMap()->RemoveAll();
            *pRange = GetRange(block, stmt, tree, path, true DEBUGARG(0));
        }
    }
}
Example #2
0
/*****************************************************************************
 * gsGSChecksInitCookie
 * Grabs the cookie for detecting overflow of unsafe buffers.
 */
void Compiler::gsGSChecksInitCookie()
{
    var_types type = TYP_I_IMPL;

    lvaGSSecurityCookie = lvaGrabTemp(false DEBUGARG("GSSecurityCookie"));

    // Prevent cookie init/check from being optimized
    lvaSetVarAddrExposed(lvaGSSecurityCookie);
    lvaTable[lvaGSSecurityCookie].lvType = type;

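    // Ask the EE for the cookie: it supplies either a constant cookie value or the address
    // of a global cookie variable, filling in the corresponding out-parameter.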
    info.compCompHnd->getGSCookie(&gsGlobalSecurityCookieVal, &gsGlobalSecurityCookieAddr);
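
    // Conceptually (hypothetical sketch, not the JIT's actual emitted IR), the cookie grabbed
    // here enables a guard of the form:
    //     frameCookie = globalCookie;          // stored in the prolog
    //     ... user code touching unsafe buffers ...
    //     if (frameCookie != globalCookie)     // checked before returning
    //         FailFast();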
}
Example #3
0
/*****************************************************************************
 * gsParamsToShadows
 * Copy each vulnerable param ptr or buffer to a local shadow copy and replace
 * uses of the param by the shadow copy
 */
void Compiler::gsParamsToShadows()
{
    // Cache the old count since we'll add new variables, and
    // gsShadowVarInfo will not grow to accommodate the new ones.
    UINT lvaOldCount = lvaCount;

    // Create shadow copy for each param candidate
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc *varDsc = &lvaTable[lclNum];
        gsShadowVarInfo[lclNum].shadowCopy = NO_SHADOW_COPY;

        // Only care about params whose values are on the stack
        if (!ShadowParamVarInfo::mayNeedShadowCopy(varDsc))
        {
            continue;
        }

        if (!varDsc->lvIsPtr && !varDsc->lvIsUnsafeBuffer)
        {            
            continue;
        }


        int shadowVar = lvaGrabTemp(false DEBUGARG("shadowVar"));

        // Copy the relevant type, exposure, and tracking info from the original param to the shadow.
        var_types type = varTypeIsSmall(varDsc->TypeGet()) ? TYP_INT : varDsc->TypeGet();
        lvaTable[shadowVar].lvType        = type;
        lvaTable[shadowVar].lvAddrExposed = varDsc->lvAddrExposed;
        lvaTable[shadowVar].lvDoNotEnregister = varDsc->lvDoNotEnregister;
#ifdef DEBUG
        lvaTable[shadowVar].lvVMNeedsStackAddr = varDsc->lvVMNeedsStackAddr;
        lvaTable[shadowVar].lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
        lvaTable[shadowVar].lvLclFieldExpr = varDsc->lvLclFieldExpr;
        lvaTable[shadowVar].lvLiveAcrossUCall = varDsc->lvLiveAcrossUCall;
#endif
        lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo;
        lvaTable[shadowVar].lvGcLayout    = varDsc->lvGcLayout;
        lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer;
        lvaTable[shadowVar].lvIsPtr       = varDsc->lvIsPtr;

#ifdef  DEBUG
        if (verbose)
        {
            printf("Var V%02u is shadow param candidate. Shadow copy is V%02u.\n", lclNum, shadowVar);
        }
#endif

        gsShadowVarInfo[lclNum].shadowCopy = shadowVar;
    }

    // Replace param uses with shadow copy
    fgWalkAllTreesPre(gsReplaceShadowParams, (void *)this);

    // Now insert code to copy each param to its shadow copy.
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc *varDsc = &lvaTable[lclNum];

        unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
        if (shadowVar == NO_SHADOW_COPY)
        {
            continue;
        }

        var_types type = lvaTable[shadowVar].TypeGet();

        GenTreePtr src = gtNewLclvNode(lclNum, varDsc->TypeGet());
        GenTreePtr dst = gtNewLclvNode(shadowVar, type);

        src->gtFlags |= GTF_DONT_CSE;
        dst->gtFlags |= GTF_DONT_CSE;

        GenTreePtr opAssign = NULL;
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();

            // We don't need unsafe value cls check here since we are copying the params and this flag
            // would have been set on the original param before reaching here.
            lvaSetStruct(shadowVar, clsHnd, false);

            src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
            dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);

            opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
#if FEATURE_MULTIREG_ARGS_OR_RET
            lvaTable[shadowVar].lvIsMultiRegArgOrRet = lvaTable[lclNum].lvIsMultiRegArgOrRet;
#endif // FEATURE_MULTIREG_ARGS_OR_RET
        }
        else
        {
            opAssign = gtNewAssignNode(dst, src);
        }
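        // Morph the copy and insert it at the start of the first block, so the shadow is
        // initialized before any use of the parameter.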
        fgEnsureFirstBBisScratch();
        (void) fgInsertStmtAtBeg(fgFirstBB, fgMorphTree(opAssign));
    }

    // If the method has "Jmp CalleeMethod", then we need to copy shadow params back to original
    // params before "jmp" to CalleeMethod.
    if (compJmpOpUsed)
    {
        // There could be more than one basic block ending with a "Jmp" type tail call.
        // We would have to insert assignments in all such blocks, just before the GT_JMP statement.
        for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
        {
            if (block->bbJumpKind != BBJ_RETURN)
            {
                continue;
            }

            if  ((block->bbFlags & BBF_HAS_JMP) == 0) 
            {
                continue;
            }

            for (UINT lclNum = 0; lclNum < info.compArgsCount; lclNum++)
            {
                LclVarDsc *varDsc = &lvaTable[lclNum];

                unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
                if (shadowVar == NO_SHADOW_COPY)
                {
                    continue;
                }

                GenTreePtr src = gtNewLclvNode(shadowVar, lvaTable[shadowVar].TypeGet());
                GenTreePtr dst = gtNewLclvNode(lclNum, varDsc->TypeGet());
                
                src->gtFlags |= GTF_DONT_CSE;
                dst->gtFlags |= GTF_DONT_CSE;

                GenTreePtr opAssign = nullptr;
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                    src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
                    dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);

                    opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
                }
                else
                {
                    opAssign = gtNewAssignNode(dst, src);
                }
                
                (void) fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
            }

        }
    }
}
Example #4
0
void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreePtr stmt, GenTreePtr treeParent)
{
    // Check if we are dealing with a bounds check node.
    if (treeParent->OperGet() != GT_COMMA)
    {
        return;
    }

    // If we are not looking at array bounds check, bail.
    GenTreePtr tree = treeParent->gtOp.gtOp1;
    if (tree->gtOper != GT_ARR_BOUNDS_CHECK)
    {
        return;
    }

    GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
    m_pCurBndsChk = bndsChk;
    GenTreePtr treeIndex = bndsChk->gtIndex;

    // Take care of constant index first, like a[2], for example.
    ValueNum idxVn = treeIndex->gtVNPair.GetConservative();
    ValueNum arrLenVn = bndsChk->gtArrLen->gtVNPair.GetConservative();
    int arrSize = GetArrLength(arrLenVn);
    JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
    if (m_pCompiler->vnStore->IsVNConstant(idxVn) && arrSize > 0)
    {
        ssize_t idxVal = -1;
        unsigned iconFlags = 0;
        if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
        {
            return;
        }

        JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn VN%X sz:%d>.\n", idxVal, arrLenVn, arrSize);
        if (arrSize > 0 && idxVal < arrSize && idxVal >= 0)
        {
            JITDUMP("Removing range check\n");
            m_pCompiler->optRemoveRangeCheck(treeParent, stmt, true, GTF_ASG, true /* force remove */);
            return;
        }
    }

    GetRangeMap()->RemoveAll();
    GetOverflowMap()->RemoveAll();

    // Get the range for this index.
    SearchPath* path = new (m_pCompiler->getAllocator()) SearchPath(m_pCompiler->getAllocator());

    Range range = GetRange(block, stmt, treeIndex, path, false DEBUGARG(0));

    // If the upper or lower limit is unknown (top), including the case where the search
    // went over budget or too deep, return early.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        // Note: If we had stack depth too deep in the GetRange call, we'd be
        // too deep even in the DoesOverflow call. So return early.
        return;
    }

    if (DoesOverflow(block, stmt, treeIndex, path))
    {
        JITDUMP("Method determined to overflow.\n");
        return;
    }

    JITDUMP("Range value %s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
    path->RemoveAll();
    Widen(block, stmt, treeIndex, path, &range);

    // If the upper or lower limit is still unknown after widening, return.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        return;
    }

    // Check whether the range lies between the lower and upper bound values.
    if (BetweenBounds(range, 0, bndsChk->gtArrLen))
    {
        JITDUMP("[RangeCheck::OptimizeRangeCheck] Between bounds\n");
        m_pCompiler->optRemoveRangeCheck(treeParent, stmt, true, GTF_ASG, true /* force remove */);
    }
    return;
}
Example #5
0
void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTreeStmt* stmt, GenTree* treeParent)
{
    // Check if we are dealing with a bounds check node.
    if (treeParent->OperGet() != GT_COMMA)
    {
        return;
    }

    // If we are not looking at array bounds check, bail.
    GenTree* tree = treeParent->gtOp.gtOp1;
    if (!tree->OperIsBoundsCheck())
    {
        return;
    }

    GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
    m_pCurBndsChk             = bndsChk;
    GenTree* treeIndex        = bndsChk->gtIndex;

    // Take care of constant index first, like a[2], for example.
    ValueNum idxVn    = m_pCompiler->vnStore->VNConservativeNormalValue(treeIndex->gtVNPair);
    ValueNum arrLenVn = m_pCompiler->vnStore->VNConservativeNormalValue(bndsChk->gtArrLen->gtVNPair);
    int      arrSize  = 0;

    if (m_pCompiler->vnStore->IsVNConstant(arrLenVn))
    {
        ssize_t  constVal  = -1;
        unsigned iconFlags = 0;

        if (m_pCompiler->optIsTreeKnownIntValue(true, bndsChk->gtArrLen, &constVal, &iconFlags))
        {
            arrSize = (int)constVal;
        }
    }
    else
#ifdef FEATURE_SIMD
        if (tree->gtOper != GT_SIMD_CHK
#ifdef FEATURE_HW_INTRINSICS
            && tree->gtOper != GT_HW_INTRINSIC_CHK
#endif // FEATURE_HW_INTRINSICS
            )
#endif // FEATURE_SIMD
    {
        arrSize = GetArrLength(arrLenVn);
    }

    JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
    if (m_pCompiler->vnStore->IsVNConstant(idxVn) && (arrSize > 0))
    {
        ssize_t  idxVal    = -1;
        unsigned iconFlags = 0;
        if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
        {
            return;
        }

        JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn " FMT_VN " sz:%d>.\n", idxVal, arrLenVn,
                arrSize);
        if ((idxVal < arrSize) && (idxVal >= 0))
        {
            JITDUMP("Removing range check\n");
            m_pCompiler->optRemoveRangeCheck(treeParent, stmt);
            return;
        }
    }

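    // The constant-index fast path did not remove the check; fall back to symbolic range
    // analysis, starting from fresh range/overflow caches and an empty search path.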
    GetRangeMap()->RemoveAll();
    GetOverflowMap()->RemoveAll();
    m_pSearchPath = new (m_alloc) SearchPath(m_alloc);

    // Get the range for this index.
    Range range = GetRange(block, treeIndex, false DEBUGARG(0));

    // If the upper or lower limit is unknown (top), including the case where the search
    // went over budget or too deep, return early.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        // Note: If we had stack depth too deep in the GetRange call, we'd be
        // too deep even in the DoesOverflow call. So return early.
        return;
    }

    if (DoesOverflow(block, treeIndex))
    {
        JITDUMP("Method determined to overflow.\n");
        return;
    }

    JITDUMP("Range value %s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
    m_pSearchPath->RemoveAll();
    Widen(block, treeIndex, &range);

    // If the upper or lower limit is still unknown after widening, return.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        return;
    }

    // Check whether the range lies between the lower and upper bound values.
    if (BetweenBounds(range, 0, bndsChk->gtArrLen))
    {
        JITDUMP("[RangeCheck::OptimizeRangeCheck] Between bounds\n");
        m_pCompiler->optRemoveRangeCheck(treeParent, stmt);
    }
    return;
}
Example #6
0
void TreeLifeUpdater<ForCodeGen>::UpdateLifeVar(GenTree* tree)
{
    GenTree* indirAddrLocal = compiler->fgIsIndirOfAddrOfLocal(tree);
    assert(tree->OperIsNonPhiLocal() || indirAddrLocal != nullptr);

    // Get the local var tree -- if "tree" is "Ldobj(addr(x))", or "ind(addr(x))" this is "x", else it's "tree".
    GenTree* lclVarTree = indirAddrLocal;
    if (lclVarTree == nullptr)
    {
        lclVarTree = tree;
    }
    unsigned int lclNum = lclVarTree->gtLclVarCommon.gtLclNum;
    LclVarDsc*   varDsc = compiler->lvaTable + lclNum;

#ifdef DEBUG
#if !defined(_TARGET_AMD64_)
    // There are no addr nodes on ARM, and we are experimenting with encountering vars in 'random' order.
    // Struct fields are not traversed in a consistent order, so ignore them when
    // verifying that we see the var nodes in execution order.
    if (ForCodeGen)
    {
        if (tree->OperIsIndir())
        {
            assert(indirAddrLocal != NULL);
        }
        else if (tree->gtNext != NULL && tree->gtNext->gtOper == GT_ADDR &&
                 ((tree->gtNext->gtNext == NULL || !tree->gtNext->gtNext->OperIsIndir())))
        {
            assert(tree->IsLocal()); // Can only take the address of a local.
            // The ADDR might occur in a context where the address it contributes is eventually
            // dereferenced, so we can't say that this is not a use or def.
        }
    }
#endif // !_TARGET_AMD64_
#endif // DEBUG

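    // Remember the node being processed and seed newLife from the current live set;
    // the code below edits newLife and commits it only if it actually changed.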
    compiler->compCurLifeTree = tree;
    VarSetOps::Assign(compiler, newLife, compiler->compCurLife);

    // By the time we reach codegen, a struct may no longer be TYP_STRUCT, so we also have to
    // check lvPromoted, for the case where its fields are being tracked individually.
    if (!varDsc->lvTracked && !varDsc->lvPromoted)
    {
        return;
    }

    // If it's a partial definition, then variable "x" must have had a previous definition site where it was born.
    bool isBorn  = ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & GTF_VAR_USEASG) == 0);
    bool isDying = ((tree->gtFlags & GTF_VAR_DEATH) != 0);
    bool spill   = ((tree->gtFlags & GTF_SPILL) != 0);

    // Since all tracked vars are register candidates, but not all are in registers at all times,
    // we maintain two separate sets of variables - the total set of variables that are either
    // born or dying here, and the subset of those that are on the stack
    VarSetOps::ClearD(compiler, stackVarDeltaSet);

    if (isBorn || isDying)
    {
        VarSetOps::ClearD(compiler, varDeltaSet);

        if (varDsc->lvTracked)
        {
            VarSetOps::AddElemD(compiler, varDeltaSet, varDsc->lvVarIndex);
            if (ForCodeGen)
            {
                if (isBorn && varDsc->lvIsRegCandidate() && tree->gtHasReg())
                {
                    compiler->codeGen->genUpdateVarReg(varDsc, tree);
                }
                if (varDsc->lvIsInReg() && tree->gtRegNum != REG_NA)
                {
                    compiler->codeGen->genUpdateRegLife(varDsc, isBorn, isDying DEBUGARG(tree));
                }
                else
                {
                    VarSetOps::AddElemD(compiler, stackVarDeltaSet, varDsc->lvVarIndex);
                }
            }
        }
        else if (varDsc->lvPromoted)
        {
            // If hasDeadTrackedFieldVars is true, then, for a LDOBJ(ADDR(<promoted struct local>)),
            // *deadTrackedFieldVars indicates which tracked field vars are dying.
            bool hasDeadTrackedFieldVars = false;

            if (indirAddrLocal != nullptr && isDying)
            {
                assert(!isBorn); // GTF_VAR_DEATH only set for LDOBJ last use.
                VARSET_TP* deadTrackedFieldVars = nullptr;
                hasDeadTrackedFieldVars =
                    compiler->LookupPromotedStructDeathVars(indirAddrLocal, &deadTrackedFieldVars);
                if (hasDeadTrackedFieldVars)
                {
                    VarSetOps::Assign(compiler, varDeltaSet, *deadTrackedFieldVars);
                }
            }

            for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
            {
                LclVarDsc* fldVarDsc = &(compiler->lvaTable[i]);
                noway_assert(fldVarDsc->lvIsStructField);
                if (fldVarDsc->lvTracked)
                {
                    unsigned fldVarIndex = fldVarDsc->lvVarIndex;
                    noway_assert(fldVarIndex < compiler->lvaTrackedCount);
                    if (!hasDeadTrackedFieldVars)
                    {
                        VarSetOps::AddElemD(compiler, varDeltaSet, fldVarIndex);
                        if (ForCodeGen)
                        {
                            // We repeat this call here and below to avoid the VarSetOps::IsMember
                            // test in this, the common case, where we have no deadTrackedFieldVars.
                            if (fldVarDsc->lvIsInReg())
                            {
                                if (isBorn)
                                {
                                    compiler->codeGen->genUpdateVarReg(fldVarDsc, tree);
                                }
                                compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(tree));
                            }
                            else
                            {
                                VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
                            }
                        }
                    }
                    else if (ForCodeGen && VarSetOps::IsMember(compiler, varDeltaSet, fldVarIndex))
                    {
                        if (compiler->lvaTable[i].lvIsInReg())
                        {
                            if (isBorn)
                            {
                                compiler->codeGen->genUpdateVarReg(fldVarDsc, tree);
                            }
                            compiler->codeGen->genUpdateRegLife(fldVarDsc, isBorn, isDying DEBUGARG(tree));
                        }
                        else
                        {
                            VarSetOps::AddElemD(compiler, stackVarDeltaSet, fldVarIndex);
                        }
                    }
                }
            }
        }

        // First, update the live set
        if (isDying)
        {
            // We'd like to be able to assert the following, however if we are walking
            // through a qmark/colon tree, we may encounter multiple last-use nodes.
            // assert (VarSetOps::IsSubset(compiler, regVarDeltaSet, newLife));
            VarSetOps::DiffD(compiler, newLife, varDeltaSet);
        }
        else
        {
            // This shouldn't be in newLife, unless this is debug code, in which
            // case we keep vars live everywhere, OR the variable is address-exposed,
            // OR this block is part of a try block, in which case it may be live at the handler
            // Could add a check that, if it's in newLife, that it's also in
            // fgGetHandlerLiveVars(compCurBB), but seems excessive
            //
            // For a dead store, it can be the case that we set both isBorn and isDying to true.
            // (We don't eliminate dead stores under MinOpts, so we can't assume they're always
            // eliminated.)  If it's both, we handled it above.
            VarSetOps::UnionD(compiler, newLife, varDeltaSet);
        }
    }

    if (!VarSetOps::Equal(compiler, compiler->compCurLife, newLife))
    {
#ifdef DEBUG
        if (compiler->verbose)
        {
            printf("\t\t\t\t\t\t\tLive vars: ");
            dumpConvertedVarSet(compiler, compiler->compCurLife);
            printf(" => ");
            dumpConvertedVarSet(compiler, newLife);
            printf("\n");
        }
#endif // DEBUG

        VarSetOps::Assign(compiler, compiler->compCurLife, newLife);

        if (ForCodeGen)
        {
            // Only add vars to gcInfo.gcVarPtrSetCur if they are currently on the stack, since
            // gcInfo.gcTrkStkPtrLcls includes all TRACKED vars that EVER live on the stack
            // (i.e. are not always in a register).
            VarSetOps::Assign(compiler, gcTrkStkDeltaSet, compiler->codeGen->gcInfo.gcTrkStkPtrLcls);
            VarSetOps::IntersectionD(compiler, gcTrkStkDeltaSet, stackVarDeltaSet);
            if (!VarSetOps::IsEmpty(compiler, gcTrkStkDeltaSet))
            {
#ifdef DEBUG
                if (compiler->verbose)
                {
                    printf("\t\t\t\t\t\t\tGCvars: ");
                    dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
                    printf(" => ");
                }
#endif // DEBUG

                if (isBorn)
                {
                    VarSetOps::UnionD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
                }
                else
                {
                    VarSetOps::DiffD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, gcTrkStkDeltaSet);
                }

#ifdef DEBUG
                if (compiler->verbose)
                {
                    dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur);
                    printf("\n");
                }
#endif // DEBUG
            }
#ifdef USING_SCOPE_INFO
            compiler->codeGen->siUpdate();
#endif // USING_SCOPE_INFO
        }
    }

    if (ForCodeGen && spill)
    {
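        // The variable is being spilled to its stack home; if that home is a tracked GC slot
        // that is not already live, mark it live in gcVarPtrSetCur.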
        assert(!varDsc->lvPromoted);
        compiler->codeGen->genSpillVar(tree);
        if (VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcTrkStkPtrLcls, varDsc->lvVarIndex))
        {
            if (!VarSetOps::IsMember(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex))
            {
                VarSetOps::AddElemD(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
#ifdef DEBUG
                if (compiler->verbose)
                {
                    printf("\t\t\t\t\t\t\tVar V%02u becoming live\n", varDsc - compiler->lvaTable);
                }
#endif // DEBUG
            }
        }
    }
}
Example #7
0
void LegacyPolicy::NoteInt(InlineObservation obs, int value)
{
    switch (obs)
    {
    case InlineObservation::CALLEE_MAXSTACK:
        {
            assert(m_IsForceInlineKnown);

            unsigned calleeMaxStack = static_cast<unsigned>(value);

            if (!m_IsForceInline && (calleeMaxStack > SMALL_STACK_SIZE))
            {
                SetNever(InlineObservation::CALLEE_MAXSTACK_TOO_BIG);
            }

            break;
        }

    case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
        {
            assert(m_IsForceInlineKnown);
            assert(value != 0);

            unsigned basicBlockCount = static_cast<unsigned>(value);

            if (!m_IsForceInline && (basicBlockCount > MAX_BASIC_BLOCKS))
            {
                SetNever(InlineObservation::CALLEE_TOO_MANY_BASIC_BLOCKS);
            }

            break;
        }

    case InlineObservation::CALLEE_IL_CODE_SIZE:
        {
            assert(m_IsForceInlineKnown);
            assert(value != 0);
            m_CodeSize = static_cast<unsigned>(value);

            // Now that we know size and forceinline state,
            // update candidacy.
            if (m_CodeSize <= ALWAYS_INLINE_SIZE)
            {
                // Candidate based on small size
                SetCandidate(InlineObservation::CALLEE_BELOW_ALWAYS_INLINE_SIZE);
            }
            else if (m_IsForceInline)
            {
                // Candidate based on force inline
                SetCandidate(InlineObservation::CALLEE_IS_FORCE_INLINE);
            }
            else if (m_CodeSize <= m_Compiler->getImpInlineSize())
            {
                // Candidate, pending profitability evaluation
                SetCandidate(InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
            }
            else
            {
                // Callee too big, not a candidate
                SetNever(InlineObservation::CALLEE_TOO_MUCH_IL);
            }

            break;
        }

    case InlineObservation::CALLEE_OPCODE_NORMED:
    case InlineObservation::CALLEE_OPCODE:
        {
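            // Map the IL opcode onto the code-sequence state machine's opcode space,
            // normalizing ldarga.s / ldloca.s when the observation says the operand was normed,
            // then advance the state machine.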
            if (m_StateMachine != nullptr)
            {
                OPCODE opcode = static_cast<OPCODE>(value);
                SM_OPCODE smOpcode = CodeSeqSM::MapToSMOpcode(opcode);
                noway_assert(smOpcode < SM_COUNT);
                noway_assert(smOpcode != SM_PREFIX_N);
                if (obs == InlineObservation::CALLEE_OPCODE_NORMED)
                {
                    if (smOpcode == SM_LDARGA_S)
                    {
                        smOpcode = SM_LDARGA_S_NORMED;
                    }
                    else if (smOpcode == SM_LDLOCA_S)
                    {
                        smOpcode = SM_LDLOCA_S_NORMED;
                    }
                }

                m_StateMachine->Run(smOpcode DEBUGARG(0));
            }
            break;
        }

    case InlineObservation::CALLSITE_FREQUENCY:
        assert(m_CallsiteFrequency == InlineCallsiteFrequency::UNUSED);
        m_CallsiteFrequency = static_cast<InlineCallsiteFrequency>(value);
        assert(m_CallsiteFrequency != InlineCallsiteFrequency::UNUSED);
        break;

    default:
        // Ignore all other information
        break;
    }
}