Example #1
/**************************************************************************************
 *
 * Corresponding to the live definition pushes, pop the stacks as we finish a sub-path
 * of the graph originating from the block. Refer to SSA renaming for any additional info.
 * "curSsaName" tracks the currently live definitions.
 */
void Compiler::optBlockCopyPropPopStacks(BasicBlock* block, LclNumToGenTreePtrStack* curSsaName)
{
    for (GenTreePtr stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
    {
        for (GenTreePtr tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
        {
            if (!tree->IsLocal())
            {
                continue;
            }
            unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
            if (fgExcludeFromSsa(lclNum))
            {
                continue;
            }
            if (tree->gtFlags & GTF_VAR_DEF)
            {
                GenTreePtrStack* stack = nullptr;
                curSsaName->Lookup(lclNum, &stack);
                stack->Pop();
                if (stack->Height() == 0)
                {
                    curSsaName->Remove(lclNum);
                }
            }
        }
    }
}
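For orientation, here is a minimal stand-alone sketch (not CoreCLR code; the map type is a hypothetical stand-in for LclNumToGenTreePtrStack) of the push/pop discipline this function unwinds: each definition pushes onto its local's stack while a sub-path is walked, and is popped again when the sub-path is finished, with empty stacks removed exactly as above.

#include <cassert>
#include <map>
#include <vector>

// Hypothetical stand-in: local number -> stack of currently live definitions.
using DefStackMap = std::map<unsigned, std::vector<const void*>>;

// Called when a definition becomes live on the way down a sub-path.
void PushDef(DefStackMap& curSsaName, unsigned lclNum, const void* def)
{
    curSsaName[lclNum].push_back(def);
}

// Called when the sub-path is finished; mirrors optBlockCopyPropPopStacks.
void PopDef(DefStackMap& curSsaName, unsigned lclNum)
{
    auto it = curSsaName.find(lclNum);
    assert(it != curSsaName.end());
    it->second.pop_back();
    if (it->second.empty())
    {
        curSsaName.erase(it);
    }
}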
Example #2
void CodeGen::genDoneAddressableFloat(GenTreePtr      tree,
                                      regMaskTP       addrRegInt,
                                      regMaskTP       addrRegFlt,
                                      RegSet::KeepReg keptReg)
{
    assert(!(addrRegInt && addrRegFlt));

    if (addrRegInt)
    {
        return genDoneAddressable(tree, addrRegInt, keptReg);
    }
    else if (addrRegFlt)
    {
        if (keptReg == RegSet::KEEP_REG)
        {
            for (regNumber r = REG_FP_FIRST; r != REG_NA; r = regNextOfType(r, tree->TypeGet()))
            {
                regMaskTP mask = genRegMaskFloat(r, tree->TypeGet());
                // some masks take up more than one bit
                if ((mask & addrRegFlt) == mask)
                {
                    regSet.SetUsedRegFloat(tree, false);
                }
            }
        }
    }
}
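The "some masks take up more than one bit" comment refers to ARM's float register file, where a double value occupies a pair of single-precision registers. A small illustrative sketch (hypothetical types, not JIT code) of why the loop compares against the full mask rather than testing for any overlapping bit:

#include <cassert>
#include <cstdint>

using regMaskSketch = uint64_t; // hypothetical stand-in for regMaskTP

// Illustrative only: a double "register" covers two consecutive single registers,
// so its mask has two bits set and a containment test must compare the whole mask.
regMaskSketch FloatMask(unsigned firstRegIndex, bool isDouble)
{
    regMaskSketch mask = regMaskSketch(1) << firstRegIndex;
    if (isDouble)
    {
        mask |= regMaskSketch(1) << (firstRegIndex + 1);
    }
    return mask;
}

int main()
{
    regMaskSketch inUse = FloatMask(2, true);                    // the pair {s2, s3}
    assert((FloatMask(2, true) & inUse) == FloatMask(2, true));  // fully covered
    assert((FloatMask(3, true) & inUse) != FloatMask(3, true));  // only partially covered
    return 0;
}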
Example #3
//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
//    tree - the node to lower
//
// Return Value:
//    None.
//
void Lowering::LowerRotate(GenTreePtr tree)
{
    if (tree->OperGet() == GT_ROL)
    {
        // There is no ROL instruction on ARM. Convert ROL into ROR.
        GenTreePtr rotatedValue        = tree->gtOp.gtOp1;
        unsigned   rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
        GenTreePtr rotateLeftIndexNode = tree->gtOp.gtOp2;

        if (rotateLeftIndexNode->IsCnsIntOrI())
        {
            ssize_t rotateLeftIndex                 = rotateLeftIndexNode->gtIntCon.gtIconVal;
            ssize_t rotateRightIndex                = rotatedValueBitSize - rotateLeftIndex;
            rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
        }
        else
        {
            GenTreePtr tmp =
                comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
            BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
            tree->gtOp.gtOp2 = tmp;
        }
        tree->ChangeOper(GT_ROR);
    }
    ContainCheckShiftRotate(tree->AsOp());
}
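The conversion relies on the identity rotl(x, n) == rotr(x, width - n); for a non-constant index, negating the amount gives the same result because rotation counts wrap modulo the bit width. A self-contained sketch of that identity (plain C++, not JIT code):

#include <cassert>
#include <cstdint>

uint32_t RotateRight32(uint32_t value, unsigned amount)
{
    amount &= 31;
    return (value >> amount) | (value << ((32 - amount) & 31));
}

// rotl(x, n) == rotr(x, 32 - n); rotr(x, -n) is equivalent since amounts wrap mod 32.
uint32_t RotateLeft32(uint32_t value, unsigned amount)
{
    return RotateRight32(value, (32 - (amount & 31)) & 31);
}

int main()
{
    assert(RotateLeft32(0x80000001u, 1) == 0x00000003u);
    assert(RotateLeft32(0x12345678u, 0) == 0x12345678u);
    return 0;
}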
Example #4
void CodeGen::genCodeForTreeFloat(GenTreePtr     tree,
                                  RegSet::RegisterPreference *pref)
{
    genTreeOps      oper;
    unsigned        kind;

    assert(tree);
    assert(tree->gtOper != GT_STMT);

    // What kind of node do we have?
    oper = tree->OperGet();
    kind = tree->OperKind();

    if  (kind & GTK_CONST)
    {
        genFloatConst(tree, pref);
    }
    else if (kind & GTK_LEAF)
    {
        genFloatLeaf(tree, pref);
    }
    else if (kind & GTK_SMPOP)
    {
        genFloatSimple(tree, pref);
    }
    else
    {
        assert(oper == GT_CALL);
        genCodeForCall(tree, true);
    }       
}
Example #5
//------------------------------------------------------------------------
// DecomposeInd: Decompose GT_IND.
//
// Arguments:
//    use - the LIR::Use object for the def that needs to be decomposed.
//
// Return Value:
//    The next node to process.
//
GenTree* DecomposeLongs::DecomposeInd(LIR::Use& use)
{
    GenTree* indLow = use.Def();

    LIR::Use address(Range(), &indLow->gtOp.gtOp1, indLow);
    address.ReplaceWithLclVar(m_compiler, m_blockWeight);
    JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n");
    DISPTREERANGE(Range(), address.Def());

    // Change the type of lower ind.
    indLow->gtType = TYP_INT;

    // Create tree of ind(addr+4)
    GenTreePtr addrBase     = indLow->gtGetOp1();
    GenTreePtr addrBaseHigh = new (m_compiler, GT_LCL_VAR)
    GenTreeLclVar(GT_LCL_VAR, addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
    GenTreePtr addrHigh =
        new (m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
    GenTreePtr indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr);

    m_compiler->lvaIncRefCnts(addrBaseHigh);

    Range().InsertAfter(indLow, addrBaseHigh, addrHigh, indHigh);

    return FinalizeDecomposition(use, indLow, indHigh);
}
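Conceptually, the decomposition above turns one 64-bit indirection into a pair of 32-bit loads from addr and addr + 4. A rough stand-alone sketch of that access pattern (illustrative only; assumes the little-endian layout targeted here):

#include <cstdint>
#include <cstring>

// Illustrative only: read a 64-bit value as ind(addr) for the low half and
// ind(addr + 4) for the high half, the same split DecomposeInd produces.
void LoadLongAsPair(const uint8_t* addr, uint32_t* loResult, uint32_t* hiResult)
{
    std::memcpy(loResult, addr, sizeof(uint32_t));     // low 32 bits
    std::memcpy(hiResult, addr + 4, sizeof(uint32_t)); // high 32 bits
}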
Example #6
bool RangeCheck::IsBinOpMonotonicallyIncreasing(GenTreePtr op1, GenTreePtr op2, genTreeOps oper, SearchPath* path)
{
    JITDUMP("[RangeCheck::IsBinOpMonotonicallyIncreasing] %p, %p\n", dspPtr(op1), dspPtr(op2));
    // Check if we have a var + const.
    if (op2->OperGet() == GT_LCL_VAR)
    {
        jitstd::swap(op1, op2);
    }
    if (op1->OperGet() != GT_LCL_VAR)
    {
        JITDUMP("Not monotonic because op1 is not lclVar.\n");
        return false;
    }
    switch (op2->OperGet())
    {
    case GT_LCL_VAR:
        return IsMonotonicallyIncreasing(op1, path) && 
            IsMonotonicallyIncreasing(op2, path);

    case GT_CNS_INT:
        return oper == GT_ADD && op2->AsIntConCommon()->IconValue() >= 0 &&
            IsMonotonicallyIncreasing(op1, path);

    default:
        JITDUMP("Not monotonic because expression is not recognized.\n");
        return false;
    }
}
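The GT_CNS_INT arm accepts the common induction-variable shape "i = i + c" with a non-negative constant c. A tiny stand-alone illustration of code that fits that pattern (not RangeCheck code):

// Illustrative only: the update "i += 2" is i = i + 2 with 2 >= 0, so the
// induction variable is treated as monotonically increasing.
int SumEveryOther(const int* data, int n)
{
    int sum = 0;
    for (int i = 0; i < n; i += 2)
    {
        sum += data[i];
    }
    return sum;
}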
Example #7
//------------------------------------------------------------------------
// TreeNodeInfoInitCmp: Set the NodeInfo for a comparison node.
//
// Arguments:
//    tree - the comparison node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
{
    ContainCheckCompare(tree->AsOp());

    TreeNodeInfo* info = &(tree->gtLsraInfo);

    info->srcCount = tree->gtOp.gtOp2->isContained() ? 1 : 2;
    info->dstCount = tree->OperIs(GT_CMP) ? 0 : 1;
}
Example #8
GenTreePtr CodeGen::genMakeAddressableFloat(GenTreePtr tree, 
                                            regMaskTP * regMaskIntPtr, 
                                            regMaskTP * regMaskFltPtr, 
                                            bool bCollapseConstantDoubles)
{    
    *regMaskIntPtr = *regMaskFltPtr = 0;

    switch (tree->OperGet())
    {    

    case GT_LCL_VAR:
        genMarkLclVar(tree);
        __fallthrough;

    case GT_REG_VAR:
    case GT_LCL_FLD:
    case GT_CLS_VAR:
        return tree;

    case GT_IND:
        // Try to make the address directly addressable

        if  (genMakeIndAddrMode(tree->gtOp.gtOp1,
                                tree,
                                false,
                                RBM_ALLFLOAT,
                                RegSet::KEEP_REG,
                                regMaskIntPtr,
                                false))
        {
            genUpdateLife(tree);
            return tree;
        }
        else
        {
            GenTreePtr addr = tree;
            tree = tree->gtOp.gtOp1;
            genCodeForTree(tree, 0);
            regSet.rsMarkRegUsed(tree, addr);

            *regMaskIntPtr = genRegMask(tree->gtRegNum);                        
            return addr;
        }

        // fall through

    default:
        genCodeForTreeFloat(tree);
        regSet.SetUsedRegFloat(tree, true);

        // update mask
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());

        return tree;
        break;
    }    
}
Example #9
bool RegSet::IsLockedRegFloat(GenTreePtr tree)
{
    /* The value must be sitting in a register */
    assert(tree);
    assert(tree->gtFlags & GTF_REG_VAL);
    assert(varTypeIsFloating(tree->TypeGet()));

    regMaskTP  regMask = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
    return (rsGetMaskLock() & regMask) == regMask;
}
Example #10
void GCInfo::gcMarkRegPtrVal(GenTreePtr tree)
{
    if (varTypeIsGC(tree->TypeGet()))
    {
        if (tree->gtOper == GT_LCL_VAR)
            compiler->codeGen->genMarkLclVar(tree);
        if (tree->InReg())
        {
            gcMarkRegSetNpt(genRegMask(tree->gtRegNum));
        }
    }
}
Example #11
void CodeGen::genKeepAddressableFloat(GenTreePtr tree, regMaskTP *  regMaskIntPtr, regMaskTP *  regMaskFltPtr)
{
    regMaskTP regMaskInt, regMaskFlt;

    regMaskInt = *regMaskIntPtr;
    regMaskFlt = *regMaskFltPtr;

    *regMaskIntPtr = *regMaskFltPtr = 0;
        
    switch (tree->OperGet())
    {
    case GT_REG_VAR:
        // If register has been spilled, unspill it
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(&compiler->lvaTable[tree->gtLclVarCommon.gtLclNum]);
        }        
        break;

    case GT_CNS_DBL:
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(tree);
        }
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
        break;

    case GT_LCL_FLD:
    case GT_LCL_VAR:
    case GT_CLS_VAR:
        break;

    case GT_IND:
        if (regMaskFlt == RBM_NONE)
        {
            *regMaskIntPtr = genKeepAddressable(tree, regMaskInt, 0);
            *regMaskFltPtr = 0;
            return;
        } 
        __fallthrough;

    default:
        *regMaskIntPtr = 0;
        if (tree->gtFlags & GTF_SPILLED)
        {
            UnspillFloat(tree);
        }
        *regMaskFltPtr = genRegMaskFloat(tree->gtRegNum, tree->TypeGet());
        break;
    }
}
Example #12
//------------------------------------------------------------------------
// ContainCheckCast: determine whether the source of a CAST node should be contained.
//
// Arguments:
//    node - pointer to the node
//
void Lowering::ContainCheckCast(GenTreeCast* node)
{
#ifdef _TARGET_ARM_
    GenTreePtr castOp     = node->CastOp();
    var_types  castToType = node->CastToType();
    var_types  srcType    = castOp->TypeGet();

    if (varTypeIsLong(castOp))
    {
        assert(castOp->OperGet() == GT_LONG);
        MakeSrcContained(node, castOp);
    }
#endif // _TARGET_ARM_
}
Example #13
void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
{
    GenTreePtr dst  = tree;
    GenTreePtr addr = tree->gtOp.gtOp1;
    GenTreePtr src  = tree->gtOp.gtOp2;

    if (addr->OperGet() == GT_LEA)
    {
        // In the case where we are doing a helper assignment, if the dst
        // is an indir through an lea, we need to actually instantiate the
        // lea in a register
        GenTreeAddrMode* lea = addr->AsAddrMode();

        short leaSrcCount = 0;
        if (lea->Base() != nullptr)
        {
            leaSrcCount++;
        }
        if (lea->Index() != nullptr)
        {
            leaSrcCount++;
        }
        lea->gtLsraInfo.srcCount = leaSrcCount;
        lea->gtLsraInfo.dstCount = 1;
    }

#if NOGC_WRITE_BARRIERS
    NYI_ARM("NOGC_WRITE_BARRIERS");

    // For the NOGC JIT Helper calls
    //
    // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
    // the 'src'  goes into x15 (REG_WRITE_BARRIER)
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
#else
    // For the standard JIT Helper calls
    // op1 goes into REG_ARG_0 and
    // op2 goes into REG_ARG_1
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
#endif // NOGC_WRITE_BARRIERS

    // Both src and dst must reside in a register, which they should since we haven't set
    // either of them as contained.
    assert(addr->gtLsraInfo.dstCount == 1);
    assert(src->gtLsraInfo.dstCount == 1);
}
Example #14
Compiler::fgWalkResult CodeGen::genRegVarDiesInSubTreeWorker(GenTreePtr* pTree, Compiler::fgWalkData* data)
{
    GenTreePtr                  tree  = *pTree;
    genRegVarDiesInSubTreeData* pData = (genRegVarDiesInSubTreeData*)data->pCallbackData;

    // If this regvar is dying here and it lives in the register we're looking for, note it and stop the walk.
    if (tree->IsRegVar() && tree->IsRegVarDeath() && tree->gtRegVar.gtRegNum == pData->reg)
    {
        pData->result = true;
        return Compiler::WALK_ABORT;
    }

    return Compiler::WALK_CONTINUE;
}
Example #15
regNumber CodeGen::genAssignArithFloat(genTreeOps oper, 
                                       GenTreePtr dst, regNumber dstreg, 
                                       GenTreePtr src, regNumber srcreg)
{
    regNumber result;
    
    // dst should be a regvar or memory

    if (dst->IsRegVar())
    {
        regNumber reg = dst->gtRegNum;

        if (src->IsRegVar())
        {                
            inst_RV_RV(ins_MathOp(oper, dst->gtType), reg, src->gtRegNum, dst->gtType);
        }
        else
        {
            inst_RV_TT(ins_MathOp(oper, dst->gtType), reg, src, 0, EmitSize(dst));
        }
        result = reg;
    }
    else // dst in memory 
    {
        // Since this is an asgop, the ACTUAL destination is memory,
        // but it is also one of the sources, and SSE ops do not allow memory destinations,
        // so we have loaded it into a reg; that is what dstreg represents.
        assert(dstreg != REG_NA);

        if ( (src->InReg()))
        {
            inst_RV_RV(ins_MathOp(oper, dst->gtType), dstreg, src->gtRegNum, dst->gtType);
        }
        else
        {
            //mem mem operation
            inst_RV_TT(ins_MathOp(oper, dst->gtType), dstreg, src, 0, EmitSize(dst));
        }

        dst->gtFlags &= ~GTF_REG_VAL; // ???

        inst_TT_RV(ins_FloatStore(dst->gtType), dst, dstreg, 0, EmitSize(dst));

        result = REG_NA;
    }

    return result;
}
Example #16
GenTreeStmt* BasicBlock::lastTopLevelStmt()
{
    if (bbTreeList == nullptr)
        return nullptr;

    GenTreePtr stmt = lastStmt();

#ifndef LEGACY_BACKEND
    while ((stmt->gtFlags & GTF_STMT_TOP_LEVEL) == 0)
    {
        stmt = stmt->gtPrev;
    }
#endif // !LEGACY_BACKEND

    return stmt->AsStmt();
}
Example #17
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node
//
// Arguments:
//    argNode - a GT_PUTARG_STK node
//
// Return Value:
//    None.
//
// Notes:
//    Set the child node(s) to be contained when we have a multireg arg
//
void Lowering::TreeNodeInfoInitPutArgStk(GenTreePutArgStk* argNode, fgArgTabEntryPtr info)
{
    assert(argNode->gtOper == GT_PUTARG_STK);

    GenTreePtr putArgChild = argNode->gtOp.gtOp1;

    // Initialize 'argNode' as not contained, as this is both the default case
    //  and how MakeSrcContained expects to find things set up.
    //
    argNode->gtLsraInfo.srcCount = 1;
    argNode->gtLsraInfo.dstCount = 0;

    // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST)? If so, it must be a multireg pass-by-value struct
    if ((putArgChild->TypeGet() == TYP_STRUCT) || (putArgChild->OperGet() == GT_FIELD_LIST))
    {
        // We will use store instructions that each write a register sized value

        if (putArgChild->OperGet() == GT_FIELD_LIST)
        {
            // We consume all of the items in the GT_FIELD_LIST
            argNode->gtLsraInfo.srcCount = info->numSlots;
            putArgChild->SetContained();
        }
        else
        {
#ifdef _TARGET_ARM64_
            // We could use a ldp/stp sequence so we need two internal registers
            argNode->gtLsraInfo.internalIntCount = 2;
#else  // _TARGET_ARM_
            // We could use an ldr/str sequence so we need an internal register
            argNode->gtLsraInfo.internalIntCount = 1;
#endif // _TARGET_ARM_

            if (putArgChild->OperGet() == GT_OBJ)
            {
                GenTreePtr objChild = putArgChild->gtOp.gtOp1;
                if (objChild->OperGet() == GT_LCL_VAR_ADDR)
                {
                    // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR
                    // as one contained operation
                    //
                    MakeSrcContained(putArgChild, objChild);
                }
            }

            // We will generate all of the code for the GT_PUTARG_STK and its child node
            // as one contained operation
            //
            MakeSrcContained(argNode, putArgChild);
        }
    }
    else
    {
        // We must not have a multi-reg struct
        assert(info->numSlots == 1);
    }
}
Example #18
//------------------------------------------------------------------------
// TreeNodeInfoInitCmp: Set the NodeInfo for a comparison node.
//
// Arguments:
//    tree - the comparison node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitCmp(GenTreePtr tree)
{
    TreeNodeInfo* info = &(tree->gtLsraInfo);

    info->srcCount = 2;
    info->dstCount = tree->OperIs(GT_CMP) ? 0 : 1;

    CheckImmedAndMakeContained(tree, tree->gtOp.gtOp2);
}
Example #19
//------------------------------------------------------------------------
// TreeNodeInfoInitShiftRotate: Set the NodeInfo for a shift or rotate.
//
// Arguments:
//    tree      - The node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitShiftRotate(GenTree* tree)
{
    TreeNodeInfo* info = &(tree->gtLsraInfo);
    LinearScan*   l    = m_lsra;

    info->srcCount = 2;
    info->dstCount = 1;

    GenTreePtr shiftBy = tree->gtOp.gtOp2;
    GenTreePtr source  = tree->gtOp.gtOp1;
    if (shiftBy->IsCnsIntOrI())
    {
        MakeSrcContained(tree, shiftBy);
    }

#ifdef _TARGET_ARM_

    // The first operand of a GT_LSH_HI and GT_RSH_LO oper is a GT_LONG so that
    // we can have a three operand form. Increment the srcCount.
    if (tree->OperGet() == GT_LSH_HI || tree->OperGet() == GT_RSH_LO)
    {
        assert(source->OperGet() == GT_LONG);
        source->SetContained();

        info->srcCount++;

        if (tree->OperGet() == GT_LSH_HI)
        {
            GenTreePtr sourceLo              = source->gtOp.gtOp1;
            sourceLo->gtLsraInfo.isDelayFree = true;
        }
        else
        {
            GenTreePtr sourceHi              = source->gtOp.gtOp2;
            sourceHi->gtLsraInfo.isDelayFree = true;
        }

        source->gtLsraInfo.hasDelayFreeSrc = true;
        info->hasDelayFreeSrc              = true;
    }

#endif // _TARGET_ARM_
}
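GT_LSH_HI produces the upper half of a 64-bit left shift, which needs bits from both halves of the GT_LONG operand; that is why the operand contributes two sources above. A stand-alone sketch of the arithmetic (illustrative only; shift amounts 1..31):

#include <cassert>
#include <cstdint>

// Illustrative only: the high half of (hi:lo) << shift for 1 <= shift <= 31.
uint32_t ShiftLeftHi(uint32_t lo, uint32_t hi, unsigned shift)
{
    return (hi << shift) | (lo >> (32 - shift));
}

int main()
{
    uint64_t value = 0x0000000180000000ULL;
    uint32_t lo    = uint32_t(value);
    uint32_t hi    = uint32_t(value >> 32);
    assert(ShiftLeftHi(lo, hi, 1) == uint32_t((value << 1) >> 32));
    return 0;
}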
Example #20
//------------------------------------------------------------------------
// ContainCheckShiftRotate: Determine whether a shift or rotate node's operands should be contained.
//
// Arguments:
//    node - the node we care about
//
void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
{
    GenTreePtr shiftBy = node->gtOp2;

#ifdef _TARGET_ARM_
    GenTreePtr source = node->gtOp1;
    if (node->OperIs(GT_LSH_HI, GT_RSH_LO))
    {
        assert(source->OperGet() == GT_LONG);
        MakeSrcContained(node, source);
    }
#else  // !_TARGET_ARM_
    assert(node->OperIsShiftOrRotate());
#endif // !_TARGET_ARM_

    if (shiftBy->IsCnsIntOrI())
    {
        MakeSrcContained(node, shiftBy);
    }
}
Example #21
//------------------------------------------------------------------------
// DecomposeInd: Decompose GT_IND.
//
// Arguments:
//    ppTree - pointer to the tree to decompose
//    data   - tree walk context
//
// Return Value:
//    None.
//
void DecomposeLongs::DecomposeInd(GenTree** ppTree, Compiler::fgWalkData* data)
{
    GenTreePtr indLow = *ppTree;
    GenTreeStmt* addrStmt = CreateTemporary(&indLow->gtOp.gtOp1);
    JITDUMP("[DecomposeInd]: Saving addr tree to a temp var:\n");
    DISPTREE(addrStmt);

    // Change the type of lower ind.
    indLow->gtType = TYP_INT;

    // Create tree of ind(addr+4)
    GenTreePtr addrBase = indLow->gtGetOp1();
    GenTreePtr addrBaseHigh = new(m_compiler, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR,
        addrBase->TypeGet(), addrBase->AsLclVarCommon()->GetLclNum(), BAD_IL_OFFSET);
    GenTreePtr addrHigh = new(m_compiler, GT_LEA) GenTreeAddrMode(TYP_REF, addrBaseHigh, nullptr, 0, genTypeSize(TYP_INT));
    GenTreePtr indHigh = new (m_compiler, GT_IND) GenTreeIndir(GT_IND, TYP_INT, addrHigh, nullptr);
    
    // Connect linear links
    SimpleLinkNodeAfter(addrBaseHigh, addrHigh);
    SimpleLinkNodeAfter(addrHigh, indHigh);

    FinalizeDecomposition(ppTree, data, indLow, indHigh);
}
Example #22
void        CodeGen::genComputeAddressableFloat(GenTreePtr tree, 
                                                regMaskTP addrRegInt,
                                                regMaskTP addrRegFlt,
                                                RegSet::KeepReg keptReg,
                                                regMaskTP needReg,
                                                RegSet::KeepReg keepReg,
                                                bool freeOnly /* = false */)
{
    noway_assert(genStillAddressable(tree));
    noway_assert(varTypeIsFloating(tree->TypeGet()));

    genDoneAddressableFloat(tree, addrRegInt, addrRegFlt, keptReg);

    regNumber reg;
    if (tree->gtFlags & GTF_REG_VAL)
    {
        reg = tree->gtRegNum;
        if (freeOnly && !(genRegMaskFloat(reg, tree->TypeGet()) & regSet.RegFreeFloat()))
        {
            goto LOAD_REG;
        }
    }
    else
    {
        LOAD_REG:
            RegSet::RegisterPreference pref(needReg, RBM_NONE);
            reg = regSet.PickRegFloat(tree->TypeGet(), &pref);
            genLoadFloat(tree, reg);
    }

    genMarkTreeInReg(tree, reg);

    if (keepReg == RegSet::KEEP_REG)
    {
        regSet.SetUsedRegFloat(tree, true);
    }
}
Example #23
regNumber CodeGen::genArithmFloat(genTreeOps oper, 
                                  GenTreePtr dst, regNumber dstreg,
                                  GenTreePtr src, regNumber srcreg, 
                                  bool bReverse)
{   
    regNumber result = REG_NA;

    assert(dstreg != REG_NA);

    if (bReverse)
    {
        GenTree *temp = src;
        regNumber tempreg = srcreg;
        src = dst;
        srcreg = dstreg;
        dst = temp;
        dstreg = tempreg;
    }

    if (srcreg == REG_NA)
    {
        if (src->IsRegVar())
        {                
            inst_RV_RV(ins_MathOp(oper, dst->gtType), dst->gtRegNum, src->gtRegNum, dst->gtType);
        }
        else
        {
            inst_RV_TT(ins_MathOp(oper, dst->gtType), dst->gtRegNum, src);
        }
    }
    else
    {
        inst_RV_RV(ins_MathOp(oper, dst->gtType), dstreg, srcreg, dst->gtType);
    }

    result = dstreg;

    assert (result != REG_NA);
    return result;
}
Example #24
/**************************************************************************************
 *
 * Helper to check if tree is a local that participates in SSA numbering.
 */
bool Compiler::optIsSsaLocal(GenTreePtr tree)
{
    return tree->IsLocal() && !fgExcludeFromSsa(tree->AsLclVarCommon()->GetLclNum());
}
Example #25
/**************************************************************************************
 *
 * Perform copy propagation on a given tree as we walk the graph and if it is a local
 * variable, then look up all currently live definitions and check if any of those
 * definitions share the same value number. If so, then we can make the replacement.
 *
 */
void Compiler::optCopyProp(BasicBlock* block, GenTreePtr stmt, GenTreePtr tree, LclNumToGenTreePtrStack* curSsaName)
{
    // TODO-Review: EH successor/predecessor iteration seems broken.
    if (block->bbCatchTyp == BBCT_FINALLY || block->bbCatchTyp == BBCT_FAULT)
    {
        return;
    }

    // If not local nothing to do.
    if (!tree->IsLocal())
    {
        return;
    }
    if (tree->OperGet() == GT_PHI_ARG || tree->OperGet() == GT_LCL_FLD)
    {
        return;
    }

    // Propagate only on uses.
    if (tree->gtFlags & GTF_VAR_DEF)
    {
        return;
    }
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    // Skip address exposed variables.
    if (fgExcludeFromSsa(lclNum))
    {
        return;
    }

    assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);

    for (LclNumToGenTreePtrStack::KeyIterator iter = curSsaName->Begin(); !iter.Equal(curSsaName->End()); ++iter)
    {
        unsigned newLclNum = iter.Get();

        GenTreePtr op = iter.GetValue()->Index(0);

        // Nothing to do if same.
        if (lclNum == newLclNum)
        {
            continue;
        }

        // Skip variables with assignments embedded in the statement (i.e., with a comma),
        // because we are not currently updating their SSA names as live during the
        // copy-prop pass over the statement.
        if (VarSetOps::IsMember(this, optCopyPropKillSet, lvaTable[newLclNum].lvVarIndex))
        {
            continue;
        }

        if (op->gtFlags & GTF_VAR_CAST)
        {
            continue;
        }
        if (gsShadowVarInfo != nullptr && lvaTable[newLclNum].lvIsParam &&
            gsShadowVarInfo[newLclNum].shadowCopy == lclNum)
        {
            continue;
        }
        ValueNum opVN = GetUseAsgDefVNOrTreeVN(op);
        if (opVN == ValueNumStore::NoVN)
        {
            continue;
        }
        if (op->TypeGet() != tree->TypeGet())
        {
            continue;
        }
        if (opVN != tree->gtVNPair.GetConservative())
        {
            continue;
        }
        if (optCopyProp_LclVarScore(&lvaTable[lclNum], &lvaTable[newLclNum], true) <= 0)
        {
            continue;
        }
        // Check whether the newLclNum is live before being substituted. Otherwise, we could end
        // up in a situation where there must've been a phi node that got pruned because the variable
        // is not live anymore. For example,
        //  if
        //     x0 = 1
        //  else
        //     x1 = 2
        //  print(c) <-- x is not live here. Let's say 'c' shares the value number with "x0."
        //
        // If we simply substituted 'c' with "x0", we would be wrong. Ideally, there would be a phi
        // node x2 = phi(x0, x1) which can then be used to substitute 'c' with. But because of pruning
        // there would be no such phi node. To solve this we'll check if 'x' is live, before replacing
        // 'c' with 'x.'
        if (!lvaTable[newLclNum].lvVerTypeInfo.IsThisPtr())
        {
            if (lvaTable[newLclNum].lvAddrExposed)
            {
                continue;
            }

            // We compute liveness only on tracked variables. So skip untracked locals.
            if (!lvaTable[newLclNum].lvTracked)
            {
                continue;
            }

            // Because of this dependence on live variable analysis, the CopyProp phase runs
            // immediately after Liveness, SSA and VN.
            if (!VarSetOps::IsMember(this, compCurLife, lvaTable[newLclNum].lvVarIndex))
            {
                continue;
            }
        }
        unsigned newSsaNum = SsaConfig::RESERVED_SSA_NUM;
        if (op->gtFlags & GTF_VAR_DEF)
        {
            newSsaNum = GetSsaNumForLocalVarDef(op);
        }
        else // parameters, this pointer etc.
        {
            newSsaNum = op->AsLclVarCommon()->GetSsaNum();
        }

        if (newSsaNum == SsaConfig::RESERVED_SSA_NUM)
        {
            continue;
        }

#ifdef DEBUG
        if (verbose)
        {
            JITDUMP("VN based copy assertion for ");
            printTreeID(tree);
            printf(" V%02d @%08X by ", lclNum, tree->GetVN(VNK_Conservative));
            printTreeID(op);
            printf(" V%02d @%08X.\n", newLclNum, op->GetVN(VNK_Conservative));
            gtDispTree(tree, nullptr, nullptr, true);
        }
#endif

        lvaTable[lclNum].decRefCnts(block->getBBWeight(this), this);
        lvaTable[newLclNum].incRefCnts(block->getBBWeight(this), this);
        tree->gtLclVarCommon.SetLclNum(newLclNum);
        tree->AsLclVarCommon()->SetSsaNum(newSsaNum);
#ifdef DEBUG
        if (verbose)
        {
            printf("copy propagated to:\n");
            gtDispTree(tree, nullptr, nullptr, true);
        }
#endif
        break;
    }
    return;
}
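At the source level, the replacement this routine performs amounts to rewriting a use of one local with another live local whose value number proves it holds the same value. A hedged before/after illustration (ordinary C++, not the JIT's IR):

// Illustrative only: 'copy' and 'original' share a value number at the use site.
int Before(int original)
{
    int copy = original; // def of 'copy' with the same value as 'original'
    return copy * 2;     // use of 'copy'
}

int After(int original)
{
    int copy = original; // may become dead and be removed later
    return original * 2; // use rewritten to the equivalent, still-live local
}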
Example #26
void RegSet::SetUsedRegFloat(GenTreePtr tree, bool bValue)
{
    /* The value must be sitting in a register */
    assert(tree);
    assert(tree->gtFlags & GTF_REG_VAL);

    var_types  type    = tree->TypeGet();
#ifdef _TARGET_ARM_
    if (type == TYP_STRUCT)
    {
        assert(m_rsCompiler->IsHfa(tree));
        type = TYP_FLOAT;
    }
#endif
    regNumber  regNum  = tree->gtRegNum;
    regMaskTP  regMask = genRegMaskFloat(regNum, type);

    if (bValue)
    {
        // Mark as used

#ifdef  DEBUG
        if  (m_rsCompiler->verbose)
        {
            printf("\t\t\t\t\t\t\tThe register %s currently holds ", 
                   getRegNameFloat(regNum, type));
            Compiler::printTreeID(tree);
            printf("\n");
        }
#endif

        assert((rsGetMaskLock() & regMask) == 0);

#if FEATURE_STACK_FP_X87
        assert((rsGetMaskUsed() & regMask) == 0);
#else
        /* Is the register used by two different values simultaneously? */

        if  (regMask & rsGetMaskUsed())
        {
            /* Save the preceding use information */

            rsRecMultiReg(regNum, type);
        }
#endif
        /* Set the register's bit in the 'used' bitset */

        rsSetMaskUsed( (rsGetMaskUsed() | regMask) );

        // Assign slot
        rsSetUsedTree(regNum, tree);
    }
    else
    {
        // Mark as free

#ifdef DEBUG
        if  (m_rsCompiler->verbose)
        {
            printf("\t\t\t\t\t\t\tThe register %s no longer holds ",
                   getRegNameFloat(regNum, type));
            Compiler::printTreeID(tree);
            printf("\n");
        }
#endif

        assert((rsGetMaskUsed() & regMask) == regMask);

        // Are we freeing a multi-use register?

        if  (regMask & rsGetMaskMult())
        {
            // Free any multi-use registers
            rsMultRegFree(regMask);
            return;
        }

        rsSetMaskUsed( (rsGetMaskUsed() & ~regMask) );

        // Free slot
        rsFreeUsedTree(regNum, tree);
    }
}
Example #27
/*****************************************************************************
 * gsMarkPtrsAndAssignGroups
 * Walk a tree looking for assignment groups, variables whose value is used
 * in a *p store or use, and variables passed to calls.  This info is then used
 * to determine parameters which are vulnerable.
 * This function carries state to know if it is under an assign node, call node
 * or indirection node.  It starts a new tree walk for its subtrees when the state
 * changes.
 */
Compiler::fgWalkResult Compiler::gsMarkPtrsAndAssignGroups(GenTreePtr *pTree, fgWalkData *data)
{
    struct MarkPtrsInfo *pState= (MarkPtrsInfo *)data->pCallbackData;
    struct MarkPtrsInfo newState = *pState;
    Compiler *comp = data->compiler;
    GenTreePtr tree = *pTree;
    ShadowParamVarInfo *shadowVarInfo = pState->comp->gsShadowVarInfo;
    assert(shadowVarInfo);
    bool fIsBlk = false;
    unsigned lclNum;

    assert(!pState->isAssignSrc || pState->lvAssignDef != (unsigned)-1);

    if (pState->skipNextNode)
    {
        pState->skipNextNode = false;
        return WALK_CONTINUE;
    }

    switch (tree->OperGet())
    {
    // Indirections - look for *p uses and defs
    case GT_INITBLK:
    case GT_COPYOBJ:
    case GT_COPYBLK:
        fIsBlk = true;
        // fallthrough
    case GT_IND:
    case GT_LDOBJ:
    case GT_ARR_ELEM:
    case GT_ARR_INDEX:
    case GT_ARR_OFFSET:
    case GT_FIELD:

        newState.isUnderIndir = true;
        {
            if (fIsBlk)
            {
                // Blk nodes have implicit indirections.
                comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);

                if (tree->OperGet() == GT_INITBLK)
                {
                    newState.isUnderIndir = false;
                }
                comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
            else
            {
                newState.skipNextNode = true;  // Don't have to worry about which kind of node we're dealing with
                comp->fgWalkTreePre(&tree, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
        }

        return WALK_SKIP_SUBTREES;

    // local vars and param uses
    case GT_LCL_VAR:
    case GT_LCL_FLD:
        lclNum = tree->gtLclVarCommon.gtLclNum;

        if (pState->isUnderIndir)
        {
            // The variable is being dereferenced for a read or a write.
            comp->lvaTable[lclNum].lvIsPtr = 1;
        }

        if (pState->isAssignSrc)
        {
            //
            // Add lvAssignDef and lclNum to a common assign group
            if (shadowVarInfo[pState->lvAssignDef].assignGroup)
            {
                if (shadowVarInfo[lclNum].assignGroup)
                {
                    // OR both bit vector
                    shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectOr(shadowVarInfo[lclNum].assignGroup);
                }
                else
                {
                    shadowVarInfo[pState->lvAssignDef].assignGroup->bitVectSet(lclNum);
                }
            
                // Point both to the same bit vector
                shadowVarInfo[lclNum].assignGroup = shadowVarInfo[pState->lvAssignDef].assignGroup;
            }
            else if (shadowVarInfo[lclNum].assignGroup)
            {
                shadowVarInfo[lclNum].assignGroup->bitVectSet(pState->lvAssignDef);
            
                // Point both to the same bit vector
                shadowVarInfo[pState->lvAssignDef].assignGroup = shadowVarInfo[lclNum].assignGroup;
            }
            else
            {
                FixedBitVect *bv = FixedBitVect::bitVectInit(pState->comp->lvaCount, pState->comp);

                // (shadowVarInfo[pState->lvAssignDef] == NULL && shadowVarInfo[lclNum] == NULL);
                // Neither of them has an assign group yet.  Make a new one.
                shadowVarInfo[pState->lvAssignDef].assignGroup = bv;
                shadowVarInfo[lclNum].assignGroup = bv;
                bv->bitVectSet(pState->lvAssignDef);
                bv->bitVectSet(lclNum);
            }

        }
        return WALK_CONTINUE;
    
    // Calls - Mark arg variables
    case GT_CALL:

        newState.isUnderIndir = false;
        newState.isAssignSrc = false;
        {
            if (tree->gtCall.gtCallObjp)
            {
                newState.isUnderIndir = true;
                comp->fgWalkTreePre(&tree->gtCall.gtCallObjp, gsMarkPtrsAndAssignGroups, (void *)&newState);
            }

            for (GenTreeArgList* args = tree->gtCall.gtCallArgs; args; args = args->Rest())
            {
                comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
            for (GenTreeArgList* args = tree->gtCall.gtCallLateArgs; args; args = args->Rest())
            {
                comp->fgWalkTreePre(&args->Current(), gsMarkPtrsAndAssignGroups, (void *)&newState);
            }

            if (tree->gtCall.gtCallType == CT_INDIRECT)
            {
                newState.isUnderIndir = true;

                // A function pointer is treated like a write-through pointer since
                // it controls what code gets executed, and so indirectly can cause
                // a write to memory.
                comp->fgWalkTreePre(&tree->gtCall.gtCallAddr, gsMarkPtrsAndAssignGroups, (void *)&newState);
            }
        }
        return WALK_SKIP_SUBTREES;


    case GT_ADDR:
        newState.isUnderIndir = false;
        // We'll assume p in "**p = " can be vulnerable because by changing 'p', someone
        // could control where **p stores to.
        {
            comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
        }
        return WALK_SKIP_SUBTREES;


    default:
        // Assignments - track assign groups and *p defs.
        if (tree->OperIsAssignment())
        {
            bool isLocVar;
            bool isLocFld;

            // Walk dst side
            comp->fgWalkTreePre(&tree->gtOp.gtOp1, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);
            
            // Now handle src side
            isLocVar = tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR;
            isLocFld = tree->gtOp.gtOp1->OperGet() == GT_LCL_FLD;

            if ((isLocVar || isLocFld) && tree->gtOp.gtOp2)
            {
                lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
                newState.lvAssignDef = lclNum;
                newState.isAssignSrc = true;
            }

            comp->fgWalkTreePre(&tree->gtOp.gtOp2, comp->gsMarkPtrsAndAssignGroups, (void *)&newState);

            return WALK_SKIP_SUBTREES;
        }
    }

    return WALK_CONTINUE;
}
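The assign-group bookkeeping in the GT_LCL_VAR case merges per-local bit vectors so that every local connected by assignments ends up sharing a single group. A simplified stand-alone sketch of that merge logic (hypothetical types; std::bitset stands in for FixedBitVect, ownership/cleanup omitted for brevity):

#include <bitset>
#include <vector>

constexpr size_t kMaxLocals = 64; // assumed upper bound for this sketch
using AssignGroup = std::bitset<kMaxLocals>;

// Mirrors the GT_LCL_VAR logic above: OR existing groups together and make
// both locals point at the same shared bit vector.
void MergeAssignGroups(std::vector<AssignGroup*>& groupOf, unsigned dstLcl, unsigned srcLcl)
{
    if (groupOf[dstLcl] != nullptr && groupOf[srcLcl] != nullptr)
    {
        *groupOf[dstLcl] |= *groupOf[srcLcl]; // OR both bit vectors
        groupOf[srcLcl] = groupOf[dstLcl];    // point both at the same group
    }
    else if (groupOf[dstLcl] != nullptr)
    {
        groupOf[dstLcl]->set(srcLcl);
        groupOf[srcLcl] = groupOf[dstLcl];
    }
    else if (groupOf[srcLcl] != nullptr)
    {
        groupOf[srcLcl]->set(dstLcl);
        groupOf[dstLcl] = groupOf[srcLcl];
    }
    else
    {
        // Neither local has an assign group yet: make a new one holding both.
        AssignGroup* bv = new AssignGroup();
        bv->set(dstLcl);
        bv->set(srcLcl);
        groupOf[dstLcl] = bv;
        groupOf[srcLcl] = bv;
    }
}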
Example #28
//------------------------------------------------------------------------
// TreeNodeInfoInitBlockStore: Set the NodeInfo for a block store.
//
// Arguments:
//    blkNode       - The block store node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
{
    GenTree*    dstAddr  = blkNode->Addr();
    unsigned    size     = blkNode->gtBlkSize;
    GenTree*    source   = blkNode->Data();
    LinearScan* l        = m_lsra;
    Compiler*   compiler = comp;

    // Sources are dest address and initVal or source.
    // We may require an additional source or temp register for the size.
    blkNode->gtLsraInfo.srcCount = 2;
    blkNode->gtLsraInfo.dstCount = 0;
    GenTreePtr srcAddrOrFill     = nullptr;
    bool       isInitBlk         = blkNode->OperIsInitBlkOp();

    if (!isInitBlk)
    {
        // CopyObj or CopyBlk
        if (source->gtOper == GT_IND)
        {
            srcAddrOrFill = blkNode->Data()->gtGetOp1();
            // We're effectively setting source as contained, but can't call MakeSrcContained, because the
            // "inheritance" of the srcCount is to a child not a parent - it would "just work" but could be misleading.
            // If srcAddr is already non-contained, we don't need to change it.
            if (srcAddrOrFill->gtLsraInfo.getDstCount() == 0)
            {
                srcAddrOrFill->gtLsraInfo.setDstCount(1);
                srcAddrOrFill->gtLsraInfo.setSrcCount(source->gtLsraInfo.srcCount);
            }
            m_lsra->clearOperandCounts(source);
            source->SetContained();
            source->AsIndir()->Addr()->ClearContained();
        }
        else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
        {
            assert(source->IsLocal());
            MakeSrcContained(blkNode, source);
            blkNode->gtLsraInfo.srcCount--;
        }
    }

    if (isInitBlk)
    {
        GenTreePtr initVal = source;
        if (initVal->OperIsInitVal())
        {
            initVal->SetContained();
            initVal = initVal->gtGetOp1();
        }
        srcAddrOrFill = initVal;

        if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
        {
            // TODO-ARM-CQ: Currently we generate a helper call for every
            // initblk we encounter.  Later on we should implement loop unrolling
            // code sequences to improve CQ.
            // For reference see the code in lsraxarch.cpp.
            NYI_ARM("initblk loop unrolling is currently not implemented.");

#ifdef _TARGET_ARM64_
            // No additional temporaries required
            ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
            if (fill == 0)
            {
                MakeSrcContained(blkNode, source);
                blkNode->gtLsraInfo.srcCount--;
            }
#endif // _TARGET_ARM64_
        }
        else
        {
            assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
            // The helper follows the regular ABI.
            dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
            initVal->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
            if (size != 0)
            {
                // Reserve a temp register for the block size argument.
                blkNode->gtLsraInfo.setInternalCandidates(l, RBM_ARG_2);
                blkNode->gtLsraInfo.internalIntCount = 1;
            }
            else
            {
                // The block size argument is a third argument to GT_STORE_DYN_BLK
                noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
                blkNode->gtLsraInfo.setSrcCount(3);
                GenTree* sizeNode = blkNode->AsDynBlk()->gtDynamicSize;
                sizeNode->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
            }
        }
    }
    else
    {
        // CopyObj or CopyBlk
        // Sources are src and dest and size if not constant.
        if (blkNode->OperGet() == GT_STORE_OBJ)
        {
            // CopyObj
            // We don't need to materialize the struct size but we still need
            // a temporary register to perform the sequence of loads and stores.
            blkNode->gtLsraInfo.internalIntCount = 1;

            if (size >= 2 * REGSIZE_BYTES)
            {
                // We will use ldp/stp to reduce code size and improve performance
                // so we need to reserve an extra internal register
                blkNode->gtLsraInfo.internalIntCount++;
            }

            // We can't use the special Write Barrier registers, so exclude them from the mask
            regMaskTP internalIntCandidates = RBM_ALLINT & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF);
            blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);

            // If we have a dest address we want it in RBM_WRITE_BARRIER_DST_BYREF.
            dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_DST_BYREF);

            // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF.
            // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF,
            // which is killed by a StoreObj (and thus needn't be reserved).
            if (srcAddrOrFill != nullptr)
            {
                srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_WRITE_BARRIER_SRC_BYREF);
            }
        }
        else
        {
            // CopyBlk
            short     internalIntCount      = 0;
            regMaskTP internalIntCandidates = RBM_NONE;

            if (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll)
            {
                // TODO-ARM-CQ: cpblk loop unrolling is currently not implemented.
                // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
                // we should unroll the loop to improve CQ.
                // For reference see the code in lsraxarch.cpp.
                NYI_ARM("cpblk loop unrolling is currently not implemented.");

#ifdef _TARGET_ARM64_

                internalIntCount      = 1;
                internalIntCandidates = RBM_ALLINT;

                if (size >= 2 * REGSIZE_BYTES)
                {
                    // We will use ldp/stp to reduce code size and improve performance
                    // so we need to reserve an extra internal register
                    internalIntCount++;
                }

#endif // _TARGET_ARM64_
            }
            else
            {
                assert(blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindHelper);
                dstAddr->gtLsraInfo.setSrcCandidates(l, RBM_ARG_0);
                // The srcAddr goes in arg1.
                if (srcAddrOrFill != nullptr)
                {
                    srcAddrOrFill->gtLsraInfo.setSrcCandidates(l, RBM_ARG_1);
                }
                if (size != 0)
                {
                    // Reserve a temp register for the block size argument.
                    internalIntCandidates |= RBM_ARG_2;
                    internalIntCount++;
                }
                else
                {
                    // The block size argument is a third argument to GT_STORE_DYN_BLK
                    noway_assert(blkNode->gtOper == GT_STORE_DYN_BLK);
                    blkNode->gtLsraInfo.setSrcCount(3);
                    GenTree* blockSize = blkNode->AsDynBlk()->gtDynamicSize;
                    blockSize->gtLsraInfo.setSrcCandidates(l, RBM_ARG_2);
                }
            }
            if (internalIntCount != 0)
            {
                blkNode->gtLsraInfo.internalIntCount = internalIntCount;
                blkNode->gtLsraInfo.setInternalCandidates(l, internalIntCandidates);
            }
        }
    }
}
Example #29
//------------------------------------------------------------------------
// TreeNodeInfoInitPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node
//
// Arguments:
//    argNode - a GT_PUTARG_SPLIT node
//
// Return Value:
//    None.
//
// Notes:
//    Set the child node(s) to be contained
//
void Lowering::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode, TreeNodeInfo& info, fgArgTabEntryPtr argInfo)
{
    assert(argNode->gtOper == GT_PUTARG_SPLIT);

    GenTreePtr putArgChild = argNode->gtOp.gtOp1;

    // The registers for a split argument correspond to the source
    argNode->gtLsraInfo.dstCount = argInfo->numRegs;
    info.srcCount += argInfo->numRegs;

    regNumber argReg  = argInfo->regNum;
    regMaskTP argMask = RBM_NONE;
    for (unsigned i = 0; i < argInfo->numRegs; i++)
    {
        argMask |= genRegMask((regNumber)((unsigned)argReg + i));
    }
    argNode->gtLsraInfo.setDstCandidates(m_lsra, argMask);

    if (putArgChild->OperGet() == GT_FIELD_LIST)
    {
        // Generated code:
        // 1. Consume all of the items in the GT_FIELD_LIST (source)
        // 2. Store to target slot and move to target registers (destination) from source
        //
        argNode->gtLsraInfo.srcCount = argInfo->numRegs + argInfo->numSlots;

        // To avoid redundant moves, have the argument operand computed in the
        // register in which the argument is passed to the call.
        GenTreeFieldList* fieldListPtr = putArgChild->AsFieldList();
        for (unsigned idx = 0; fieldListPtr != nullptr; fieldListPtr = fieldListPtr->Rest(), idx++)
        {
            if (idx < argInfo->numRegs)
            {
                GenTreePtr node = fieldListPtr->gtGetOp1();
                node->gtLsraInfo.setSrcCandidates(m_lsra, genRegMask((regNumber)((unsigned)argReg + idx)));
            }
        }

        putArgChild->SetContained();
    }
    else
    {
        assert(putArgChild->TypeGet() == TYP_STRUCT);
        assert(putArgChild->OperGet() == GT_OBJ);

        // We could use an ldr/str sequence so we need an internal register
        argNode->gtLsraInfo.srcCount         = 1;
        argNode->gtLsraInfo.internalIntCount = 1;
        regMaskTP internalMask               = RBM_ALLINT & ~argMask;
        argNode->gtLsraInfo.setInternalCandidates(m_lsra, internalMask);

        GenTreePtr objChild = putArgChild->gtOp.gtOp1;
        if (objChild->OperGet() == GT_LCL_VAR_ADDR)
        {
            // We will generate all of the code for the GT_PUTARG_SPLIT, the GT_OBJ and the GT_LCL_VAR_ADDR
            // as one contained operation
            //
            MakeSrcContained(putArgChild, objChild);
            putArgChild->gtLsraInfo.srcCount--;
        }
        argNode->gtLsraInfo.srcCount = putArgChild->gtLsraInfo.srcCount;
        MakeSrcContained(argNode, putArgChild);
    }
}
Example #30
//------------------------------------------------------------------------
// TreeNodeInfoInitCall: Set the NodeInfo for a call.
//
// Arguments:
//    call - The call node of interest
//
// Return Value:
//    None.
//
void Lowering::TreeNodeInfoInitCall(GenTreeCall* call)
{
    TreeNodeInfo*   info              = &(call->gtLsraInfo);
    LinearScan*     l                 = m_lsra;
    Compiler*       compiler          = comp;
    bool            hasMultiRegRetVal = false;
    ReturnTypeDesc* retTypeDesc       = nullptr;

    info->srcCount = 0;
    if (call->TypeGet() != TYP_VOID)
    {
        hasMultiRegRetVal = call->HasMultiRegRetVal();
        if (hasMultiRegRetVal)
        {
            // dst count = number of registers in which the value is returned by call
            retTypeDesc    = call->GetReturnTypeDesc();
            info->dstCount = retTypeDesc->GetReturnRegCount();
        }
        else
        {
            info->dstCount = 1;
        }
    }
    else
    {
        info->dstCount = 0;
    }

    GenTree* ctrlExpr = call->gtControlExpr;
    if (call->gtCallType == CT_INDIRECT)
    {
        // either gtControlExpr != null or gtCallAddr != null.
        // Both cannot be non-null at the same time.
        assert(ctrlExpr == nullptr);
        assert(call->gtCallAddr != nullptr);
        ctrlExpr = call->gtCallAddr;
    }

    // set reg requirements on call target represented as control sequence.
    if (ctrlExpr != nullptr)
    {
        // we should never see a gtControlExpr whose type is void.
        assert(ctrlExpr->TypeGet() != TYP_VOID);

        info->srcCount++;

        // In the case of a fast tail call implemented as a jmp, make sure that gtControlExpr is
        // computed into a register.
        if (call->IsFastTailCall())
        {
            NYI_ARM("tail call");

#ifdef _TARGET_ARM64_
            // Fast tail call - make sure that call target is always computed in IP0
            // so that epilog sequence can generate "br xip0" to achieve fast tail call.
            ctrlExpr->gtLsraInfo.setSrcCandidates(l, genRegMask(REG_IP0));
#endif // _TARGET_ARM64_
        }
    }
#ifdef _TARGET_ARM_
    else
    {
        info->internalIntCount = 1;
    }
#endif // _TARGET_ARM_

    RegisterType registerType = call->TypeGet();

// Set destination candidates for return value of the call.

#ifdef _TARGET_ARM_
    if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
    {
        // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
        // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
        info->setDstCandidates(l, RBM_PINVOKE_TCB);
    }
    else
#endif // _TARGET_ARM_
        if (hasMultiRegRetVal)
    {
        assert(retTypeDesc != nullptr);
        info->setDstCandidates(l, retTypeDesc->GetABIReturnRegs());
    }
    else if (varTypeIsFloating(registerType))
    {
        info->setDstCandidates(l, RBM_FLOATRET);
    }
    else if (registerType == TYP_LONG)
    {
        info->setDstCandidates(l, RBM_LNGRET);
    }
    else
    {
        info->setDstCandidates(l, RBM_INTRET);
    }

    // If there is an explicit this pointer, we don't want that node to produce anything
    // as it is redundant
    if (call->gtCallObjp != nullptr)
    {
        GenTreePtr thisPtrNode = call->gtCallObjp;

        if (thisPtrNode->gtOper == GT_PUTARG_REG)
        {
            l->clearOperandCounts(thisPtrNode);
            thisPtrNode->SetContained();
            l->clearDstCount(thisPtrNode->gtOp.gtOp1);
        }
        else
        {
            l->clearDstCount(thisPtrNode);
        }
    }

    // First, count reg args
    bool callHasFloatRegArgs = false;

    for (GenTreePtr list = call->gtCallLateArgs; list; list = list->MoveNext())
    {
        assert(list->OperIsList());

        GenTreePtr argNode = list->Current();

        fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
        assert(curArgTabEntry);

        if (curArgTabEntry->regNum == REG_STK)
        {
            // late arg that is not passed in a register
            assert(argNode->gtOper == GT_PUTARG_STK);

            TreeNodeInfoInitPutArgStk(argNode->AsPutArgStk(), curArgTabEntry);
            continue;
        }

        // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct
        if (argNode->OperGet() == GT_FIELD_LIST)
        {
            argNode->SetContained();

            // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs)
            regNumber argReg = curArgTabEntry->regNum;
            for (GenTreeFieldList* entry = argNode->AsFieldList(); entry != nullptr; entry = entry->Rest())
            {
                TreeNodeInfoInitPutArgReg(entry->Current()->AsUnOp(), argReg, *info, false, &callHasFloatRegArgs);

                // Update argReg for the next putarg_reg (if any)
                argReg = genRegArgNext(argReg);

#if defined(_TARGET_ARM_)
                // A double register is modelled as an even-numbered single one
                if (entry->Current()->TypeGet() == TYP_DOUBLE)
                {
                    argReg = genRegArgNext(argReg);
                }
#endif // _TARGET_ARM_
            }
        }
#ifdef _TARGET_ARM_
        else if (argNode->OperGet() == GT_PUTARG_SPLIT)
        {
            fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
            TreeNodeInfoInitPutArgSplit(argNode->AsPutArgSplit(), *info, curArgTabEntry);
        }
#endif
        else
        {
            TreeNodeInfoInitPutArgReg(argNode->AsUnOp(), curArgTabEntry->regNum, *info, false, &callHasFloatRegArgs);
        }
    }

    // Now, count stack args
    // Note that these need to be computed into a register, but then
    // they're just stored to the stack - so the reg doesn't
    // need to remain live until the call.  In fact, it must not
    // because the code generator doesn't actually consider it live,
    // so it can't be spilled.

    GenTreePtr args = call->gtCallArgs;
    while (args)
    {
        GenTreePtr arg = args->gtOp.gtOp1;

        // Skip arguments that have been moved to the Late Arg list
        if (!(args->gtFlags & GTF_LATE_ARG))
        {
            if (arg->gtOper == GT_PUTARG_STK)
            {
                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                assert(curArgTabEntry);

                assert(curArgTabEntry->regNum == REG_STK);

                TreeNodeInfoInitPutArgStk(arg->AsPutArgStk(), curArgTabEntry);
            }
#ifdef _TARGET_ARM_
            else if (arg->OperGet() == GT_PUTARG_SPLIT)
            {
                fgArgTabEntryPtr curArgTabEntry = compiler->gtArgEntryByNode(call, arg);
                TreeNodeInfoInitPutArgSplit(arg->AsPutArgSplit(), *info, curArgTabEntry);
            }
#endif
            else
            {
                TreeNodeInfo* argInfo = &(arg->gtLsraInfo);
                if (argInfo->dstCount != 0)
                {
                    argInfo->isLocalDefUse = true;
                }

                argInfo->dstCount = 0;
            }
        }
        args = args->gtOp.gtOp2;
    }

    // If it is a fast tail call, it is already preferenced to use IP0.
    // Therefore, there is no need to set src candidates on the call target again.
    if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr))
    {
        NYI_ARM("float reg varargs");

        // Don't assign the call target to any of the argument registers because
        // we will use them to also pass floating point arguments as required
        // by Arm64 ABI.
        ctrlExpr->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_ARG_REGS));
    }

#ifdef _TARGET_ARM_

    if (call->NeedsNullCheck())
    {
        info->internalIntCount++;
    }

#endif // _TARGET_ARM_
}