Example #1
void GCInfo::gcMarkRegSetByref(regMaskTP regMask DEBUGARG(bool forceOutput))
{
    regMaskTP gcRegByrefSetNew = gcRegByrefSetCur | regMask;  // Set it in Byref mask
    regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur & ~regMask; // Clear it if set in GCref mask

    INDEBUG(gcDspGCrefSetChanges(gcRegGCrefSetNew));
    INDEBUG(gcDspByrefSetChanges(gcRegByrefSetNew, forceOutput));

    gcRegByrefSetCur = gcRegByrefSetNew;
    gcRegGCrefSetCur = gcRegGCrefSetNew;
}
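
To see the mask arithmetic in isolation: marking a register as byref sets its bit in the byref mask and clears it from the GCref mask, so the two sets stay disjoint. A minimal self-contained sketch of that invariant (RegMask, GcRegSets, and the bit values are illustrative stand-ins, not the real regMaskTP machinery):

#include <cassert>
#include <cstdint>

using RegMask = std::uint64_t; // stand-in for regMaskTP

struct GcRegSets
{
    RegMask byrefSet = 0;
    RegMask gcrefSet = 0;

    void markByref(RegMask regMask)
    {
        byrefSet |= regMask;  // set in the byref mask
        gcrefSet &= ~regMask; // clear from the gcref mask if present
        assert((byrefSet & gcrefSet) == 0); // the sets stay disjoint
    }
};

int main()
{
    GcRegSets sets;
    sets.gcrefSet = 0b0110; // pretend r1 and r2 currently hold GC refs
    sets.markByref(0b0010); // r1 now holds a byref instead
    assert(sets.byrefSet == 0b0010);
    assert(sets.gcrefSet == 0b0100);
    return 0;
}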
Example #2
void GCInfo::gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forceOutput))
{
    /* NOTE: don't unmark any live register variables */

    regMaskTP gcRegByrefSetNew = gcRegByrefSetCur & ~(regMask & ~regSet->rsMaskVars);
    regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur & ~(regMask & ~regSet->rsMaskVars);

    INDEBUG(gcDspGCrefSetChanges(gcRegGCrefSetNew, forceOutput));
    INDEBUG(gcDspByrefSetChanges(gcRegByrefSetNew, forceOutput));

    gcRegByrefSetCur = gcRegByrefSetNew;
    gcRegGCrefSetCur = gcRegGCrefSetNew;
}
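
The `regMask & ~regSet->rsMaskVars` filter is what protects enregistered locals: any register in the live-variable mask is removed from the clear request before it touches either set. A minimal sketch with hypothetical mask values:

#include <cassert>
#include <cstdint>

int main()
{
    std::uint64_t gcrefSet = 0b1110; // r1..r3 hold GC refs
    std::uint64_t liveVars = 0b0100; // r2 is a live register variable
    std::uint64_t regMask  = 0b0110; // caller asks to clear r1 and r2

    // Exclude live register variables from the clear request first.
    gcrefSet &= ~(regMask & ~liveVars); // only r1 is actually cleared

    assert(gcrefSet == 0b1100); // r2 kept its GC-ref marking
    return 0;
}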
Example #3
void GCInfo::gcMarkRegSetGCref(regMaskTP regMask DEBUGARG(bool forceOutput))
{
#ifdef DEBUG
    if (compiler->compRegSetCheckLevel == 0)
    {
        // This set of registers is going to hold REFs.
        // Make sure they were not holding BYREFs.
        assert((gcRegByrefSetCur & regMask) == 0);
    }
#endif

    regMaskTP gcRegByrefSetNew = gcRegByrefSetCur & ~regMask; // Clear it if set in Byref mask
    regMaskTP gcRegGCrefSetNew = gcRegGCrefSetCur | regMask;  // Set it in GCref mask

    INDEBUG(gcDspGCrefSetChanges(gcRegGCrefSetNew, forceOutput));
    INDEBUG(gcDspByrefSetChanges(gcRegByrefSetNew));

    gcRegByrefSetCur = gcRegByrefSetNew;
    gcRegGCrefSetCur = gcRegGCrefSetNew;
}
Example #4
//---------------------------------------------------------------------------
// Creates a platform-optimized version of TlsGetValue compiled
// for a particular index. Can return NULL.
//---------------------------------------------------------------------------
// A target buffer for the optimized getter can be passed in; this is
// useful so that code can avoid an indirect call for the GetThread
// and GetAppDomain calls, for instance. If NULL is passed, then
// we will allocate from the executable heap.
POPTIMIZEDTLSGETTER MakeOptimizedTlsGetter(DWORD tlsIndex, LPVOID pBuffer, SIZE_T cbBuffer, POPTIMIZEDTLSGETTER pGenericImpl, BOOL fForceGeneric)
{
    // Static contracts because this is used by contract infrastructure
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    ARM_ONLY(pBuffer = ThumbCodeToDataPointer<BYTE*>(pBuffer));

    // Buffer that should be big enough to encode the TLS getter on any reasonable platform
    TADDR patch[4 INDEBUG(+4 /* last error trashing */)];

    PBYTE pPatch = (PBYTE)&patch;

    TLSACCESSMODE mode = fForceGeneric ? TLSACCESS_GENERIC : GetTLSAccessMode(tlsIndex);

#if defined(_DEBUG)
    if (mode != TLSACCESS_GENERIC)
    {
        //
        // Trash last error in debug builds
        //

#ifdef _TARGET_X86_
        *((DWORD*) (pPatch + 0))  = 0x05c764;    //  mov dword ptr fs:[offsetof(TEB, LastErrorValue)], LAST_ERROR_TRASH_VALUE
        *((DWORD*) (pPatch + 3))  = offsetof(TEB, LastErrorValue);
        *((DWORD*) (pPatch + 7))  = LAST_ERROR_TRASH_VALUE;
        pPatch += 11;
#endif // _TARGET_X86_

#ifdef _TARGET_AMD64_
        // iDNA doesn't like writing directly to gs:[nn]
        *((UINT64*)(pPatch + 0))  = 0x25048b4865;         //  mov rax, gs:[offsetof(TEB, NtTib.Self)]
        *((DWORD*) (pPatch + 5))  = offsetof(TEB, NtTib.Self);
        *((WORD*)  (pPatch + 9))  = 0x80c7;               //  mov dword ptr [rax + offsetof(TEB, LastErrorValue)], LAST_ERROR_TRASH_VALUE
        *((DWORD*) (pPatch + 11)) = offsetof(TEB, LastErrorValue);
        *((DWORD*) (pPatch + 15)) = LAST_ERROR_TRASH_VALUE;
        pPatch += 19;
#endif
    }
#endif // _DEBUG 

    switch (mode) 
    {
#ifdef _TARGET_X86_
        case TLSACCESS_WNT:
            *((WORD*)  (pPatch + 0)) = 0xa164;               //  mov  eax, fs:[IMM32]
            *((DWORD*) (pPatch + 2)) = offsetof(TEB, TlsSlots) + tlsIndex * sizeof(void*);
            *((BYTE*)  (pPatch + 6)) = 0xc3;                 //  retn
            pPatch += 7;
            break;

        case TLSACCESS_GENERIC:
            if (pGenericImpl == NULL)
                return NULL;

            _ASSERTE(pBuffer != NULL);
            *((BYTE*)   (pPatch + 0)) = 0xE9;        // jmp pGenericImpl
            TADDR rel32 = ((TADDR)pGenericImpl - ((TADDR)pBuffer + 1 + sizeof(INT32)));
            *((INT32*)  (pPatch + 1)) = (INT32)rel32;
            pPatch += 5;
            break;
#endif // _TARGET_X86_

#ifdef _TARGET_AMD64_
        case TLSACCESS_WNT:
            *((UINT64*)(pPatch + 0)) = 0x25048b4865; //  mov  rax, gs:[IMM32]
            *((DWORD*) (pPatch + 5)) = offsetof(TEB, TlsSlots) + (tlsIndex * sizeof(void*));
            *((BYTE*)  (pPatch + 9)) = 0xc3;         //  retn
            pPatch += 10;
            break;

        case TLSACCESS_GENERIC:
            if (pGenericImpl == NULL)
                return NULL;

            _ASSERTE(pBuffer != NULL);
            *((BYTE*)   (pPatch + 0)) = 0xE9;        // jmp pGenericImpl
            TADDR rel32 = ((TADDR)pGenericImpl - ((TADDR)pBuffer + 1 + sizeof(INT32)));
            _ASSERTE((INT64)(INT32)rel32 == (INT64)rel32);
            *((INT32*)  (pPatch + 1)) = (INT32)rel32;
            pPatch += 5;

            *pPatch++ = 0xCC; // Make sure there is full 8 bytes worth of data
            *pPatch++ = 0xCC;
            *pPatch++ = 0xCC;
            break;

#endif // _TARGET_AMD64_

#ifdef _TARGET_ARM_
        case TLSACCESS_WNT:
            {
                WORD slotOffset = (WORD)(offsetof(TEB, TlsSlots) + tlsIndex * sizeof(void*));
                _ASSERTE(slotOffset < 4096);

                WORD *pInstr = (WORD*)pPatch;

                *pInstr++ = 0xee1d;     // mrc p15, 0, r0, c13, c0, 2
                *pInstr++ = 0x0f50;
                *pInstr++ = 0xf8d0;     // ldr r0, [r0, #slotOffset]
                *pInstr++ = slotOffset;
                *pInstr++ = 0x4770;     // bx lr

                pPatch = (PBYTE)pInstr;
            }
            break;

        case TLSACCESS_GENERIC:
            {
                if (pGenericImpl == NULL)
                    return NULL;

                _ASSERTE(pBuffer != NULL);

                *(DWORD *)pPatch = 0x9000F000;  // b pGenericImpl
                PutThumb2BlRel24((WORD*)pPatch, (TADDR)pGenericImpl - ((TADDR)pBuffer + 4 + THUMB_CODE));

                pPatch += 4;
            }
            break;
#endif // _TARGET_ARM_
    }

    SIZE_T cbCode = (TADDR)pPatch - (TADDR)&patch;
    _ASSERTE(cbCode <= sizeof(patch));

    if (pBuffer != NULL)
    {
        _ASSERTE_ALL_BUILDS("clr/src/utilcode/tls.cpp", cbCode <= cbBuffer);

        // We assume that the first instruction of the buffer is a short jump to dummy helper 
        // that can be atomically overwritten to avoid races with other threads executing the code.
        // It is the same basic technique as hot patching.

        // Assert on all builds to make sure that retail optimizations are not affecting the alignment.
        _ASSERTE_ALL_BUILDS("clr/src/utilcode/tls.cpp", IS_ALIGNED((void*)pBuffer, sizeof(TADDR)));

        // Size of short jump that gets patched last.
        if (cbCode > sizeof(TADDR))
        {
            memcpy((BYTE *)pBuffer + sizeof(TADDR), &patch[1], cbCode - sizeof(TADDR));
            FlushInstructionCache(GetCurrentProcess(), (BYTE *)pBuffer + sizeof(TADDR), cbCode - sizeof(TADDR));
        }

        // Make sure that the dummy implementation still works.
        _ASSERTE(((POPTIMIZEDTLSGETTER)ARM_ONLY(DataPointerToThumbCode<BYTE*>)(pBuffer))() == NULL);

        // It is important for this write to happen atomically     
        VolatileStore<TADDR>((TADDR *)pBuffer, patch[0]);

        FlushInstructionCache(GetCurrentProcess(), (BYTE *)pBuffer, sizeof(TADDR));
    }
    else
    {
        pBuffer = (BYTE*) new (executable, nothrow) BYTE[cbCode];
        if (pBuffer == NULL)
            return NULL;

        memcpy(pBuffer, &patch, cbCode);

        FlushInstructionCache(GetCurrentProcess(), pBuffer, cbCode);
    }

    return (POPTIMIZEDTLSGETTER)ARM_ONLY(DataPointerToThumbCode<BYTE*>)(pBuffer);
}
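
The in-place patch path above relies on ordering: everything after the first pointer-sized word is copied while the old first word still jumps readers to the dummy/generic implementation, then the first word is stored atomically to flip all threads at once. A hedged sketch of that ordering using std::atomic (PatchableStub, Publish, and the sizes are hypothetical; real code additionally needs executable memory and FlushInstructionCache):

#include <atomic>
#include <cstdint>
#include <cstring>

// Illustrative stand-in: the data here is plain bytes, not executable code.
struct PatchableStub
{
    std::atomic<std::uint64_t> firstWord; // starts out encoding "jmp generic"
    unsigned char              tail[24];  // remainder of the stub body
};

void Publish(PatchableStub& stub, const unsigned char (&code)[32])
{
    // 1. Copy the tail first. Readers racing with us still execute the old
    //    first word, which safely jumps them to the generic implementation.
    std::memcpy(stub.tail, code + sizeof(std::uint64_t),
                sizeof(code) - sizeof(std::uint64_t));

    // 2. Atomically store the first word last (the VolatileStore above),
    //    flipping every reader over to the new body in one step.
    std::uint64_t head;
    std::memcpy(&head, code, sizeof(head));
    stub.firstWord.store(head, std::memory_order_release);
}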
Example #5
void DacDbiInterfaceImpl::EnumerateInternalFrames(VMPTR_Thread                           vmThread,
                                                  FP_INTERNAL_FRAME_ENUMERATION_CALLBACK fpCallback,
                                                  void *                                 pUserData)
{
    DD_ENTER_MAY_THROW;

    DebuggerIPCE_STRData frameData;

    Thread *    pThread    = vmThread.GetDacPtr();
    Frame *     pFrame     = pThread->GetFrame();
    AppDomain * pAppDomain = pThread->GetDomain(INDEBUG(TRUE));

    // This used to be only true for Enter-Managed chains.
    // Since we don't have chains anymore, this can always be false.
    frameData.quicklyUnwound = false;
    frameData.eType = DebuggerIPCE_STRData::cStubFrame;

    while (pFrame != FRAME_TOP)
    {
        // check if the internal frame is interesting
        frameData.stubFrame.frameType = GetInternalFrameType(pFrame);
        if (frameData.stubFrame.frameType != STUBFRAME_NONE)
        {
            frameData.fp = FramePointer::MakeFramePointer(PTR_HOST_TO_TADDR(pFrame));

            frameData.vmCurrentAppDomainToken.SetHostPtr(pAppDomain);

            MethodDesc * pMD = pFrame->GetFunction();
#if defined(FEATURE_COMINTEROP)
            if (frameData.stubFrame.frameType == STUBFRAME_U2M)
            {
                _ASSERTE(pMD == NULL);

                // U2M transition frames generally don't store the target MD because we know what
                // the target is by looking at the callee's stack frame.  However, for reverse COM
                // interop, we can try to get the MD for the interface.
                // 
                // Note that some reverse COM interop cases don't have an intermediate interface MD, so
                // pMD may still be NULL.
                //
                // Even if there is an MD on the ComMethodFrame, it could be in a different appdomain than
                // the ComMethodFrame itself.  The only known scenario is a cross-appdomain reverse COM 
                // interop call.  We need to check for this case.  The end result is that GetFunction() and
                // GetFunctionToken() on ICDInternalFrame will return NULL.

                // Minidumps without full memory aren't guaranteed to capture the CCW, since we can
                // do without it.  In this case, pMD will remain NULL.
                EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
                {
                    if (pFrame->GetVTablePtr() == ComMethodFrame::GetMethodFrameVPtr())
                    {
                        ComMethodFrame * pCOMFrame = dac_cast<PTR_ComMethodFrame>(pFrame);
                        PTR_VOID pUnkStackSlot     = pCOMFrame->GetPointerToArguments();
                        PTR_IUnknown pUnk          = dac_cast<PTR_IUnknown>(*dac_cast<PTR_TADDR>(pUnkStackSlot));
                        ComCallWrapper * pCCW      = ComCallWrapper::GetWrapperFromIP(pUnk);

                        if (!pCCW->NeedToSwitchDomains(pAppDomain->GetId()))
                        {
                            ComCallMethodDesc * pCMD = NULL;
                            pCMD = dac_cast<PTR_ComCallMethodDesc>(pCOMFrame->ComMethodFrame::GetDatum());
                            pMD  = pCMD->GetInterfaceMethodDesc();
                        }
                    }
                }
                EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY
            }
#endif // FEATURE_COMINTEROP

            Module *     pModule = (pMD ? pMD->GetModule() : NULL);
            DomainFile * pDomainFile = (pModule ? pModule->GetDomainFile(pAppDomain) : NULL);

            if (frameData.stubFrame.frameType == STUBFRAME_FUNC_EVAL)
            {
                FuncEvalFrame * pFEF = dac_cast<PTR_FuncEvalFrame>(pFrame);
                DebuggerEval *  pDE  = pFEF->GetDebuggerEval();

                frameData.stubFrame.funcMetadataToken = pDE->m_methodToken;
                frameData.stubFrame.vmDomainFile.SetHostPtr(
                    pDE->m_debuggerModule ? pDE->m_debuggerModule->GetDomainFile() : NULL);
                frameData.stubFrame.vmMethodDesc = VMPTR_MethodDesc::NullPtr();
            }
            else
            {
                frameData.stubFrame.funcMetadataToken = (pMD == NULL ? NULL : pMD->GetMemberDef());
                frameData.stubFrame.vmDomainFile.SetHostPtr(pDomainFile);
                frameData.stubFrame.vmMethodDesc.SetHostPtr(pMD);
            }

            // invoke the callback
            fpCallback(&frameData, pUserData);
        }

        // advance to the next frame in the chain
        pFrame = pFrame->Next();
    }
}
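
The shape of the API is a classic enumeration callback: the walker fills one record per interesting frame and hands it to the caller-supplied function pointer along with an opaque context pointer. A simplified, self-contained sketch of that contract (Frame, FrameData, and the filter below are stand-ins, not the DAC types):

#include <cstdio>

struct FrameData { int frameType; };

using FrameCallback = void (*)(const FrameData*, void* userData);

struct Frame { int type; Frame* next; };

void EnumerateFrames(Frame* top, FrameCallback cb, void* userData)
{
    for (Frame* f = top; f != nullptr; f = f->next)
    {
        if (f->type != 0) // only report "interesting" frames
        {
            FrameData data{f->type};
            cb(&data, userData); // caller decides what to do with each frame
        }
    }
}

int main()
{
    Frame f2{2, nullptr}, f1{0, &f2};
    int count = 0;
    EnumerateFrames(&f1,
                    [](const FrameData*, void* ud) { ++*static_cast<int*>(ud); },
                    &count);
    std::printf("interesting frames: %d\n", count); // prints 1
    return 0;
}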
Example #6
/* Returns true when we must create an EBP frame.
   This is used to force most managed methods to have EBP-based frames,
   which allows the ETW kernel stackwalker to walk the stacks of managed code;
   this in turn allows the kernel to perform lightweight profiling.
 */
bool Compiler::rpMustCreateEBPFrame(INDEBUG(const char** wbReason))
{
    bool result = false;
#ifdef DEBUG
    const char* reason = nullptr;
#endif

#if ETW_EBP_FRAMED
    if (!result && (opts.MinOpts() || opts.compDbgCode))
    {
        INDEBUG(reason = "Debug Code");
        result = true;
    }
    if (!result && (info.compMethodInfo->ILCodeSize > DEFAULT_MAX_INLINE_SIZE))
    {
        INDEBUG(reason = "IL Code Size");
        result = true;
    }
    if (!result && (fgBBcount > 3))
    {
        INDEBUG(reason = "BasicBlock Count");
        result = true;
    }
    if (!result && fgHasLoops)
    {
        INDEBUG(reason = "Method has Loops");
        result = true;
    }
    if (!result && (optCallCount >= 2))
    {
        INDEBUG(reason = "Call Count");
        result = true;
    }
    if (!result && (optIndirectCallCount >= 1))
    {
        INDEBUG(reason = "Indirect Call");
        result = true;
    }
#endif // ETW_EBP_FRAMED

    // The VM always wants to identify the containing frame of an InlinedCallFrame
    // via the frame register, never the stack register, so we need a frame.
    if (!result && (optNativeCallCount != 0))
    {
        INDEBUG(reason = "Uses PInvoke");
        result = true;
    }

#ifdef _TARGET_ARM64_
    // TODO-ARM64-NYI: This is temporary: force a frame pointer-based frame until genFnProlog can handle non-frame
    // pointer frames.
    if (!result)
    {
        INDEBUG(reason = "Temporary ARM64 force frame pointer");
        result = true;
    }
#endif // _TARGET_ARM64_

#ifdef DEBUG
    if ((result == true) && (wbReason != nullptr))
    {
        *wbReason = reason;
    }
#endif

    return result;
}
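
The wbReason out-parameter illustrates the DEBUGARG/INDEBUG idiom used throughout these examples: the parameter and the bookkeeping around it exist only in debug builds, so release builds carry no cost. A minimal sketch of the idiom with hypothetical macros (INDEBUG_ARG, INDEBUG_STMT, DEBUG_BUILD, and MustDoThing stand in for the real DEBUGARG/INDEBUG machinery):

#ifdef DEBUG_BUILD
#define INDEBUG_ARG(x) , x
#define INDEBUG_STMT(x) x
#else
#define INDEBUG_ARG(x)
#define INDEBUG_STMT(x)
#endif

// Debug builds get a human-readable reason written back through wbReason;
// release builds compile the extra parameter and bookkeeping away entirely.
bool MustDoThing(int workItems INDEBUG_ARG(const char** wbReason))
{
    bool result = false;
    INDEBUG_STMT(const char* reason = nullptr);

    if (workItems > 3)
    {
        INDEBUG_STMT(reason = "too many work items");
        result = true;
    }

    INDEBUG_STMT(if (result && (wbReason != nullptr)) *wbReason = reason);
    return result;
}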
Example #7
//------------------------------------------------------------------------
// BuildNode: Build the RefPositions for a node
//
// Arguments:
//    treeNode - the node of interest
//
// Return Value:
//    The number of sources consumed by this node.
//
// Notes:
//    Preconditions:
//       LSRA has been initialized.
//
//    Postconditions:
//       RefPositions have been built for all the register defs and uses required
//       for this node.
//
int LinearScan::BuildNode(GenTree* tree)
{
    assert(!tree->isContained());
    Interval* prefSrcInterval = nullptr;
    int       srcCount;
    int       dstCount      = 0;
    regMaskTP dstCandidates = RBM_NONE;
    regMaskTP killMask      = RBM_NONE;
    bool      isLocalDefUse = false;

    // Reset the build-related members of LinearScan.
    clearBuildState();

    RegisterType registerType = TypeGet(tree);

    // Set the default dstCount. This may be modified below.
    if (tree->IsValue())
    {
        dstCount = 1;
        if (tree->IsUnusedValue())
        {
            isLocalDefUse = true;
        }
    }
    else
    {
        dstCount = 0;
    }

    switch (tree->OperGet())
    {
        default:
            srcCount = BuildSimple(tree);
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
        {
            // We handle tracked variables differently from non-tracked ones.  If it is tracked,
            // we will simply add a use of the tracked variable at its parent/consumer.
            // Otherwise, for a use we need to actually add the appropriate references for loading
            // or storing the variable.
            //
            // A tracked variable won't actually get used until the appropriate ancestor tree node
            // is processed, unless this is marked "isLocalDefUse" because it is a stack-based argument
            // to a call or an orphaned dead node.
            //
            LclVarDsc* const varDsc = &compiler->lvaTable[tree->AsLclVarCommon()->gtLclNum];
            if (isCandidateVar(varDsc))
            {
                INDEBUG(dumpNodeInfo(tree, dstCandidates, 0, 1));
                return 0;
            }
            srcCount = 0;
#ifdef FEATURE_SIMD
            // Need an additional register to read upper 4 bytes of Vector3.
            if (tree->TypeGet() == TYP_SIMD12)
            {
                // We need an internal register different from targetReg in which 'tree' produces its result
                // because both targetReg and internal reg will be in use at the same time.
                buildInternalFloatRegisterDefForNode(tree, allSIMDRegs());
                setInternalRegsDelayFree = true;
                buildInternalRegisterUses();
            }
#endif
            BuildDef(tree);
        }
        break;

        case GT_STORE_LCL_FLD:
        case GT_STORE_LCL_VAR:
            assert(dstCount == 0);
            srcCount = BuildStoreLoc(tree->AsLclVarCommon());
            break;

        case GT_FIELD_LIST:
            // These should always be contained. We don't correctly allocate or
            // generate code for a non-contained GT_FIELD_LIST.
            noway_assert(!"Non-contained GT_FIELD_LIST");
            srcCount = 0;
            break;

        case GT_LIST:
        case GT_ARGPLACE:
        case GT_NO_OP:
        case GT_START_NONGC:
        case GT_PROF_HOOK:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_START_PREEMPTGC:
            // This kills GC refs in callee save regs
            srcCount = 0;
            assert(dstCount == 0);
            BuildDefsWithKills(tree, 0, RBM_NONE, RBM_NONE);
            break;

        case GT_CNS_DBL:
        {
            GenTreeDblCon* dblConst   = tree->AsDblCon();
            double         constValue = dblConst->gtDblCon.gtDconVal;

            if (emitter::emitIns_valid_imm_for_fmov(constValue))
            {
                // Directly encode constant to instructions.
            }
            else
            {
                // Reserve int to load constant from memory (IF_LARGELDC)
                buildInternalIntRegisterDefForNode(tree);
                buildInternalRegisterUses();
            }
        }
            __fallthrough;

        case GT_CNS_INT:
        {
            srcCount = 0;
            assert(dstCount == 1);
            RefPosition* def               = BuildDef(tree);
            def->getInterval()->isConstant = true;
        }
        break;

        case GT_BOX:
        case GT_COMMA:
        case GT_QMARK:
        case GT_COLON:
            srcCount = 0;
            assert(dstCount == 0);
            unreached();
            break;

        case GT_RETURN:
            srcCount = BuildReturn(tree);
            break;

        case GT_RETFILT:
            assert(dstCount == 0);
            if (tree->TypeGet() == TYP_VOID)
            {
                srcCount = 0;
            }
            else
            {
                assert(tree->TypeGet() == TYP_INT);
                srcCount = 1;
                BuildUse(tree->gtGetOp1(), RBM_INTRET);
            }
            break;

        case GT_NOP:
            // A GT_NOP is a passthrough if it is void or has a child, but it
            // must be considered to produce a dummy value if it has a type but
            // no child.
            srcCount = 0;
            if (tree->TypeGet() != TYP_VOID && tree->gtGetOp1() == nullptr)
            {
                assert(dstCount == 1);
                BuildDef(tree);
            }
            else
            {
                assert(dstCount == 0);
            }
            break;

        case GT_JTRUE:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_JMP:
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_SWITCH:
            // This should never occur since switch nodes must not be visible at this
            // point in the JIT.
            srcCount = 0;
            noway_assert(!"Switch must be lowered at this point");
            break;

        case GT_JMPTABLE:
            srcCount = 0;
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_SWITCH_TABLE:
            buildInternalIntRegisterDefForNode(tree);
            srcCount = BuildBinaryUses(tree->AsOp());
            assert(dstCount == 0);
            break;

        case GT_ASG:
            noway_assert(!"We should never hit any assignment operator in lowering");
            srcCount = 0;
            break;

        case GT_ADD:
        case GT_SUB:
            if (varTypeIsFloating(tree->TypeGet()))
            {
                // overflow operations aren't supported on float/double types.
                assert(!tree->gtOverflow());

                // No implicit conversions at this stage as the expectation is that
                // everything is made explicit by adding casts.
                assert(tree->gtGetOp1()->TypeGet() == tree->gtGetOp2()->TypeGet());
            }

            __fallthrough;

        case GT_AND:
        case GT_OR:
        case GT_XOR:
        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROR:
            srcCount = BuildBinaryUses(tree->AsOp());
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_RETURNTRAP:
            // this just turns into a compare of its child with an int
            // + a conditional call
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            assert(dstCount == 0);
            killMask = compiler->compHelperCallKillSet(CORINFO_HELP_STOP_FOR_GC);
            BuildDefsWithKills(tree, 0, RBM_NONE, killMask);
            break;

        case GT_MOD:
        case GT_UMOD:
            NYI_IF(varTypeIsFloating(tree->TypeGet()), "FP Remainder in ARM64");
            assert(!"Shouldn't see an integer typed GT_MOD node in ARM64");
            srcCount = 0;
            break;

        case GT_MUL:
            if (tree->gtOverflow())
            {
                // Need a register different from target reg to check for overflow.
                buildInternalIntRegisterDefForNode(tree);
                setInternalRegsDelayFree = true;
            }
            __fallthrough;

        case GT_DIV:
        case GT_MULHI:
        case GT_UDIV:
        {
            srcCount = BuildBinaryUses(tree->AsOp());
            buildInternalRegisterUses();
            assert(dstCount == 1);
            BuildDef(tree);
        }
        break;

        case GT_INTRINSIC:
        {
            noway_assert((tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Abs) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Ceiling) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Floor) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Round) ||
                         (tree->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt));

            // Both operand and its result must be of the same floating point type.
            GenTree* op1 = tree->gtGetOp1();
            assert(varTypeIsFloating(op1));
            assert(op1->TypeGet() == tree->TypeGet());

            BuildUse(op1);
            srcCount = 1;
            assert(dstCount == 1);
            BuildDef(tree);
        }
        break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            srcCount = BuildSIMD(tree->AsSIMD());
            break;
#endif // FEATURE_SIMD

#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            srcCount = BuildHWIntrinsic(tree->AsHWIntrinsic());
            break;
#endif // FEATURE_HW_INTRINSICS

        case GT_CAST:
            assert(dstCount == 1);
            srcCount = BuildCast(tree->AsCast());
            break;

        case GT_NEG:
        case GT_NOT:
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            assert(dstCount == 1);
            BuildDef(tree);
            break;

        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_JCMP:
            srcCount = BuildCmp(tree);
            break;

        case GT_CKFINITE:
            srcCount = 1;
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            BuildUse(tree->gtGetOp1());
            BuildDef(tree);
            buildInternalRegisterUses();
            break;

        case GT_CMPXCHG:
        {
            GenTreeCmpXchg* cmpXchgNode = tree->AsCmpXchg();
            srcCount                    = cmpXchgNode->gtOpComparand->isContained() ? 2 : 3;
            assert(dstCount == 1);

            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // ARMv8 exclusives require a single internal register
                buildInternalIntRegisterDefForNode(tree);
            }

            // For ARMv8 exclusives, the lifetime of the addr and data must be extended because
            // they may be used multiple times during retries.

            // For ARMv8.1 atomic cas, the lifetime of the addr and data must be extended to
            // prevent them from being reused as the target register, which must be destroyed early.

            RefPosition* locationUse = BuildUse(tree->gtCmpXchg.gtOpLocation);
            setDelayFree(locationUse);
            RefPosition* valueUse = BuildUse(tree->gtCmpXchg.gtOpValue);
            setDelayFree(valueUse);
            if (!cmpXchgNode->gtOpComparand->isContained())
            {
                RefPosition* comparandUse = BuildUse(tree->gtCmpXchg.gtOpComparand);

                // For ARMv8 exclusives, the lifetime of the comparand must be extended because
                // it may be used multiple times during retries.
                if (!compiler->compSupports(InstructionSet_Atomics))
                {
                    setDelayFree(comparandUse);
                }
            }

            // Internals may not collide with target
            setInternalRegsDelayFree = true;
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_LOCKADD:
        case GT_XADD:
        case GT_XCHG:
        {
            assert(dstCount == ((tree->TypeGet() == TYP_VOID) ? 0 : 1));
            srcCount = tree->gtGetOp2()->isContained() ? 1 : 2;

            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // GT_XCHG requires a single internal register; the others require two.
                buildInternalIntRegisterDefForNode(tree);
                if (tree->OperGet() != GT_XCHG)
                {
                    buildInternalIntRegisterDefForNode(tree);
                }
            }

            assert(!tree->gtGetOp1()->isContained());
            RefPosition* op1Use = BuildUse(tree->gtGetOp1());
            RefPosition* op2Use = nullptr;
            if (!tree->gtGetOp2()->isContained())
            {
                op2Use = BuildUse(tree->gtGetOp2());
            }

            // For ARMv8 exclusives, the lifetime of the addr and data must be extended because
            // they may be used multiple times during retries.
            if (!compiler->compSupports(InstructionSet_Atomics))
            {
                // Internals may not collide with target
                if (dstCount == 1)
                {
                    setDelayFree(op1Use);
                    if (op2Use != nullptr)
                    {
                        setDelayFree(op2Use);
                    }
                    setInternalRegsDelayFree = true;
                }
                buildInternalRegisterUses();
            }
            if (dstCount == 1)
            {
                BuildDef(tree);
            }
        }
        break;

#if FEATURE_ARG_SPLIT
        case GT_PUTARG_SPLIT:
            srcCount = BuildPutArgSplit(tree->AsPutArgSplit());
            dstCount = tree->AsPutArgSplit()->gtNumRegs;
            break;
#endif // FEATURE_ARG_SPLIT

        case GT_PUTARG_STK:
            srcCount = BuildPutArgStk(tree->AsPutArgStk());
            break;

        case GT_PUTARG_REG:
            srcCount = BuildPutArgReg(tree->AsUnOp());
            break;

        case GT_CALL:
            srcCount = BuildCall(tree->AsCall());
            if (tree->AsCall()->HasMultiRegRetVal())
            {
                dstCount = tree->AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
            }
            break;

        case GT_ADDR:
        {
            // For a GT_ADDR, the child node should not be evaluated into a register
            GenTree* child = tree->gtGetOp1();
            assert(!isCandidateLocalRef(child));
            assert(child->isContained());
            assert(dstCount == 1);
            srcCount = 0;
            BuildDef(tree);
        }
        break;

        case GT_BLK:
        case GT_DYN_BLK:
            // These should all be eliminated prior to Lowering.
            assert(!"Non-store block node in Lowering");
            srcCount = 0;
            break;

        case GT_STORE_BLK:
        case GT_STORE_OBJ:
        case GT_STORE_DYN_BLK:
            srcCount = BuildBlockStore(tree->AsBlk());
            break;

        case GT_INIT_VAL:
            // Always a passthrough of its child's value.
            assert(!"INIT_VAL should always be contained");
            srcCount = 0;
            break;

        case GT_LCLHEAP:
        {
            assert(dstCount == 1);

            // Need a variable number of temp regs (see genLclHeap() in codegenamd64.cpp):
            // Here '-' means don't care.
            //
            //  Size?                   Init Memory?    # temp regs
            //   0                          -               0
            //   const and <=6 ptr words    -               0
            //   const and <PageSize        No              0
            //   >6 ptr words               Yes             0
            //   Non-const                  Yes             0
            //   Non-const                  No              2
            //

            GenTree* size = tree->gtGetOp1();
            if (size->IsCnsIntOrI())
            {
                assert(size->isContained());
                srcCount = 0;

                size_t sizeVal = size->gtIntCon.gtIconVal;

                if (sizeVal != 0)
                {
                    // Compute the amount of memory to properly STACK_ALIGN.
                    // Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size.
                    // This should also help in debugging as we can examine the original size specified with
                    // localloc.
                    sizeVal         = AlignUp(sizeVal, STACK_ALIGN);
                    size_t stpCount = sizeVal / (REGSIZE_BYTES * 2);

                    // For small allocations up to 4 'stp' instructions (i.e. 16 to 64 bytes of localloc)
                    //
                    if (stpCount <= 4)
                    {
                        // Need no internal registers
                    }
                    else if (!compiler->info.compInitMem)
                    {
                        // No need to initialize allocated stack space.
                        if (sizeVal < compiler->eeGetPageSize())
                        {
                            // Need no internal registers
                        }
                        else
                        {
                            // We need two registers: regCnt and RegTmp
                            buildInternalIntRegisterDefForNode(tree);
                            buildInternalIntRegisterDefForNode(tree);
                        }
                    }
                }
            }
            else
            {
                srcCount = 1;
                if (!compiler->info.compInitMem)
                {
                    buildInternalIntRegisterDefForNode(tree);
                    buildInternalIntRegisterDefForNode(tree);
                }
            }

            if (!size->isContained())
            {
                BuildUse(size);
            }
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_ARR_BOUNDS_CHECK:
#ifdef FEATURE_SIMD
        case GT_SIMD_CHK:
#endif // FEATURE_SIMD
        {
            GenTreeBoundsChk* node = tree->AsBoundsChk();
            // Consumes arrLen & index - has no result
            assert(dstCount == 0);

            srcCount = BuildOperandUses(node->gtIndex);
            srcCount += BuildOperandUses(node->gtArrLen);
        }
        break;

        case GT_ARR_ELEM:
            // These must have been lowered to GT_ARR_INDEX
            noway_assert(!"We should never see a GT_ARR_ELEM in lowering");
            srcCount = 0;
            assert(dstCount == 0);
            break;

        case GT_ARR_INDEX:
        {
            srcCount = 2;
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            setInternalRegsDelayFree = true;

            // For GT_ARR_INDEX, the lifetime of the arrObj must be extended because it is actually used multiple
            // times while the result is being computed.
            RefPosition* arrObjUse = BuildUse(tree->AsArrIndex()->ArrObj());
            setDelayFree(arrObjUse);
            BuildUse(tree->AsArrIndex()->IndexExpr());
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_ARR_OFFSET:
            // This consumes the offset, if any, the arrObj and the effective index,
            // and produces the flattened offset for this dimension.
            srcCount = 2;
            if (!tree->gtArrOffs.gtOffset->isContained())
            {
                BuildUse(tree->AsArrOffs()->gtOffset);
                srcCount++;
            }
            BuildUse(tree->AsArrOffs()->gtIndex);
            BuildUse(tree->AsArrOffs()->gtArrObj);
            assert(dstCount == 1);
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

        case GT_LEA:
        {
            GenTreeAddrMode* lea = tree->AsAddrMode();

            GenTree* base  = lea->Base();
            GenTree* index = lea->Index();
            int      cns   = lea->Offset();

            // This LEA is instantiating an address, so we set up the srcCount here.
            srcCount = 0;
            if (base != nullptr)
            {
                srcCount++;
                BuildUse(base);
            }
            if (index != nullptr)
            {
                srcCount++;
                BuildUse(index);
            }
            assert(dstCount == 1);

            // On ARM64 we may need a single internal register
            // (when both conditions are true then we still only need a single internal register)
            if ((index != nullptr) && (cns != 0))
            {
                // ARM64 does not support both Index and offset so we need an internal register
                buildInternalIntRegisterDefForNode(tree);
            }
            else if (!emitter::emitIns_valid_imm_for_add(cns, EA_8BYTE))
            {
                // This offset can't be contained in the add instruction, so we need an internal register
                buildInternalIntRegisterDefForNode(tree);
            }
            buildInternalRegisterUses();
            BuildDef(tree);
        }
        break;

        case GT_STOREIND:
        {
            assert(dstCount == 0);

            if (compiler->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(tree))
            {
                srcCount = BuildGCWriteBarrier(tree);
                break;
            }

            srcCount = BuildIndir(tree->AsIndir());
            if (!tree->gtGetOp2()->isContained())
            {
                BuildUse(tree->gtGetOp2());
                srcCount++;
            }
        }
        break;

        case GT_NULLCHECK:
            // Unlike ARM, ARM64 implements NULLCHECK as a load to REG_ZR, so no internal register
            // is required, and it is not a localDefUse.
            assert(dstCount == 0);
            assert(!tree->gtGetOp1()->isContained());
            BuildUse(tree->gtGetOp1());
            srcCount = 1;
            break;

        case GT_IND:
            assert(dstCount == 1);
            srcCount = BuildIndir(tree->AsIndir());
            break;

        case GT_CATCH_ARG:
            srcCount = 0;
            assert(dstCount == 1);
            BuildDef(tree, RBM_EXCEPTION_OBJECT);
            break;

        case GT_CLS_VAR:
            srcCount = 0;
            // GT_CLS_VAR, by the time we reach the backend, must always
            // be a pure use.
            // It will produce a result of the type of the
            // node, and use an internal register for the address.

            assert(dstCount == 1);
            assert((tree->gtFlags & (GTF_VAR_DEF | GTF_VAR_USEASG)) == 0);
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

        case GT_INDEX_ADDR:
            assert(dstCount == 1);
            srcCount = BuildBinaryUses(tree->AsOp());
            buildInternalIntRegisterDefForNode(tree);
            buildInternalRegisterUses();
            BuildDef(tree);
            break;

    } // end switch (tree->OperGet())

    if (tree->IsUnusedValue() && (dstCount != 0))
    {
        isLocalDefUse = true;
    }
    // We need to be sure that we've set srcCount and dstCount appropriately
    assert((dstCount < 2) || tree->IsMultiRegCall());
    assert(isLocalDefUse == (tree->IsValue() && tree->IsUnusedValue()));
    assert(!tree->IsUnusedValue() || (dstCount != 0));
    assert(dstCount == tree->GetRegisterDstCount());
    INDEBUG(dumpNodeInfo(tree, dstCandidates, srcCount, dstCount));
    return srcCount;
}
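
One detail worth a second look is the assert in the GT_LOCKADD/GT_XADD/GT_XCHG case: ?: binds more loosely than ==, so without parentheses around the ternary the comparison feeds the ternary rather than the other way around, and the assert silently checks the wrong thing (hence the corrected form above). A standalone illustration of the pitfall:

#include <cassert>

int main()
{
    int  dstCount = 1;
    bool isVoid   = false;

    // Buggy form: "dstCount == isVoid ? 0 : 1" parses as
    // "(dstCount == isVoid) ? 0 : 1", which yields 0 or 1 and never
    // expresses "dstCount matches the expected count".
    static_assert((1 == false ? 0 : 1) == 1, "the comparison binds first");

    // Intended form: parenthesize the ternary so it computes the expected
    // count, then compare against it.
    assert(dstCount == (isVoid ? 0 : 1));
    return 0;
}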