/*****************************************************************************
 * gsParamsToShadows
 * Copy each vulnerable param ptr or buffer to a local shadow copy and replace
 * uses of the param by the shadow copy
 */
void Compiler::gsParamsToShadows()
{
    // Cache old count since we'll add new variables, and
    // gsShadowVarInfo will not grow to accommodate the new ones.
    UINT lvaOldCount = lvaCount;

    // Create shadow copy for each param candidate
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc *varDsc = &lvaTable[lclNum];
        gsShadowVarInfo[lclNum].shadowCopy = NO_SHADOW_COPY;

        // Only care about params whose values are on the stack
        if (!ShadowParamVarInfo::mayNeedShadowCopy(varDsc))
        {
            continue;
        }

        // Only pointer params and unsafe buffers are vulnerable to overrun attacks.
        if (!varDsc->lvIsPtr && !varDsc->lvIsUnsafeBuffer)
        {
            continue;
        }

        int shadowVar = lvaGrabTemp(false DEBUGARG("shadowVar"));

        // Copy some info
        // Small-int params are widened to TYP_INT for the shadow slot.
        var_types type = varTypeIsSmall(varDsc->TypeGet()) ? TYP_INT : varDsc->TypeGet();
        lvaTable[shadowVar].lvType            = type;
        lvaTable[shadowVar].lvAddrExposed     = varDsc->lvAddrExposed;
        lvaTable[shadowVar].lvDoNotEnregister = varDsc->lvDoNotEnregister;
#ifdef DEBUG
        // Debug-only bookkeeping flags mirrored so diagnostics stay consistent.
        lvaTable[shadowVar].lvVMNeedsStackAddr = varDsc->lvVMNeedsStackAddr;
        lvaTable[shadowVar].lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
        lvaTable[shadowVar].lvLclFieldExpr     = varDsc->lvLclFieldExpr;
        lvaTable[shadowVar].lvLiveAcrossUCall  = varDsc->lvLiveAcrossUCall;
#endif
        lvaTable[shadowVar].lvVerTypeInfo   = varDsc->lvVerTypeInfo;
        lvaTable[shadowVar].lvGcLayout      = varDsc->lvGcLayout;
        lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer;
        lvaTable[shadowVar].lvIsPtr          = varDsc->lvIsPtr;

#ifdef DEBUG
        if (verbose)
        {
            printf("Var V%02u is shadow param candidate. Shadow copy is V%02u.\n", lclNum, shadowVar);
        }
#endif

        gsShadowVarInfo[lclNum].shadowCopy = shadowVar;
    }

    // Replace param uses with shadow copy
    fgWalkAllTreesPre(gsReplaceShadowParams, (void *)this);

    // Now insert code to copy the params to their shadow copy.
    for (UINT lclNum = 0; lclNum < lvaOldCount; lclNum++)
    {
        LclVarDsc *varDsc = &lvaTable[lclNum];

        unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
        if (shadowVar == NO_SHADOW_COPY)
        {
            continue;
        }

        var_types type = lvaTable[shadowVar].TypeGet();

        GenTreePtr src = gtNewLclvNode(lclNum, varDsc->TypeGet());
        GenTreePtr dst = gtNewLclvNode(shadowVar, type);

        // Param-to-shadow copies must survive exactly as written; prevent CSE.
        src->gtFlags |= GTF_DONT_CSE;
        dst->gtFlags |= GTF_DONT_CSE;

        GenTreePtr opAssign = NULL;
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();

            // We don't need unsafe value cls check here since we are copying the params and this flag
            // would have been set on the original param before reaching here.
            lvaSetStruct(shadowVar, clsHnd, false);

            // Structs are copied via a block copy (cpobj) of the two addresses.
            src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
            dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);

            opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
#if FEATURE_MULTIREG_ARGS_OR_RET
            lvaTable[shadowVar].lvIsMultiRegArgOrRet = lvaTable[lclNum].lvIsMultiRegArgOrRet;
#endif // FEATURE_MULTIREG_ARGS_OR_RET
        }
        else
        {
            opAssign = gtNewAssignNode(dst, src);
        }
        // The copy must execute before any user code, so it goes at the start of the scratch BB.
        fgEnsureFirstBBisScratch();
        (void) fgInsertStmtAtBeg(fgFirstBB, fgMorphTree(opAssign));
    }

    // If the method has "Jmp CalleeMethod", then we need to copy shadow params back to original
    // params before "jmp" to CalleeMethod.
    if (compJmpOpUsed)
    {
        // There could be more than one basic block ending with a "Jmp" type tail call.
        // We would have to insert assignments in all such blocks, just before GT_JMP stmnt.
        for (BasicBlock * block = fgFirstBB; block; block = block->bbNext)
        {
            // A GT_JMP block is a BBJ_RETURN block carrying the BBF_HAS_JMP flag.
            if (block->bbJumpKind != BBJ_RETURN)
            {
                continue;
            }

            if ((block->bbFlags & BBF_HAS_JMP) == 0)
            {
                continue;
            }

            // Only the incoming arguments need to be restored (shadow -> original).
            for (UINT lclNum = 0; lclNum < info.compArgsCount; lclNum++)
            {
                LclVarDsc *varDsc = &lvaTable[lclNum];

                unsigned shadowVar = gsShadowVarInfo[lclNum].shadowCopy;
                if (shadowVar == NO_SHADOW_COPY)
                {
                    continue;
                }

                // Direction is reversed here: shadow is the source, original param the destination.
                GenTreePtr src = gtNewLclvNode(shadowVar, lvaTable[shadowVar].TypeGet());
                GenTreePtr dst = gtNewLclvNode(lclNum, varDsc->TypeGet());

                src->gtFlags |= GTF_DONT_CSE;
                dst->gtFlags |= GTF_DONT_CSE;

                GenTreePtr opAssign = nullptr;
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    CORINFO_CLASS_HANDLE clsHnd = varDsc->lvVerTypeInfo.GetClassHandle();
                    src = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
                    dst = gtNewOperNode(GT_ADDR, TYP_BYREF, dst);
                    opAssign = gtNewCpObjNode(dst, src, clsHnd, false);
                }
                else
                {
                    opAssign = gtNewAssignNode(dst, src);
                }

                (void) fgInsertStmtNearEnd(block, fgMorphTree(opAssign));
            }
        }
    }
}
//------------------------------------------------------------------------
// gcCountForHeader: Compute the two variable counts needed for the GC info
// header.
//
// Arguments:
//    untrackedCount  - [out] number of untracked stack slots holding GC refs
//                      (untracked locals, non-enregistered args, GC fields of
//                      on-frame structs, and GC-typed spill temps)
//    varPtrTableSize - [out] number of entries in the tracked-pointer
//                      lifetime table (plus one if "this" must be reported
//                      as tracked over the whole method)
//
void GCInfo::gcCountForHeader(UNALIGNED unsigned int* untrackedCount, UNALIGNED unsigned int* varPtrTableSize)
{
    unsigned   varNum;
    LclVarDsc* varDsc;
    varPtrDsc* varTmp;

    bool thisKeptAliveIsInUntracked = false; // did we track "this" in a synchronized method?

    unsigned int count = 0;

    /* Count the untracked locals and non-enregistered args */

    for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount; varNum++, varDsc++)
    {
        if (varTypeIsGC(varDsc->TypeGet()))
        {
            if (compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc))
            {
                // Field local of a PROMOTION_TYPE_DEPENDENT struct must have been
                // reported through its parent local
                continue;
            }

            /* Do we have an argument or local variable? */
            if (!varDsc->lvIsParam)
            {
                // Tracked locals are reported via the lifetime table; vars not
                // on the frame never hold a live GC ref at a GC-safe point.
                if (varDsc->lvTracked || !varDsc->lvOnFrame)
                {
                    continue;
                }
            }
            else
            {
                /* Stack-passed arguments which are not enregistered
                 * are always reported in this "untracked stack
                 * pointers" section of the GC info even if lvTracked==true
                 */

                /* Has this argument been fully enregistered? */
                CLANG_FORMAT_COMMENT_ANCHOR;

#ifndef LEGACY_BACKEND
                if (!varDsc->lvOnFrame)
#else  // LEGACY_BACKEND
                if (varDsc->lvRegister)
#endif // LEGACY_BACKEND
                {
                    /* if a CEE_JMP has been used, then we need to report all the arguments
                       even if they are enregistered, since we will be using this value
                       in JMP call.  Note that this is subtle as we require that
                       argument offsets are always fixed up properly even if lvRegister
                       is set */
                    if (!compiler->compJmpOpUsed)
                    {
                        continue;
                    }
                }
                else
                {
                    if (!varDsc->lvOnFrame)
                    {
                        /* If this non-enregistered pointer arg is never
                         * used, we don't need to report it
                         */
                        assert(varDsc->lvRefCnt == 0);
                        continue;
                    }
                    else if (varDsc->lvIsRegArg && varDsc->lvTracked)
                    {
                        /* If this register-passed arg is tracked, then
                         * it has been allocated space near the other
                         * pointer variables and we have accurate life-
                         * time info.  It will be reported with
                         * gcVarPtrList in the "tracked-pointer" section
                         */
                        continue;
                    }
                }
            }

#if !defined(JIT32_GCENCODER) || !defined(WIN64EXCEPTIONS)
            // For x86/WIN64EXCEPTIONS, "this" must always be in untracked variables
            // so we cannot have "this" in variable lifetimes
            if (compiler->lvaIsOriginalThisArg(varNum) && compiler->lvaKeepAliveAndReportThis())
            {
                // Encoding of untracked variables does not support reporting
                // "this". So report it as a tracked variable with a liveness
                // extending over the entire method.
                thisKeptAliveIsInUntracked = true;
                continue;
            }
#endif

#ifdef DEBUG
            if (compiler->verbose)
            {
                int offs = varDsc->lvStkOffs;

                printf("GCINFO: untrckd %s lcl at [%s", varTypeGCstring(varDsc->TypeGet()),
                       compiler->genEmitter->emitGetFrameReg());

                if (offs < 0)
                {
                    printf("-%02XH", -offs);
                }
                else if (offs > 0)
                {
                    printf("+%02XH", +offs);
                }

                printf("]\n");
            }
#endif

            count++;
        }
        else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
        {
            unsigned slots  = compiler->lvaLclSize(varNum) / sizeof(void*);
            BYTE*    gcPtrs = compiler->lvaGetGcLayout(varNum);

            // walk each member of the array
            for (unsigned i = 0; i < slots; i++)
            {
                if (gcPtrs[i] != TYPE_GC_NONE)
                {
                    // count only gc slots
                    count++;
                }
            }
        }
    }

    /* Also count spill temps that hold pointers */

    assert(compiler->tmpAllFree());
    for (TempDsc* tempThis = compiler->tmpListBeg(); tempThis != nullptr; tempThis = compiler->tmpListNxt(tempThis))
    {
        if (varTypeIsGC(tempThis->tdTempType()) == false)
        {
            continue;
        }

#ifdef DEBUG
        if (compiler->verbose)
        {
            int offs = tempThis->tdTempOffs();

            // Print the temp's own type. (Previously this read varDsc->TypeGet(),
            // but after the loop above varDsc points one past the end of lvaTable —
            // an out-of-bounds read — and was the wrong variable in any case.)
            printf("GCINFO: untrck %s Temp at [%s", varTypeGCstring(tempThis->tdTempType()),
                   compiler->genEmitter->emitGetFrameReg());

            if (offs < 0)
            {
                printf("-%02XH", -offs);
            }
            else if (offs > 0)
            {
                printf("+%02XH", +offs);
            }

            printf("]\n");
        }
#endif

        count++;
    }

#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("GCINFO: untrckVars = %u\n", count);
    }
#endif

    *untrackedCount = count;

    /* Count the number of entries in the table of non-register pointer
       variable lifetimes. */

    count = 0;

    if (thisKeptAliveIsInUntracked)
    {
        // "this" is reported as a tracked variable spanning the whole method.
        count++;
    }

    if (gcVarPtrList)
    {
        /* We'll use a delta encoding for the lifetime offsets */

        for (varTmp = gcVarPtrList; varTmp; varTmp = varTmp->vpdNext)
        {
            /* Special case: skip any 0-length lifetimes */

            if (varTmp->vpdBegOfs == varTmp->vpdEndOfs)
            {
                continue;
            }

            count++;
        }
    }

#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("GCINFO: trackdLcls = %u\n", count);
    }
#endif

    *varPtrTableSize = count;
}
//------------------------------------------------------------------------
// gcWriteBarrierFormFromTargetAddress: Classify the write barrier needed
// for a store through the given target address by pattern-matching the
// address expression down to its root.
//
// Arguments:
//    tgtAddr - the address being stored through
//
// Return Value:
//    WBF_NoBarrier when the target is provably a stack location,
//    WBF_BarrierUnchecked when the target is a TYP_REF (known heap object),
//    WBF_BarrierChecked for stores through a native int,
//    WBF_BarrierUnknown otherwise.
//
GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr)
{
    GCInfo::WriteBarrierForm result = GCInfo::WBF_BarrierUnknown; // Default case, we have no information.

    // If we store through an int to a GC_REF field, we'll assume that needs to use a checked barriers.
    if (tgtAddr->TypeGet() == TYP_I_IMPL)
    {
        return GCInfo::WBF_BarrierChecked; // Why isn't this GCInfo::WBF_BarrierUnknown?
    }

    // Otherwise...
    assert(tgtAddr->TypeGet() == TYP_BYREF);
    bool simplifiedExpr = true;
    // Keep simplifying until a full pass over the expression makes no progress.
    while (simplifiedExpr)
    {
        simplifiedExpr = false;

        tgtAddr = tgtAddr->gtSkipReloadOrCopy();

        // ADDR(IND(x)) is just x — strip the round trip.
        while (tgtAddr->OperGet() == GT_ADDR && tgtAddr->gtOp.gtOp1->OperGet() == GT_IND)
        {
            tgtAddr        = tgtAddr->gtOp.gtOp1->gtOp.gtOp1;
            simplifiedExpr = true;
            assert(tgtAddr->TypeGet() == TYP_BYREF);
        }
        // For additions, one of the operands is a byref or a ref (and the other is not).  Follow this down to its
        // source.
        while (tgtAddr->OperGet() == GT_ADD || tgtAddr->OperGet() == GT_LEA)
        {
            if (tgtAddr->OperGet() == GT_ADD)
            {
                if (tgtAddr->gtOp.gtOp1->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp1->TypeGet() == TYP_REF)
                {
                    // Both operands cannot be GC pointers — byref arithmetic has one GC side.
                    assert(!(tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF));
                    tgtAddr        = tgtAddr->gtOp.gtOp1;
                    simplifiedExpr = true;
                }
                else if (tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF)
                {
                    tgtAddr        = tgtAddr->gtOp.gtOp2;
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int. For example:
                    //        const     int  0
                    //    +         byref
                    //        lclVar    int  V06 loc5  // this is a local declared "valuetype VType*"
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
            else
            {
                // Must be an LEA (i.e., an AddrMode)
                assert(tgtAddr->OperGet() == GT_LEA);
                tgtAddr = tgtAddr->AsAddrMode()->Base();
                if (tgtAddr->TypeGet() == TYP_BYREF || tgtAddr->TypeGet() == TYP_REF)
                {
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int.
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
        }
    }
    if (tgtAddr->IsLocalAddrExpr() != nullptr)
    {
        // No need for a GC barrier when writing to a local variable.
        return GCInfo::WBF_NoBarrier;
    }
    if (tgtAddr->OperGet() == GT_LCL_VAR || tgtAddr->OperGet() == GT_REG_VAR)
    {
        unsigned lclNum = 0;
        if (tgtAddr->gtOper == GT_LCL_VAR)
        {
            lclNum = tgtAddr->gtLclVar.gtLclNum;
        }
        else
        {
            assert(tgtAddr->gtOper == GT_REG_VAR);
            lclNum = tgtAddr->gtRegVar.gtLclNum;
        }

        LclVarDsc* varDsc = &compiler->lvaTable[lclNum];

        // Instead of marking LclVar with 'lvStackByref',
        // Consider decomposing the Value Number given to this LclVar to see if it was
        // created using a GT_ADDR(GT_LCLVAR)  or a GT_ADD( GT_ADDR(GT_LCLVAR), Constant)

        // We may have an internal compiler temp created in fgMorphCopyBlock() that we know
        // points at one of our stack local variables, it will have lvStackByref set to true.
        //
        if (varDsc->lvStackByref)
        {
            assert(varDsc->TypeGet() == TYP_BYREF);
            return GCInfo::WBF_NoBarrier;
        }

        // We don't eliminate for inlined methods, where we (can) know where the "retBuff" points.
        if (!compiler->compIsForInlining() && lclNum == compiler->info.compRetBuffArg)
        {
            assert(compiler->info.compRetType == TYP_STRUCT); // Else shouldn't have a ret buff.

            // Are we assured that the ret buff pointer points into the stack of a caller?
            if (compiler->info.compRetBuffDefStack)
            {
#if 0
                // This is an optional debugging mode.  If the #if 0 above is changed to #if 1,
                // every barrier we remove for stores to GC ref fields of a retbuff use a special
                // helper that asserts that the target is not in the heap.
#ifdef DEBUG
                return WBF_NoBarrier_CheckNotHeapInDebug;
#else
                return WBF_NoBarrier;
#endif
#else  // 0
                return GCInfo::WBF_NoBarrier;
#endif // 0
            }
        }
    }
    if (tgtAddr->TypeGet() == TYP_REF)
    {
        return GCInfo::WBF_BarrierUnchecked;
    }
    // Otherwise, we have no information.
    return GCInfo::WBF_BarrierUnknown;
}
//------------------------------------------------------------------------
// RewriteNode: Post-order tree-walk callback that rewrites a single HIR
// node into its LIR form: removes list/comma/nop/argplace scaffolding,
// rewrites assignments and addresses, and fixes up SIMD-related nodes.
//
// Arguments:
//    useEdge     - pointer to the parent's use edge for this node
//    parentStack - ancestor stack; Index(1) is the immediate parent
//
// Return Value:
//    Compiler::WALK_CONTINUE always.
//
Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, ArrayStack<GenTree*>& parentStack)
{
    assert(useEdge != nullptr);

    GenTree* node = *useEdge;
    assert(node != nullptr);

#ifdef DEBUG
    const bool isLateArg = (node->gtFlags & GTF_LATE_ARG) != 0;
#endif

    // First, remove any preceeding list nodes, which are not otherwise visited by the tree walk.
    //
    // NOTE: GT_FIELD_LIST head nodes, and GT_LIST nodes used by phi nodes will in fact be visited.
    for (GenTree* prev = node->gtPrev; prev != nullptr && prev->OperIsAnyList() && !(prev->OperIsFieldListHead());
         prev = node->gtPrev)
    {
        BlockRange().Remove(prev);
    }

    // In addition, remove the current node if it is a GT_LIST node that is not an aggregate.
    if (node->OperIsAnyList())
    {
        GenTreeArgList* list = node->AsArgList();
        if (!list->OperIsFieldListHead())
        {
            BlockRange().Remove(list);
        }
        return Compiler::WALK_CONTINUE;
    }

    // Build the use: a dummy use when this node has no parent (top-level tree).
    LIR::Use use;
    if (parentStack.Height() < 2)
    {
        use = LIR::Use::GetDummyUse(BlockRange(), *useEdge);
    }
    else
    {
        use = LIR::Use(BlockRange(), useEdge, parentStack.Index(1));
    }

    assert(node == use.Def());
    switch (node->OperGet())
    {
        case GT_ASG:
            RewriteAssignment(use);
            break;

        case GT_BOX:
            // GT_BOX at this level just passes through so get rid of it
            use.ReplaceWith(comp, node->gtGetOp1());
            BlockRange().Remove(node);
            break;

        case GT_ADDR:
            RewriteAddress(use);
            break;

        case GT_IND:
            // Clear the `GTF_IND_ASG_LHS` flag, which overlaps with `GTF_IND_REQ_ADDR_IN_REG`.
            node->gtFlags &= ~GTF_IND_ASG_LHS;

            if (varTypeIsSIMD(node))
            {
                RewriteSIMDOperand(use, false);
            }
            else
            {
                // Due to promotion of structs containing fields of type struct with a
                // single scalar type field, we could potentially see IR nodes of the
                // form GT_IND(GT_ADD(lclvarAddr, 0)) where 0 is an offset representing
                // a field-seq. These get folded here.
                //
                // TODO: This code can be removed once JIT implements recursive struct
                // promotion instead of lying about the type of struct field as the type
                // of its single scalar field.
                GenTree* addr = node->AsIndir()->Addr();
                if (addr->OperGet() == GT_ADD && addr->gtGetOp1()->OperGet() == GT_LCL_VAR_ADDR &&
                    addr->gtGetOp2()->IsIntegralConst(0))
                {
                    GenTreeLclVarCommon* lclVarNode = addr->gtGetOp1()->AsLclVarCommon();
                    unsigned             lclNum     = lclVarNode->GetLclNum();
                    LclVarDsc*           varDsc     = comp->lvaTable + lclNum;
                    // Only fold when the IND's type matches the local's — otherwise
                    // this is a reinterpreting load that must stay an indirection.
                    if (node->TypeGet() == varDsc->TypeGet())
                    {
                        JITDUMP("Rewriting GT_IND(GT_ADD(LCL_VAR_ADDR,0)) to LCL_VAR\n");
                        lclVarNode->SetOper(GT_LCL_VAR);
                        lclVarNode->gtType = node->TypeGet();
                        use.ReplaceWith(comp, lclVarNode);
                        BlockRange().Remove(addr);
                        BlockRange().Remove(addr->gtGetOp2());
                        BlockRange().Remove(node);
                    }
                }
            }
            break;

        case GT_NOP:
            // fgMorph sometimes inserts NOP nodes between defs and uses
            // supposedly 'to prevent constant folding'. In this case, remove the
            // NOP.
            if (node->gtGetOp1() != nullptr)
            {
                use.ReplaceWith(comp, node->gtGetOp1());
                BlockRange().Remove(node);
            }
            break;

        case GT_COMMA:
        {
            GenTree* op1 = node->gtGetOp1();
            if ((op1->gtFlags & GTF_ALL_EFFECT) == 0)
            {
                // The LHS has no side effects. Remove it.
                bool               isClosed    = false;
                unsigned           sideEffects = 0;
                LIR::ReadOnlyRange lhsRange    = BlockRange().GetTreeRange(op1, &isClosed, &sideEffects);

                // None of the transforms performed herein violate tree order, so these
                // should always be true.
                assert(isClosed);
                assert((sideEffects & GTF_ALL_EFFECT) == 0);

                BlockRange().Delete(comp, m_block, std::move(lhsRange));
            }

            GenTree* replacement = node->gtGetOp2();
            if (!use.IsDummyUse())
            {
                use.ReplaceWith(comp, replacement);
            }
            else
            {
                // This is a top-level comma. If the RHS has no side effects we can remove
                // it as well.
                if ((replacement->gtFlags & GTF_ALL_EFFECT) == 0)
                {
                    bool               isClosed    = false;
                    unsigned           sideEffects = 0;
                    LIR::ReadOnlyRange rhsRange    = BlockRange().GetTreeRange(replacement, &isClosed, &sideEffects);

                    // None of the transforms performed herein violate tree order, so these
                    // should always be true.
                    assert(isClosed);
                    assert((sideEffects & GTF_ALL_EFFECT) == 0);

                    BlockRange().Delete(comp, m_block, std::move(rhsRange));
                }
            }

            BlockRange().Remove(node);
        }
        break;

        case GT_ARGPLACE:
            // Remove argplace and list nodes from the execution order.
            //
            // TODO: remove phi args and phi nodes as well?
            BlockRange().Remove(node);
            break;

#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM_)
        case GT_CLS_VAR:
        {
            // Class vars that are the target of an assignment will get rewritten into
            // GT_STOREIND(GT_CLS_VAR_ADDR, val) by RewriteAssignment. This check is
            // not strictly necessary--the GT_IND(GT_CLS_VAR_ADDR) pattern that would
            // otherwise be generated would also be picked up by RewriteAssignment--but
            // skipping the rewrite here saves an allocation and a bit of extra work.
            const bool isLHSOfAssignment = (use.User()->OperGet() == GT_ASG) && (use.User()->gtGetOp1() == node);
            if (!isLHSOfAssignment)
            {
                GenTree* ind = comp->gtNewOperNode(GT_IND, node->TypeGet(), node);

                node->SetOper(GT_CLS_VAR_ADDR);
                node->gtType = TYP_BYREF;

                BlockRange().InsertAfter(node, ind);
                use.ReplaceWith(comp, ind);

                // TODO: JIT dump
            }
        }
        break;
#endif // _TARGET_XARCH_

        case GT_INTRINSIC:
            // Non-target intrinsics should have already been rewritten back into user calls.
            assert(Compiler::IsTargetIntrinsic(node->gtIntrinsic.gtIntrinsicId));
            break;

#ifdef FEATURE_SIMD
        case GT_BLK:
        case GT_OBJ:
        {
            // TODO-1stClassStructs: These should have been transformed to GT_INDs, but in order
            // to preserve existing behavior, we will keep this as a block node if this is the
            // lhs of a block assignment, and either:
            // - It is a "generic" TYP_STRUCT assignment, OR
            // - It is an initblk, OR
            // - Neither the lhs or rhs are known to be of SIMD type.
            GenTree* parent  = use.User();
            bool     keepBlk = false;
            if ((parent->OperGet() == GT_ASG) && (node == parent->gtGetOp1()))
            {
                if ((node->TypeGet() == TYP_STRUCT) || parent->OperIsInitBlkOp())
                {
                    keepBlk = true;
                }
                else if (!comp->isAddrOfSIMDType(node->AsBlk()->Addr()))
                {
                    GenTree* dataSrc = parent->gtGetOp2();
                    if (!dataSrc->IsLocal() && (dataSrc->OperGet() != GT_SIMD))
                    {
                        noway_assert(dataSrc->OperIsIndir());
                        keepBlk = !comp->isAddrOfSIMDType(dataSrc->AsIndir()->Addr());
                    }
                }
            }
            RewriteSIMDOperand(use, keepBlk);
        }
        break;

        case GT_LCL_FLD:
        case GT_STORE_LCL_FLD:
            // TODO-1stClassStructs: Eliminate this.
            FixupIfSIMDLocal(node->AsLclVarCommon());
            break;

        case GT_SIMD:
        {
            noway_assert(comp->featureSIMD);
            GenTreeSIMD* simdNode = node->AsSIMD();
            unsigned     simdSize = simdNode->gtSIMDSize;
            var_types    simdType = comp->getSIMDTypeForSize(simdSize);

            // TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
            // structs that are passed or returned in a different register type than their enregistered
            // type(s).
            if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
            {
                // This happens when it is consumed by a GT_RET_EXPR.
                // It can only be a Vector2f or Vector2i.
                assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
                simdNode->gtType = TYP_SIMD8;
            }

            // Certain SIMD trees require rationalizing.
            if (simdNode->gtSIMD.gtSIMDIntrinsicID == SIMDIntrinsicInitArray)
            {
                // Rewrite this as an explicit load.
                JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
                unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
                // Address = array + (index scaled by element size) + offset of the element data.
                GenTree*     address      = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
                                                                           baseTypeSize, offsetof(CORINFO_Array, u1Elems));
                GenTree*     ind          = comp->gtNewOperNode(GT_IND, simdType, address);

                BlockRange().InsertBefore(simdNode, address, ind);
                use.ReplaceWith(comp, ind);
                BlockRange().Remove(simdNode);

                DISPTREERANGE(BlockRange(), use.Def());
                JITDUMP("\n");
            }
            else
            {
                // This code depends on the fact that NONE of the SIMD intrinsics take vector operands
                // of a different width.  If that assumption changes, we will EITHER have to make these type
                // transformations during importation, and plumb the types all the way through the JIT,
                // OR add a lot of special handling here.
                GenTree* op1 = simdNode->gtGetOp1();
                if (op1 != nullptr && op1->gtType == TYP_STRUCT)
                {
                    op1->gtType = simdType;
                }

                GenTree* op2 = simdNode->gtGetOp2IfPresent();
                if (op2 != nullptr && op2->gtType == TYP_STRUCT)
                {
                    op2->gtType = simdType;
                }
            }
        }
        break;
#endif // FEATURE_SIMD

        default:
            // JCC nodes should not be present in HIR.
            assert(node->OperGet() != GT_JCC);
            break;
    }

    // Do some extra processing on top-level nodes to remove unused local reads.
    if (node->OperIsLocalRead())
    {
        if (use.IsDummyUse())
        {
            // An unused local read: drop its ref count and delete it.
            comp->lvaDecRefCnts(node);
            BlockRange().Remove(node);
        }
        else
        {
            // Local reads are side-effect-free; clear any flags leftover from frontend transformations.
            node->gtFlags &= ~GTF_ALL_EFFECT;
        }
    }

    assert(isLateArg == ((use.Def()->gtFlags & GTF_LATE_ARG) != 0));

    return Compiler::WALK_CONTINUE;
}