//------------------------------------------------------------------------
// TreeNodeInfoInitGCWriteBarrier: Set the register requirements for a store
// that must be performed via a GC write-barrier JIT helper call.
//
// Arguments:
//    tree - the store node; gtOp1 is the destination address and gtOp2 is
//           the value being stored.
//
// Notes:
//    Fix: the original declared `GenTreePtr dst = tree;` and never used it;
//    the dead local has been removed. No behavioral change.
//
void Lowering::TreeNodeInfoInitGCWriteBarrier(GenTree* tree)
{
    GenTreePtr addr = tree->gtOp.gtOp1;
    GenTreePtr src  = tree->gtOp.gtOp2;

    if (addr->OperGet() == GT_LEA)
    {
        // In the case where we are doing a helper assignment, if the dst
        // is an indir through an lea, we need to actually instantiate the
        // lea in a register (so give the LEA a source per non-null component
        // and one destination register).
        GenTreeAddrMode* lea = addr->AsAddrMode();

        short leaSrcCount = 0;
        if (lea->Base() != nullptr)
        {
            leaSrcCount++;
        }
        if (lea->Index() != nullptr)
        {
            leaSrcCount++;
        }
        lea->gtLsraInfo.srcCount = leaSrcCount;
        lea->gtLsraInfo.dstCount = 1;
    }

#if NOGC_WRITE_BARRIERS
    NYI_ARM("NOGC_WRITE_BARRIERS");

    // For the NOGC JIT Helper calls
    //
    // the 'addr' goes into x14 (REG_WRITE_BARRIER_DST_BYREF)
    // the 'src'  goes into x15 (REG_WRITE_BARRIER)
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER_DST_BYREF);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_WRITE_BARRIER);
#else
    // For the standard JIT Helper calls
    // op1 goes into REG_ARG_0 and
    // op2 goes into REG_ARG_1
    //
    addr->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_0);
    src->gtLsraInfo.setSrcCandidates(m_lsra, RBM_ARG_1);
#endif // NOGC_WRITE_BARRIERS

    // Both src and dst must reside in a register, which they should since we haven't set
    // either of them as contained.
    assert(addr->gtLsraInfo.dstCount == 1);
    assert(src->gtLsraInfo.dstCount == 1);
}
//------------------------------------------------------------------------
// gcWriteBarrierFormFromTargetAddress: Classify the write-barrier form
// needed for a store through the address expression 'tgtAddr'.
//
// Arguments:
//    tgtAddr - the address tree being stored through (TYP_I_IMPL or TYP_BYREF).
//
// Return Value:
//    WBF_NoBarrier         - target is provably on the stack (local, retbuff, etc.)
//    WBF_BarrierUnchecked  - target is provably a TYP_REF (managed heap object)
//    WBF_BarrierChecked    - target came from a native int; must be range-checked
//    WBF_BarrierUnknown    - no information could be derived
//
// Notes:
//    Iteratively strips ADDR(IND(...)), ADD, and LEA wrappers to follow the
//    address back to its GC-typed source before classifying it.
//
GCInfo::WriteBarrierForm GCInfo::gcWriteBarrierFormFromTargetAddress(GenTreePtr tgtAddr)
{
    GCInfo::WriteBarrierForm result = GCInfo::WBF_BarrierUnknown; // Default case, we have no information.

    // If we store through an int to a GC_REF field, we'll assume that needs to use a checked barriers.
    if (tgtAddr->TypeGet() == TYP_I_IMPL)
    {
        return GCInfo::WBF_BarrierChecked; // Why isn't this GCInfo::WBF_BarrierUnknown?
    }

    // Otherwise...
    assert(tgtAddr->TypeGet() == TYP_BYREF);

    // Repeatedly simplify the address expression until no rewriting step fires;
    // each pass may expose another strippable wrapper underneath.
    bool simplifiedExpr = true;
    while (simplifiedExpr)
    {
        simplifiedExpr = false;

        tgtAddr = tgtAddr->gtSkipReloadOrCopy();

        // ADDR(IND(x)) is just x as an address: look through the pair.
        while (tgtAddr->OperGet() == GT_ADDR && tgtAddr->gtOp.gtOp1->OperGet() == GT_IND)
        {
            tgtAddr        = tgtAddr->gtOp.gtOp1->gtOp.gtOp1;
            simplifiedExpr = true;
            assert(tgtAddr->TypeGet() == TYP_BYREF);
        }
        // For additions, one of the operands is a byref or a ref (and the other is not). Follow this down to its
        // source.
        while (tgtAddr->OperGet() == GT_ADD || tgtAddr->OperGet() == GT_LEA)
        {
            if (tgtAddr->OperGet() == GT_ADD)
            {
                if (tgtAddr->gtOp.gtOp1->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp1->TypeGet() == TYP_REF)
                {
                    // At most one GC-typed operand per ADD is expected.
                    assert(!(tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF));
                    tgtAddr        = tgtAddr->gtOp.gtOp1;
                    simplifiedExpr = true;
                }
                else if (tgtAddr->gtOp.gtOp2->TypeGet() == TYP_BYREF || tgtAddr->gtOp.gtOp2->TypeGet() == TYP_REF)
                {
                    tgtAddr        = tgtAddr->gtOp.gtOp2;
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int. For example:
                    // const int 0
                    // + byref
                    //   lclVar int V06 loc5  // this is a local declared "valuetype VType*"
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
            else
            {
                // Must be an LEA (i.e., an AddrMode)
                assert(tgtAddr->OperGet() == GT_LEA);
                tgtAddr = tgtAddr->AsAddrMode()->Base();
                if (tgtAddr->TypeGet() == TYP_BYREF || tgtAddr->TypeGet() == TYP_REF)
                {
                    simplifiedExpr = true;
                }
                else
                {
                    // We might have a native int.
                    return GCInfo::WBF_BarrierUnknown;
                }
            }
        }
    }

    if (tgtAddr->IsLocalAddrExpr() != nullptr)
    {
        // No need for a GC barrier when writing to a local variable.
        return GCInfo::WBF_NoBarrier;
    }
    if (tgtAddr->OperGet() == GT_LCL_VAR || tgtAddr->OperGet() == GT_REG_VAR)
    {
        unsigned lclNum = 0;
        if (tgtAddr->gtOper == GT_LCL_VAR)
        {
            lclNum = tgtAddr->gtLclVar.gtLclNum;
        }
        else
        {
            assert(tgtAddr->gtOper == GT_REG_VAR);
            lclNum = tgtAddr->gtRegVar.gtLclNum;
        }

        LclVarDsc* varDsc = &compiler->lvaTable[lclNum];

        // Instead of marking LclVar with 'lvStackByref',
        // Consider decomposing the Value Number given to this LclVar to see if it was
        // created using a GT_ADDR(GT_LCLVAR) or a GT_ADD( GT_ADDR(GT_LCLVAR), Constant)

        // We may have an internal compiler temp created in fgMorphCopyBlock() that we know
        // points at one of our stack local variables, it will have lvStackByref set to true.
        //
        if (varDsc->lvStackByref)
        {
            assert(varDsc->TypeGet() == TYP_BYREF);
            return GCInfo::WBF_NoBarrier;
        }

        // We don't eliminate for inlined methods, where we (can) know where the "retBuff" points.
        if (!compiler->compIsForInlining() && lclNum == compiler->info.compRetBuffArg)
        {
            assert(compiler->info.compRetType == TYP_STRUCT); // Else shouldn't have a ret buff.

            // Are we assured that the ret buff pointer points into the stack of a caller?
            if (compiler->info.compRetBuffDefStack)
            {
#if 0
                // This is an optional debugging mode. If the #if 0 above is changed to #if 1,
                // every barrier we remove for stores to GC ref fields of a retbuff use a special
                // helper that asserts that the target is not in the heap.
#ifdef DEBUG
                return WBF_NoBarrier_CheckNotHeapInDebug;
#else
                return WBF_NoBarrier;
#endif
#else // 0
                return GCInfo::WBF_NoBarrier;
#endif // 0
            }
        }
    }
    if (tgtAddr->TypeGet() == TYP_REF)
    {
        // A TYP_REF address is the start of a heap object: barrier needed, but
        // no range check is required.
        return GCInfo::WBF_BarrierUnchecked;
    }
    // Otherwise, we have no information.
    return GCInfo::WBF_BarrierUnknown;
}
//------------------------------------------------------------------------
// TreeNodeInfoInitIndir: Specify register requirements for address expression
// of an indirection operation.
//
// Arguments:
//    indirTree - GT_IND, GT_STOREIND, block node or GT_NULLCHECK gentree node
//
// Notes:
//    Attempts to fold the address computation into the indirection (marking
//    the address nodes contained) and adjusts the indirection's srcCount and
//    internal-register requirements accordingly.
//
void Lowering::TreeNodeInfoInitIndir(GenTreePtr indirTree)
{
    assert(indirTree->OperIsIndir());
    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
    // it has no register requirements.
    if (indirTree->TypeGet() == TYP_STRUCT)
    {
        return;
    }

    GenTreePtr    addr = indirTree->gtGetOp1();
    TreeNodeInfo* info = &(indirTree->gtLsraInfo);

    GenTreePtr base  = nullptr;
    GenTreePtr index = nullptr;
    unsigned   cns   = 0;
    unsigned   mul;
    bool       rev;
    bool       modifiedSources = false;
    bool       makeContained   = true;

    if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirTree, addr))
    {
        // Case 1: the address is already an LEA (AddrMode) that is safe to fold.
        GenTreeAddrMode* lea = addr->AsAddrMode();
        base  = lea->Base();
        index = lea->Index();
        cns   = lea->gtOffset;

#ifdef _TARGET_ARM_
        // ARM floating-point load/store doesn't support a form similar to integer
        // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
        // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
        if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
        {
            if (indirTree->OperGet() == GT_STOREIND)
            {
                if (varTypeIsFloating(indirTree->AsStoreInd()->Data()))
                {
                    makeContained = false;
                }
            }
            else if (indirTree->OperGet() == GT_IND)
            {
                if (varTypeIsFloating(indirTree))
                {
                    makeContained = false;
                }
            }
        }
#endif

        if (makeContained)
        {
            m_lsra->clearOperandCounts(addr);
            addr->SetContained();
            // The srcCount is decremented because addr is now "contained",
            // then we account for the base and index below, if they are non-null.
            info->srcCount--;
        }
    }
    else if (comp->codeGen->genCreateAddrMode(addr, -1, true, 0, &rev, &base, &index, &mul, &cns, true /*nogen*/) &&
             !(modifiedSources = AreSourcesPossiblyModifiedLocals(indirTree, base, index)))
    {
        // Case 2: an addressing mode can be constructed from the address tree.
        // An addressing mode will be constructed that may cause some
        // nodes to not need a register, and cause others' lifetimes to be extended
        // to the GT_IND or even its parent if it's an assignment
        assert(base != addr);
        m_lsra->clearOperandCounts(addr);
        addr->SetContained();

        // Traverse the computation below GT_IND to find the operands
        // for the addressing mode, marking the various constants and
        // intermediate results as not consuming/producing.
        // If the traversal were more complex, we might consider using
        // a traversal function, but the addressing mode is only made
        // up of simple arithmetic operators, and the code generator
        // only traverses one leg of each node.
        bool       foundBase  = (base == nullptr);
        bool       foundIndex = (index == nullptr);
        GenTreePtr nextChild  = nullptr;
        for (GenTreePtr child = addr; child != nullptr && !child->OperIsLeaf(); child = nextChild)
        {
            nextChild      = nullptr;
            GenTreePtr op1 = child->gtOp.gtOp1;
            GenTreePtr op2 = (child->OperIsBinary()) ? child->gtOp.gtOp2 : nullptr;

            if (op1 == base)
            {
                foundBase = true;
            }
            else if (op1 == index)
            {
                foundIndex = true;
            }
            else
            {
                // op1 is an intermediate node consumed by the address mode:
                // contain it and continue the traversal into it if it's not a leaf.
                m_lsra->clearOperandCounts(op1);
                op1->SetContained();
                if (!op1->OperIsLeaf())
                {
                    nextChild = op1;
                }
            }

            if (op2 != nullptr)
            {
                if (op2 == base)
                {
                    foundBase = true;
                }
                else if (op2 == index)
                {
                    foundIndex = true;
                }
                else
                {
                    m_lsra->clearOperandCounts(op2);
                    op2->SetContained();
                    if (!op2->OperIsLeaf())
                    {
                        // Only one interior (non-leaf) child per node is expected;
                        // the traversal descends one leg at a time.
                        assert(nextChild == nullptr);
                        nextChild = op2;
                    }
                }
            }
        }
        assert(foundBase && foundIndex);
        info->srcCount--; // it gets incremented below.
    }
    else if (addr->gtOper == GT_ARR_ELEM)
    {
        // Case 3: array element address.
        // The GT_ARR_ELEM consumes all the indices and produces the offset.
        // The array object lives until the mem access.
        // We also consume the target register to which the address is
        // computed
        info->srcCount++;
        assert(addr->gtLsraInfo.srcCount >= 2);
        addr->gtLsraInfo.srcCount -= 1;
    }
    else
    {
        // Case 4: it is nothing but a plain indir
        info->srcCount--; // base gets added in below
        base = addr;
    }

    if (!makeContained)
    {
        // The LEA stays un-contained (ARM FP case above); no further adjustment.
        return;
    }

    // Re-account for the base and index, which now feed the indirection directly.
    if (base != nullptr)
    {
        info->srcCount++;
    }

    if (index != nullptr && !modifiedSources)
    {
        info->srcCount++;
    }

    // On ARM we may need a single internal register
    // (when both conditions are true then we still only need a single internal register)
    if ((index != nullptr) && (cns != 0))
    {
        // ARM does not support both Index and offset so we need an internal register
        info->internalIntCount = 1;
    }
    else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree)))
    {
        // This offset can't be contained in the ldr/str instruction, so we need an internal register
        info->internalIntCount = 1;
    }
}
//------------------------------------------------------------------------
// gcIsWriteBarrierCandidate: Determine whether storing 'assignVal' into the
// location 'tgt' requires a GC write barrier, and if so which form.
//
// Arguments:
//    tgt       - the store destination tree
//    assignVal - the value being stored
//
// Return Value:
//    WBF_NoBarrier when no barrier is needed (non-GC type, null store, or
//    provably-stack target); otherwise the barrier form derived from the
//    target address. Always WBF_NoBarrier when FEATURE_WRITE_BARRIER is off.
//
GCInfo::WriteBarrierForm GCInfo::gcIsWriteBarrierCandidate(GenTreePtr tgt, GenTreePtr assignVal)
{
#if FEATURE_WRITE_BARRIER

    /* Are we storing a GC ptr? */

    if (!varTypeIsGC(tgt->TypeGet()))
    {
        return WBF_NoBarrier;
    }

    /* Ignore any assignments of NULL */

    // 'assignVal' can be the constant Null or something else (LclVar, etc..)
    // that is known to be null via Value Numbering.
    if (assignVal->GetVN(VNK_Liberal) == ValueNumStore::VNForNull())
    {
        return WBF_NoBarrier;
    }

    // Also catch a literal zero/null constant directly.
    if (assignVal->gtOper == GT_CNS_INT && assignVal->gtIntCon.gtIconVal == 0)
    {
        return WBF_NoBarrier;
    }

    /* Where are we storing into? */

    tgt = tgt->gtEffectiveVal();

    switch (tgt->gtOper)
    {
#ifndef LEGACY_BACKEND
        case GT_STOREIND:
#endif // !LEGACY_BACKEND
        case GT_IND: /* Could be the managed heap */
            if (tgt->TypeGet() == TYP_BYREF)
            {
                // Byref values cannot be in managed heap.
                // This case occurs for Span<T>.
                return WBF_NoBarrier;
            }
            // Classify based on the address being stored through.
            return gcWriteBarrierFormFromTargetAddress(tgt->gtOp.gtOp1);

        case GT_LEA:
            return gcWriteBarrierFormFromTargetAddress(tgt->AsAddrMode()->Base());

        case GT_ARR_ELEM: /* Definitely in the managed heap */
        case GT_CLS_VAR:
            return WBF_BarrierUnchecked;

        case GT_REG_VAR: /* Definitely not in the managed heap */
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_STORE_LCL_VAR:
        case GT_STORE_LCL_FLD:
            return WBF_NoBarrier;

        default:
            break;
    }

    assert(!"Missing case in gcIsWriteBarrierCandidate");
#endif

    return WBF_NoBarrier;
}