int LegacyPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methInfo)
{
    int callsiteSize = 55; // A direct call takes 5 native bytes; an indirect call takes 6 native bytes.

    bool hasThis = methInfo->args.hasThis();

    if (hasThis)
    {
        callsiteSize += 30; // "mov" or "lea"
    }

    CORINFO_ARG_LIST_HANDLE argLst = methInfo->args.args;
    COMP_HANDLE             comp   = m_Compiler->info.compCompHnd;

    for (unsigned i = (hasThis ? 1 : 0); i < methInfo->args.totalILArgs(); i++, argLst = comp->getArgNext(argLst))
    {
        var_types sigType = (var_types)m_Compiler->eeGetArgType(argLst, &methInfo->args);

        if (sigType == TYP_STRUCT)
        {
            typeInfo verType = m_Compiler->verParseArgSigToTypeInfo(&methInfo->args, argLst);

            /*
                IN0028: 00009B      lea     EAX, bword ptr [EBP-14H]
                IN0029: 00009E      push    dword ptr [EAX+4]
                IN002a: 0000A1      push    gword ptr [EAX]
                IN002b: 0000A3      call    [MyStruct.staticGetX2(struct):int]
            */

            callsiteSize += 10; // "lea     EAX, bword ptr [EBP-14H]"

            // NB sizeof (void*) fails to convey intent when cross-jitting.

            unsigned opsz  = (unsigned)(roundUp(comp->getClassSize(verType.GetClassHandle()), sizeof(void*)));
            unsigned slots = opsz / sizeof(void*);

            callsiteSize += slots * 20; // "push    gword ptr [EAX+offs]"
        }
        else
        {
            callsiteSize += 30; // a push takes 3 bytes on average.
        }
    }

    return callsiteSize;
}
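/*
 * A worked, self-contained sketch of the arithmetic above. The constants
 * appear to be native bytes scaled by 10 (55 ~ 5.5 bytes for the call,
 * 30 ~ 3 bytes per average push) -- an assumption read off the comments,
 * not a documented unit. All names below are hypothetical stand-ins, not
 * JIT types; only the accounting mirrors the policy code.
 */
#include <cstdio>
#include <vector>

struct ToyArg
{
    bool     isStruct;
    unsigned slots; // size in pointer-sized slots (structs only)
};

static int EstimateCallsiteSize(bool hasThis, const std::vector<ToyArg>& args)
{
    int size = 55;                  // the call instruction itself
    if (hasThis)
    {
        size += 30;                 // "mov"/"lea" to load the this pointer
    }
    for (const ToyArg& arg : args)
    {
        if (arg.isStruct)
        {
            size += 10;             // "lea" to form the struct address
            size += arg.slots * 20; // one push per pointer-sized slot
        }
        else
        {
            size += 30;             // an average push
        }
    }
    return size;
}

int main()
{
    // Instance call passing one int and one two-slot struct:
    // 55 + 30 + 30 + (10 + 2 * 20) = 165, i.e. ~16.5 estimated bytes.
    printf("%d\n", EstimateCallsiteSize(true, { { false, 1 }, { true, 2 } }));
}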
static BOOL tiCompatibleWithByRef(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent)
{
    assert(parent.IsByRef());

    if (!child.IsByRef())
    {
        return FALSE;
    }

    if (child.IsReadonlyByRef() && !parent.IsReadonlyByRef())
    {
        return FALSE;
    }

    // Byrefs are compatible if the underlying types are equivalent
    typeInfo childTarget  = ::DereferenceByRef(child);
    typeInfo parentTarget = ::DereferenceByRef(parent);

    if (typeInfo::AreEquivalent(childTarget, parentTarget))
    {
        return TRUE;
    }

    // Make sure that both types have a valid m_cls
    if ((childTarget.IsType(TI_REF) || childTarget.IsType(TI_STRUCT)) &&
        (parentTarget.IsType(TI_REF) || parentTarget.IsType(TI_STRUCT)))
    {
        return CompHnd->areTypesEquivalent(childTarget.GetClassHandle(), parentTarget.GetClassHandle());
    }

    return FALSE;
}
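/*
 * A self-contained sketch of the byref rule above, using a toy type
 * representation (the names here are illustrative, not JIT types): a
 * child byref is compatible with a parent byref only if it does not
 * drop the readonly restriction and the pointed-at types match.
 */
#include <cassert>
#include <string>

struct ToyByRef
{
    bool        isByRef;
    bool        isReadonly;
    std::string target; // name of the pointed-at type
};

static bool CompatibleWithByRef(const ToyByRef& child, const ToyByRef& parent)
{
    assert(parent.isByRef);

    if (!child.isByRef)
    {
        return false;
    }
    // A readonly byref may not stand in for a mutable one.
    if (child.isReadonly && !parent.isReadonly)
    {
        return false;
    }
    // Byrefs are invariant: byref(Derived) is *not* compatible with
    // byref(Base), since a callee could write a Base into the Derived
    // location. Only equivalent targets are compatible.
    return child.target == parent.target;
}

int main()
{
    assert(CompatibleWithByRef({ true, true, "int" }, { true, true, "int" }));
    assert(!CompatibleWithByRef({ true, true, "int" }, { true, false, "int" }));
    assert(!CompatibleWithByRef({ true, false, "Derived" }, { true, false, "Base" }));
}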
void InlineResult::report()
{
    // The user may have suppressed reporting via setReported(). If so, do nothing.
    if (inlReported)
    {
        return;
    }

    inlReported = true;

#ifdef DEBUG

    // Optionally dump the result
    if (VERBOSE)
    {
        const char* format = "INLINER: during '%s' result '%s' reason '%s' for '%s' calling '%s'\n";
        const char* caller = (inlCaller == nullptr) ? "n/a" : inlCompiler->eeGetMethodFullName(inlCaller);
        const char* callee = (inlCallee == nullptr) ? "n/a" : inlCompiler->eeGetMethodFullName(inlCallee);

        JITDUMP(format, inlContext, resultString(), reasonString(), caller, callee);
    }

    // If the inline failed, leave information on the call so we can
    // later recover which observation led to the failure.
    if (isFailure() && (inlCall != nullptr))
    {
        // The compiler should have revoked candidacy on the call by now.
        assert((inlCall->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0);

        inlCall->gtInlineObservation = static_cast<unsigned>(inlObservation);
    }

#endif // DEBUG

    if (isDecided())
    {
        const char* format = "INLINER: during '%s' result '%s' reason '%s'\n";
        JITLOG_THIS(inlCompiler, (LL_INFO100000, format, inlContext, resultString(), reasonString()));
        COMP_HANDLE comp = inlCompiler->info.compCompHnd;
        comp->reportInliningDecision(inlCaller, inlCallee, result(), reasonString());
    }
}
void InlineResult::Report()
{
    // If we weren't actually inlining, the user may have suppressed
    // reporting via setReported(). If so, do nothing.
    if (m_Reported)
    {
        return;
    }

    m_Reported = true;

#ifdef DEBUG
    const char* callee = nullptr;

    // Optionally dump the result
    if (VERBOSE)
    {
        const char* format = "INLINER: during '%s' result '%s' reason '%s' for '%s' calling '%s'\n";
        const char* caller = (m_Caller == nullptr) ? "n/a" : m_RootCompiler->eeGetMethodFullName(m_Caller);

        callee = (m_Callee == nullptr) ? "n/a" : m_RootCompiler->eeGetMethodFullName(m_Callee);

        JITDUMP(format, m_Context, ResultString(), ReasonString(), caller, callee);
    }

    // If the inline failed, leave information on the call so we can
    // later recover which observation led to the failure.
    if (IsFailure() && (m_Call != nullptr))
    {
        // The compiler should have revoked candidacy on the call by now.
        assert((m_Call->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0);

        m_Call->gtInlineObservation = m_Policy->GetObservation();
    }

#endif // DEBUG

    // Was the result NEVER? If so, we might want to propagate this to
    // the runtime.
    if (IsNever() && m_Policy->PropagateNeverToRuntime())
    {
        // If we know the callee, and if the observation that got us
        // to this Never inline state is something *other* than
        // IS_NOINLINE, then we've uncovered a reason why this method
        // can't ever be inlined. Update the callee method attributes
        // so that future inline attempts for this callee fail faster.

        InlineObservation obs = m_Policy->GetObservation();

        if ((m_Callee != nullptr) && (obs != InlineObservation::CALLEE_IS_NOINLINE))
        {

#ifdef DEBUG

            if (VERBOSE)
            {
                const char* obsString = InlGetObservationString(obs);
                JITDUMP("\nINLINER: Marking %s as NOINLINE because of %s\n", callee, obsString);
            }

#endif // DEBUG

            COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
            comp->setMethodAttribs(m_Callee, CORINFO_FLG_BAD_INLINEE);
        }
    }

    if (IsDecided())
    {
        const char* format = "INLINER: during '%s' result '%s' reason '%s'\n";
        JITLOG_THIS(m_RootCompiler, (LL_INFO100000, format, m_Context, ResultString(), ReasonString()));
        COMP_HANDLE comp = m_RootCompiler->info.compCompHnd;
        comp->reportInliningDecision(m_Caller, m_Callee, Result(), ReasonString());
    }
}
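/*
 * A minimal sketch of the report-once / propagate-NEVER pattern above,
 * with plain stand-ins for the compiler and EE interfaces. Every name
 * in this sketch is hypothetical; only the control flow mirrors Report().
 */
#include <cstdio>

struct ToyInlineResult
{
    bool reported  = false;
    bool isNever   = false;
    bool propagate = false;

    void Report()
    {
        if (reported) // idempotent: the first call wins, later calls are no-ops
        {
            return;
        }
        reported = true;

        if (isNever && propagate)
        {
            // Stands in for comp->setMethodAttribs(callee, CORINFO_FLG_BAD_INLINEE):
            // future inline attempts for this callee then fail without re-analysis.
            printf("marking callee as never-inline\n");
        }
    }
};

int main()
{
    ToyInlineResult r;
    r.isNever   = true;
    r.propagate = true;
    r.Report(); // marks the callee
    r.Report(); // no-op
}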
BOOL typeInfo::tiMergeToCommonParent(COMP_HANDLE CompHnd, typeInfo* pDest, const typeInfo* pSrc, bool* changed)
{
    assert(pSrc->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pSrc), *pSrc));
    assert(pDest->IsDead() || typeInfo::AreEquivalent(::NormaliseForStack(*pDest), *pDest));

    // Merge the auxiliary information like "this" pointer tracking, etc...

    // Remember the pre-state, so we can tell if it changed.
    *changed              = false;
    DWORD destFlagsBefore = pDest->m_flags;

    // This bit is only set if both pDest and pSrc have it set
    pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_THIS_PTR);

    // This bit is set if either pDest or pSrc have it set
    pDest->m_flags |= (pSrc->m_flags & TI_FLAG_UNINIT_OBJREF);

    // This bit is set if either pDest or pSrc have it set
    pDest->m_flags |= (pSrc->m_flags & TI_FLAG_BYREF_READONLY);

    // If the byref wasn't a permanent home on both sides, the merge won't have the bit set
    pDest->m_flags &= (pSrc->m_flags | ~TI_FLAG_BYREF_PERMANENT_HOME);

    if (pDest->m_flags != destFlagsBefore)
    {
        *changed = true;
    }

    // OK, the main event. Merge the main types.
    if (typeInfo::AreEquivalent(*pDest, *pSrc))
    {
        return TRUE;
    }

    if (pDest->IsUnboxedGenericTypeVar() || pSrc->IsUnboxedGenericTypeVar())
    {
        // Should have had *pDest == *pSrc
        goto FAIL;
    }

    if (pDest->IsType(TI_REF))
    {
        if (pSrc->IsType(TI_NULL))
        {
            // NULL can be any reference type
            return TRUE;
        }
        if (!pSrc->IsType(TI_REF))
        {
            goto FAIL;
        }

        // Ask the EE to find the common parent. This always succeeds, since System.Object always works.
        CORINFO_CLASS_HANDLE pDestClsBefore = pDest->m_cls;
        pDest->m_cls = CompHnd->mergeClasses(pDest->GetClassHandle(), pSrc->GetClassHandle());
        if (pDestClsBefore != pDest->m_cls)
        {
            *changed = true;
        }
        return TRUE;
    }
    else if (pDest->IsType(TI_NULL))
    {
        if (pSrc->IsType(TI_REF)) // NULL can be any reference type
        {
            *pDest   = *pSrc;
            *changed = true;
            return TRUE;
        }
        goto FAIL;
    }
    else if (pDest->IsType(TI_STRUCT))
    {
        if (pSrc->IsType(TI_STRUCT) && CompHnd->areTypesEquivalent(pDest->GetClassHandle(), pSrc->GetClassHandle()))
        {
            return TRUE;
        }
        goto FAIL;
    }
    else if (pDest->IsByRef())
    {
        return tiCompatibleWithByRef(CompHnd, *pSrc, *pDest);
    }

#ifdef _TARGET_64BIT_
    // On 64-bit targets we have a precise representation for native int, so these rules
    // represent the fact that the ECMA spec permits the implicit conversion
    // between an int32 and a native int.
    else if (typeInfo::AreEquivalent(*pDest, typeInfo::nativeInt()) && pSrc->IsType(TI_INT))
    {
        return TRUE;
    }
    else if (typeInfo::AreEquivalent(*pSrc, typeInfo::nativeInt()) && pDest->IsType(TI_INT))
    {
        *pDest   = *pSrc;
        *changed = true;
        return TRUE;
    }
#endif // _TARGET_64BIT_

FAIL:
    *pDest = typeInfo();
    return FALSE;
}
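/*
 * A toy model of the merge lattice above, assuming a flat class hierarchy
 * where everything derives directly from Object. Names are illustrative
 * only; MergeClasses stands in for the ICorJitInfo::mergeClasses callback.
 */
#include <cstdio>
#include <string>

enum class Kind { Null, Ref, Struct, Dead };

struct ToyType
{
    Kind        kind;
    std::string cls; // class name for Ref/Struct
};

// Hypothetical stand-in for the EE's mergeClasses: walk to a common
// ancestor; here the only shared ancestor we know is "Object".
static std::string MergeClasses(const std::string& a, const std::string& b)
{
    return (a == b) ? a : "Object";
}

static ToyType MergeToCommonParent(const ToyType& dest, const ToyType& src)
{
    if (dest.kind == Kind::Ref)
    {
        if (src.kind == Kind::Null)
        {
            return dest; // null fits any reference type
        }
        if (src.kind == Kind::Ref)
        {
            return { Kind::Ref, MergeClasses(dest.cls, src.cls) };
        }
    }
    else if (dest.kind == Kind::Null && src.kind == Kind::Ref)
    {
        return src; // null widens to the incoming ref type
    }
    else if (dest.kind == Kind::Struct && src.kind == Kind::Struct && dest.cls == src.cls)
    {
        return dest; // structs merge only if equivalent
    }
    return { Kind::Dead, "" }; // incompatible: kill the cell
}

int main()
{
    ToyType merged = MergeToCommonParent({ Kind::Ref, "String" }, { Kind::Ref, "Exception" });
    printf("%s\n", merged.cls.c_str()); // prints "Object"
}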
BOOL typeInfo::tiCompatibleWith(COMP_HANDLE CompHnd, const typeInfo& child, const typeInfo& parent,
                                bool normalisedForStack)
{
    assert(child.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(child), child));
    assert(parent.IsDead() || !normalisedForStack || typeInfo::AreEquivalent(::NormaliseForStack(parent), parent));

    if (typeInfo::AreEquivalent(child, parent))
    {
        return TRUE;
    }

    if (parent.IsUnboxedGenericTypeVar() || child.IsUnboxedGenericTypeVar())
    {
        return FALSE; // need to have had child == parent
    }
    else if (parent.IsType(TI_REF))
    {
        // An uninitialized objRef is not compatible with an initialized one.
        if (child.IsUninitialisedObjRef() && !parent.IsUninitialisedObjRef())
        {
            return FALSE;
        }

        if (child.IsNullObjRef())
        {
            // NULL can be any reference type
            return TRUE;
        }
        if (!child.IsType(TI_REF))
        {
            return FALSE;
        }

        return CompHnd->canCast(child.m_cls, parent.m_cls);
    }
    else if (parent.IsType(TI_METHOD))
    {
        if (!child.IsType(TI_METHOD))
        {
            return FALSE;
        }

        // Right now we don't bother merging method handles
        return FALSE;
    }
    else if (parent.IsType(TI_STRUCT))
    {
        if (!child.IsType(TI_STRUCT))
        {
            return FALSE;
        }

        // Structures are compatible if they are equivalent
        return CompHnd->areTypesEquivalent(child.m_cls, parent.m_cls);
    }
    else if (parent.IsByRef())
    {
        return tiCompatibleWithByRef(CompHnd, child, parent);
    }

#ifdef _TARGET_64BIT_
    // On 64-bit targets we have a precise representation for native int, so these rules
    // represent the fact that the ECMA spec permits the implicit conversion
    // between an int32 and a native int.
    else if (parent.IsType(TI_INT) && typeInfo::AreEquivalent(nativeInt(), child))
    {
        return TRUE;
    }
    else if (typeInfo::AreEquivalent(nativeInt(), parent) && child.IsType(TI_INT))
    {
        return TRUE;
    }
#endif // _TARGET_64BIT_

    return FALSE;
}
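/*
 * A toy illustration of the key asymmetry above: for reference types,
 * compatibility delegates to a cast check, so it is directional --
 * String is compatible with Object, but not the reverse -- while null
 * is compatible with any reference type. The ToyCanCast stand-in below
 * hard-codes a one-level hierarchy; all names are hypothetical.
 */
#include <cassert>
#include <string>

static bool ToyCanCast(const std::string& from, const std::string& to)
{
    return (from == to) || (to == "Object"); // everything casts up to Object
}

static bool ToyCompatibleWith(const std::string& child, const std::string& parent)
{
    if (child == "null")
    {
        return true; // NULL is compatible with any reference type
    }
    return ToyCanCast(child, parent);
}

int main()
{
    assert(ToyCompatibleWith("String", "Object"));  // upcast: compatible
    assert(!ToyCompatibleWith("Object", "String")); // downcast: not compatible
    assert(ToyCompatibleWith("null", "String"));    // null fits anywhere
}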