// merge the info from 'incoming' into this record. // we finish with a union of this+incoming and special handling for collisions bool MediaAnalyticsItem::merge(MediaAnalyticsItem *incoming) { // if I don't have key or session id, take them from incoming // 'this' should never be missing both of them... if (mKey.empty()) { mKey = incoming->mKey; } else if (mSessionID == 0) { mSessionID = incoming->mSessionID; } // we always take the more recent 'finalized' value setFinalized(incoming->getFinalized()); // for each attribute from 'incoming', resolve appropriately int nattr = incoming->mPropCount; for (int i = 0 ; i < nattr; i++ ) { Prop *iprop = &incoming->mProps[i]; Prop *oprop = findProp(iprop->mName); const char *p = iprop->mName; size_t len = strlen(p); char semantic = p[len-1]; if (oprop == NULL) { // no oprop, so we insert the new one oprop = allocateProp(p); copyProp(oprop, iprop); } else { // merge iprop into oprop switch (semantic) { case '<': // first aka keep old) /* nop */ break; default: // default is 'last' case '>': // last (aka keep new) copyProp(oprop, iprop); break; case '+': /* sum */ // XXX validate numeric types, sum in place break; } } } // not sure when we'd return false... return true; }
/*
 * Runs one instruction through the pre-append optimization pipeline:
 * tracked-state pre-optimization, copy propagation, CSE, then
 * simplification.  Returns an SSATmp to use in place of `inst's dst,
 * or nullptr when `inst' should be used as-is (it may have been
 * mutated in place by preOptimize/copyProp).
 */
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst,
                                   const folly::Optional<IdomVector>& idoms) {
  // Since some of these optimizations inspect tracked state, we don't
  // perform any of them on non-main traces.
  if (m_savedTraces.size() > 0) return nullptr;

  // Debug-only nesting counter; used solely to indent FTRACE output so
  // recursive optimization passes are readable.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "{}{}\n", indent(), inst->toString());

  // First pass of tracebuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, " {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  // preOptimize may have reduced inst to a Nop; nothing further to do.
  if (inst->op() == Nop) return nullptr;

  // copy propagation on inst source operands
  copyProp(inst);

  SSATmp* result = nullptr;
  if (m_enableCse && inst->canCSE()) {
    result = cseLookup(inst, idoms);
    if (result) {
      // Found a dominating instruction that can be used instead of inst
      FTRACE(1, " {}cse found: {}\n",
             indent(), result->inst()->toString());
      assert(!inst->consumesReferences());
      if (inst->producesReference()) {
        // Replace with an IncRef
        FTRACE(1, " {}cse of refcount-producing instruction\n", indent());
        return gen(IncRef, result);
      } else {
        return result;
      }
    }
  }

  if (m_enableSimplification) {
    result = m_simplifier.simplify(inst);
    if (result) {
      // Found a simpler instruction that can be used instead of inst
      FTRACE(1, " {}simplification returned: {}\n",
             indent(), result->inst()->toString());
      assert(inst->hasDst());
      return result;
    }
  }

  return nullptr;
}
/*
 * Runs a single instruction through pre-optimization, copy propagation,
 * CSE, and simplification.  Returns a replacement SSATmp, or nullptr
 * when the (possibly mutated-in-place) instruction should be used
 * directly.
 */
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst) {
  // Debug-only nesting depth, used only to indent trace output.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "{}{}\n", indent(), inst->toString());

  // Tracked-state based pre-optimization runs first.  It may rewrite the
  // instruction in place (returning nullptr) or hand back a replacement
  // value to use instead.
  SSATmp* const preOpt = preOptimize(inst);
  if (preOpt != nullptr) {
    FTRACE(1, " {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  if (inst->op() == Nop) return nullptr;

  // Propagate copies through the source operands.
  copyProp(inst);

  if (m_enableCse && inst->canCSE()) {
    if (SSATmp* cseHit = cseLookup(inst)) {
      // A dominating, equivalent instruction already exists; reuse its dst.
      FTRACE(1, " {}cse found: {}\n", indent(), cseHit->inst()->toString());
      return cseHit;
    }
  }

  if (m_enableSimplification) {
    if (SSATmp* simplified = m_simplifier.simplify(inst)) {
      // The simplifier produced a cheaper equivalent; use it instead.
      FTRACE(1, " {}simplification returned: {}\n",
             indent(), simplified->inst()->toString());
      assert(inst->hasDst());
      return simplified;
    }
  }

  return nullptr;
}
// make a deep copy of myself MediaAnalyticsItem *MediaAnalyticsItem::dup() { MediaAnalyticsItem *dst = new MediaAnalyticsItem(this->mKey); if (dst != NULL) { // key as part of constructor dst->mPid = this->mPid; dst->mUid = this->mUid; dst->mSessionID = this->mSessionID; dst->mTimestamp = this->mTimestamp; dst->mFinalized = this->mFinalized; // properties aka attributes dst->growProps(this->mPropCount); for(size_t i=0;i<mPropCount;i++) { copyProp(&dst->mProps[i], &this->mProps[i]); } dst->mPropCount = this->mPropCount; } return dst; }
/*
 * Runs `inst' through the main-trace optimization pipeline: tracked-state
 * pre-optimization, copy propagation, simplification, then CSE.  Returns an
 * SSATmp to use in place of inst's dst, or nullptr when inst should be used
 * as-is (it may have been mutated in place).
 *
 * Note the ordering here: simplification runs *before* CSE, and a
 * simplified refcount-producing result suppresses CSE entirely (see the
 * comment in the body for why).
 */
SSATmp* IRBuilder::optimizeWork(IRInstruction* inst,
                                const folly::Optional<IdomVector>& idoms) {
  // Since some of these optimizations inspect tracked state, we don't
  // perform any of them on non-main traces.
  if (m_savedBlocks.size() > 0) return nullptr;

  // Debug-only nesting counter, used purely to indent FTRACE output.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "optimizing {}{}\n", indent(), inst->toString());

  // First pass of IRBuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, " {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  // preOptimize may have reduced inst to a Nop; nothing further to do.
  if (inst->op() == Nop) return nullptr;

  // copy propagation on inst source operands
  copyProp(inst);

  SSATmp* result = nullptr;

  if (m_enableSimplification) {
    result = m_simplifier.simplify(inst);
    if (result) {
      // From here on, operate on the instruction that defines the
      // simplified value.
      inst = result->inst();
      if (inst->producesReference(0)) {
        // This effectively prevents CSE from kicking in below, which
        // would replace the instruction with an IncRef. That is
        // correct if the simplifier morphed the instruction, but it's
        // incorrect if the simplifier returned one of original
        // instruction sources. We currently have no way to
        // distinguish the two cases, so we prevent CSE completely for
        // now.
        return result;
      }
    }
  }

  if (m_state.enableCse() && inst->canCSE()) {
    SSATmp* cseResult = m_state.cseLookup(inst, idoms);
    if (cseResult) {
      // Found a dominating instruction that can be used instead of inst
      FTRACE(1, " {}cse found: {}\n",
             indent(), cseResult->inst()->toString());
      assert(!inst->consumesReferences());
      if (inst->producesReference(0)) {
        // Replace with an IncRef
        FTRACE(1, " {}cse of refcount-producing instruction\n", indent());
        gen(IncRef, cseResult);
      }
      return cseResult;
    }
  }

  // Either the simplified value (when CSE found nothing) or nullptr.
  return result;
}
/*
 * Runs one instruction through the pre-append optimization pipeline:
 * FP-anchor insertion, tracked-state pre-optimization, copy propagation,
 * CSE, then simplification.  Returns an SSATmp to use in place of
 * `inst's dst, or nullptr when `inst' should be used as-is (it may have
 * been mutated in place).
 */
SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst,
                                   const folly::Optional<IdomVector>& idoms) {
  // Since some of these optimizations inspect tracked state, we don't
  // perform any of them on non-main traces.
  if (m_savedTraces.size() > 0) return nullptr;

  // Debug-only nesting counter, used purely to indent FTRACE output.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  FTRACE(1, "{}{}\n", indent(), inst->toString());

  // turn off ActRec optimization for instructions that will require a frame
  if (m_state.needsFPAnchor(inst)) {
    m_state.setHasFPAnchor();
    always_assert(m_state.fp() != nullptr);
    gen(InlineFPAnchor, m_state.fp());
    FTRACE(2, "Anchor for: {}\n", inst->toString());
  }

  // First pass of tracebuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, " {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  // preOptimize may have reduced inst to a Nop; nothing further to do.
  if (inst->op() == Nop) return nullptr;

  // copy propagation on inst source operands
  copyProp(inst);

  SSATmp* result = nullptr;
  if (m_state.enableCse() && inst->canCSE()) {
    result = m_state.cseLookup(inst, idoms);
    if (result) {
      // Found a dominating instruction that can be used instead of inst
      FTRACE(1, " {}cse found: {}\n",
             indent(), result->inst()->toString());
      // CheckType and AssertType are special. They're marked as both PRc
      // and CRc to placate our refcounting optimizations, but for the
      // purposes of CSE they're neither.
      if (inst->is(CheckType, AssertType)) {
        return result;
      }
      assert(!inst->consumesReferences());
      if (inst->producesReference()) {
        // Replace with an IncRef
        FTRACE(1, " {}cse of refcount-producing instruction\n", indent());
        return gen(IncRef, result);
      } else {
        return result;
      }
    }
  }

  if (m_enableSimplification) {
    result = m_simplifier.simplify(inst);
    if (result) {
      // Found a simpler instruction that can be used instead of inst
      FTRACE(1, " {}simplification returned: {}\n",
             indent(), result->inst()->toString());
      assert(inst->hasDst());
      return result;
    }
  }

  return nullptr;
}
/*
 * Performs simplification and CSE on the input instruction. If the input
 * instruction has a dest, this will return an SSATmp that represents the same
 * value as dst(0) of the input instruction. If the input instruction has no
 * dest, this will return nullptr.
 *
 * The caller never needs to clone or append; all this has been done.
 */
SSATmp* IRBuilder::optimizeInst(IRInstruction* inst, CloneFlag doClone,
                                Block* srcBlock,
                                const folly::Optional<IdomVector>& idoms) {
  // Debug-only nesting counter, used purely to indent FTRACE output.
  static DEBUG_ONLY __thread int instNest = 0;
  if (debug) ++instNest;
  SCOPE_EXIT { if (debug) --instNest; };
  DEBUG_ONLY auto indent = [&] { return std::string(instNest * 2, ' '); };

  // Try to replace cseInput with the dst of an equivalent dominating
  // instruction; returns nullptr when CSE is disabled or finds nothing.
  auto doCse = [&] (IRInstruction* cseInput) -> SSATmp* {
    if (m_state.enableCse() && cseInput->canCSE()) {
      SSATmp* cseResult = m_state.cseLookup(cseInput, srcBlock, idoms);
      if (cseResult) {
        // Found a dominating instruction that can be used instead of input
        FTRACE(1, " {}cse found: {}\n",
               indent(), cseResult->inst()->toString());
        assert(!cseInput->consumesReferences());
        if (cseInput->producesReference(0)) {
          // Replace with an IncRef
          FTRACE(1, " {}cse of refcount-producing instruction\n", indent());
          gen(IncRef, cseResult);
        }
        return cseResult;
      }
    }
    return nullptr;
  };

  // Fallback path: keep the instruction as written (after trying CSE on
  // it), cloning first when the caller requested a clone.
  auto cloneAndAppendOriginal = [&] () -> SSATmp* {
    if (inst->op() == Nop) return nullptr;
    if (auto cseResult = doCse(inst)) {
      return cseResult;
    }
    if (doClone == CloneFlag::Yes) {
      inst = m_unit.cloneInstruction(inst);
    }
    appendInstruction(inst);
    return inst->dst(0);
  };

  // Since some of these optimizations inspect tracked state, we don't
  // perform any of them on non-main traces.
  if (m_savedBlocks.size() > 0) return cloneAndAppendOriginal();

  // copy propagation on inst source operands
  copyProp(inst);

  // First pass of IRBuilder optimizations try to replace an
  // instruction based on tracked state before we do anything else.
  // May mutate the IRInstruction in place (and return nullptr) or
  // return an SSATmp*.
  if (SSATmp* preOpt = preOptimize(inst)) {
    FTRACE(1, " {}preOptimize returned: {}\n",
           indent(), preOpt->inst()->toString());
    return preOpt;
  }
  if (inst->op() == Nop) return cloneAndAppendOriginal();

  if (!m_enableSimplification) {
    return cloneAndAppendOriginal();
  }

  auto simpResult = m_simplifier.simplify(inst, shouldConstrainGuards());

  // These are the possible outputs:
  //
  // ([], nullptr): no optimization possible. Use original inst.
  //
  // ([], non-nullptr): passing through a src. Don't CSE.
  //
  // ([X, ...], Y): throw away input instruction, append 'X, ...' (CSEing
  //                as we go), return Y.

  if (!simpResult.instrs.empty()) {
    // New instructions were generated. Append the new ones, filtering out
    // Nops.
    for (auto* newInst : simpResult.instrs) {
      assert(!newInst->isTransient());
      if (newInst->op() == Nop) continue;

      auto cseResult = doCse(newInst);
      if (cseResult) {
        // A dominating copy already exists; route it into newInst's dst
        // via a Mov instead of appending newInst itself.
        appendInstruction(m_unit.mov(newInst->dst(), cseResult,
                                     newInst->marker()));
      } else {
        appendInstruction(newInst);
      }
    }
    return simpResult.dst;
  }

  // No new instructions were generated. Either simplification didn't do
  // anything, or we're using some other instruction's dst instead of our own.

  if (simpResult.dst) {
    // We're using some other instruction's output. Don't append anything,
    // and don't do any CSE.
    assert(simpResult.dst->inst() != inst);
    return simpResult.dst;
  }

  // No simplification happened.
  return cloneAndAppendOriginal();
}
/*
 * For all guard instructions in unit, check to see if we can relax the
 * destination type to something less specific. The GuardConstraints map
 * contains information about what properties of the guarded type matter for
 * each instruction. If simple is true, guards will not be relaxed past
 * DataTypeSpecific except guards which are relaxed all the way to
 * DataTypeGeneric. Returns true iff any changes were made to the trace.
 */
bool relaxGuards(IRUnit& unit, const GuardConstraints& constraints,
                 RelaxGuardsFlags flags) {
  Timer _t(Timer::optimize_relaxGuards);
  ITRACE(2, "entering relaxGuards\n");
  Indent _i;
  bool simple = flags & RelaxSimple;
  bool reflow = flags & RelaxReflow;
  splitCriticalEdges(unit);
  auto& guards = constraints.guards;
  auto blocks = rpoSortCfg(unit);
  auto changed = false;

  // First pass: relax the typeParam of every guard according to its
  // recorded constraint.
  for (auto* block : blocks) {
    for (auto& inst : *block) {
      if (!isGuardOp(inst.op())) continue;

      // Guards without a recorded constraint get the default-constructed
      // (most generic) constraint.
      auto it = guards.find(&inst);
      auto constraint = it == guards.end() ? TypeConstraint() : it->second;
      ITRACE(2, "relaxGuards processing {} with constraint {}\n",
             inst, constraint);

      // In 'simple' mode, categories strictly between DataTypeGeneric and
      // DataTypeSpecific are clamped up to DataTypeSpecific (applied to
      // both the outer and inner categories).
      auto simplifyCategory = [simple](DataTypeCategory& cat) {
        if (simple && cat > DataTypeGeneric && cat < DataTypeSpecific) {
          cat = DataTypeSpecific;
        }
      };
      simplifyCategory(constraint.category);
      simplifyCategory(constraint.innerCat);

      auto const oldType = inst.typeParam();
      auto newType = relaxType(oldType, constraint);

      if (oldType != newType) {
        ITRACE(1, "relaxGuards changing {}'s type to {}\n", inst, newType);
        inst.setTypeParam(newType);
        changed = true;
      }
    }
  }

  if (!changed) return false;
  if (!reflow) return true;

  // Make a second pass to reflow types, with some special logic for loads.
  FrameState state{unit, unit.entry()->front().marker()};
  for (auto* block : blocks) {
    ITRACE(2, "relaxGuards reflow entering B{}\n", block->id());
    Indent _i;
    state.startBlock(block, block->front().marker());

    for (auto& inst : *block) {
      state.setMarker(inst.marker());
      copyProp(&inst);
      visitLoad(&inst, state);
      retypeDests(&inst, &unit);
      state.update(&inst);
    }

    state.finishBlock(block);
  }

  return true;
}
/* * For all guard instructions in unit, check to see if we can relax the * destination type to something less specific. The GuardConstraints map * contains information about what properties of the guarded type matter for * each instruction. If simple is true, guards will not be relaxed past * DataTypeSpecific except guards which are relaxed all the way to * DataTypeGeneric. Returns true iff any changes were made to the trace. */ bool relaxGuards(IRUnit& unit, const GuardConstraints& guards, bool simple) { Timer _t("optimize_relaxGuards"); splitCriticalEdges(unit); auto blocks = rpoSortCfg(unit); auto changed = false; for (auto* block : blocks) { for (auto& inst : *block) { if (!isGuardOp(inst.op())) continue; auto it = guards.find(&inst); auto constraint = it == guards.end() ? TypeConstraint() : it->second; FTRACE(2, "relaxGuards processing {} with constraint {}\n", inst, constraint); if (simple && constraint.category > DataTypeGeneric && constraint.category < DataTypeSpecific) { constraint.category = DataTypeSpecific; } auto const oldType = inst.typeParam(); auto newType = relaxType(oldType, constraint); // Sometimes we (legitimately) end up with a guard like this: // // t4:StkPtr = GuardStk<BoxedArr,0,<DataTypeGeneric, // inner:DataTypeSpecific, // Type::BoxedCell>> t2:StkPtr // // The outer category is DataTypeGeneric because we know from eval stack // flavors that the top of the stack here is always boxed. The inner // category is DataTypeSpecific, indicating we care what the inner type // is, even though it's just a hint. If we treated this like any other // guard, we would relax the typeParam to Type::Gen and insert an assert // to Type::BoxedCell right after it. Unfortunately, this loses the hint // that the inner type is Arr. Eventually we should have some side // channel for passing around hints for inner ref types, but for now the // best we can do is forcibly keep the guard around, preserving the inner // type hint. 
if (constraint.assertedType.isBoxed() && oldType < constraint.assertedType) { auto relaxedInner = relaxInner(oldType, constraint); if (relaxedInner < Type::BoxedCell && newType >= Type::BoxedCell) { FTRACE(1, "relaxGuards changing newType to {}\n", newType); newType = relaxedInner; } } if (constraint.assertedType < newType) { // If the asserted type is more specific than the new guarded type, set // the guard to the relaxed type but insert an assert operation between // the instruction and its dst. We go from something like this: // // t5:FramePtr = GuardLoc<Int, 4, <DataTypeGeneric,Int>> t4:FramePtr // // to this: // // t6:FramePtr = GuardLoc<Gen, 4> t4:FramePtr // t5:FramePtr = AssertLoc<Int, 4> t6:FramePtr auto* oldDst = inst.dst(); auto* newDst = unit.genDst(&inst); auto* newAssert = [&] { switch (inst.op()) { case GuardLoc: case CheckLoc: return unit.genWithDst(oldDst, guardToAssert(inst.op()), inst.marker(), *inst.extra<LocalId>(), constraint.assertedType, newDst); case GuardStk: case CheckStk: return unit.genWithDst(oldDst, guardToAssert(inst.op()), inst.marker(), *inst.extra<StackOffset>(), constraint.assertedType, newDst); case CheckType: return unit.genWithDst(oldDst, guardToAssert(inst.op()), inst.marker(), constraint.assertedType, newDst); default: always_assert(false); } }(); FTRACE(1, "relaxGuards inserting {} between {} and its dst, " "changing typeParam to {}\n", *newAssert, inst, newType); inst.setTypeParam(newType); // Now, insert the assert after the guard. For control flow guards, // this means inserting it on the next edge. 
if (inst.isControlFlow()) { auto* block = inst.next(); block->insert(block->skipHeader(), newAssert); } else { auto* block = inst.block(); auto it = block->iteratorTo(&inst); ++it; block->insert(it, newAssert); } changed = true; } else if (oldType != newType) { FTRACE(1, "relaxGuards changing {}'s type to {}\n", inst, newType); inst.setTypeParam(newType); changed = true; } } } if (!changed) return false; // Make a second pass to reflow types, with some special logic for loads. FrameState state(unit); for (auto* block : blocks) { state.startBlock(block); for (auto& inst : *block) { state.setMarker(inst.marker()); copyProp(&inst); visitLoad(&inst, state); if (!removeGuard(unit, &inst, state)) { retypeDests(&inst); state.update(&inst); } } state.finishBlock(block); } return true; }