void InlinedScope::initializeArguments() {
  const int nofArgs = _method->number_of_arguments();
  _arguments = new GrowableArray<Expr*>(nofArgs, nofArgs, NULL);
  if (isTop()) {
    // create expr for self but do not allocate a location yet
    // (self is setup by the prologue node)
    _self = new KlassExpr(klassOop(selfKlass()),
                          new SAPReg(this, unAllocated, false, false, PrologueBCI, EpilogueBCI),
                          NULL);
    // preallocate incoming arguments, i.e., create their expressions
    // using SAPRegs that are already allocated
    for (int i = 0; i < nofArgs; i++) {
      SAPReg* arg = new SAPReg(this, Mapping::incomingArg(i, nofArgs), false, false, PrologueBCI, EpilogueBCI);
      _arguments->at_put(i, new UnknownExpr(arg));
    }
  } else {
    _self = NULL;   // will be initialized by sender
    // get args from sender's expression stack; top of expr stack = last arg, etc.
    const int top = sender()->exprStack()->length();
    for (int i = 0; i < nofArgs; i++) {
      _arguments->at_put(i, sender()->exprStack()->at(top - nofArgs + i));
    }
  }
}
void StructureAbstractValue::filter(const StructureAbstractValue& other)
{
    SAMPLE("StructureAbstractValue filter value");

    if (other.isTop())
        return;

    if (other.isClobbered()) {
        if (isTop())
            return;

        if (!isClobbered()) {
            // See justification in filter(const StructureSet&), above. An unclobbered set is
            // almost always better.
            if (m_set.size() > other.m_set.size() + clobberedSupremacyThreshold)
                *this = other; // Keep the clobbered set.
            return;
        }

        m_set.filter(other.m_set);
        return;
    }

    filter(other.m_set);
}
void StructureAbstractValue::clobber()
{
    SAMPLE("StructureAbstractValue clobber");

    // The premise of this approach to clobbering is that anytime we introduce
    // a watchable structure into an abstract value, we watchpoint it. You can assert
    // that this holds by calling assertIsWatched().

    if (isTop())
        return;

    setClobbered(true);

    if (m_set.isThin()) {
        if (!m_set.singleStructure())
            return;
        if (!m_set.singleStructure()->dfgShouldWatch())
            makeTopWhenThin();
        return;
    }

    StructureSet::OutOfLineList* list = m_set.structureList();
    for (unsigned i = list->m_length; i--;) {
        if (!list->list()[i]->dfgShouldWatch()) {
            makeTop();
            return;
        }
    }
}
Pointer &
Pointer::join(const Domain &value)
{
    if (isTop())
        return *this;

    if (value.isBottom())
        return *this;

    if (value.isTop())
    {
        setTop();
        return *this;
    }

    const Pointer &vv = llvm::cast<Pointer>(value);
    CANAL_ASSERT_MSG(&vv.mType == &mType,
                     "Unexpected different types in a pointer merge ("
                     << Canal::toString(vv.mType) << " != "
                     << Canal::toString(mType) << ")");

    PlaceTargetMap::const_iterator valueit = vv.mTargets.begin();
    for (; valueit != vv.mTargets.end(); ++valueit)
    {
        PlaceTargetMap::iterator it = mTargets.find(valueit->first);
        if (it == mTargets.end())
            mTargets.insert(PlaceTargetMap::value_type(
                                valueit->first,
                                new Target(*valueit->second)));
        else
            it->second->join(*valueit->second);
    }

    return *this;
}
void StructureAbstractValue::filter(const StructureSet& other)
{
    SAMPLE("StructureAbstractValue filter set");

    if (isTop()) {
        m_set = other;
        return;
    }

    if (isClobbered()) {
        // We have two choices here:
        //
        // Do nothing: It's legal to keep our set intact, which would essentially mean that for
        // now, our set would behave like TOP but after the next invalidation point it would be
        // a finite set again. This may be a good choice if 'other' is much bigger than our
        // m_set.
        //
        // Replace m_set with other and clear the clobber bit: This is also legal, and means that
        // we're no longer clobbered. This is usually better because it immediately gives us a
        // smaller set.
        //
        // This scenario should come up rarely. We usually don't do anything to an abstract value
        // after it is clobbered. But we apply some heuristics.

        if (other.size() > m_set.size() + clobberedSupremacyThreshold)
            return; // Keep the clobbered set.

        m_set = other;
        setClobbered(false);
        return;
    }

    m_set.filter(other);
}
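// A minimal, self-contained sketch (not WebKit code) of the clobbered-set
// filtering heuristic above. The name 'clobberedSupremacyThreshold' mirrors
// the source; std::set<int> stands in for StructureSet purely for illustration.
#include <cstdio>
#include <set>

struct ToyAbstractValue {
    std::set<int> set;
    bool clobbered = false;
    static const size_t clobberedSupremacyThreshold = 2;

    void filter(const std::set<int>& other) {
        if (clobbered) {
            // Keep the clobbered set only when 'other' is much bigger;
            // otherwise adopt 'other' and clear the clobber bit.
            if (other.size() > set.size() + clobberedSupremacyThreshold)
                return;
            set = other;
            clobbered = false;
            return;
        }
        // Ordinary filtering: intersect the two sets.
        for (auto it = set.begin(); it != set.end();) {
            if (!other.count(*it))
                it = set.erase(it);
            else
                ++it;
        }
    }
};

int main() {
    ToyAbstractValue v;
    v.set = {1};
    v.clobbered = true;
    v.filter({1, 2, 3, 4, 5});  // 5 > 1 + 2: keep the clobbered {1}
    std::printf("clobbered=%d size=%zu\n", v.clobbered, v.set.size());
    v.filter({1, 2});           // 2 <= 1 + 2: adopt {1, 2} and unclobber
    std::printf("clobbered=%d size=%zu\n", v.clobbered, v.set.size());
}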
void AbstractHeap::Payload::dump(PrintStream& out) const
{
    if (isTop())
        out.print("TOP");
    else
        out.print(value());
}
void InlinedScope::initialize(methodOop method, klassOop methodHolder, InlinedScope* sender,
                              RScope* rs, SendInfo* info) {
  _scopeID = currentScopeID();
  theCompiler->scopes->append(this);
  assert(theCompiler->scopes->at(_scopeID) == this, "bad list");
  _sender = sender;
  _scopeInfo = NULL;
  if (sender) {
    _senderBCI = sender->bci();
    sender->addSubScope(this);
    depth     = _sender->depth + 1;
    loopDepth = _sender->loopDepth;
  } else {
    _senderBCI = IllegalBCI;
    depth = loopDepth = 0;
  }
  result = nlrResult = NULL;          // these are set during compilation
  if (info && info->resReg) {
    resultPR = info->resReg;
  } else {
    // potential bug: live range of resultPR is bogus
    assert(isTop(), "should have resReg for inlined scope");
    resultPR = new SAPReg(this, resultLoc, false, false, PrologueBCI, EpilogueBCI);
  }
  if (!rs->isNullScope() && rs->method() != method) {
    // wrong rscope (could happen after programming change);
    // must be replaced before it is recorded in rscope below
    rs = new RNullScope;
  }
  rscope = rs;
  rs->extend();
  predicted = info ? info->predicted : false;

  assert(info->key->klass(), "must have klass");
  _key = info->key;

  _method       = method;
  _methodHolder = methodHolder;       // NB: can be NULL if method is in Object

  _nofSends           = 0;
  _nofInterruptPoints = 0;
  _primFailure        = sender ? sender->_primFailure : false;
  _endsDead           = false;
  _self               = NULL;         // initialized by createTemps or by sender scope

  _gen.initialize(this);

  _temporaries        = NULL;         // allocated by createTemporaries
  _floatTemporaries   = NULL;         // allocated by createFloatTemporaries
  _contextTemporaries = NULL;         // allocated by createContextTemporaries
  _context            = NULL;         // set for blocks and used/set by createContextTemporaries
  _exprStackElems     = new GrowableArray<Expr*>(nofBytes());
  _subScopes          = new GrowableArray<InlinedScope*>(5);
  _loops              = new GrowableArray<CompiledLoop*>(5);
  _typeTests          = new GrowableArray<NonTrivialNode*>(10);

  _pregsBegSorted   = new GrowableArray<PReg*>(5);
  _pregsEndSorted   = new GrowableArray<PReg*>(5);
  _firstFloatIndex  = -1;             // set during float allocation
  _hasBeenGenerated = false;
  theCompiler->nofBytesCompiled(nofBytes());
}
bool StructureAbstractValue::merge(const StructureSet& other)
{
    SAMPLE("StructureAbstractValue merge set");

    if (isTop())
        return false;

    return mergeNotTop(other);
}
bool StructureAbstractValue::contains(Structure* structure) const
{
    SAMPLE("StructureAbstractValue contains");

    if (isTop() || isClobbered())
        return true;

    return m_set.contains(structure);
}
bool StructureAbstractValue::isSupersetOf(const StructureSet& other) const
{
    SAMPLE("StructureAbstractValue isSupersetOf set");

    if (isTop() || isClobbered())
        return true;

    return m_set.isSupersetOf(other);
}
bool StructureAbstractValue::overlaps(const StructureSet& other) const
{
    SAMPLE("StructureAbstractValue overlaps set");

    if (isTop() || isClobbered())
        return true;

    return m_set.overlaps(other);
}
void StructureAbstractValue::dumpInContext(PrintStream& out, DumpContext* context) const
{
    if (isClobbered())
        out.print("Clobbered:");

    if (isTop())
        out.print("TOP");
    else
        out.print(inContext(m_set, context));
}
void StructureAbstractValue::assertIsWatched(Graph& graph) const
{
    SAMPLE("StructureAbstractValue assertIsWatched");

    if (isTop())
        return;

    for (unsigned i = size(); i--;)
        graph.assertIsWatched(at(i));
}
bool StructureAbstractValue::equalsSlow(const StructureAbstractValue& other) const
{
    SAMPLE("StructureAbstractValue equalsSlow");

    ASSERT(m_set.m_pointer != other.m_set.m_pointer);
    ASSERT(!isTop());
    ASSERT(!other.isTop());

    return m_set == other.m_set
        && isClobbered() == other.isClobbered();
}
bool DummyStorageLink::onUp(const api::StorageMessage::SP& reply) {
    if (isTop()) {
        vespalib::MonitorGuard lock(_waitMonitor);
        {
            vespalib::LockGuard guard(_lock);
            _replies.push_back(reply);
        }
        lock.broadcast();
        return true;
    }
    return StorageLink::onUp(reply);
}
void StructureAbstractValue::filterSlow(SpeculatedType type)
{
    SAMPLE("StructureAbstractValue filter type slow");

    if (!(type & SpecCell)) {
        clear();
        return;
    }

    ASSERT(!isTop());

    ConformsToType conformsToType(type);
    m_set.genericFilter(conformsToType);
}
void InlinedScope::createFloatTemporaries(int nofFloats) {
  assert(!hasFloatTemporaries(), "cannot be called twice");
  _floatTemporaries = new GrowableArray<Expr*>(nofFloats, nofFloats, NULL);
  // initialize float temps
  for (int i = 0; i < nofFloats; i++) {
    PReg* preg = new PReg(this, Location::floatLocation(scopeID(), i), false, false);
    _floatTemporaries->at_put(i, new UnknownExpr(preg, NULL));
    if (isTop()) {
      // floats are initialized by PrologueNode
    } else {
      warning("float initialization of floats in inlined scopes not implemented yet");
    }
  }
}
bool StructureAbstractValue::add(Structure* structure)
{
    SAMPLE("StructureAbstractValue add");

    if (isTop())
        return false;

    if (!m_set.add(structure))
        return false;

    if (m_set.size() > polymorphismLimit)
        makeTop();

    return true;
}
void StructureAbstractValue::filterSlow(SpeculatedType type)
{
    SAMPLE("StructureAbstractValue filter type slow");

    if (!(type & SpecCell)) {
        clear();
        return;
    }

    ASSERT(!isTop());

    m_set.genericFilter(
        [&] (Structure* structure) {
            return !!(speculationFromStructure(structure) & type);
        });
}
bool MethodScope::isRecursiveCall(methodOop method, klassOop rcvrKlass, int depth) {
  // test if method/rcvrKlass would be a recursive invocation of this scope
  if (method == _method && rcvrKlass == selfKlass()) {
    if (depth <= 1) {
      return true;      // terminate recursion here
    } else {
      // it's recursive, but the unrolling depth hasn't been reached yet
      depth--;
    }
  }
  // check sender
  if (isTop()) {
    return false;
  } else {
    return sender()->isRecursiveCall(method, rcvrKlass, depth);
  }
}
void StructureAbstractValue::observeTransition(Structure* from, Structure* to)
{
    SAMPLE("StructureAbstractValue observeTransition");

    ASSERT(!from->dfgShouldWatch());

    if (isTop())
        return;

    if (!m_set.contains(from))
        return;

    if (!m_set.add(to))
        return;

    if (m_set.size() > polymorphismLimit)
        makeTop();
}
Pointer &
Pointer::meet(const Domain &value)
{
    if (isBottom())
        return *this;

    if (value.isTop())
        return *this;

    if (value.isBottom())
    {
        setBottom();
        return *this;
    }

    const Pointer &vv = llvm::cast<Pointer>(value);
    CANAL_ASSERT_MSG(&vv.mType == &mType,
                     "Unexpected different types in a pointer merge ("
                     << Canal::toString(vv.mType) << " != "
                     << Canal::toString(mType) << ")");

    if (isTop())
    {
        mTop = false;
        CANAL_ASSERT(mTargets.empty());
    }

    // Drop targets absent from the other pointer; meet the shared ones.
    PlaceTargetMap::iterator it = mTargets.begin();
    while (it != mTargets.end())
    {
        PlaceTargetMap::const_iterator valueit = vv.mTargets.find(it->first);
        if (valueit == vv.mTargets.end())
        {
            // Erase via a post-increment copy so the iterator stays valid.
            delete it->second;
            mTargets.erase(it++);
        }
        else
        {
            it->second->meet(*valueit->second);
            ++it;
        }
    }

    return *this;
}
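// A self-contained sketch (not Canal code) of the join/meet pattern used by
// Pointer above: join unions the place->target maps and joins shared targets;
// meet intersects them and meets shared targets. Targets are modeled here as
// plain intervals [lo, hi] purely for illustration.
#include <algorithm>
#include <cstdio>
#include <map>

struct Interval {
    int lo, hi;
    void join(const Interval& o) { lo = std::min(lo, o.lo); hi = std::max(hi, o.hi); }
    void meet(const Interval& o) { lo = std::max(lo, o.lo); hi = std::min(hi, o.hi); }
};

using TargetMap = std::map<int, Interval>;

void join(TargetMap& a, const TargetMap& b) {
    for (const auto& [place, target] : b) {
        auto it = a.find(place);
        if (it == a.end())
            a.emplace(place, target);   // copy targets missing locally
        else
            it->second.join(target);    // widen shared targets
    }
}

void meet(TargetMap& a, const TargetMap& b) {
    for (auto it = a.begin(); it != a.end();) {
        auto bit = b.find(it->first);
        if (bit == b.end()) {
            it = a.erase(it);           // drop targets absent on the other side
        } else {
            it->second.meet(bit->second); // narrow shared targets
            ++it;
        }
    }
}

int main() {
    TargetMap a{{1, {0, 4}}, {2, {3, 9}}};
    TargetMap b{{2, {5, 7}}, {3, {1, 2}}};
    meet(a, b);                         // leaves only place 2, narrowed to [5, 7]
    std::printf("size=%zu lo=%d hi=%d\n", a.size(), a[2].lo, a[2].hi);
}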
void FSelfScope::initialize() {
  assert(isTop(), "can't inline yet");
  // preallocate receiver, incoming args, locals
  self = receiver = IReceiverReg;
  allocs->allocatePermanent(receiver);
  {
    // Allocate space for arguments and count argument slots.
    nargs = 0;
    FOR_EACH_SLOTDESC_N(method()->map(), s, i) {
      args->append(UnAllocated);
      if (s->is_arg_slot()) {
        oop ind = s->data;
        assert_smi(ind, "bad index");
        fint argIndex = smiOop(ind)->value();
        allocs->allocatePermanent(IArgLocation(argIndex));
        args->nthPut(i, IArgLocation(argIndex));
        nargs++;
      }
    }
  }
}
void InlinedScope::genCode() {
  _hasBeenGenerated = true;
  prologue();
  // always generate (shared) entry points for ordinary & non-local return
  _returnPoint        = NodeFactory::new_MergeNode(EpilogueBCI);
  _NLReturnPoint      = NodeFactory::new_MergeNode(EpilogueBCI);
  _nlrTestPoint       = NULL;
  _contextInitializer = NULL;
  int nofTemps = method()->number_of_stack_temporaries();
  if (isTop()) {
    _returnPoint->append(NodeFactory::new_ReturnNode(resultPR, EpilogueBCI));
    _NLReturnPoint->append(NodeFactory::new_NLRSetupNode(resultPR, EpilogueBCI));
    Node* first = NodeFactory::new_PrologueNode(key(), nofArguments(), nofTemps);
    theCompiler->firstNode = first;
    gen()->setCurrent(first);
  }
  // allocate space for temporaries - initialization done in the prologue code
  assert(!hasTemporaries(), "should have no temporaries yet\n");
  createTemporaries(nofTemps);
  // allocate space for float temporaries
  int nofFloats = method()->total_number_of_floats();
  if (UseFPUStack) {
    const int FPUStackSize = 8;
    if (method()->float_expression_stack_size() <= FPUStackSize) {
      // float expression stack fits in FPU stack, use it instead
      // and allocate only space for the real float temporaries
      nofFloats = method()->number_of_float_temporaries();
    } else {
      warning("possible performance bug: cannot use FPU stack for float expressions");
    }
  }
  createFloatTemporaries(nofFloats);
  // build the intermediate representation
  assert(gen()->current() != NULL, "current() should have been set before");
  MethodIterator iter(method(), gen());
  if (gen()->aborting()) {
    // ends with dead code -- clean up expression stack
    while (!exprStack()->isEmpty()) exprStack()->pop();
  }
  epilogue();
}
void InlinedScope::epilogue() {
  // generate epilogue code (i.e., everything after last byte code has been processed)
  assert(exprStack()->isEmpty(), "expr. stack should be empty now");

  // first make sure subScopes are sorted by bci
  _subScopes->sort(compare_scopeBCIs);

  // now remove all subscopes that were created but not used (not inlined)
  while (!_subScopes->isEmpty() && !_subScopes->last()->hasBeenGenerated()) _subScopes->pop();
#ifdef ASSERT
  for (int i = 0; i < _subScopes->length(); i++) {
    if (!_subScopes->at(i)->hasBeenGenerated()) fatal("unused scopes should be at end");
  }
#endif

  if (_nofSends > 0 && containsNLR()) {
    // this scope *could* be the target of a non-inlined NLR; add an UnknownExpr to
    // the scope's result expression to account for this possibility
    // note: to be sure, we'd have to know that at least one nested block wasn't inlined,
    // but this analysis isn't performed until later
    addResult(new UnknownExpr(resultPR, NULL));
    // also make sure we have an NLR test point to catch the NLR
    (void)nlrTestPoint();
    assert(has_nlrTestPoint(), "should have a NLR test point now");
  }

  // generate NLR code if needed
  if (has_nlrTestPoint()) {
    // NB: assertion below is too strict -- may have an nlr node that will be connected
    // only during fixupNLRPoints()
    // assert(nlrTestPoint()->nPredecessors() > 0, "nlr node is unused??");
  } else if (isTop() && theCompiler->nlrTestPoints->nonEmpty()) {
    // the top scope doesn't have an NLR point, but needs one anyway so that inlined
    // scopes have somewhere to jump to
    (void)nlrTestPoint();
  }
  if (!result) result = new NoResultExpr;
  theCompiler->exitScope(this);
}
bool StructureAbstractValue::isSubsetOf(const StructureAbstractValue& other) const
{
    SAMPLE("StructureAbstractValue isSubsetOf value");

    if (isTop())
        return false;

    if (other.isTop())
        return true;

    if (isClobbered() == other.isClobbered())
        return m_set.isSubsetOf(other.m_set);

    // Here it gets tricky. If in doubt, return false!

    if (isClobbered())
        return false; // A clobbered set is never a subset of an unclobbered set.

    // An unclobbered set is currently a subset of a clobbered set, but it may not be so after
    // invalidation.
    return m_set.isSubsetOf(other.m_set);
}
void StructureAbstractValue::observeTransitions(const TransitionVector& vector)
{
    SAMPLE("StructureAbstractValue observeTransitions");

    if (isTop())
        return;

    StructureSet newStructures;
    for (unsigned i = vector.size(); i--;) {
        ASSERT(!vector[i].previous->dfgShouldWatch());

        if (!m_set.contains(vector[i].previous))
            continue;

        newStructures.add(vector[i].next);
    }

    if (!m_set.merge(newStructures))
        return;

    if (m_set.size() > polymorphismLimit)
        makeTop();
}
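// A self-contained sketch (not WebKit code) of the transition-observation
// pattern in observeTransition/observeTransitions above: any structure the
// value may hold before a transition implies the transitioned structure
// afterwards ('from' stays in the set, since the transition may not have
// happened), and crossing 'polymorphismLimit' widens the set to TOP,
// modeled here as a boolean.
#include <cstdio>
#include <set>

struct ToyValue {
    std::set<int> set;
    bool top = false;
    static const size_t polymorphismLimit = 8;

    void observeTransition(int from, int to) {
        if (top)
            return;                  // TOP already accounts for everything
        if (!set.count(from))
            return;                  // cannot hold 'from', nothing to add
        set.insert(to);              // may now hold 'to' as well
        if (set.size() > polymorphismLimit)
            top = true;              // too polymorphic: widen to TOP
    }
};

int main() {
    ToyValue v;
    v.set = {10};
    v.observeTransition(10, 11);     // {10, 11}: 'from' stays in the set
    v.observeTransition(99, 100);    // no-op: 99 cannot be in the set
    std::printf("size=%zu top=%d\n", v.set.size(), v.top);
}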
void InlinedScope::createTemporaries(int nofTemps) {
  // add nofTemps temporaries (may be called multiple times)
  int firstNew;
  if (!hasTemporaries()) {
    // first time we're called
    _temporaries = new GrowableArray<Expr*>(nofTemps, nofTemps, NULL);
    // The canonical model has the context in the first temporary.
    // To preserve this model the first temporary is aliased to _context.
    // Lars, 3/8/96
    if (_context) {
      _temporaries->at_put(0, new ContextExpr(_context));
      firstNew = 1;
    } else {
      firstNew = 0;
    }
  } else {
    // grow existing temp array
    const GrowableArray<Expr*>* oldTemps = _temporaries;
    const int n = nofTemps + oldTemps->length();
    _temporaries = new GrowableArray<Expr*>(n, n, NULL);
    firstNew = oldTemps->length();
    nofTemps += oldTemps->length();
    for (int i = 0; i < firstNew; i++) _temporaries->at_put(i, oldTemps->at(i));
  }
  // initialize new temps
  ConstPReg* nil = new_ConstPReg(this, nilObj);
  for (int i = firstNew; i < nofTemps; i++) {
    PReg* r = new PReg(this);
    _temporaries->at_put(i, new UnknownExpr(r, NULL));
    if (isTop()) {
      // temps are initialized by PrologueNode
    } else {
      gen()->append(NodeFactory::new_AssignNode(nil, r));
    }
  }
}
void Lcars::shoulder(uint16_t x, uint16_t y, uint16_t width, uint16_t height,
                     uint8_t corner, uint16_t color) {
  width  = width - Lcars_B_VSpacing;
  height = height - Lcars_B_HSpacing;
  uint8_t top  = isTop(corner);
  uint8_t left = isLeft(corner);

  // vertical
  (*_tft).fillRect(
    x + (left ? 0 : width - Lcars_B_Width),
    y + (top ? _posRadius : 0),
    Lcars_B_Width,
    height - _posRadius,
    color);

  // positive radius
  uint16_t posCircX = x + Lcars_Pos_Radius + (left ? 0 : width - (Lcars_Pos_Radius * 2 + 1));
  uint16_t posCircY = y + Lcars_Pos_Radius + (top ? 0 : height - (Lcars_Pos_Radius * 2 + 1));
  (*_tft).fillCircleHelper(posCircX, posCircY, Lcars_Pos_Radius, (left ? 2 : 1), 0, color);

  // Fill gap
  (*_tft).fillRect(
    posCircX + (left ? 0 : -Lcars_Pos_Radius + 1),
    posCircY + (top ? -Lcars_Pos_Radius : +1),
    Lcars_Pos_Radius,
    Lcars_Pos_Radius,
    color);

  // horizontal
  (*_tft).fillRect(
    x + (left ? Lcars_B_Width : 0),
    y + (top ? 0 : height - Lcars_Bar_Height),
    width - Lcars_B_Width,
    Lcars_Bar_Height,
    color);

  // Fill outer
  (*_tft).fillRect(
    x + (left ? Lcars_B_Width : width - Lcars_B_Width - Lcars_Neg_Radius),
    y + (top ? Lcars_Bar_Height : height - Lcars_Bar_Height - Lcars_Neg_Radius),
    Lcars_Neg_Radius,
    Lcars_Neg_Radius,
    color);

  // Trim outer
  (*_tft).fillCircleHelper(
    x + (left ? Lcars_B_Width + Lcars_Neg_Radius : width - Lcars_B_Width - Lcars_Neg_Radius - 1),
    y + (top ? Lcars_Bar_Height + Lcars_Neg_Radius : height - Lcars_Bar_Height - Lcars_Neg_Radius - 1),
    Lcars_Neg_Radius,
    (left ? 2 : 1),
    0,
    Lcars_Color_Black);
}
void StructureAbstractValue::validateReferences(const TrackedReferences& trackedReferences) const
{
    if (isTop())
        return;

    m_set.validateReferences(trackedReferences);
}