void PluginView::stop()
{
    if (!m_isStarted)
        return;

    HashSet<RefPtr<PluginStream> > streams = m_streams;
    HashSet<RefPtr<PluginStream> >::iterator end = streams.end();
    for (HashSet<RefPtr<PluginStream> >::iterator it = streams.begin(); it != end; ++it) {
        (*it)->stop();
        disconnectStream((*it).get());
    }

    ASSERT(m_streams.isEmpty());

    m_isStarted = false;

    JSC::JSLock::DropAllLocks dropAllLocks(false);

    // Clear the window
    m_npWindow.window = 0;
    delete (NPSetWindowCallbackStruct*)m_npWindow.ws_info;
    m_npWindow.ws_info = 0;

    if (m_plugin->pluginFuncs()->setwindow && !m_plugin->quirks().contains(PluginQuirkDontSetNullWindowHandleOnDestroy)) {
        PluginView::setCurrentPluginView(this);
        setCallingPlugin(true);
        m_plugin->pluginFuncs()->setwindow(m_instance, &m_npWindow);
        setCallingPlugin(false);
        PluginView::setCurrentPluginView(0);
    }

    // Destroy the plugin
    {
        PluginView::setCurrentPluginView(this);
        setCallingPlugin(true);
        m_plugin->pluginFuncs()->destroy(m_instance, 0);
        setCallingPlugin(false);
        PluginView::setCurrentPluginView(0);
    }

    m_instance->pdata = 0;
}
void WMLTemplateElement::registerTemplatesInDocument(Document* doc)
{
    ASSERT(doc);

    // Build list of cards in document
    RefPtr<NodeList> nodeList = doc->getElementsByTagName("card");
    if (!nodeList)
        return;

    unsigned length = nodeList->length();
    if (length < 1)
        return;

    HashSet<WMLCardElement*> cards;
    for (unsigned i = 0; i < length; ++i)
        cards.add(static_cast<WMLCardElement*>(nodeList->item(i)));

    if (cards.isEmpty())
        return;

    // Register template element to all cards
    nodeList = doc->getElementsByTagName("template");
    if (!nodeList)
        return;

    length = nodeList->length();
    if (length < 1)
        return;

    // Only one template element should be allowed in a document.
    // Calling setTemplateElement() twice on a WMLCardElement will result in a tokenizer error.
    for (unsigned i = 0; i < length; ++i) {
        WMLTemplateElement* temp = static_cast<WMLTemplateElement*>(nodeList->item(i));

        HashSet<WMLCardElement*>::iterator it = cards.begin();
        HashSet<WMLCardElement*>::iterator end = cards.end();

        for (; it != end; ++it)
            (*it)->setTemplateElement(temp);
    }
}
void SVGDocumentExtensions::removeAllElementReferencesForTarget(SVGElement* referencedElement)
{
    ASSERT(referencedElement);

    HashMap<SVGElement*, OwnPtr<HashSet<SVGElement*> > >::iterator it = m_elementDependencies.find(referencedElement);
    if (it == m_elementDependencies.end())
        return;
    ASSERT(it->first == referencedElement);

    Vector<SVGElement*> toBeNotified;
    HashSet<SVGElement*>* referencingElements = it->second.get();
    HashSet<SVGElement*>::iterator setEnd = referencingElements->end();
    for (HashSet<SVGElement*>::iterator setIt = referencingElements->begin(); setIt != setEnd; ++setIt)
        toBeNotified.append(*setIt);

    m_elementDependencies.remove(it);

    // Force rebuilding the referencingElement so it knows about this change.
    Vector<SVGElement*>::iterator vectorEnd = toBeNotified.end();
    for (Vector<SVGElement*>::iterator vectorIt = toBeNotified.begin(); vectorIt != vectorEnd; ++vectorIt)
        (*vectorIt)->svgAttributeChanged(XLinkNames::hrefAttr);
}
bool StyleEngine::updateActiveStyleSheets(StyleResolverUpdateMode updateMode)
{
    ASSERT(isMaster());
    ASSERT(!m_document.inStyleRecalc());

    if (!m_document.isActive())
        return false;

    bool requiresFullStyleRecalc = false;
    if (m_documentScopeDirty || updateMode == FullStyleUpdate)
        requiresFullStyleRecalc = m_documentStyleSheetCollection.updateActiveStyleSheets(this, updateMode);

    if (shouldUpdateShadowTreeStyleSheetCollection(updateMode)) {
        TreeScopeSet treeScopes = updateMode == FullStyleUpdate ? m_activeTreeScopes : m_dirtyTreeScopes;
        HashSet<TreeScope*> treeScopesRemoved;

        for (TreeScopeSet::iterator it = treeScopes.begin(); it != treeScopes.end(); ++it) {
            TreeScope* treeScope = *it;
            ASSERT(treeScope != &m_document);
            ShadowTreeStyleSheetCollection* collection = static_cast<ShadowTreeStyleSheetCollection*>(styleSheetCollectionFor(*treeScope));
            ASSERT(collection);
            collection->updateActiveStyleSheets(this, updateMode);
            if (!collection->hasStyleSheetCandidateNodes())
                treeScopesRemoved.add(treeScope);
        }
        if (!treeScopesRemoved.isEmpty())
            for (HashSet<TreeScope*>::iterator it = treeScopesRemoved.begin(); it != treeScopesRemoved.end(); ++it)
                m_activeTreeScopes.remove(*it);
    }

    InspectorInstrumentation::activeStyleSheetsUpdated(&m_document);
    m_usesRemUnits = m_documentStyleSheetCollection.usesRemUnits();

    m_dirtyTreeScopes.clear();
    m_documentScopeDirty = false;

    return requiresFullStyleRecalc;
}
void HTMLLinkElement::getSubresourceAttributeStrings(Vector<String>& urls) const
{
    if (m_isIcon) {
        urls.append(href().string());
        return;
    }

    if (!m_isStyleSheet)
        return;

    // Append the URL of this link element.
    urls.append(href().string());

    // Walk the URLs linked by the linked-to stylesheet.
    HashSet<String> styleURLs;
    StyleSheet* styleSheet = const_cast<HTMLLinkElement*>(this)->sheet();
    if (styleSheet)
        styleSheet->addSubresourceURLStrings(styleURLs, href());

    HashSet<String>::iterator end = styleURLs.end();
    for (HashSet<String>::iterator i = styleURLs.begin(); i != end; ++i)
        urls.append(*i);
}
static inline void removeFromCacheAndInvalidateDependencies(RenderObject* object, bool needsLayout)
{
    ASSERT(object);

    if (SVGResources* resources = SVGResourcesCache::cachedResourcesForRenderObject(object)) {
        if (RenderSVGResourceFilter* filter = resources->filter())
            filter->removeClientFromCache(object);
        if (RenderSVGResourceMasker* masker = resources->masker())
            masker->removeClientFromCache(object);
        if (RenderSVGResourceClipper* clipper = resources->clipper())
            clipper->removeClientFromCache(object);
    }

    if (!object->node() || !object->node()->isSVGElement())
        return;

    HashSet<SVGElement*>* dependencies = object->document().accessSVGExtensions().setOfElementsReferencingTarget(toSVGElement(object->node()));
    if (!dependencies)
        return;

    // We allow cycles in SVGDocumentExtensions reference sets in order to avoid expensive
    // reference graph adjustments on changes, so we need to break possible cycles here.
    DEFINE_STATIC_LOCAL(HashSet<SVGElement*>, invalidatingDependencies, ());

    HashSet<SVGElement*>::iterator end = dependencies->end();
    for (HashSet<SVGElement*>::iterator it = dependencies->begin(); it != end; ++it) {
        if (RenderObject* renderer = (*it)->renderer()) {
            if (UNLIKELY(!invalidatingDependencies.add(*it).isNewEntry)) {
                // Reference cycle: we are in the process of invalidating this dependant.
                continue;
            }

            RenderSVGResource::markForLayoutAndParentResourceInvalidation(renderer, needsLayout);
            invalidatingDependencies.remove(*it);
        }
    }
}
void EditMode::activate()
{
    InteractionMode::activate();

    if (!fragment_db_) {
        fragment_db_ = new FragmentDB("fragments/Editing-Fragments.db");
    }

    list<AtomContainer*> acs = scene_->getContainers();
    list<Composite*> sel;
    list<AtomContainer*>::iterator lit = acs.begin();
    for (; lit != acs.end(); lit++) {
        sel.push_back(*lit);
    }

    ControlSelectionMessage* msg = new ControlSelectionMessage();
    msg->setSelection(sel);
    scene_->notify(msg);

    //edit_id_->setChecked(true);
    scene_->setElementCursor(scene_->getEditElementType());

    HashSet<Composite*> selection = scene_->getMainControl()->getSelection();
    HashSet<Composite*>::Iterator it = selection.begin();
    for (; +it; ++it) {
        if (!(**it).containsSelection())
            continue;
        scene_->getMainControl()->deselectCompositeRecursive(*it, true);
        scene_->getMainControl()->update(**it, false);
    }

    scene_->notify(new NewSelectionMessage);

    main_action_->setChecked(true);
}
void SVGDocumentExtensions::rebuildAllElementReferencesForTarget(SVGElement* referencedElement)
{
    ASSERT(referencedElement);

    HashMap<SVGElement*, OwnPtr<HashSet<SVGElement*> > >::iterator it = m_elementDependencies.find(referencedElement);
    if (it == m_elementDependencies.end())
        return;
    ASSERT(it->key == referencedElement);

    Vector<SVGElement*> toBeNotified;
    HashSet<SVGElement*>* referencingElements = it->value.get();
    HashSet<SVGElement*>::iterator setEnd = referencingElements->end();
    for (HashSet<SVGElement*>::iterator setIt = referencingElements->begin(); setIt != setEnd; ++setIt)
        toBeNotified.append(*setIt);

    // Force rebuilding the referencingElement so it knows about this change.
    Vector<SVGElement*>::iterator vectorEnd = toBeNotified.end();
    for (Vector<SVGElement*>::iterator vectorIt = toBeNotified.begin(); vectorIt != vectorEnd; ++vectorIt) {
        // Before rebuilding referencingElement ensure it was not removed from under us.
        if (HashSet<SVGElement*>* referencingElements = setOfElementsReferencingTarget(referencedElement)) {
            if (referencingElements->contains(*vectorIt))
                (*vectorIt)->svgAttributeChanged(XLinkNames::hrefAttr);
        }
    }
}
void WebApplicationCacheManager::getApplicationCacheOrigins(uint64_t callbackID)
{
    HashSet<RefPtr<SecurityOrigin>, SecurityOriginHash> origins;

    cacheStorage().getOriginsWithCache(origins);

    Vector<SecurityOriginData> identifiers;
    identifiers.reserveCapacity(origins.size());

    HashSet<RefPtr<SecurityOrigin>, SecurityOriginHash>::iterator end = origins.end();
    HashSet<RefPtr<SecurityOrigin>, SecurityOriginHash>::iterator i = origins.begin();
    for (; i != end; ++i) {
        RefPtr<SecurityOrigin> origin = *i;

        SecurityOriginData originData;
        originData.protocol = origin->protocol();
        originData.host = origin->host();
        originData.port = origin->port();

        identifiers.uncheckedAppend(originData);
    }

    m_childProcess->send(Messages::WebApplicationCacheManagerProxy::DidGetApplicationCacheOrigins(identifiers, callbackID), 0);
}
void performanceTest_int()
{
    const int N = 1;
    std::vector<int> textArray;
    {
        for (int i = 0; i < (1 << 18); ++i)
            textArray.push_back(rand() % 4096);
    }
    std::vector<int> rArray;
    for (int i = 0; i < (1 << 20); ++i)
        rArray.push_back(rand() % textArray.size());

    std::vector<int> res;
    {
        Timer _t("HashSet");
        for (int _ = 0; _ < N; ++_) {
            HashSet<int> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res.empty()) {
                res.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res.begin(), res.end());
    cout << res.size() << endl;

    std::vector<int> res2;
    {
        Timer _t("set");
        for (int _ = 0; _ < N; ++_) {
            std::set<int> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res2.empty()) {
                res2.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res2.begin(), res2.end());
    cout << (res == res2) << endl;

    res2.clear(); // reset so the hash_set results below are actually captured and compared
    {
        Timer _t("hash_set");
        for (int _ = 0; _ < N; ++_) {
            stdext::hash_set<int> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res2.empty()) {
                res2.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res2.begin(), res2.end());
    cout << (res == res2) << endl;
}
void ApplicationCacheGroup::postListenerTask(ApplicationCacheHost::EventID eventID, int progressTotal, int progressDone, const HashSet<DocumentLoader*>& loaderSet)
{
    HashSet<DocumentLoader*>::const_iterator loaderSetEnd = loaderSet.end();
    for (HashSet<DocumentLoader*>::const_iterator iter = loaderSet.begin(); iter != loaderSetEnd; ++iter)
        postListenerTask(eventID, progressTotal, progressDone, *iter);
}
    TEST_EQUAL(res, 1)
    TEST_EQUAL(hs.has(0), false)
    TEST_EQUAL(hs.has(1), true)
    TEST_EQUAL(hs.has(2), true)
    TEST_EQUAL(hs.has(3), false)
    TEST_EQUAL(hs.getSize(), 2)
RESULT

CHECK(void erase(Iterator f, Iterator l) throw(Exception::IncompatibleIterators))
    HashSet<int> hs;
    hs.insert(0);
    hs.insert(1);
    hs.insert(2);
    hs.insert(3);

    HashSet<int>::Iterator it1 = hs.begin();
    HashSet<int>::Iterator it2 = hs.begin();
    ++it2;
    ++it2;
    ++it1;
    hs.erase(it1, it2);

    TEST_EQUAL(hs.has(0), false)
    TEST_EQUAL(hs.has(2), true)
    TEST_EQUAL(hs.has(3), true)
    TEST_EQUAL(hs.has(1), true)
    TEST_EQUAL(hs.getSize(), 3)

    hs.erase(hs.begin(), hs.end());
    TEST_EQUAL(hs.getSize(), 0)
// insert an atom at screen position (x,y) on the view plane
// TODO: make the renderer dependent on the current target!
void EditMode::insert_(int x, int y, PDBAtom& atom)
{
    // find the 3D coordinates of screen position (x,y) on the view plane
    // and move the atom to that position
    atom.setPosition(scene_->mapViewportTo3D(x, y));

    // now we need to find the AtomContainer into which we will insert the atom.
    // get all highlighted composites
    list<Composite*> composite_list = scene_->getMainControl()->getMolecularControlSelection();

    Size nr_high = composite_list.size();

    //TODO: read highlighting
    if (nr_high > 1 /*|| (only_highlighted_ && nr_high == 0)*/) {
        scene_->setStatusbarText(qApp->tr("Edit Mode", "Please highlight exactly one AtomContainer for insertion of the created atoms!"), true);
        return;
    }

    // exactly one highlighted composite
    if (nr_high == 1) {
        // is it an AtomContainer?
        AtomContainer* ai = dynamic_cast<AtomContainer*>(*composite_list.begin());
        if (ai == 0) {
            // is the parent an AtomContainer?
            Composite* parent = (**composite_list.begin()).getParent();
            if (parent != 0) {
                ai = dynamic_cast<AtomContainer*>(parent);
            }

            if (ai == 0) {
                scene_->setStatusbarText(qApp->tr("Edit Mode", "Please highlight exactly one AtomContainer for insertion of the created atoms!"), true);
                return;
            }
        }

        // prevent adding atoms directly to a System:
        // some force fields will wreak havoc otherwise
        if (RTTI::isKindOf<System>(*ai)) {
            System* system = (System*) ai;
            Molecule* mol = system->getMolecule(0);
            if (mol == 0) {
                mol = new Molecule();
                system->insert(*mol);
            }
            ai = mol;
        }

        // we do not need to create our own system
        ai->insert(atom);
        scene_->getMainControl()->update(*ai, true);
        return;
    }

    /////////////////////////////////////////////////////////
    // no atom container highlighted:

    HashSet<Composite*> composites = getMainControl()->getCompositeManager().getComposites();

    // no System exists? -> create one
    if (composites.size() == 0) {
        System* system = new System();
        Molecule* current_molecule = new Molecule();
        system->insert(*current_molecule);
        current_molecule->insert(atom);
        getMainControl()->insert(*system);
        getMainControl()->update(*system);
        return;
    }

    // add to first Molecule in first System
    System* system = dynamic_cast<System*>(*composites.begin());
    Molecule* mol = system->getMolecule(0);
    if (mol == 0) {
        mol = new Molecule();
        system->insert(*mol);
    }

    mol->appendChild(atom);
    getMainControl()->update(*mol, true);
}
void EditMode::createBond_()
{
    // this functionality shall be independent from the edit mode

    // check if two atoms are selected
    HashSet<Composite*> selection = scene_->getMainControl()->getSelection();

    // by switching into the edit mode, the recursive selection has already been cleaned up
    Atom* first_atom = 0;
    Atom* second_atom = 0;

    // case 1: one system with exactly two atoms
    if (selection.size() == 1) {
        if (RTTI::isKindOf<AtomContainer>(**selection.begin())) {
            AtomContainer* ac = reinterpret_cast<AtomContainer*>(*selection.begin());
            if (ac->countAtoms() == 2) {
                AtomIterator atom_it = ac->beginAtom();
                for (; +atom_it; ++atom_it) {
                    if (!first_atom) {
                        first_atom = &*atom_it;
                    } else if (!second_atom) {
                        second_atom = &*atom_it;
                    } else {
                        Log.error() << (String)tr("Internal error! Too many atoms selected.") << std::endl;
                    }
                }
            } else {
                scene_->setStatusbarText(tr("Please select exactly two atoms."), true);
            }
        } else {
            scene_->setStatusbarText(tr("Please select exactly two atoms."), true);
        }
    }
    // case 2: two selected atoms (alongside unselected ones), either in
    // distinct atom containers or in the same container
    else if (selection.size() == 2) {
        HashSet<Composite*>::Iterator it = selection.begin();
        for (; +it; ++it) {
            if (RTTI::isKindOf<Atom>(**it)) {
                if (!first_atom) {
                    first_atom = reinterpret_cast<Atom*>(*it);
                } else if (!second_atom) {
                    second_atom = reinterpret_cast<Atom*>(*it);
                }
            }
            // case 3: a single atom in a selected atom container
            else if (RTTI::isKindOf<AtomContainer>(**it)) {
                AtomContainer* ac = reinterpret_cast<AtomContainer*>(*it);
                if (ac->countAtoms() == 1) {
                    if (!first_atom) {
                        first_atom = &*ac->beginAtom();
                    } else if (!second_atom) {
                        second_atom = &*ac->beginAtom();
                    }
                } else {
                    Log.error() << (String)tr("Scene: Internal error! ") << __LINE__ << std::endl;
                }
            }
        }
    }

    // we found two atoms
    if (first_atom && second_atom) {
        // create a bond
        Bond* bond = first_atom->createBond(*second_atom);
        bond->setOrder(Bond::ORDER__SINGLE);
        //TODO: single bond or current edit mode default bond order?

        // TODO: for undo-operation
        // EditOperation eo(0, bond, "Added bond of type single", EditOperation::ADDED__BOND);
        // undo_.push_back(eo);
        //
        // // tell about the new undo operation
        // emit newEditOperation(eo);

        // if the bond is between two molecules, merge them
        scene_->merge(first_atom, second_atom);

        // update representation
        scene_->getMainControl()->update(*first_atom, true);
        scene_->getMainControl()->update(*second_atom, true);

        scene_->setStatusbarText(tr("Added a bond"));

        // deselect and delete recursively from the selection set
        HashSet<Composite*>::Iterator it = selection.begin();
        for (; +it; ++it) {
            if (!(**it).containsSelection())
                continue;
            scene_->getMainControl()->deselectCompositeRecursive(*it, true);
            scene_->getMainControl()->update(**it, false);
        }

        first_atom->deselect();
        second_atom->deselect();

        // update representation
        scene_->getMainControl()->update(*first_atom, true);
        scene_->getMainControl()->update(*second_atom, true);
    } else {
        scene_->setStatusbarText(tr("Please select exactly two atoms."), true);
    }
}
void SVGResourcesCycleSolver::resolveCycles()
{
    ASSERT(m_allResources.isEmpty());

#if DEBUG_CYCLE_DETECTION > 0
    fprintf(stderr, "\nBefore cycle detection:\n");
    m_resources->dump(m_renderer);
#endif

    // Stash all resources into a HashSet for the ease of traversing.
    HashSet<RenderSVGResourceContainer*> localResources;
    m_resources->buildSetOfResources(localResources);
    ASSERT(!localResources.isEmpty());

    // Add all parent resource containers to the HashSet.
    HashSet<RenderSVGResourceContainer*> parentResources;
    RenderObject* parent = m_renderer->parent();
    while (parent) {
        if (parent->isSVGResourceContainer())
            parentResources.add(toRenderSVGResourceContainer(parent));
        parent = parent->parent();
    }

#if DEBUG_CYCLE_DETECTION > 0
    fprintf(stderr, "\nDetecting whether any resource references any of the following objects:\n");
    {
        fprintf(stderr, "Local resources:\n");
        HashSet<RenderSVGResourceContainer*>::iterator end = localResources.end();
        for (HashSet<RenderSVGResourceContainer*>::iterator it = localResources.begin(); it != end; ++it)
            fprintf(stderr, "|> %s: object=%p (node=%p)\n", (*it)->renderName(), *it, (*it)->node());

        fprintf(stderr, "Parent resources:\n");
        end = parentResources.end();
        for (HashSet<RenderSVGResourceContainer*>::iterator it = parentResources.begin(); it != end; ++it)
            fprintf(stderr, "|> %s: object=%p (node=%p)\n", (*it)->renderName(), *it, (*it)->node());
    }
#endif

    // Build combined set of local and parent resources.
    m_allResources = localResources;
    HashSet<RenderSVGResourceContainer*>::iterator end = parentResources.end();
    for (HashSet<RenderSVGResourceContainer*>::iterator it = parentResources.begin(); it != end; ++it)
        m_allResources.add(*it);

    // If we're a resource, add ourselves to the HashSet.
    if (m_renderer->isSVGResourceContainer())
        m_allResources.add(toRenderSVGResourceContainer(m_renderer));

    ASSERT(!m_allResources.isEmpty());

    // The job of this function is to determine whether any of the 'resources' associated with the given 'renderer'
    // references us (or whether any of its kids references us) -> that's a cycle, we need to find and break it.
    end = localResources.end();
    for (HashSet<RenderSVGResourceContainer*>::iterator it = localResources.begin(); it != end; ++it) {
        RenderSVGResourceContainer* resource = *it;
        if (parentResources.contains(resource) || resourceContainsCycles(resource))
            breakCycle(resource);
    }

#if DEBUG_CYCLE_DETECTION > 0
    fprintf(stderr, "\nAfter cycle detection:\n");
    m_resources->dump(m_renderer);
#endif

    m_allResources.clear();
}
    ff.options.setVector(PeriodicBoundary::Option::PERIODIC_BOX_LOWER, Vector3(-8.0));
    ff.options.setVector(PeriodicBoundary::Option::PERIODIC_BOX_UPPER, Vector3(8.0));
    ff.periodic_boundary.enable();
    ff.setup(S);

CHECK(calculateNonBondedAtomPairs())
    ForceField::PairVector pair_vector;
    pair_vector.reserve(2000000);
    MolmecSupport::calculateNonBondedAtomPairs
        (pair_vector, ff.getAtoms(), ff.periodic_boundary.getBox(), 4.0, true, MolmecSupport::HASH_GRID);
    std::cout << pair_vector.size() << std::endl;

    HashSet<ForceField::PairVector::value_type> pair_set_hash_grid;
    std::copy(pair_vector.begin(), pair_vector.end(), std::inserter(pair_set_hash_grid, pair_set_hash_grid.begin()));

    pair_vector.clear();
    MolmecSupport::calculateNonBondedAtomPairs
        (pair_vector, ff.getAtoms(), ff.periodic_boundary.getBox(), 4.0, true, MolmecSupport::BRUTE_FORCE);
    STATUS("first atom handle: " << ff.getAtoms()[0]->getHandle())
    STATUS("last atom handle: " << ff.getAtoms()[ff.getAtoms().size() - 1]->getHandle())
    std::cout << pair_vector.size() << std::endl;

    HashSet<ForceField::PairVector::value_type> pair_set_brute_force;
    std::copy(pair_vector.begin(), pair_vector.end(), std::inserter(pair_set_brute_force, pair_set_brute_force.begin()));
    pair_vector.clear();

    STATUS("# of pairs in hash grid set: " << pair_set_hash_grid.size())
void SharedWorkerProxy::postConsoleMessageToWorkerObject(MessageDestination destination, MessageSource source, MessageType type, MessageLevel level, const String& message, int lineNumber, const String& sourceURL)
{
    MutexLocker lock(m_workerDocumentsLock);
    for (HashSet<Document*>::iterator iter = m_workerDocuments.begin(); iter != m_workerDocuments.end(); ++iter)
        (*iter)->postTask(createCallbackTask(&postConsoleMessageTask, destination, source, type, level, message, lineNumber, sourceURL));
}
void SVGRenderSupport::layoutChildren(RenderObject* start, bool selfNeedsLayout)
{
    bool layoutSizeChanged = layoutSizeOfNearestViewportChanged(start);
    bool transformChanged = transformToRootChanged(start);
    bool hasSVGShadow = rendererHasSVGShadow(start);
    bool needsBoundariesUpdate = start->needsBoundariesUpdate();
    HashSet<RenderObject*> notlayoutedObjects;

    for (RenderObject* child = start->firstChildSlow(); child; child = child->nextSibling()) {
        bool needsLayout = selfNeedsLayout;
        bool childEverHadLayout = child->everHadLayout();

        if (needsBoundariesUpdate && hasSVGShadow) {
            // If we have a shadow, our shadow is baked into our children's cached boundaries,
            // so they need to update.
            child->setNeedsBoundariesUpdate();
            needsLayout = true;
        }

        if (transformChanged) {
            // If the transform changed we need to update the text metrics (note: this also happens for layoutSizeChanged=true).
            if (child->isSVGText())
                toRenderSVGText(child)->setNeedsTextMetricsUpdate();
            needsLayout = true;
        }

        if (layoutSizeChanged) {
            // When selfNeedsLayout is false and the layout size changed, we have to check whether this child uses relative lengths
            if (SVGElement* element = child->node()->isSVGElement() ? toSVGElement(child->node()) : 0) {
                if (element->hasRelativeLengths()) {
                    // When the layout size changed and when using relative values tell the RenderSVGShape to update its shape object
                    if (child->isSVGShape())
                        toRenderSVGShape(child)->setNeedsShapeUpdate();
                    else if (child->isSVGText()) {
                        toRenderSVGText(child)->setNeedsTextMetricsUpdate();
                        toRenderSVGText(child)->setNeedsPositioningValuesUpdate();
                    }

                    needsLayout = true;
                }
            }
        }

        if (needsLayout)
            child->setNeedsLayout(MarkOnlyThis);

        if (child->needsLayout()) {
            toRenderElement(child)->layout();
            // Renderers are responsible for repainting themselves when changing, except
            // for the initial paint to avoid potential double-painting caused by non-sensical "old" bounds.
            // We could handle this in the individual objects, but for now it's easier to have
            // parent containers call repaint(). (RenderBlock::layout* has similar logic.)
            if (!childEverHadLayout)
                child->repaint();
        } else if (layoutSizeChanged)
            notlayoutedObjects.add(child);

        ASSERT(!child->needsLayout());
    }

    if (!layoutSizeChanged) {
        ASSERT(notlayoutedObjects.isEmpty());
        return;
    }

    // If the layout size changed, invalidate all resources of all children that didn't go through the layout() code path.
    HashSet<RenderObject*>::iterator end = notlayoutedObjects.end();
    for (HashSet<RenderObject*>::iterator it = notlayoutedObjects.begin(); it != end; ++it)
        invalidateResourcesOfChildren(*it);
}
void PageGroup::clearDomStorage()
{
    if (!pageGroups)
        return;

    PageGroupMap::iterator end = pageGroups->end();

    for (PageGroupMap::iterator it = pageGroups->begin(); it != end; ++it) {
        String basePath = "";

        // This is being called as a result of the user explicitly
        // asking to clear all stored data (e.g. through a settings
        // dialog). We need a page in the page group to fire a
        // StorageEvent. There isn't really a correct page to use
        // as the source (as the clear request hasn't come from a
        // particular page). One thing we should ensure though is that
        // we don't try to clear a private browsing mode page as that has no concept
        // of DOM storage.
        HashSet<Page*> pages = it->second->pages();
        HashSet<Page*>::iterator pagesEnd = pages.end();
        Page* page = 0;
        for (HashSet<Page*>::iterator pit = pages.begin(); pit != pagesEnd; ++pit) {
            Page* p = *pit;

            // Grab the storage location from an arbitrary page. This is set
            // to the same value on all private browsing and "normal" pages,
            // so we can get it from anything.
            if (basePath.isEmpty())
                basePath = p->settings()->localStorageDatabasePath();

            // DOM storage is disabled in private browsing pages, so nothing to do if
            // this is such a page.
            if (p->settings()->privateBrowsingEnabled())
                continue;

            // Clear session storage.
            StorageNamespace* sessionStorage = p->sessionStorage(false);
            if (sessionStorage)
                sessionStorage->clear(p);

            // Save this page so we can clear local storage.
            page = p;
        }

        // If page is still null at this point, then the only pages that are
        // open are private browsing pages. Hence no pages are currently using local
        // storage, so we don't need a page pointer to send any events and the
        // clear function will handle a 0 input.
        it->second->localStorage()->clear(page);
        it->second->localStorage()->close();

        // Closing the storage areas will stop the background thread and so
        // we need to remove the local storage ref here so that next time
        // we come to a site that uses it the thread will get started again.
        it->second->removeLocalStorage();

        // At this point, active local and session storage have been cleared and the
        // StorageAreas for this PageGroup closed. The final sync will have taken
        // place. All that is left is to purge the database files.
        if (!basePath.isEmpty()) {
            Vector<String> files = listDirectory(basePath, "*.localstorage");
            Vector<String>::iterator filesEnd = files.end();
            for (Vector<String>::iterator it = files.begin(); it != filesEnd; ++it)
                deleteFile(*it);
        }
    }
}
void FieldInfos::add(HashSet<String> names, bool isIndexed)
{
    SyncLock syncLock(this);
    for (HashSet<String>::iterator name = names.begin(); name != names.end(); ++name)
        add(*name, isIndexed);
}
void FieldInfos::addIndexed(HashSet<String> names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
{
    SyncLock syncLock(this);
    for (HashSet<String>::iterator name = names.begin(); name != names.end(); ++name)
        add(*name, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector);
}
bool PluginDatabase::refresh()
{
#if ENABLE(NETSCAPE_PLUGIN_METADATA_CACHE)
    if (!m_persistentMetadataCacheIsLoaded)
        loadPersistentMetadataCache();
#endif

    bool pluginSetChanged = false;

    if (!m_plugins.isEmpty()) {
        PluginSet pluginsToUnload;
        getDeletedPlugins(pluginsToUnload);

        // Unload plugins
        PluginSet::const_iterator end = pluginsToUnload.end();
        for (PluginSet::const_iterator it = pluginsToUnload.begin(); it != end; ++it)
            remove(it->get());

        pluginSetChanged = !pluginsToUnload.isEmpty();
    }

    HashSet<String> paths;
    getPluginPathsInDirectories(paths);

    HashMap<String, time_t> pathsWithTimes;

    // We should only skip unchanged files if we didn't remove any plugins above. If we did remove
    // any plugins, we need to look at every plugin file so that, e.g., if the user has two versions
    // of RealPlayer installed and just removed the newer one, we'll pick up the older one.
    bool shouldSkipUnchangedFiles = !pluginSetChanged;

    HashSet<String>::const_iterator pathsEnd = paths.end();
    for (HashSet<String>::const_iterator it = paths.begin(); it != pathsEnd; ++it) {
        time_t lastModified;
        if (!getFileModificationTime(*it, lastModified))
            continue;

        pathsWithTimes.add(*it, lastModified);

        // If the path's timestamp hasn't changed since the last time we ran refresh(), we don't have to do anything.
        if (shouldSkipUnchangedFiles && m_pluginPathsWithTimes.get(*it) == lastModified)
            continue;

        if (RefPtr<PluginPackage> oldPackage = m_pluginsByPath.get(*it)) {
            ASSERT(!shouldSkipUnchangedFiles || oldPackage->lastModified() != lastModified);
            remove(oldPackage.get());
        }

        RefPtr<PluginPackage> package = PluginPackage::createPackage(*it, lastModified);
        if (package && add(package.release()))
            pluginSetChanged = true;
    }

    // Cache all the paths we found with their timestamps for next time.
    pathsWithTimes.swap(m_pluginPathsWithTimes);

    if (!pluginSetChanged)
        return false;

#if ENABLE(NETSCAPE_PLUGIN_METADATA_CACHE)
    updatePersistentMetadataCache();
#endif

    m_registeredMIMETypes.clear();

    // Register plug-in MIME types
    PluginSet::const_iterator end = m_plugins.end();
    for (PluginSet::const_iterator it = m_plugins.begin(); it != end; ++it) {
        // Get MIME types
        MIMEToDescriptionsMap::const_iterator map_it = (*it)->mimeToDescriptions().begin();
        MIMEToDescriptionsMap::const_iterator map_end = (*it)->mimeToDescriptions().end();
        for (; map_it != map_end; ++map_it)
            m_registeredMIMETypes.add(map_it->first);
    }

    return true;
}
void syntaxTest_HashSet()
{
    {
        HashSet<int> s;
        s.insert(1);
        s.insert(2);
        s.insert(1);
        s.insert(3);
        s.insert(2);
        assert(s.size() == 3);
        s.erase(3);
        assert(s.size() == 2);
        s.erase(3);
        assert(s.size() == 2);
    }
    {
        int a[5] = {1, 3, 2, 4, 1};
        HashSet<int> s(a, a + 5);
        assert(s.size() == 4);
        {
            HashSet<int> s2(s);
            assert(s2.size() == 4);
        }
        {
            HashSet<int> s2;
            assert(s2.empty());
            s2 = s;
            assert(s2.size() == 4);
            assert(!s2.empty());
            assert(s2.contain(3));
            assert(s2.count(2) == 1);
            assert(s2 == s);
            assert(!(s2 != s));
        }
    }
    {
        std::vector<int> v(5, 0);
        HashSet<int> s;
        s.insert(v.begin(), v.end());
        assert(s.size() == 1);
        assert(s.find(1) == s.end());
        assert(s.find(0) != s.end());
        assert(s.find(0) == s.begin());
        s.erase(s.find(0));
        assert(s.empty());
        s.insert(5);
        assert(!s.empty());
        s.clear();
        assert(s.empty());

        HashSet<int> s2;
        s2.insert(5);
        std::swap(s, s2);
        assert(s2.empty());
        assert(s.find(5) != s.end());
    }
    {
        int a[4] = {1, 2, 3, 1};
        const HashSet<int> s(a, a + 4);
        HashSet<int> b;
        for (HashSet<int>::ConstIterator iter = s.begin(); iter != s.end(); ++iter) {
            b.insert(*iter);
        }
        assert(b.size() == 3);
    }
}
void SharedWorkerProxy::postExceptionToWorkerObject(const String& errorMessage, int lineNumber, const String& sourceURL)
{
    MutexLocker lock(m_workerDocumentsLock);
    for (HashSet<Document*>::iterator iter = m_workerDocuments.begin(); iter != m_workerDocuments.end(); ++iter)
        (*iter)->postTask(createCallbackTask(&postExceptionTask, errorMessage, lineNumber, sourceURL));
}
void performanceTest_string()
{
    const int N = 1;
    std::vector<std::string> textArray;
    {
        std::ifstream fi("1.txt");
        for (std::string s; getline(fi, s);)
            textArray.push_back(s);
    }
    std::vector<int> rArray;
    for (int i = 0; i < (1 << 20); ++i)
        rArray.push_back(rand() % textArray.size());

    std::vector<std::string> res;
    {
        Timer _t("HashSet");
        for (int _ = 0; _ < N; ++_) {
            HashSet<std::string> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res.empty()) {
                res.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res.begin(), res.end());
    cout << res.size() << endl;

    std::vector<std::string> res2;
    {
        Timer _t("set");
        for (int _ = 0; _ < N; ++_) {
            std::set<std::string> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res2.empty()) {
                res2.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res2.begin(), res2.end());
    cout << (res == res2) << endl;

    res2.clear(); // reset so the hash_set results below are actually captured and compared
    {
        Timer _t("hash_set");
        for (int _ = 0; _ < N; ++_) {
            stdext::hash_set<std::string> s;
            for (int i = 0; i < textArray.size(); ++i)
                s.insert(textArray[i]);
            for (int i = 0; i < rArray.size(); ++i) {
                s.erase(textArray[rArray[i]]);
            }
            for (int i = 0; i < rArray.size(); ++i) {
                if (rArray[i] & 3) {
                    s.erase(textArray[rArray[i]]);
                } else {
                    s.insert(textArray[rArray[i]]);
                }
            }
            if (res2.empty()) {
                res2.assign(s.begin(), s.end());
            }
        }
    }
    std::sort(res2.begin(), res2.end());
    cout << (res == res2) << endl;
}
void SVGRenderSupport::layoutChildren(RenderObject* start, bool selfNeedsLayout)
{
    bool layoutSizeChanged = layoutSizeOfNearestViewportChanged(start);
    bool transformChanged = transformToRootChanged(start);
    HashSet<RenderObject*> notlayoutedObjects;

    for (RenderObject* child = start->slowFirstChild(); child; child = child->nextSibling()) {
        bool needsLayout = selfNeedsLayout;
        bool childEverHadLayout = child->everHadLayout();

        if (transformChanged) {
            // If the transform changed we need to update the text metrics (note: this also happens for layoutSizeChanged=true).
            if (child->isSVGText())
                toRenderSVGText(child)->setNeedsTextMetricsUpdate();
            needsLayout = true;
        }

        if (layoutSizeChanged) {
            // When selfNeedsLayout is false and the layout size changed, we have to check whether this child uses relative lengths
            if (SVGElement* element = child->node()->isSVGElement() ? toSVGElement(child->node()) : 0) {
                if (element->hasRelativeLengths()) {
                    // When the layout size changed and when using relative values tell the RenderSVGShape to update its shape object
                    if (child->isSVGShape()) {
                        toRenderSVGShape(child)->setNeedsShapeUpdate();
                    } else if (child->isSVGText()) {
                        toRenderSVGText(child)->setNeedsTextMetricsUpdate();
                        toRenderSVGText(child)->setNeedsPositioningValuesUpdate();
                    }

                    needsLayout = true;
                }
            }
        }

        SubtreeLayoutScope layoutScope(*child);
        // Resource containers are nasty: they can invalidate clients outside the current SubtreeLayoutScope.
        // Since they only care about viewport size changes (to resolve their relative lengths), we trigger
        // their invalidation directly from SVGSVGElement::svgAttributeChange() or at a higher
        // SubtreeLayoutScope (in RenderView::layout()).
        if (needsLayout && !child->isSVGResourceContainer())
            layoutScope.setNeedsLayout(child);

        layoutResourcesIfNeeded(child);

        if (child->needsLayout()) {
            child->layout();
            // Renderers are responsible for repainting themselves when changing, except
            // for the initial paint to avoid potential double-painting caused by non-sensical "old" bounds.
            // We could handle this in the individual objects, but for now it's easier to have
            // parent containers call repaint(). (RenderBlock::layout* has similar logic.)
            if (!childEverHadLayout && !RuntimeEnabledFeatures::repaintAfterLayoutEnabled())
                child->paintInvalidationForWholeRenderer();
        } else if (layoutSizeChanged) {
            notlayoutedObjects.add(child);
        }
    }

    if (!layoutSizeChanged) {
        ASSERT(notlayoutedObjects.isEmpty());
        return;
    }

    // If the layout size changed, invalidate all resources of all children that didn't go through the layout() code path.
    HashSet<RenderObject*>::iterator end = notlayoutedObjects.end();
    for (HashSet<RenderObject*>::iterator it = notlayoutedObjects.begin(); it != end; ++it)
        invalidateResourcesOfChildren(*it);
}
static void compileStub(
    unsigned exitID, JITCode* jitCode, OSRExit& exit, VM* vm, CodeBlock* codeBlock)
{
    StackMaps::Record* record = nullptr;

    for (unsigned i = jitCode->stackmaps.records.size(); i--;) {
        record = &jitCode->stackmaps.records[i];
        if (record->patchpointID == exit.m_stackmapID)
            break;
    }

    RELEASE_ASSERT(record->patchpointID == exit.m_stackmapID);

    // This code requires framePointerRegister is the same as callFrameRegister
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(vm, codeBlock);

    // We need scratch space to save all registers, to build up the JS stack, to deal with unwind
    // fixup, pointers to all of the objects we materialize, and the elements inside those objects
    // that we materialize.

    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        numMaterializations++;

        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }

    ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_values.size() + numMaterializations + maxMaterializationNumArguments)
        + requiredScratchMemorySizeInBytes()
        + codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
    EncodedJSValue* materializationPointers = scratch + exit.m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());

    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }

    // Note that when we come in here, the stack is as LLVM left it, except that someone called
    // pushToSave(). We don't care about the value they saved, but we do appreciate the fact that
    // they did it, because we use that slot for saveAllRegisters().

    saveAllRegisters(jit, registerScratch);

    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();

    if (vm->m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation) {
        Profiler::Database& database = *vm->m_perBytecodeProfiler;
        Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Get the call frame and tag thingies.
    // Restore the exiting function's callFrame value into a regT4
    jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

    // Do some value profiling.
    if (exit.m_profileDataFormat != DataFormatNone) {
        record->locations[0].restoreInto(jit, jitCode->stackmaps, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeOffset()), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
            }
        }

        if (!!exit.m_valueProfile)
            jit.store64(GPRInfo::regT0, exit.m_valueProfile.getSpecFailBucket(0));
    }

    // Materialize all objects. Don't materialize an object until all
    // of the objects it needs have been materialized. We break cycles
    // by populating objects late - we only consider an object as
    // needing another object if the latter is needed for the
    // allocation of the former.
    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations)
        toMaterialize.add(materialization);

    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (!value.location().neededForMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since it needs a
                    // materialization that hasn't been materialized.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we
            // recover the values of all of its fields and then we
            // call a function to actually allocate the beast.
            // We only recover the fields that are needed for the allocation.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitPropertyValue& property = materialization->properties()[propertyIndex];
                const ExitValue& value = property.value();
                if (!property.location().neededForMaterialization())
                    continue;

                compileRecovery(
                    jit, value, record, jitCode->stackmaps, registerScratch,
                    materializationToPointer);
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }

            // This call assumes that we don't pass arguments on the stack.
            jit.setupArgumentsWithExecState(
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }

        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

    // Now that all the objects have been allocated, we populate them
    // with the correct values. This time we can recover all the
    // fields, including those that are only needed for the allocation.
    for (ExitTimeObjectMaterialization* materialization : exit.m_materializations) {
        for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
            const ExitValue& value = materialization->properties()[propertyIndex].value();
            compileRecovery(
                jit, value, record, jitCode->stackmaps, registerScratch,
                materializationToPointer);
            jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
        }

        // This call assumes that we don't pass arguments on the stack
        jit.setupArgumentsWithExecState(
            CCallHelpers::TrustedImmPtr(materialization),
            CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
            CCallHelpers::TrustedImmPtr(materializationArguments));
        jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0);
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.
    for (unsigned index = exit.m_values.size(); index--;) {
        compileRecovery(
            jit, exit.m_values[index], record, jitCode->stackmaps, registerScratch,
            materializationToPointer);
        jit.store64(GPRInfo::regT0, scratch + index);
    }

    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things according to either exit.m_values or by copying from the
    // old frame, and finally we save the various callee-save registers into where the
    // restoration thunk would restore them from.

    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) {
        RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i);
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }

    jit.load32(CCallHelpers::payloadFor(JSStack::ArgumentCount), GPRInfo::regT2);

    // Let's say that the FTL function had failed its arity check. In that case, the stack will
    // contain some extra stuff.
    //
    // We compute the padded stack space:
    //
    //     paddedStackSpace = roundUp(codeBlock->numParameters - regT2 + 1)
    //
    // The stack will have regT2 + CallFrameHeaderSize stuff.
    // We want to make the stack look like this, from higher addresses down:
    //
    //     - argument padding
    //     - actual arguments
    //     - call frame header

    // This code assumes that we're dealing with FunctionCode.
    RELEASE_ASSERT(codeBlock->codeType() == FunctionCode);

    jit.add32(
        MacroAssembler::TrustedImm32(-codeBlock->numParameters()), GPRInfo::regT2,
        GPRInfo::regT3);
    MacroAssembler::Jump arityIntact = jit.branch32(
        MacroAssembler::GreaterThanOrEqual, GPRInfo::regT3, MacroAssembler::TrustedImm32(0));
    jit.neg32(GPRInfo::regT3);
    jit.add32(MacroAssembler::TrustedImm32(1 + stackAlignmentRegisters() - 1), GPRInfo::regT3);
    jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), GPRInfo::regT3);
    jit.add32(GPRInfo::regT3, GPRInfo::regT2);
    arityIntact.link(&jit);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);

    // First set up SP so that our data doesn't get clobbered by signals.
    unsigned conservativeStackDelta =
        (exit.m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register)
        + maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
    RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();

    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!allFTLCalleeSaves.get(reg))
            continue;
        unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
        RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);

        if (reg.isGPR()) {
            GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();

            if (unwindIndex == UINT_MAX) {
                // The FTL compilation didn't preserve this register. This means that it also
                // didn't use the register. So its value at the beginning of OSR exit should be
                // preserved by the thunk. Luckily, we saved all registers into the register
                // scratch buffer, so we can restore them from there.
                jit.load64(registerScratch + offsetOfReg(reg), regToLoad);
            } else {
                // The FTL compilation preserved the register. Its new value is therefore
                // irrelevant, but we can get the value that was preserved by using the unwind
                // data. We've already copied all unwind-able preserved registers into the unwind
                // scratch buffer, so we can get it from there.
                jit.load64(unwindScratch + unwindIndex, regToLoad);
            }

            if (baselineRegisterOffset)
                jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
        } else {
            FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();

            if (unwindIndex == UINT_MAX)
                jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad);
            else
                jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad);

            if (baselineRegisterOffset)
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
        }
    }

    size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed so we just move them.
    for (unsigned index = exit.m_values.size(); index--;) {
        VirtualRegister reg = exit.m_values.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves))
            continue;

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg));
    }

    handleExitCounts(jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(jit, exit, false);

    LinkBuffer patchBuffer(*vm, jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer,
        ("FTL OSR exit #%u (%s, %s) from %s, with operands = %s, and record = %s",
            exitID, toCString(exit.m_codeOrigin).data(),
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(exit.m_values)).data(),
            toCString(*record).data()));
}