void XapianIndex::removeCommonTerms(Xapian::Document &doc) { DocumentInfo docInfo; string record(doc.get_data()); // First, remove the magic term doc.remove_term(MAGIC_TERM); if (record.empty() == true) { // Nothing else we can do return; } string language(StringManip::extractField(record, "language=", "")); string timestamp(StringManip::extractField(record, "timestamp=", "\n")); docInfo = DocumentInfo(StringManip::extractField(record, "caption=", "\n"), StringManip::extractField(record, "url=", "\n"), StringManip::extractField(record, "type=", "\n"), Languages::toLocale(language)); // We used to use timestamp prior to 0.60 if (timestamp.empty() == true) { string modTime(StringManip::extractField(record, "modtime=", "\n")); if (modTime.empty() == false) { time_t timeT = (time_t )atol(modTime.c_str()); timestamp = TimeConverter::toTimestamp(timeT); } } docInfo.setTimestamp(timestamp); Url urlObj(docInfo.getLocation()); // FIXME: remove terms extracted from the title if they don't have more than one posting string title(docInfo.getTitle()); if (title.empty() == false) { Document titleDoc; titleDoc.setData(title.c_str(), title.length()); Tokenizer titleTokens(&titleDoc); removeFirstPostingsFromDocument(titleTokens, doc, "S", language, STORE_UNSTEM); titleTokens.rewind(); removeFirstPostingsFromDocument(titleTokens, doc, "", language, m_stemMode); } // Title doc.remove_term(limitTermLength(string("U") + docInfo.getLocation(), true)); // Host name string hostName(StringManip::toLowerCase(urlObj.getHost())); if (hostName.empty() == false) { doc.remove_term(limitTermLength(string("H") + hostName, true)); string::size_type dotPos = hostName.find('.'); while (dotPos != string::npos) { doc.remove_term(limitTermLength(string("H") + hostName.substr(dotPos + 1), true)); // Next dotPos = hostName.find('.', dotPos + 1); } } // ...location string tree(urlObj.getLocation()); if (tree.empty() == false) { doc.remove_term(limitTermLength(string("XDIR:") + tree, true)); string::size_type slashPos = tree.find('/', 1); while (slashPos != string::npos) { doc.remove_term(limitTermLength(string("XDIR:") + tree.substr(0, slashPos), true)); // Next slashPos = tree.find('/', slashPos + 1); } } // ...and file name string fileName(urlObj.getFile()); if (fileName.empty() == false) { doc.remove_term(limitTermLength(string("P") + StringManip::toLowerCase(fileName), true)); } // Language code doc.remove_term(string("L") + Languages::toCode(language)); // MIME type doc.remove_term(string("T") + docInfo.getType()); }
void TypingCommand::insertParagraphSeparator(Document& document, Options options) { if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(document.frame())) { lastTypingCommand->setShouldRetainAutocorrectionIndicator(options & RetainAutocorrectionIndicator); lastTypingCommand->insertParagraphSeparator(); return; } applyCommand(TypingCommand::create(document, InsertParagraphSeparator, "", options)); }
bool IsImageExtractionAllowed(Document* aDocument, JSContext* aCx, nsIPrincipal& aPrincipal) { // Do the rest of the checks only if privacy.resistFingerprinting is on. if (!nsContentUtils::ShouldResistFingerprinting(aDocument)) { return true; } // Don't proceed if we don't have a document or JavaScript context. if (!aDocument || !aCx) { return false; } // The system principal can always extract canvas data. if (nsContentUtils::IsSystemPrincipal(&aPrincipal)) { return true; } // Allow extension principals. auto principal = BasePrincipal::Cast(&aPrincipal); if (principal->AddonPolicy() || principal->ContentScriptAddonPolicy()) { return true; } // Get the document URI and its spec. nsIURI* docURI = aDocument->GetDocumentURI(); nsCString docURISpec; docURI->GetSpec(docURISpec); // Allow local files to extract canvas data. bool isFileURL; if (NS_SUCCEEDED(docURI->SchemeIs("file", &isFileURL)) && isFileURL) { return true; } // Don't show canvas prompt for PDF.js JS::AutoFilename scriptFile; if (JS::DescribeScriptedCaller(aCx, &scriptFile) && scriptFile.get() && strcmp(scriptFile.get(), "resource://pdf.js/build/pdf.js") == 0) { return true; } Document* topLevelDocument = aDocument->GetTopLevelContentDocument(); nsIURI* topLevelDocURI = topLevelDocument ? topLevelDocument->GetDocumentURI() : nullptr; nsCString topLevelDocURISpec; if (topLevelDocURI) { topLevelDocURI->GetSpec(topLevelDocURISpec); } // Load Third Party Util service. nsresult rv; nsCOMPtr<mozIThirdPartyUtil> thirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID, &rv); NS_ENSURE_SUCCESS(rv, false); // Block all third-party attempts to extract canvas. bool isThirdParty = true; rv = thirdPartyUtil->IsThirdPartyURI(topLevelDocURI, docURI, &isThirdParty); NS_ENSURE_SUCCESS(rv, false); if (isThirdParty) { nsAutoString message; message.AppendPrintf("Blocked third party %s from extracting canvas data.", docURISpec.get()); nsContentUtils::ReportToConsoleNonLocalized( message, nsIScriptError::warningFlag, NS_LITERAL_CSTRING("Security"), aDocument); return false; } // Load Permission Manager service. nsCOMPtr<nsIPermissionManager> permissionManager = do_GetService(NS_PERMISSIONMANAGER_CONTRACTID, &rv); NS_ENSURE_SUCCESS(rv, false); // Check if the site has permission to extract canvas data. // Either permit or block extraction if a stored permission setting exists. uint32_t permission; rv = permissionManager->TestPermissionFromPrincipal( principal, PERMISSION_CANVAS_EXTRACT_DATA, &permission); NS_ENSURE_SUCCESS(rv, false); switch (permission) { case nsIPermissionManager::ALLOW_ACTION: return true; case nsIPermissionManager::DENY_ACTION: return false; default: break; } // At this point, permission is unknown // (nsIPermissionManager::UNKNOWN_ACTION). // Check if the request is in response to user input bool isAutoBlockCanvas = StaticPrefs:: privacy_resistFingerprinting_autoDeclineNoUserInputCanvasPrompts() && !EventStateManager::IsHandlingUserInput(); if (isAutoBlockCanvas) { nsAutoString message; message.AppendPrintf( "Blocked %s from extracting canvas data because no user input was " "detected.", docURISpec.get()); nsContentUtils::ReportToConsoleNonLocalized( message, nsIScriptError::warningFlag, NS_LITERAL_CSTRING("Security"), aDocument); } else { // It was in response to user input, so log and display the prompt. 
nsAutoString message; message.AppendPrintf( "Blocked %s from extracting canvas data, but prompting the user.", docURISpec.get()); nsContentUtils::ReportToConsoleNonLocalized( message, nsIScriptError::warningFlag, NS_LITERAL_CSTRING("Security"), aDocument); } // Prompt the user (asynchronous). nsPIDOMWindowOuter* win = aDocument->GetWindow(); nsAutoCString origin; rv = principal->GetOrigin(origin); NS_ENSURE_SUCCESS(rv, false); if (XRE_IsContentProcess()) { BrowserChild* browserChild = BrowserChild::GetFrom(win); if (browserChild) { browserChild->SendShowCanvasPermissionPrompt(origin, isAutoBlockCanvas); } } else { nsCOMPtr<nsIObserverService> obs = mozilla::services::GetObserverService(); if (obs) { obs->NotifyObservers(win, isAutoBlockCanvas ? TOPIC_CANVAS_PERMISSIONS_PROMPT_HIDE_DOORHANGER : TOPIC_CANVAS_PERMISSIONS_PROMPT, NS_ConvertUTF8toUTF16(origin).get()); } } // We don't extract the image for now -- user may override at prompt. return false; }
void CSSFontSelector::updateGenericFontFamilySettings(Document& document) { ASSERT(document.settings()); m_genericFontFamilySettings = document.settings()->genericFontFamilySettings(); }
void WebSocket::connect(const String& url, const Vector<String>& protocols, ExceptionState& exceptionState) { WTF_LOG(Network, "WebSocket %p connect() url='%s'", this, url.utf8().data()); m_url = KURL(KURL(), url); if (!m_url.isValid()) { m_state = CLOSED; exceptionState.throwDOMException(SyntaxError, "The URL '" + url + "' is invalid."); return; } if (!m_url.protocolIs("ws") && !m_url.protocolIs("wss")) { m_state = CLOSED; exceptionState.throwDOMException(SyntaxError, "The URL's scheme must be either 'ws' or 'wss'. '" + m_url.protocol() + "' is not allowed."); return; } if (MixedContentChecker::isMixedContent(executionContext()->securityOrigin(), m_url)) { // FIXME: Throw an exception and close the connection. String message = "Connecting to a non-secure WebSocket server from a secure origin is deprecated."; executionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, message); } if (m_url.hasFragmentIdentifier()) { m_state = CLOSED; exceptionState.throwDOMException(SyntaxError, "The URL contains a fragment identifier ('" + m_url.fragmentIdentifier() + "'). Fragment identifiers are not allowed in WebSocket URLs."); return; } if (!portAllowed(m_url)) { m_state = CLOSED; exceptionState.throwSecurityError("The port " + String::number(m_url.port()) + " is not allowed."); return; } // FIXME: Convert this to check the isolated world's Content Security Policy once webkit.org/b/104520 is solved. bool shouldBypassMainWorldContentSecurityPolicy = false; if (executionContext()->isDocument()) { Document* document = toDocument(executionContext()); shouldBypassMainWorldContentSecurityPolicy = document->frame()->script().shouldBypassMainWorldContentSecurityPolicy(); } if (!shouldBypassMainWorldContentSecurityPolicy && !executionContext()->contentSecurityPolicy()->allowConnectToSource(m_url)) { m_state = CLOSED; // The URL is safe to expose to JavaScript, as this check happens synchronously before redirection. exceptionState.throwSecurityError("Refused to connect to '" + m_url.elidedString() + "' because it violates the document's Content Security Policy."); return; } m_channel = WebSocketChannel::create(executionContext(), this); // FIXME: There is a disagreement about restriction of subprotocols between WebSocket API and hybi-10 protocol // draft. The former simply says "only characters in the range U+0021 to U+007E are allowed," while the latter // imposes a stricter rule: "the elements MUST be non-empty strings with characters as defined in [RFC2616], // and MUST all be unique strings." // // Here, we throw SyntaxError if the given protocols do not meet the latter criteria. This behavior does not // comply with WebSocket API specification, but it seems to be the only reasonable way to handle this conflict. for (size_t i = 0; i < protocols.size(); ++i) { if (!isValidProtocolString(protocols[i])) { m_state = CLOSED; exceptionState.throwDOMException(SyntaxError, "The subprotocol '" + encodeProtocolString(protocols[i]) + "' is invalid."); releaseChannel(); return; } } HashSet<String> visited; for (size_t i = 0; i < protocols.size(); ++i) { if (!visited.add(protocols[i]).isNewEntry) { m_state = CLOSED; exceptionState.throwDOMException(SyntaxError, "The subprotocol '" + encodeProtocolString(protocols[i]) + "' is duplicated."); releaseChannel(); return; } } String protocolString; if (!protocols.isEmpty()) protocolString = joinStrings(protocols, subProtocolSeperator()); m_channel->connect(m_url, protocolString); }
PassRefPtr<DocumentFragment> createFragmentFromText(Range* context, const String& text) { if (!context) return 0; Node* styleNode = context->firstNode(); if (!styleNode) { styleNode = context->startPosition().deprecatedNode(); if (!styleNode) return 0; } Document* document = styleNode->document(); RefPtr<DocumentFragment> fragment = document->createDocumentFragment(); if (text.isEmpty()) return fragment.release(); String string = text; string.replace("\r\n", "\n"); string.replace('\r', '\n'); RenderObject* renderer = styleNode->renderer(); if (renderer && renderer->style()->preserveNewline()) { fragment->appendChild(document->createTextNode(string), ASSERT_NO_EXCEPTION); if (string.endsWith('\n')) { RefPtr<Element> element = createBreakElement(document); element->setAttribute(classAttr, AppleInterchangeNewline); fragment->appendChild(element.release(), ASSERT_NO_EXCEPTION); } return fragment.release(); } // A string with no newlines gets added inline, rather than being put into a paragraph. if (string.find('\n') == notFound) { fillContainerFromString(fragment.get(), string); return fragment.release(); } // Break string into paragraphs. Extra line breaks turn into empty paragraphs. Node* blockNode = enclosingBlock(context->firstNode()); Element* block = toElement(blockNode); bool useClonesOfEnclosingBlock = blockNode && blockNode->isElementNode() && !block->hasTagName(bodyTag) && !block->hasTagName(htmlTag) && block != editableRootForPosition(context->startPosition()); bool useLineBreak = enclosingTextFormControl(context->startPosition()); Vector<String> list; string.split('\n', true, list); // true gets us empty strings in the list size_t numLines = list.size(); for (size_t i = 0; i < numLines; ++i) { const String& s = list[i]; RefPtr<Element> element; if (s.isEmpty() && i + 1 == numLines) { // For last line, use the "magic BR" rather than a P. element = createBreakElement(document); element->setAttribute(classAttr, AppleInterchangeNewline); } else if (useLineBreak) { element = createBreakElement(document); fillContainerFromString(fragment.get(), s); } else { if (useClonesOfEnclosingBlock) element = block->cloneElementWithoutChildren(); else element = createDefaultParagraphElement(document); fillContainerFromString(element.get(), s); } fragment->appendChild(element.release(), ASSERT_NO_EXCEPTION); } return fragment.release(); }
bool FilterWrapper::filterDocument(IndexInterface &index, const Document &doc, const string &originalType, const set<string> &labels, unsigned int &docId, bool doUpdate) { Filter *pFilter = FilterFactory::getFilter(doc.getType()); bool fedFilter = false, success = false; if (pFilter != NULL) { fedFilter = FilterUtils::feedFilter(doc, pFilter); } else { // Chances are this type is not supported pFilter = new TextFilter("text/plain"); Document emptyDoc(doc.getTitle(), doc.getLocation(), doc.getType(), doc.getLanguage()); emptyDoc.setTimestamp(doc.getTimestamp()); emptyDoc.setSize(doc.getSize()); emptyDoc.setData(" ", 1); #ifdef DEBUG cout << "FilterWrapper::filterDocument: unsupported type " << doc.getType() << endl; #endif fedFilter = FilterUtils::feedFilter(emptyDoc, pFilter); } if (fedFilter == false) { delete pFilter; return false; } while (pFilter->has_documents() == true) { string actualType(originalType); if (pFilter->next_document() == false) { break; } Document filteredDoc(doc.getTitle(), doc.getLocation(), "text/plain", doc.getLanguage()); filteredDoc.setTimestamp(doc.getTimestamp()); filteredDoc.setSize(doc.getSize()); if (FilterUtils::populateDocument(filteredDoc, pFilter) == false) { continue; } // Is this a nested document ? if (filteredDoc.getLocation().length() > doc.getLocation().length()) { actualType = filteredDoc.getType(); #ifdef DEBUG cout << "FilterWrapper::filterDocument: nested document of type " << actualType << endl; #endif } // Pass it down to another filter ? if ((filteredDoc.getType().length() >= 10) && (filteredDoc.getType().substr(0, 10) == "text/plain")) { // No, it's been reduced to plain text filteredDoc.setType(actualType); Tokenizer tokens(&filteredDoc); if (doUpdate == false) { success = index.indexDocument(tokens, labels, docId); } else { success = index.updateDocument(docId, tokens); } } else { success = filterDocument(index, filteredDoc, originalType, labels, docId, doUpdate); delete pFilter; return success; } } delete pFilter; #ifdef DEBUG if (success == false) { cout << "FilterWrapper::filterDocument: didn't index " << doc.getLocation() << endl; } #endif return success; }
bool V8Proxy::isEnabled() { Settings* settings = m_frame->settings(); if (!settings) return false; // In the common case, JavaScript is enabled and we're done. if (settings->isJavaScriptEnabled()) return true; // If JavaScript has been disabled, we need to look at the frame to tell // whether this script came from the web or the embedder. Scripts from the // embedder are safe to run, but scripts from the other sources are // disallowed. Document* document = m_frame->document(); if (!document) return false; SecurityOrigin* origin = document->securityOrigin(); if (origin->protocol().isEmpty()) return false; // Uninitialized document if (origin->protocol() == "http" || origin->protocol() == "https") return false; // Web site // FIXME: the following are application decisions, and they should // not be made at this layer. instead, we should bridge out to the // embedder to allow them to override policy here. #if PLATFORM(CHROMIUM) // TODO(andreip): ChromeBridge->BrowserBridge? if (origin->protocol() == ChromiumBridge::uiResourceProtocol()) return true; // Embedder's scripts are ok to run #endif // If the scheme is ftp: or file:, an empty file name indicates a directory // listing, which requires JavaScript to function properly. const char* kDirProtocols[] = { "ftp", "file" }; #if PLATFORM(ANDROID) // TODO(andreip): Port arraysize function to Android. There's one in Gears. for (size_t i = 0; i < 2; ++i) { #else for (size_t i = 0; i < arraysize(kDirProtocols); ++i) { #endif if (origin->protocol() == kDirProtocols[i]) { const KURL& url = document->url(); return url.pathAfterLastSlash() == url.pathEnd(); } } return false; // Other protocols fall through to here } void V8Proxy::updateDocumentWrapper(v8::Handle<v8::Value> wrapper) { clearDocumentWrapper(); ASSERT(m_document.IsEmpty()); m_document = v8::Persistent<v8::Value>::New(wrapper); #ifndef NDEBUG V8GCController::registerGlobalHandle(PROXY, this, m_document); #endif }
bool CrossRefPlugin::resolve (Document &doc) { /* * Prompt for username and password if needed */ if (_global_prefs->getCrossRefUsername ().empty ()) { Glib::ustring message = String::ucompose ( "<b><big>%1</big></b>\n\n%2\n", _("CrossRef credentials not found"), _("To use the CrossRef service, a free account is needed. " "Login information may be set in Preferences, or the CrossRef plugin " "may be disabled.") ); Gtk::MessageDialog dialog(message, true, Gtk::MESSAGE_WARNING, Gtk::BUTTONS_NONE, true); dialog.add_button (Gtk::Stock::CANCEL, 0); dialog.add_button (_("_Preferences"), 1); dialog.add_button (_("_Disable CrossRef"), 2); do { int response = dialog.run (); if (response == 1) { // Preferences doConfigure (); if (!_global_prefs->getCrossRefUsername ().empty ()) break; // if they didn't give us one then we loop around // else we go ahead } else if (response == 2) { // Disable _global_prefs->disablePlugin (this); return false; } else { // Cancel return false; } } while (1); } Glib::ustring messagetext = String::ucompose ( "<b><big>%1</big></b>\n\n%2\n", _("Downloading metadata"), String::ucompose ( _("Contacting crossref.org to retrieve metadata for '%1'"), doc.getField("doi")) ); Glib::ustring const username = _global_prefs->getCrossRefUsername (); Glib::ustring const password = _global_prefs->getCrossRefPassword (); Glib::ustring const url = Glib::ustring("http://www.crossref.org/openurl/?pid=") + username + (password.empty() ? "" : ":") + password + Glib::ustring("&id=doi:") + Gnome::Vfs::escape_string(doc.getField("doi")) + Glib::ustring ("&noredirect=true"); DEBUG ("CrossRefPlugin::resolve: using url '%1'", url); // FIXME: even if we don't get any metadata, // an exceptionless download+parse is considered // a success. // Nobody notices as long as crossref is the last resort bool success = true; try { Glib::ustring &xml = Transfer::readRemoteFile ( _("Downloading Metadata"), messagetext, url); DEBUG (xml); // XXX // Test for "Missing WWW-Authenticate header" for bad username/password // Test for "No DOI found" for bad DOI CrossRefParser parser (doc.getBibData()); Glib::Markup::ParseContext context (parser); try { context.parse (xml); context.end_parse (); } catch (Glib::MarkupError const ex) { DEBUG ("Markuperror while parsing:\n'''%1\n'''", xml); //Utility::exceptionDialog (&ex, _("Parsing CrossRef XML. The DOI could be invalid, or not known to crossref.org")); success = false; } } catch (Transfer::Exception ex) { //Utility::exceptionDialog (&ex, _("Downloading metadata")); success = false; } DEBUG ("resolve returning %1", success); return success; }
virtual ReadResult readNode(std::istream& fin, const Options* options) const { Document document; document.setOptions(options); // option string and parent pools if (options) { const char readerMsg[] = "flt reader option: "; document.setReplaceClampWithClampToEdge((options->getOptionString().find("clampToEdge")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "clampToEdge=" << document.getReplaceClampWithClampToEdge() << std::endl; document.setKeepExternalReferences((options->getOptionString().find("keepExternalReferences")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "keepExternalReferences=" << document.getKeepExternalReferences() << std::endl; document.setPreserveFace((options->getOptionString().find("preserveFace")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "preserveFace=" << document.getPreserveFace() << std::endl; document.setPreserveObject((options->getOptionString().find("preserveObject")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "preserveObject=" << document.getPreserveObject() << std::endl; document.setDefaultDOFAnimationState((options->getOptionString().find("dofAnimation")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "dofAnimation=" << document.getDefaultDOFAnimationState() << std::endl; document.setUseBillboardCenter((options->getOptionString().find("billboardCenter")!=std::string::npos)); osg::notify(osg::DEBUG_INFO) << readerMsg << "billboardCenter=" << document.getUseBillboardCenter() << std::endl; document.setUseTextureAlphaForTransparancyBinning(options->getOptionString().find("noTextureAlphaForTransparancyBinning")==std::string::npos); osg::notify(osg::DEBUG_INFO) << readerMsg << "noTextureAlphaForTransparancyBinning=" << !document.getUseTextureAlphaForTransparancyBinning() << std::endl; document.setReadObjectRecordData(options->getOptionString().find("readObjectRecordData")==std::string::npos); osg::notify(osg::DEBUG_INFO) << readerMsg << "readObjectRecordData=" << !document.getReadObjectRecordData() << std::endl; document.setDoUnitsConversion((options->getOptionString().find("noUnitsConversion")==std::string::npos)); // default to true, unless noUnitsConversion is specified. osg::notify(osg::DEBUG_INFO) << readerMsg << "noUnitsConversion=" << !document.getDoUnitsConversion() << std::endl; if (document.getDoUnitsConversion()) { if (options->getOptionString().find("convertToFeet")!=std::string::npos) document.setDesiredUnits(FEET); else if (options->getOptionString().find("convertToInches")!=std::string::npos) document.setDesiredUnits(INCHES); else if (options->getOptionString().find("convertToMeters")!=std::string::npos) document.setDesiredUnits(METERS); else if (options->getOptionString().find("convertToKilometers")!=std::string::npos) document.setDesiredUnits(KILOMETERS); else if (options->getOptionString().find("convertToNauticalMiles")!=std::string::npos) document.setDesiredUnits(NAUTICAL_MILES); } const ParentPools* pools = dynamic_cast<const ParentPools*>( options->getUserData() ); if (pools) { // This file is an external reference. The individual pools will // be non-NULL if the parent is overriding the ext ref model's pools. 
if (pools->getColorPool()) document.setColorPool( pools->getColorPool(), true ); if (pools->getTexturePool()) document.setTexturePool( pools->getTexturePool(), true ); if (pools->getMaterialPool()) document.setMaterialPool( pools->getMaterialPool(), true ); if (pools->getLightSourcePool()) document.setLightSourcePool( pools->getLightSourcePool(), true ); if (pools->getLPAppearancePool()) document.setLightPointAppearancePool( pools->getLPAppearancePool(), true ); if (pools->getLPAnimationPool()) document.setLightPointAnimationPool( pools->getLPAnimationPool(), true ); if (pools->getShaderPool()) document.setShaderPool( pools->getShaderPool(), true ); } } const int RECORD_HEADER_SIZE = 4; opcode_type continuationOpcode = INVALID_OP; std::string continuationBuffer; while (fin.good() && !document.done()) { // The continuation record complicates things a bit. // Get current read position in stream. std::istream::pos_type pos = fin.tellg(); // get opcode and size flt::DataInputStream dataStream(fin.rdbuf()); opcode_type opcode = (opcode_type)dataStream.readUInt16(); size_type size = (size_type)dataStream.readUInt16(); // If size == 0, an EOF has probably been reached, i.e. there is nothing // more to read so we must return. if (size==0) { // If a header was read, we return it. // This allows us handle files with empty hierarchies. if (document.getHeaderNode()) { return document.getHeaderNode(); } else // (no valid header) { return ReadResult::ERROR_IN_READING_FILE; } } // variable length record complete? if (!continuationBuffer.empty() && opcode!=CONTINUATION_OP) { // parse variable length record std::stringbuf sb(continuationBuffer); flt::RecordInputStream recordStream(&sb); recordStream.readRecordBody(continuationOpcode, continuationBuffer.length(), document); continuationOpcode = INVALID_OP; continuationBuffer.clear(); } // variable length record use continuation buffer in case next // record is a continuation record. if (opcode==EXTENSION_OP || opcode==NAME_TABLE_OP || opcode==LOCAL_VERTEX_POOL_OP || opcode==MESH_PRIMITIVE_OP) { continuationOpcode = opcode; if (size > RECORD_HEADER_SIZE) { // Put record in buffer. std::string buffer((std::string::size_type)size-RECORD_HEADER_SIZE,'\0'); fin.read(&buffer[0], size-RECORD_HEADER_SIZE); // Can't parse it until we know we have the complete record. continuationBuffer = buffer; } } else if (opcode==CONTINUATION_OP) { if (size > RECORD_HEADER_SIZE) { std::string buffer((std::string::size_type)size-RECORD_HEADER_SIZE,'\0'); fin.read(&buffer[0], size-RECORD_HEADER_SIZE); // The record continues. continuationBuffer.append(buffer); } } else if (opcode==VERTEX_PALETTE_OP) { // Vertex Palette needs the file stream as it reads beyond the current record. flt::RecordInputStream recordStream(fin.rdbuf()); recordStream.readRecordBody(opcode, size, document); } else // normal (fixed size) record. { // Put record in buffer. std::string buffer((std::string::size_type)size,'\0'); if (size > RECORD_HEADER_SIZE) fin.read(&buffer[0], size-RECORD_HEADER_SIZE); // Parse buffer. 
std::stringbuf sb(buffer); flt::RecordInputStream recordStream(&sb); recordStream.readRecordBody(opcode, size, document); } } if (!document.getHeaderNode()) return ReadResult::ERROR_IN_READING_FILE; if (!document.getPreserveFace()) { osgUtil::Optimizer optimizer; optimizer.optimize(document.getHeaderNode(), osgUtil::Optimizer::SHARE_DUPLICATE_STATE | osgUtil::Optimizer::MERGE_GEOMETRY | osgUtil::Optimizer::MERGE_GEODES | osgUtil::Optimizer::TESSELLATE_GEOMETRY | osgUtil::Optimizer::STATIC_OBJECT_DETECTION); } return document.getHeaderNode(); }
const ListHashSet<RefPtr<FontFace> >& FontFaceSet::cssConnectedFontFaceList() const { Document* d = document(); d->ensureStyleResolver(); // Flush pending style changes. return d->styleEngine()->fontSelector()->fontFaceCache()->cssConnectedFontFaces(); }
static bool preferHiddenVolumeControls(const Document& document) { return !document.settings() || document.settings()->preferHiddenVolumeControls(); }
void XMLDocumentParser::insertErrorMessageBlock() { #if USE(QXMLSTREAM) if (m_parsingFragment) return; #endif // One or more errors occurred during parsing of the code. Display an error block to the user above // the normal content (the DOM tree is created manually and includes line/col info regarding // where the errors are located) // Create elements for display ExceptionCode ec = 0; Document* document = this->document(); RefPtr<Element> documentElement = document->documentElement(); if (!documentElement) { RefPtr<Element> rootElement = document->createElement(htmlTag, false); document->appendChild(rootElement, ec); RefPtr<Element> body = document->createElement(bodyTag, false); rootElement->appendChild(body, ec); documentElement = body.get(); } #if ENABLE(SVG) else if (documentElement->namespaceURI() == SVGNames::svgNamespaceURI) { RefPtr<Element> rootElement = document->createElement(htmlTag, false); RefPtr<Element> body = document->createElement(bodyTag, false); rootElement->appendChild(body, ec); body->appendChild(documentElement, ec); document->appendChild(rootElement.get(), ec); documentElement = body.get(); } #endif RefPtr<Element> reportElement = createXHTMLParserErrorHeader(document, m_errorMessages); documentElement->insertBefore(reportElement, documentElement->firstChild(), ec); #if ENABLE(XSLT) if (document->transformSourceDocument()) { RefPtr<Element> paragraph = document->createElement(pTag, false); paragraph->setAttribute(styleAttr, "white-space: normal"); paragraph->appendChild(document->createTextNode("This document was created as the result of an XSL transformation. The line and column numbers given are from the transformed result."), ec); reportElement->appendChild(paragraph.release(), ec); } #endif document->updateStyleIfNeeded(); }
void ImageLoader::updateFromElement() { // If we're not making renderers for the page, then don't load images. We don't want to slow // down the raw HTML parsing case by loading images we don't intend to display. Document* document = m_element->document(); if (!document->renderer()) return; AtomicString attr = m_element->getAttribute(m_element->imageSourceAttributeName()); if (attr == m_failedLoadURL) return; // Do not load any image if the 'src' attribute is missing or if it is // an empty string. CachedImage* newImage = 0; if (!attr.isNull() && !stripLeadingAndTrailingHTMLSpaces(attr).isEmpty()) { ResourceRequest request = ResourceRequest(document->completeURL(sourceURI(attr))); String crossOriginMode = m_element->fastGetAttribute(HTMLNames::crossoriginAttr); if (!crossOriginMode.isNull()) { StoredCredentials allowCredentials = equalIgnoringCase(crossOriginMode, "use-credentials") ? AllowStoredCredentials : DoNotAllowStoredCredentials; updateRequestForAccessControl(request, document->securityOrigin(), allowCredentials); } if (m_loadManually) { bool autoLoadOtherImages = document->cachedResourceLoader()->autoLoadImages(); document->cachedResourceLoader()->setAutoLoadImages(false); newImage = new CachedImage(request); newImage->setLoading(true); newImage->setOwningCachedResourceLoader(document->cachedResourceLoader()); document->cachedResourceLoader()->m_documentResources.set(newImage->url(), newImage); document->cachedResourceLoader()->setAutoLoadImages(autoLoadOtherImages); } else newImage = document->cachedResourceLoader()->requestImage(request); // If we do not have an image here, it means that a cross-site // violation occurred. m_failedLoadURL = !newImage ? attr : AtomicString(); } else if (!attr.isNull()) // Fire an error event if the url is empty. m_element->dispatchEvent(Event::create(eventNames().errorEvent, false, false)); CachedImage* oldImage = m_image.get(); if (newImage != oldImage) { if (!m_firedBeforeLoad) beforeLoadEventSender().cancelEvent(this); if (!m_firedLoad) loadEventSender().cancelEvent(this); m_image = newImage; m_firedBeforeLoad = !newImage; m_firedLoad = !newImage; m_imageComplete = !newImage; if (newImage) { if (!m_element->document()->hasListenerType(Document::BEFORELOAD_LISTENER)) dispatchPendingBeforeLoadEvent(); else beforeLoadEventSender().dispatchEventSoon(this); // If newImage is cached, addClient() will result in the load event // being queued to fire. Ensure this happens after beforeload is // dispatched. newImage->addClient(this); } if (oldImage) oldImage->removeClient(this); } if (RenderImageResource* imageResource = renderImageResource()) imageResource->resetAnimation(); }
TreeResolver::Parent::Parent(Document& document, Change change) : element(nullptr) , style(*document.renderStyle()) , change(change) { }
void UseCounter::count(const Document& document, Feature feature) { count(document.frame(), feature); }
TEST_F(RapidJson, SIMD_SUFFIX(Whitespace)) { for (size_t i = 0; i < kTrialCount; i++) { Document doc; ASSERT_TRUE(doc.Parse(whitespace_).IsArray()); } }
void UseCounter::countDeprecation(const Document& document, Feature feature) { UseCounter::countDeprecation(document.frame(), feature); }
/// Retrieves the specified document; NULL if error. Document *NeonDownloader::retrieveUrl(const DocumentInfo &docInfo) { Document *pDocument = NULL; string url = Url::escapeUrl(docInfo.getLocation()); char *pContent = NULL; size_t contentLen = 0; int statusCode = 200; unsigned int redirectionsCount = 0; if (url.empty() == true) { #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: no URL specified !" << endl; #endif return NULL; } Url urlObj(url); string protocol = urlObj.getProtocol(); string hostName = urlObj.getHost(); string location = urlObj.getLocation(); string file = urlObj.getFile(); string parameters = urlObj.getParameters(); string locationHeaderValue; string contentTypeHeaderValue; // Create a session ne_session *pSession = ne_session_create(protocol.c_str(), hostName.c_str(), 80); // urlObj.getPort()); if (pSession == NULL) { #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: couldn't create session !" << endl; #endif return NULL; } // Set the user agent ne_set_useragent(pSession, m_userAgent.c_str()); // ...and the timeout ne_set_read_timeout(pSession, (int)m_timeout); // Is a proxy defined ? if ((m_proxyAddress.empty() == false) && (m_proxyPort > 0)) { // Type is HTTP ne_session_proxy(pSession, m_proxyAddress.c_str(), m_proxyPort); } string fullLocation = "/"; if (location.empty() == false) { fullLocation += location; } if (file.empty() == false) { if (location.empty() == false) { fullLocation += "/"; } fullLocation += file; } if (parameters.empty() == false) { fullLocation += "?"; fullLocation += parameters; } // Create a request for this URL ne_request *pRequest = ne_request_create(pSession, "GET", fullLocation.c_str()); if (pRequest == NULL) { #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: couldn't create request !" << endl; #endif ne_session_destroy(pSession); return NULL; } #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: request for " << fullLocation << " on " << hostName << endl; #endif int requestStatus = NE_RETRY; while (requestStatus == NE_RETRY) { locationHeaderValue.clear(); contentTypeHeaderValue.clear(); // Begin the request requestStatus = ne_begin_request(pRequest); #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: request begun with status " << requestStatus << endl; #endif if (requestStatus == NE_OK) { ssize_t bytesRead = 0; char buffer[1024]; // Get the status const ne_status *pStatus = ne_get_status(pRequest); if (pStatus != NULL) { statusCode = pStatus->code; #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: status is " << statusCode << endl; #endif } else { // Assume all is well statusCode = 200; } // Read the content while ((bytesRead = ne_read_response_block(pRequest, buffer, 1024)) > 0) { pContent = (char*)realloc(pContent, contentLen + bytesRead); memcpy((void*)(pContent + contentLen), (const void*)buffer, bytesRead); contentLen += bytesRead; } // Get headers const char *pValue = ne_get_response_header(pRequest, "Last-Modified"); if (pValue != NULL) { locationHeaderValue = pValue; } pValue = ne_get_response_header(pRequest, "Content-Type"); if (pValue != NULL) { contentTypeHeaderValue = pValue; } // Redirection ? if ((statusCode >= 300) && (statusCode < 400) && (redirectionsCount < 10)) { ne_end_request(pRequest); ne_request_destroy(pRequest); pRequest = NULL; string documentUrl = handleRedirection(pContent, contentLen); if (documentUrl.empty() == true) { // Did we find a Location header ? 
if (locationHeaderValue.empty() == true) { // Fail free(pContent); pContent = NULL; contentLen = 0; break; } documentUrl = locationHeaderValue; } #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: redirected to " << documentUrl << endl; #endif urlObj = Url(documentUrl); location = urlObj.getLocation(); file = urlObj.getFile(); // Is this on the same host ? if (hostName != urlObj.getHost()) { // No, it isn't hostName = urlObj.getHost(); // Create a new session ne_session_destroy(pSession); pSession = ne_session_create(protocol.c_str(), hostName.c_str(), 80); // urlObj.getPort()); if (pSession == NULL) { #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: couldn't create session !" << endl; #endif return NULL; } ne_set_useragent(pSession, m_userAgent.c_str()); ne_set_read_timeout(pSession, (int)m_timeout); } // Try again fullLocation = "/"; if (location.empty() == false) { fullLocation += location; fullLocation += "/"; } if (file.empty() == false) { fullLocation += file; } #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: redirected to " << fullLocation << " on " << hostName << endl; #endif // Create a new request for this URL pRequest = ne_request_create(pSession, "GET", fullLocation.c_str()); if (pRequest == NULL) { #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: couldn't create request !" << endl; #endif ne_session_destroy(pSession); return NULL; } redirectionsCount++; requestStatus = NE_RETRY; // Discard whatever content we have already got free(pContent); pContent = NULL; contentLen = 0; continue; } } // End the request requestStatus = ne_end_request(pRequest); } if ((pContent != NULL) && (contentLen > 0)) { if (statusCode < 400) { // Copy the document content pDocument = new Document(docInfo); pDocument->setData(pContent, contentLen); pDocument->setLocation(url); pDocument->setType(contentTypeHeaderValue); #ifdef DEBUG cout << "NeonDownloader::retrieveUrl: document size is " << contentLen << endl; #endif } free(pContent); } // Cleanup ne_request_destroy(pRequest); ne_session_destroy(pSession); return pDocument; }
QString AssemblyBrowser::tryAddObject(GObject * obj) { Document * objDoc = obj->getDocument(); SAFE_POINT(NULL != objDoc, "", tr("Internal error: only object with document can be added to browser")); if (GObjectTypes::SEQUENCE == obj->getGObjectType()) { U2SequenceObject * seqObj = qobject_cast<U2SequenceObject*>(obj); CHECK(NULL != seqObj, tr("Internal error: broken sequence object")); SAFE_POINT(NULL != objDoc->getDocumentFormat(), "", tr("Internal error: empty document format")); U2OpStatus2Log os; qint64 seqLen = seqObj->getSequenceLength(); QStringList errs; qint64 modelLen = model->getModelLength(os); if (seqLen != modelLen) { errs << tr("The lengths of the sequence and assembly are different."); } if (seqObj->getGObjectName() != gobject->getGObjectName()) { errs << tr("The sequence and assembly names are different."); } // commented: waiting for fix //QByteArray refMd5 = model->getReferenceMd5(); //if(!refMd5.isEmpty()) { // //QByteArray data = QString(seqObj->getSequence()).remove("-").toUpper().toUtf8(); // QByteArray data = QString(seqObj->getSequence()).toUpper().toUtf8(); // QByteArray seqObjMd5 = QCryptographicHash::hash(data, QCryptographicHash::Md5).toHex(); // if(seqObjMd5 != refMd5) { // errs << tr("- Reference MD5 not match with MD5 written in assembly"); // } //} bool setRef = !isAssemblyObjectLocked(true) && !model->isLoadingReference(); setRef &= model->checkPermissions(QFile::WriteUser, setRef); if(!errs.isEmpty() && setRef) { const NotificationStack *notificationStack = AppContext::getMainWindow()->getNotificationStack(); const QString message = tr("It seems that sequence \"%1\", set as reference to assembly \"%2\", does not match it.").arg(seqObj->getGObjectName()).arg(gobject->getGObjectName()) + "\n- " + errs.join("\n- "); notificationStack->addNotification(message, Warning_Not); } if(setRef) { model->setReference(seqObj); U2Assembly assembly = model->getAssembly(); U2DataId refId; QString folder; const QStringList folders = model->getDbiConnection().dbi->getObjectDbi()->getObjectFolders(assembly.id, os); if (folders.isEmpty() || os.isCoR()) { folder = U2ObjectDbi::ROOT_FOLDER; } else { folder = folders.first(); } if (seqObj->getEntityRef().dbiRef == model->getDbiConnection().dbi->getDbiRef()) { refId = seqObj->getEntityRef().entityId; } else { U2CrossDatabaseReferenceDbi * crossDbi = model->getDbiConnection().dbi->getCrossDatabaseReferenceDbi(); U2CrossDatabaseReference crossDbRef; // Cannot simply use seqObj->getSequenceRef(), since it points to a temporary dbi // TODO: make similar method seqObj->getPersistentSequenctRef() crossDbRef.dataRef.dbiRef.dbiId = objDoc->getURLString(); crossDbRef.dataRef.dbiRef.dbiFactoryId = "document"; crossDbRef.dataRef.entityId = seqObj->getGObjectName().toUtf8(); crossDbRef.visualName = "cross_database_reference: " + seqObj->getGObjectName(); crossDbRef.dataRef.version = 1; crossDbi->createCrossReference(crossDbRef, folder, os); LOG_OP(os); refId = crossDbRef.id; addObjectToView(obj); } model->associateWithReference(refId); } } else if (GObjectTypes::VARIANT_TRACK == obj->getGObjectType()) { VariantTrackObject *trackObj = qobject_cast<VariantTrackObject*>(obj); CHECK(NULL != trackObj, tr("Internal error: broken variant track object")); model->addTrackObject(trackObj); addObjectToView(obj); connect(model.data(), SIGNAL(si_trackRemoved(VariantTrackObject *)), SLOT(sl_trackRemoved(VariantTrackObject *))); } else {
bool FocusController::advanceFocusInDocumentOrder(FocusDirection direction, KeyboardEvent* event, bool initialFocus) { Frame* frame = focusedOrMainFrame(); ASSERT(frame); Document* document = frame->document(); Node* currentNode = document->focusedNode(); // FIXME: Not quite correct when it comes to focus transitions leaving/entering the WebView itself bool caretBrowsing = focusedOrMainFrame()->settings()->caretBrowsingEnabled(); if (caretBrowsing && !currentNode) currentNode = frame->selection()->start().node(); document->updateLayoutIgnorePendingStylesheets(); RefPtr<Node> node = (direction == FocusDirectionForward) ? document->nextFocusableNode(currentNode, event) : document->previousFocusableNode(currentNode, event); // If there's no focusable node to advance to, move up the frame tree until we find one. while (!node && frame) { Frame* parentFrame = frame->tree()->parent(); if (!parentFrame) break; Document* parentDocument = parentFrame->document(); HTMLFrameOwnerElement* owner = frame->ownerElement(); if (!owner) break; node = (direction == FocusDirectionForward) ? parentDocument->nextFocusableNode(owner, event) : parentDocument->previousFocusableNode(owner, event); frame = parentFrame; } node = deepFocusableNode(direction, node.get(), event); if (!node) { // We didn't find a node to focus, so we should try to pass focus to Chrome. if (!initialFocus && m_page->chrome()->canTakeFocus(direction)) { document->setFocusedNode(0); setFocusedFrame(0); m_page->chrome()->takeFocus(direction); return true; } // Chrome doesn't want focus, so we should wrap focus. Document* d = m_page->mainFrame()->document(); node = (direction == FocusDirectionForward) ? d->nextFocusableNode(0, event) : d->previousFocusableNode(0, event); node = deepFocusableNode(direction, node.get(), event); if (!node) return false; } ASSERT(node); if (node == document->focusedNode()) // Focus wrapped around to the same node. return true; if (!node->isElementNode()) // FIXME: May need a way to focus a document here. return false; if (node->isFrameOwnerElement()) { // We focus frames rather than frame owners. // FIXME: We should not focus frames that have no scrollbars, as focusing them isn't useful to the user. HTMLFrameOwnerElement* owner = static_cast<HTMLFrameOwnerElement*>(node.get()); if (!owner->contentFrame()) return false; document->setFocusedNode(0); setFocusedFrame(owner->contentFrame()); return true; } // FIXME: It would be nice to just be able to call setFocusedNode(node) here, but we can't do // that because some elements (e.g. HTMLInputElement and HTMLTextAreaElement) do extra work in // their focus() methods. Document* newDocument = node->document(); if (newDocument != document) // Focus is going away from this document, so clear the focused node. document->setFocusedNode(0); if (newDocument) setFocusedFrame(newDocument->frame()); if (caretBrowsing) { VisibleSelection newSelection(Position(node, 0), Position(node, 0), DOWNSTREAM); if (frame->shouldChangeSelection(newSelection)) frame->selection()->setSelection(newSelection); } static_cast<Element*>(node.get())->focus(false); return true; }
Document DocumentSourceChangeStreamTransform::applyTransformation(const Document& input) { // If we're executing a change stream pipeline that was forwarded from mongos, then we expect it // to "need merge"---we expect to be executing the shards part of a split pipeline. It is never // correct for mongos to pass through the change stream without splitting into into a merging // part executed on mongos and a shards part. // // This is necessary so that mongos can correctly handle "invalidate" and "retryNeeded" change // notifications. See SERVER-31978 for an example of why the pipeline must be split. // // We have to check this invariant at run-time of the change stream rather than parse time, // since a mongos may forward a change stream in an invalid position (e.g. in a nested $lookup // or $facet pipeline). In this case, mongod is responsible for parsing the pipeline and // throwing an error without ever executing the change stream. if (pExpCtx->fromMongos) { invariant(pExpCtx->needsMerge); } MutableDocument doc; // Extract the fields we need. checkValueType(input[repl::OplogEntry::kOpTypeFieldName], repl::OplogEntry::kOpTypeFieldName, BSONType::String); string op = input[repl::OplogEntry::kOpTypeFieldName].getString(); Value ts = input[repl::OplogEntry::kTimestampFieldName]; Value ns = input[repl::OplogEntry::kNssFieldName]; checkValueType(ns, repl::OplogEntry::kNssFieldName, BSONType::String); Value uuid = input[repl::OplogEntry::kUuidFieldName]; std::vector<FieldPath> documentKeyFields; // Deal with CRUD operations and commands. auto opType = repl::OpType_parse(IDLParserErrorContext("ChangeStreamEntry.op"), op); NamespaceString nss(ns.getString()); // Ignore commands in the oplog when looking up the document key fields since a command implies // that the change stream is about to be invalidated (e.g. collection drop). if (!uuid.missing() && opType != repl::OpTypeEnum::kCommand) { checkValueType(uuid, repl::OplogEntry::kUuidFieldName, BSONType::BinData); // We need to retrieve the document key fields if our cache does not have an entry for this // UUID or if the cache entry is not definitively final, indicating that the collection was // unsharded when the entry was last populated. auto it = _documentKeyCache.find(uuid.getUuid()); if (it == _documentKeyCache.end() || !it->second.isFinal) { auto docKeyFields = pExpCtx->mongoProcessInterface->collectDocumentKeyFieldsForHostedCollection( pExpCtx->opCtx, nss, uuid.getUuid()); if (it == _documentKeyCache.end() || docKeyFields.second) { _documentKeyCache[uuid.getUuid()] = DocumentKeyCacheEntry(docKeyFields); } } documentKeyFields = _documentKeyCache.find(uuid.getUuid())->second.documentKeyFields; } Value id = input.getNestedField("o._id"); // Non-replace updates have the _id in field "o2". 
StringData operationType; Value fullDocument; Value updateDescription; Value documentKey; switch (opType) { case repl::OpTypeEnum::kInsert: { operationType = DocumentSourceChangeStream::kInsertOpType; fullDocument = input[repl::OplogEntry::kObjectFieldName]; documentKey = Value(document_path_support::extractPathsFromDoc( fullDocument.getDocument(), documentKeyFields)); break; } case repl::OpTypeEnum::kDelete: { operationType = DocumentSourceChangeStream::kDeleteOpType; documentKey = input[repl::OplogEntry::kObjectFieldName]; break; } case repl::OpTypeEnum::kUpdate: { if (id.missing()) { operationType = DocumentSourceChangeStream::kUpdateOpType; checkValueType(input[repl::OplogEntry::kObjectFieldName], repl::OplogEntry::kObjectFieldName, BSONType::Object); Document opObject = input[repl::OplogEntry::kObjectFieldName].getDocument(); Value updatedFields = opObject["$set"]; Value removedFields = opObject["$unset"]; // Extract the field names of $unset document. vector<Value> removedFieldsVector; if (removedFields.getType() == BSONType::Object) { auto iter = removedFields.getDocument().fieldIterator(); while (iter.more()) { removedFieldsVector.push_back(Value(iter.next().first)); } } updateDescription = Value(Document{ {"updatedFields", updatedFields.missing() ? Value(Document()) : updatedFields}, {"removedFields", removedFieldsVector}}); } else { operationType = DocumentSourceChangeStream::kReplaceOpType; fullDocument = input[repl::OplogEntry::kObjectFieldName]; } documentKey = input[repl::OplogEntry::kObject2FieldName]; break; } case repl::OpTypeEnum::kCommand: { if (!input.getNestedField("o.applyOps").missing()) { // We should never see an applyOps inside of an applyOps that made it past the // filter. This prevents more than one level of recursion. invariant(!_txnContext); initializeTransactionContext(input); // Now call applyTransformation on the first relevant entry in the applyOps. boost::optional<Document> nextDoc = extractNextApplyOpsEntry(); invariant(nextDoc); return applyTransformation(*nextDoc); } else if (!input.getNestedField("o.drop").missing()) { operationType = DocumentSourceChangeStream::kDropCollectionOpType; // The "o.drop" field will contain the actual collection name. nss = NamespaceString(nss.db(), input.getNestedField("o.drop").getString()); } else if (!input.getNestedField("o.renameCollection").missing()) { operationType = DocumentSourceChangeStream::kRenameCollectionOpType; // The "o.renameCollection" field contains the namespace of the original collection. nss = NamespaceString(input.getNestedField("o.renameCollection").getString()); // The "o.to" field contains the target namespace for the rename. const auto renameTargetNss = NamespaceString(input.getNestedField("o.to").getString()); doc.addField(DocumentSourceChangeStream::kRenameTargetNssField, Value(Document{{"db", renameTargetNss.db()}, {"coll", renameTargetNss.coll()}})); } else if (!input.getNestedField("o.dropDatabase").missing()) { operationType = DocumentSourceChangeStream::kDropDatabaseOpType; // Extract the database name from the namespace field and leave the collection name // empty. nss = NamespaceString(nss.db()); } else { // All other commands will invalidate the stream. operationType = DocumentSourceChangeStream::kInvalidateOpType; } // Make sure the result doesn't have a document key. 
documentKey = Value(); break; } case repl::OpTypeEnum::kNoop: { operationType = DocumentSourceChangeStream::kNewShardDetectedOpType; // Generate a fake document Id for NewShardDetected operation so that we can resume // after this operation. documentKey = Value(Document{{DocumentSourceChangeStream::kIdField, input[repl::OplogEntry::kObject2FieldName]}}); break; } default: { MONGO_UNREACHABLE; } } // UUID should always be present except for invalidate and dropDatabase entries. if (operationType != DocumentSourceChangeStream::kInvalidateOpType && operationType != DocumentSourceChangeStream::kDropDatabaseOpType) { invariant(!uuid.missing(), "Saw a CRUD op without a UUID"); } // If the collection did not exist when the change stream was opened, then the UUID will not // have been obtained from the catalog. In this case, we set the UUID on the ExpressionContext // after obtaining it from the first relevant oplog entry, so that the UUID can be included in // high water mark tokens for change streams watching a single collection. The UUID is needed // for resumability against a single collection due to collation semantics. if (!pExpCtx->uuid && !uuid.missing() && pExpCtx->isSingleNamespaceAggregation()) { pExpCtx->uuid = uuid.getUuid(); } // Note that 'documentKey' and/or 'uuid' might be missing, in which case they will not appear // in the output. auto resumeTokenData = getResumeToken(ts, uuid, documentKey); auto resumeToken = ResumeToken(resumeTokenData).toDocument(); // Add some additional fields only relevant to transactions. if (_txnContext) { doc.addField(DocumentSourceChangeStream::kTxnNumberField, Value(static_cast<long long>(_txnContext->txnNumber))); doc.addField(DocumentSourceChangeStream::kLsidField, Value(_txnContext->lsid)); } doc.addField(DocumentSourceChangeStream::kIdField, Value(resumeToken)); doc.addField(DocumentSourceChangeStream::kOperationTypeField, Value(operationType)); doc.addField(DocumentSourceChangeStream::kClusterTimeField, Value(resumeTokenData.clusterTime)); // We set the resume token as the document's sort key in both the sharded and non-sharded cases, // since we will subsequently rely upon it to generate a correct postBatchResumeToken. // TODO SERVER-38539: when returning results for merging, we first check whether 'mergeByPBRT' // has been set. If not, then the request was sent from an older mongoS which cannot merge by // raw resume tokens, and we must use the old sort key format. This check, and the 'mergeByPBRT' // flag, are no longer necessary in 4.4; all change streams will be merged by resume token. if (pExpCtx->needsMerge && !pExpCtx->mergeByPBRT) { doc.setSortKeyMetaField(BSON("" << ts << "" << uuid << "" << documentKey)); } else { doc.setSortKeyMetaField(resumeToken.toBson()); } // "invalidate" and "newShardDetected" entries have fewer fields. if (operationType == DocumentSourceChangeStream::kInvalidateOpType || operationType == DocumentSourceChangeStream::kNewShardDetectedOpType) { return doc.freeze(); } doc.addField(DocumentSourceChangeStream::kFullDocumentField, fullDocument); doc.addField(DocumentSourceChangeStream::kNamespaceField, operationType == DocumentSourceChangeStream::kDropDatabaseOpType ? Value(Document{{"db", nss.db()}}) : Value(Document{{"db", nss.db()}, {"coll", nss.coll()}})); doc.addField(DocumentSourceChangeStream::kDocumentKeyField, documentKey); // Note that 'updateDescription' might be the 'missing' value, in which case it will not be // serialized. doc.addField("updateDescription", updateDescription); return doc.freeze(); }
void HTMLCollection::invalidateNamedElementCache(Document& document) const { ASSERT(hasNamedElementCache()); document.collectionWillClearIdNameMap(*this); m_namedElementCache = nullptr; }
CSMDoc::WriteFilterStage::WriteFilterStage (Document& document, SavingState& state, CSMFilter::Filter::Scope scope) : WriteCollectionStage<CSMWorld::IdCollection<CSMFilter::Filter> > (document.getData().getFilters(), state), mDocument (document), mScope (scope) {}
void TypingCommand::insertParagraphSeparatorInQuotedContent(Document& document) { if (RefPtr<TypingCommand> lastTypingCommand = lastTypingCommandIfStillOpenForTyping(document.frame())) { lastTypingCommand->insertParagraphSeparatorInQuotedContent(); return; } applyCommand(TypingCommand::create(document, InsertParagraphSeparatorInQuotedContent)); }
CSMDoc::WriteDialogueCollectionStage::WriteDialogueCollectionStage (Document& document, SavingState& state, bool journal) : mDocument (document), mState (state), mTopics (journal ? document.getData().getJournals() : document.getData().getTopics()), mInfos (journal ? document.getData().getJournalInfos() : document.getData().getTopicInfos()) {}
int NodeBtHandler (int argc, char *argv[])
{
    // DDS variables
    DDSEntityManager mgrBtPub;
    DDSEntityManager mgrReqSub;
    ReturnCode_t status;
    SampleInfoSeq_var infoSeq = new SampleInfoSeq();
    BtSeq* btSeqInstance = new BtSeq();
    ServerReqSeq serverReqSeq;

    // Time-related variables
    long int messageIDCount = 0;
    Time_t tsBeforeTheScan = { 0, 0 };
    Time_t tsAfterTheScan = { 0, 0 };
    //Time_t tsWifiPub = { 0, 0 };
    struct timeval tsConverter;
    DDS::Duration_t waitAckTime = { 0, 800000000 }; // 800 ms
    int refreshRate = 60;

    // Data holders (data structures)
    vector<string> btMacHolder;
    vector<int> btRssiHolder;
    string btFileContenHolder;

    // Stream that reads the file the Bluetooth scan results are written to
    boost::filesystem::ifstream fIn;

    // Counter that controls the Bluetooth scan frequency
    int refreshRateCounter = -1;

    char hostName[1024];
    gethostname(hostName, 1024);

    // !!! Create the Topic that will publish the Bluetooth scan messages
    // and apply the configuration settings for that Topic.

    // Create the domain participant
    mgrBtPub.createParticipant ("KonSens_BtSeq_Participant");

    // Register the BtSeq type
    BtSeqTypeSupport_var btSeqTs = new BtSeqTypeSupport();
    mgrBtPub.registerType(btSeqTs.in());

    // Create the topic
    char btPubTopicName[] = "KonSensData_BtSeq_Topic";
    mgrBtPub.createTopic(btPubTopicName);

    // Create the publisher
    mgrBtPub.createPublisher();

    // Create the DataWriter
    bool autodispose_unregistered_instances = false;
    mgrBtPub.createWriter(autodispose_unregistered_instances, KEEP_ALL_HISTORY_QOS,
        BY_SOURCE_TIMESTAMP_DESTINATIONORDER_QOS);

    // Narrow the DataWriter to the BtSeq type
    DataWriter_var dWriter = mgrBtPub.getWriter();
    BtSeqDataWriter_var btSeqWriter = BtSeqDataWriter::_narrow(dWriter.in());

    // Assign the node ID
    btSeqInstance->userID = 13;

    // The btSeqInstance used to be registered here so that published messages
    // could carry a timestamp
    //userHandle = btSeqWriter->register_instance_w_timestamp(*btSeqInstance,
    //    tsWifiPub);

    cout << "=== [Publisher of KonSensData_BtSeq_Topic] Ready ..." << endl;

    // !!! Create the Topic that subscribes to the commands coming from the server
    // and apply the configuration settings required for that Topic

    // Create the domain participant
    mgrReqSub.createParticipant("KonSensData_ServerReq_Participant_Server_to_Node");

    // Register the ServerReq type
    ServerReqTypeSupport_var mgrSubscriberTS = new ServerReqTypeSupport();
    mgrReqSub.registerType(mgrSubscriberTS.in());

    // Create the topic
    char reqSubTopicName[] = "KonSensData_ServerReq_Topic_Server_to_Node";
    mgrReqSub.createTopic(reqSubTopicName, RELIABLE_RELIABILITY_QOS, VOLATILE_DURABILITY_QOS);

    // Create the subscriber
    mgrReqSub.createSubscriber();

    // Create the DataReader
    mgrReqSub.createReader(KEEP_LAST_HISTORY_QOS, 1);

    // Narrow the DataReader to the ServerReq type
    DataReader_var dReaderSub = mgrReqSub.getReader();
    ServerReqDataReader_var serverReqReader = ServerReqDataReader::_narrow(dReaderSub.in());
    checkHandle(serverReqReader.in(), "ServerReqDataReader::_narrow");

    cout << "=== [Subscriber KonSensData_ServerReq_Topic_Server_to_Node]" " Ready ..." << endl;

    // Bring the Bluetooth adapter up.
    system("sudo hciconfig -a hci0 up");
    stringstream ssBtName;
    ssBtName << "sudo bt-adapter -a hci0 --set Name \"" << hostName << "\"";
    system(ssBtName.str().c_str());
    system("sudo bt-adapter -a hci0 --set Discoverable 1");

    // Create the 'timelimit' variable that determines the refresh frequency.
    Timeout timelimit(std::chrono::milliseconds(refreshRate*1000));

    while (true)
    {
        if (timelimit.isExpired() || refreshRateCounter == -1)
        {
            // Prepare to publish the BT message.
            cout << "-----------------------------------" << endl;
            btSeqInstance->messageID = messageIDCount;

            // Timestamp taken before the scan (timestamp[0])
            gettimeofday(&tsConverter, NULL);
            tsBeforeTheScan.sec = tsConverter.tv_sec;
            tsBeforeTheScan.nanosec = (tsConverter.tv_usec * 1000);
            cout << " timestamp[0] (before the scan) = " << tsBeforeTheScan.sec << '.';
            cout << setfill('0') << setw(9) << (tsBeforeTheScan.nanosec) << endl;

            // Run the BT scan and write the scan results to the 'bt_rssi' file.
            system("sudo hciconfig hci0 reset");
            FakeDelay();
            system("sudo bt-adapter -a hci0 -d >> bt_rssi.txt");
            FakeDelay();

            cout << "Bluetooth message is publishing..." << endl;
            try
            {
                // Read the BT scan file
                fIn.open ("bt_rssi.txt", ios::in);
                stringstream ssBt;
                ssBt << fIn.rdbuf();
                btFileContenHolder = ssBt.str();
                system("rm bt_rssi.txt");

                // Parse the file contents with the help of the Boost library
                boost::regex expAd("Address: ([0-9A-F:]{17})");
                boost::regex expBt("RSSI:.*?([0-9]+)");
                boost::match_results<string::const_iterator> whatAd;
                string::const_iterator startAd = btFileContenHolder.begin();
                string::const_iterator finishAd = btFileContenHolder.end();
                while (boost::regex_search(startAd, finishAd, whatAd, expAd))
                {
                    btMacHolder.push_back(whatAd[1]);
                    startAd = whatAd[0].second;
                }
                boost::match_results<string::const_iterator> whatBt;
                startAd = btFileContenHolder.begin();
                finishAd = btFileContenHolder.end();
                while (boost::regex_search(startAd, finishAd, whatBt, expBt))
                {
                    string foundRssi(whatBt[1]);
                    btRssiHolder.push_back(atoi(foundRssi.c_str()));
                    startAd = whatBt[0].second;
                }

                cout << "Number of BT connections found: " << btRssiHolder.size() << endl;
                cout << "MessageID: " << btSeqInstance->messageID << endl;

                // Timestamp taken after the scan (timestamp[1])
                gettimeofday(&tsConverter, NULL);
                tsAfterTheScan.sec = tsConverter.tv_sec;
                tsAfterTheScan.nanosec = (tsConverter.tv_usec * 1000);
                cout << " timestamp[1] (after the scan) = " << tsAfterTheScan.sec << '.';
                cout << setfill('0') << setw(9) << (tsAfterTheScan.nanosec) << endl;

                // Store the parsed BT scan results and the captured timestamps
                // in the message that will be published.
                btSeqInstance->timestamp[0][0] = tsBeforeTheScan.nanosec;
                btSeqInstance->timestamp[0][1] = tsBeforeTheScan.sec;
                btSeqInstance->timestamp[1][0] = tsAfterTheScan.nanosec;
                btSeqInstance->timestamp[1][1] = tsAfterTheScan.sec;
                btSeqInstance->messages.length(btMacHolder.size());
                for (int i = 0; i < btMacHolder.size(); i++)
                {
                    Msg msg;
                    msg.devID = DDS::string_dup(btMacHolder[i].c_str());
                    msg.hostName = DDS::string_dup(hostName);
                    msg.dbm = -btRssiHolder[i];
                    btSeqInstance->messages[i] = msg;
                }

                // Before publishing, wait for the acknowledgement of the previous message
                btSeqWriter->wait_for_acknowledgments(waitAckTime);
                status = btSeqWriter->write(*btSeqInstance, DDS::HANDLE_NIL);
                checkStatus(status, "BtSeqDataWriter::write");
                messageIDCount++;
            }
            catch (boost::bad_expression & ex)
            {
                std::cout << ex.what();
                break;
            }
            btMacHolder.clear();
            btRssiHolder.clear();
            fIn.close();
            cout << "-----------------------------------" << endl;

            // Reset the counter that controls the scan frequency
            timelimit.setTimerToZero();
            refreshRateCounter = 0;
            cout << refreshRateCounter << endl;
        } // End of the BT publisher part
        // Subscribe to the Matlab commands sent by the server
        else
        {
            status = serverReqReader->take(serverReqSeq, infoSeq, LENGTH_UNLIMITED,
                ANY_SAMPLE_STATE, ANY_VIEW_STATE, ANY_INSTANCE_STATE);
            checkStatus(status, "ServerReqDataReader::take");
            for (DDS::ULong j = 0; j < serverReqSeq.length(); j++)
            {
                if (infoSeq[j].valid_data)
                {
                    cout << "=== [Subscriber] message received :" << endl;
                    cout << "    Received Request Message : " << serverReqSeq[j].request << endl;
                    cout << "    Received RequestID : \"" << serverReqSeq[j].requestID << "\"" << endl;

                    // Create the RapidJSON document
                    Document d;
                    if (d.Parse(serverReqSeq[j].request).HasParseError())
                        cout << " Parsing Error!" << endl;
                    StringBuffer nodeIdBuffer;
                    Writer<StringBuffer> nodeIdWriter(nodeIdBuffer);
                    d["NodeID"].Accept(nodeIdWriter);
                    string tempNodeId = nodeIdBuffer.GetString();

                    // Check whether the received message is addressed to this node
                    if (tempNodeId == "\"SensDug13\"")
                    {
                        StringBuffer buffer;
                        Value::ConstMemberIterator itr = d.FindMember("SetRefreshRate");
                        // Check whether a refresh rate command has arrived
                        if (itr != d.MemberEnd())
                        {
                            string refreshRateString;
                            int refreshRateInt;

                            // Convert the JSON value inside the Document into a StringBuffer
                            Writer<StringBuffer> writer(buffer);
                            d["SetRefreshRate"].Accept(writer);
                            refreshRateString = buffer.GetString();

                            // The serialized value carries extra double quotes, e.g. "15",
                            // so the first and last characters are trimmed off
                            refreshRateString = refreshRateString.substr(1, refreshRateString.size() - 2);

                            // Convert the refresh rate value from string to integer
                            refreshRateInt = atoi(refreshRateString.c_str());
                            refreshRate = refreshRateInt;
                            timelimit.setMaxDuration(std::chrono::milliseconds(refreshRate*1000));
                        }
                    }
                    else
                        cout << "Invalid NodeID!" << endl;
                }
            }
            status = serverReqReader->return_loan(serverReqSeq, infoSeq);
            checkStatus(status, "ServerReqDataReader::return_loan");
            refreshRateCounter++;
            cout << refreshRateCounter << endl;
        } // End of the Matlab command subscription part

        // Slow the loop down so the output scrolling in the terminal is easier to follow
        FakeDelay();
    }

    // Clean up and release resources
    mgrBtPub.deleteWriter();
    mgrBtPub.deletePublisher();
    mgrBtPub.deleteTopic();
    mgrBtPub.deleteParticipant();
    mgrReqSub.deleteReader();
    mgrReqSub.deleteSubscriber();
    mgrReqSub.deleteTopic();
    mgrReqSub.deleteParticipant();

    return 0;
}
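For reference, here is a minimal, self-contained sketch of the Boost.Regex parsing step used in NodeBtHandler above. The sample scan text is invented; real bt-adapter output may be formatted differently, so treat this only as an illustration of how the two patterns pull out MAC/RSSI pairs.

#include <boost/regex.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Invented sample of bt-adapter style output (an assumption, not captured output)
    const std::string scan =
        "Address: AA:BB:CC:DD:EE:01\nRSSI: -42 (0xd6)\n"
        "Address: AA:BB:CC:DD:EE:02\nRSSI: -67 (0xbd)\n";

    // Same patterns as in NodeBtHandler
    boost::regex expAd("Address: ([0-9A-F:]{17})");
    boost::regex expBt("RSSI:.*?([0-9]+)");

    std::vector<std::string> macs;
    std::vector<int> rssis;

    boost::match_results<std::string::const_iterator> what;
    std::string::const_iterator start = scan.begin();
    std::string::const_iterator finish = scan.end();

    // Collect every MAC address
    while (boost::regex_search(start, finish, what, expAd))
    {
        macs.push_back(what[1].str());
        start = what[0].second;
    }

    // Collect every RSSI magnitude; the capture group matches digits only,
    // which is why the node code negates the value when filling in msg.dbm
    start = scan.begin();
    while (boost::regex_search(start, finish, what, expBt))
    {
        rssis.push_back(std::atoi(what[1].str().c_str()));
        start = what[0].second;
    }

    for (std::size_t i = 0; i < macs.size() && i < rssis.size(); ++i)
        std::cout << macs[i] << " -> -" << rssis[i] << " dBm" << std::endl;

    return 0;
}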
TreeResolver::Scope::Scope(Document& document)
    : styleResolver(document.ensureStyleResolver())
    , sharingResolver(document, styleResolver.ruleSets(), selectorFilter)
{
}
void PageSerializer::serializeFrame(Frame* frame)
{
    Document* document = frame->document();
    KURL url = document->url();
    if (!url.isValid() || url.protocolIs("about")) {
        // For blank frames we generate a fake URL so they can be referenced by their containing frame.
        url = urlForBlankFrame(frame);
    }

    if (m_resourceURLs.contains(url)) {
        // FIXME: We could have 2 frames with the same URL but which were dynamically changed and have now
        // different content. So we should serialize both and somehow rename the frame src in the containing
        // frame. Arg!
        return;
    }

    Vector<Node*> nodes;
    SerializerMarkupAccumulator accumulator(this, document, &nodes);
    TextEncoding textEncoding(document->charset());
    CString data;
    if (!textEncoding.isValid()) {
        // FIXME: iframes used as images trigger this. We should deal with them correctly.
        return;
    }
    String text = accumulator.serializeNodes(document->documentElement(), 0, IncludeNode);
    CString frameHTML = textEncoding.encode(text.characters(), text.length(), EntitiesForUnencodables);
    m_resources->append(Resource(url, document->suggestedMIMEType(), SharedBuffer::create(frameHTML.data(), frameHTML.length())));
    m_resourceURLs.add(url);

    for (Vector<Node*>::iterator iter = nodes.begin(); iter != nodes.end(); ++iter) {
        Node* node = *iter;
        if (!node->isElementNode())
            continue;

        Element* element = toElement(node);
        // We have to process inline style as it might contain some resources (typically background images).
        if (element->isStyledElement())
            retrieveResourcesForProperties(static_cast<StyledElement*>(element)->inlineStyle(), document);

        if (element->hasTagName(HTMLNames::imgTag)) {
            HTMLImageElement* imageElement = static_cast<HTMLImageElement*>(element);
            KURL url = document->completeURL(imageElement->getAttribute(HTMLNames::srcAttr));
            CachedImage* cachedImage = imageElement->cachedImage();
            addImageToResources(cachedImage, imageElement->renderer(), url);
        } else if (element->hasTagName(HTMLNames::linkTag)) {
            HTMLLinkElement* linkElement = static_cast<HTMLLinkElement*>(element);
            if (CSSStyleSheet* sheet = linkElement->sheet()) {
                KURL url = document->completeURL(linkElement->getAttribute(HTMLNames::hrefAttr));
                serializeCSSStyleSheet(sheet, url);
                ASSERT(m_resourceURLs.contains(url));
            }
        } else if (element->hasTagName(HTMLNames::styleTag)) {
            HTMLStyleElement* styleElement = static_cast<HTMLStyleElement*>(element);
            if (CSSStyleSheet* sheet = styleElement->sheet())
                serializeCSSStyleSheet(sheet, KURL());
        }
    }

    for (Frame* childFrame = frame->tree()->firstChild(); childFrame; childFrame = childFrame->tree()->nextSibling())
        serializeFrame(childFrame);
}
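The recursion at the end of serializeFrame() walks the frame tree in first-child/next-sibling order, parents before children. The stand-alone sketch below shows only that traversal order; FakeFrame is a made-up stand-in, and none of this is WebKit API.

#include <iostream>
#include <string>

// FakeFrame is a hypothetical stand-in for the real Frame class, used only to
// demonstrate the traversal order; it is not part of PageSerializer.
struct FakeFrame {
    std::string name;
    FakeFrame* firstChild;
    FakeFrame* nextSibling;
};

// Visit a frame, then recurse over its children via the sibling chain,
// mirroring the loop at the end of serializeFrame().
void serialize(const FakeFrame* frame)
{
    std::cout << "serializing " << frame->name << std::endl;
    for (const FakeFrame* child = frame->firstChild; child; child = child->nextSibling)
        serialize(child);
}

int main()
{
    FakeFrame grandchild = { "grandchild", nullptr, nullptr };
    FakeFrame child2 = { "iframe #2", nullptr, nullptr };
    FakeFrame child1 = { "iframe #1", &grandchild, &child2 };
    FakeFrame mainFrame = { "main frame", &child1, nullptr };

    // Prints: main frame, iframe #1, grandchild, iframe #2
    serialize(&mainFrame);
    return 0;
}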
bool XapianIndex::addCommonTerms(const DocumentInfo &info, Xapian::Document &doc, Xapian::termcount &termPos) const
{
    string title(info.getTitle());
    string location(info.getLocation());
    Url urlObj(location);

    // Add a magic term :-)
    doc.add_term(MAGIC_TERM);

    // Index the title with and without prefix S
    if (title.empty() == false)
    {
        Document titleDoc;
        titleDoc.setData(title.c_str(), title.length());
        Tokenizer titleTokens(&titleDoc);
        addPostingsToDocument(titleTokens, doc, "S", termPos, STORE_UNSTEM);
        titleTokens.rewind();
        addPostingsToDocument(titleTokens, doc, "", termPos, m_stemMode);
    }

    // Index the full URL with prefix U
    doc.add_term(limitTermLength(string("U") + location, true));
    // ...the host name and included domains with prefix H
    string hostName(StringManip::toLowerCase(urlObj.getHost()));
    if (hostName.empty() == false)
    {
        doc.add_term(limitTermLength(string("H") + hostName, true));
        string::size_type dotPos = hostName.find('.');
        while (dotPos != string::npos)
        {
            doc.add_term(limitTermLength(string("H") + hostName.substr(dotPos + 1), true));

            // Next
            dotPos = hostName.find('.', dotPos + 1);
        }
    }
    // ...the location (as is) and all directories with prefix XDIR:
    string tree(urlObj.getLocation());
    if (tree.empty() == false)
    {
        doc.add_term(limitTermLength(string("XDIR:") + tree, true));
        string::size_type slashPos = tree.find('/', 1);
        while (slashPos != string::npos)
        {
            doc.add_term(limitTermLength(string("XDIR:") + tree.substr(0, slashPos), true));

            // Next
            slashPos = tree.find('/', slashPos + 1);
        }
    }
    // ...and the file name with prefix P
    string fileName(urlObj.getFile());
    if (fileName.empty() == false)
    {
        doc.add_term(limitTermLength(string("P") + StringManip::toLowerCase(fileName), true));
    }

    // Finally, add the language code with prefix L
    doc.add_term(string("L") + Languages::toCode(m_stemLanguage));
    // ...and the MIME type with prefix T
    doc.add_term(string("T") + info.getType());

    return true;
}
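To make the prefix scheme above concrete, here is a small hypothetical example. The URL, language code and MIME type are invented, and the exact strings returned by Url::getHost(), Url::getLocation() and Url::getFile() are assumptions, so the list is only indicative of the terms addCommonTerms() would add (ignoring the magic term, the title postings and limitTermLength() truncation).

#include <iostream>
#include <string>
#include <vector>

int main()
{
    // Terms along the lines of what addCommonTerms() adds for a hypothetical
    // document at http://www.example.com/docs/readme.txt (English, text/plain),
    // assuming getLocation() yields "/docs" and getFile() yields "readme.txt".
    std::vector<std::string> terms;
    terms.push_back("Uhttp://www.example.com/docs/readme.txt"); // full URL, prefix U
    terms.push_back("Hwww.example.com");                        // host name, prefix H
    terms.push_back("Hexample.com");                            // ...and parent domains
    terms.push_back("Hcom");
    terms.push_back("XDIR:/docs");                              // directory tree, prefix XDIR:
    terms.push_back("Preadme.txt");                             // file name, prefix P
    terms.push_back("Len");                                     // language code, prefix L
    terms.push_back("Ttext/plain");                             // MIME type, prefix T

    for (std::vector<std::string>::const_iterator it = terms.begin(); it != terms.end(); ++it)
        std::cout << *it << std::endl;
    return 0;
}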