QSet<qint64> TestSearchPlugin::search(const QString &query, const QList<qint64> &collections, const QStringList &mimeTypes)
{
    Q_UNUSED(collections);
    Q_UNUSED(mimeTypes);
    const QSet<qint64> result = parseQuery(query);
    qDebug() << "PLUGIN QUERY:" << query;
    qDebug() << "PLUGIN RESULT:" << result;
    // Return the already-computed result instead of parsing the query a second time.
    return result;
}

std::pair<String, StoragePtr> createTableFromDefinition(
    const String & definition,
    const String & database_name,
    const String & database_data_path,
    Context & context,
    bool has_force_restore_data_flag,
    const String & description_for_error_message)
{
    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, definition.data(), definition.data() + definition.size(),
        description_for_error_message, 0);

    ASTCreateQuery & ast_create_query = typeid_cast<ASTCreateQuery &>(*ast);
    ast_create_query.attach = true;
    ast_create_query.database = database_name;

    /// We do not directly use `InterpreterCreateQuery::execute`, because
    /// - the database has not been created yet;
    /// - the code is simpler, since the query is already brought to a suitable form.
    if (!ast_create_query.columns)
        throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

    ColumnsDescription columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns, context);

    return
    {
        ast_create_query.table,
        StorageFactory::instance().get(
            ast_create_query, database_data_path, ast_create_query.table, database_name,
            context, context.getGlobalContext(), columns, true, has_force_restore_data_flag)
    };
}

ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns)
{
    auto columns_list = std::make_shared<ASTExpressionList>();

    for (const auto & column : columns.getAll())
    {
        const auto column_declaration = std::make_shared<ASTColumnDeclaration>();
        ASTPtr column_declaration_ptr{column_declaration};

        column_declaration->name = column.name;

        StringPtr type_name = std::make_shared<String>(column.type->getName());
        auto pos = type_name->data();
        const auto end = pos + type_name->size();

        ParserIdentifierWithOptionalParameters storage_p;
        column_declaration->type = parseQuery(storage_p, pos, end, "data type", 0);
        column_declaration->type->owned_string = type_name;

        const auto it = columns.defaults.find(column.name);
        if (it != std::end(columns.defaults))
        {
            column_declaration->default_specifier = toString(it->second.kind);
            column_declaration->default_expression = it->second.expression->clone();
        }

        columns_list->children.push_back(column_declaration_ptr);
    }

    return columns_list;
}

Status ParsedUpdate::parseRequest() {
    // It is invalid to request that the UpdateStage return the prior or newly-updated version
    // of a document during a multi-update.
    invariant(!(_request->shouldReturnAnyDocs() && _request->isMulti()));

    // It is invalid to request that a ProjectionStage be applied to the UpdateStage if the
    // UpdateStage would not return any document.
    invariant(_request->getProj().isEmpty() || _request->shouldReturnAnyDocs());

    if (!_request->getCollation().isEmpty()) {
        auto collator = CollatorFactoryInterface::get(_opCtx->getServiceContext())
                            ->makeFromBSON(_request->getCollation());
        if (!collator.isOK()) {
            return collator.getStatus();
        }
        _collator = std::move(collator.getValue());
    }

    Status status = parseArrayFilters();
    if (!status.isOK()) {
        return status;
    }

    // We parse the update portion before the query portion because the disposition of the update
    // may determine whether or not we need to produce a CanonicalQuery at all. For example, if
    // the update involves the positional-dollar operator, we must have a CanonicalQuery even if
    // it isn't required for query execution.
    parseUpdate();
    status = parseQuery();
    if (!status.isOK())
        return status;

    return Status::OK();
}

result_t Url::parse(exlib::string url, bool parseQueryString, bool slashesDenoteHost)
{
    bool bHost;

    clear();
    m_slashes = false;

    trimUrl(url, url);
    const char* c_str = url.c_str();
    bool hasHash = qstrchr(c_str, '#') != NULL;

    if (!slashesDenoteHost && !hasHash && isUrlSlash(*c_str)) {
        parsePath(c_str);
        parseQuery(c_str);
        parseHash(c_str);

        if (parseQueryString) {
            m_queryParsed = new HttpCollection();
            m_queryParsed->parse(m_query);
        }

        return 0;
    }

    parseProtocol(c_str);

    bHost = checkHost(c_str);

    if (slashesDenoteHost || m_protocol.length() > 0 || bHost)
        // compare() != 0 means the protocol is anything but "javascript:"
        m_slashes = ((isUrlSlash(*c_str) && isUrlSlash(c_str[1]))
            && (m_protocol.length() <= 0 || m_protocol.compare("javascript:")));

    if (m_protocol.compare("javascript:") && m_slashes) {
        c_str += 2;

        parseAuth(c_str);
        parseHost(c_str);
    }

    parsePath(c_str);
    parseQuery(c_str);
    parseHash(c_str);

    if (parseQueryString) {
        m_queryParsed = new HttpCollection();
        m_queryParsed->parse(m_query);
    }

    return 0;
}

void DatabaseOrdinary::alterTable(
    const Context & context,
    const String & name,
    const NamesAndTypesList & columns,
    const NamesAndTypesList & materialized_columns,
    const NamesAndTypesList & alias_columns,
    const ColumnDefaults & column_defaults,
    const ASTModifier & engine_modifier)
{
    /// Read the table definition and replace the relevant parts with new ones.

    String table_name_escaped = escapeForFileName(name);
    String table_metadata_tmp_path = path + "/" + table_name_escaped + ".sql.tmp";
    String table_metadata_path = path + "/" + table_name_escaped + ".sql";
    String statement;

    {
        char in_buf[METADATA_FILE_BUFFER_SIZE];
        ReadBufferFromFile in(table_metadata_path, METADATA_FILE_BUFFER_SIZE, -1, in_buf);
        WriteBufferFromString out(statement);
        copyData(in, out);
    }

    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, statement.data(), statement.data() + statement.size(),
        "in file " + table_metadata_path);

    ASTCreateQuery & ast_create_query = typeid_cast<ASTCreateQuery &>(*ast);

    ASTPtr new_columns = InterpreterCreateQuery::formatColumns(columns, materialized_columns, alias_columns, column_defaults);

    auto it = std::find(ast_create_query.children.begin(), ast_create_query.children.end(), ast_create_query.columns);
    if (it == ast_create_query.children.end())
        throw Exception("Logical error: cannot find columns child in ASTCreateQuery", ErrorCodes::LOGICAL_ERROR);

    *it = new_columns;
    ast_create_query.columns = new_columns;

    if (engine_modifier)
        engine_modifier(ast_create_query.storage);

    statement = getTableDefinitionFromCreateQuery(ast);

    {
        WriteBufferFromFile out(table_metadata_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
        writeString(statement, out);
        out.next();
        out.sync();
        out.close();
    }

    try
    {
        /// rename atomically replaces the old file with the new one.
        Poco::File(table_metadata_tmp_path).renameTo(table_metadata_path);
    }
    catch (...)
    {
        Poco::File(table_metadata_tmp_path).remove();
        throw;
    }
}

ASTPtr InterpreterCreateQuery::formatColumns(const ColumnsDescription & columns)
{
    auto columns_list = std::make_shared<ASTExpressionList>();

    for (const auto & column : columns.getAll())
    {
        const auto column_declaration = std::make_shared<ASTColumnDeclaration>();
        ASTPtr column_declaration_ptr{column_declaration};

        column_declaration->name = column.name;

        ParserIdentifierWithOptionalParameters storage_p;
        String type_name = column.type->getName();
        auto type_name_pos = type_name.data();
        const auto type_name_end = type_name_pos + type_name.size();
        column_declaration->type = parseQuery(storage_p, type_name_pos, type_name_end, "data type", 0);

        const auto defaults_it = columns.defaults.find(column.name);
        if (defaults_it != std::end(columns.defaults))
        {
            column_declaration->default_specifier = toString(defaults_it->second.kind);
            column_declaration->default_expression = defaults_it->second.expression->clone();
        }

        const auto comments_it = columns.comments.find(column.name);
        if (comments_it != std::end(columns.comments))
        {
            column_declaration->comment = std::make_shared<ASTLiteral>(Field(comments_it->second));
        }

        const auto ct = columns.codecs.find(column.name);
        if (ct != std::end(columns.codecs))
        {
            String codec_desc = ct->second->getCodecDesc();
            codec_desc = "CODEC(" + codec_desc + ")";
            auto codec_desc_pos = codec_desc.data();
            const auto codec_desc_end = codec_desc_pos + codec_desc.size();
            ParserIdentifierWithParameters codec_p;
            column_declaration->codec = parseQuery(codec_p, codec_desc_pos, codec_desc_end, "column codec", 0);
        }

        columns_list->children.push_back(column_declaration_ptr);
    }

    return columns_list;
}

static void runDir(MaQueue *q)
{
    MaConn          *conn;
    MaResponse      *resp;
    MaRequest       *req;
    MprList         *list;
    MprDirEntry     *dp;
    Dir             *dir;
    cchar           *filename;
    uint            nameSize;
    int             next;

    conn = q->conn;
    req = conn->request;
    resp = conn->response;
    dir = q->stage->stageData;

    filename = resp->filename;
    mprAssert(filename);

    maDontCacheResponse(conn);
    maSetHeader(conn, 0, "Last-Modified", req->host->currentDate);
    maPutForService(q, maCreateHeaderPacket(q), 0);

    parseQuery(conn);

    list = mprGetPathFiles(conn, filename, 1);
    if (list == 0) {
        maWrite(q, "<h2>Can't get file list</h2>\r\n");
        outputFooter(q);
        return;
    }
    if (dir->pattern) {
        filterDirList(conn, list);
    }
    sortList(conn, list);

    /*
     *  Get max filename
     */
    nameSize = 0;
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        nameSize = max((int) strlen(dp->name), nameSize);
    }
    nameSize = max(nameSize, 22);

    outputHeader(q, req->url, nameSize);
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        outputLine(q, dp, filename, nameSize);
    }
    outputFooter(q);
    maPutForService(q, maCreateEndPacket(conn), 1);
    mprFree(list);
}

void parseParams(const std::string& input, std::vector<Query>* params)
{
    std::stringstream ss(input);
    std::string item;
    while (std::getline(ss, item, '&')) {
        auto query = parseQuery(item);
        params->push_back(query);
        TRC("Parsed param: %s:%s", query.key.c_str(), query.value.c_str());
    }
}

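For reference, the splitting convention above can be reproduced standalone. The Query struct and parseQuery helper below are hypothetical stand-ins for the ones the snippet assumes; only the '&'/'=' split is shown:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical key/value pair mirroring the Query type the snippet assumes.
struct Query {
    std::string key;
    std::string value;
};

// Hypothetical parseQuery: split a single "key=value" item at the first '='.
static Query parseQuery(const std::string& item)
{
    Query q;
    const auto eq = item.find('=');
    q.key = item.substr(0, eq);         // the whole item when there is no '='
    if (eq != std::string::npos)
        q.value = item.substr(eq + 1);
    return q;
}

int main()
{
    std::vector<Query> params;
    std::stringstream ss("a=1&b=2&flag");
    std::string item;
    while (std::getline(ss, item, '&'))  // same '&' split as parseParams above
        params.push_back(parseQuery(item));
    for (const auto& q : params)
        std::cout << q.key << " -> " << q.value << "\n";  // a -> 1, b -> 2, flag ->
}
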
// Entry point for a search.
virtual shared_ptr<Cursor> newCursor(const BSONObj& query, const BSONObj& order, int numWanted) const {
    vector<QueryGeometry> regions;
    double maxDistance = DBL_MAX;
    bool isNear = false;
    bool isIntersect = false;

    // Go through the fields that we index, and for each geo one, make a QueryGeometry
    // object for the S2Cursor class to do intersection testing/cover generating with.
    for (size_t i = 0; i < _fields.size(); ++i) {
        const IndexedField &field = _fields[i];
        if (IndexedField::GEO != field.type) { continue; }

        BSONElement e = query.getFieldDotted(field.name);
        if (e.eoo()) { continue; }
        if (!e.isABSONObj()) { continue; }
        BSONObj obj = e.Obj();

        QueryGeometry geoQueryField(field.name);
        if (parseLegacy(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) {
            regions.push_back(geoQueryField);
        } else if (parseQuery(obj, &geoQueryField, &isNear, &isIntersect, &maxDistance)) {
            regions.push_back(geoQueryField);
        } else {
            uasserted(16535, "can't parse query for *2d geo search: " + obj.toString());
        }
    }

    if (isNear && isIntersect) {
        uasserted(16474, "Can't do both near and intersect, query: " + query.toString());
    }

    // I copied this from 2d.cpp. Guard against perversion.
    if (numWanted < 0) numWanted *= -1;
    if (0 == numWanted) numWanted = INT_MAX;

    BSONObjBuilder geoFieldsToNuke;
    for (size_t i = 0; i < _fields.size(); ++i) {
        const IndexedField &field = _fields[i];
        if (IndexedField::GEO != field.type) { continue; }
        geoFieldsToNuke.append(field.name, "");
    }

    // false means we want to filter OUT geoFieldsToNuke, not filter to include only that.
    BSONObj filteredQuery = query.filterFieldsUndotted(geoFieldsToNuke.obj(), false);

    if (isNear) {
        S2NearCursor *cursor = new S2NearCursor(keyPattern(), getDetails(), filteredQuery, regions,
                                                _params, numWanted, maxDistance);
        return shared_ptr<Cursor>(cursor);
    } else {
        // Default to intersect.
        S2Cursor *cursor = new S2Cursor(keyPattern(), getDetails(), filteredQuery, regions,
                                        _params, numWanted);
        return shared_ptr<Cursor>(cursor);
    }
}

/*
    Start the request (and complete it)
 */
static void startDir(HttpQueue *q)
{
    HttpConn        *conn;
    HttpTx          *tx;
    HttpRx          *rx;
    MprList         *list;
    MprDirEntry     *dp;
    HttpDir         *dir;
    cchar           *path;
    uint            nameSize;
    int             next;

    conn = q->conn;
    rx = conn->rx;
    tx = conn->tx;
    if ((dir = conn->reqData) == 0) {
        httpError(conn, HTTP_CODE_INTERNAL_SERVER_ERROR, "Cannot get directory listing");
        return;
    }
    assert(tx->filename);

    if (!(rx->flags & (HTTP_GET | HTTP_HEAD))) {
        httpError(conn, HTTP_CODE_BAD_METHOD, "Bad method");
        return;
    }
    httpSetContentType(conn, "text/html");
    httpSetHeaderString(conn, "Cache-Control", "no-cache");
    httpSetHeaderString(conn, "Last-Modified", conn->http->currentDate);
    parseQuery(conn);

    if ((list = mprGetPathFiles(tx->filename, MPR_PATH_RELATIVE)) == 0) {
        httpWrite(q, "<h2>Cannot get file list</h2>\r\n");
        outputFooter(q);
        return;
    }
    if (dir->pattern) {
        filterDirList(conn, list);
    }
    sortList(conn, list);

    /*
        Get max filename size
     */
    nameSize = 0;
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        nameSize = max((int) strlen(dp->name), nameSize);
    }
    nameSize = max(nameSize, 22);

    path = rx->route->prefix ? sjoin(rx->route->prefix, rx->pathInfo, NULL) : rx->pathInfo;
    outputHeader(q, path, nameSize);
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        outputLine(q, dp, tx->filename, nameSize);
    }
    outputFooter(q);
    httpFinalize(conn);
}

bool QueryDataSource::createBrowseWU()
{
    StringAttr dataset, datasetDefs;
    StringAttrAdaptor a1(dataset), a2(datasetDefs);
    wuResult->getResultDataset(a1, a2);
    if (!dataset || !datasetDefs)
        return false;
    StringBuffer fullText;
    fullText.append(datasetDefs).append(dataset);
    OwnedHqlExpr parsed = parseQuery(fullText.str());
    if (!parsed)
        return false;

    HqlExprAttr selectFields = parsed.getLink();
    if (selectFields->getOperator() == no_output)
        selectFields.set(selectFields->queryChild(0));

    OwnedHqlExpr browseWUcode = buildQueryViewerEcl(selectFields);
    if (!browseWUcode)
        return false;
    returnedRecord.set(browseWUcode->queryChild(0)->queryRecord());

    StringAttr tempAttr;
    StringAttrAdaptor temp(tempAttr);

    Owned<IWorkUnitFactory> factory = getWorkUnitFactory();
    Owned<IConstWorkUnit> parent = factory->openWorkUnit(wuid, false);

    SCMStringBuffer user;
    StringAttrAdaptor acluster(cluster);
    parent->getClusterName(acluster);
    parent->getUser(user);

    Owned<IWorkUnit> workunit = factory->createWorkUnit(NULL, "fileViewer", user.str());
    workunit->setUser(user.str());
    workunit->setClusterName(cluster);
    workunit->setCustomerId(parent->getCustomerId(temp).str());
    workunit->setCompareMode(CompareModeOff);   // ? parent->getCompareMode()

    StringAttrAdaptor bwa(browseWuid);
    workunit->getWuid(bwa);

    workunit->setDebugValueInt("importImplicitModules", false, true);
    workunit->setDebugValueInt("importAllModules", false, true);
    workunit->setDebugValueInt("forceFakeThor", 1, true);

    StringBuffer jobName;
    jobName.append("FileView for ").append(wuid).append(":").append("x");
    workunit->setJobName(jobName.str());

    StringBuffer eclText;
    toECL(browseWUcode, eclText, true);
    Owned<IWUQuery> query = workunit->updateQuery();
    query->setQueryText(eclText.str());
    query->setQueryName(jobName.str());

    return true;
}

ASTPtr DatabaseDictionary::getCreateDatabaseQuery(const Context & /*context*/) const
{
    String query;
    {
        WriteBufferFromString buffer(query);
        buffer << "CREATE DATABASE " << backQuoteIfNeed(name) << " ENGINE = Dictionary";
    }
    ParserCreateQuery parser;
    return parseQuery(parser, query.data(), query.data() + query.size(), "", 0);
}

/**
 * Parses oauth_token and oauth_token_secret from the response of the service provider
 * and sets m_oauthToken and m_oauthTokenSecret accordingly
 * @param response response from service provider
 */
void OAuth::parseTokens(const QByteArray& response)
{
    // OAuth spec 5.3, 6.1.2, 6.3.2
    // Use QUrl for parsing: prepend a dummy base URL so the response body
    // becomes the query string of a well-formed URL.
    QByteArray parseQuery("https://parse.com?");
    QUrl parseUrl = QUrl::fromEncoded(parseQuery + response);

    m_oauthToken = parseUrl.encodedQueryItemValue("oauth_token");
    m_oauthTokenSecret = parseUrl.encodedQueryItemValue("oauth_token_secret");
}

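The dummy https://parse.com? prefix exists only to coerce QUrl's query parser into splitting a form-encoded body; any syntactically valid base URL would do. Qt 5 removed QUrl::encodedQueryItemValue in favour of QUrlQuery, so a port of the same parsing might look like this (a minimal sketch, assuming Qt 5):

#include <QByteArray>
#include <QPair>
#include <QString>
#include <QUrlQuery>

// Parse "oauth_token=...&oauth_token_secret=..." directly, no dummy URL needed.
static QPair<QString, QString> parseOAuthTokens(const QByteArray& response)
{
    QUrlQuery query(QString::fromUtf8(response));
    return qMakePair(query.queryItemValue(QStringLiteral("oauth_token")),
                     query.queryItemValue(QStringLiteral("oauth_token_secret")));
}
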
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg,
         BSONObjBuilder& result, bool fromRepl) {
    BSONElement argElt = cmdObj["stageDebug"];
    if (argElt.eoo() || !argElt.isABSONObj()) { return false; }
    BSONObj argObj = argElt.Obj();

    // Pull out the collection name.
    BSONElement collElt = argObj["collection"];
    if (collElt.eoo() || (String != collElt.type())) { return false; }
    string collName = collElt.String();

    // Need a context to get the actual Collection*
    // TODO A write lock is currently taken here to accommodate stages that perform writes
    //      (e.g. DeleteStage). This should be changed to use a read lock for read-only
    //      execution trees.
    Client::WriteContext ctx(txn, dbname);

    // Make sure the collection is valid.
    Database* db = ctx.ctx().db();
    Collection* collection = db->getCollection(txn, db->name() + '.' + collName);
    uassert(17446, "Couldn't find the collection " + collName, NULL != collection);

    // Pull out the plan
    BSONElement planElt = argObj["plan"];
    if (planElt.eoo() || !planElt.isABSONObj()) { return false; }
    BSONObj planObj = planElt.Obj();

    // Parse the plan into these.
    OwnedPointerVector<MatchExpression> exprs;
    auto_ptr<WorkingSet> ws(new WorkingSet());

    PlanStage* userRoot = parseQuery(txn, collection, planObj, ws.get(), &exprs);
    uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot);

    // Add a fetch at the top for the user so we can get obj back for sure.
    // TODO: Do we want to do this for the user? I think so.
    PlanStage* rootFetch = new FetchStage(ws.get(), userRoot, NULL, collection);

    PlanExecutor runner(ws.release(), rootFetch, collection);

    BSONArrayBuilder resultBuilder(result.subarrayStart("results"));

    for (BSONObj obj; PlanExecutor::ADVANCED == runner.getNext(&obj, NULL); ) {
        resultBuilder.append(obj);
    }

    resultBuilder.done();
    return true;
}

ResultSet VectorQueryPageRank::query(string query)
{
    // Get query terms
    list<string> terms;
    parseQuery(query, terms);

    map<unsigned int, double> docsMap;

    // For each term in the query
    for (list<string>::iterator it = terms.begin(); it != terms.end(); ++it) {
        // Get term
        string term = *it;

        // Search term at vocabulary
        vector<term_info> inverted_list;
        index->searchTerm(term, inverted_list);

        // Calculate idf
        double idf = index->getTermIDF(term);

        // For each document in the inverted list of the term, accumulate tf-idf
        for (size_t i = 0; i < inverted_list.size(); i++) {
            unsigned int doc = inverted_list[i].doc;
            unsigned int freq = inverted_list[i].freq;
            docsMap[doc] += (1 + log2(freq)) * idf;
        }
    }

    // Find the extremes of the accumulated scores
    double max = 0.0, min = 1000.0;
    for (auto i = docsMap.begin(); i != docsMap.end(); ++i) {
        if (i->second > max) max = i->second;
        if (i->second < min) min = i->second;
    }

    // Min-max normalize the tf-idf score and blend it with the PageRank score,
    // flipping the map so it is keyed (and thus ordered) by the final score
    multimap<double, unsigned int> flippedDocMap;
    for (auto it = docsMap.begin(); it != docsMap.end(); ++it) {
        double v = (it->second - min) / (max - min);
        double res = alfa * v + (1 - alfa) * pageRank->getRank(it->first);
        flippedDocMap.insert(make_pair(res, it->first));
    }

    // Collect document ids in descending score order
    list<unsigned int> docIds;
    for (multimap<double, unsigned int>::reverse_iterator it = flippedDocMap.rbegin(); it != flippedDocMap.rend(); ++it) {
        unsigned int d = it->second;
        docIds.push_back(d);
    }

    ResultSet result(docFileManager, docIds);
    return result;
}

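In symbols, writing alfa as α and pageRank->getRank(d) as PR(d), the loops above rank each candidate document d by

\[
w(d) = \sum_{t \in q} \bigl(1 + \log_2 f_{t,d}\bigr)\,\mathrm{idf}_t,
\qquad
\mathrm{score}(d) = \alpha \,\frac{w(d) - w_{\min}}{w_{\max} - w_{\min}} + (1 - \alpha)\,\mathrm{PR}(d)
\]

where f_{t,d} is the frequency of term t in d and w_min, w_max are taken over the candidate set, so the tf-idf component is rescaled to [0, 1] before blending with PageRank.
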
vector<Result*> QueryProcessor::searchIndex(string search_string, IndexHandler*& ih)
{
    parseQuery(search_string);

    // Check different types of arguments
    vector<Page*> results;
    if (currentQ->getandArgs().size() > 0) {
        for (auto e : currentQ->getandArgs()) {
            if (results.size() > 0) {
                // Intersect the running result set with the postings for this AND term
                set<Page*> test(results.begin(), results.end());
                results.clear();
                set<Page*> andargs = ih->searchIndex(e);
                set_intersection(test.begin(), test.end(), andargs.begin(), andargs.end(),
                                 back_inserter(results));
            }
            else {
                set<Page*> andargs = ih->searchIndex(e);
                copy(andargs.begin(), andargs.end(), back_inserter(results));
            }
        }
    }
    else if (currentQ->getorArgs().size() > 0) {
        // Union the postings of all OR terms
        set<Page*> orResultSet;
        for (auto e : currentQ->getorArgs()) {
            set<Page*> a = ih->searchIndex(e);
            orResultSet.insert(a.begin(), a.end());
        }
        copy(orResultSet.begin(), orResultSet.end(), back_inserter(results));
    }
    else if (currentQ->getnormArgs().size() > 0) {
        set<Page*> a = ih->searchIndex(currentQ->getnormArgs()[0]);
        copy(a.begin(), a.end(), back_inserter(results));
    }

    if (currentQ->getnotArgs().size() > 0) {
        // Remove the postings of every NOT term from the result set
        for (auto e : currentQ->getnotArgs()) {
            set<Page*> test(results.begin(), results.end());
            results.clear();
            set<Page*> notargs = ih->searchIndex(e);
            set_difference(test.begin(), test.end(), notargs.begin(), notargs.end(),
                           back_inserter(results));
        }
    }

    for (auto e : results)
        cout << e->getTitle() << endl;

    vector<Result*> resultsvector = sortResults(results);
    return resultsvector;
}

void HttpRequest::parseUrl()
{
    if (!schema.empty()) {
        return;
    }

    struct http_parser_url u;
    if (http_parser_parse_url(url.c_str(), url.size(), 0, &u) != 0) {
        LOG_ERROR("parseurl error %s", url.c_str());
        return;
    }

    if (u.field_set & (1 << UF_SCHEMA)) {
        schema = url.substr(u.field_data[UF_SCHEMA].off, u.field_data[UF_SCHEMA].len);
    }

    if (u.field_set & (1 << UF_HOST)) {
        host = url.substr(u.field_data[UF_HOST].off, u.field_data[UF_HOST].len);
    }

    if (u.field_set & (1 << UF_PORT)) {
        port = u.port;
    } else {
        if (strcasecmp(schema.c_str(), "https") == 0 || strcasecmp(schema.c_str(), "wss") == 0) {
            port = 443;
        } else {
            port = 80;
        }
    }

    if (u.field_set & (1 << UF_PATH)) {
        path = url.substr(u.field_data[UF_PATH].off, u.field_data[UF_PATH].len);
    }

    if (u.field_set & (1 << UF_QUERY)) {
        query = url.substr(u.field_data[UF_QUERY].off, u.field_data[UF_QUERY].len);
        parseQuery();
    }
}

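Note that http_parser_parse_url records only offsets and lengths; the caller slices out the substrings itself, as above. A small standalone sketch of that API, assuming the joyent/nodejs http-parser header:

#include <http_parser.h>
#include <cstdio>
#include <cstring>

int main()
{
    const char* url = "https://example.com:8443/index?x=1&y=2";
    struct http_parser_url u;
    memset(&u, 0, sizeof(u));   // older releases lack http_parser_url_init()
    if (http_parser_parse_url(url, strlen(url), 0, &u) != 0)
        return 1;
    if (u.field_set & (1 << UF_QUERY))
        printf("query: %.*s\n", (int) u.field_data[UF_QUERY].len,
               url + u.field_data[UF_QUERY].off);   // prints "query: x=1&y=2"
    return 0;
}
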
void DatabaseOrdinary::alterTable(
    const Context & context,
    const String & name,
    const ColumnsDescription & columns,
    const ASTModifier & storage_modifier)
{
    /// Read the definition of the table and replace the necessary parts with new ones.

    String table_name_escaped = escapeForFileName(name);
    String table_metadata_tmp_path = metadata_path + "/" + table_name_escaped + ".sql.tmp";
    String table_metadata_path = metadata_path + "/" + table_name_escaped + ".sql";
    String statement;

    {
        char in_buf[METADATA_FILE_BUFFER_SIZE];
        ReadBufferFromFile in(table_metadata_path, METADATA_FILE_BUFFER_SIZE, -1, in_buf);
        readStringUntilEOF(statement, in);
    }

    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, statement.data(), statement.data() + statement.size(),
        "in file " + table_metadata_path, 0);

    ASTCreateQuery & ast_create_query = typeid_cast<ASTCreateQuery &>(*ast);

    ASTPtr new_columns = InterpreterCreateQuery::formatColumns(columns);
    ast_create_query.replace(ast_create_query.columns, new_columns);

    if (storage_modifier)
        storage_modifier(*ast_create_query.storage);

    statement = getTableDefinitionFromCreateQuery(ast);

    {
        WriteBufferFromFile out(table_metadata_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
        writeString(statement, out);
        out.next();
        if (context.getSettingsRef().fsync_metadata)
            out.sync();
        out.close();
    }

    try
    {
        /// rename atomically replaces the old file with the new one.
        Poco::File(table_metadata_tmp_path).renameTo(table_metadata_path);
    }
    catch (...)
    {
        Poco::File(table_metadata_tmp_path).remove();
        throw;
    }
}

Status UpdateExecutor::prepare() {
    // We parse the update portion before the query portion because the disposition of the update
    // may determine whether or not we need to produce a CanonicalQuery at all. For example, if
    // the update involves the positional-dollar operator, we must have a CanonicalQuery even if
    // it isn't required for query execution.
    Status status = parseUpdate();
    if (!status.isOK())
        return status;

    status = parseQuery();
    if (!status.isOK())
        return status;

    return Status::OK();
}

bool Uri::Private::parseRelativeRef()
{
    if (!parseRelativePart()) {
        return false;
    }
    if (expectChar('?')) {
        parseQuery();
    }
    if (expectChar('#')) {
        parseFragment();
    }
    return true;
}

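This follows the RFC 3986 grammar relative-ref = relative-part [ "?" query ] [ "#" fragment ]. The expectChar helper is not shown in the snippet; a plausible cursor-based sketch, where m_pos and m_input are hypothetical parser members:

// Hypothetical helper: consume c if it is the next input character.
bool Uri::Private::expectChar(char c)
{
    if (m_pos < m_input.size() && m_input[m_pos] == c) {
        ++m_pos;    // skip the delimiter so parseQuery()/parseFragment() start after it
        return true;
    }
    return false;
}
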
void MyParser::parsePayload(const std::string& payload, std::vector<Query>* out) const
{
    // find_first_of returns std::string::size_type, not int; storing it in an
    // int breaks the comparison against std::string::npos.
    const auto i1 = payload.find_first_of("&");
    if (i1 == std::string::npos) {
        const auto i2 = payload.find_first_of("=");
        if (i2 != std::string::npos) {
            auto query = parseQuery(payload);
            out->push_back(query);
            return; // single item
        }
        return; // no payload items
    }
    parseParams(payload, out);
}

ResultSet VectorQuery::query(string query)
{
    // Get query terms
    list<string> terms;
    parseQuery(query, terms);

    map<unsigned int, double> docsMap;

    // For each term in the query
    for (list<string>::iterator it = terms.begin(); it != terms.end(); ++it) {
        // Get term
        string term = *it;

        // Search term at vocabulary
        vector<term_info> inverted_list;
        index->searchTerm(term, inverted_list);

        // Calculate idf
        double idf = index->getTermIDF(term);

        // For each document in the inverted list of the term
        for (size_t i = 0; i < inverted_list.size(); i++) {
            unsigned int doc = inverted_list[i].doc;
            unsigned int freq = inverted_list[i].freq;
            docsMap[doc] += (1 + log2(freq)) * idf;
        }
    }

    // Divide by the document norm (or length), flipping the map so it is
    // keyed (and thus ordered) by the normalized score
    multimap<double, unsigned int> flippedDocMap;
    for (map<unsigned int, double>::iterator it = docsMap.begin(); it != docsMap.end(); ++it) {
        unsigned int d = it->first;
        if (norm == NORM_VECTOR)
            flippedDocMap.insert(make_pair(it->second / index->getDocumentNorm(d), d));
        else if (norm == NORM_WORDS)
            flippedDocMap.insert(make_pair(it->second / index->getDocumentLen(d), d));
        else
            flippedDocMap.insert(make_pair(it->second, d));
    }

    // Collect document ids in descending score order
    list<unsigned int> docIds;
    for (multimap<double, unsigned int>::reverse_iterator it = flippedDocMap.rbegin(); it != flippedDocMap.rend(); ++it) {
        unsigned int d = it->second;
        docIds.push_back(d);
    }

    ResultSet result(docFileManager, docIds);
    return result;
}

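The NORM_VECTOR branch is the usual cosine-style normalization. In the same notation as before:

\[
\mathrm{score}(d) = \frac{1}{\lVert d \rVert} \sum_{t \in q} \bigl(1 + \log_2 f_{t,d}\bigr)\,\mathrm{idf}_t
\]

with ‖d‖ replaced by the document's word count under NORM_WORDS, and dropped entirely otherwise.
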
static void processDir(HttpQueue *q)
{
    HttpConn        *conn;
    HttpTx          *tx;
    HttpRx          *rx;
    MprList         *list;
    MprDirEntry     *dp;
    Dir             *dir;
    uint            nameSize;
    int             next;

    conn = q->conn;
    rx = conn->rx;
    tx = conn->tx;
    dir = conn->data;

    mprLog(5, "processDir");
    mprAssert(tx->filename);

    httpSetHeaderString(conn, "Cache-Control", "no-cache");
    httpSetHeaderString(conn, "Last-Modified", conn->http->currentDate);
    parseQuery(conn);

    list = mprGetPathFiles(tx->filename, 1);
    if (list == 0) {
        httpWrite(q, "<h2>Can't get file list</h2>\r\n");
        outputFooter(q);
        return;
    }
    if (dir->pattern) {
        filterDirList(conn, list);
    }
    sortList(conn, list);

    /*
        Get max filename
     */
    nameSize = 0;
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        nameSize = max((int) strlen(dp->name), nameSize);
    }
    nameSize = max(nameSize, 22);

    outputHeader(q, rx->pathInfo, nameSize);
    for (next = 0; (dp = mprGetNextItem(list, &next)) != 0; ) {
        outputLine(q, dp, tx->filename, nameSize);
    }
    outputFooter(q);
    httpFinalize(conn);
}

void Transport::parsePostParams()
{
    if (!m_postDataParsed) {
        assert(m_postData == nullptr);
        int size;
        const char *data = (const char *)getPostData(size);
        if (data && *data && size) {
            // Post data may be binary, but if parsePostParams() is called, it is
            // correct to handle it as a null-terminated string
            m_postData = strdup(data);
            parseQuery(m_postData, m_postParams);
        }
        m_postDataParsed = true;
    }
}

static ASTPtr getCreateQueryImpl(const String & path, const String & table_name)
{
    String table_name_escaped = escapeForFileName(table_name);
    String table_metadata_path = path + "/" + table_name_escaped + ".sql";
    String query;

    {
        ReadBufferFromFile in(table_metadata_path, 4096);
        WriteBufferFromString out(query);
        copyData(in, out);
    }

    ParserCreateQuery parser;
    return parseQuery(parser, query.data(), query.data() + query.size(), "in file " + table_metadata_path);
}

void Transport::parseGetParams()
{
    if (m_url == nullptr) {
        const char *url = getServerObject();
        assert(url);

        const char *p = strchr(url, '?');
        if (p) {
            m_url = strdup(p + 1);
        } else {
            m_url = strdup("");
        }

        parseQuery(m_url, m_getParams);
    }
}

void Transport::parsePostParams()
{
    FiberWriteLock lock(this);
    ASSERT(!m_postDataParsed);
    ASSERT(m_postData == NULL);
    int size;
    const char *data = (const char *)getPostData(size);
    if (data && *data && size) {
        // Post data may be binary, but if parsePostParams() is called, it is
        // correct to handle it as a null-terminated string
        m_postData = strdup(data);
        parseQuery(m_postData, m_postParams);
    }
    m_postDataParsed = true;
}

void URI::parsePathEtc(std::string::const_iterator& it, const std::string::const_iterator& end)
{
    if (it == end) return;
    if (*it != '?' && *it != '#')
        parsePath(it, end);
    if (it != end && *it == '?')
    {
        ++it;
        parseQuery(it, end);
    }
    if (it != end && *it == '#')
    {
        ++it;
        parseFragment(it, end);
    }
}

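The iterator pair walks path [ "?" query ] [ "#" fragment ] in order, consuming each delimiter before dispatching to the sub-parser. Through the public Poco::URI interface the effect is observable like this (a small usage sketch):

#include <Poco/URI.h>
#include <iostream>

int main()
{
    Poco::URI uri("http://example.com/docs/index.html?user=alice&lang=en#top");
    std::cout << uri.getPath() << "\n";       // /docs/index.html
    std::cout << uri.getQuery() << "\n";      // user=alice&lang=en
    std::cout << uri.getFragment() << "\n";   // top
}
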
void Transport::parseGetParams()
{
    FiberWriteLock lock(this);
    ASSERT(m_url == NULL);
    const char *url = getServerObject();
    ASSERT(url);

    const char *p = strchr(url, '?');
    if (p) {
        m_url = strdup(p + 1);
    } else {
        m_url = strdup("");
    }

    parseQuery(m_url, m_getParams);
}

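All four Transport methods above hand a strdup'ed, writable buffer to a parseQuery(char*, ParamMap&) style helper. HHVM's actual helper also URL-decodes percent escapes and handles PHP-style array keys; the following is only a minimal standalone sketch of the key=value&... split, with a hypothetical ParamMap:

#include <cstring>
#include <map>
#include <string>

// Hypothetical stand-in for the params map the Transport methods fill.
using ParamMap = std::multimap<std::string, std::string>;

// Minimal sketch: destructively split "a=1&b=2" into pairs, which is why the
// callers strdup() their buffers first. URL-decoding is deliberately omitted.
static void parseQuery(char* data, ParamMap& params)
{
    for (char* item = strtok(data, "&"); item; item = strtok(nullptr, "&")) {
        if (char* eq = strchr(item, '=')) {
            *eq = '\0';                     // terminate the key in place
            params.emplace(item, eq + 1);
        } else {
            params.emplace(item, "");       // valueless item, e.g. "?flag"
        }
    }
}
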