/*!
 * \brief Retrieve the result set of the previously executed query
 *
 * Takes ownership of the collection/cursor/columns-doc held by the
 * connection, allocates a new db1 result, and converts the fetched
 * documents into rows.
 *
 * \param _h database connection
 * \param _r output: newly allocated result set (caller frees)
 * \return 0 on success (also when the query matched nothing), -1 on failure
 */
static int db_mongodb_store_result(const db1_con_t* _h, db1_res_t** _r)
{
	km_mongodb_con_t *mgcon;
	db_mongodb_result_t *mgres;
	const bson_t *itdoc;

	mgcon = MONGODB_CON(_h);
	if(!_r) {
		LM_ERR("invalid result parameter\n");
		return -1;
	}
	*_r = db_mongodb_new_result();
	if (!*_r) {
		LM_ERR("no memory left for result \n");
		goto error;
	}
	/* transfer ownership of the query resources from the connection to the
	 * result; the connection's pointers are cleared so the error path below
	 * only frees whatever was NOT handed over */
	mgres = (db_mongodb_result_t*)RES_PTR(*_r);
	mgres->collection = mgcon->collection;
	mgcon->collection = NULL;
	mgres->cursor = mgcon->cursor;
	mgcon->cursor = NULL;
	mgres->colsdoc = mgcon->colsdoc;
	mgcon->colsdoc = NULL;
	mgres->nrcols = mgcon->nrcols;
	mgcon->nrcols = 0;
	/* fetch the first document; an empty cursor is a valid empty result */
	if(!mongoc_cursor_more (mgres->cursor)
			|| !mongoc_cursor_next (mgres->cursor, &itdoc)
			|| !itdoc) {
		LM_DBG("no result from mongodb\n");
		return 0;
	}
	/* first document linked internally in result to get columns */
	mgres->rdoc = (bson_t*)itdoc;
	if(db_mongodb_get_columns(_h, *_r)<0) {
		LM_ERR("failed to set the columns\n");
		goto error;
	}
	if(db_mongodb_convert_result(_h, *_r)<0) {
		LM_ERR("failed to set the rows in result\n");
		goto error;
	}

	return 0;
error:
	/* release any query resources still owned by the connection
	 * (after the ownership transfer above these are already NULL) */
	if(mgcon->colsdoc) {
		bson_destroy (mgcon->colsdoc);
		mgcon->colsdoc = NULL;
	}
	mgcon->nrcols = 0;
	if(mgcon->cursor) {
		mongoc_cursor_destroy (mgcon->cursor);
		mgcon->cursor = NULL;
	}
	if(mgcon->collection) {
		mongoc_collection_destroy (mgcon->collection);
		mgcon->collection = NULL;
	}
	return -1;
}
bool Cursor::hasNext() { const bson_t *doc; if (mongoc_cursor_error(cursor_, &last_error_) || !mongoc_cursor_more(cursor_)) return false; if (!mongoc_cursor_next(cursor_, &doc)) return false; last_result_ = Document(*doc); return true; }
/**
 * mongoc_database_has_collection:
 * @database: (in): A #mongoc_database_t.
 * @name: (in): The name of the collection to check for.
 * @error: (out) (allow-none): A location for a #bson_error_t, or %NULL.
 *
 * Checks to see if a collection exists within the database on the MongoDB
 * server.
 *
 * This will return %false if there was an error communicating with the
 * server, or if the collection does not exist.
 *
 * If @error is provided, it will first be zeroed. Upon error, error.domain
 * will be set.
 *
 * Returns: %true if @name exists, otherwise %false. @error may be set.
 */
bool
mongoc_database_has_collection (mongoc_database_t *database,
                                const char        *name,
                                bson_error_t      *error)
{
   mongoc_collection_t *collection;
   mongoc_read_prefs_t *read_prefs;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_iter_t iter;
   bool ret = false;
   const char *cur_name;
   bson_t q = BSON_INITIALIZER;
   char ns[140];

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (name);

   if (error) {
      memset (error, 0, sizeof *error);
   }

   /* build the fully-qualified "db.collection" namespace to look for */
   bson_snprintf (ns, sizeof ns, "%s.%s", database->name, name);
   ns[sizeof ns - 1] = '\0';

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
   /* legacy servers enumerate collections in the special
    * system.namespaces collection; scan it for an entry named @ns */
   collection = mongoc_client_get_collection (database->client,
                                              database->name,
                                              "system.namespaces");
   cursor = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 0,
                                    &q, NULL, read_prefs);
   while (!mongoc_cursor_error (cursor, error) &&
          mongoc_cursor_more (cursor)) {
      /* NOTE(review): the inner loop stops at the first document that lacks
       * a UTF-8 "name" field, skipping any later documents — this appears
       * to assume every namespace document carries that field; confirm */
      while (mongoc_cursor_next (cursor, &doc) &&
             bson_iter_init_find (&iter, doc, "name") &&
             BSON_ITER_HOLDS_UTF8 (&iter)) {
         cur_name = bson_iter_utf8 (&iter, NULL);
         if (!strcmp (cur_name, ns)) {
            ret = true;
            GOTO (cleanup);
         }
      }
   }

cleanup:
   mongoc_cursor_destroy (cursor);
   mongoc_collection_destroy (collection);
   mongoc_read_prefs_destroy (read_prefs);

   RETURN (ret);
}
char *be_mongo_getuser(void *handle, const char *username) { struct mongo_backend *conf = (struct mongo_backend *)handle; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; const char *collection_name = "passport"; bson_t query; char *str = NULL; char *result = malloc(33); memset(result, 0, 33); bson_init (&query); bson_append_utf8 (&query, "username", -1, username, -1); collection = mongoc_client_get_collection (conf->client, "cas", collection_name); cursor = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, /* Fields, NULL for all. */ NULL); /* Read Prefs, NULL for default */ bson_iter_t iter; while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, "pwd"); //fprintf (stdout, "%s\n", bson_iter_utf8(&iter, NULL)); str = bson_as_json (doc, NULL); //fprintf (stdout, "%s\n", str); bson_free (str); char *src = (char *)bson_iter_utf8(&iter, NULL); memcpy(result, src, strlen(src)); } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return result; } bson_destroy (&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy (collection); return result; }
int be_mongo_superuser(void *conf, const char *username) { struct mongo_backend *handle = (struct mongo_backend *) conf; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; int result; bson_t query; bson_iter_t iter; bson_init (&query); bson_append_utf8(&query, "username", -1, username, -1); collection = mongoc_client_get_collection(handle->client, dbName, colName); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, superUser); result = (int64_t) bson_iter_as_int64(&iter); //_log(LOG_NOTICE, "SUPERUSER: %d", result); } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return result; } bson_destroy (&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy (collection); return result; }
char *be_mongo_getuser(void *handle, const char *username, const char *password, int *authenticated) { struct mongo_backend *conf = (struct mongo_backend *)handle; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; bson_iter_t iter; bson_t query; char *result; bson_init (&query); bson_append_utf8 (&query, "username", -1, username, -1); collection = mongoc_client_get_collection (conf->client, dbName, colName); cursor = mongoc_collection_find (collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, /* Fields, NULL for all. */ NULL); /* Read Prefs, NULL for default */ while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, passLoc); char *src = (char *)bson_iter_utf8(&iter, NULL); size_t tmp = strlen(src); result = (char *) malloc(tmp); memset(result, 0, tmp); memcpy(result, src, tmp); } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return result; } bson_destroy (&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy (collection); return result; }
// Query the parameters table for the outlet id and subbasin count and
// cache them in m_outletID and m_nSubbasins.  Throws ModelException when
// a fetched document carries no usable name/value pair.
void SettingsOutput::SetSubbasinIDs()
{
    // build { $query: { <PARAM_FLD_NAME>: { $in: [VAR_OUTLETID, VAR_SUBBSNID_NUM] } } }
    bson_t *b = bson_new();
    bson_t *child = bson_new();
    bson_t *child2 = bson_new();
    bson_t *child3 = bson_new();
    BSON_APPEND_DOCUMENT_BEGIN(b, "$query", child);
    BSON_APPEND_DOCUMENT_BEGIN(child, PARAM_FLD_NAME, child2);
    BSON_APPEND_ARRAY_BEGIN(child2, "$in", child3);
    // NOTE(review): both array elements are appended under the key
    // PARAM_FLD_NAME; BSON arrays conventionally use numeric keys
    // "0"/"1" — confirm the server tolerates this
    BSON_APPEND_UTF8(child3, PARAM_FLD_NAME, VAR_OUTLETID);
    BSON_APPEND_UTF8(child3, PARAM_FLD_NAME, VAR_SUBBSNID_NUM);
    bson_append_array_end(child2, child3);
    bson_append_document_end(child, child2);
    bson_append_document_end(b, child);
    //printf("%s\n",bson_as_json(b,NULL));

    mongoc_cursor_t *cursor;
    const bson_t *bsonTable;
    mongoc_collection_t *collection;
    collection = mongoc_client_get_collection(m_conn, m_dbName.c_str(), DB_TAB_PARAMETERS);
    cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, b, NULL, NULL);

    bson_iter_t iter;
    while (mongoc_cursor_more(cursor) && mongoc_cursor_next(cursor, &bsonTable))
    {
        string nameTmp = "";
        int numTmp = -1;
        if (bson_iter_init_find(&iter, bsonTable, PARAM_FLD_NAME))
            nameTmp = GetStringFromBSONITER(&iter);
        if (bson_iter_init_find(&iter, bsonTable, PARAM_FLD_VALUE))
            numTmp = GetIntFromBSONITER(&iter);
        if (!StringMatch(nameTmp, "") && numTmp != -1)
        {
            // NOTE(review): iter was last positioned by the PARAM_FLD_VALUE
            // lookup, so these re-reads presumably yield the same value as
            // numTmp — consider assigning numTmp directly; verify
            if (StringMatch(nameTmp, VAR_OUTLETID))
                m_outletID = GetIntFromBSONITER(&iter);
            else if (StringMatch(nameTmp, VAR_SUBBSNID_NUM))
                m_nSubbasins = GetIntFromBSONITER(&iter);
        }
        else
            throw ModelException("SettingOutput", "SetSubbasinIDs", "No valid values found in MongoDB!");
    }
    bson_destroy(child);
    bson_destroy(child2);
    bson_destroy(child3);
    bson_destroy(b);
    mongoc_collection_destroy(collection);
    mongoc_cursor_destroy(cursor);
    return;
}
// iterate over all results and return the count. returns -1 on error. int64_t Cursor::itcount() { int64_t count = 0; const bson_t *doc; // iterate over all results while (true) { if (mongoc_cursor_error(cursor_, &last_error_)) { DEBUG("cursor failure: %s\n", last_error_.message); return -1; } if (!mongoc_cursor_more(cursor_) || !mongoc_cursor_next(cursor_, &doc)) { break; } ++count; } return count; }
/* Poll @collection forever, printing every new document as it arrives.
 * Each pass re-queries starting from the newest "ts" timestamp seen so
 * far, then sleeps one second before polling again.  A server-side
 * cursor error terminates the process. */
static void
tail_collection (mongoc_collection_t *collection)
{
   mongoc_cursor_t *cursor;
   const bson_t *event;
   bson_error_t err;
   bson_iter_t field;
   uint32_t since;

   BSON_ASSERT (collection);

   since = (uint32_t) time (NULL);

   for (;;) {
      cursor = query_collection (collection, since);
      while (!mongoc_cursor_error (cursor, &err) &&
             mongoc_cursor_more (cursor)) {
         if (mongoc_cursor_next (cursor, &event)) {
            /* remember the latest timestamp so the next query resumes there */
            if (bson_iter_init_find (&field, event, "ts") &&
                BSON_ITER_HOLDS_TIMESTAMP (&field)) {
               bson_iter_timestamp (&field, &since, NULL);
            }
            print_bson (event);
         }
      }
      if (mongoc_cursor_error (cursor, &err)) {
         if (err.domain == MONGOC_ERROR_SERVER) {
            fprintf (stderr, "%s\n", err.message);
            exit (1);
         }
      }
      mongoc_cursor_destroy (cursor);
      sleep (1);
   }
}
/*!
 * \brief Convert rows from mongodb to db API representation
 * \param _h database connection
 * \param _r database result set
 * \return 0 on success, negative on failure
 */
static int db_mongodb_convert_result(const db1_con_t* _h, db1_res_t* _r)
{
	int row;
	db_mongodb_result_t *mgres;
	const bson_t *itdoc;
	char *jstr;

	if ((!_h) || (!_r)) {
		LM_ERR("invalid parameter\n");
		return -1;
	}
	mgres = (db_mongodb_result_t*)RES_PTR(_r);
	/* rdoc is the first document, already fetched by store_result;
	 * without it there are no rows to convert */
	if(!mgres->rdoc) {
		mgres->nrcols = 0;
		return 0;
	}
	if(mgres->nrcols==0) {
		LM_DBG("no fields to return\n");
		return 0;
	}
	/* cursor exhausted means only the pre-fetched document exists;
	 * otherwise start with one chunk of rows and grow on demand */
	if(!mongoc_cursor_more (mgres->cursor)) {
		RES_ROW_N(_r) = 1;
		mgres->maxrows = 1;
	} else {
		RES_ROW_N(_r) = DB_MONGODB_ROWS_STEP;
		mgres->maxrows = DB_MONGODB_ROWS_STEP;
	}
	if (db_allocate_rows(_r) < 0) {
		LM_ERR("could not allocate rows\n");
		RES_ROW_N(_r) = 0;
		return -2;
	}

	/* first iteration converts rdoc; subsequent ones pull from the cursor */
	itdoc = mgres->rdoc;
	row = 0;
	do {
		/* grow the row array in steps as more documents arrive */
		if(row >= RES_ROW_N(_r)) {
			if (db_reallocate_rows(_r, RES_ROW_N(_r)+DB_MONGODB_ROWS_STEP) < 0) {
				LM_ERR("could not reallocate rows\n");
				return -2;
			}
			mgres->maxrows = RES_ROW_N(_r);
		}
		if(is_printable(L_DBG)) {
			jstr = bson_as_json (itdoc, NULL);
			LM_DBG("selected document: %s\n", jstr);
			bson_free (jstr);
		}
		if(db_mongodb_convert_bson(_h, _r, row, itdoc)) {
			LM_ERR("failed to convert bson at pos %d\n", row);
			return -1;
		}
		row++;
	} while (mongoc_cursor_more (mgres->cursor)
			&& mongoc_cursor_next (mgres->cursor, &itdoc));
	/* trim the row count down to the number actually converted */
	RES_ROW_N(_r) = row;
	LM_DBG("retrieved number of rows: %d\n", row);
	return 0;
}
int be_mongo_aclcheck(void *conf, const char *clientid, const char *username, const char *topic, int acc) { struct mongo_backend *handle = (struct mongo_backend *) conf; mongoc_collection_t *collection; mongoc_cursor_t *cursor; bson_error_t error; const bson_t *doc; bson_iter_t iter; bool check = false; int match = 0, foundFlag = 0; bson_t query; bson_init(&query); bson_append_utf8(&query, "username", -1, username, -1); collection = mongoc_client_get_collection(handle->client, dbName, colName); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); while (!mongoc_cursor_error (cursor, &error) && mongoc_cursor_more (cursor)) { if (foundFlag == 0 && mongoc_cursor_next (cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, topicLoc); int64_t topId = (int64_t) bson_iter_as_int64(&iter);//, NULL); bson_destroy(&query); mongoc_cursor_destroy(cursor); mongoc_collection_destroy(collection); bson_init(&query); bson_append_int64(&query, topicID, -1, topId); collection = mongoc_client_get_collection(handle->client, dbName, topicLoc); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, &query, NULL, NULL); foundFlag = 1; } if (foundFlag == 1 && mongoc_cursor_next(cursor, &doc)) { bson_iter_init(&iter, doc); bson_iter_find(&iter, topicLoc); uint32_t len; const uint8_t *arr; bson_iter_array(&iter, &len, &arr); bson_t b; if (bson_init_static(&b, arr, len)) { bson_iter_init(&iter, &b); while (bson_iter_next(&iter)) { char *str = bson_iter_dup_utf8(&iter, &len); mosquitto_topic_matches_sub(str, topic, &check); if (check) { match = 1; bson_free(str); break; } bson_free(str); } } } } if (mongoc_cursor_error (cursor, &error)) { fprintf (stderr, "Cursor Failure: %s\n", error.message); return 0; } bson_destroy(&query); mongoc_cursor_destroy (cursor); mongoc_collection_destroy(collection); return match; }
// Load the output configuration table (FILEOUT) from MongoDB and build the
// PrintInfo objects for the given subbasin.
//
// For every enabled row (USE > 0) a PrintInfo keyed by OutputID is created
// or reused, and print items are added according to the Subbasin column:
// the outlet, all subbasins (raster outputs), or an explicit ID list.
// Always returns true; rows with USE <= 0 are skipped.
bool SettingsOutput::LoadSettingsOutputFromMongoDB(int subBasinID)
{
    // empty { $query: {} } selector: fetch every row of the table
    bson_t *b = bson_new();
    bson_t *child1 = bson_new();
    BSON_APPEND_DOCUMENT_BEGIN(b, "$query", child1);
    bson_append_document_end(b, child1);
    bson_destroy(child1);

    mongoc_cursor_t *cursor;
    const bson_t *bsonTable;
    mongoc_collection_t *collection;

    collection = mongoc_client_get_collection(m_conn, m_dbName.c_str(), DB_TAB_FILEOUT);
    cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, b, NULL, NULL);

    bson_iter_t itertor;
    while (mongoc_cursor_more(cursor) && mongoc_cursor_next(cursor, &bsonTable))
    {
        // per-row fields, each defaulted in case the column is absent
        // (NOTE(review): dataType is declared but never read below)
        int use = -1;
        string modCls = "", outputID = "", descprition = "";
        string outFileName = "", aggType = "", unit = "", subBsn = "";
        string dataType = "", intervalUnit = "";
        int interval = -1;
        string sTimeStr = "", eTimeStr = "";
        if (bson_iter_init_find(&itertor, bsonTable, Tag_OutputUSE))
            use = GetIntFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_MODCLS))
            modCls = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_OutputID))
            outputID = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_OutputDESC))
            descprition = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_FileName))
            outFileName = GetStringFromBSONITER(&itertor);
        string coreFileName = GetCoreFileName(outFileName);
        string suffix = GetSuffix(outFileName);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_AggType))
            aggType = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_OutputUNIT))
            unit = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_OutputSubbsn))
            subBsn = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_StartTime))
            sTimeStr = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_EndTime))
            eTimeStr = GetStringFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_Interval))
            interval = GetIntFromBSONITER(&itertor);
        if (bson_iter_init_find(&itertor, bsonTable, Tag_IntervalUnit))
            intervalUnit = GetStringFromBSONITER(&itertor);
        // skip disabled rows
        if (use <= 0) continue;
        /// First, if OutputID does not existed in m_printInfos, then create a new one.
        if (m_printInfosMap.find(outputID) == m_printInfosMap.end())
        {
            m_printInfosMap[outputID] = new PrintInfo();
            m_printInfosMap[outputID]->setOutputID(outputID); /// set the OUTPUTID for the new PrintInfo
        }
        PrintInfo *pi = NULL; /// reset the pointer
        pi = m_printInfosMap[outputID];

        // "<subBasinID>_" prefix for generated output file names
        ostringstream oss;
        oss << subBasinID << "_";
        string strSubbasinID = oss.str();
        // raster outputs are recognized by their GTiff file extension
        bool isRaster = false;
        string gtiff(GTiffExtension);
        if (gtiff.find(suffix) != gtiff.npos)
            isRaster = true;
        /// Check Tag_OutputSubbsn first
        if (StringMatch(subBsn, Tag_Outlet)) /// Output of outlet, such as Qoutlet, SEDoutlet, etc.
        {
            pi->setInterval(interval);
            pi->setIntervalUnits(intervalUnit);
            pi->AddPrintItem(sTimeStr, eTimeStr, strSubbasinID + coreFileName,
                             ValueToString(m_outletID), suffix, m_conn, m_outputGfs, true);
        }
        else if (StringMatch(subBsn, Tag_AllSubbsn) && isRaster)
        {
            /// Output of all subbasins of DT_Raster1D or DT_Raster2D,
            /// one print item per aggregation type
            vector<string> aggTypes = utils::SplitString(aggType, ',');
            for (vector<string>::iterator it = aggTypes.begin(); it != aggTypes.end(); it++)
                pi->AddPrintItem(*it, sTimeStr, eTimeStr, strSubbasinID + coreFileName,
                                 suffix, m_conn, m_outputGfs);
        }
        else // subbasin IDs is provided
        {
            pi->setInterval(interval);
            pi->setIntervalUnits(intervalUnit);
            vector<string> subBsns;
            if (StringMatch(subBsn, Tag_AllSubbsn))
            {
                // expand to every subbasin id 0..m_nSubbasins
                for (int i = 0; i <= m_nSubbasins; i++)
                    subBsns.push_back(ValueToString(i));
                vector<string>(subBsns).swap(subBsns); // shrink-to-fit idiom
            }
            else
                subBsns = utils::SplitString(subBsn, ',');
            for (vector<string>::iterator it = subBsns.begin(); it != subBsns.end(); it++)
                pi->AddPrintItem(sTimeStr, eTimeStr, strSubbasinID + coreFileName, *it,
                                 suffix, m_conn, m_outputGfs, true);
        }
    }
    // flatten the map into the ordered vector used by the model core
    for (map<string, PrintInfo*>::iterator it = m_printInfosMap.begin(); it != m_printInfosMap.end(); it++)
    {
        m_printInfos.push_back(it->second);
    }
    vector<PrintInfo*>(m_printInfos).swap(m_printInfos); // shrink-to-fit idiom
    bson_destroy(b);
    mongoc_collection_destroy(collection);
    mongoc_cursor_destroy(cursor);
    return true;
}
/* Uses old way of querying system.namespaces.
 *
 * Builds a { "collections": [ ... ] } document listing the database's
 * collections, with the "db." prefix stripped from each name so callers
 * need not special-case legacy servers.  An optional @filter on "name"
 * must be a plain string (not a regex) and is db-qualified before use.
 * Returns a new bson_t (caller destroys), or NULL when the name filter
 * has an invalid type (with @error set). */
bson_t *
_mongoc_database_get_collection_info_legacy (mongoc_database_t *database,
                                             const bson_t      *filter,
                                             bson_error_t      *error)
{
   mongoc_collection_t *col;
   mongoc_cursor_t *cursor;
   mongoc_read_prefs_t *read_prefs;
   uint32_t dbname_len;
   const bson_t *doc;
   bson_t legacy_filter;
   bson_iter_t iter;
   const char *name;
   const char *col_filter;
   bson_t q = BSON_INITIALIZER;
   bson_t *ret = NULL;
   bson_t col_array = BSON_INITIALIZER;
   const char *key;
   char keystr[16];
   uint32_t n_cols = 0;

   BSON_ASSERT (database);

   col = mongoc_client_get_collection (database->client,
                                       database->name,
                                       "system.namespaces");

   BSON_ASSERT (col);

   dbname_len = (uint32_t)strlen (database->name);

   /* Filtering on name needs to be handled differently for old servers. */
   if (filter && bson_iter_init_find (&iter, filter, "name")) {
      /* on legacy servers, this must be a string (i.e. not a regex) */
      if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
         bson_set_error (error,
                         MONGOC_ERROR_NAMESPACE,
                         MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE,
                         "On legacy servers, a filter on name can only be a string.");
         goto cleanup_filter;
      }
      BSON_ASSERT (BSON_ITER_HOLDS_UTF8 (&iter));
      col_filter = bson_iter_utf8 (&iter, NULL);
      /* NOTE(review): legacy_filter is initialized here but never
       * bson_destroy'd before return — possible leak; confirm upstream */
      bson_init (&legacy_filter);
      bson_copy_to_excluding_noinit (filter, &legacy_filter, "name", NULL);
      /* We must db-qualify filters on name. */
      bson_string_t *buf = bson_string_new (database->name);
      bson_string_append_c (buf, '.');
      bson_string_append (buf, col_filter);
      BSON_APPEND_UTF8 (&legacy_filter, "name", buf->str);
      bson_string_free (buf, true);
      filter = &legacy_filter;
   }

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
   cursor = mongoc_collection_find (col, MONGOC_QUERY_NONE, 0, 0, 0,
                                    filter ? filter : &q,
                                    NULL, read_prefs);
   ret = bson_new ();
   BSON_APPEND_ARRAY_BEGIN (ret, "collections", &col_array);
   while (mongoc_cursor_more (cursor) &&
          !mongoc_cursor_error (cursor, error)) {
      if (mongoc_cursor_next (cursor, &doc)) {
         /* 2 gotchas here.
          * 1. need to ignore any system collections (prefixed with $)
          * 2. need to remove the database name from the collection so that
          *    clients don't need to specialize their logic for old versions
          *    of the server. */
         if (bson_iter_init_find (&iter, doc, "name") &&
             BSON_ITER_HOLDS_UTF8 (&iter) &&
             (name = bson_iter_utf8 (&iter, NULL)) &&
             !strchr (name, '$') &&
             (0 == strncmp (name, database->name, dbname_len))) {
            bson_t nprefix_col = BSON_INITIALIZER;
            bson_copy_to_excluding_noinit (doc, &nprefix_col, "name", NULL);
            BSON_APPEND_UTF8 (&nprefix_col, "name",
                              name + (dbname_len + 1)); /* +1 for the '.' */
            /* need to construct a key for this array element. */
            bson_uint32_to_string (n_cols, &key, keystr, sizeof (keystr));
            BSON_APPEND_DOCUMENT (&col_array, key, &nprefix_col);
            ++n_cols;
         }
      }
   }
   bson_append_array_end (ret, &col_array);

   mongoc_cursor_destroy (cursor);
   mongoc_read_prefs_destroy (read_prefs);

cleanup_filter:
   mongoc_collection_destroy (col);
   return ret;
}
/* R binding: report whether the MongoDB cursor may yield more documents.
 * @ptr is an external pointer wrapping a mongoc_cursor_t; returns an R
 * logical scalar. */
SEXP R_mongo_cursor_more (SEXP ptr){
  mongoc_cursor_t *cur = r2cursor(ptr);
  int has_more = mongoc_cursor_more(cur);
  return ScalarLogical(has_more);
}