/* Find the most recently uploaded GridFS file matching `query` and load
 * its metadata into `gfile`.
 * Returns MONGO_OK on success, MONGO_ERROR when no document matches. */
int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile ) {
    bson orderby;
    bson wrapped;
    bson result;
    int found;

    /* Sort by uploadDate descending so the newest file wins. */
    bson_init( &orderby );
    bson_append_int( &orderby, "uploadDate", -1 );
    bson_finish( &orderby );

    /* Wrap the caller's query with the orderby clause. */
    bson_init( &wrapped );
    bson_append_bson( &wrapped, "query", query );
    bson_append_bson( &wrapped, "orderby", &orderby );
    bson_finish( &wrapped );

    found = ( mongo_find_one( gfs->client, gfs->files_ns,
                              &wrapped, NULL, &result ) == MONGO_OK );

    bson_destroy( &orderby );
    bson_destroy( &wrapped );

    if ( !found )
        return MONGO_ERROR;

    gridfile_init( gfs, &result, gfile );
    bson_destroy( &result );
    return MONGO_OK;
}
/* R binding: run a MongoDB find() on namespace `ns`.
 * If a non-empty sort document is given, the query is wrapped in a
 * {$query, $orderby} envelope. `options` is an integer vector whose
 * elements are OR-ed into the cursor option flags.
 * Returns an R external pointer wrapping the mongo_cursor. */
SEXP rmongo_find(SEXP mongo_conn, SEXP ns, SEXP query, SEXP sort, SEXP fields, SEXP limit, SEXP skip, SEXP options) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* _ns = CHAR(STRING_ELT(ns, 0));
    bson* _query = _checkBSON(query);
    bson* _sort = _checkBSON(sort);
    bson wrapped;
    bson* effective_query = _query;

    /* An empty bson document is exactly 5 bytes; only wrap when the caller
     * supplied a real sort specification. */
    if (_sort != NULL && bson_size(_sort) > 5) {
        effective_query = &wrapped;
        bson_init(effective_query);
        bson_append_bson(effective_query, "$query", _query);
        bson_append_bson(effective_query, "$orderby", _sort);
        bson_finish(effective_query);
    }

    bson* _fields = _checkBSON(fields);
    int _limit = asInteger(limit);
    int _skip = asInteger(skip);

    /* Fold the option flags together. */
    int _options = 0;
    int n_opts = LENGTH(options);
    int k;
    for (k = 0; k < n_opts; k++)
        _options |= INTEGER(options)[k];

    mongo_cursor* cursor = mongo_find(conn, _ns, effective_query, _fields,
                                      _limit, _skip, _options);

    if (effective_query == &wrapped)
        bson_destroy(&wrapped);

    return _mongo_cursor_create(cursor);
}
/* Legacy (bson_buffer) variant: find the most recently uploaded GridFS file
 * matching `query` and initialize `gfile` with it.
 * Returns TRUE on success, FALSE when nothing matches. */
int gridfs_find_query(gridfs* gfs, bson* query, gridfile* gfile )
{
    bson_buffer date_buffer;
    bson uploadDate;
    bson_buffer buf;
    bson finalQuery;
    bson out;
    int found;

    /* orderby: uploadDate descending -> newest file first */
    bson_buffer_init(&date_buffer);
    bson_append_int(&date_buffer, "uploadDate", -1);
    bson_from_buffer(&uploadDate, &date_buffer);

    /* wrap the caller's query together with the orderby clause */
    bson_buffer_init(&buf);
    bson_append_bson(&buf, "query", query);
    bson_append_bson(&buf, "orderby", &uploadDate);
    bson_from_buffer(&finalQuery, &buf);

    found = (mongo_find_one(gfs->client, gfs->files_ns, &finalQuery, NULL, &out));
    bson_destroy(&uploadDate);
    bson_destroy(&finalQuery);

    if (!found)
        return FALSE;

    gridfile_init(gfs, &out, gfile);
    bson_destroy(&out);
    return TRUE;
}
/* Return a cursor over `size` chunk documents of `gfile`, starting at chunk
 * index `start`, ordered by chunk number `n`.
 * Supports files whose _id is an OID, a string, or an int; returns NULL for
 * any other _id type. */
mongo_cursor* gridfile_get_chunks(gridfile* gfile, int start, int size)
{
    bson_iterator it;
    bson_oid_t id;
    bson_buffer gte_buf;
    bson gte_bson;
    bson_buffer query_buf;
    bson query_bson;
    bson_buffer orderby_buf;
    bson orderby_bson;
    bson_buffer command_buf;
    bson command_bson;
    bson_type type;
    const char *id_str;
    int id_int;

    /* Chunks are keyed on the parent file's _id via "files_id". */
    type = bson_find(&it, gfile->meta, "_id");
    if (type == bson_oid) {
        id = *bson_iterator_oid(&it);
        bson_buffer_init(&query_buf);
        bson_append_oid(&query_buf, "files_id", &id);
    } else if (type == bson_string) {
        id_str = bson_iterator_string(&it);
        bson_buffer_init(&query_buf);
        bson_append_string(&query_buf, "files_id", id_str);
    } else if (type == bson_int) {
        /* FIX: this branch previously re-tested bson_string, making the
         * int-_id case unreachable even though it reads bson_iterator_int. */
        id_int = bson_iterator_int(&it);
        bson_buffer_init(&query_buf);
        bson_append_int(&query_buf, "files_id", id_int);
    } else {
        return NULL;
    }

    if (size == 1) {
        /* single chunk: match its index exactly */
        bson_append_int(&query_buf, "n", start);
    } else {
        /* several chunks: n >= start, capped by the cursor limit below */
        bson_buffer_init(&gte_buf);
        bson_append_int(&gte_buf, "$gte", start);
        bson_from_buffer(&gte_bson, &gte_buf);
        bson_append_bson(&query_buf, "n", &gte_bson);
    }
    bson_from_buffer(&query_bson, &query_buf);

    /* order chunks ascending by chunk number */
    bson_buffer_init(&orderby_buf);
    bson_append_int(&orderby_buf, "n", 1);
    bson_from_buffer(&orderby_bson, &orderby_buf);

    bson_buffer_init(&command_buf);
    bson_append_bson(&command_buf, "query", &query_bson);
    bson_append_bson(&command_buf, "orderby", &orderby_bson);
    bson_from_buffer(&command_bson, &command_buf);

    return mongo_find(gfile->gfs->client, gfile->gfs->chunks_ns,
                      &command_bson, NULL, size, 0, 0);
}
/* Count the documents in db.ns that match `query` by issuing the server-side
 * "count" command. Returns the count, or -1 if the command fails or the
 * reply has no "n" field. Driver exceptions are propagated via
 * MONGO_RETHROW after freeing the command document. */
int64_t mongo_count(mongo_connection* conn, const char* db, const char* ns, bson* query){
    bson_buffer bb;
    bson cmd;
    bson out;
    int64_t count = -1;
    bson_buffer_init(&bb);
    bson_append_string(&bb, "count", ns);
    if (query && bson_size(query) > 5) /* not empty */
        bson_append_bson(&bb, "query", query);
    bson_from_buffer(&cmd, &bb);
    /* MONGO_TRY/MONGO_CATCH is the legacy driver's setjmp-based exception
     * mechanism; the catch block must free `cmd` before rethrowing. */
    MONGO_TRY{
        if(mongo_run_command(conn, db, &cmd, &out)){
            bson_iterator it;
            if(bson_find(&it, &out, "n"))
                count = bson_iterator_long(&it);
        }
    }MONGO_CATCH{
        bson_destroy(&cmd);
        MONGO_RETHROW();
    }
    /* NOTE(review): `out` is destroyed even when mongo_run_command returned
     * failure — assumes the driver leaves it in a destroyable state; confirm. */
    bson_destroy(&cmd);
    bson_destroy(&out);
    return count;
}
/* Count documents matching `query` in db.ns using the "count" command.
 * Returns the count (or -1 if the reply lacks an "n" field), and
 * MONGO_ERROR when the command itself fails. */
int64_t mongo_count( mongo *conn, const char *db, const char *ns, bson *query ) {
    bson cmd;
    bson reply;
    int64_t total = -1;

    /* zero the reply so it is safe to destroy on the failure path */
    memset( &reply, 0, sizeof( reply ) );

    bson_init( &cmd );
    bson_append_string( &cmd, "count", ns );
    /* an empty bson document is 5 bytes; skip the query clause when empty */
    if ( query && bson_size( query ) > 5 )
        bson_append_bson( &cmd, "query", query );
    bson_finish( &cmd );

    if ( mongo_run_command( conn, db, &cmd, &reply ) != MONGO_OK ) {
        bson_destroy( &reply );
        bson_destroy( &cmd );
        return MONGO_ERROR;
    }

    {
        bson_iterator it;
        if ( bson_find( &it, &reply, "n" ) )
            total = bson_iterator_long( &it );
    }
    bson_destroy( &cmd );
    bson_destroy( &reply );
    return total;
}
/* Return a cursor over `size` chunk documents of `gfile`, starting at chunk
 * index `start`, ordered ascending by chunk number `n`.
 * NOTE: this variant assumes the file's _id is an OID. */
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) {
    bson_iterator it;
    bson_oid_t files_id;
    bson range;
    bson query;
    bson orderby;
    bson command;
    mongo_cursor *cursor;

    /* Chunks are keyed by the parent file's _id. */
    bson_find( &it, gfile->meta, "_id" );
    files_id = *bson_iterator_oid( &it );

    bson_init( &query );
    bson_append_oid( &query, "files_id", &files_id );
    if ( size == 1 ) {
        /* single chunk: match its index exactly */
        bson_append_int( &query, "n", start );
    } else {
        /* several chunks: n >= start, capped by the cursor limit below */
        bson_init( &range );
        bson_append_int( &range, "$gte", start );
        bson_finish( &range );
        bson_append_bson( &query, "n", &range );
        bson_destroy( &range );
    }
    bson_finish( &query );

    bson_init( &orderby );
    bson_append_int( &orderby, "n", 1 );
    bson_finish( &orderby );

    bson_init( &command );
    bson_append_bson( &command, "query", &query );
    bson_append_bson( &command, "orderby", &orderby );
    bson_finish( &command );

    cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns,
                         &command, NULL, size, 0, 0 );

    bson_destroy( &command );
    bson_destroy( &query );
    bson_destroy( &orderby );
    return cursor;
}
void MongodbObject::EnableForEdit() { bson *newObject = new bson(); bson_init(newObject); bson_iterator it; bson_iterator_init(&it, objectData->object); while (bson_iterator_next(&it)) { const char * key = bson_iterator_key(&it); bson_type type = bson_iterator_type(&it); switch (type) { case BSON_STRING: bson_append_string(newObject, key, bson_iterator_string(&it)); break; case BSON_INT: bson_append_int(newObject, key, bson_iterator_int(&it)); break; case BSON_LONG: bson_append_long(newObject, key, bson_iterator_long(&it)); break; case BSON_DOUBLE: bson_append_double(newObject, key, bson_iterator_double(&it)); break; case BSON_OBJECT: bson sub; bson_iterator_subobject(&it, &sub); bson_append_bson(newObject, key, &sub); break; case BSON_OID: bson_append_oid(newObject, key, bson_iterator_oid(&it)); break; default: break; } } bson_destroy(objectData->object); SafeDelete(objectData->object); objectData->object = newObject; }
// Append every supported field of `obj` onto the member bson `object`.
// Unknown BSON types trigger an assert and an error log entry.
void InitWith(bson *obj)
{
    bson_iterator it;
    bson_iterator_init(&it, obj);
    while (bson_iterator_next(&it))
    {
        const char *key = bson_iterator_key(&it);
        const bson_type type = bson_iterator_type(&it);
        switch (type)
        {
            case BSON_STRING:
                bson_append_string(object, key, bson_iterator_string(&it));
                break;
            case BSON_INT:
                bson_append_int(object, key, bson_iterator_int(&it));
                break;
            case BSON_LONG:
                bson_append_long(object, key, bson_iterator_long(&it));
                break;
            case BSON_DOUBLE:
                bson_append_double(object, key, bson_iterator_double(&it));
                break;
            case BSON_OBJECT:
            {
                bson sub;
                bson_iterator_subobject(&it, &sub);
                bson_append_bson(object, key, &sub);
                break;
            }
            case BSON_OID:
                bson_append_oid(object, key, bson_iterator_oid(&it));
                break;
            default:
                DVASSERT(false);
                Logger::Error("[MongodbObjectInternalData::InitWith] Not implemented type: %d", type);
                break;
        }
    }
}
/* Insert the metadata document for a stored file into the <prefix>.files
 * collection. Runs the server-side "filemd5" command first so the stored
 * md5 reflects the chunks already written.
 * Returns MONGO_OK on success, otherwise the failing step's error code. */
static int gridfs_insert_file2( gridfs *gfs, const char *name,
                                const bson_oid_t id, gridfs_offset length,
                                const char *contenttype, gridfile* gfile ) {
    bson command;
    bson md5_reply;
    bson meta;
    bson_iterator it;
    int status;
    int64_t upload_millis;

    /* Ask the server to compute the md5 of the stored chunks. */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    status = mongo_run_command( gfs->client, gfs->dbname, &command, &md5_reply );
    bson_destroy( &command );
    if ( status != MONGO_OK )
        return status;

    /* Build the files-collection document. */
    bson_init( &meta );
    bson_append_oid( &meta, "_id", &id );
    if ( name != NULL && *name != '\0' )
        bson_append_string( &meta, "filename", name );
    bson_append_long( &meta, "length", length );
    bson_append_int( &meta, "chunkSize", DEFAULT_CHUNK_SIZE );
    /* uploadDate is milliseconds since the epoch */
    upload_millis = ( bson_date_t )1000 * time( NULL );
    bson_append_date( &meta, "uploadDate", upload_millis );
    bson_find( &it, &md5_reply, "md5" );
    bson_append_string( &meta, "md5", bson_iterator_string( &it ) );
    bson_destroy( &md5_reply );
    if ( contenttype != NULL && *contenttype != '\0' )
        bson_append_string( &meta, "contentType", contenttype );
    bson_append_bson( &meta, "metadata", gfile->meta );
    bson_finish( &meta );

    status = mongo_insert( gfs->client, gfs->files_ns, &meta );
    bson_destroy( &meta );
    return status;
}
/* Create an index on collection `ns` with the given key specification and
 * MONGO_INDEX_* option flags by inserting into <db>.system.indexes, then
 * return the result of getLastError for that db (reply stored in `out`). */
int mongo_create_index(mongo_connection * conn, const char * ns, bson * key, int options, bson * out){
    bson_buffer bb;
    bson b;
    bson_iterator it;
    /* index name: leading '_' followed by the key field names concatenated */
    char name[255] = {'_'};
    int i = 1;
    char idxns[1024];

    bson_iterator_init(&it, key->data);
    while(i < 255 && bson_iterator_next(&it)){
        /* NOTE(review): strncpy may leave the tail unterminated when a key
         * is truncated; the name[254]='\0' below is the backstop. Keys are
         * concatenated with no separator — presumably intentional; confirm. */
        strncpy(name + i, bson_iterator_key(&it), 255 - i);
        i += strlen(bson_iterator_key(&it));
    }
    name[254] = '\0';

    /* build the system.indexes document */
    bson_buffer_init(&bb);
    bson_append_bson(&bb, "key", key);
    bson_append_string(&bb, "ns", ns);
    bson_append_string(&bb, "name", name);
    if (options & MONGO_INDEX_UNIQUE)
        bson_append_bool(&bb, "unique", 1);
    if (options & MONGO_INDEX_DROP_DUPS)
        bson_append_bool(&bb, "dropDups", 1);
    if (options & MONGO_INDEX_BACKGROUND)
        bson_append_bool(&bb, "background", 1);
    if (options & MONGO_INDEX_SPARSE)
        bson_append_bool(&bb, "sparse", 1);
    bson_from_buffer(&b, &bb);

    /* "<db>.system.indexes": replace everything after the first '.' */
    strncpy(idxns, ns, 1024-16);
    strcpy(strchr(idxns, '.'), ".system.indexes");
    mongo_insert(conn, idxns, &b);
    bson_destroy(&b);

    *strchr(idxns, '.') = '\0'; /* just db not ns */
    return mongo_cmd_get_last_error(conn, idxns, out);
}
/* EJDB unit test: build a nested query document with bson, compile it with
 * ejdbcreatequery, and verify the resulting flattened query-field list
 * (field paths, expressions, and tokyocabinet comparison ops). */
void testBuildQuery1() {
    CU_ASSERT_PTR_NOT_NULL_FATAL(jb);
    /* Query = { "name" : Петров Петр,
                 "age" : 33,
                 "family" : {
                     "wife" : { "name" : "Jeniffer", "age" : {"$gt" : 25}, "phone" : "444-111" },
                     "children" : [ { "name" : "Dasha", "age" : {"$in" : [1, 4, 10]} } ] } */
    bson q1;
    bson_init_as_query(&q1);
    bson_append_string(&q1, "name", "Петров Петр");
    bson_append_int(&q1, "age", 33);

    /* family.wife sub-document, with a nested {"$gt": 25} on age */
    bson q1family_wife;
    bson_init_as_query(&q1family_wife);
    bson_append_string(&q1family_wife, "name", "Jeniffer");
    bson_append_start_object(&q1family_wife, "age");
    bson_append_int(&q1family_wife, "$gt", 25);
    bson_append_finish_object(&q1family_wife);
    bson_append_string(&q1family_wife, "phone", "444-111");
    bson_finish(&q1family_wife);

    bson q1family_child;
    bson_init_as_query(&q1family_child);
    bson_append_string(&q1family_child, "name", "Dasha");

    /* "age" : {"$in" : [1, 4, 10]} */
    bson q1family_child_age_IN;
    bson_init_as_query(&q1family_child_age_IN);
    bson_append_start_array(&q1family_child_age_IN, "$in");
    bson_append_int(&q1family_child_age_IN, "0", 1);
    bson_append_int(&q1family_child_age_IN, "1", 4);
    bson_append_int(&q1family_child_age_IN, "2", 10);
    bson_append_finish_array(&q1family_child_age_IN);
    bson_finish(&q1family_child_age_IN);
    bson_append_bson(&q1family_child, "age", &q1family_child_age_IN);
    bson_finish(&q1family_child);

    /* assemble the family document: wife object + children array */
    bson q1family;
    bson_init_as_query(&q1family);
    bson_append_bson(&q1family, "wife", &q1family_wife);
    bson_append_start_array(&q1family, "children");
    bson_append_bson(&q1family, "0", &q1family_child);
    bson_append_finish_array(&q1family);
    bson_finish(&q1family);

    bson_append_bson(&q1, "family", &q1family);
    bson_finish(&q1);

    /* none of the builders may have recorded an error */
    CU_ASSERT_FALSE_FATAL(q1.err);
    CU_ASSERT_FALSE_FATAL(q1family.err);
    CU_ASSERT_FALSE_FATAL(q1family_wife.err);
    CU_ASSERT_FALSE_FATAL(q1family_child.err);
    CU_ASSERT_FALSE_FATAL(q1family_child_age_IN.err);

    /* compile the query; the bsons can be freed afterwards */
    EJQ *ejq = ejdbcreatequery(jb, &q1, NULL, 0, NULL);
    CU_ASSERT_PTR_NOT_NULL_FATAL(ejq);
    bson_destroy(&q1);
    bson_destroy(&q1family);
    bson_destroy(&q1family_wife);
    bson_destroy(&q1family_child);
    bson_destroy(&q1family_child_age_IN);

    /* the nested query flattens into 7 dot-separated field constraints */
    CU_ASSERT_PTR_NOT_NULL_FATAL(ejq->qobjlist);
    TCLIST *qmap = ejq->qobjlist;
    CU_ASSERT_EQUAL(qmap->num, 7);
    for (int i = 0; i < TCLISTNUM(qmap); ++i) {
        const EJQF *qf = TCLISTVALPTR(qmap, i);
        CU_ASSERT_PTR_NOT_NULL_FATAL(qf);
        const char* key = qf->fpath;
        switch (i) {
            case 0:
            {
                CU_ASSERT_STRING_EQUAL(key, "name");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "Петров Петр");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCSTREQ);
                break;
            }
            case 1:
            {
                CU_ASSERT_STRING_EQUAL(key, "age");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "33");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCNUMEQ);
                break;
            }
            case 2:
            {
                CU_ASSERT_STRING_EQUAL(key, "family.wife.name");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "Jeniffer");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCSTREQ);
                break;
            }
            case 3:
            {
                CU_ASSERT_STRING_EQUAL(key, "family.wife.age");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "25");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCNUMGT);
                break;
            }
            case 4:
            {
                CU_ASSERT_STRING_EQUAL(key, "family.wife.phone");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "444-111");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCSTREQ);
                break;
            }
            case 5:
            {
                CU_ASSERT_STRING_EQUAL(key, "family.children.0.name");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_STRING_EQUAL(qf->expr, "Dasha");
                CU_ASSERT_EQUAL(qf->tcop, TDBQCSTREQ);
                break;
            }
            case 6:
            {
                /* the $in array is stored as a serialized TCLIST */
                CU_ASSERT_STRING_EQUAL(key, "family.children.0.age");
                CU_ASSERT_PTR_NOT_NULL(qf);
                CU_ASSERT_EQUAL(qf->ftype, BSON_ARRAY);
                TCLIST *al = tclistload(qf->expr, qf->exprsz);
                char* als = tcstrjoin(al, ',');
                CU_ASSERT_STRING_EQUAL(als, "1,4,10");
                TCFREE(als);
                tclistdel(al);
                CU_ASSERT_EQUAL(qf->tcop, TDBQCNUMOREQ);
                break;
            }
        }
    }
    ejdbquerydel(ejq);
}
/**
 * \brief Save the current version of a tag group into `bson_tg` under the
 * key `version` (decimal string).
 *
 * The version document contains the group's crc32 and a "tags" object that
 * maps each tag id to {data_type, count, custom_type, data[]}.
 */
static void vs_mongo_taggroup_save_version(struct VSTagGroup *tg,
		bson *bson_tg,
		uint32 version)
{
	bson bson_version;
	bson bson_tag;
	struct VBucket *bucket;
	struct VSTag *tag;
	char str_num[15];
	int item_id;

	bson_init(&bson_version);
	bson_append_int(&bson_version, "crc32", tg->crc32);
	bson_append_start_object(&bson_version, "tags");
	bucket = tg->tags.lb.first;
	while(bucket != NULL) {
		tag = (struct VSTag*)bucket->data;

		bson_init(&bson_tag);
		bson_append_int(&bson_tag, "data_type", tag->data_type);
		bson_append_int(&bson_tag, "count", tag->count);
		bson_append_int(&bson_tag, "custom_type", tag->custom_type);

		/* tag values are stored as a BSON array with decimal string keys */
		bson_append_start_array(&bson_tag, "data");
		switch(tag->data_type) {
		case VRS_VALUE_TYPE_UINT8:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint8*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT16:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint16*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT32:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint32*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT64:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_long(&bson_tag, str_num, ((uint64*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_REAL16:
			/* TODO */
			break;
		case VRS_VALUE_TYPE_REAL32:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_double(&bson_tag, str_num, ((float*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_REAL64:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_double(&bson_tag, str_num, ((double*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_STRING8:
			/* NOTE(review): only one string element is stored regardless of
			 * tag->count — confirm whether STRING8 tags are single-valued. */
			bson_append_string(&bson_tag, "0", (char*)tag->value);
			break;
		}
		bson_append_finish_array(&bson_tag);
		bson_finish(&bson_tag);

		sprintf(str_num, "%d", tag->id);
		bson_append_bson(&bson_version, str_num, &bson_tag);
		/* FIX: bson_append_bson() copies the data; the temporary per-tag
		 * document must be destroyed or its buffer leaks on every save. */
		bson_destroy(&bson_tag);

		bucket = bucket->next;
	}
	bson_append_finish_object(&bson_version);
	bson_finish(&bson_version);

	sprintf(str_num, "%u", version);
	bson_append_bson(bson_tg, str_num, &bson_version);
	/* FIX: release the temporary version document (copied into bson_tg) */
	bson_destroy(&bson_version);
}
/**
 * \brief This function tries to update tag group in MongoDB. It adds new
 * version of data.
 *
 * \return Returns 1 on success, 0 on failure.
 */
int vs_mongo_taggroup_update(struct VS_CTX *vs_ctx,
		struct VSNode *node,
		struct VSTagGroup *tg)
{
	bson cond, op;
	bson bson_version;
	int ret;

	/* TODO: delete old version, when there is too much versions:
	int old_saved_version = tg->saved_version;
	*/

	/* Selector: match the tag group document by its ObjectId and by the
	 * node/taggroup ids as an extra safety check. */
	bson_init(&cond);
	{
		bson_append_oid(&cond, "_id", &tg->oid);
		/* To be sure that right tag group will be updated */
		bson_append_int(&cond, "node_id", node->id);
		bson_append_int(&cond, "taggroup_id", tg->id);
	}
	bson_finish(&cond);

	bson_init(&op);
	{
		/* Update item current_version in document */
		bson_append_start_object(&op, "$set");
		{
			bson_append_int(&op, "current_version", tg->version);
		}
		bson_append_finish_object(&op);

		/* Create new bson object representing current version and add it to
		 * the object versions */
		/* NOTE(review): this emits a second "$set" key in the same update
		 * document; MongoDB generally expects a single $set with multiple
		 * fields — confirm both assignments are actually applied. Also note
		 * "versions" is replaced wholesale with the new version document. */
		bson_append_start_object(&op, "$set");
		{
			bson_init(&bson_version);
			{
				vs_mongo_taggroup_save_version(tg, &bson_version, UINT32_MAX);
			}
			bson_finish(&bson_version);
			bson_append_bson(&op, "versions", &bson_version);
		}
		bson_append_finish_object(&op);
	}
	bson_finish(&op);

	ret = mongo_update(vs_ctx->mongo_conn, vs_ctx->mongo_tg_ns, &cond, &op,
			MONGO_UPDATE_BASIC, 0);

	bson_destroy(&bson_version);
	bson_destroy(&cond);
	bson_destroy(&op);

	if(ret != MONGO_OK) {
		v_print_log(VRS_PRINT_ERROR,
				"Unable to update tag group %d to MongoDB: %s, error: %s\n",
				tg->id, vs_ctx->mongo_tg_ns,
				mongo_get_server_err_string(vs_ctx->mongo_conn));
		return 0;
	}

	return 1;
}
void MongodbObject::AddObject(const String &fieldname, DAVA::MongodbObject *addObject) { BSON_VERIFY(bson_append_bson(objectData->object, fieldname.c_str(), addObject->objectData->object)); }
EXPORT int mongo_bson_buffer_append_bson(struct bson_buffer* b, char* name, struct bson_* bs) { return (bson_append_bson((bson*)b, name, (bson*)bs) == BSON_OK); }