/* Insert the metadata document for a fully written GridFS file into the
 * "files" collection.
 *
 * First runs the server-side "filemd5" command so the stored md5 reflects
 * the server's view of the chunks, then builds and inserts the file document
 * {_id, filename?, length, chunkSize, uploadDate, md5, contentType?}.
 *
 * Returns the result of mongo_insert() (MONGO_OK on success). */
static int gridfs_insert_file( gridfs *gfs, const char *name,
                               const bson_oid_t id, gridfs_offset length,
                               const char *contenttype ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;

    /* Ask the server for the md5 of the chunks already written for this id. */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    /* BUG FIX: the command used to execute inside assert(), so building with
     * NDEBUG silently skipped it.  Run it unconditionally and assert on the
     * saved status instead. */
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    assert( result == MONGO_OK );
    bson_destroy( &command );

    /* Build and insert the file metadata document. */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    /* uploadDate is stored as milliseconds since the epoch. */
    bson_append_date( &ret, "uploadDate", ( bson_date_t )1000 * time( NULL ) );
    bson_find( &it, &res, "md5" );
    bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    bson_destroy( &res );
    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }
    bson_finish( &ret );

    result = mongo_insert( gfs->client, gfs->files_ns, &ret );
    bson_destroy( &ret );
    return result;
}
/* Regression test for mongoc_collection_insert():
 *  - inserts ten documents with freshly generated OIDs.  NOTE(review): the
 *    UTF-8 value "/world" is appended with length 5, i.e. truncated to
 *    "/worl" — presumably intentional, mirrors the sibling tests; confirm.
 *  - then verifies that a document whose top-level key starts with '$'
 *    ("$hello") is rejected client-side with MONGOC_ERROR_BSON /
 *    MONGOC_ERROR_BSON_INVALID.
 * The initial mongoc_collection_drop() result is deliberately ignored
 * (dropping a non-existent collection fails harmlessly). */
static void test_insert (void) { mongoc_collection_t *collection; mongoc_client_t *client; bson_context_t *context; bson_error_t error; bool r; bson_oid_t oid; unsigned i; bson_t b; client = mongoc_client_new(gTestUri); ASSERT (client); collection = mongoc_client_get_collection(client, "test", "test"); ASSERT (collection); mongoc_collection_drop(collection, &error); context = bson_context_new(BSON_CONTEXT_NONE); ASSERT (context); for (i = 0; i < 10; i++) { bson_init(&b); bson_oid_init(&oid, context); bson_append_oid(&b, "_id", 3, &oid); bson_append_utf8(&b, "hello", 5, "/world", 5); r = mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &b, NULL, &error); if (!r) { MONGOC_WARNING("%s\n", error.message); } ASSERT (r); bson_destroy(&b); } bson_init (&b); BSON_APPEND_INT32 (&b, "$hello", 1); r = mongoc_collection_insert (collection, MONGOC_INSERT_NONE, &b, NULL, &error); ASSERT (!r); ASSERT (error.domain == MONGOC_ERROR_BSON); ASSERT (error.code == MONGOC_ERROR_BSON_INVALID); bson_destroy (&b); mongoc_collection_destroy(collection); bson_context_destroy(context); mongoc_client_destroy(client); }
static void test_save (void) { mongoc_collection_t *collection; mongoc_database_t *database; mongoc_client_t *client; bson_context_t *context; bson_error_t error; bool r; bson_oid_t oid; unsigned i; bson_t b; client = mongoc_client_new(gTestUri); ASSERT (client); database = get_test_database (client); ASSERT (database); collection = get_test_collection (client, "test_save"); ASSERT (collection); mongoc_collection_drop (collection, &error); context = bson_context_new(BSON_CONTEXT_NONE); ASSERT (context); for (i = 0; i < 10; i++) { bson_init(&b); bson_oid_init(&oid, context); bson_append_oid(&b, "_id", 3, &oid); bson_append_utf8(&b, "hello", 5, "/world", 5); r = mongoc_collection_save(collection, &b, NULL, &error); if (!r) { MONGOC_WARNING("%s\n", error.message); } ASSERT (r); bson_destroy(&b); } bson_destroy (&b); r = mongoc_collection_drop (collection, &error); ASSERT (r); mongoc_collection_destroy(collection); mongoc_database_destroy(database); bson_context_destroy(context); mongoc_client_destroy(client); }
/* Remove every GridFS file whose "filename" equals `filename`, deleting
 * both its document in the files collection and all of its chunks.
 * Iterates the matching file documents, extracting each _id and issuing
 * two removes: one on files_ns by _id, one on chunks_ns by files_id. */
void gridfs_remove_filename( gridfs *gfs, const char *filename ) { bson query; mongo_cursor *files; bson file; bson_iterator it; bson_oid_t id; bson b; bson_init( &query ); bson_append_string( &query, "filename", filename ); bson_finish( &query ); files = mongo_find( gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0 ); bson_destroy( &query ); /* Remove each file and its chunks from files named filename */ while ( mongo_cursor_next( files ) == MONGO_OK ) { file = files->current; bson_find( &it, &file, "_id" ); id = *bson_iterator_oid( &it ); /* Remove the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->files_ns, &b ); bson_destroy( &b ); /* Remove all chunks from the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "files_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->chunks_ns, &b ); bson_destroy( &b ); } mongo_cursor_destroy( files ); }
/* Stress bson_as_json(): build a document containing every major BSON type
 * (utf8, int32/64, double, undefined, null, OID, bools, date, timestamp,
 * regex, dbpointer, min/max keys, symbol, a shared sub-document used both
 * as "document" and "array", and a 5-byte binary), then serialize it to
 * JSON 1000 times — presumably to surface leaks or instability in the JSON
 * writer under repetition. */
static void test_bson_as_json (void) { bson_oid_t oid; bson_t *b; bson_t *b2; char *str; size_t len; int i; bson_oid_init_from_string(&oid, "123412341234abcdabcdabcd"); b = bson_new(); assert(bson_append_utf8(b, "utf8", -1, "bar", -1)); assert(bson_append_int32(b, "int32", -1, 1234)); assert(bson_append_int64(b, "int64", -1, 4321)); assert(bson_append_double(b, "double", -1, 123.4)); assert(bson_append_undefined(b, "undefined", -1)); assert(bson_append_null(b, "null", -1)); assert(bson_append_oid(b, "oid", -1, &oid)); assert(bson_append_bool(b, "true", -1, true)); assert(bson_append_bool(b, "false", -1, false)); assert(bson_append_time_t(b, "date", -1, time(NULL))); assert(bson_append_timestamp(b, "timestamp", -1, (uint32_t)time(NULL), 1234)); assert(bson_append_regex(b, "regex", -1, "^abcd", "xi")); assert(bson_append_dbpointer(b, "dbpointer", -1, "mycollection", &oid)); assert(bson_append_minkey(b, "minkey", -1)); assert(bson_append_maxkey(b, "maxkey", -1)); assert(bson_append_symbol(b, "symbol", -1, "var a = {};", -1)); b2 = bson_new(); assert(bson_append_int32(b2, "0", -1, 60)); assert(bson_append_document(b, "document", -1, b2)); assert(bson_append_array(b, "array", -1, b2)); { const uint8_t binary[] = { 0, 1, 2, 3, 4 }; assert(bson_append_binary(b, "binary", -1, BSON_SUBTYPE_BINARY, binary, sizeof binary)); } for (i = 0; i < 1000; i++) { str = bson_as_json(b, &len); bson_free(str); } bson_destroy(b); bson_destroy(b2); }
/* Copy the fields of `obj` into this wrapper's `object` member, element by
 * element.  Only string, int, long, double, sub-object and OID types are
 * handled; any other type trips DVASSERT and logs an error.
 * NOTE(review): BSON arrays, booleans, dates etc. fall into the unsupported
 * default branch — confirm callers never store those types. */
void InitWith(bson *obj) { bson_iterator it; bson_iterator_init(&it, obj); while (bson_iterator_next(&it)) { const char * key = bson_iterator_key(&it); bson_type type = bson_iterator_type(&it); switch (type) { case BSON_STRING: bson_append_string(object, key, bson_iterator_string(&it)); break; case BSON_INT: bson_append_int(object, key, bson_iterator_int(&it)); break; case BSON_LONG: bson_append_long(object, key, bson_iterator_long(&it)); break; case BSON_DOUBLE: bson_append_double(object, key, bson_iterator_double(&it)); break; case BSON_OBJECT: { bson sub; bson_iterator_subobject(&it, &sub); bson_append_bson(object, key, &sub); break; } case BSON_OID: bson_append_oid(object, key, bson_iterator_oid(&it)); break; default: DVASSERT(false); Logger::Error("[MongodbObjectInternalData::InitWith] Not implemented type: %d", type); break; } } }
/* bson_append_oid() output must be byte-identical to the canned fixture
 * document test22.bson. */
static void test_bson_append_oid (void)
{
   bson_oid_t oid;
   bson_t *actual;
   bson_t *expected;

   bson_oid_init_from_string(&oid, "1234567890abcdef1234abcd");

   actual = bson_new();
   assert(bson_append_oid(actual, "oid", -1, &oid));

   expected = get_bson("test22.bson");
   assert_bson_equal(actual, expected);

   bson_destroy(actual);
   bson_destroy(expected);
}
/* Allocate and build one GridFS chunk document {files_id, n, data}.
 * The binary subtype passed to bson_append_binary is the literal 2 — the
 * deprecated "old binary" subtype historically used by GridFS chunks.
 * Returns NULL if the bson wrapper cannot be allocated.
 * NOTE(review): the return of bson_from_buffer is not checked — confirm
 * failure there is impossible or handled by the caller. */
static bson * chunk_new(bson_oid_t id, int chunkNumber, const char * data, int len) { bson * b; bson_buffer buf; b = (bson *)malloc(sizeof(bson)); if (b == NULL) return NULL; bson_buffer_init(&buf); bson_append_oid(&buf, "files_id", &id); bson_append_int(&buf, "n", chunkNumber); bson_append_binary(&buf, "data", 2, data, len); bson_from_buffer(b, &buf); return b; }
/* Build and return a bson document exercising every libmongo-client append
 * helper: double, string, embedded document, array, binary (with an
 * embedded NUL), OID, boolean, UTC datetime, timestamp, null, regex,
 * javascript (plain and with scope), symbol, int32 and int64.
 * The helper documents d/a/scope are freed here after the append calls;
 * the caller owns the returned document. */
bson * test_bson_generate_full (void) { bson *b, *d, *a, *scope; guint8 oid[] = "1234567890ab"; a = bson_new (); bson_append_int32 (a, "0", 32); bson_append_int64 (a, "1", (gint64)-42); bson_finish (a); d = bson_new (); bson_append_string (d, "name", "sub-document", -1); bson_append_int32 (d, "answer", 42); bson_finish (d); scope = bson_new (); bson_append_string (scope, "v", "hello world", -1); bson_finish (scope); b = bson_new (); bson_append_double (b, "double", 3.14); bson_append_string (b, "str", "hello world", -1); bson_append_document (b, "doc", d); bson_append_array (b, "array", a); bson_append_binary (b, "binary0", BSON_BINARY_SUBTYPE_GENERIC, (guint8 *)"foo\0bar", 7); bson_append_oid (b, "_id", oid); bson_append_boolean (b, "TRUE", FALSE); bson_append_utc_datetime (b, "date", 1294860709000); bson_append_timestamp (b, "ts", 1294860709000); bson_append_null (b, "null"); bson_append_regex (b, "foobar", "s/foo.*bar/", "i"); bson_append_javascript (b, "alert", "alert (\"hello world!\");", -1); bson_append_symbol (b, "sex", "Marilyn Monroe", -1); bson_append_javascript_w_scope (b, "print", "alert (v);", -1, scope); bson_append_int32 (b, "int32", 32); bson_append_int64 (b, "int64", (gint64)-42); bson_finish (b); bson_free (d); bson_free (a); bson_free (scope); return b; }
/* Insert `newVal` into the database, generating a fresh "_id" ObjectId.
 *
 * The document is copied, "_id" is appended to the copy, the copy is added
 * to the indexes and finally written to the main DB keyed by the raw OID
 * bytes.  On success the generated OID is stored in
 * handle->status.insertStatus.newOid.
 *
 * Returns True on success; on failure handle->error is set and False is
 * returned. */
uint8_t XTDBInsert(XTDBHandle* handle, bson* newVal) {
    _S_FN(insert);
    handle->gen++;
    bson_oid_t oid;
    bson_oid_gen(&oid);
    BinaryStr key, val;
    uint8_t ret;
    bson copyObj;

    handle->lastOp = XTDB_OP_INSERT;
    key.data = oid.bytes;
    key.len = sizeof(oid.bytes);

    if (bson_copy(&copyObj, newVal)) {
        handle->error = XTDB_INVALID_BSON;
        return False;
    }
    /* Re-open the finished copy so "_id" can be appended. */
    bson_unfinish_object(&copyObj);
    if (bson_append_oid(&copyObj, "_id", &oid)) {
        handle->error = XTDB_NO_MEM;
        bson_destroy(&copyObj);
        return False;
    }
    if (bson_finish(&copyObj)) {
        handle->error = XTDB_NO_MEM;
        bson_destroy(&copyObj);
        return False;
    }
    //bson_print(&copyObj);
    BsonToBStr(&copyObj, &val);
    if (XTDBInsertToIndex(handle, &key, &val) == False) {
        /* BUG FIX: this path used to leak copyObj and relied solely on
         * assert(0), which is compiled out under NDEBUG. */
        bson_destroy(&copyObj);
        assert(0);
        return False;
    }
    ret = DBSet(handle->mainDB, &key, &val, False);
    _E_FN(insert);
    if (!ret) {
        handle->error = DBGetLastError(handle->mainDB);
    } else {
        memcpy(&handle->status.insertStatus.newOid, &oid, sizeof(oid));
    }
    return ret;
}
int commit_inode(struct inode * e) { bson cond, doc; mongo * conn = get_conn(); char istr[4]; struct dirent * cde = e->dirents; int res; bson_init(&doc); bson_append_start_object(&doc, "$set"); bson_append_start_array(&doc, "dirents"); res = 0; while(cde) { bson_numstr(istr, res++); bson_append_string(&doc, istr, cde->path); cde = cde->next; } bson_append_finish_array(&doc); bson_append_int(&doc, "mode", e->mode); bson_append_long(&doc, "owner", e->owner); bson_append_long(&doc, "group", e->group); bson_append_long(&doc, "size", e->size); bson_append_time_t(&doc, "created", e->created); bson_append_time_t(&doc, "modified", e->modified); if(e->data && e->datalen > 0) bson_append_string_n(&doc, "data", e->data, e->datalen); bson_append_finish_object(&doc); bson_finish(&doc); bson_init(&cond); bson_append_oid(&cond, "_id", &e->oid); bson_finish(&cond); res = mongo_update(conn, inodes_name, &cond, &doc, MONGO_UPDATE_UPSERT, NULL); bson_destroy(&cond); bson_destroy(&doc); if(res != MONGO_OK) { fprintf(stderr, "Error committing inode %s\n", mongo_get_server_err_string(conn)); return -EIO; } return 0; }
/* Return a cursor over the chunk documents of `gfile`, ordered by chunk
 * number "n", starting at chunk `start` and limited to `size` results.
 * For size == 1 the query matches a single "n"; otherwise it uses
 * {n: {$gte: start}}.  The caller owns the returned cursor. */
MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) {
    bson_iterator meta_it;
    bson_oid_t file_id;
    bson range;
    bson query;
    bson sort;
    bson wrapper;
    mongo_cursor *chunks;

    bson_find( &meta_it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &meta_it );

    bson_init( &query );
    bson_append_oid( &query, "files_id", &file_id );
    if ( size == 1 ) {
        bson_append_int( &query, "n", start );
    } else {
        bson_init( &range );
        bson_append_int( &range, "$gte", start );
        bson_finish( &range );
        bson_append_bson( &query, "n", &range );
        bson_destroy( &range );
    }
    bson_finish( &query );

    bson_init( &sort );
    bson_append_int( &sort, "n", 1 );
    bson_finish( &sort );

    /* Wrap as {query: ..., orderby: ...} for the wire-protocol sort form. */
    bson_init( &wrapper );
    bson_append_bson( &wrapper, "query", &query );
    bson_append_bson( &wrapper, "orderby", &sort );
    bson_finish( &wrapper );

    chunks = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns, &wrapper,
                         NULL, size, 0, 0 );

    bson_destroy( &wrapper );
    bson_destroy( &query );
    bson_destroy( &sort );
    return chunks;
}
/* Copy `in` into `out`, re-open the copy and append `oid` as "_id", then
 * re-finish it.  On any failure the partial copy is destroyed,
 * handle->error is set (XTDB_INVALID_BSON for a bad source document,
 * XTDB_NO_MEM otherwise) and False is returned; True on success. */
uint8_t XTDBBsonAppendOid(XTDBHandle* handle, bson* in, bson* out, bson_oid_t * oid) {
    if (bson_copy(out, in) != 0) {
        handle->error = XTDB_INVALID_BSON;
        return False;
    }

    bson_unfinish_object(out);

    /* Short-circuit: bson_finish is only attempted when the append worked,
     * matching the original's early-return behavior. */
    if (bson_append_oid(out, "_id", oid) != 0 || bson_finish(out) != 0) {
        handle->error = XTDB_NO_MEM;
        bson_destroy(out);
        return False;
    }

    return True;
}
static void test_has_collection (void) { mongoc_collection_t *collection; mongoc_database_t *database; mongoc_client_t *client; bson_error_t error; char *name; bool r; bson_oid_t oid; bson_t b; client = mongoc_client_new (gTestUri); assert (client); name = gen_collection_name ("has_collection"); collection = mongoc_client_get_collection (client, "test", name); assert (collection); database = mongoc_client_get_database (client, "test"); assert (database); bson_init (&b); bson_oid_init (&oid, NULL); bson_append_oid (&b, "_id", 3, &oid); bson_append_utf8 (&b, "hello", 5, "world", 5); r = mongoc_collection_insert (collection, MONGOC_INSERT_NONE, &b, NULL, &error); if (!r) { MONGOC_WARNING ("%s\n", error.message); } assert (r); bson_destroy (&b); r = mongoc_database_has_collection (database, name, &error); assert (!error.domain); assert (r); bson_free (name); mongoc_database_destroy (database); mongoc_collection_destroy (collection); mongoc_client_destroy (client); }
/* BCON_OID must produce exactly the same document as a plain
 * bson_append_oid() of the same OID. */
static void test_oid (void)
{
   bson_oid_t oid;
   bson_t via_bcon;
   bson_t via_append;

   bson_init (&via_bcon);
   bson_init (&via_append);
   bson_oid_init (&oid, NULL);

   bson_append_oid (&via_append, "foo", -1, &oid);
   BCON_APPEND (&via_bcon, "foo", BCON_OID (&oid));

   bson_eq_bson (&via_bcon, &via_append);

   bson_destroy (&via_bcon);
   bson_destroy (&via_append);
}
/* Fetch chunk number `n` of `gfile` from the chunks collection and return
 * it by value.  Asserts that the chunk exists. */
bson gridfile_get_chunk( gridfile *gfile, int n ) {
    bson query;
    bson out;
    bson_iterator it;
    bson_oid_t id;
    int found;

    bson_init( &query );
    bson_find( &it, gfile->meta, "_id" );
    id = *bson_iterator_oid( &it );
    bson_append_oid( &query, "files_id", &id );
    bson_append_int( &query, "n", n );
    bson_finish( &query );

    /* BUG FIX: the lookup used to execute inside assert(), so an NDEBUG
     * build never fetched the chunk and returned an uninitialized bson. */
    found = mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                            &query, NULL, &out );
    assert( found == MONGO_OK );
    bson_destroy( &query );
    return out;
}
/* Legacy (bson_buffer era) variant: return a cursor over the chunk
 * documents of `gfile`, ordered by "n", starting at `start` and limited to
 * `size` results.  The caller owns the returned cursor.
 * BUG FIX: the temporary bson objects (gte, query, orderby, command) were
 * never destroyed, leaking their buffers on every call; mongo_find copies
 * the query into the wire message, so they can be destroyed afterwards
 * (the modern driver version of this function does exactly that). */
mongo_cursor* gridfile_get_chunks(gridfile* gfile, int start, int size) {
  bson_iterator it;
  bson_oid_t id;
  bson_buffer gte_buf;
  bson gte_bson;
  bson_buffer query_buf;
  bson query_bson;
  bson_buffer orderby_buf;
  bson orderby_bson;
  bson_buffer command_buf;
  bson command_bson;
  mongo_cursor* cursor;

  bson_find(&it, gfile->meta, "_id");
  id = *bson_iterator_oid(&it);

  bson_buffer_init(&query_buf);
  bson_append_oid(&query_buf, "files_id", &id);
  if (size == 1) {
    bson_append_int(&query_buf, "n", start);
  } else {
    bson_buffer_init(&gte_buf);
    bson_append_int(&gte_buf, "$gte", start);
    bson_from_buffer(&gte_bson, &gte_buf);
    bson_append_bson(&query_buf, "n", &gte_bson);
    bson_destroy(&gte_bson);
  }
  bson_from_buffer(&query_bson, &query_buf);

  bson_buffer_init(&orderby_buf);
  bson_append_int(&orderby_buf, "n", 1);
  bson_from_buffer(&orderby_bson, &orderby_buf);

  bson_buffer_init(&command_buf);
  bson_append_bson(&command_buf, "query", &query_bson);
  bson_append_bson(&command_buf, "orderby", &orderby_bson);
  bson_from_buffer(&command_bson, &command_buf);

  cursor = mongo_find(gfile->gfs->client, gfile->gfs->chunks_ns,
                      &command_bson, NULL, size, 0, 0);

  bson_destroy(&command_bson);
  bson_destroy(&query_bson);
  bson_destroy(&orderby_bson);
  return cursor;
}
/* Legacy (bson_buffer era) variant: fetch chunk number `n` of `gfile` and
 * return it by value.  Asserts that the chunk exists. */
bson gridfile_get_chunk(gridfile* gfile, int n) {
  bson query;
  bson out;
  bson_buffer buf;
  bson_iterator it;
  bson_oid_t id;
  int found;

  bson_buffer_init(&buf);
  bson_find(&it, gfile->meta, "_id");
  id = *bson_iterator_oid(&it);
  bson_append_oid(&buf, "files_id", &id);
  bson_append_int(&buf, "n", n);
  bson_from_buffer(&query, &buf);

  /* BUG FIX: the lookup used to execute inside assert(), so an NDEBUG
   * build skipped it entirely; also `query` was leaked. */
  found = mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns,
                         &query, NULL, &out);
  assert(found);
  bson_destroy(&query);
  return out;
}
/* Truncate inode `e` to `off` bytes: reset the in-memory write extent
 * (serializing it first when off < 0) and delete committed extents whose
 * "start" lies at or beyond the new size.  Growing the file only bumps
 * e->size.  Returns 0 on success or a negative errno. */
int do_trunc(struct inode * e, off_t off) {
    bson cond;
    int res;
    mongo * conn = get_conn();

    if(off > e->size) {
        e->size = off;
        return 0;
    }

    pthread_mutex_lock(&e->wr_lock);
    if(e->wr_extent) {
        if(off < 0 && (res = serialize_extent(e, e->wr_extent)) != 0) {
            /* BUG FIX: this early return used to leave wr_lock held,
             * deadlocking the next writer. */
            pthread_mutex_unlock(&e->wr_lock);
            return res;
        }
        e->wr_extent->nnodes = 0;
    }
    e->wr_age = time(NULL);
    pthread_mutex_unlock(&e->wr_lock);

    bson_init(&cond);
    bson_append_oid(&cond, "inode", &e->oid);
    if(off > 0) {
        bson_append_start_object(&cond, "start");
        bson_append_long(&cond, "$gte", off);
        bson_append_finish_object(&cond);
    }
    bson_finish(&cond);

    res = mongo_remove(conn, extents_name, &cond, NULL);
    bson_destroy(&cond);
    if(res != 0) {
        fprintf(stderr, "Error removing extents in do_truncate\n");
        return -EIO;
    }
    e->size = off;
    return 0;
}
/* Fetch the file uploaded during setup by its _id and read it back with a
 * single readv call, reusing the preallocated test buffer.  Aborts the
 * process on any GridFS error. */
static void download_task (perf_test_t *test)
{
   download_test_t *dl = (download_test_t *) test;
   bson_t query = BSON_INITIALIZER;
   mongoc_gridfs_file_t *file;
   mongoc_iovec_t iov;
   bson_error_t error;
   ssize_t nread;

   bson_append_oid (&query, "_id", 3, &dl->file_id);
   file = mongoc_gridfs_find_one (dl->base.gridfs, &query, &error);
   if (file == NULL) {
      MONGOC_ERROR ("gridfs_find_one: %s\n", error.message);
      abort ();
   }

   /* overwrite the buffer we used for _upload_big_file */
   iov.iov_base = (void *) dl->base.data;
   iov.iov_len = dl->base.data_sz;

   nread = mongoc_gridfs_file_readv (file, &iov, 1, dl->base.data_sz, 0);
   if (nread != dl->base.data_sz) {
      if (mongoc_gridfs_file_error (file, &error)) {
         MONGOC_ERROR ("file_readv: %s\n", error.message);
      } else {
         MONGOC_ERROR ("file_readv: unknown error\n");
      }
      abort ();
   }

   mongoc_gridfs_file_destroy (file);
   bson_destroy (&query);
}
/* Insert 200 documents (each containing only a fresh _id) using a w=3
 * write concern, printing the write concern's GLE document first. */
static void insert_test_docs (mongoc_collection_t *collection)
{
   mongoc_write_concern_t *wc;
   bson_error_t error;
   bson_oid_t oid;
   bson_t doc;
   int n;

   wc = mongoc_write_concern_new();
   mongoc_write_concern_set_w(wc, 3);

   {
      const bson_t *gle = _mongoc_write_concern_get_gle(wc);
      char *json = bson_as_json(gle, NULL);
      fprintf(stderr, "Write Concern: %s\n", json);
      bson_free(json);
   }

   for (n = 0; n < 200; n++) {
      bson_init(&doc);
      bson_oid_init(&oid, NULL);
      bson_append_oid(&doc, "_id", 3, &oid);
      ASSERT_OR_PRINT (mongoc_collection_insert(collection,
                                                MONGOC_INSERT_NONE,
                                                &doc, wc, &error),
                       error);
      bson_destroy(&doc);
   }

   mongoc_write_concern_destroy(wc);
}
void test_func_mongo_sync_oidtest (void) { mongo_sync_connection *conn; bson *boid, *reply = NULL; bson_cursor *c; mongo_packet *p; guint8 *oid; const guint8 *noid; mongo_util_oid_init (0); oid = mongo_util_oid_new (1); boid = bson_new (); bson_append_oid (boid, "driverOIDTest", oid); bson_finish (boid); conn = mongo_sync_connect (config.primary_host, config.primary_port, FALSE); p = mongo_sync_cmd_custom (conn, config.db, boid); ok (p != NULL, "driverOIDTest(OID) custom command works"); mongo_wire_reply_packet_get_nth_document (p, 1, &reply); bson_finish (reply); c = bson_find (reply, "oid"); bson_cursor_get_oid (c, &noid); ok (memcmp (oid, noid, 12) == 0, "driverOIDTest(OID) returns the same OID"); bson_cursor_free (c); mongo_sync_disconnect (conn); mongo_wire_packet_free (p); bson_free (boid); bson_free (reply); }
/* Fetch chunk number `n` of `gfile` into `out`.  If the chunk cannot be
 * found, `out` is set to a copy of the empty bson document instead. */
MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) {
    bson query;
    bson_iterator meta_it;
    bson_oid_t file_id;
    int found;

    bson_init( &query );
    bson_find( &meta_it, gfile->meta, "_id" );
    file_id = *bson_iterator_oid( &meta_it );
    bson_append_oid( &query, "files_id", &file_id );
    bson_append_int( &query, "n", n );
    bson_finish( &query );

    found = ( mongo_find_one( gfile->gfs->client, gfile->gfs->chunks_ns,
                              &query, NULL, out ) == MONGO_OK );
    bson_destroy( &query );

    if ( !found ) {
        bson empty;
        bson_empty( &empty );
        bson_copy( out, &empty );
    }
}
/* Convert an HHVM Variant into BSON under `property_name` (NULL at the top
 * level).  Objects recognized by convertSpecialObject() are emitted
 * directly; packed arrays become BSON arrays, everything else becomes a
 * BSON document whose property names are unmangled first.  At the top
 * level (m_level == 0) the HIPPO_BSON_ADD_ID / HIPPO_BSON_RETURN_ID flags
 * are honoured: an existing "_id" is reused (and optionally exposed via
 * m_out), otherwise a fresh ObjectID is generated and appended.
 * NOTE(review): the "/* FIXME: Should we add a ref here? *" question from
 * the original author about m_out = data remains open. */
void VariantToBsonConverter::convertDocument(bson_t *bson, const char *property_name, Variant v) { bson_t child; int unmangle = 0; Array document; /* if we are not at a top-level, we need to check (and convert) special * BSON types too */ if (v.isObject()) { if (convertSpecialObject(bson, property_name, v.toObject())) { return; } /* The "convertSpecialObject" method didn't understand this type, so we * will continue treating this as a normal document */ } document = v.toObject()->o_toIterArray(null_string, ObjectData::PreserveRefs); if (_isPackedArray(document) && !v.isObject()) { if (property_name != NULL) { bson_append_array_begin(bson, property_name, -1, &child); } } else { unmangle = 1; if (property_name != NULL) { bson_append_document_begin(bson, property_name, -1, &child); } } for (ArrayIter iter(document); iter; ++iter) { Variant key(iter.first()); const Variant& data(iter.secondRef()); String s_key = key.toString(); if (m_level == 0 && (m_flags & HIPPO_BSON_ADD_ID)) { /* If we have an ID, we don't need to add it. But we also need to * set m_out to the value! */ if (strncmp(s_key.c_str(), "_id", s_key.length()) == 0) { m_flags &= ~HIPPO_BSON_ADD_ID; if (m_flags & HIPPO_BSON_RETURN_ID) { /* FIXME: Should we add a ref here? */ m_out = data; } } } m_level++; if (unmangle) { const char *unmangledName; unmangledName = _getUnmangledPropertyName(s_key); convertElement(property_name != NULL ? &child : bson, unmangledName, data); free((void*) unmangledName); } else { convertElement(property_name != NULL ? &child : bson, s_key.c_str(), data); } m_level--; } if (m_level == 0 && (m_flags & HIPPO_BSON_ADD_ID)) { bson_oid_t oid; bson_oid_init(&oid, NULL); bson_append_oid(bson, "_id", strlen("_id"), &oid); if (m_flags & HIPPO_BSON_RETURN_ID) { static Class* c_objectId; c_objectId = Unit::lookupClass(s_MongoBsonObjectID_className.get()); assert(c_objectId); Object obj = Object{c_objectId}; MongoDBBsonObjectIDData* obj_data = Native::data<MongoDBBsonObjectIDData>(obj.get()); bson_oid_copy(&oid, &obj_data->m_oid); m_out = obj; } } if (property_name != NULL) { if (_isPackedArray(document)) { bson_append_array_end(bson, &child); } else { bson_append_document_end(bson, &child); } } }
/* Generate a fresh ObjectId and append it to `b` under `name`.
 * Returns the status of the underlying bson_append_oid() call. */
int bson_append_new_oid( bson *b, const char *name ) {
    bson_oid_t generated;

    bson_oid_gen( &generated );
    return bson_append_oid( b, name, &generated );
}
/* Smoke test for mongo_update(): insert {_id, a:3}, apply
 * {$inc:{a:2}, $set:{b:-1.5}} five times against the same _id, then read
 * the document back and verify all three fields (_id bytes, a == 13,
 * b == -1.5).  Finishes by dropping the "test" database.
 * NOTE(review): the drop-collection guard exits when the drop SUCCEEDED
 * and the subsequent find FAILED — that looks inverted relative to its
 * "failed to drop collection" message; confirm against the driver's
 * original test before relying on it. */
int main(){ mongo_connection conn[1]; bson_buffer bb; bson obj; bson cond; int i; bson_oid_t oid; const char* col = "c.update_test"; const char* ns = "test.c.update_test"; INIT_SOCKETS_FOR_WINDOWS; if (mongo_connect( conn , TEST_SERVER, 27017 )){ printf("failed to connect\n"); exit(1); } /* if the collection doesn't exist dropping it will fail */ if ( mongo_cmd_drop_collection(conn, "test", col, NULL) == MONGO_OK && mongo_find_one(conn, ns, bson_empty(&obj), bson_empty(&obj), NULL) != MONGO_OK ){ printf("failed to drop collection\n"); exit(1); } bson_oid_gen(&oid); { /* insert */ bson_buffer_init(&bb); bson_append_oid(&bb, "_id", &oid); bson_append_int(&bb, "a", 3 ); bson_from_buffer(&obj, &bb); mongo_insert(conn, ns, &obj); bson_destroy(&obj); } { /* insert */ bson op; bson_buffer_init(&bb); bson_append_oid(&bb, "_id", &oid); bson_from_buffer(&cond, &bb); bson_buffer_init(&bb); { bson_append_start_object(&bb, "$inc"); bson_append_int(&bb, "a", 2 ); bson_append_finish_object(&bb); } { bson_append_start_object(&bb, "$set"); bson_append_double(&bb, "b", -1.5 ); bson_append_finish_object(&bb); } bson_from_buffer(&op, &bb); for (i=0; i<5; i++) mongo_update(conn, ns, &cond, &op, 0); /* cond is used later */ bson_destroy(&op); } if( mongo_find_one(conn, ns, &cond, 0, &obj) != MONGO_OK ){ printf("Failed to find object\n"); exit(1); } else { int fields = 0; bson_iterator it; bson_iterator_init(&it, obj.data); bson_destroy(&cond); while(bson_iterator_next(&it)){ switch(bson_iterator_key(&it)[0]){ case '_': /* id */ ASSERT(bson_iterator_type(&it) == BSON_OID); ASSERT(!memcmp(bson_iterator_oid(&it)->bytes, oid.bytes, 12)); fields++; break; case 'a': ASSERT(bson_iterator_type(&it) == BSON_INT); ASSERT(bson_iterator_int(&it) == 3 + 5*2); fields++; break; case 'b': ASSERT(bson_iterator_type(&it) == BSON_DOUBLE); ASSERT(bson_iterator_double(&it) == -1.5); fields++; break; } } ASSERT(fields == 3); } bson_destroy(&obj); mongo_cmd_drop_db(conn, "test"); mongo_destroy(conn); return 0; }
/* Worker body for the EJDB thread-race test: each thread repeatedly
 * queries its own {tid: id} document, increments its "cnt" field and saves
 * it back — reusing the existing _id once one exists so every iteration
 * updates the same document.  After the first save, exactly one match is
 * expected per query.  On any EJDB error the loop stops via goto-cleanup;
 * the final counter must equal `iterations`.  Returns "error" on failure,
 * NULL on success (checked by the joining thread). */
static void *threadrace1(void *_tr) { const int iterations = 500; TARGRACE *tr = (TARGRACE*) _tr; bool err = false; bson bq; bson_init_as_query(&bq); bson_append_int(&bq, "tid", tr->id); bson_finish(&bq); bson_type bt; bson_iterator it; void *bsdata; bool saved = false; int lastcnt = 0; EJCOLL *coll = ejdbcreatecoll(jb, "threadrace1", NULL); CU_ASSERT_PTR_NOT_NULL_FATAL(coll); EJQ *q = ejdbcreatequery(jb, &bq, NULL, 0, NULL); TCXSTR *log = tcxstrnew(); for (int i = 0; !err && i < iterations; ++i) { CU_ASSERT_PTR_NOT_NULL_FATAL(q); tcxstrclear(log); bson_oid_t oid2; bson_oid_t *oid = NULL; int cnt = 0; uint32_t count; TCLIST *res = NULL; if (ejdbecode(jb) != 0) { eprint(jb, __LINE__, "threadrace1"); err = true; goto ffinish; } res = ejdbqryexecute(coll, q, &count, 0, log); if (ejdbecode(jb) != 0) { eprint(jb, __LINE__, "threadrace1.ejdbqryexecute"); err = true; goto ffinish; } if (count != 1 && saved) { fprintf(stderr, "%d:COUNT=%d it=%d\n", tr->id, count, i); CU_ASSERT_TRUE(false); goto ffinish; } if (count > 0) { bsdata = TCLISTVALPTR(res, 0); CU_ASSERT_PTR_NOT_NULL_FATAL(bsdata); bt = bson_find_from_buffer(&it, bsdata, "cnt"); CU_ASSERT_EQUAL_FATAL(bt, BSON_INT); cnt = bson_iterator_int(&it); bt = bson_find_from_buffer(&it, bsdata, "_id"); CU_ASSERT_EQUAL_FATAL(bt, BSON_OID); oid = bson_iterator_oid(&it); CU_ASSERT_PTR_NOT_NULL_FATAL(oid); } bson sbs; bson_init(&sbs); if (oid) { bson_append_oid(&sbs, "_id", oid); } bson_append_int(&sbs, "tid", tr->id); bson_append_int(&sbs, "cnt", ++cnt); bson_finish(&sbs); if (!ejdbsavebson(coll, &sbs, &oid2)) { eprint(jb, __LINE__, "threadrace1.ejdbsavebson"); err = true; } saved = true; bson_destroy(&sbs); lastcnt = cnt; ffinish: if (res) tclistdel(res); } if (q) ejdbquerydel(q); if (log) tcxstrdel(log); bson_destroy(&bq); CU_ASSERT_EQUAL(lastcnt, iterations); //fprintf(stderr, "\nThread %d finished", tr->id); return err ? "error" : NULL; }
/* Pop one message from the destination queue and upsert it into MongoDB.
 * The selector document is a fresh OID derived from the last message
 * timestamp and the sequence number; the update is {$set: <value pairs>}
 * built by value_pairs_foreach().  The three reusable bson builders
 * (bson_sel / bson_upd / bson_set) are reset on every call.
 * On network failure the message is pushed back onto the queue and FALSE
 * is returned (triggering reconnect/backoff in the caller); on success it
 * is acked, counted and the sequence number stepped.  Returns TRUE when
 * the queue was empty. */
static gboolean afmongodb_worker_insert (MongoDBDestDriver *self) { gboolean success; guint8 *oid; LogMessage *msg; LogPathOptions path_options = LOG_PATH_OPTIONS_INIT; afmongodb_dd_connect(self, TRUE); g_mutex_lock(self->queue_mutex); log_queue_reset_parallel_push(self->queue); success = log_queue_pop_head(self->queue, &msg, &path_options, FALSE, FALSE); g_mutex_unlock(self->queue_mutex); if (!success) return TRUE; msg_set_context(msg); bson_reset (self->bson_sel); bson_reset (self->bson_upd); bson_reset (self->bson_set); oid = mongo_util_oid_new_with_time (self->last_msg_stamp, self->seq_num); bson_append_oid (self->bson_sel, "_id", oid); g_free (oid); bson_finish (self->bson_sel); value_pairs_foreach (self->vp, afmongodb_vp_foreach, msg, self->seq_num, self->bson_set); bson_finish (self->bson_set); bson_append_document (self->bson_upd, "$set", self->bson_set); bson_finish (self->bson_upd); if (!mongo_sync_cmd_update (self->conn, self->ns, MONGO_WIRE_FLAG_UPDATE_UPSERT, self->bson_sel, self->bson_upd)) { msg_error ("Network error while inserting into MongoDB", evt_tag_int("time_reopen", self->time_reopen), NULL); success = FALSE; } msg_set_context(NULL); if (success) { stats_counter_inc(self->stored_messages); step_sequence_number(&self->seq_num); log_msg_ack(msg, &path_options); log_msg_unref(msg); } else { g_mutex_lock(self->queue_mutex); log_queue_push_head(self->queue, msg, &path_options); g_mutex_unlock(self->queue_mutex); } return success; }
void mongoIdToBSON(const Object& value, const char* key, bson_t* bson) { bson_oid_t oid; bson_oid_init_from_string(&oid, value->o_get("$id").toString().c_str()); bson_append_oid(bson, key, -1, &oid); }
/* {{{ MongoDriver\BSON\ObjectID */
/* Append the ObjectID wrapped by the native data of `v` to `bson` under
 * `key`. */
void VariantToBsonConverter::_convertObjectID(bson_t *bson, const char *key, Object v)
{
    MongoDBBsonObjectIDData *native =
        Native::data<MongoDBBsonObjectIDData>(v.get());

    bson_append_oid(bson, key, -1, &native->m_oid);
}