/*
 * Append one update operation ({q, u, upsert, multi}) to the command's
 * "documents" BSON array, keyed by the current document count.
 */
void
_mongoc_write_command_update_append (mongoc_write_command_t *command,
                                     const bson_t *selector,
                                     const bson_t *update,
                                     bool upsert,
                                     bool multi)
{
   bson_t op;
   const char *idx_key = NULL;
   char idx_buf [16];

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_UPDATE);
   BSON_ASSERT (selector && update);

   /* build the per-operation sub-document */
   bson_init (&op);
   BSON_APPEND_DOCUMENT (&op, "q", selector);
   BSON_APPEND_DOCUMENT (&op, "u", update);
   BSON_APPEND_BOOL (&op, "upsert", upsert);
   BSON_APPEND_BOOL (&op, "multi", multi);

   /* array keys are the decimal document index: "0", "1", ... */
   bson_uint32_to_string (command->n_documents, &idx_key, idx_buf, sizeof idx_buf);
   BSON_ASSERT (idx_key);

   BSON_APPEND_DOCUMENT (command->documents, idx_key, &op);
   command->n_documents++;

   bson_destroy (&op);

   EXIT;
}
/*
 * Append one update operation ({q, u} plus any caller-supplied opts such
 * as upsert/multi/collation) to the command's raw payload buffer.
 */
void
_mongoc_write_command_update_append (mongoc_write_command_t *command,
                                     const bson_t *selector,
                                     const bson_t *update,
                                     const bson_t *opts)
{
   bson_t op;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_UPDATE);
   BSON_ASSERT (selector && update);

   bson_init (&op);
   BSON_APPEND_DOCUMENT (&op, "q", selector);
   BSON_APPEND_DOCUMENT (&op, "u", update);
   if (opts) {
      /* opts fields are merged verbatim into the operation document */
      bson_concat (&op, opts);
   }

   /* raw BSON bytes are accumulated in the OP_MSG payload buffer */
   _mongoc_buffer_append (
      &command->payload, bson_get_data (&op), op.len);
   command->n_documents++;

   bson_destroy (&op);

   EXIT;
}
/*
 * Append one delete operation ({q, limit}) to the command's "documents"
 * array; limit is 0 (delete all matches) when multi, otherwise 1.
 */
void
_mongoc_write_command_delete_append (mongoc_write_command_t *command,
                                     const bson_t *selector)
{
   bson_t op;
   const char *idx_key = NULL;
   char idx_buf [16];

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_DELETE);
   BSON_ASSERT (selector);
   BSON_ASSERT (selector->len >= 5); /* 5 bytes == smallest valid BSON doc */

   bson_init (&op);
   BSON_APPEND_DOCUMENT (&op, "q", selector);
   BSON_APPEND_INT32 (&op, "limit", command->u.delete_.multi ? 0 : 1);

   /* array keys are the decimal document index: "0", "1", ... */
   bson_uint32_to_string (command->n_documents, &idx_key, idx_buf, sizeof idx_buf);
   BSON_ASSERT (idx_key);

   BSON_APPEND_DOCUMENT (command->documents, idx_key, &op);
   command->n_documents++;

   bson_destroy (&op);

   EXIT;
}
/*
 * Serialize a topology description into @bson:
 * { topologyType, [setName], servers: [ {..}, ... ] }.
 * @bson is initialized here; caller must destroy it.
 */
static void
td_to_bson (const mongoc_topology_description_t *td, bson_t *bson)
{
   bson_t servers = BSON_INITIALIZER;
   bson_t server;
   char idx_buf[16];
   const char *idx_key;
   size_t idx;

   /* build the "servers" array first: one sub-document per server */
   for (idx = 0; idx < td->servers->items_len; idx++) {
      bson_uint32_to_string ((uint32_t) idx, &idx_key, idx_buf, sizeof idx_buf);
      sd_to_bson (mongoc_set_get_item (td->servers, (int) idx), &server);
      BSON_APPEND_DOCUMENT (&servers, idx_key, &server);
      bson_destroy (&server);
   }

   bson_init (bson);
   BSON_APPEND_UTF8 (bson, "topologyType", mongoc_topology_description_type (td));
   if (td->set_name) {
      BSON_APPEND_UTF8 (bson, "setName", td->set_name);
   }
   BSON_APPEND_ARRAY (bson, "servers", &servers);

   bson_destroy (&servers);
}
/*
 * Expand the command's raw payload (a sequence of BSON documents) into a
 * proper BSON array appended to @bson under the command-type field name
 * ("documents"/"updates"/"deletes"), keyed "0", "1", ...
 */
void
_append_array_from_command (mongoc_write_command_t *command, bson_t *bson)
{
   bson_reader_t *rdr;
   const bson_t *doc;
   bson_t array;
   char idx_buf[16];
   const char *idx_key;
   uint32_t n = 0;
   bool reached_eof;

   rdr = bson_reader_new_from_data (command->payload.data, command->payload.len);

   bson_append_array_begin (bson,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &array);

   /* re-key each payload document by its position in the array */
   while ((doc = bson_reader_read (rdr, &reached_eof))) {
      bson_uint32_to_string (n, &idx_key, idx_buf, sizeof idx_buf);
      BSON_APPEND_DOCUMENT (&array, idx_key, doc);
      n++;
   }

   bson_append_array_end (bson, &array);
   bson_reader_destroy (rdr);
}
/*
 * Append one document to the insert command's "documents" array, keyed by
 * the current document count.  If the document lacks an "_id" field, a
 * freshly generated ObjectId is prepended so the server never has to.
 */
void
_mongoc_write_command_insert_append (mongoc_write_command_t *command,
                                     const bson_t *document)
{
   bson_iter_t iter;
   bson_oid_t oid;
   bson_t with_id;
   const char *idx_key = NULL;
   char idx_buf [16];

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
   BSON_ASSERT (document);
   BSON_ASSERT (document->len >= 5); /* smallest valid BSON doc */

   /* array keys are the decimal document index: "0", "1", ... */
   bson_uint32_to_string (command->n_documents, &idx_key, idx_buf, sizeof idx_buf);
   BSON_ASSERT (idx_key);

   if (bson_iter_init_find (&iter, document, "_id")) {
      /* document already carries an _id; append it unchanged */
      BSON_APPEND_DOCUMENT (command->documents, idx_key, document);
   } else {
      /* no "_id": build a copy with a generated oid as the first field */
      bson_init (&with_id);
      bson_oid_init (&oid, NULL);
      BSON_APPEND_OID (&with_id, "_id", &oid);
      bson_concat (&with_id, document);
      BSON_APPEND_DOCUMENT (command->documents, idx_key, &with_id);
      bson_destroy (&with_id);
   }

   command->n_documents++;

   EXIT;
}
/*
 * mongoc_database_find_collections --
 *
 *    Run "listCollections" against @database and return a cursor over
 *    the result.  If the server predates listCollections, falls back to
 *    the legacy system.namespaces scan.  On other command failures the
 *    error is copied into @error (if non-NULL) and the failed cursor is
 *    still returned.
 */
mongoc_cursor_t *
mongoc_database_find_collections (mongoc_database_t *database,
                                  const bson_t *filter,
                                  bson_error_t *error)
{
   mongoc_cursor_t *cursor;
   mongoc_read_prefs_t *read_prefs;
   bson_t cmd = BSON_INITIALIZER;
   bson_t child;
   bson_error_t lerror;

   BSON_ASSERT (database);

   BSON_APPEND_INT32 (&cmd, "listCollections", 1);

   if (filter) {
      BSON_APPEND_DOCUMENT (&cmd, "filter", filter);
      /* NOTE(review): the empty "cursor" sub-document is only appended
       * when a filter is supplied -- confirm whether it should be
       * unconditional. */
      BSON_APPEND_DOCUMENT_BEGIN (&cmd, "cursor", &child);
      bson_append_document_end (&cmd, &child);
   }

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);

   cursor = _mongoc_cursor_new (database->client, database->name, MONGOC_QUERY_SLAVE_OK, 0, 0, 0, true, NULL, NULL, NULL, NULL);

   _mongoc_cursor_cursorid_init (cursor, &cmd);

   if (_mongoc_cursor_cursorid_prime (cursor)) {
      /* intentionally empty */
   } else {
      if (mongoc_cursor_error (cursor, &lerror)) {
         if (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
            /* We are talking to a server that doesn't support
             * listCollections. */
            /* clear out the error. */
            memset (&lerror, 0, sizeof lerror);
            /* try again with using system.namespaces */
            mongoc_cursor_destroy (cursor);
            cursor = _mongoc_database_find_collections_legacy (
               database, filter, error);
         } else if (error) {
            memcpy (error, &lerror, sizeof *error);
         }
      }
   }

   bson_destroy (&cmd);
   mongoc_read_prefs_destroy (read_prefs);

   return cursor;
}
/*
 * Append @event to the context's running "events" array, keyed by the
 * running event count.  Takes ownership of @event and destroys it.
 */
static void
context_append (context_t *context, bson_t *event)
{
   const char *idx_key;
   char idx_buf[16];

   bson_uint32_to_string (context->n_events, &idx_key, idx_buf, sizeof idx_buf);
   BSON_APPEND_DOCUMENT (&context->events, idx_key, event);

   context->n_events++;
   bson_destroy (event);
}
/*
 * mongoc_database_get_collection_info --
 *
 *    Run "listCollections" (with optional @filter) against @database and
 *    return the server reply.  Falls back to the legacy system.namespaces
 *    scan on servers that lack the command.  Caller owns the returned
 *    bson_t; NULL is returned on network error.
 *
 *    NOTE(review): @error is dereferenced unconditionally on command
 *    failure (error->code), so callers must pass a non-NULL @error --
 *    confirm against the callers.
 */
bson_t *
mongoc_database_get_collection_info (mongoc_database_t *database,
                                     const bson_t *filter,
                                     bson_error_t *error)
{
   mongoc_read_prefs_t *read_prefs;
   bson_t *reply = bson_new();
   bson_t cmd = BSON_INITIALIZER;
   bool cmd_success;

   BSON_ASSERT (database);

   BSON_APPEND_INT32 (&cmd, "listCollections", 1);

   if (filter) {
      BSON_APPEND_DOCUMENT (&cmd, "filter", filter);
   }

   read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);

   cmd_success = mongoc_database_command_simple (database, &cmd, read_prefs, reply, error);

   if (cmd_success) {
      /* intentionally empty */
   } else if (error->code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
      bson_destroy (reply);
      /* We are talking to a server that doesn't support listCollections. */
      /* clear out the error. */
      error->code = 0;
      error->domain = 0;
      /* try again with using system.namespaces */
      reply = _mongoc_database_get_collection_info_legacy (database, filter, error);
   } else {
      /* network error */
      bson_destroy (reply);
      reply = NULL;
   }

   bson_destroy (&cmd);
   mongoc_read_prefs_destroy (read_prefs);

   return reply;
}
/*
 * Reconstruct the documents array for a command-started monitoring event
 * from the outgoing OP_MSG payload type 1 section, since those documents
 * are not part of the command body itself.
 */
static void
append_documents_from_cmd (const mongoc_cmd_t *cmd,
                           mongoc_apm_command_started_t *event)
{
   int32_t doc_len;
   bson_t doc;
   const uint8_t *pos;
   const char *field_name;
   bson_t bson;
   char str[16];
   const char *key;
   uint32_t i;

   /* nothing to do when there is no separate payload section */
   if (!cmd->payload || !cmd->payload_size) {
      return;
   }

   /* copy-on-write: clone the command document only once we must modify it */
   if (!event->command_owned) {
      event->command = bson_copy (event->command);
      event->command_owned = true;
   }

   /* make array from outgoing OP_MSG payload type 1 on an "insert",
    * "update", or "delete" command. */
   field_name = _mongoc_get_documents_field_name (cmd->command_name);
   BSON_ASSERT (field_name);
   /* NOTE(review): the append and bson_init_static calls below execute
    * inside BSON_ASSERT -- confirm BSON_ASSERT is always compiled in,
    * otherwise these side effects vanish in release builds. */
   BSON_ASSERT (BSON_APPEND_ARRAY_BEGIN (event->command, field_name, &bson));

   pos = cmd->payload;
   i = 0;
   while (pos < cmd->payload + cmd->payload_size) {
      /* each payload document begins with its little-endian int32 length */
      memcpy (&doc_len, pos, sizeof (doc_len));
      doc_len = BSON_UINT32_FROM_LE (doc_len);
      BSON_ASSERT (bson_init_static (&doc, pos, (size_t) doc_len));
      bson_uint32_to_string (i, &key, str, sizeof (str));
      BSON_APPEND_DOCUMENT (&bson, key, &doc);
      pos += doc_len;
      i++;
   }

   bson_append_array_end (event->command, &bson);
}
/*
 * Serialize @file into a newly allocated BSON document: _id, name, size,
 * parentId plus a nested structure of block copies.  Caller owns the
 * returned bson_t.
 *
 * NOTE(review): uses GCC nested functions (a non-standard C extension) as
 * closures for list iteration.  This definition appears truncated in this
 * view -- the iteration calls, outer closing braces and return statement
 * are outside the visible chunk.
 */
bson_t* file_getBSON(file_t *file) {
	char arrayKey[16];
	bson_t *bson = bson_new();
	BSON_APPEND_UTF8(bson, "_id", file->id);
	BSON_APPEND_UTF8(bson, "name", file->name);
	BSON_APPEND_INT64(bson, "size", file->size);
	BSON_APPEND_UTF8(bson, "parentId", file->parentId);
	bson_t *bson_blocks = bson_new();
	int blockIndex = 0;
	/* per-block callback: builds the array of copies for one block */
	void listBlocks(t_list* blockCopies) {
		bson_t *bson_block_copies = bson_new();
		int copyIndex = 0;
		/* per-copy callback: appends one copy document keyed "0","1",... */
		void listBlockCopy(file_block_t *blockCopy) {
			sprintf(arrayKey, "%d", copyIndex);
			bson_t *bson_block_copy = file_block_getBSON(blockCopy);
			BSON_APPEND_DOCUMENT(bson_block_copies, arrayKey, bson_block_copy);
			bson_destroy(bson_block_copy);
			copyIndex++;
		}
/*
 * Appends @value to @bson under @key, translating QVariant types to the
 * corresponding BSON types.  "_id" keys get special handling: a 24-char
 * byte array becomes an ObjectId, otherwise an int, otherwise a UTF-8
 * string.  Returns false when the QVariant type is unsupported or a
 * numeric conversion fails; the "_id" path always returns true.
 */
static bool appendBsonValue(bson_t *bson, const QString &key, const QVariant &value)
{
    const QLatin1String oidkey("_id");
    bool ok = true;
    int type = value.type();

    // _id
    if (key == oidkey) {
        QByteArray oidVal = value.toByteArray();
        if (oidVal.length() == 24) {
            // ObjectId
            bson_oid_t oid;
            bson_oid_init_from_string(&oid, oidVal.data());
            BSON_APPEND_OID(bson, oidkey.latin1(), &oid);
        } else {
            int id = value.toInt(&ok);
            if (ok) {
                BSON_APPEND_INT32(bson, oidkey.latin1(), id);
            } else {
                BSON_APPEND_UTF8(bson, oidkey.latin1(), value.toString().toUtf8().data());
            }
        }
        // NOTE(review): returns true even when the int conversion failed
        // and the string fallback was written -- confirm intended.
        return true;
    }

    switch (type) {
    case QVariant::Int:
        BSON_APPEND_INT32(bson, qPrintable(key), value.toInt(&ok));
        break;

    case QVariant::String:
        BSON_APPEND_UTF8(bson, qPrintable(key), value.toString().toUtf8().data());
        break;

    case QVariant::LongLong:
        BSON_APPEND_INT64(bson, qPrintable(key), value.toLongLong(&ok));
        break;

    case QVariant::Map:
        BSON_APPEND_DOCUMENT(bson, qPrintable(key), (const bson_t *)TBson::toBson(value.toMap()).constData());
        break;

    case QVariant::Double:
        BSON_APPEND_DOUBLE(bson, qPrintable(key), value.toDouble(&ok));
        break;

    case QVariant::Bool:
        BSON_APPEND_BOOL(bson, qPrintable(key), value.toBool());
        break;

    case QVariant::DateTime: {
#if QT_VERSION >= 0x040700
        BSON_APPEND_DATE_TIME(bson, qPrintable(key), value.toDateTime().toMSecsSinceEpoch());
#else
        // pre-Qt 4.7: compose epoch milliseconds from seconds + msec part
        QDateTime utcDate = value.toDateTime().toUTC();
        qint64 ms = utcDate.time().msec();
        qint64 tm = utcDate.toTime_t() * 1000LL;
        if (ms > 0) {
            tm += ms;
        }
        BSON_APPEND_DATE_TIME(bson, qPrintable(key), tm);
#endif
        break;
    }

    case QVariant::ByteArray: {
        QByteArray ba = value.toByteArray();
        BSON_APPEND_BINARY(bson, qPrintable(key), BSON_SUBTYPE_BINARY, (uint8_t *)ba.constData(), ba.length());
        break;
    }

    case QVariant::List:  // FALL THROUGH
    case QVariant::StringList: {
        // BSON arrays are documents keyed "0", "1", ...
        bson_t child;
        BSON_APPEND_ARRAY_BEGIN(bson, qPrintable(key), &child);
        int i = 0;
        for (auto &var : value.toList()) {
            appendBsonValue(&child, QString::number(i++), var);
        }
        bson_append_array_end(bson, &child);
        break;
    }

    case QVariant::Invalid:
        BSON_APPEND_UNDEFINED(bson, qPrintable(key));
        break;

    default:
        tError("toBson() failed to convert name:%s type:%d", qPrintable(key), type);
        ok = false;
        break;
    }
    return ok;
}
/**
 * mongoc_database_add_user:
 * @database: A #mongoc_database_t.
 * @username: A string containing the username.
 * @password: (allow-none): A string containing password, or NULL.
 * @roles: (allow-none): An optional bson_t of roles.
 * @custom_data: (allow-none): An optional bson_t of data to store.
 * @error: (out) (allow-none): A location for a bson_error_t or %NULL.
 *
 * Creates a new user with access to @database.  Probes the server with
 * "usersInfo" first: modern servers get "createUser" with a client-side
 * MD5-hashed password; servers lacking the command fall back to legacy
 * insertion into the users collection.
 *
 * NOTE(review): @password is documented as allow-none but is formatted
 * with "%s" into the digest input on the createUser path -- confirm that
 * a NULL password is only legal on the legacy path.
 *
 * Returns: true on success, otherwise false and @error is set.
 * Side effects: None.
 */
bool
mongoc_database_add_user (mongoc_database_t *database,
                          const char *username,
                          const char *password,
                          const bson_t *roles,
                          const bson_t *custom_data,
                          bson_error_t *error)
{
   bson_error_t lerror;
   bson_t cmd;
   bson_t ar;
   char *input;
   char *hashed_password;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (username);

   /*
    * CDRIVER-232:
    *
    * Perform a (slow and tedious) round trip to mongod to determine if
    * we can safely call createUser. Otherwise, we will fallback and
    * perform legacy insertion into users collection.
    */
   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "usersInfo", username);
   ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
   bson_destroy (&cmd);

   if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
      ret = mongoc_database_add_user_legacy (database, username, password, error);
   } else if (ret || (lerror.code == 13)) {
      /* usersInfo succeeded or failed with auth err, we're on modern mongod */
      input = bson_strdup_printf ("%s:mongo:%s", username, password);
      hashed_password = _mongoc_hex_md5 (input);
      bson_free (input);
      bson_init (&cmd);
      BSON_APPEND_UTF8 (&cmd, "createUser", username);
      BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
      /* the password was already hashed client side */
      BSON_APPEND_BOOL (&cmd, "digestPassword", false);
      if (custom_data) {
         BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
      }
      if (roles) {
         BSON_APPEND_ARRAY (&cmd, "roles", roles);
      } else {
         /* no roles supplied: send an empty roles array */
         bson_append_array_begin (&cmd, "roles", 5, &ar);
         bson_append_array_end (&cmd, &ar);
      }
      ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);
      bson_free (hashed_password);
      bson_destroy (&cmd);
   } else if (error) {
      memcpy (error, &lerror, sizeof *error);
   }

   RETURN (ret);
}
/*
 * _mongoc_write_opquery --
 *
 *    Execute a write command (insert/update/delete) over OP_QUERY,
 *    batching documents out of command->payload into as many commands as
 *    needed to respect the server's maxBsonObjectSize and
 *    maxWriteBatchSize.  Replies are merged into @result; on failure
 *    @error is set and result->failed / result->must_stop are updated.
 */
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];

   /* resume reading the payload where the previous batch stopped */
   reader = bson_reader_new_from_data (command->payload.data + data_offset, command->payload.len - data_offset);

   bson_append_array_begin (&cmd, gCommandFields[command->type], gCommandFieldLens[command->type], &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;
      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead, key_len + len + 2 + ar.len, i, max_bson_obj_size, max_write_batch_size)) {
         /* this document doesn't fit: send what we have, then loop */
         has_more = true;
         break;
      }
      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      /* not even a single document fit within the limits */
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         /* skip past the oversized document so we don't loop on it */
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      if (!mongoc_cmd_parts_set_write_concern (&parts, write_concern, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
      if (!mongoc_cmd_parts_append_opts (&parts, &iter, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (&client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }

   bson_reader_destroy (reader);

   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);

   EXIT;
}
/*
 * _mongoc_write_command --
 *
 *    Execute a batched write command.  Unacknowledged writes against
 *    wire-version-0 servers are routed to the legacy opcode path; all
 *    other writes go through the write command, split into multiple
 *    batches when maxBsonObjectSize or maxWriteBatchSize would be
 *    exceeded.  Replies are merged into @result.
 */
static void
_mongoc_write_command(mongoc_write_command_t *command,
                      mongoc_client_t *client,
                      mongoc_server_stream_t *server_stream,
                      const char *database,
                      const char *collection,
                      const mongoc_write_concern_t *write_concern,
                      uint32_t offset,
                      mongoc_write_result_t *result,
                      bson_error_t *error)
{
   const uint8_t *data;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t tmp;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str [16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   int32_t min_wire_version;
   uint32_t key_len;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream);

   /*
    * If we have an unacknowledged write and the server supports the legacy
    * opcodes, then submit the legacy opcode so we don't need to wait for
    * a response from the server.
    */
   min_wire_version = server_stream->sd->min_wire_version;
   if ((min_wire_version == 0) && !_mongoc_write_concern_needs_gle (write_concern)) {
      if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
         bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
      gLegacyWriteOps[command->type] (command, client, server_stream, database, collection, write_concern, offset, result, error);
      EXIT;
   }

   /* nothing to send */
   if (!command->n_documents || !bson_iter_init (&iter, command->documents) || !bson_iter_next (&iter)) {
      _empty_error (command, error);
      result->failed = true;
      EXIT;
   }

again:
   bson_init (&cmd);
   has_more = false;
   i = 0;

   BSON_APPEND_UTF8 (&cmd, gCommandNames[command->type], collection);
   BSON_APPEND_DOCUMENT (&cmd, "writeConcern", WRITE_CONCERN_DOC (write_concern));
   BSON_APPEND_BOOL (&cmd, "ordered", command->flags.ordered);
   if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      BSON_APPEND_BOOL (&cmd, "bypassDocumentValidation", !!command->flags.bypass_document_validation);
   }

   if (!_mongoc_write_command_will_overflow (0, command->documents->len, command->n_documents, max_bson_obj_size, max_write_batch_size)) {
      /* copy the whole documents buffer as e.g. "updates": [...] */
      BSON_APPEND_ARRAY (&cmd, gCommandFields[command->type], command->documents);
      i = command->n_documents;
   } else {
      /* doesn't all fit: append documents one by one until the batch
       * limits are hit, then send and loop for the remainder */
      bson_append_array_begin (&cmd, gCommandFields[command->type], -1, &ar);
      do {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            BSON_ASSERT (false);
         }
         bson_iter_document (&iter, &len, &data);
         key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
         /* + 2: element type byte plus the key's null terminator */
         if (_mongoc_write_command_will_overflow (ar.len, key_len + len + 2, i, max_bson_obj_size, max_write_batch_size)) {
            has_more = true;
            break;
         }
         if (!bson_init_static (&tmp, data, len)) {
            BSON_ASSERT (false);
         }
         BSON_APPEND_DOCUMENT (&ar, key, &tmp);
         bson_destroy (&tmp);
         i++;
      } while (bson_iter_next (&iter));
      bson_append_array_end (&cmd, &ar);
   }

   if (!i) {
      /* a single document exceeded the limits on its own */
      too_large_error (error, i, len, max_bson_obj_size, NULL);
      result->failed = true;
      ret = false;
   } else {
      ret = mongoc_cluster_run_command (&client->cluster, server_stream->stream, MONGOC_QUERY_NONE, database, &cmd, &reply, error);
      if (!ret) {
         result->failed = true;
      }
      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
   }

   bson_destroy (&cmd);

   if (has_more && (ret || !command->flags.ordered)) {
      GOTO (again);
   }

   EXIT;
}
/* Uses old way of querying system.namespaces. */ bson_t * _mongoc_database_get_collection_info_legacy (mongoc_database_t *database, const bson_t *filter, bson_error_t *error) { mongoc_collection_t *col; mongoc_cursor_t *cursor; mongoc_read_prefs_t *read_prefs; uint32_t dbname_len; const bson_t *doc; bson_t legacy_filter; bson_iter_t iter; const char *name; const char *col_filter; bson_t q = BSON_INITIALIZER; bson_t *ret = NULL; bson_t col_array = BSON_INITIALIZER; const char *key; char keystr[16]; uint32_t n_cols = 0; BSON_ASSERT (database); col = mongoc_client_get_collection (database->client, database->name, "system.namespaces"); BSON_ASSERT (col); dbname_len = (uint32_t)strlen (database->name); /* Filtering on name needs to be handled differently for old servers. */ if (filter && bson_iter_init_find (&iter, filter, "name")) { /* on legacy servers, this must be a string (i.e. not a regex) */ if (!BSON_ITER_HOLDS_UTF8 (&iter)) { bson_set_error (error, MONGOC_ERROR_NAMESPACE, MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE, "On legacy servers, a filter on name can only be a string."); goto cleanup_filter; } BSON_ASSERT (BSON_ITER_HOLDS_UTF8 (&iter)); col_filter = bson_iter_utf8 (&iter, NULL); bson_init (&legacy_filter); bson_copy_to_excluding_noinit (filter, &legacy_filter, "name", NULL); /* We must db-qualify filters on name. */ bson_string_t *buf = bson_string_new (database->name); bson_string_append_c (buf, '.'); bson_string_append (buf, col_filter); BSON_APPEND_UTF8 (&legacy_filter, "name", buf->str); bson_string_free (buf, true); filter = &legacy_filter; } read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); cursor = mongoc_collection_find (col, MONGOC_QUERY_NONE, 0, 0, 0, filter ? filter : &q, NULL, read_prefs); ret = bson_new(); BSON_APPEND_ARRAY_BEGIN (ret, "collections", &col_array); while (mongoc_cursor_more (cursor) && !mongoc_cursor_error (cursor, error)) { if (mongoc_cursor_next (cursor, &doc)) { /* 2 gotchas here. * 1. 
need to ignore any system collections (prefixed with $) * 2. need to remove the database name from the collection so that clients * don't need to specialize their logic for old versions of the server. */ if (bson_iter_init_find (&iter, doc, "name") && BSON_ITER_HOLDS_UTF8 (&iter) && (name = bson_iter_utf8 (&iter, NULL)) && !strchr (name, '$') && (0 == strncmp (name, database->name, dbname_len))) { bson_t nprefix_col = BSON_INITIALIZER; bson_copy_to_excluding_noinit (doc, &nprefix_col, "name", NULL); BSON_APPEND_UTF8 (&nprefix_col, "name", name + (dbname_len + 1)); /* +1 for the '.' */ /* need to construct a key for this array element. */ bson_uint32_to_string(n_cols, &key, keystr, sizeof (keystr)); BSON_APPEND_DOCUMENT (&col_array, key, &nprefix_col); ++n_cols; } } } bson_append_array_end (ret, &col_array); mongoc_cursor_destroy (cursor); mongoc_read_prefs_destroy (read_prefs); cleanup_filter: mongoc_collection_destroy (col); return ret; }
/*
 * route_result --
 *
 *    Dispatch one poll-result array from the server.  "message" and
 *    "group_message" items are decoded and forwarded to the registered
 *    callbacks; anything else is archived into the "unprocessed" Mongo
 *    collection together with a timestamp.
 *
 *    Fixes: the archive branch previously leaked the heap string returned
 *    by cJSON_PrintUnformatted() and the mongoc collection handle on every
 *    path, including the early return on a JSON-to-BSON parse failure.
 */
static void route_result(cJSON* result)
{
    cJSON* cjson_current;
    size_t i;

    for (cjson_current = result->child; cjson_current; cjson_current = cjson_current->next) {
        if (strcmp(cJSON_GetObjectItem(cjson_current, "poll_type")->valuestring, "message") == 0) {
            cJSON* cjson_value = cJSON_GetObjectItem(cjson_current, "value");
            ullong from_uin = cJSON_GetObjectItem(cjson_value, "from_uin")->valuedouble;
            ullong number = get_friend_number(from_uin);
            msg_content_array_t content = fetch_content(cJSON_GetObjectItem(cjson_value, "content"));
            dump_message(number, str_from("friend_message"), &content);
#ifdef _DEBUG
            {
                char* str = msg_content_array_to_json_object_string(&content, "content");
                fprintf(stdout, "Received message from: %llu\nContent: %s\n", number, str);
                fflush(stdout);
                free(str);
            }
#endif
            for (i = 0; i < robot.received_message_funcs_count; ++i)
                robot.received_message_funcs[i](from_uin, number, &content);
            msg_content_array_free(&content);
        } else if (strcmp(cJSON_GetObjectItem(cjson_current, "poll_type")->valuestring, "group_message") == 0) {
            cJSON* cjson_value = cJSON_GetObjectItem(cjson_current, "value");
            ullong from_uin = cJSON_GetObjectItem(cjson_value, "from_uin")->valuedouble;
            ullong number = get_group_number(cJSON_GetObjectItem(cjson_value, "group_code")->valuedouble);
            msg_content_array_t content = fetch_content(cJSON_GetObjectItem(cjson_value, "content"));
            dump_message(number, str_from("group_message"), &content);
#ifdef _DEBUG
            {
                char* str = msg_content_array_to_json_object_string(&content, "content");
                fprintf(stdout, "Received group_message from: %llu\nContent: %s\n", number, str);
                fflush(stdout);
                free(str);
            }
#endif
            for (i = 0; i < robot.received_group_message_funcs_count; ++i)
                robot.received_group_message_funcs[i](from_uin, number, &content);
            msg_content_array_free(&content);
        } else {
            /* unknown poll_type: archive the raw item for later inspection */
            bson_t document;
            bson_t content;
            bson_error_t error;
            time_t t;
            char* ptr = cJSON_PrintUnformatted(cjson_current);
            mongoc_collection_t* collection = mongoc_database_get_collection(robot.mongoc_database, "unprocessed");

            time(&t);
            if (!bson_init_from_json(&content, ptr, strlen(ptr), &error)) {
                MONGOC_WARNING("%s\n", error.message);
                free(ptr);                             /* was leaked */
                mongoc_collection_destroy(collection); /* was leaked */
                return;
            }
            free(ptr); /* bson_init_from_json copies; the string is no longer needed */

            bson_init(&document);
            BSON_APPEND_TIME_T(&document, "time", t);
            BSON_APPEND_DOCUMENT(&document, "content", &content);
            if (!mongoc_collection_insert(collection, MONGOC_INSERT_NONE, &document, NULL, &error))
                MONGOC_WARNING("%s\n", error.message);

            bson_destroy(&document);
            bson_destroy(&content);
            mongoc_collection_destroy(collection); /* was leaked */
        }
    }
}