/*---------------------------------------------------------------------------
 *
 * _change_stream_init --
 *
 *       Called after @stream has the collection name, database name, read
 *       preferences, and read concern set. Creates the change streams
 *       cursor.
 *
 *--------------------------------------------------------------------------
 */
void
_change_stream_init (mongoc_change_stream_t *stream,
                     const bson_t *pipeline,
                     const bson_t *opts)
{
   BSON_ASSERT (pipeline);

   /* Negative sentinels mean "not set"; overwritten below once the user
    * options are parsed. */
   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /* On parse failure stream->err is populated; return early and leave the
    * stream in an error state for the caller to observe. */
   if (!_mongoc_change_stream_opts_parse (
          stream->client, opts, &stream->opts, &stream->err)) {
      return;
   }

   stream->full_document = BCON_NEW ("fullDocument", stream->opts.fullDocument);

   /* Carry a user-supplied resume token through to the $changeStream stage. */
   if (!bson_empty (&(stream->opts.resumeAfter))) {
      bson_append_document (
         &stream->resume_token, "resumeAfter", 11, &(stream->opts.resumeAfter));
   }

   _mongoc_timestamp_set (&stream->operation_time,
                          &(stream->opts.startAtOperationTime));

   stream->batch_size = stream->opts.batchSize;
   stream->max_await_time_ms = stream->opts.maxAwaitTimeMS;

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead. */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         if (!BSON_APPEND_VALUE (&stream->pipeline_to_append,
                                 "pipeline",
                                 bson_iter_value (&iter))) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   /* Only create the server-side cursor if nothing above set an error. */
   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }
}
/* Exercise mongo_insert_batch() around a unique-index violation, both with
 * and without MONGO_CONTINUE_ON_ERROR, verifying document counts. */
void test_batch_insert_with_continue( mongo *conn ) {
    bson *objs[5];
    bson *objs2[5];
    bson empty;
    int i;

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );
    /* Unique index on "n" makes duplicate values fail server-side. */
    mongo_create_simple_index( conn, TEST_NS, "n", MONGO_INDEX_UNIQUE, NULL );

    /* First batch: n = 0..4, all unique. */
    for( i=0; i<5; i++ ) {
        objs[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );

    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_empty( &empty ) ) == 5 );

    /* Add one duplicate value for n. */
    objs2[0] = bson_malloc( sizeof( bson ) );
    bson_init( objs2[0] );
    bson_append_int( objs2[0], "n", 1 );
    bson_finish( objs2[0] );

    /* Add n for 6 - 9. */
    for( i = 1; i < 5; i++ ) {
        objs2[i] = bson_malloc( sizeof( bson ) );
        bson_init( objs2[i] );
        bson_append_int( objs2[i], "n", i + 5 );
        bson_finish( objs2[i] );
    }

    /* Without continue on error, will fail immediately.
     * NOTE(review): the call itself still returns MONGO_OK -- presumably
     * because the insert is unacknowledged (NULL write concern, 0 flags);
     * the unchanged count below is what proves the server aborted. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
        NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_empty( &empty ) ) == 5 );

    /* With continue on error, will insert four documents. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5,
        NULL, MONGO_CONTINUE_ON_ERROR ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_empty( &empty ) ) == 9 );

    /* Release all documents from both batches. */
    for( i=0; i<5; i++ ) {
        bson_destroy( objs2[i] );
        bson_free( objs2[i] );
        bson_destroy( objs[i] );
        bson_free( objs[i] );
    }
}
/* Build the optional filter and sort documents from the migration arguments
 * and open a query cursor (_gCursor) over _gCollection.
 * Returns SDB_OK on success; SDB_DMS_EOC (end of collection) is passed
 * through without logging; other failures are logged and returned. */
INT32 migExport::_query()
{
   INT32 rc = SDB_OK ;
   bson condition ;
   bson sort ;
   bson_init ( &condition ) ;
   bson_init ( &sort ) ;

   /* Parse the user-supplied JSON filter, if any. */
   if( _pMigArg->pFiter )
   {
      if ( !jsonToBson2 ( &condition, _pMigArg->pFiter, 0, 1 ) )
      {
         rc = SDB_INVALIDARG ;
         ossPrintf ( "fiter format error"OSS_NEWLINE ) ;
         PD_LOG ( PDERROR, "fiter format error" ) ;
         goto error ;
      }
   }
   else
   {
      bson_empty( &condition ) ;
   }

   /* Parse the user-supplied JSON sort specification, if any. */
   if( _pMigArg->pSort )
   {
      if ( !jsonToBson2 ( &sort, _pMigArg->pSort, 0, 1 ) )
      {
         rc = SDB_INVALIDARG ;
         ossPrintf ( "sort format error"OSS_NEWLINE ) ;
         PD_LOG ( PDERROR, "sort format error" ) ;
         goto error ;
      }
   }
   else
   {
      bson_empty( &sort ) ;
   }

   /* Skip 0, no limit (-1): stream the whole matching collection. */
   rc = sdbQuery ( _gCollection, &condition, NULL, &sort, NULL,
                   0, -1, &_gCursor ) ;
   if ( rc )
   {
      if ( SDB_DMS_EOC != rc )
      {
         PD_LOG ( PDERROR, "Failed to query collection, rc = %d", rc ) ;
         goto error ;
      }
      else
      {
         /* Empty collection: skip the error log, still return rc. */
         goto done ;
      }
   }
done:
   /* Single cleanup point; error label funnels back through here. */
   bson_destroy ( &sort ) ;
   bson_destroy ( &condition ) ;
   return rc ;
error:
   goto done ;
}
static void
test_bson_initializer (void)
{
   bson_t doc = BSON_INITIALIZER;

   /* A statically initialized document starts out with no fields. */
   assert (bson_empty (&doc));

   /* Appending a single field must make it non-empty. */
   bson_append_bool (&doc, "foo", -1, TRUE);
   assert (!bson_empty (&doc));

   bson_destroy (&doc);
}
int test_bson_iterator( void ) {
    bson doc[1];
    bson_iterator it[1];

    /* Walking an empty document: init, next, type, and find must all be
     * safe no-ops. */
    bson_iterator_init( it, bson_empty( doc ) );
    bson_iterator_next( it );
    bson_iterator_type( it );

    bson_find( it, bson_empty( doc ), "foo" );

    return 0;
}
/* Dump every { key, value } document from database @db into a newly
 * allocated dlist of keyvalue_t entries.  Returns NULL when @db is NULL or
 * nothing was collected; the caller owns the returned list. */
dlist_t *mongodb_dump_keyvalue(bot_t * bot, char *db)
{
	dlist_t *dl, *dptr;
	keyvalue_t *keyvalue_ptr;
	mongo_cursor *cursor;
	bson b;
	bson_iterator bi;
	char *key;
	char *value;

	dl = dptr = NULL;
	cursor = NULL;

	if (!db)
		return NULL;

	debug(bot, "mongodb_dump_keyvalue: Entered\n");

	/* Empty query + empty field selector: fetch every document whole. */
	cursor =
	    mongo_find(&gi->mongo_conn, db, bson_empty(&b), bson_empty(&b), 0, 0,
		       0);

	while (mongo_cursor_next(cursor) == MONGO_OK) {
		key = value = NULL;

		if (bson_find(&bi, mongo_cursor_bson(cursor), "key")) {
			key = (char *)bson_iterator_string(&bi);
		}
		if (bson_find(&bi, mongo_cursor_bson(cursor), "value")) {
			value = (char *)bson_iterator_string(&bi);
		}
		/* Skip documents that carry neither field. */
		if (!key && !value)
			continue;

		keyvalue_ptr = (keyvalue_t *) calloc(1, sizeof(keyvalue_t));
		/* Duplicate: the iterator strings die with the cursor. */
		if (key)
			keyvalue_ptr->key = strdup(key);
		if (value)
			keyvalue_ptr->value = strdup(value);

		dlist_Dinsert_after(&dl, keyvalue_ptr);
	}

	mongo_cursor_destroy(cursor);

	return dl;
}
/* Build and send the initial OP_QUERY wire message for @cursor, then read
 * the server reply into cursor->reply.  Returns MONGO_OK or MONGO_ERROR. */
static int mongo_cursor_op_query( mongo_cursor *cursor ) {
    int res;
    bson empty;
    char *data;
    mongo_message *mm;

    /* Set up default values for query and fields, if necessary.
     * NOTE(review): bson_empty(&empty) yields a pointer tied to this stack
     * frame, and it is stored into cursor->query / cursor->fields which
     * outlive this call -- confirm no later code dereferences them. */
    if( ! cursor->query )
        cursor->query = bson_empty( &empty );
    else if( mongo_cursor_bson_valid( cursor, cursor->query ) != MONGO_OK )
        return MONGO_ERROR;

    if( ! cursor->fields )
        cursor->fields = bson_empty( &empty );
    else if( mongo_cursor_bson_valid( cursor, cursor->fields ) != MONGO_OK )
        return MONGO_ERROR;

    /* Size the message exactly; must match the appends below byte-for-byte. */
    mm = mongo_message_create( 16 + /* header */
                               4 + /*  options */
                               strlen( cursor->ns ) + 1 + /* ns */
                               4 + 4 + /* skip,return */
                               bson_size( cursor->query ) +
                               bson_size( cursor->fields ) ,
                               0 , 0 , MONGO_OP_QUERY );

    data = &mm->data;
    data = mongo_data_append32( data , &cursor->options );
    data = mongo_data_append( data , cursor->ns , strlen( cursor->ns ) + 1 );
    data = mongo_data_append32( data , &cursor->skip );
    data = mongo_data_append32( data , &cursor->limit );
    data = mongo_data_append( data , cursor->query->data , bson_size( cursor->query ) );
    if ( cursor->fields )
        data = mongo_data_append( data , cursor->fields->data , bson_size( cursor->fields ) );

    /* Fires when the size computed above disagrees with the data appended. */
    bson_fatal_msg( ( data == ( ( char * )mm ) + mm->head.len ), "query building fail!" );

    res = mongo_message_send( cursor->conn , mm );
    if( res != MONGO_OK ) {
        return MONGO_ERROR;
    }

    res = mongo_read_response( cursor->conn, ( mongo_reply ** )&( cursor->reply ) );
    if( res != MONGO_OK ) {
        return MONGO_ERROR;
    }

    cursor->seen += cursor->reply->fields.num;
    cursor->flags |= MONGO_CURSOR_QUERY_SENT;
    return MONGO_OK;
}
/* the caller knows either a client or server error has occurred. * `reply` contains the server reply or an empty document. */ static bool _is_resumable_error (const bson_t *reply) { bson_error_t error = {0}; /* Change Streams Spec resumable criteria: "any error encountered which is * not a server error (e.g. a timeout error or network error)" */ if (bson_empty (reply)) { return true; } if (_mongoc_cmd_check_ok (reply, MONGOC_ERROR_API_VERSION_2, &error)) { return true; } /* Change Streams Spec resumable criteria: "a server error response with an * error message containing the substring 'not master' or 'node is * recovering' */ if (strstr (error.message, "not master") || strstr (error.message, "node is recovering")) { return true; } /* Change Streams Spec resumable criteria: "any server error response from a * getMore command excluding those containing the following error codes" */ switch (error.code) { case 11601: /* Interrupted */ case 136: /* CappedPositionLost */ case 237: /* CursorKilled */ case MONGOC_ERROR_QUERY_FAILURE: /* error code omitted */ return false; default: return true; } }
/* Score @node_tags against @read_tags.  Returns the key count of read_tags
 * when any UTF-8 tag matches the node, -1 when read_tags is non-empty but
 * nothing matches, and 0 when read_tags is empty (all nodes rank equal). */
static int
_score_tags (const bson_t *read_tags,
             const bson_t *node_tags)
{
   uint32_t len;
   bson_iter_t iter;
   const char *key;
   const char *str;
   int count;
   int i;

   bson_return_val_if_fail(read_tags, -1);
   bson_return_val_if_fail(node_tags, -1);

   count = bson_count_keys(read_tags);

   if (!bson_empty(read_tags) && bson_iter_init(&iter, read_tags)) {
      /* NOTE(review): `i` counts down but is never read, so the score
       * returned is always the full key count regardless of which tag
       * matched -- confirm this is intended. */
      for (i = count; bson_iter_next(&iter); i--) {
         if (BSON_ITER_HOLDS_UTF8(&iter)) {
            key = bson_iter_key(&iter);
            str = bson_iter_utf8(&iter, &len);
            if (_contains_tag(node_tags, key, str, len)) {
               return count;
            }
         }
      }
      return -1;
   }

   return 0;
}
/* Allocate a new, empty BSON document and hand ownership to the caller
 * through @b.  On allocation failure *b is set to NULL so the caller can
 * detect the error (previously the malloc result was used unchecked). */
EXPORT void mongo_bson_empty(struct bson_** b)
{
    bson empty;
    bson* b_ = (bson*)malloc(sizeof(bson));

    if (b_ == NULL) {   /* fix: unchecked malloc could crash bson_copy */
        *b = NULL;
        return;
    }

    bson_empty(&empty);
    bson_copy(b_, &empty);
    *b = (struct bson_*)b_;
}
/* Implements MongoDB\Driver\ReadPreference::__debugInfo(): expose the read
 * mode, tag sets, and maxStalenessMS of the wrapped mongoc_read_prefs_t as
 * a PHP array. */
Array HHVM_METHOD(MongoDBDriverReadPreference, __debugInfo)
{
	MongoDBDriverReadPreferenceData* data = Native::data<MongoDBDriverReadPreferenceData>(this_);
	Array retval = Array::Create();
	Variant v_tags;

	const bson_t *tags = mongoc_read_prefs_get_tags(data->m_read_preference);
	mongoc_read_mode_t mode = mongoc_read_prefs_get_mode(data->m_read_preference);

	/* Map the libmongoc mode enum to its user-visible string. */
	switch (mode) {
		case MONGOC_READ_PRIMARY: retval.set(s_mode, "primary"); break;
		case MONGOC_READ_PRIMARY_PREFERRED: retval.set(s_mode, "primaryPreferred"); break;
		case MONGOC_READ_SECONDARY: retval.set(s_mode, "secondary"); break;
		case MONGOC_READ_SECONDARY_PREFERRED: retval.set(s_mode, "secondaryPreferred"); break;
		case MONGOC_READ_NEAREST: retval.set(s_mode, "nearest"); break;
		default: /* Do nothing */ break;
	}

	/* Only report tag sets when some were actually configured. */
	if (!bson_empty(tags)) {
		hippo_bson_conversion_options_t options = HIPPO_TYPEMAP_INITIALIZER;

		BsonToVariantConverter convertor(bson_get_data(tags), tags->len, options);
		convertor.convert(&v_tags);
		retval.set(s_tags, v_tags.toArray());
	}

	/* 0 is treated as "unset", so it is omitted from the debug output. */
	if (mongoc_read_prefs_get_max_staleness_ms(data->m_read_preference) != 0) {
		retval.set(s_maxStalenessMS, mongoc_read_prefs_get_max_staleness_ms(data->m_read_preference));
	}

	return retval;
}
/* Test read timeout by causing the * server to sleep for 10s on a query. */ int test_read_timeout( void ) { mongo conn[1]; bson b, obj, out, fields; int res; CONN_CLIENT_TEST; bson_init( &b ); bson_append_code( &b, "$where", "sleep( 10 * 1000 );"); bson_finish( &b ); bson_init( &obj ); bson_append_string( &obj, "foo", "bar"); bson_finish( &obj ); res = mongo_insert( conn, "test.foo", &obj, NULL ); /* Set the connection timeout here. */ if( mongo_set_op_timeout( conn, 1000 ) != MONGO_OK ) { printf("Could not set socket timeout!."); exit(1); } res = mongo_find_one( conn, "test.foo", &b, bson_empty(&fields), &out ); ASSERT( res == MONGO_ERROR ); ASSERT( conn->err == MONGO_IO_ERROR ); ASSERT( conn->errcode == WSAETIMEDOUT ); return 0; }
/* Run @command against "<db>.$cmd" and report success based on the "ok"
 * field of the server response.  On success, if @out is non-NULL the
 * response is transferred to *out (caller must bson_destroy it); otherwise
 * the response is destroyed here.  Returns MONGO_OK or MONGO_ERROR. */
MONGO_EXPORT int mongo_run_command( mongo *conn, const char *db, const bson *command,
                                    bson *out ) {

    bson response = {NULL, 0};
    bson fields;
    int sl = strlen( db );
    char *ns = bson_malloc( sl + 5 + 1 ); /* ".$cmd" + nul */
    int res, success = 0;

    strcpy( ns, db );
    strcpy( ns+sl, ".$cmd" );

    res = mongo_find_one( conn, ns, command, bson_empty( &fields ), &response );
    bson_free( ns );

    if( res != MONGO_OK )
        return MONGO_ERROR;
    else {
        bson_iterator it;
        if( bson_find( &it, &response, "ok" ) )
            success = bson_iterator_bool( &it );

        if( !success ) {
            bson_destroy( &response ); /* fix: response was leaked here */
            conn->err = MONGO_COMMAND_FAILED;
            return MONGO_ERROR;
        } else {
            if( out )
                *out = response;
            else
                bson_destroy( &response ); /* fix: leaked when out == NULL */
            return MONGO_OK;
        }
    }
}
/* the caller knows either a client or server error has occurred.
 * `reply` contains the server reply or an empty document. */
static bool
_is_resumable_error (const bson_t *reply)
{
   const char *msg = "";
   uint32_t code;

   /* Change Streams Spec resumable criteria: "any error encountered which is
    * not a server error (e.g. a timeout error or network error)" */
   if (bson_empty (reply)) {
      return true;
   }

   /* If the reply cannot be parsed as a server error, it is not one. */
   if (!_mongoc_parse_error_reply (reply, false /* check_wce */, &code, &msg)) {
      return true;
   }

   /* Change Streams Spec resumable criteria: "a server error response with an
    * error message containing the substring 'not master' or 'node is
    * recovering' */
   if (strstr (msg, "not master") != NULL ||
       strstr (msg, "node is recovering") != NULL) {
      return true;
   }

   /* Change Streams Spec resumable criteria: "any server error response from a
    * getMore command excluding those containing the following error codes" */
   if (code == 11601 /* Interrupted */ ||
       code == 136 /* CappedPositionLost */ ||
       code == 237 /* CursorKilled */ ||
       code == 0 /* error code omitted */) {
      return false;
   }

   return true;
}
/* Copy the "metadata" subdocument of @gfile into @out, or an empty
 * document when the file carries no metadata. */
MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ) {
    bson_iterator it;

    if ( !bson_find( &it, gfile->meta, "metadata" ) ) {
        bson_empty( out );
        return;
    }

    bson_iterator_subobject( &it, out );
}
/* Construct the aggregate command in cmd:
 * { aggregate: collname, pipeline: [], cursor: { batchSize: x } } */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   bson_append_utf8 (command,
                     "aggregate",
                     9,
                     stream->coll->collection,
                     stream->coll->collectionlen);
   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* Append the $changeStream stage */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, &stream->full_document);
   /* resume_token holds the user's "resumeAfter" document when supplied. */
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1; /* array key "0" is the $changeStream stage */
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* The user pipeline may consist of invalid stages or non-documents.
          * Append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
/* Extract the scope document of a code-with-scope element into @scope;
 * any other element type yields an empty document. */
void bson_iterator_code_scope(const bson_iterator * i, bson * scope){
    int code_len;

    if (bson_iterator_type(i) != bson_codewscope){
        bson_empty(scope);
        return;
    }

    /* Element layout: int32 total size, int32 code length, code string,
     * then the scope document. */
    bson_little_endian32(&code_len, bson_iterator_value(i)+4);
    bson_init(scope, (void*)(bson_iterator_value(i)+8+code_len), 0);
}
/* Extract the scope document of a BSON_CODEWSCOPE element into @scope;
 * non code-with-scope elements produce an empty document. */
void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) {
    int code_len;

    if ( bson_iterator_type( i ) != BSON_CODEWSCOPE ) {
        bson_empty( scope );
        return;
    }

    /* Element layout: int32 total size, int32 code length, code string,
     * then the scope document. */
    bson_little_endian32( &code_len, bson_iterator_value( i )+4 );
    bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) );
}
/* Establish the SequoiaDB connection (_gConnection), optionally over SSL,
 * and apply the preferred-instance session attribute when one was given.
 * Returns SDB_OK on success or an SDB_* error code. */
INT32 migExport::_connectDB()
{
   INT32 rc = SDB_OK ;
   bson obj ;
   bson_init( &obj ) ;
   // connection is established
#ifdef SDB_SSL
   if ( _pMigArg->useSSL )
   {
      rc = sdbSecureConnect ( _pMigArg->pHostname, _pMigArg->pSvcname,
                              _pMigArg->pUser, _pMigArg->pPassword,
                              &_gConnection ) ;
   }
   else
#endif
   {
      rc = sdbConnect ( _pMigArg->pHostname, _pMigArg->pSvcname,
                        _pMigArg->pUser, _pMigArg->pPassword,
                        &_gConnection ) ;
   }
   if ( rc )
   {
      PD_LOG ( PDERROR, "Failed to connect database %s:%s, rc = %d",
               _pMigArg->pHostname, _pMigArg->pSvcname, rc ) ;
      goto error ;
   }
   // set prefer instance
   if( _pMigArg->pPrefInst )
   {
      if ( FALSE == jsonToBson2 ( &obj, _pMigArg->pPrefInst, 0, 1 ) )
      {
         rc = SDB_INVALIDARG ;
         /* fix: "prefered" -> "preferred" in the user-facing messages */
         ossPrintf ( "Error: preferred instance's format error"OSS_NEWLINE ) ;
         PD_LOG ( PDERROR, "preferred instance's format error" ) ;
         goto error ;
      }
      rc = sdbSetSessionAttr ( _gConnection, &obj ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG ( PDERROR, "Failed to set session attribute, rc = %d", rc ) ;
         goto error ;
      }
   }
   else
   {
      bson_empty( &obj ) ;
   }
done:
   /* Single cleanup point for the bson built above. */
   bson_destroy ( &obj ) ;
   return rc ;
error:
   goto done ;
}
/* Look up the RADIUS options document for @username (optionally constrained
 * by MAC address and an enabled flag) in MongoDB, and copy the matching
 * password into @password via find_in_array().
 * Returns 1 on a successful lookup, 0 on miss or connection error. */
int find_radius_options(rlm_mongo_t *data, char *username, char *mac, char *password)
{
	bson_buffer bb;
	bson query;
	bson field;
	bson result;
	bson_iterator it;

	bson_buffer_init(&bb);
	bson_append_string(&bb, data->search_field, username);

	/* Optional constraints, enabled through module configuration. */
	if (strcmp(data->mac_field, "") != 0) {
		bson_append_string(&bb, data->mac_field, mac);
	}
	if (strcmp(data->enable_field, "") != 0) {
		bson_append_bool(&bb, data->enable_field, 1);
	}

	bson_from_buffer(&query, &bb);
	bson_buffer_destroy(&bb);

	bson_empty(&field);
	bson_empty(&result);

	MONGO_TRY{
		if (mongo_find_one(conn, data->base, &query, &field, &result) == 0) {
			bson_destroy(&query);  /* fix: query was leaked on this path */
			return 0;
		}
	}MONGO_CATCH{
		bson_destroy(&query);      /* fix: query was leaked on this path */
		mongo_start(data);
		return 0;
	}

	bson_iterator_init(&it, result.data);

	find_in_array(&it, data->username_field, username, data->password_field, password);

	bson_destroy(&result);
	bson_destroy(&query);          /* fix: query was leaked on success too */
	return 1;
}
/* Score @node_tags against the ordered tag sets in @read_tags.  Returns
 * the remaining preference count of the first set the node satisfies
 * (higher = earlier set = more preferred), -1 when no set matches, or 0
 * when no read tags were provided (all nodes rank equally). */
static int
_score_tags (const bson_t *read_tags,
             const bson_t *node_tags)
{
   uint32_t len;
   bson_iter_t iter;
   bson_iter_t sub_iter;
   const char *key;
   const char *str;
   int count;
   bool node_matches_set;

   bson_return_val_if_fail(read_tags, -1);
   bson_return_val_if_fail(node_tags, -1);

   count = bson_count_keys(read_tags);

   /* Execute this block if read tags were provided, else bail and return 0
    * (all nodes equal) */
   if (!bson_empty(read_tags) && bson_iter_init(&iter, read_tags)) {

      /*
       * Iterate over array of read tag sets provided (each element is a tag set)
       * Tag sets are provided in order of preference so return the count of the
       * first set that matches the node or -1 if no set matched the node.
       */
      while (count && bson_iter_next(&iter)) {
         if (BSON_ITER_HOLDS_DOCUMENT(&iter) && bson_iter_recurse(&iter, &sub_iter)) {
            node_matches_set = true;

            /* Iterate over the key/value pairs (tags) in the current set */
            while (bson_iter_next(&sub_iter) && BSON_ITER_HOLDS_UTF8(&sub_iter)) {
               key = bson_iter_key(&sub_iter);
               str = bson_iter_utf8(&sub_iter, &len);

               /* If any of the tags do not match, this node cannot satisfy
                * this tag set. */
               if (!_contains_tag(node_tags, key, str, len)) {
                  node_matches_set = false;
                  break;
               }
            }

            /* This set matched, return the count as the score */
            if (node_matches_set) {
               return count;
            }

            /* Decrement the score and try to match the next set. */
            count--;
         }
      }
      return -1;
   }

   return 0;
}
/* Drop the test database and re-create it by inserting one empty document,
 * then verify no error was reported.  Exits the process on drop failure. */
static void clean( void ) {
    bson doc;

    if ( mongo_cmd_drop_db( conn, DB ) != MONGO_OK ) {
        printf( "failed to drop db\n" );
        exit( 1 );
    }

    /* create the db */
    mongo_insert( conn, DB ".creation", bson_empty( &doc ), NULL );
    ASSERT( !mongo_cmd_get_last_error( conn, DB, NULL ) );
}
/* Return the "metadata" subdocument of @gfile by value, or an empty
 * document when the file carries no metadata. */
bson gridfile_get_metadata( gridfile *gfile ) {
    bson metadata;
    bson_iterator it;

    if ( !bson_find( &it, gfile->meta, "metadata" ) ) {
        bson_empty( &metadata );
    } else {
        bson_iterator_subobject( &it, &metadata );
    }

    return metadata;
}
/* Receive the OP_QUERY that a find sends to a mock standalone server and
 * verify its flags, skip, n_return, query document, and projection.
 * Returns the captured request for further inspection by the caller. */
static request_t *
_check_op_query (mock_server_t *server, test_collection_find_with_opts_t *test_data)
{
   mongoc_query_flags_t flags;
   request_t *request;
   const bson_t *doc;
   bson_iter_t iter;
   uint32_t len;
   const uint8_t *data;
   bson_t query;

   /* Server Selection Spec: all queries to standalone set slaveOk. */
   flags = test_data->expected_flags | MONGOC_QUERY_SLAVE_OK;

   request = mock_server_receives_query (server,
                                         "db.collection",
                                         flags,
                                         test_data->expected_skip,
                                         test_data->expected_n_return,
                                         test_data->expected_op_query,
                                         test_data->expected_op_query_projection);

   ASSERT (request);

   /* check that nothing unexpected is in $query */
   if (bson_empty (test_data->filter_bson)) {
      doc = request_get_doc (request, 0);
      if (bson_iter_init_find (&iter, doc, "$query")) {
         ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
         bson_iter_document (&iter, &len, &data);
         /* View the embedded document in place, without copying. */
         bson_init_static (&query, data, (size_t) len);
         ASSERT (bson_empty (&query));
      }
   }

   return request;
}
/* Interactive replica-set reconnect test: connect to @set_name, prompt the
 * operator to break the connection, then retry finds (up to 30 attempts,
 * 2s apart) until reconnection succeeds.
 * Returns MONGO_ERROR if the initial connect fails, -1 after 30 failed
 * reconnect attempts.
 * NOTE(review): after a successful find the do/while(1) loop keeps querying
 * forever -- presumably intended for this manual test; confirm. */
int test_reconnect( const char *set_name ) {
    mongo conn[1];
    int res = 0;
    int e = 0;
    bson b;

    INIT_SOCKETS_FOR_WINDOWS;

    mongo_replset_init( conn, set_name );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 );

    if( ( mongo_replset_connect( conn ) != MONGO_OK ) ) {
        mongo_destroy( conn );
        return MONGO_ERROR;
    } else {
        fprintf( stderr, "Disconnect now:\n" );
        sleep( 10 );
        e = 1;
        do {
            /* Empty query/fields and NULL out: only the I/O result matters. */
            res = mongo_find_one( conn, "foo.bar", bson_empty( &b ), bson_empty( &b ), NULL );
            if( res == MONGO_ERROR && conn->err == MONGO_IO_ERROR ) {
                sleep( 2 );
                if( e++ < 30 ) {
                    fprintf( stderr, "Attempting reconnect %d.\n", e );
                    mongo_reconnect( conn );
                } else {
                    fprintf( stderr, "Fail.\n" );
                    return -1;
                }
            }
        } while( 1 );
    }

    return 0;
}
/* Authenticate the configured MongoDB connection (when user/pass are set),
 * remember the credentials for reconnects, and issue a throwaway query to
 * verify the server accepts commands.  Returns NGX_OK or NGX_ERROR. */
static ngx_int_t ngx_http_mongo_authenticate(ngx_log_t *log, ngx_http_gridfs_loc_conf_t *gridfs_loc_conf) {
    ngx_http_mongo_connection_t* mongo_conn;
    ngx_http_mongo_auth_t *mongo_auth;
    mongo_cursor *cursor = NULL;
    bson empty;
    char *test;
    int error;

    mongo_conn = ngx_http_get_mongo_connection( gridfs_loc_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_loc_conf->mongo);
        /* fix: previously fell through and dereferenced the NULL pointer */
        return NGX_ERROR;
    }

    // Authenticate
    if (gridfs_loc_conf->user.data != NULL && gridfs_loc_conf->pass.data != NULL) {
        if (mongo_cmd_authenticate( &mongo_conn->conn,
                                    (const char*)gridfs_loc_conf->db.data,
                                    (const char*)gridfs_loc_conf->user.data,
                                    (const char*)gridfs_loc_conf->pass.data )
            != MONGO_OK) {
            ngx_log_error(NGX_LOG_ERR, log, 0,
                          "Invalid mongo user/pass: %s/%s",
                          gridfs_loc_conf->user.data,
                          gridfs_loc_conf->pass.data);
            return NGX_ERROR;
        }

        /* Remember the credentials so reconnects can re-authenticate. */
        mongo_auth = ngx_array_push(mongo_conn->auths);
        if (mongo_auth == NULL) {
            /* fix: ngx_array_push can return NULL; previously unchecked */
            return NGX_ERROR;
        }
        mongo_auth->db = gridfs_loc_conf->db;
        mongo_auth->user = gridfs_loc_conf->user;
        mongo_auth->pass = gridfs_loc_conf->pass;
    }

    // Run a test command to test authentication.
    test = (char*)malloc( gridfs_loc_conf->db.len + sizeof(".test"));
    if (test == NULL) {
        /* fix: malloc failure was previously unchecked */
        return NGX_ERROR;
    }
    ngx_cpystrn((u_char*)test, (u_char*)gridfs_loc_conf->db.data, gridfs_loc_conf->db.len+1);
    ngx_cpystrn((u_char*)(test+gridfs_loc_conf->db.len),(u_char*)".test", sizeof(".test"));
    bson_empty(&empty);
    cursor = mongo_find(&mongo_conn->conn, test, &empty, NULL, 0, 0, 0);
    error = mongo_cmd_get_last_error(&mongo_conn->conn, (char*)gridfs_loc_conf->db.data, NULL);
    free(test);
    mongo_cursor_destroy(cursor);
    if (error) {
        ngx_log_error(NGX_LOG_ERR, log, 0, "Authentication Required");
        return NGX_ERROR;
    }

    return NGX_OK;
}
/* Extract the scope document of a BSON_CODEWSCOPE element into @scope;
 * any other element type produces an empty document. */
MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) {
    int code_len;

    check_mongo_object( (void*)scope );
    check_mongo_object( (void*)i );

    if ( bson_iterator_type( i ) != BSON_CODEWSCOPE ) {
        bson_empty( scope );
        return;
    }

    /* Element layout: int32 total size, int32 code length, code string,
     * then the scope document. */
    bson_little_endian32( &code_len, bson_iterator_value( i )+4 );
    bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) );
    _bson_reset( scope );
    scope->finished = 1;
}
/* Score @node for "nearest" reads: every node scores 1 when no read tags
 * are configured, otherwise the tag-set score decides. */
static int
_mongoc_read_prefs_score_nearest (const mongoc_read_prefs_t *read_prefs,
                                  const mongoc_cluster_node_t *node)
{
   bson_return_val_if_fail(read_prefs, -1);
   bson_return_val_if_fail(node, -1);

   if (bson_empty(&read_prefs->tags)) {
      return 1;
   }

   return _score_tags(&read_prefs->tags, &node->tags);
}
/* Run @command against the "<db>.$cmd" pseudo-collection, storing the
 * server response in @out.  Returns the result of mongo_find_one(). */
int mongo_run_command( mongo *conn, const char *db, bson *command,
                       bson *out ) {

    bson fields;
    int res;
    int sl = strlen( db );
    char *ns = bson_malloc( sl + 5 + 1 ); /* ".$cmd" + nul */

    /* Build "<db>.$cmd", the namespace commands are issued against. */
    strcpy( ns, db );
    strcpy( ns + sl, ".$cmd" );

    res = mongo_find_one( conn, ns, command, bson_empty( &fields ), out );
    bson_free( ns );

    return res;
}
/* Scanning 10000 documents forces several OP_GET_MORE round trips; verify
 * the full count is seen and the drained cursor reports exhaustion. */
int test_multiple_getmore( mongo *conn ) {
    mongo_cursor *cursor;
    bson empty_query;
    int n_docs = 0;

    remove_sample_data( conn );
    create_capped_collection( conn );
    insert_sample_data( conn, 10000 );

    cursor = mongo_find( conn, "test.cursors", bson_empty( &empty_query ),
                         bson_empty( &empty_query ), 0, 0, 0 );

    while( mongo_cursor_next( cursor ) == MONGO_OK )
        n_docs++;

    ASSERT( n_docs == 10000 );

    /* Once drained, the cursor must report exhaustion, not more data. */
    ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR );
    ASSERT( cursor->err == MONGO_CURSOR_EXHAUSTED );

    mongo_cursor_destroy( cursor );
    remove_sample_data( conn );
    return 0;
}