static mongo_sync_connection *
_recovery_cache_connect (mongo_sync_conn_recovery_cache *cache,
                         const gchar *address, gint port, gboolean slaveok)
{
  mongo_sync_connection *s;
  mongo_connection *c;

  c = mongo_connect (address, port);
  if (!c)
    return NULL;

  s = g_realloc (c, sizeof (mongo_sync_connection));
  _mongo_sync_conn_init (s, slaveok);

  if (!cache)
    {
      s->rs.seeds = g_list_append (NULL, g_strdup_printf ("%s:%d", address, port));
    }
  else
    {
      _recovery_cache_load (cache, s);
    }

  return s;
}
int test_namespace_validation_on_insert( void ) {
    mongo conn[1];
    bson b[1], b2[1];
    bson *objs[2];

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    bson_init( b );
    bson_append_int( b, "foo", 1 );
    bson_finish( b );

    ASSERT( mongo_insert( conn, "tet.fo$o", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );
    mongo_clear_errors( conn );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "tet.fo$o", (const bson **)objs, 2, NULL, 0 )
            == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );

    return 0;
}
void
test_mongo_connection_set_timeout (void)
{
  mongo_connection c, *conn;

  c.fd = -1;

  ok (mongo_connection_set_timeout (NULL, 100) == FALSE,
      "mongo_connection_set_timeout() should fail with a NULL connection");
  ok (mongo_connection_set_timeout (&c, -1) == FALSE,
      "mongo_connection_set_timeout() should fail with a negative timeout");
  ok (mongo_connection_set_timeout (&c, 100) == FALSE,
      "mongo_connection_set_timeout() should fail with an invalid FD");

  begin_network_tests (0);

  conn = mongo_connect (config.primary_host, config.primary_port);

  /* No verification here, as some systems may or may not support this,
     thus, failing in a test is not fatal. */
  mongo_connection_set_timeout (conn, 100);

  mongo_disconnect (conn);

  end_network_tests ();
}
void test_large() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char buffer[LARGE];
    int64_t filesize = (int64_t)1024 * (int64_t)LARGE;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn, &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (int64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i = 0; i < 1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    /* Read the file using the streaming interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html" );
    fd = fopen("bigfile", "r");
    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    gridfs_destroy(gfs);
    mongo_destroy(conn);
}
static void *mongodb_open_conn(const DBConf *db_conf)
{
    MongoDBConf *conf = db_conf->mongodb;
    mongo_connection *conn;
    mongo_conn_return status;

    mongo_conf = conf;

    conn = gw_malloc(sizeof(mongo_connection));
    gw_assert(conn != NULL);

    info(0, "MongoDB: connecting to %s:%lu",
         octstr_get_cstr(conf->host), conf->port);

    status = mongo_connect(conn, octstr_get_cstr(conf->host), conf->port);

    switch (status) {
    case mongo_conn_success:
        info(0, "MongoDB: connected");
        break;
    case mongo_conn_bad_arg:
        error(0, "MongoDB: bad arguments");
        goto failed;
    case mongo_conn_no_socket:
        error(0, "MongoDB: no socket");
        goto failed;
    case mongo_conn_fail:
        error(0, "MongoDB: connection failed");
        goto failed;
    case mongo_conn_not_master:
        error(0, "MongoDB: not master");
        goto failed;
    case mongo_conn_bad_set_name:
        error(0, "MongoDB: bad set name");
        goto failed;
    case mongo_conn_cannot_find_primary:
        error(0, "MongoDB: cannot find primary");
        goto failed;
    }

    /*
    if (conf->username && conf->password &&
        !mongo_cmd_authenticate(conn, octstr_get_cstr(conf->database),
                                octstr_get_cstr(conf->username),
                                octstr_get_cstr(conf->password))) {
        error(0, "MongoDB: authentication failed");
        goto failed;
    }
    */

    return conn;

failed:
    if (conn != NULL) {
        mongo_destroy(conn);
        gw_free(conn);
    }
    return NULL;
}
void test_large() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char *buffer = malloc( LARGE );
    uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE;

    if( buffer == NULL ) {
        printf("Failed to allocate memory.");
        exit(1);
    }

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect 1\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (uint64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i = 0; i < 1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");

    gridfs_find_filename(gfs, "bigfile", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    /* Read the file using the streaming interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html" );
    fd = fopen("bigfile", "r");
    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );

    gridfs_find_filename(gfs, "bigfile-stream", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free(buffer); /* release the test buffer */
}
static char* ngx_http_mongo_host(ngx_conf_t* directive, ngx_command_t* cmd, void* void_conf) {
    mongo_connection_options options;
    ngx_http_gridfs_loc_conf_t* gridfs_conf = void_conf;
    ngx_str_t *value;
    char ip[16];
    char* current = ip;
    char* port;

    if (gridfs_conf->gridfs_conn->connected) {
        return "is duplicate";
    }

    value = directive->args->elts;

    if (value[1].len == 0) {
        gridfs_conf->gridfs_conn = NGX_CONF_UNSET_PTR;
        return NGX_CONF_OK;
    }

    /* Split the "host:port" argument into address and port. */
    port = (char*)(value[1].data);
    while ((*port) != ':') {
        *current = *port;
        current++;
        port++;
    }
    *current = '\0';
    port++;

    strcpy(options.host, ip);
    options.port = atoi(port);

    switch (mongo_connect( gridfs_conf->gridfs_conn, &options )) {
    case mongo_conn_success:
        break;
    case mongo_conn_bad_arg:
        ngx_conf_log_error(NGX_LOG_ERR, directive, 0,
                           "Mongo Exception: Bad Arguments");
        return NGX_CONF_ERROR;
    case mongo_conn_no_socket:
        ngx_conf_log_error(NGX_LOG_ERR, directive, 0,
                           "Mongo Exception: No Socket");
        return NGX_CONF_ERROR;
    case mongo_conn_fail:
        ngx_conf_log_error(NGX_LOG_ERR, directive, 0,
                           "Mongo Exception: Connection Failure");
        return NGX_CONF_ERROR;
    case mongo_conn_not_master:
        ngx_conf_log_error(NGX_LOG_ERR, directive, 0,
                           "Mongo Exception: Not Master");
        return NGX_CONF_ERROR;
    default:
        ngx_conf_log_error(NGX_LOG_ERR, directive, 0,
                           "Mongo Exception: Unknown Error");
        return NGX_CONF_ERROR;
    }

    return NGX_CONF_OK;
}
int mongo_start(rlm_mongo_t *data)
{
    if (mongo_connect(conn, data->ip, data->port)) {
        radlog(L_ERR, "rlm_mongodb: Failed to connect");
        return 0;
    }
    radlog(L_DBG, "Connected to MongoDB");
    return 1;
}
bool MongodbClient::Connect(const String &ip, int32 port)
{
    int32 status = mongo_connect(clientData->connection, ip.c_str(), port);
    if (MONGO_OK != status)
    {
        LogError(String("Connect"), clientData->connection->err);
    }
    return (MONGO_OK == status);
}
void db_establish_connection(const char* _db_name)
{
    int status = mongo_connect(db, "127.0.0.1", 27017);
    if (status != MONGO_OK) {
        printf("failed to connect mongo\n");
        exit(-1);
    }

    if (!_db_name) {
        printf("require db name\n");
        exit(-1);
    }
    db_name = (char*)_db_name;
}
int test_insert_limits( void ) {
    char version[10];
    mongo conn[1];
    int i;
    char key[10];
    bson b[1], b2[1];
    bson *objs[2];

    /* Test the default max BSON size. */
    mongo_init( conn );
    ASSERT( conn->max_bson_size == MONGO_DEFAULT_MAX_BSON_SIZE );

    /* We'll perform the full test if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' )
        return 0;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    bson_init( b );
    for( i = 0; i < 1200000; i++ ) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );
    ASSERT( mongo_insert( conn, "test.foo", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson **)objs, 2, NULL, 0 )
            == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    return 0;
}
SEXP rmongo_connect(SEXP mongo_conn) {
    mongo* conn = _checkMongo(mongo_conn);
    mongo_host_port hp;
    SEXP host = getAttrib(mongo_conn, sym_host);
    int len = LENGTH(host);
    int i;
    if (len == 0)
        error("No hosts defined\n");
    const char* name = CHAR(STRING_ELT(getAttrib(mongo_conn, sym_name), 0));
    if (name[0] == '\0') {
        /* No replica set name: try each host in turn until one connects. */
        for (i = 0; i < len; i++) {
            mongo_parse_host(CHAR(STRING_ELT(host, i)), &hp);
            if (mongo_connect(conn, hp.host, hp.port) == MONGO_OK)
                break;
        }
        if (i == len) {
            if (len == 1)
                Rprintf("Unable to connect to %s:%d, error code = %d\n", hp.host, hp.port, conn->err);
            else
                Rprintf("Unable to connect to any of the given hosts, error code = %d\n", conn->err);
            return mongo_conn;
        }
    }
    else {
        /* Replica set: seed every host, then connect to the set. */
        mongo_replset_init(conn, name);
        for (i = 0; i < len; i++) {
            mongo_parse_host(CHAR(STRING_ELT(host, i)), &hp);
            mongo_replset_add_seed(conn, hp.host, hp.port);
        }
        if (mongo_replset_connect(conn) != MONGO_OK)
            Rprintf("Unable to connect to replset\n");
    }
    int timeout = asInteger(getAttrib(mongo_conn, sym_timeout));
    if (timeout > 0)
        mongo_set_op_timeout(conn, timeout);
    SEXP username = getAttrib(mongo_conn, sym_username);
    if (CHAR(STRING_ELT(username, 0))[0] != '\0') {
        SEXP password = getAttrib(mongo_conn, install("password"));
        SEXP db = getAttrib(mongo_conn, install("db"));
        SEXP ret = mongo_authenticate(mongo_conn, username, password, db);
        if (!LOGICAL(ret)[0]) {
            mongo_disconnect(conn);
            Rprintf("Authentication failed.\n");
        }
    }
    return mongo_conn;
}
static apr_status_t fetch_mongodb_value(const char *host, int port,
                                        const char *userfield, const char *passwordfield,
                                        const char *collection, const char *user,
                                        char **value, apr_pool_t *pool)
{
    mongo_connection conn;
    mongo_connection_options *opts;
    mongo_conn_return mongo_status;
    bson query[1];
    bson *out;
    bson_buffer query_buf[1];
    bson_bool_t found;

    *value = NULL;

    opts = apr_palloc( pool, sizeof(mongo_connection_options) );
    strcpy( opts->host, host );
    opts->port = port;

    mongo_status = mongo_connect( pool, &conn, opts );
    if ( mongo_status != mongo_conn_success ) {
        char buf[120];
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
                      "couldn't connect to mongoDB - (%s)",
                      mongo_strerror( mongo_status, buf, sizeof(buf) ));
        return APR_EGENERAL;
    }

    bson_buffer_init( pool, query_buf );
    bson_append_string( query_buf, userfield, user );
    bson_from_buffer( query, query_buf );

    out = apr_palloc( pool, sizeof(bson) );
    found = mongo_find_one( &conn, collection, query, NULL, out );
    bson_destroy( query );

    if ( found ) {
        bson_iterator it;
        if (bson_find( &it, out, passwordfield )) {
            *value = apr_pstrdup( pool, bson_iterator_string(&it) );
        }
    }

    mongo_destroy( &conn );
    return APR_SUCCESS;
}
void
test_mongo_packet_recv (void)
{
  mongo_connection c, *conn;
  mongo_packet *p;
  bson *b;

  c.fd = -1;

  ok (mongo_packet_recv (NULL) == NULL,
      "mongo_packet_recv() fails with a NULL connection");
  ok (errno == ENOTCONN,
      "mongo_packet_recv() sets errno to ENOTCONN if connection is NULL");
  ok (mongo_packet_recv (&c) == NULL,
      "mongo_packet_recv() fails if the FD is less than zero");
  ok (errno == EBADF,
      "mongo_packet_recv() sets errno to EBADF if the FD is bad");

  begin_network_tests (2);

  b = bson_new ();
  bson_append_int32 (b, "getnonce", 1);
  bson_finish (b);

  p = mongo_wire_cmd_custom (42, config.db, 0, b);
  bson_free (b);

  conn = mongo_connect (config.primary_host, config.primary_port);

  mongo_packet_send (conn, p);
  mongo_wire_packet_free (p);

  ok ((p = mongo_packet_recv (conn)) != NULL,
      "mongo_packet_recv() works");
  mongo_wire_packet_free (p);

  close (conn->fd);
  sleep (3);

  ok (mongo_packet_recv (conn) == NULL,
      "mongo_packet_recv() fails on a closed socket");

  mongo_disconnect (conn);

  end_network_tests ();
}
static ngx_int_t ngx_http_mongodb_rest_init_worker(ngx_cycle_t* cycle) {
    mongo_connection_options opts[1];
    mongo_conn_return status;

    ngx_log_error(NGX_LOG_ERR, cycle->log, 0, "WORKER INIT");

    strcpy( opts->host, MONGO_HOST );
    opts->port = MONGO_PORT;

    status = mongo_connect( cached_connection, opts );
    if (status != mongo_conn_success) {
        return log_mongo_error(cycle->log, status);
    }

    return NGX_OK;
}
int main() {
    mongo conn[1];
    bson cmd[1];
    bson out[1];
    bson_iterator it[1];
    char version[10];
    const char *db = "test";
    const char *col = "c.capped";

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    mongo_cmd_drop_collection( conn, db, col, NULL );

    ASSERT( mongo_create_capped_collection( conn, db, col, 1024, 100, NULL ) == MONGO_OK );

    bson_init( cmd );
    bson_append_string( cmd, "collstats", col );
    bson_finish( cmd );

    ASSERT( mongo_run_command( conn, db, cmd, out ) == MONGO_OK );

    if( mongo_get_server_version( version ) != -1 ) {
        if( version[0] == '2' && version[2] >= '1' )
            ASSERT( bson_find( it, out, "capped" ) == BSON_BOOL );
        else
            ASSERT( bson_find( it, out, "capped" ) == BSON_INT );
    }

    ASSERT( bson_find( it, out, "max" ) == BSON_INT );

    bson_destroy( cmd );
    bson_destroy( out );

    mongo_cmd_drop_collection( conn, "test", col, NULL );
    mongo_cmd_drop_db( conn, db );
    mongo_destroy( conn );

    return 0;
}
bot_t *mongodb_init(bot_t *bot)
{
    int status;

    status = mongo_connect(&gi->mongo_conn, "127.0.0.1", 27017);
    if (status != MONGO_OK) {
        debug(bot, "mongodb_init: Failed\n");
        gi->mongo_isconnected = 0;
        return NULL;
    }

    debug(bot, "mongodb_init: Successfully connected to mongo\n");
    gi->mongo_isconnected = 1;

    return bot;
}
int main(void) {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    char data_before[UPPER];
    size_t i;
    FILE *fd;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    /* strncpy(opts.host, TEST_SERVER, 255); */
    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn, &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    for (i = LOWER; i <= UPPER; i += DELTA) {
        fill_buffer_randomly(data_before, i);

        /* Input from buffer */
        gridfs_store_buffer(gfs, data_before, i, "input-buffer", "text/html");
        test_gridfile(gfs, data_before, i, "input-buffer", "text/html");

        /* Input from file */
        fd = fopen("input-file", "w");
        fwrite(data_before, sizeof(char), i, fd);
        fclose(fd);
        gridfs_store_file(gfs, "input-file", "input-file", "text/html");
        test_gridfile(gfs, data_before, i, "input-file", "text/html");
    }

    gridfs_destroy(gfs);
    mongo_cmd_drop_db(conn, "test");
    mongo_destroy(conn);

    return 0;
}
int main (int argc, char *argv[])
{
    char db_name[100] = "test2";
    char collection_name[100] = "students";
    /*
    char db_name2[100] = "test";
    char collection_name2[100] = "OceanTest_mongo";
    */
    char action[20] = "collStats";
    char *result;
    mongoc_collection_t *collection, *collection2;
    Gais_Data import;
    /* mongoc_client_t *client; */

    /* Connect to the database. */
    collection = mongo_connect(27017, db_name, collection_name);
    /* collection2 = mongo_connect(27017, db_name2, collection_name2); */
    /* client = mongo_connect(27017); */

    /* Show the collection's stats. */
    result = malloc(sizeof(char) * RESULT_LEN);
    /* run_command(result, db_name, collection_name, action); */
    run_command(collection, result, db_name, collection_name, action);
    printf("\n%s\n", result);
    free(result);

    /*
    result = malloc(sizeof(char) * RESULT_LEN);
    run_command(collection2, result, db_name2, collection_name2, action);
    run_command(client, result, db_name, collection_name, action);
    printf("\n2.%s\n", result);
    free(result);
    */

    /* mongo_insert example:
    strcpy(import.lan, "CHT");
    strcpy(import.author, "me");
    mongo_insert(0, import, "hello mongodb", collection, 0);
    */

    mongoc_cleanup ();
    return 0;
}
void test_streaming() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *buf = malloc( LARGE );
    char *small = malloc( LOWER );
    int n;

    if( buf == NULL || small == NULL ) {
        printf("Failed to allocate");
        exit(1);
    }

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect 3\n");
        exit(1);
    }

    fill_buffer_randomly(small, (uint64_t)LOWER);
    fill_buffer_randomly(buf, (uint64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);
    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n = 0; n < (LARGE / 1024); n++) {
        gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");
    gridfs_destroy(gfs);

    mongo_destroy(conn);
    free(buf);
    free(small);
}
int kmip_db_init(kmip_t *kmip)
{
    kmip->db = (mongo_connection *)malloc(sizeof(mongo_connection));
    if (kmip->db == NULL)
        return -1;

    if (mongo_connect( kmip->db, kmip->db_name, kmip->db_port )) {
        fprintf(stderr, "failed to connect\n");
        return -1;
    }

    /*
    bson b;
    mongo_md5_state_t st;
    mongo_md5_byte_t digest[16];
    bson_init( &b, json_to_bson( js ), 1 );
    */

    return 0;
}
void test_basic() {
    mongo_connection conn[1];
    gridfs gfs[1];
    char *data_before = malloc( UPPER );
    uint64_t i;
    FILE *fd;

    if( data_before == NULL ) {
        printf("Failed to allocate");
        exit(1);
    }

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect 2\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    fill_buffer_randomly( data_before, UPPER );

    for (i = LOWER; i <= UPPER; i += DELTA) {
        /* Input from buffer */
        gridfs_store_buffer(gfs, data_before, i, "input-buffer", "text/html");
        test_gridfile(gfs, data_before, i, "input-buffer", "text/html");

        /* Input from file */
        fd = fopen("input-file", "w");
        fwrite(data_before, sizeof(char), i, fd);
        fclose(fd);
        gridfs_store_file(gfs, "input-file", "input-file", "text/html");
        test_gridfile(gfs, data_before, i, "input-file", "text/html");
    }

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free( data_before );
}
void test_streaming() {
    mongo_connection conn[1];
    mongo_connection_options opts;
    gridfs gfs[1];
    gridfile gfile[1];
    char buf[LARGE];
    char small[LOWER];
    int n;

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    strncpy(opts.host, "127.0.0.1", 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn, &opts )) {
        printf("failed to connect\n");
        exit(1);
    }

    fill_buffer_randomly(small, (int64_t)LOWER);
    fill_buffer_randomly(buf, (int64_t)LARGE);

    gridfs_init(conn, "test", "fs", gfs);
    gridfs_store_buffer(gfs, small, LOWER, "small", "text/html");
    test_gridfile(gfs, small, LOWER, "small", "text/html");
    gridfs_destroy(gfs);

    gridfs_init(conn, "test", "fs", gfs);
    gridfile_writer_init(gfile, gfs, "large", "text/html");
    for(n = 0; n < (LARGE / 1024); n++) {
        gridfile_write_buffer(gfile, buf + (n * 1024), 1024);
    }
    gridfile_writer_done( gfile );
    test_gridfile(gfs, buf, LARGE, "large", "text/html");
    gridfs_destroy(gfs);

    mongo_destroy(conn);
}
int main() {
    mongo conn[1];

    INIT_SOCKETS_FOR_WINDOWS;

    if( mongo_connect( conn, TEST_SERVER, 27017 ) != MONGO_OK ) {
        printf( "Failed to connect" );
        exit( 1 );
    }

    test_multiple_getmore( conn );
    test_tailable( conn );
    test_builder_api( conn );
    test_bad_query( conn );
    test_copy_cursor_data( conn );

    mongo_destroy( conn );

    return 0;
}
mongo_sync_connection *
mongo_sync_connect (const gchar *address, gint port, gboolean slaveok)
{
  mongo_sync_connection *s;
  mongo_connection *c;

  c = mongo_connect (address, port);
  if (!c)
    return NULL;

  s = g_realloc (c, sizeof (mongo_sync_connection));
  s->slaveok = slaveok;
  s->safe_mode = FALSE;
  s->auto_reconnect = FALSE;
  s->rs.seeds = g_list_append (NULL, g_strdup_printf ("%s:%d", address, port));
  s->rs.hosts = NULL;
  s->rs.primary = NULL;
  s->last_error = NULL;
  s->max_insert_size = MONGO_SYNC_DEFAULT_MAX_INSERT_SIZE;

  return s;
}
void
test_mongo_connection_get_requestid (void)
{
  mongo_connection c, *conn;
  mongo_packet *p;
  bson *b;
  gint reqid;

  c.request_id = 42;

  ok (mongo_connection_get_requestid (NULL) == -1,
      "mongo_connection_get_requestid() fails with a NULL connection");
  ok (mongo_connection_get_requestid (&c) == 42,
      "mongo_connection_get_requestid() works");

  begin_network_tests (2);

  b = bson_new ();
  bson_append_int32 (b, "getnonce", 1);
  bson_finish (b);

  p = mongo_wire_cmd_custom (42, config.db, 0, b);
  bson_free (b);

  conn = mongo_connect (config.primary_host, config.primary_port);
  cmp_ok ((reqid = mongo_connection_get_requestid (conn)), "==", 0,
          "Initial request id is 0");

  mongo_packet_send (conn, p);
  mongo_wire_packet_free (p);

  cmp_ok (reqid, "<", mongo_connection_get_requestid (conn),
          "Old request ID is smaller than the new one");

  mongo_disconnect (conn);

  end_network_tests ();
}
int main() {
    bson obj;

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect\n");
        exit(1);
    }

    /*********************/

    ASSERT(mongo_cmd_get_prev_error(conn, db, NULL) == MONGO_OK);
    ASSERT( conn->lasterrcode == 0 );
    ASSERT( conn->lasterrstr == NULL );

    ASSERT(mongo_cmd_get_last_error(conn, db, NULL) == MONGO_OK);
    ASSERT( conn->lasterrcode == 0 );
    ASSERT( conn->lasterrstr == NULL );

    ASSERT(mongo_cmd_get_prev_error(conn, db, &obj) == MONGO_OK);
    bson_destroy(&obj);

    ASSERT(mongo_cmd_get_last_error(conn, db, &obj) == MONGO_OK);
    bson_destroy(&obj);

    /*********************/

    mongo_simple_int_command(conn, db, "forceerror", 1, NULL);

    ASSERT(mongo_cmd_get_prev_error(conn, db, NULL) == MONGO_ERROR);
    ASSERT( conn->lasterrcode == 10038 );
    ASSERT( strcmp( (const char*)conn->lasterrstr, "forced error" ) == 0 );

    ASSERT(mongo_cmd_get_last_error(conn, db, NULL) == MONGO_ERROR);

    ASSERT(mongo_cmd_get_prev_error(conn, db, &obj) == MONGO_ERROR);
    bson_destroy(&obj);

    ASSERT(mongo_cmd_get_last_error(conn, db, &obj) == MONGO_ERROR);
    bson_destroy(&obj);

    /* should clear lasterror but not preverror */
    mongo_find_one(conn, ns, bson_empty(&obj), bson_empty(&obj), NULL);

    ASSERT(mongo_cmd_get_prev_error(conn, db, NULL) == MONGO_ERROR);
    ASSERT(mongo_cmd_get_last_error(conn, db, NULL) == MONGO_OK);

    ASSERT(mongo_cmd_get_prev_error(conn, db, &obj) == MONGO_ERROR);
    bson_destroy(&obj);

    ASSERT(mongo_cmd_get_last_error(conn, db, &obj) == MONGO_OK);
    bson_destroy(&obj);

    /*********************/

    mongo_cmd_reset_error(conn, db);

    ASSERT(mongo_cmd_get_prev_error(conn, db, NULL) == MONGO_OK);
    ASSERT(mongo_cmd_get_last_error(conn, db, NULL) == MONGO_OK);

    ASSERT(mongo_cmd_get_prev_error(conn, db, &obj) == MONGO_OK);
    bson_destroy(&obj);

    ASSERT(mongo_cmd_get_last_error(conn, db, &obj) == MONGO_OK);
    bson_destroy(&obj);

    mongo_cmd_drop_db(conn, db);
    mongo_destroy(conn);

    return 0;
}
int main() {
    mongo conn[1];
    bson b, empty;
    mongo_cursor cursor[1];
    unsigned char not_utf8[3];
    int result = 0;
    const char *ns = "test.c.validate";
    int i = 0, j = 0;
    bson bs[BATCH_SIZE];
    bson *bp[BATCH_SIZE];

    not_utf8[0] = 0xC0;
    not_utf8[1] = 0xC0;
    not_utf8[2] = '\0';

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Test checking for finished bson. */
    bson_init( &b );
    bson_append_int( &b, "foo", 1 );
    ASSERT( mongo_insert( conn, "test.foo", &b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_NOT_FINISHED );
    bson_destroy( &b );

    /* Test valid keys. */
    bson_init( &b );
    result = bson_append_string( &b, "a.b", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );

    /* Don't set the INIT_DOLLAR flag if DB ref fields are being used. */
    result = bson_append_string( &b, "$id", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b, "$ref", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b, "$db", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    result = bson_append_string( &b, "$ab", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );

    result = bson_append_string( &b, "ab", "this is valid utf8" );
    ASSERT( result == BSON_OK );
    ASSERT( !( b.err & BSON_NOT_UTF8 ) );

    result = bson_append_string( &b, ( const char * )not_utf8, "valid" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    ASSERT( bson_finish( &b ) == BSON_ERROR );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    result = mongo_insert( conn, ns, &b, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    result = mongo_update( conn, ns, bson_empty( &empty ), &b, 0, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    mongo_cursor_init( cursor, conn, "test.cursors" );
    mongo_cursor_set_query( cursor, &b );
    result = mongo_cursor_next( cursor );
    ASSERT( result == MONGO_ERROR );
    ASSERT( cursor->err & MONGO_CURSOR_BSON_ERROR );
    ASSERT( cursor->conn->err & MONGO_BSON_NOT_FINISHED );

    bson_destroy( &b );
    mongo_cursor_destroy( cursor );

    /* Test valid strings. */
    bson_init( &b );
    result = bson_append_string( &b, "foo", "bar" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err == 0 );

    result = bson_append_string( &b, "foo", ( const char * )not_utf8 );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    b.err = 0;
    ASSERT( b.err == 0 );

    result = bson_append_regex( &b, "foo", ( const char * )not_utf8, "s" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    for ( j = 0; j < BATCH_SIZE; j++ )
        bp[j] = &bs[j];

    for ( j = 0; j < BATCH_SIZE; j++ )
        make_small_invalid( &bs[j], i );

    result = mongo_insert_batch( conn, ns, (const bson **)bp, BATCH_SIZE, NULL, 0 );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_INVALID );

    for ( j = 0; j < BATCH_SIZE; j++ )
        bson_destroy( &bs[j] );

    bson_destroy( &b );
    mongo_cmd_drop_db( conn, "test" );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    return 0;
}
int main() {
    mongo_connection conn[1];
    bson_buffer bb;
    bson obj;
    bson cond;
    int i;
    bson_oid_t oid;
    const char* col = "c.update_test";
    const char* ns = "test.c.update_test";

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect\n");
        exit(1);
    }

    /* if the collection doesn't exist, dropping it will fail */
    if ( mongo_cmd_drop_collection(conn, "test", col, NULL) == MONGO_OK
         && mongo_find_one(conn, ns, bson_empty(&obj), bson_empty(&obj), NULL) != MONGO_OK ) {
        printf("failed to drop collection\n");
        exit(1);
    }

    bson_oid_gen(&oid);

    { /* insert */
        bson_buffer_init(&bb);
        bson_append_oid(&bb, "_id", &oid);
        bson_append_int(&bb, "a", 3);
        bson_from_buffer(&obj, &bb);
        mongo_insert(conn, ns, &obj);
        bson_destroy(&obj);
    }

    { /* update */
        bson op;

        bson_buffer_init(&bb);
        bson_append_oid(&bb, "_id", &oid);
        bson_from_buffer(&cond, &bb);

        bson_buffer_init(&bb);
        {
            bson_append_start_object(&bb, "$inc");
            bson_append_int(&bb, "a", 2);
            bson_append_finish_object(&bb);
        }
        {
            bson_append_start_object(&bb, "$set");
            bson_append_double(&bb, "b", -1.5);
            bson_append_finish_object(&bb);
        }
        bson_from_buffer(&op, &bb);

        for (i = 0; i < 5; i++)
            mongo_update(conn, ns, &cond, &op, 0);

        /* cond is used later */
        bson_destroy(&op);
    }

    if( mongo_find_one(conn, ns, &cond, 0, &obj) != MONGO_OK ) {
        printf("Failed to find object\n");
        exit(1);
    } else {
        int fields = 0;
        bson_iterator it;
        bson_iterator_init(&it, obj.data);

        bson_destroy(&cond);

        while(bson_iterator_next(&it)) {
            switch(bson_iterator_key(&it)[0]) {
            case '_': /* id */
                ASSERT(bson_iterator_type(&it) == BSON_OID);
                ASSERT(!memcmp(bson_iterator_oid(&it)->bytes, oid.bytes, 12));
                fields++;
                break;
            case 'a':
                ASSERT(bson_iterator_type(&it) == BSON_INT);
                ASSERT(bson_iterator_int(&it) == 3 + 5*2);
                fields++;
                break;
            case 'b':
                ASSERT(bson_iterator_type(&it) == BSON_DOUBLE);
                ASSERT(bson_iterator_double(&it) == -1.5);
                fields++;
                break;
            }
        }

        ASSERT(fields == 3);
    }

    bson_destroy(&obj);

    mongo_cmd_drop_db(conn, "test");
    mongo_destroy(conn);
    return 0;
}
static int mongodb_log(struct ast_cdr *cdr)
{
    const char *ns;
    mongo conn;
    bson b;

    ast_debug(1, "mongodb: Starting mongodb_log.\n");

    mongo_init( &conn );
    if (mongo_connect( &conn, ast_str_buffer(hostname), dbport ) != MONGO_OK) {
        mongo_destroy( &conn );
        ast_log(LOG_ERROR, "Method: mongodb_log, MongoDB failed to connect.\n");
        connected = 0;
        records = 0;
        return -1;
    }

    if (ast_str_strlen(dbuser) != 0
        && (mongo_cmd_authenticate(&conn, ast_str_buffer(dbname),
                                   ast_str_buffer(dbuser),
                                   ast_str_buffer(password)) != MONGO_OK)) {
        mongo_destroy( &conn );
        ast_log(LOG_ERROR, "Method: mongodb_log, MongoDB failed to authenticate to database %s with username %s!\n",
                ast_str_buffer(dbname), ast_str_buffer(dbuser));
        connected = 0;
        records = 0;
        return -1;
    }

    ast_debug(1, "mongodb: Locking mongodb_lock.\n");
    ast_mutex_lock(&mongodb_lock);

    ast_debug(1, "mongodb: Got connection, Preparing record.\n");

    ast_debug(1, "mongodb: Init bson.\n");
    bson_init( &b );
    bson_append_new_oid( &b, "_id" );

    ast_debug(1, "mongodb: accountcode.\n");
    bson_append_string( &b, "accountcode", cdr->accountcode );
    ast_debug(1, "mongodb: src.\n");
    bson_append_string( &b, "src", cdr->src );
    ast_debug(1, "mongodb: dst.\n");
    bson_append_string( &b, "dst", cdr->dst );
    ast_debug(1, "mongodb: dcontext.\n");
    bson_append_string( &b, "dcontext", cdr->dcontext );
    ast_debug(1, "mongodb: clid.\n");
    bson_append_string( &b, "clid", cdr->clid );
    ast_debug(1, "mongodb: channel.\n");
    bson_append_string( &b, "channel", cdr->channel );
    ast_debug(1, "mongodb: dstchannel.\n");
    bson_append_string( &b, "dstchannel", cdr->dstchannel );
    ast_debug(1, "mongodb: lastapp.\n");
    bson_append_string( &b, "lastapp", cdr->lastapp );
    ast_debug(1, "mongodb: lastdata.\n");
    bson_append_string( &b, "lastdata", cdr->lastdata );
    ast_debug(1, "mongodb: start.\n");
    bson_append_date( &b, "start", (bson_date_t)cdr->start.tv_sec * 1000 );
    ast_debug(1, "mongodb: answer.\n");
    bson_append_date( &b, "answer", (bson_date_t)cdr->answer.tv_sec * 1000 );
    ast_debug(1, "mongodb: end.\n");
    bson_append_date( &b, "end", (bson_date_t)cdr->end.tv_sec * 1000 );
    ast_debug(1, "mongodb: duration.\n");
    bson_append_int( &b, "duration", cdr->duration );
    ast_debug(1, "mongodb: billsec.\n");
    bson_append_int( &b, "billsec", cdr->billsec );
    ast_debug(1, "mongodb: disposition.\n");
    bson_append_string( &b, "disposition", ast_cdr_disp2str(cdr->disposition) );
    ast_debug(1, "mongodb: amaflags.\n");
    bson_append_string( &b, "amaflags", ast_cdr_flags2str(cdr->amaflags) );
    ast_debug(1, "mongodb: uniqueid.\n");
    bson_append_string( &b, "uniqueid", cdr->uniqueid );
    ast_debug(1, "mongodb: userfield.\n");
    bson_append_string( &b, "userfield", cdr->userfield );
    bson_finish( &b );

    ast_debug(1, "mongodb: Inserting a CDR record.\n");
    mongo_insert( &conn, ast_str_buffer(dbnamespace), &b );
    bson_destroy( &b );
    mongo_destroy( &conn );

    connected = 1;
    records++;
    totalrecords++;

    ast_debug(1, "Unlocking mongodb_lock.\n");
    ast_mutex_unlock(&mongodb_lock);
    return 0;
}