void MongodbClient::Disconnect() {
    if (IsConnected()) {
        mongo_disconnect(clientData->connection);
    }
}
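For context, every snippet below ends a session with mongo_disconnect(). A minimal sketch of the full lifecycle against the legacy MongoDB C driver follows; the host, port, and error handling are illustrative assumptions, not part of the wrapper above.

#include <stdio.h>
#include "mongo.h"

int main(void) {
    mongo conn[1];

    /* Connect; mongo_client() returns MONGO_OK on success in the
       legacy driver (older releases expose mongo_connect() instead). */
    if (mongo_client(conn, "127.0.0.1", 27017) != MONGO_OK) {
        printf("connection failed, err = %d\n", conn->err);
        return 1;
    }

    /* ... queries, inserts, GridFS work ... */

    /* Close the socket, then release all connection state. */
    mongo_disconnect(conn);
    mongo_destroy(conn);
    return 0;
}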
static void test_delete( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    char *data = (char*)bson_malloc( 1024 );
    const char *testFile = "test-delete";

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    GFS_INIT;

    ASSERT( gridfs_store_buffer( gfs, data, 1024, testFile, "text/html", GRIDFILE_DEFAULT ) == MONGO_OK );
    ASSERT( gridfs_find_filename( gfs, testFile, gfile ) == MONGO_OK );
    gridfile_destroy( gfile );
    ASSERT( gridfs_remove_filename( gfs, testFile ) == MONGO_OK );
    ASSERT( gridfs_find_filename( gfs, testFile, gfile ) == MONGO_ERROR );

    ASSERT( gridfs_find_filename( gfs, "bogus-file-does-not-exist", gfile ) == MONGO_ERROR );
    ASSERT( gridfs_remove_filename( gfs, "bogus-file-does-not-exist" ) == MONGO_ERROR );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    bson_free( data );
}
void test_mongo_connection_set_timeout (void) {
    mongo_connection c, *conn;

    c.fd = -1;

    ok (mongo_connection_set_timeout (NULL, 100) == FALSE,
        "mongo_connection_set_timeout() should fail with a NULL connection");
    ok (mongo_connection_set_timeout (&c, -1) == FALSE,
        "mongo_connection_set_timeout() should fail with a negative timeout");
    ok (mongo_connection_set_timeout (&c, 100) == FALSE,
        "mongo_connection_set_timeout() should fail with an invalid FD");

    begin_network_tests (0);

    conn = mongo_connect (config.primary_host, config.primary_port);

    /* No verification here, as some systems may or may not support this;
       thus, failing in a test is not fatal. */
    mongo_connection_set_timeout (conn, 100);

    mongo_disconnect (conn);

    end_network_tests ();
}
mongo_conn_return mongo_reconnect( mongo_connection * conn ) {
    mongo_conn_return ret;
    mongo_disconnect( conn );

    /* single server */
    if( conn->right_opts == NULL )
        return mongo_connect_helper( conn );

    /* repl pair */
    ret = mongo_connect_helper( conn );
    if( ret == mongo_conn_success && mongo_cmd_ismaster( conn, NULL ) ) {
        return mongo_conn_success;
    }

    swap_repl_pair( conn );
    ret = mongo_connect_helper( conn );
    if( ret == mongo_conn_success ) {
        if( mongo_cmd_ismaster( conn, NULL ) )
            return mongo_conn_success;
        else
            return mongo_conn_not_master;
    }

    /* failed to connect to both servers */
    return ret;
}
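The reconnect above disconnects first, then tries both members of a replica pair, reporting mongo_conn_not_master when it can only reach a secondary. A hedged sketch of how a caller might use it follows; insert_doc() is a hypothetical stand-in for any operation that fails when the primary goes away, and the retry policy is illustrative.

/* Hypothetical retry wrapper; insert_doc() is not a driver API. */
static int insert_with_retry( mongo_connection *conn, int max_retries ) {
    int attempt;
    for( attempt = 0; attempt < max_retries; attempt++ ) {
        if( insert_doc( conn ) == 0 )
            return 0;  /* operation succeeded */
        /* Rebuild the connection; only retry once we are
           talking to a master again. */
        if( mongo_reconnect( conn ) != mongo_conn_success )
            return -1;
    }
    return -1;  /* retries exhausted */
}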
void test_large() {
    mongo_connection conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    int i, n;
    char *buffer = malloc( LARGE );
    uint64_t filesize = (uint64_t)1024 * (uint64_t)LARGE;

    if( buffer == NULL ) {
        printf("Failed to allocate memory.");
        exit(1);
    }

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect 1\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    /* Create a very large file */
    fill_buffer_randomly(buffer, (uint64_t)LARGE);
    fd = fopen("bigfile", "w");
    for(i = 0; i < 1024; i++) {
        fwrite(buffer, 1, LARGE, fd);
    }
    fclose(fd);

    /* Now read the file into GridFS */
    gridfs_store_file(gfs, "bigfile", "bigfile", "text/html");
    gridfs_find_filename(gfs, "bigfile", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    /* Read the file using the streaming interface */
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html");
    fd = fopen("bigfile", "r");
    while((n = fread(buffer, 1, 1024, fd)) != 0) {
        gridfile_write_buffer(gfile, buffer, n);
    }
    gridfile_writer_done( gfile );
    fclose(fd);

    gridfs_find_filename(gfs, "bigfile-stream", gfile);
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free(buffer);
}
bson_bool_t mongo_destroy( mongo_connection * conn ) {
    free( conn->left_opts );
    free( conn->right_opts );
    conn->left_opts = NULL;
    conn->right_opts = NULL;

    return mongo_disconnect( conn );
}
bool c_mongo_connection::close(void) {
    if (!connected) {
        return true;
    }

    mongo_disconnect(this);
    return true;
}
//------------------------------------------------------------------------------
int main( int argc, char * argv[] ) {
    mongo conn;

    if( mongo_client( &conn, TEST_SERVER, TEST_PORT ) != MONGO_OK ) {
        std::cout << "failed to connect\n";
        return EXIT_FAILURE;
    }

    mongo_cursor cursor;
    mongo_cursor_init( &cursor, &conn, "test.test" );

    char hex_oid[25];
    while( mongo_cursor_next( &cursor ) == MONGO_OK ) {
        std::cout << "row:\n";

        bson_iterator it;
        bson_iterator_init( &it, mongo_cursor_bson( &cursor ) );
        while( bson_iterator_next( &it ) ) {
            std::cout << "  " << bson_iterator_key( &it ) << " = ";
            switch( bson_iterator_type( &it ) ) {
            case BSON_DOUBLE:
                std::cout << "(double) " << bson_iterator_double( &it ) << std::endl;
                break;
            case BSON_INT:
                std::cout << "(int) " << bson_iterator_int( &it ) << std::endl;
                break;
            case BSON_STRING:
                std::cout << "(string) \"" << bson_iterator_string( &it ) << "\"\n";
                break;
            case BSON_OID:
                bson_oid_to_string( bson_iterator_oid( &it ), hex_oid );
                std::cout << "(oid) \"" << hex_oid << "\"\n";
                break;
            case BSON_OBJECT:
                std::cout << "(subobject) {...}\n";
                break;
            case BSON_ARRAY:
                std::cout << "(array) [...]\n";
                break;
            case BSON_TIMESTAMP:
                std::cout << "(timestamp) [...]\n";
                break;
            default:
                std::cout << "(type " << bson_iterator_type( &it ) << ")" << std::endl;
                break;
            }
        }
        std::cout << std::endl;
    }

    mongo_cursor_destroy( &cursor );
    mongo_disconnect( &conn );
    mongo_destroy( &conn );
    return EXIT_SUCCESS;
}
bson_bool_t mongo_destroy( mongo_connection * conn ) {
    if( conn->replset ) {
        mongo_replset_free_list( &conn->replset->seeds );
        mongo_replset_free_list( &conn->replset->hosts );
        free( conn->replset->name );
        free( conn->replset );
        conn->replset = NULL;
    }

    free( conn->primary );
    free( conn->errstr );

    return mongo_disconnect( conn );
}
MONGO_EXPORT int mongo_reconnect( mongo *conn ) {
    int res;
    mongo_disconnect( conn );

    if( conn->replset ) {
        conn->replset->primary_connected = 0;
        mongo_replset_free_list( &conn->replset->hosts );
        conn->replset->hosts = NULL;
        res = mongo_replset_connect( conn );
        return res;
    }
    else
        return mongo_socket_connect( conn, conn->primary->host, conn->primary->port );
}
void test_mongo_disconnect (void) {
    mongo_connection *conn;

    conn = g_new0 (mongo_connection, 1);
    conn->fd = -1;

    errno = 0;
    mongo_disconnect (NULL);
    ok (errno == ENOTCONN,
        "mongo_disconnect() fails with ENOTCONN when passed a NULL connection");

    mongo_disconnect (conn);
    ok (errno == 0, "mongo_disconnect() works");

    conn = g_new0 (mongo_connection, 1);
    conn->fd = 100;
    mongo_disconnect (conn);
    ok (errno == 0, "mongo_disconnect() works, even with a bogus FD");
}
SEXP rmongo_connect(SEXP mongo_conn) {
    mongo* conn = _checkMongo(mongo_conn);
    mongo_host_port hp;
    SEXP host = getAttrib(mongo_conn, sym_host);
    int len = LENGTH(host);
    int i;

    if (len == 0)
        error("No hosts defined\n");

    const char* name = CHAR(STRING_ELT(getAttrib(mongo_conn, sym_name), 0));
    if (name[0] == '\0') {
        for (i = 0; i < len; i++) {
            mongo_parse_host(CHAR(STRING_ELT(host, i)), &hp);
            if (mongo_connect(conn, hp.host, hp.port) == MONGO_OK)
                break;
        }
        if (i == len) {
            if (len == 1)
                Rprintf("Unable to connect to %s:%d, error code = %d\n", hp.host, hp.port, conn->err);
            else
                Rprintf("Unable to connect to any of the given hosts, error code = %d\n", conn->err);
            return mongo_conn;
        }
    }
    else {
        mongo_replset_init(conn, name);
        for (i = 0; i < len; i++) {
            mongo_parse_host(CHAR(STRING_ELT(host, i)), &hp);
            mongo_replset_add_seed(conn, hp.host, hp.port);
        }
        if (mongo_replset_connect(conn) != MONGO_OK)
            Rprintf("Unable to connect to replset\n");
    }

    int timeout = asInteger(getAttrib(mongo_conn, sym_timeout));
    if (timeout > 0)
        mongo_set_op_timeout(conn, timeout);

    SEXP username = getAttrib(mongo_conn, sym_username);
    if (CHAR(STRING_ELT(username, 0))[0] != '\0') {
        SEXP password = getAttrib(mongo_conn, install("password"));
        SEXP db = getAttrib(mongo_conn, install("db"));
        SEXP ret = mongo_authenticate(mongo_conn, username, password, db);
        if (!LOGICAL(ret)[0]) {
            mongo_disconnect(conn);
            Rprintf("Authentication failed.\n");
        }
    }

    return mongo_conn;
}
void test_mongo_packet_recv_ssl (void) {
    mongo_connection c, *conn;
    mongo_packet *p;
    bson *b;

    c.fd = -1;

    ok (mongo_packet_recv (NULL) == NULL,
        "mongo_packet_recv() fails with a NULL connection");
    ok (errno == ENOTCONN,
        "mongo_packet_recv() sets errno to ENOTCONN if connection is NULL");
    ok (mongo_packet_recv (&c) == NULL,
        "mongo_packet_recv() fails if the FD is less than zero");
    ok (errno == EBADF,
        "mongo_packet_recv() sets errno to EBADF if the FD is bad");

    begin_ssl_tests (2);

    b = bson_new ();
    bson_append_int32 (b, "getnonce", 1);
    bson_finish (b);

    p = mongo_wire_cmd_custom (42, config.db, 0, b);
    bson_free (b);

    conn = mongo_ssl_connect (config.primary_host, config.primary_port, config.ssl_settings);
    mongo_packet_send (conn, p);
    mongo_wire_packet_free (p);

    ok ((p = mongo_packet_recv (conn)) != NULL, "mongo_packet_recv() works");
    mongo_wire_packet_free (p);

    close (conn->fd);
    sleep (3);

    ok (mongo_packet_recv (conn) == NULL,
        "mongo_packet_recv() fails on a closed socket");

    mongo_disconnect (conn);
    end_ssl_tests ();
}
void test_basic( void ) {
    mongo conn[1];
    gridfs gfs[1];
    char *data_before = (char*)bson_malloc( UPPER );
    int64_t i;
    FILE *fd;

    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_client( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect 2\n" );
        exit( 1 );
    }

    gridfs_init( conn, "test", "fs", gfs );

    fill_buffer_randomly( data_before, UPPER );
    for ( i = LOWER; i <= UPPER; i += DELTA ) {

        /* Input from buffer */
        gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_COMPRESS );
        test_gridfile( gfs, data_before, i, "input-buffer", "text/html" );

        /* Input from file */
        fd = fopen( "input-file", "w" );
        fwrite( data_before, sizeof( char ), (size_t)i, fd );
        fclose( fd );
        gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT );
        test_gridfile( gfs, data_before, i, "input-file", "text/html" );

        gfs->caseInsensitive = 1;
        gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT );
        test_gridfile( gfs, data_before, i, "inPut-file", "text/html" );
    }

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    free( data_before );

    /* Clean up files. */
    _unlink( "input-file" );
    _unlink( "output" );
}
MONGO_EXPORT void mongo_destroy( mongo *conn ) {
    mongo_disconnect( conn );

    if( conn->replset ) {
        mongo_replset_free_list( &conn->replset->seeds );
        mongo_replset_free_list( &conn->replset->hosts );
        bson_free( conn->replset->name );
        bson_free( conn->replset );
        conn->replset = NULL;
    }

    bson_free( conn->primary );
    bson_free( conn->lasterrstr );

    conn->err = 0;
    conn->lasterrcode = 0;
    conn->lasterrstr = NULL;
}
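All three mongo_destroy() variants in this section pair with mongo_disconnect() the same way: disconnecting only closes the socket and keeps the handle reusable, while destroying also frees replica-set lists, the primary host, and error strings. A minimal sketch of that distinction, assuming the newer legacy-driver API and a server on localhost:

#include "mongo.h"

void lifecycle_demo(void) {
    mongo conn[1];

    if (mongo_client(conn, "127.0.0.1", 27017) != MONGO_OK)
        return;

    mongo_disconnect(conn);          /* socket closed, state retained */

    if (mongo_reconnect(conn) == MONGO_OK) {
        /* the same handle is usable again */
    }

    mongo_destroy(conn);             /* frees hosts, error strings, etc. */
}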
void test_basic() {
    mongo_connection conn[1];
    gridfs gfs[1];
    char *data_before = malloc( UPPER );
    uint64_t i;
    FILE *fd;

    if( data_before == NULL ) {
        printf("Failed to allocate");
        exit(1);
    }

    srand(time(NULL));

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect 2\n");
        exit(1);
    }

    gridfs_init(conn, "test", "fs", gfs);

    fill_buffer_randomly( data_before, UPPER );
    for (i = LOWER; i <= UPPER; i += DELTA) {

        /* Input from buffer */
        gridfs_store_buffer(gfs, data_before, i, "input-buffer", "text/html");
        test_gridfile(gfs, data_before, i, "input-buffer", "text/html");

        /* Input from file */
        fd = fopen("input-file", "w");
        fwrite(data_before, sizeof(char), i, fd);
        fclose(fd);
        gridfs_store_file(gfs, "input-file", "input-file", "text/html");
        test_gridfile(gfs, data_before, i, "input-file", "text/html");
    }

    gridfs_destroy(gfs);
    mongo_disconnect(conn);
    mongo_destroy(conn);
    free( data_before );
}
static ngx_int_t ngx_http_mongo_reconnect(ngx_log_t *log, ngx_http_mongo_connection_t *mongo_conn) {
    volatile int status = MONGO_CONN_FAIL;

    if (mongo_conn->conn.connected) {
        mongo_disconnect(&mongo_conn->conn);
        ngx_msleep(MONGO_RECONNECT_WAITTIME);
        status = mongo_reconnect(&mongo_conn->conn);
    } else {
        status = MONGO_CONN_FAIL;
    }

    switch (status) {
        case MONGO_CONN_SUCCESS:
            break;
        case MONGO_CONN_NO_SOCKET:
            ngx_log_error(NGX_LOG_ERR, log, 0, "Mongo Exception: No Socket");
            return NGX_ERROR;
        case MONGO_CONN_FAIL:
            ngx_log_error(NGX_LOG_ERR, log, 0,
                          "Mongo Exception: Connection Failure %s:%i;",
                          mongo_conn->conn.primary->host,
                          mongo_conn->conn.primary->port);
            return NGX_ERROR;
        case MONGO_CONN_ADDR_FAIL:
            ngx_log_error(NGX_LOG_ERR, log, 0, "Mongo Exception: getaddrinfo Failure");
            return NGX_ERROR;
        case MONGO_CONN_NOT_MASTER:
            ngx_log_error(NGX_LOG_ERR, log, 0, "Mongo Exception: Not Master");
            return NGX_ERROR;
        default:
            ngx_log_error(NGX_LOG_ERR, log, 0, "Mongo Exception: Unknown Error");
            return NGX_ERROR;
    }

    return NGX_OK;
}
void test_mongo_connection_get_requestid (void) {
    mongo_connection c, *conn;
    mongo_packet *p;
    bson *b;
    gint reqid;

    c.request_id = 42;
    ok (mongo_connection_get_requestid (NULL) == -1,
        "mongo_connection_get_requestid() fails with a NULL connection");
    ok (mongo_connection_get_requestid (&c) == 42,
        "mongo_connection_get_requestid() works");

    begin_network_tests (2);

    b = bson_new ();
    bson_append_int32 (b, "getnonce", 1);
    bson_finish (b);

    p = mongo_wire_cmd_custom (42, config.db, 0, b);
    bson_free (b);

    conn = mongo_connect (config.primary_host, config.primary_port);
    cmp_ok ((reqid = mongo_connection_get_requestid (conn)), "==", 0,
            "Initial request id is 0");
    mongo_packet_send (conn, p);
    mongo_wire_packet_free (p);

    cmp_ok (reqid, "<", mongo_connection_get_requestid (conn),
            "Old request ID is smaller than the new one");

    mongo_disconnect (conn);
    end_network_tests ();
}
static void test_random_write(void) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile* gfile;
    char *data_before = (char*)bson_malloc( UPPER );
    char *random_data = (char*)bson_malloc( UPPER );
    char *buf = (char*)bson_malloc( UPPER );
    int64_t i;
    FILE *fd;

    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    GFS_INIT;

    fill_buffer_randomly( data_before, UPPER );
    fill_buffer_randomly( random_data, UPPER );
    for ( i = LOWER; i <= UPPER; i += DELTA ) {
        int64_t j = i / 2 - 3;
        gridfs_offset bytes_to_write_first;
        int n;

        /* Input from buffer */
        gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html", GRIDFILE_DEFAULT );
        if ( i > DEFAULT_CHUNK_SIZE * 4 ) {
            /* Overwrite the buffer with bytes crossing multiple chunks */
            n = DEFAULT_CHUNK_SIZE * 3 + 6;
            memcpy( &data_before[j], random_data, n );
            bytes_to_write_first = 10;
        } else {
            /* Overwrite the buffer with a few bytes */
            n = 6;
            memcpy( random_data, "123456", n );
            strncpy( &data_before[j], random_data, n );
            bytes_to_write_first = 0;
        }

        gfile = gridfile_create();
        ASSERT( gridfs_find_filename( gfs, "input-buffer", gfile ) == 0 );
        gridfile_writer_init( gfile, gfs, "input-buffer", "text/html", GRIDFILE_DEFAULT );

        /* Seek to the same buffer position within the GridFS file */
        gridfile_seek( gfile, j );
        if ( bytes_to_write_first ) {
            /* Write 10 bytes first, and the rest later */
            ASSERT( gridfile_write_buffer( gfile, random_data, bytes_to_write_first ) == bytes_to_write_first );
        }
        /* Write to the existing GridFS file at the position given by j */
        ASSERT( gridfile_write_buffer( gfile, &random_data[bytes_to_write_first], n - bytes_to_write_first ) == n - bytes_to_write_first );

        gridfile_seek( gfile, j );
        gridfile_read_buffer( gfile, buf, n );
        ASSERT( memcmp( buf, &data_before[j], n ) == 0 );

        gridfile_writer_done( gfile );
        ASSERT( gfile->pos == (gridfs_offset)( j + n ) );
        gridfile_dealloc( gfile );
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-buffer", "text/html" );

        /* Input from file */
        fd = fopen( "input-file", "w" );
        fwrite( data_before, sizeof( char ), (size_t)( j + n > i ? j + n : i ), fd );
        fclose( fd );
        gridfs_store_file( gfs, "input-file", "input-file", "text/html", GRIDFILE_DEFAULT );
        test_gridfile( gfs, data_before, j + n > i ? j + n : i, "input-file", "text/html" );
    }

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    free( data_before );
    free( random_data );
    free( buf );

    /* Clean up files. */
    gridfs_test_unlink( "input-file" );
    gridfs_test_unlink( "output" );
}
SEXP rmongo_disconnect(SEXP mongo_conn) {
    mongo* conn = _checkMongo(mongo_conn);
    mongo_disconnect(conn);
    return mongo_conn;
}
int main() {
    mongo_connection conn[1];
    bson_buffer bb;
    bson b;
    unsigned char not_utf8[3];
    int result;

    not_utf8[0] = 0xC0;
    not_utf8[1] = 0xC0;
    not_utf8[2] = '\0';

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn, TEST_SERVER, 27017 )) {
        printf("failed to connect\n");
        exit(1);
    }

    /* Test valid keys. */
    bson_buffer_init( &bb );
    result = bson_append_string( &bb, "a.b", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( bb.err & BSON_FIELD_HAS_DOT );

    result = bson_append_string( &bb, "$ab", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( bb.err & BSON_FIELD_INIT_DOLLAR );

    result = bson_append_string( &bb, "ab", "this is valid utf8" );
    ASSERT( result == BSON_OK );
    ASSERT( !( bb.err & BSON_NOT_UTF8 ) );

    result = bson_append_string( &bb, (const char*)not_utf8, "valid" );
    ASSERT( result == BSON_ERROR );
    ASSERT( bb.err & BSON_NOT_UTF8 );

    bson_from_buffer( &b, &bb );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );
    ASSERT( b.err & BSON_NOT_UTF8 );
    bson_destroy( &b );

    /* Test valid strings. */
    bson_buffer_init( &bb );
    result = bson_append_string( &bb, "foo", "bar" );
    ASSERT( result == BSON_OK );
    ASSERT( bb.err == 0 );

    result = bson_append_string( &bb, "foo", (const char*)not_utf8 );
    ASSERT( result == BSON_ERROR );
    ASSERT( bb.err & BSON_NOT_UTF8 );

    bb.err = 0;
    ASSERT( bb.err == 0 );

    result = bson_append_regex( &bb, "foo", (const char*)not_utf8, "s" );
    ASSERT( result == BSON_ERROR );
    ASSERT( bb.err & BSON_NOT_UTF8 );

    mongo_cmd_drop_db(conn, "test");
    mongo_disconnect( conn );
    mongo_destroy( conn );
    return 0;
}
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    ngx_http_mongo_connection_t *mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;
    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor ** cursors;
    gridfs_offset chunk_len;
    const char * chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE;
    volatile ngx_uint_t ecounter = 0;
    uint64_t range_start = 0;
    uint64_t range_end = 0;
    uint64_t current_buf_pos = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    if (mongo_conn->conn.connected == 0) {
        if (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
            if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
        if (ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to reauth to mongo: \"%V\"", &gridfs_conf->mongo);
            if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    bson_init(&query);
    switch (gridfs_conf->type) {
    case BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
        bson_append_int(&query, (char*)gridfs_conf->field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    do {
        e = FALSE;
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK
            || (status = gridfs_find_query(&gfs, &query, &gfile)) == MONGO_ERROR) {
            e = TRUE; ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                bson_destroy(&query);
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);

    // NaN workaround
    if (numchunks > INT_MAX) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    contenttype = (char*)gridfile_get_contenttype(&gfile);
    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- PARTIAL RANGE ---------- //

    if (request->headers_in.range) {
        gridfs_parse_range(request, &request->headers_in.range->value,
                           &range_start, &range_end, length);
    }

    // ---------- SEND THE HEADERS ---------- //

    if (range_start == 0 && range_end == 0) {
        request->headers_out.status = NGX_HTTP_OK;
        request->headers_out.content_length_n = length;
    } else {
        request->headers_out.status = NGX_HTTP_PARTIAL_CONTENT;
        request->headers_out.content_length_n = length;

        ngx_table_elt_t *content_range;
        content_range = ngx_list_push(&request->headers_out.headers);
        if (content_range == NULL) {
            return NGX_ERROR;
        }
        request->headers_out.content_range = content_range;
        content_range->hash = 1;
        ngx_str_set(&content_range->key, "Content-Range");
        content_range->value.data = ngx_pnalloc(request->pool,
                                                sizeof("bytes -/") - 1 + 3 * NGX_OFF_T_LEN);
        if (content_range->value.data == NULL) {
            return NGX_ERROR;
        }
        /* "Content-Range: bytes SSSS-EEEE/TTTT" header */
        content_range->value.len = ngx_sprintf(content_range->value.data,
                                               "bytes %O-%O/%O",
                                               range_start, range_end,
                                               request->headers_out.content_length_n)
                                   - content_range->value.data;
        request->headers_out.content_length_n = range_end - range_start + 1;
    }

    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t *b;
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }

    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified/1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if ( gridfile_get_boolean(&gfile, "gzipped") ) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      gridfile_get_field(&gfile, "gzipped") );
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char *) "Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char *) "gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return ngx_http_output_filter(request, &out);
    }

    cursors = (mongo_cursor **)ngx_pcalloc(request->pool, sizeof(mongo_cursor *) * numchunks);
    if (cursors == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    ngx_memzero(cursors, sizeof(mongo_cursor *) * numchunks);

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {

        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE; ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );

        if (range_start == 0 && range_end == 0) {
            /* <<no range request>> */
            /* Set up the buffer chain */
            buffer->pos = (u_char*)chunk_data;
            buffer->last = (u_char*)chunk_data + chunk_len;
            buffer->memory = 1;
            buffer->last_buf = (i == numchunks-1);
            out.buf = buffer;
            out.next = NULL;

            /* Serve the Chunk */
            rc = ngx_http_output_filter(request, &out);
        } else {
            /* <<range request>> */
            if (range_start >= (current_buf_pos+chunk_len) || range_end <= current_buf_pos) {
                /* no output */
                ngx_pfree(request->pool, buffer);
            } else {
                if (range_start <= current_buf_pos) {
                    buffer->pos = (u_char*)chunk_data;
                } else {
                    buffer->pos = (u_char*)chunk_data + (range_start - current_buf_pos);
                }
                if (range_end < (current_buf_pos+chunk_len)) {
                    buffer->last = (u_char*)chunk_data + (range_end - current_buf_pos + 1);
                } else {
                    buffer->last = (u_char*)chunk_data + chunk_len;
                }
                if (buffer->pos == buffer->last) {
                    ngx_log_error(NGX_LOG_ALERT, request->connection->log, 0,
                                  "zero size buf in writer "
                                  "range_start:%uL range_end:%uL "
                                  "current_buf_pos:%uL chunk_len:%uL i:%ui numchunk:%ui",
                                  range_start, range_end,
                                  current_buf_pos, chunk_len,
                                  i, numchunks);
                }
                buffer->memory = 1;
                buffer->last_buf = (i == numchunks-1) || (range_end < (current_buf_pos+chunk_len));
                out.buf = buffer;
                out.next = NULL;

                /* Serve the Chunk */
                rc = ngx_http_output_filter(request, &out);
            }
        }

        current_buf_pos += chunk_len;

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);
    return rc;
}
int main() {
    mongo conn[1];
    bson b, empty;
    mongo_cursor cursor[1];
    unsigned char not_utf8[3];
    int result = 0;
    const char *ns = "test.c.validate";
    int i = 0, j = 0;
    bson bs[BATCH_SIZE];
    bson *bp[BATCH_SIZE];

    not_utf8[0] = 0xC0;
    not_utf8[1] = 0xC0;
    not_utf8[2] = '\0';

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Test checking for finished bson. */
    bson_init( &b );
    bson_append_int( &b, "foo", 1 );
    ASSERT( mongo_insert( conn, "test.foo", &b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_NOT_FINISHED );
    bson_destroy( &b );

    /* Test valid keys. */
    bson_init( &b );
    result = bson_append_string( &b, "a.b", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );

    /* Don't set the INIT_DOLLAR flag if DB ref fields are being used. */
    result = bson_append_string( &b, "$id", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !( b.err & BSON_FIELD_INIT_DOLLAR ) );

    result = bson_append_string( &b, "$ref", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !( b.err & BSON_FIELD_INIT_DOLLAR ) );

    result = bson_append_string( &b, "$db", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !( b.err & BSON_FIELD_INIT_DOLLAR ) );

    result = bson_append_string( &b, "$ab", "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );

    result = bson_append_string( &b, "ab", "this is valid utf8" );
    ASSERT( result == BSON_OK );
    ASSERT( !( b.err & BSON_NOT_UTF8 ) );

    result = bson_append_string( &b, ( const char * )not_utf8, "valid" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    ASSERT( bson_finish( &b ) == BSON_ERROR );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    result = mongo_insert( conn, ns, &b, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    result = mongo_update( conn, ns, bson_empty( &empty ), &b, 0, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    mongo_cursor_init( cursor, conn, "test.cursors" );
    mongo_cursor_set_query( cursor, &b );
    result = mongo_cursor_next( cursor );
    ASSERT( result == MONGO_ERROR );
    ASSERT( cursor->err & MONGO_CURSOR_BSON_ERROR );
    ASSERT( cursor->conn->err & MONGO_BSON_NOT_FINISHED );

    bson_destroy( &b );
    mongo_cursor_destroy( cursor );

    /* Test valid strings. */
    bson_init( &b );
    result = bson_append_string( &b, "foo", "bar" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err == 0 );

    result = bson_append_string( &b, "foo", ( const char * )not_utf8 );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    b.err = 0;
    ASSERT( b.err == 0 );

    result = bson_append_regex( &b, "foo", ( const char * )not_utf8, "s" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    for ( j = 0; j < BATCH_SIZE; j++ )
        bp[j] = &bs[j];
    for ( j = 0; j < BATCH_SIZE; j++ )
        make_small_invalid( &bs[j], i );

    result = mongo_insert_batch( conn, ns, (const bson **)bp, BATCH_SIZE, NULL, 0 );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_INVALID );

    for ( j = 0; j < BATCH_SIZE; j++ )
        bson_destroy( &bs[j] );

    bson_destroy( &b );
    mongo_cmd_drop_db( conn, "test" );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    return 0;
}
static void ngx_http_mongodb_rest_exit_worker(ngx_cycle_t* cycle) {
    mongo_disconnect(cached_connection);
    mongo_destroy(cached_connection);
}
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    // --------------- xulin add start -------------------
    char* ml_args;
    char* arg;
    unsigned int add_arg;
    unsigned int add_len;
    // --------------- xulin add end ---------------------
    char* value;
    ngx_http_mongo_connection_t *mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;
    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor ** cursors;
    gridfs_offset chunk_len;
    const char * chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE;
    volatile ngx_uint_t ecounter = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    if ( !mongo_conn->conn.connected
         && (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
             || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
        if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
        return NGX_HTTP_SERVICE_UNAVAILABLE;
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1 + request->args.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    // ------------------------------------ xulin add start --------------------------------------
    if (request->args.len > 0) {
        ml_args = (char*)malloc(sizeof(char) * (request->args.len + 1));
        memcpy(ml_args, request->args.data, request->args.len);
        ml_args[request->args.len] = '\0';

        add_len = full_uri.len - location_name.len;
        memcpy(value + add_len, "?", 1);
        add_len += 1;

        /* Copy every query argument except xc_md5 and _xingcloud_t. */
        arg = strtok(ml_args, "&");
        while (arg != NULL) {
            add_arg = 1;
            if (strstr(arg, "xc_md5") != NULL) {
                add_arg = 0;
            } else if (strstr(arg, "_xingcloud_t") != NULL) {
                add_arg = 0;
            }

            if (add_arg == 1) {
                memcpy(value + add_len, arg, strlen(arg));
                add_len += strlen(arg);
                memcpy(value + add_len, "&", 1);
                add_len += 1;
            }

            arg = strtok(NULL, "&");
        }
        free(ml_args);

        if (value[add_len - 1] == '?' || value[add_len - 1] == '&') {
            value[add_len - 1] = '\0';
        } else {
            value[add_len] = '\0';
        }
    }

    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, "ml_url = [%s]", value);
    // ------------------------------------ xulin add end ----------------------------------------

    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    do {
        e = FALSE;
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK) {
            e = TRUE; ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_init(&query);
    switch (gridfs_conf->type) {
    case BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
        bson_append_int(&query, (char*)gridfs_conf->field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    status = gridfs_find_query(&gfs, &query, &gfile);
    bson_destroy(&query);
    free(value);

    if (status == MONGO_ERROR) {
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);
    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- SEND THE HEADERS ---------- //

    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t *b;
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }

    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified/1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if ( gridfile_get_boolean(&gfile, "gzipped") ) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      gridfile_get_field(&gfile, "gzipped") );
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char *) "Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char *) "gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return ngx_http_output_filter(request, &out);
    }

    cursors = (mongo_cursor **)ngx_pcalloc(request->pool, sizeof(mongo_cursor *) * numchunks);
    if (cursors == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    ngx_memzero(cursors, sizeof(mongo_cursor *) * numchunks);

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {

        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE; ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );

        /* Set up the buffer chain */
        buffer->pos = (u_char*)chunk_data;
        buffer->last = (u_char*)chunk_data + chunk_len;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks-1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);
    return rc;
}
void TMongoDriver::close() {
    if (isOpen())
        mongo_disconnect(mongoConnection);
}
static void test_large( void ) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;
    bson lastError;
    bson lastErrorCmd;

    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;

    mongo_write_concern_init( &wc );
    wc.j = 1;
    mongo_write_concern_finish( &wc );
    mongo_set_write_concern( conn, &wc );

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
        fclose( fd );
    } else {
        /* Create a very large file */
        fill_buffer_randomly( buffer, ( int64_t )LARGE );
        fd = fopen( "bigfile", "w" );
        for( i = 0; i < 1024; i++ ) {
            fwrite( buffer, 1, LARGE, fd );
        }
        fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    gridfs_find_filename( gfs, "bigfile", gfile );
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );

    fd = fopen( "bigfile", "r" );
    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
        ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
        ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }
    fclose( fd );
    gridfile_destroy( gfile );

    /* Read the file using the streaming interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    /* Reset the write concern's j field to zero; we will manually call
       getLastError with j = 1 every few iterations instead. */
    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init( &wc );
    wc.j = 0;
    mongo_write_concern_finish( &wc );
    mongo_set_write_concern( conn, &wc );

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );
        if( i++ % 10 == 0 ) {
            bson_init( &lastErrorCmd );
            bson_append_int( &lastErrorCmd, "getLastError", 1 );
            bson_append_int( &lastErrorCmd, "j", 1 );
            bson_finish( &lastErrorCmd );

            bson_init( &lastError );
            mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

            bson_destroy( &lastError );
            bson_destroy( &lastErrorCmd );
        }
    }

    /* Reset the write concern's j field back to 1 */
    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init( &wc );
    wc.j = 1;
    mongo_write_concern_finish( &wc );
    mongo_set_write_concern( conn, &wc );

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );
    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfs_destroy( gfs );

    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}