/* R binding: return the GridFS file's chunk size as an integer vector of length 1. */
SEXP mongo_gridfile_get_chunk_size(SEXP gfile) {
    gridfile* file = _checkGridfile(gfile);
    SEXP chunk_size;
    PROTECT(chunk_size = allocVector(INTSXP, 1));
    INTEGER(chunk_size)[0] = gridfile_get_chunksize(file);
    UNPROTECT(1);
    return chunk_size;
}
/* Verify that a file stored in GridFS round-trips: find it by name, stream it
 * to disk and into memory, check its metadata, and compare the server-side
 * MD5 against a locally computed digest of data_before.  Removes the GridFS
 * file when done. */
void test_gridfile(gridfs *gfs, char *data_before, uint64_t length, char *filename, char *content_type) {
    gridfile gfile[1];
    char *data_after = malloc( LARGE );
    FILE * fd;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];
    uint64_t remaining = length;
    const mongo_md5_byte_t *slice = (const mongo_md5_byte_t *)data_before;

    if( data_after == NULL ) {
        printf("Failed to allocate memory");  /* fixed typo: "allocated" */
        exit(1);
    }

    gridfs_find_filename(gfs, filename, gfile);
    ASSERT(gridfile_exists(gfile));

    /* Round-trip through a file on disk. */
    fd = fopen("output", "w+");
    gridfile_write_file(gfile, fd);
    fseek(fd, 0, SEEK_SET);
    ASSERT(fread(data_after, length, sizeof(char), fd));
    fclose(fd);
    ASSERT( strncmp(data_before, data_after, length) == 0 );

    /* Round-trip through an in-memory read. */
    gridfile_read( gfile, length, data_after );
    ASSERT( strncmp(data_before, data_after, length) == 0 );

    /* Metadata checks. */
    ASSERT( strcmp( gridfile_get_filename( gfile ), filename ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) == length );
    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );
    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0) ;
    ASSERT( strncmp( data_before, data_after, length ) == 0 );

    /* MD5 of the original buffer, fed in INT_MAX-sized slices because
     * mongo_md5_append takes an int count.  Advance a byte pointer instead of
     * computing n * INT_MAX: that int-by-int multiplication overflows (UB)
     * for n > 1. */
    mongo_md5_init(pms);
    while( remaining > INT_MAX ) {
        mongo_md5_append(pms, slice, INT_MAX);
        slice += INT_MAX;
        remaining -= INT_MAX;
    }
    if( remaining > 0 )
        mongo_md5_append(pms, slice, (int)remaining);
    mongo_md5_finish(pms, digest);
    digest2hex(digest, hex_digest);
    ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );

    gridfile_destroy(gfile);
    gridfs_remove_filename(gfs, filename);
    free( data_after );
}
/* Copy up to `size` bytes of file content into `buf`, starting at the file's
 * current read position (gfile->pos), and advance the position.
 * The request is clamped to the bytes remaining in the file; the clamped
 * count is returned.  `buf` must hold at least that many bytes; no NUL
 * terminator is appended. */
gridfs_offset gridfile_read(gridfile* gfile, gridfs_offset size, char* buf) {
    mongo_cursor* chunks;
    bson chunk;
    int first_chunk;
    int last_chunk;
    int total_chunks;
    gridfs_offset chunksize;
    gridfs_offset contentlength;
    gridfs_offset bytes_left;
    int i;
    bson_iterator it;
    gridfs_offset chunk_len;
    const char * chunk_data;

    contentlength = gridfile_get_contentlength(gfile);
    chunksize = gridfile_get_chunksize(gfile);
    /* Clamp the request to what remains after the current position. */
    size = (contentlength - gfile->pos < size) ? contentlength - gfile->pos : size;
    bytes_left = size;

    /* Work out which chunk documents the requested byte range spans.
     * NOTE(review): if size == 0, last_chunk can precede first_chunk and the
     * copy loop body never runs — presumably callers never request 0 bytes;
     * verify. */
    first_chunk = (gfile->pos)/chunksize;
    last_chunk = (gfile->pos+size-1)/chunksize;
    total_chunks = last_chunk - first_chunk + 1;
    chunks = gridfile_get_chunks(gfile, first_chunk, total_chunks);

    for (i = 0; i < total_chunks; i++) {
        mongo_cursor_next(chunks);
        chunk = chunks->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );
        if (i == 0) {
            /* The first chunk may begin before gfile->pos: skip the offset
             * into it. */
            chunk_data += (gfile->pos)%chunksize;
            chunk_len -= (gfile->pos)%chunksize;
        }
        if (bytes_left > chunk_len) {
            memcpy(buf, chunk_data, chunk_len);
            bytes_left -= chunk_len;
            buf += chunk_len;
        } else {
            /* Final (possibly partial) chunk of the request. */
            memcpy(buf, chunk_data, bytes_left);
        }
    }

    mongo_cursor_destroy(chunks);
    gfile->pos = gfile->pos + size;
    return size;
}
/* Check that the GridFS file stored under `filename` matches `data_before`:
 * exercises file export, in-memory read, the metadata accessors, and the MD5
 * recorded by the server, then removes the file. */
void test_gridfile(gridfs *gfs, char *data_before, size_t length, char *filename, char *content_type) {
    gridfile gfile[1];
    char data_after[UPPER];
    FILE *stream;
    mongo_md5_state_t md5_state[1];
    mongo_md5_byte_t md5_digest[16];
    char md5_hex[33];

    gridfs_find_filename(gfs, filename, gfile);
    ASSERT(gridfile_exists(gfile));

    /* Export to disk and read back. */
    stream = fopen("output", "w+");
    gridfile_write_file(gfile, stream);
    fseek(stream, 0, SEEK_SET);
    ASSERT(fread(data_after, length, sizeof(char), stream));
    fclose(stream);
    ASSERT(strncmp(data_before, data_after, length) == 0);

    /* Read directly into memory. */
    gridfile_read(gfile, length, data_after);
    ASSERT(strncmp(data_before, data_after, length) == 0);

    /* Metadata checks. */
    ASSERT(strcmp(gridfile_get_filename(gfile), filename) == 0);
    ASSERT(gridfile_get_contentlength(gfile) == length);
    ASSERT(gridfile_get_chunksize(gfile) == DEFAULT_CHUNK_SIZE);
    ASSERT(strcmp(gridfile_get_contenttype(gfile), content_type) == 0);

    /* Locally computed MD5 must match what the server stored. */
    mongo_md5_init(md5_state);
    mongo_md5_append(md5_state, (const mongo_md5_byte_t *)data_before, (int)length);
    mongo_md5_finish(md5_state, md5_digest);
    digest2hex(md5_digest, md5_hex);
    ASSERT(strcmp(gridfile_get_md5(gfile), md5_hex) == 0);

    gridfile_destroy(gfile);
    gridfs_remove_filename(gfs, filename);
}
/* Validate a file previously stored in GridFS: content round-trips through
 * both file export and buffered read, metadata matches, the stored MD5 agrees
 * with a locally computed digest (uncompressed files only), and truncation via
 * the writer API behaves correctly.  Removes the file and the temporary
 * "output" file when done. */
static void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type ) {
    gridfile gfile[1];
    FILE *stream;
#ifdef DYING
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
#endif
    char hex_digest[33];
    int64_t i = length;
    /* Count of full INT_MAX-sized slices digested so far.  Initialized here:
     * the original only set n inside the DYING branch, so the rpmDigest path
     * read it uninitialized (UB). */
    int n = 0;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    /* Export to a file on disk and compare with the original buffer. */
    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* Read back through the driver's buffered API. */
    gridfile_read_buffer( gfile, data_after, length );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* Filenames are stored lower-cased. */
    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);
    strcpy( lowerName, filename );
    _strlwr( lowerName );
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );
    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );
    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* The stored MD5 is of the uncompressed payload, so only check it when
     * the file was not stored compressed. */
    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
#ifdef DYING
        mongo_md5_init( pms );
        while( i > INT_MAX ) {
            /* Cast before multiplying: n * INT_MAX overflows int for n > 1. */
            mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( (int64_t)n * INT_MAX ), INT_MAX );
            i -= INT_MAX;
            n += 1;
        }
        if( i > 0 )
            mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( (int64_t)n * INT_MAX ), (int)i );
        mongo_md5_finish( pms, digest );
        digest2hex( digest, hex_digest );
#else
        {
            DIGEST_CTX ctx = rpmDigestInit(PGPHASHALGO_MD5, RPMDIGEST_NONE);
            const char * _digest = NULL;
            int xx;
            while( i > INT_MAX ) {
                xx = rpmDigestUpdate(ctx, (char *)data_before + ((int64_t)n * INT_MAX), INT_MAX);
                i -= INT_MAX;
                n += 1;
            }
            /* Digest the tail slice as well: the original finalized without
             * ever feeding the last (length % INT_MAX) bytes, so the digest
             * was wrong for every length not a multiple of INT_MAX. */
            if( i > 0 )
                xx = rpmDigestUpdate(ctx, (char *)data_before + ((int64_t)n * INT_MAX), (size_t)i);
            xx = rpmDigestFinal(ctx, &_digest, NULL, 1);
            strncpy(hex_digest, _digest, 32+1);
            hex_digest[32] = '\0';
            _digest = _free(_digest);
        }
#endif
        ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }

    /* Truncate the file partway and confirm length and content. */
    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23);
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );
    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) == (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    /* Truncate to zero and confirm the file is empty. */
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );
    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read_buffer( gfile, data_after, length ) == 0 );

    gridfile_destroy( gfile );
    ASSERT( gridfs_remove_filename( gfs, filename ) == MONGO_OK );
    free( data_after );
    gridfs_test_unlink( "output" );
}
EXPORT int mongo_gridfile_get_chunk_size(struct gridfile_* gf) { return gridfile_get_chunksize((gridfile*)gf); }
/* Serve a GridFS file for the request URI.  The portion of the URI after the
 * location prefix is the lookup key (oid, int, or string, per location
 * config).  On success sends headers, then reads the file chunk by chunk into
 * pool-allocated buffers and pushes each through the output filter. */
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    char* data;
    ngx_uint_t chunksize;
    ngx_uint_t numchunks;
    ngx_uint_t chunklength;
    char* contenttype;
    ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_buffer buf;
    bson_oid_t oid;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    location_name = core_conf->name;
    full_uri = request->uri;

    /* defensive */
    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* If no mongo connection, create a default connection.  Done before the
     * value buffer is allocated so the error returns below cannot leak it
     * (the original allocated first and leaked `value` on every connection
     * failure path).
     * TODO: Find a better place for this logic */
    if (!gridfs_conf->gridfs_conn->connected) {
        switch (mongo_connect(gridfs_conf->gridfs_conn, NULL)) {
        case mongo_conn_success:
            break;
        case mongo_conn_bad_arg:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Bad Arguments");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_no_socket:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: No Socket");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_fail:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Connection Failure");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_not_master:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Not Master");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        default:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Unknown Error");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* Extract the value from the uri */
    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    /* URL Decoding */
    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    /* Find the GridFile */
    gridfs_init(gridfs_conf->gridfs_conn,
                (const char*)gridfs_conf->gridfs_db.data,
                (const char*)gridfs_conf->gridfs_root_collection.data,
                &gfs);
    bson_buffer_init(&buf);
    switch (gridfs_conf->gridfs_type) {
    case bson_oid:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&buf, (char*)gridfs_conf->gridfs_field.data, &oid);
        break;
    case bson_int:
        bson_append_int(&buf, (char*)gridfs_conf->gridfs_field.data, atoi(value));
        break;
    case bson_string:
        bson_append_string(&buf, (char*)gridfs_conf->gridfs_field.data, value);
        break;
    }
    bson_from_buffer(&query, &buf);
    if (!gridfs_find_query(&gfs, &query, &gfile)) {
        bson_destroy(&query);
        free(value);
        return NGX_HTTP_NOT_FOUND;
    }
    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    chunksize = gridfile_get_chunksize(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    /* Set the headers */
    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);
    ngx_http_send_header(request);

    /* Read and serve chunk by chunk; pool allocations are released with the
     * request, so the early error returns here do not leak. */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        /* Allocate space for the buffer of data */
        data = ngx_pcalloc(request->pool, sizeof(char) * chunksize);
        if (data == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate buffer for data");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        /* Set up the buffer chain */
        chunklength = gridfile_read(&gfile, chunksize, data);
        buffer->pos = (u_char*)data;
        buffer->last = (u_char*)data + chunklength;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks - 1);
        out.buf = buffer;
        out.next = NULL;
        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);
        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            return NGX_ERROR;
        }
    }
    return rc;
}
/* Serve a GridFS file identified by the URI suffix after the location prefix.
 * Response buffers point directly into per-chunk mongo cursor data, which
 * must stay alive until the response is sent; a pool cleanup handler releases
 * the cursors when the request pool is destroyed. */
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t chunksize;
    ngx_uint_t numchunks;
    char* contenttype;
    ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_buffer buf;
    bson_oid_t oid;
    mongo_cursor ** cursors;
    gridfs_offset chunk_len;
    const char * chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    location_name = core_conf->name;
    full_uri = request->uri;

    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    /* The cleanup handler runs on every request teardown, including the early
     * error returns below, and ngx_pool_cleanup_add does not zero the data
     * block — initialize it now so the handler never reads garbage. */
    gridfs_clndata->cursors = NULL;
    gridfs_clndata->numchunks = 0;

    /* defensive */
    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Extract the value from the uri */
    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    /* URL Decoding */
    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    /* Find the GridFile */
    gridfs_init(gridfs_conf->mongod_conn,
                (const char*)gridfs_conf->gridfs_db.data,
                (const char*)gridfs_conf->gridfs_root_collection.data,
                &gfs);
    bson_buffer_init(&buf);
    switch (gridfs_conf->gridfs_type) {
    case bson_oid:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&buf, (char*)gridfs_conf->gridfs_field.data, &oid);
        break;
    case bson_int:
        /* NOTE(review): ngx_atoi returns NGX_ERROR (-1) on malformed input,
         * which is stored as the query value unchecked — confirm intended. */
        bson_append_int(&buf, (char*)gridfs_conf->gridfs_field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case bson_string:
        bson_append_string(&buf, (char*)gridfs_conf->gridfs_field.data, value);
        break;
    }
    bson_from_buffer(&query, &buf);
    if (!gridfs_find_query(&gfs, &query, &gfile)) {
        bson_destroy(&query);
        free(value);
        return NGX_HTTP_NOT_FOUND;
    }
    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    chunksize = gridfile_get_chunksize(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    /* Set the headers */
    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);

    /* Determine if content is gzipped, set headers accordingly */
    if ( gridfile_get_boolean(&gfile,"gzipped") ) {
        /* Log via "%s": passing the field value as the format string was a
         * format-string vulnerability. */
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, "%s",
                      gridfile_get_field(&gfile,"gzipped") );
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char *) "Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char *) "gzip";
    }

    ngx_http_send_header(request);

    if (numchunks == 0) {
        /* Empty file: send a single empty last buffer. */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        return ngx_http_output_filter(request, &out);
    }

    cursors = (mongo_cursor **)ngx_pcalloc(request->pool, sizeof(mongo_cursor *) * numchunks);
    if (cursors == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate cursor array");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    /* Record cursors as they are created so the cleanup handler releases the
     * ones already opened even if an error return is taken mid-loop (the
     * original only recorded them after the loop completed). */
    gridfs_clndata->cursors = cursors;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        /* Fetch the chunk from mongo */
        cursors[i] = gridfile_get_chunks(&gfile, i, 1);
        gridfs_clndata->numchunks = i + 1;
        mongo_cursor_next(cursors[i]);
        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );
        /* Set up the buffer chain */
        buffer->pos = (u_char*)chunk_data;
        buffer->last = (u_char*)chunk_data + chunk_len;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks - 1);
        out.buf = buffer;
        out.next = NULL;
        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);
        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            return NGX_ERROR;
        }
    }
    return rc;
}
/* Verify a file stored in GridFS against `data_before`: content round-trips
 * through file export and direct read, metadata matches, the stored MD5
 * agrees with a locally computed digest (uncompressed files only), and
 * truncation via the writer API behaves correctly.  Removes the GridFS file
 * and the temporary "output" file when done. */
void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type ) {
    gridfile gfile[1];
    FILE *stream;
    mongo_md5_state_t pms[1];
    mongo_md5_byte_t digest[16];
    char hex_digest[33];
    int64_t i = length;
    int n;
    char *data_after = (char*)bson_malloc( LARGE );
    int truncBytes;
    char* lowerName;

    ASSERT(gridfs_find_filename( gfs, filename, gfile ) == MONGO_OK);
    ASSERT( gridfile_exists( gfile ) );

    /* Export to a file on disk and compare with the original buffer. */
    stream = fopen( "output", "w+" );
    gridfile_write_file( gfile, stream );
    fseek( stream, 0, SEEK_SET );
    ASSERT( fread( data_after, (size_t)length, sizeof( char ), stream ) );
    fclose( stream );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* Read directly into memory. */
    gridfile_read( gfile, length, data_after );
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* Filenames are stored lower-cased. */
    lowerName = (char*) bson_malloc( (int)strlen( filename ) + 1);
    strcpy( lowerName, filename);
    _strlwr( lowerName );
    ASSERT( strcmp( gridfile_get_filename( gfile ), lowerName ) == 0 );
    bson_free( lowerName );

    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)length );
    ASSERT( gridfile_get_chunksize( gfile ) == DEFAULT_CHUNK_SIZE );
    ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ;
    ASSERT( memcmp( data_before, data_after, (size_t)length ) == 0 );

    /* The stored MD5 is of the uncompressed payload, so only check it when
     * the file was not stored compressed.  The buffer is digested in
     * INT_MAX-sized slices because mongo_md5_append takes an int count. */
    if( !( gfile->flags & GRIDFILE_COMPRESS ) ) {
        mongo_md5_init( pms );
        n = 0;
        while( i > INT_MAX ) {
            /* Cast before multiplying: n * INT_MAX is an int multiplication
             * that overflows (UB) for n > 1. */
            mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( (int64_t)n * INT_MAX ), INT_MAX );
            i -= INT_MAX;
            n += 1;
        }
        if( i > 0 )
            mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( (int64_t)n * INT_MAX ), (int)i );
        mongo_md5_finish( pms, digest );
        digest2hex( digest, hex_digest );
        ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 );
    }

    /* Truncate the file partway and confirm length and content. */
    truncBytes = (int) (length > DEFAULT_CHUNK_SIZE * 4 ? length - DEFAULT_CHUNK_SIZE * 2 - 13 : 23);
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    ASSERT( gridfile_truncate(gfile, (size_t)(length - truncBytes)) == (size_t)(length - truncBytes));
    gridfile_writer_done( gfile );
    gridfile_seek(gfile, 0);
    ASSERT( gridfile_get_contentlength( gfile ) == (size_t)(length - truncBytes) );
    ASSERT( gridfile_read( gfile, length, data_after ) == (size_t)(length - truncBytes));
    ASSERT( memcmp( data_before, data_after, (size_t)(length - truncBytes) ) == 0 );

    /* Truncate to zero and confirm the file is empty. */
    gridfile_writer_init( gfile, gfs, filename, content_type, GRIDFILE_DEFAULT);
    gridfile_truncate(gfile, 0);
    gridfile_writer_done( gfile );
    ASSERT( gridfile_get_contentlength( gfile ) == 0 );
    ASSERT( gridfile_read( gfile, length, data_after ) == 0 );

    gridfile_destroy( gfile );
    gridfs_remove_filename( gfs, filename );
    free( data_after );
    _unlink( "output" );
}