EXPORT int mongo_gridfs_find(struct gridfs_* gfs, struct bson_* query, struct gridfile_** gf) {
    gridfile* gf_ = (gridfile*)malloc(sizeof(gridfile));
    if (gf_ == NULL)
        return 0;   /* allocation failure uses the same failure code as "not found" */
    if (gridfs_find_query((gridfs*)gfs, (bson*)query, gf_) != MONGO_OK) {
        free(gf_);
        return 0;
    }
    *gf = (struct gridfile_*)gf_;
    return 1;
}
SEXP mongo_gridfs_find(SEXP gfs, SEXP query) {
    gridfs* _gfs = _checkGridfs(gfs);
    gridfile* gfile = Calloc(1, gridfile);
    int result;
    /* accept either a BSON query or a plain filename string */
    if (_isBSON(query)) {
        bson* _query = _checkBSON(query);
        result = gridfs_find_query(_gfs, _query, gfile);
    } else {
        result = gridfs_find_filename(_gfs, CHAR(STRING_ELT(query, 0)), gfile);
    }
    if (result != MONGO_OK) {
        Free(gfile);   /* release the unused gridfile handle before returning NULL */
        return R_NilValue;
    }
    return _mongo_gridfile_create(gfs, gfile);
}
int gridfs_find_filename( gridfs *gfs, const char *filename, gridfile *gfile ) {
    bson query;
    int i;

    bson_init( &query );
    bson_append_string( &query, "filename", filename );
    bson_finish( &query );
    i = gridfs_find_query( gfs, &query, gfile );
    bson_destroy( &query );
    return i;
}
int gridfs_find_filename(gridfs* gfs, const char* filename, gridfile* gfile) {
    bson query;
    bson_buffer buf;
    int i;

    bson_buffer_init(&buf);
    bson_append_string(&buf, "filename", filename);
    bson_from_buffer(&query, &buf);
    i = gridfs_find_query(gfs, &query, gfile);
    bson_destroy(&query);
    return i;
}
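/* The two variants above target different generations of the legacy
 * mongo-c-driver API (bson_buffer era vs. bson_init/bson_finish era).
 * The following is a minimal caller sketch against the newer of the two
 * generations; the host, port, database name, and file name are
 * placeholders, not values taken from the snippets above. */
#include <stdio.h>
#include <stdlib.h>
#include "mongo.h"   /* legacy mongo-c-driver; pulls in the gridfs types */

int main(void) {
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];

    /* connect to a local mongod (host/port are assumptions) */
    if (mongo_client(conn, "127.0.0.1", 27017) != MONGO_OK) {
        fprintf(stderr, "connect failed: %d\n", (int)conn->err);
        return 1;
    }
    if (gridfs_init(conn, "testdb", "fs", gfs) != MONGO_OK) {
        mongo_destroy(conn);
        return 1;
    }

    /* look the file up by name, then slurp its contents */
    if (gridfs_find_filename(gfs, "example.txt", gfile) == MONGO_OK) {
        gridfs_offset len = gridfile_get_contentlength(gfile);
        char *buf = (char*)malloc(len);
        if (buf != NULL) {
            gridfile_read(gfile, len, buf);
            /* ... use buf[0..len) ... */
            free(buf);
        }
        gridfile_destroy(gfile);
    }

    gridfs_destroy(gfs);
    mongo_destroy(conn);
    return 0;
}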
//! Read raster data from MongoDB
int clsRasterData::ReadFromMongoDB(gridfs *gfs, const char* remoteFilename) {
    gridfile gfile[1];
    bson b[1];
    bson_init(b);
    bson_append_string(b, "filename", remoteFilename);
    bson_finish(b);

    int flag = gridfs_find_query(gfs, b, gfile);
    if (0 != flag) {
        bson_destroy(b);   // release the query before throwing
        throw ModelException("clsRasterData", "ReadFromMongoDB",
            "The file " + string(remoteFilename) + " does not exist.");
    }

    size_t length = (size_t)gridfile_get_contentlength(gfile);
    char* buf = (char*)malloc(length);
    gridfile_read(gfile, length, buf);
    float* data = (float*)buf;

    // read the raster header from the gridfile metadata
    bson bmeta[1];
    gridfile_get_metadata(gfile, bmeta);
    bson_iterator iterator[1];
    if (bson_find(iterator, bmeta, "NCOLS"))
        m_headers["NCOLS"] = (float)bson_iterator_int(iterator);
    if (bson_find(iterator, bmeta, "NROWS"))
        m_headers["NROWS"] = (float)bson_iterator_int(iterator);
    if (bson_find(iterator, bmeta, "NODATA_VALUE"))
        m_headers["NODATA_VALUE"] = (float)bson_iterator_double(iterator);
    if (bson_find(iterator, bmeta, "XLLCENTER"))
        m_headers["XLLCENTER"] = (float)bson_iterator_double(iterator);
    if (bson_find(iterator, bmeta, "YLLCENTER"))
        m_headers["YLLCENTER"] = (float)bson_iterator_double(iterator);
    if (bson_find(iterator, bmeta, "CELLSIZE"))
        m_headers["CELLSIZE"] = (float)bson_iterator_double(iterator);

    int nRows = (int)m_headers["NROWS"];
    int nCols = (int)m_headers["NCOLS"];
    vector<float> values;
    vector<int> positionRows;
    vector<int> positionCols;

    // collect all valid (non-nodata) cells from the row-major grid
    float nodataFloat = m_headers["NODATA_VALUE"];
    for (int i = 0; i < nRows; ++i) {
        for (int j = 0; j < nCols; ++j) {
            int index = i * nCols + j;
            float value = data[index];
            if (FloatEqual(nodataFloat, value))
                continue;
            values.push_back(value);
            positionRows.push_back(i);
            positionCols.push_back(j);
        }
    }

    // store the valid cells and their (row, col) positions
    m_nRows = (int)values.size();
    m_rasterData = new float[m_nRows];
    m_rasterPositionData = new float*[m_nRows];
    for (int i = 0; i < m_nRows; ++i) {
        m_rasterData[i] = values.at(i);
        m_rasterPositionData[i] = new float[2];
        m_rasterPositionData[i][0] = float(positionRows.at(i));
        m_rasterPositionData[i][1] = float(positionCols.at(i));
    }

    bson_destroy(b);
    gridfile_destroy(gfile);
    free(buf);
    return 0;
}
//! Read raster data from MongoDB and select by mask
int clsRasterData::ReadFromMongoDB(gridfs *gfs, const char* remoteFilename, clsRasterData* mask) {
    if (mask == NULL)
        return ReadFromMongoDB(gfs, remoteFilename);

    gridfile gfile[1];
    bson b[1];
    bson_init(b);
    bson_append_string(b, "filename", remoteFilename);
    bson_finish(b);

    int flag = gridfs_find_query(gfs, b, gfile);
    if (0 != flag) {
        bson_destroy(b);
        return -1;   // remote file not found
    }

    size_t length = (size_t)gridfile_get_contentlength(gfile);
    char* buf = (char*)malloc(length);
    gridfile_read(gfile, length, buf);
    float* data = (float*)buf;

    // take the valid-cell positions from the mask
    int nRows;
    float** validPosition;
    mask->getRasterPositionData(&nRows, &validPosition);
    m_nRows = nRows;
    m_mask = mask;

    // copy the header from the mask
    m_headers["NCOLS"] = mask->getASCCols();
    m_headers["NROWS"] = mask->getASCRows();
    m_headers["NODATA_VALUE"] = mask->getNoDataValue();
    m_headers["CELLSIZE"] = mask->getCellWidth();
    m_headers["XLLCENTER"] = mask->getXllCenter();
    m_headers["YLLCENTER"] = mask->getYllCenter();

    // pick the masked cells out of the full row-major grid
    m_rasterData = new float[nRows];
    int ascCols = mask->getASCCols();
    for (int index = 0; index < nRows; index++) {
        int i = (int)validPosition[index][0];
        int j = (int)validPosition[index][1];
        m_rasterData[index] = data[i * ascCols + j];
    }

    bson_destroy(b);
    gridfile_destroy(gfile);
    free(buf);
    return 0;
}
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    ngx_http_mongo_connection_t* mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;
    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor** cursors;
    gridfs_offset chunk_len;
    const char* chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE;
    volatile ngx_uint_t ecounter = 0;
    uint64_t range_start = 0;
    uint64_t range_end = 0;
    uint64_t current_buf_pos = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    if (mongo_conn->conn.connected == 0) {
        if (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
            if (mongo_conn->conn.connected) {
                mongo_disconnect(&mongo_conn->conn);
            }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
        if (ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to reauth to mongo: \"%V\"", &gridfs_conf->mongo);
            if (mongo_conn->conn.connected) {
                mongo_disconnect(&mongo_conn->conn);
            }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    bson_init(&query);
    switch (gridfs_conf->type) {
    case BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
        bson_append_int(&query, (char*)gridfs_conf->field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    do {
        e = FALSE;
        /* the assignment is parenthesized so status receives the driver's
           return code rather than the result of the comparison */
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK
            || ((status = gridfs_find_query(&gfs, &query, &gfile)) == MONGO_ERROR)) {
            e = TRUE;
            ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if (mongo_conn->conn.connected) {
                    mongo_disconnect(&mongo_conn->conn);
                }
                bson_destroy(&query);
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);

    // NaN workaround
    if (numchunks > INT_MAX) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    contenttype = (char*)gridfile_get_contenttype(&gfile);
    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- PARTIAL RANGE ---------- //

    if (request->headers_in.range) {
        gridfs_parse_range(request, &request->headers_in.range->value,
                           &range_start, &range_end, length);
    }

    // ---------- SEND THE HEADERS ---------- //

    if (range_start == 0 && range_end == 0) {
        request->headers_out.status = NGX_HTTP_OK;
        request->headers_out.content_length_n = length;
    } else {
        request->headers_out.status = NGX_HTTP_PARTIAL_CONTENT;
        request->headers_out.content_length_n = length;

        ngx_table_elt_t* content_range;
        content_range = ngx_list_push(&request->headers_out.headers);
        if (content_range == NULL) {
            return NGX_ERROR;
        }
        request->headers_out.content_range = content_range;
        content_range->hash = 1;
        ngx_str_set(&content_range->key, "Content-Range");
        content_range->value.data = ngx_pnalloc(request->pool,
                                                sizeof("bytes -/") - 1 + 3 * NGX_OFF_T_LEN);
        if (content_range->value.data == NULL) {
            return NGX_ERROR;
        }
        /* "Content-Range: bytes SSSS-EEEE/TTTT" header */
        content_range->value.len = ngx_sprintf(content_range->value.data,
                                               "bytes %O-%O/%O",
                                               range_start, range_end,
                                               request->headers_out.content_length_n)
                                   - content_range->value.data;
        request->headers_out.content_length_n = range_end - range_start + 1;
    }

    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    } else {
        ngx_http_set_content_type(request);
    }

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t* b;
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }

    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified / 1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if (gridfile_get_boolean(&gfile, "gzipped")) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      gridfile_get_field(&gfile, "gzipped"));
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char*)"Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char*)"gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return ngx_http_output_filter(request, &out);
    }

    /* ngx_pcalloc returns zeroed memory, so the cursor slots start out NULL */
    cursors = (mongo_cursor**)ngx_pcalloc(request->pool, sizeof(mongo_cursor*) * numchunks);
    if (cursors == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo, retrying on dropped connections */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE;
                ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if (mongo_conn->conn.connected) {
                        mongo_disconnect(&mongo_conn->conn);
                    }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len(&it);
        chunk_data = bson_iterator_bin_data(&it);

        if (range_start == 0 && range_end == 0) {
            /* no range request: serve the whole chunk */
            buffer->pos = (u_char*)chunk_data;
            buffer->last = (u_char*)chunk_data + chunk_len;
            buffer->memory = 1;
            buffer->last_buf = (i == numchunks - 1);
            out.buf = buffer;
            out.next = NULL;

            /* Serve the Chunk */
            rc = ngx_http_output_filter(request, &out);
        } else {
            /* range request: clip this chunk to [range_start, range_end] */
            if (range_start >= (current_buf_pos + chunk_len)
                || range_end <= current_buf_pos) {
                /* chunk lies entirely outside the range: no output */
                ngx_pfree(request->pool, buffer);
            } else {
                if (range_start <= current_buf_pos) {
                    buffer->pos = (u_char*)chunk_data;
                } else {
                    buffer->pos = (u_char*)chunk_data + (range_start - current_buf_pos);
                }
                if (range_end < (current_buf_pos + chunk_len)) {
                    buffer->last = (u_char*)chunk_data + (range_end - current_buf_pos + 1);
                } else {
                    buffer->last = (u_char*)chunk_data + chunk_len;
                }
                if (buffer->pos == buffer->last) {
                    ngx_log_error(NGX_LOG_ALERT, request->connection->log, 0,
                                  "zero size buf in writer "
                                  "range_start:%d range_end:%d "
                                  "current_buf_pos:%d chunk_len:%d i:%d numchunk:%d",
                                  range_start, range_end,
                                  current_buf_pos, chunk_len,
                                  i, numchunks);
                }
                buffer->memory = 1;
                buffer->last_buf = (i == numchunks - 1)
                    || (range_end < (current_buf_pos + chunk_len));
                out.buf = buffer;
                out.next = NULL;

                /* Serve the Chunk */
                rc = ngx_http_output_filter(request, &out);
            }
        }
        current_buf_pos += chunk_len;

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);
    return rc;
}
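/* gridfs_parse_range() is called by the handler above but its body is not
 * part of these excerpts. The sketch below illustrates the minimum such a
 * parser must do for a single "bytes=START-END" specifier; it is a
 * standalone illustration, not the module's actual implementation, and it
 * rejects suffix ranges ("bytes=-N") and range lists ("bytes=a-b,c-d").
 * The handler then clips each chunk buffer against [start, end], as seen
 * in the range branch of the chunk loop. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Returns 1 and fills start/end (clamped to content_length) on success;
 * returns 0 and leaves start/end at 0 for anything it cannot parse. */
static int parse_byte_range(const char* header, uint64_t content_length,
                            uint64_t* start, uint64_t* end) {
    char* p;
    uint64_t s, e;

    *start = 0;
    *end = 0;
    if (content_length == 0 || strncmp(header, "bytes=", 6) != 0)
        return 0;
    s = strtoull(header + 6, &p, 10);
    if (p == header + 6 || *p != '-')
        return 0;                      /* suffix form "bytes=-N" not handled */
    if (p[1] == '\0') {
        e = content_length - 1;        /* "bytes=START-" runs to end of file */
    } else {
        e = strtoull(p + 1, &p, 10);
        if (*p != '\0')
            return 0;                  /* range lists "a-b,c-d" not handled */
        if (e >= content_length)
            e = content_length - 1;
    }
    if (s > e)
        return 0;
    *start = s;
    *end = e;
    return 1;
}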
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    // --------------- xulin add start -------------------
    char* ml_args;
    char* arg;
    unsigned int add_arg;
    unsigned int add_len;
    // --------------- xulin add end -------------------
    char* value;
    ngx_http_mongo_connection_t* mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;
    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor** cursors;
    gridfs_offset chunk_len;
    const char* chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE;
    volatile ngx_uint_t ecounter = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* test the connected flag itself, not its address */
    if (!mongo_conn->conn.connected
        && (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
            || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
        if (mongo_conn->conn.connected) {
            mongo_disconnect(&mongo_conn->conn);
        }
        return NGX_HTTP_SERVICE_UNAVAILABLE;
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1
                                          + request->args.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    /* always NUL-terminate here: the args block below may be skipped entirely */
    value[full_uri.len - location_name.len] = '\0';

    // ------------------ xulin add start: append filtered query args ------------------
    if (request->args.len > 0) {
        ml_args = (char*)malloc(sizeof(char) * (request->args.len + 1));
        memcpy(ml_args, request->args.data, request->args.len);
        ml_args[request->args.len] = '\0';

        add_len = full_uri.len - location_name.len;
        memcpy(value + add_len, "?", 1);
        add_len += 1;

        /* copy every arg except the xc_md5 and _xingcloud_t parameters */
        arg = strtok(ml_args, "&");
        while (arg != NULL) {
            add_arg = 1;
            if (strstr(arg, "xc_md5") != NULL) {
                add_arg = 0;
            } else if (strstr(arg, "_xingcloud_t") != NULL) {
                add_arg = 0;
            }
            if (add_arg == 1) {
                memcpy(value + add_len, arg, strlen(arg));
                add_len += strlen(arg);
                memcpy(value + add_len, "&", 1);
                add_len += 1;
            }
            arg = strtok(NULL, "&");
        }
        free(ml_args);

        /* trim the trailing '?' or '&' left behind by the loop */
        if (value[add_len - 1] == '?' || value[add_len - 1] == '&') {
            value[add_len - 1] = '\0';
        } else {
            value[add_len] = '\0';
        }
    }
    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, "ml_url = [%s]", value);
    // ------------------ xulin add end ------------------

    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    do {
        e = FALSE;
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK) {
            e = TRUE;
            ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if (mongo_conn->conn.connected) {
                    mongo_disconnect(&mongo_conn->conn);
                }
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_init(&query);
    switch (gridfs_conf->type) {
    case BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
        bson_append_int(&query, (char*)gridfs_conf->field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    status = gridfs_find_query(&gfs, &query, &gfile);
    bson_destroy(&query);
    free(value);

    if (status == MONGO_ERROR) {
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);
    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- SEND THE HEADERS ---------- //

    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    } else {
        ngx_http_set_content_type(request);
    }

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t* b;
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }

    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified / 1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if (gridfile_get_boolean(&gfile, "gzipped")) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      gridfile_get_field(&gfile, "gzipped"));
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char*)"Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char*)"gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return ngx_http_output_filter(request, &out);
    }

    /* ngx_pcalloc returns zeroed memory, so the cursor slots start out NULL */
    cursors = (mongo_cursor**)ngx_pcalloc(request->pool, sizeof(mongo_cursor*) * numchunks);
    if (cursors == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo, retrying on dropped connections */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE;
                ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if (mongo_conn->conn.connected) {
                        mongo_disconnect(&mongo_conn->conn);
                    }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len(&it);
        chunk_data = bson_iterator_bin_data(&it);

        /* Set up the buffer chain */
        buffer->pos = (u_char*)chunk_data;
        buffer->last = (u_char*)chunk_data + chunk_len;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks - 1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);
    return rc;
}
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    char* data;
    ngx_uint_t chunksize;
    ngx_uint_t numchunks;
    ngx_uint_t chunklength;
    char* contenttype;
    ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_buffer buf;
    bson_oid_t oid;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    location_name = core_conf->name;
    full_uri = request->uri;

    /* defensive */
    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Extract the value from the uri */
    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    /* URL Decoding */
    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    /* If no mongo connection, create a default connection */
    /* TODO: Find a better place for this logic */
    if (!gridfs_conf->gridfs_conn->connected) {
        switch (mongo_connect(gridfs_conf->gridfs_conn, NULL)) {
        case mongo_conn_success:
            break;
        case mongo_conn_bad_arg:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Bad Arguments");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_no_socket:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: No Socket");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_fail:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Connection Failure");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case mongo_conn_not_master:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Not Master");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        default:
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Mongo Exception: Unknown Error");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* Find the GridFile */
    gridfs_init(gridfs_conf->gridfs_conn,
                (const char*)gridfs_conf->gridfs_db.data,
                (const char*)gridfs_conf->gridfs_root_collection.data,
                &gfs);
    bson_buffer_init(&buf);
    switch (gridfs_conf->gridfs_type) {
    case bson_oid:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&buf, (char*)gridfs_conf->gridfs_field.data, &oid);
        break;
    case bson_int:
        bson_append_int(&buf, (char*)gridfs_conf->gridfs_field.data, atoi(value));
        break;
    case bson_string:
        bson_append_string(&buf, (char*)gridfs_conf->gridfs_field.data, value);
        break;
    }
    bson_from_buffer(&query, &buf);

    /* this driver generation signals "found" with a nonzero return */
    if (!gridfs_find_query(&gfs, &query, &gfile)) {
        bson_destroy(&query);
        free(value);
        return NGX_HTTP_NOT_FOUND;
    }
    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    chunksize = gridfile_get_chunksize(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    /* Set the headers */
    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    } else {
        ngx_http_set_content_type(request);
    }

    ngx_http_send_header(request);

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Allocate space for the buffer of data */
        data = ngx_pcalloc(request->pool, sizeof(char) * chunksize);
        if (data == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate buffer for data");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Set up the buffer chain */
        chunklength = gridfile_read(&gfile, chunksize, data);
        buffer->pos = (u_char*)data;
        buffer->last = (u_char*)data + chunklength;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks - 1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return rc;
}
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t chunksize;
    ngx_uint_t numchunks;
    char* contenttype;
    ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_buffer buf;
    bson_oid_t oid;
    mongo_cursor** cursors;
    gridfs_offset chunk_len;
    const char* chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    location_name = core_conf->name;
    full_uri = request->uri;

    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    /* pool cleanup data is not zeroed and the handler runs on every
       return path, so initialize it before anything can fail */
    gridfs_clndata->cursors = NULL;
    gridfs_clndata->numchunks = 0;

    /* defensive */
    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Extract the value from the uri */
    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    /* URL Decoding */
    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    /* Find the GridFile */
    gridfs_init(gridfs_conf->mongod_conn,
                (const char*)gridfs_conf->gridfs_db.data,
                (const char*)gridfs_conf->gridfs_root_collection.data,
                &gfs);
    bson_buffer_init(&buf);
    switch (gridfs_conf->gridfs_type) {
    case bson_oid:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&buf, (char*)gridfs_conf->gridfs_field.data, &oid);
        break;
    case bson_int:
        bson_append_int(&buf, (char*)gridfs_conf->gridfs_field.data,
                        ngx_atoi((u_char*)value, strlen(value)));
        break;
    case bson_string:
        bson_append_string(&buf, (char*)gridfs_conf->gridfs_field.data, value);
        break;
    }
    bson_from_buffer(&query, &buf);

    /* this driver generation signals "found" with a nonzero return */
    if (!gridfs_find_query(&gfs, &query, &gfile)) {
        bson_destroy(&query);
        free(value);
        return NGX_HTTP_NOT_FOUND;
    }
    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    chunksize = gridfile_get_chunksize(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    /* Set the headers */
    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    } else {
        ngx_http_set_content_type(request);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if (gridfile_get_boolean(&gfile, "gzipped")) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      gridfile_get_field(&gfile, "gzipped"));
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char*)"Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char*)"gzip";
    }

    ngx_http_send_header(request);

    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;
        return ngx_http_output_filter(request, &out);
    }

    cursors = (mongo_cursor**)ngx_pcalloc(request->pool, sizeof(mongo_cursor*) * numchunks);
    if (cursors == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    /* register the cursor array now so cursors fetched so far are
       destroyed by the cleanup handler even on an early return */
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo */
        cursors[i] = gridfile_get_chunks(&gfile, i, 1);
        mongo_cursor_next(cursors[i]);
        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len(&it);
        chunk_data = bson_iterator_bin_data(&it);

        /* Set up the buffer chain */
        buffer->pos = (u_char*)chunk_data;
        buffer->last = (u_char*)chunk_data + chunk_len;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks - 1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return rc;
}
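/* ngx_http_gridfs_cleanup() is registered above but its body is not part of
 * these excerpts. Given the ngx_http_gridfs_cleanup_t layout used here (a
 * cursor array plus a chunk count), a minimal sketch would destroy each
 * cursor when the request pool is freed. This is an assumption-based
 * illustration, not the module's verbatim code; mongo_cursor_destroy() is
 * the legacy driver's cursor teardown call. */
static void ngx_http_gridfs_cleanup(void* data) {
    ngx_http_gridfs_cleanup_t* clndata = data;
    ngx_uint_t i;

    /* with cursors initialized to NULL and numchunks to 0, this is safe
       on every return path, including failures before any fetch */
    for (i = 0; i < clndata->numchunks; i++) {
        if (clndata->cursors[i] != NULL) {
            mongo_cursor_destroy(clndata->cursors[i]);
        }
    }
}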