/* Create or update a user entry in "<db>.system.users", storing the MD5
 * password digest produced by mongo_pass_digest. Upserts on { user: <user> }.
 * Returns the result of mongo_update. */
int mongo_cmd_add_user(mongo_connection* conn, const char* db, const char* user, const char* pass){
    bson_buffer buf;
    bson selector;
    bson update;
    char digest[33];
    int result;
    size_t db_len = strlen(db);
    char* ns = bson_malloc(db_len + strlen(".system.users") + 1);

    /* ns = "<db>.system.users" */
    strcpy(ns, db);
    strcpy(ns + db_len, ".system.users");

    mongo_pass_digest(user, pass, digest);

    /* Selector: { user: <user> } */
    bson_buffer_init(&buf);
    bson_append_string(&buf, "user", user);
    bson_from_buffer(&selector, &buf);

    /* Update: { $set: { pwd: <digest> } } */
    bson_buffer_init(&buf);
    bson_append_start_object(&buf, "$set");
    bson_append_string(&buf, "pwd", digest);
    bson_append_finish_object(&buf);
    bson_from_buffer(&update, &buf);

    result = mongo_update(conn, ns, &selector, &update, MONGO_UPDATE_UPSERT);

    free(ns);
    bson_destroy(&selector);
    bson_destroy(&update);
    return result;
}
/* Find the newest GridFS file matching `query`; on a hit, initializes
 * `gfile` from the metadata document and returns TRUE, else FALSE. */
int gridfs_find_query(gridfs* gfs, bson* query, gridfile* gfile )
{
    bson_buffer bb;
    bson orderby;
    bson wrapped;
    bson result;
    int found;

    /* orderby: { uploadDate: -1 } so the most recent revision wins. */
    bson_buffer_init(&bb);
    bson_append_int(&bb, "uploadDate", -1);
    bson_from_buffer(&orderby, &bb);

    /* Wrap as { query: <query>, orderby: { uploadDate: -1 } }. */
    bson_buffer_init(&bb);
    bson_append_bson(&bb, "query", query);
    bson_append_bson(&bb, "orderby", &orderby);
    bson_from_buffer(&wrapped, &bb);

    found = mongo_find_one(gfs->client, gfs->files_ns, &wrapped, NULL, &result);

    bson_destroy(&orderby);
    bson_destroy(&wrapped);

    if (!found)
        return FALSE;

    gridfile_init(gfs, &result, gfile);
    bson_destroy(&result);
    return TRUE;
}
/* Return a cursor over `size` chunks of `gfile`, ordered by chunk number.
 *
 * start: first chunk number wanted.
 * size:  number of chunks; when exactly 1 the query matches "n" directly,
 *        otherwise it asks for n >= start and relies on the cursor limit.
 *
 * The file's _id may be an OID, a string, or an int; any other type is
 * unsupported and yields NULL. */
mongo_cursor* gridfile_get_chunks(gridfile* gfile, int start, int size)
{
    bson_iterator it;
    bson_oid_t id;
    bson_buffer gte_buf;
    bson gte_bson;
    bson_buffer query_buf;
    bson query_bson;
    bson_buffer orderby_buf;
    bson orderby_bson;
    bson_buffer command_buf;
    bson command_bson;
    bson_type type;
    const char *id_str;
    int id_int;

    type = bson_find(&it, gfile->meta, "_id");
    if( type == bson_oid ) {
        id = *bson_iterator_oid(&it);
        bson_buffer_init(&query_buf);
        bson_append_oid(&query_buf, "files_id", &id);
    }
    else if (type == bson_string) {
        id_str = bson_iterator_string(&it);
        bson_buffer_init(&query_buf);
        bson_append_string(&query_buf, "files_id", id_str);
    }
    else if (type == bson_int) {
        /* BUG FIX: this branch previously re-tested bson_string, making the
         * integer-_id case unreachable and rejecting such files with NULL. */
        id_int = bson_iterator_int(&it);
        bson_buffer_init(&query_buf);
        bson_append_int(&query_buf, "files_id", id_int);
    }
    else
        return NULL;

    if (size == 1) {
        bson_append_int(&query_buf, "n", start);
    }
    else {
        /* { n: { $gte: start } } */
        bson_buffer_init(&gte_buf);
        bson_append_int(&gte_buf, "$gte", start);
        bson_from_buffer(&gte_bson, &gte_buf);
        bson_append_bson(&query_buf, "n", &gte_bson);
    }
    bson_from_buffer(&query_bson, &query_buf);

    /* orderby: { n: 1 } so chunks stream back in file order. */
    bson_buffer_init(&orderby_buf);
    bson_append_int(&orderby_buf, "n", 1);
    bson_from_buffer(&orderby_bson, &orderby_buf);

    bson_buffer_init(&command_buf);
    bson_append_bson(&command_buf, "query", &query_bson);
    bson_append_bson(&command_buf, "orderby", &orderby_bson);
    bson_from_buffer(&command_bson, &command_buf);

    return mongo_find(gfile->gfs->client, gfile->gfs->chunks_ns,
                      &command_bson, NULL, size, 0, 0);
}
/* Create index on smsc and ts fields, as these are used for retrieving the DLR */
static void dlr_mongodb_ensure_index(void)
{
    DBPoolConn *pconn;
    mongo_connection *conn = NULL;
    bson_buffer bb;
    bson key;

    /* Borrow a connection from the pool; silently give up if none is available. */
    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    /* Compound index spec: { <smsc field>: 1, <ts field>: 1 }. */
    bson_buffer_init(&bb);
    bson_append_int(&bb, octstr_get_cstr(fields->field_smsc), 1);
    bson_append_int(&bb, octstr_get_cstr(fields->field_ts), 1);
    bson_from_buffer(&key, &bb);

    MONGO_TRY {
        mongo_create_index(conn, mongodb_namespace, &key, 0, NULL);
    } MONGO_CATCH {
        /* Log the driver exception type; cleanup below still runs. */
        mongodb_error("dlr_mongodb_ensure_index", conn->exception.type);
    }

    /* Return the connection to the pool before releasing the index spec. */
    dbpool_conn_produce(pconn);
    bson_destroy(&key);
}
/* Build a large benchmark document: nested meta/structure objects plus a
 * 280-element word array drawn from the module-level `words` table. */
static void make_large(bson * out, int i){
    int w;
    char key[4];
    bson_buffer buf;

    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");
    bson_append_int(&buf, "x", i);
    bson_append_string(&buf, "base_url", "http://www.example.com/test-me");
    bson_append_int(&buf, "total_word_count", 6743);
    bson_append_int(&buf, "access_time", 999); /*TODO use date*/

    bson_append_start_object(&buf, "meta_tags");
    bson_append_string(&buf, "description", "i am a long description string");
    bson_append_string(&buf, "author", "Holly Man");
    bson_append_string(&buf, "dynamically_created_meta_tag", "who know\n what");
    bson_append_finish_object(&buf);

    bson_append_start_object(&buf, "page_structure");
    bson_append_int(&buf, "counted_tags", 3450);
    bson_append_int(&buf, "no_of_js_attached", 10);
    bson_append_int(&buf, "no_of_images", 6);
    bson_append_finish_object(&buf);

    bson_append_start_array(&buf, "harvested_words");
    for (w = 0; w < 14*20; w++){
        bson_numstr(key, w);
        bson_append_string(&buf, key, words[w % 14]);
    }
    bson_append_finish_object(&buf);

    bson_from_buffer(out, &buf);
}
/* Remove DLR */
/* Deletes the DLR document matching smsc + ts (and destination, when given). */
static void dlr_mongodb_remove(const Octstr *smsc, const Octstr *ts, const Octstr *dst)
{
    DBPoolConn *pconn;
    bson cond;
    bson_buffer cond_buf;
    mongo_connection *conn = NULL;

    /* Borrow a pooled connection; bail out silently when none is available. */
    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    /* Selector: { <smsc>: ..., <ts>: ..., [<dst>: ...] }. */
    bson_buffer_init(&cond_buf);
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(smsc));
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(ts));
    if (dst) {
        bson_append_string(&cond_buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(dst));
    }
    bson_from_buffer(&cond, &cond_buf);

    MONGO_TRY {
        mongo_remove(conn, mongodb_namespace, &cond);
    } MONGO_CATCH {
        /* Log the driver exception; cleanup below still runs. */
        mongodb_error("dlr_mongodb_remove", conn->exception.type);
    }

    dbpool_conn_produce(pconn);
    bson_destroy(&cond);
}
/* Saves accounting information */
/* Builds a BSON document from the request's value pairs and inserts it into
 * data->acct_base. When only_stop is configured, only Acct-Status-Type == 2
 * (Stop) packets are inserted. Returns RLM_MODULE_OK, or RLM_MODULE_FAIL
 * when the insert throws a driver exception. */
static int mongo_account(void *instance, REQUEST *request)
{
    rlm_mongo_t *data = (rlm_mongo_t *)instance;
    bson b;
    bson_buffer buf;
    const char *attr;
    char value[MAX_STRING_LEN+1];
    VALUE_PAIR *vp = request->packet->vps;
    /* shall we insert this packet or not */
    int insert;

    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");

    insert = 0;
    while (vp) {
        attr = vp->name;
        if ((strcmp(attr, "Acct-Status-Type") == 0) && ((strcmp(data->only_stop, "") != 0))) {
            if ((vp->vp_integer & 0xffffff) != 2) {
                /* not a Stop packet: skip the rest, insert stays 0 */
                break;
            } else {
                insert = 1;
            }
        }
        switch (vp->type) {
            case PW_TYPE_INTEGER:
                bson_append_int(&buf, attr, vp->vp_integer & 0xffffff);
                break;
            case PW_TYPE_BYTE:
            case PW_TYPE_SHORT:
                bson_append_int(&buf, attr, vp->vp_integer);
                break;
            case PW_TYPE_DATE:
                bson_append_time_t(&buf, attr, vp->vp_date);
                break;
            default:
                /* everything else is stored as its printable form */
                vp_prints_value(value, sizeof(value), vp, 0);
                bson_append_string(&buf, attr, value);
                RDEBUG("mongo default insert %s", value);
                break;
        }
        vp = vp->next;
    }
    bson_from_buffer(&b, &buf);

    MONGO_TRY {
        if (insert == 1) {
            mongo_insert(conn, data->acct_base, &b);
            RDEBUG("accounting record was inserted");
        }
    } MONGO_CATCH {
        radlog(L_ERR, "mongo_insert failed");
        /* BUG FIX: the early return previously leaked the BSON document
         * built above; release it before bailing out. */
        bson_destroy(&b);
        return RLM_MODULE_FAIL;
    }

    bson_destroy(&b);
    return RLM_MODULE_OK;
}
/* Build a minimal benchmark document: { _id: <new oid>, x: i }. */
static void make_small(bson * out, int i){
    bson_buffer buf;

    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");
    bson_append_int(&buf, "x", i);
    bson_from_buffer(out, &buf);
}
static void find_range(const char* ns){ int i; bson b; for (i=0; i < PER_TRIAL; i++){ int j=0; mongo_cursor * cursor; bson_buffer bb; bson_buffer_init(&bb); bson_append_start_object(&bb, "x"); bson_append_int(&bb, "$gt", PER_TRIAL/2); bson_append_int(&bb, "$lt", PER_TRIAL/2 + BATCH_SIZE); bson_append_finish_object(&bb); bson_from_buffer(&b, &bb); cursor = mongo_find(conn, ns, &b, NULL, 0,0,0); ASSERT(cursor); while(mongo_cursor_next(cursor)) { j++; } ASSERT(j == BATCH_SIZE-1); mongo_cursor_destroy(cursor); bson_destroy(&b); } }
/* Run the one-string command { <cmdstr>: <arg> } against `db`.
 * On success the raw reply is handed to the caller through `realout`
 * (caller must destroy it); otherwise it is destroyed here.
 * Returns MONGO_OK when the reply's "ok" field is truthy, else MONGO_ERROR. */
int mongo_simple_str_command(mongo_connection * conn, const char * db, const char* cmdstr, const char* arg, bson * realout)
{
    bson reply;
    bson command;
    bson_buffer bb;
    int ok = 0;

    bson_buffer_init(&bb);
    bson_append_string(&bb, cmdstr, arg);
    bson_from_buffer(&command, &bb);

    if( mongo_run_command(conn, db, &command, &reply) == MONGO_OK ) {
        bson_iterator it;
        if (bson_find(&it, &reply, "ok"))
            ok = bson_iterator_bool(&it);
    }
    bson_destroy(&command);

    if (realout)
        *realout = reply;   /* ownership moves to the caller */
    else
        bson_destroy(&reply);

    return ok ? MONGO_OK : MONGO_ERROR;
}
/* Count documents in <db>.<ns> matching `query` via the server "count"
 * command. Returns the server-reported count, or -1 when the command fails
 * or the reply has no "n" field. Driver exceptions are re-thrown after
 * local cleanup. */
int64_t mongo_count(mongo_connection* conn, const char* db, const char* ns, bson* query){
    bson_buffer bb;
    bson cmd;
    bson out;
    int64_t count = -1;

    /* Command: { count: <ns>, [query: <query>] }. */
    bson_buffer_init(&bb);
    bson_append_string(&bb, "count", ns);
    if (query && bson_size(query) > 5) /* not empty */
        bson_append_bson(&bb, "query", query);
    bson_from_buffer(&cmd, &bb);

    MONGO_TRY{
        if(mongo_run_command(conn, db, &cmd, &out)){
            bson_iterator it;
            if(bson_find(&it, &out, "n"))
                count = bson_iterator_long(&it);
        }
    }MONGO_CATCH{
        /* destroy the command before propagating the driver exception */
        bson_destroy(&cmd);
        MONGO_RETHROW();
    }

    bson_destroy(&cmd);
    /* NOTE(review): `out` is destroyed even when mongo_run_command failed —
     * confirm the driver leaves the reply in a destroyable state on failure. */
    bson_destroy(&out);
    return count;
}
/* Look up the document with the given _id in <db>.<collection> and point the
 * nginx buffer `b` at a JSON rendering of it (or an error object). */
static void get(ngx_str_t *db, ngx_str_t *collection, char* id, ngx_buf_t *b){
    bson_buffer bb;
    bson obj;
    bson cond;
    char result[1000] = "";
    char ns[1000];
    int found;

    /* Build the query { _id: <id> }. */
    bson_buffer_init(&bb);
    bson_append_string(&bb, "_id", id);
    bson_from_buffer(&cond, &bb);

    /* BUG FIX: ngx_str_t data is not guaranteed NUL-terminated and the old
     * sprintf could overflow ns; bound both inputs and the destination. */
    snprintf(ns, sizeof(ns), "%.*s.%.*s",
             (int) db->len, (const char*) db->data,
             (int) collection->len, (const char*) collection->data);

    found = mongo_find_one(cached_connection, ns, &cond, 0, &obj);
    if(!found){
        strcpy(result, "{'error':'record not found'}");
    } else {
        to_json(result, obj.data, 10);
        /* BUG FIX: only destroy obj when the lookup filled it in; destroying
         * an uninitialized bson on the miss path was undefined behavior. */
        bson_destroy(&obj);
    }

    /* NOTE(review): `result` is a stack buffer, so b->pos/b->last dangle once
     * this function returns — the payload should live in request pool memory. */
    b->pos = (u_char*)result; /* address of the first position of the data */
    b->last = (u_char*)result + strlen(result); /* address of the last position of the data */

    bson_destroy(&cond);
}
/* Update DLR */
/* Sets the status field of the DLR matching smsc + ts (and dst, when given). */
static void dlr_mongodb_update(const Octstr *smsc, const Octstr *ts, const Octstr *dst, int status)
{
    DBPoolConn *pconn;
    bson cond, op;
    bson_buffer cond_buf, op_buf;
    mongo_connection *conn = NULL;

    /* Borrow a pooled connection; bail out silently when none is available. */
    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    /* Selector: { <smsc>: ..., <ts>: ..., [<dst>: ...] }. */
    bson_buffer_init(&cond_buf);
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(smsc));
    bson_append_string(&cond_buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(ts));
    if (dst) {
        bson_append_string(&cond_buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(dst));
    }
    bson_from_buffer(&cond, &cond_buf);

    /* Update document: { $set: { <status field>: status } }. */
    bson_buffer_init(&op_buf);
    {
        bson_buffer *sub = bson_append_start_object(&op_buf, "$set");
        bson_append_int(sub, octstr_get_cstr(fields->field_status), status);
        bson_append_finish_object(sub);
    }
    bson_from_buffer(&op, &op_buf);

    MONGO_TRY {
        mongo_update(conn, mongodb_namespace, &cond, &op, 0);
    } MONGO_CATCH {
        /* Log the driver exception; cleanup below still runs. */
        mongodb_error("dlr_mongodb_update", conn->exception.type);
    }

    dbpool_conn_produce(pconn);
    bson_destroy(&cond);
    bson_destroy(&op);
}
/* Create a single-field ascending index on `field` over `ns`.
 * Thin wrapper around mongo_create_index; `options`/`out` pass through. */
bson_bool_t mongo_create_simple_index(mongo_connection * conn, const char * ns, const char* field, int options, bson * out){
    bson_buffer spec_buf;
    bson spec;
    bson_bool_t created;

    /* Index spec: { <field>: 1 }. */
    bson_buffer_init(&spec_buf);
    bson_append_int(&spec_buf, field, 1);
    bson_from_buffer(&spec, &spec_buf);

    created = mongo_create_index(conn, ns, &spec, options, out);
    bson_destroy(&spec);
    return created;
}
/* Delete every GridFS file named `filename`, along with all of its chunks. */
void gridfs_remove_filename(gridfs* gfs, const char* filename )
{
    bson query;
    bson_buffer buf;
    mongo_cursor* files;
    bson file;
    bson_iterator it;
    bson_oid_t id;
    bson b;

    bson_buffer_init(&buf);
    bson_append_string(&buf, "filename", filename);
    bson_from_buffer(&query, &buf);
    files = mongo_find(gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0);
    bson_destroy(&query);

    /* BUG FIX: mongo_find may return NULL; don't iterate a failed cursor. */
    if (files == NULL)
        return;

    /* Remove each file and it's chunks from files named filename */
    while (mongo_cursor_next(files)) {
        file = files->current;
        bson_find(&it, &file, "_id");
        id = *bson_iterator_oid(&it);

        /* Remove the file with the specified id */
        bson_buffer_init(&buf);
        bson_append_oid(&buf, "_id", &id);
        bson_from_buffer(&b, &buf);
        mongo_remove( gfs->client, gfs->files_ns, &b);
        bson_destroy(&b);

        /* Remove all chunks from the file with the specified id */
        bson_buffer_init(&buf);
        bson_append_oid(&buf, "files_id", &id);
        bson_from_buffer(&b, &buf);
        mongo_remove( gfs->client, gfs->chunks_ns, &b);
        bson_destroy(&b);
    }

    /* BUG FIX: the cursor was never destroyed, leaking client memory and
     * leaving the server-side cursor open. */
    mongo_cursor_destroy(files);
}
/* Insert the metadata document for a stored file into <prefix>.files.
 * Runs the server-side "filemd5" command first so the stored md5 matches the
 * uploaded chunks. Returns the inserted metadata bson; the caller owns it. */
static bson gridfs_insert_file( gridfs* gfs, const char* name, const bson_oid_t id, gridfs_offset length, const char* contenttype)
{
    bson command;
    bson res;
    bson ret;
    bson_buffer buf;
    bson_iterator it;

    /* Check run md5 */
    bson_buffer_init(&buf);
    bson_append_oid(&buf, "filemd5", &id);
    bson_append_string(&buf, "root", gfs->prefix);
    bson_from_buffer(&command, &buf);
    /* NOTE(review): the command runs inside assert(), so an NDEBUG build
     * would skip it entirely — confirm this module is never built with NDEBUG. */
    assert(mongo_run_command(gfs->client, gfs->dbname, &command, &res));
    bson_destroy(&command);

    /* Create and insert BSON for file metadata */
    bson_buffer_init(&buf);
    bson_append_oid(&buf, "_id", &id);
    if (name != NULL && *name != '\0') {
        bson_append_string(&buf, "filename", name);
    }
    /* NOTE(review): length is a gridfs_offset appended as an int — files whose
     * size exceeds INT_MAX would be truncated; verify upstream size limits. */
    bson_append_int(&buf, "length", length);
    bson_append_int(&buf, "chunkSize", DEFAULT_CHUNK_SIZE);
    /* uploadDate is stored in milliseconds (seconds * 1000). */
    bson_append_date(&buf, "uploadDate", (bson_date_t)1000*time(NULL));
    /* Copy the md5 out of the filemd5 reply before destroying it. */
    bson_find(&it, &res, "md5");
    bson_append_string(&buf, "md5", bson_iterator_string(&it));
    bson_destroy(&res);
    if (contenttype != NULL && *contenttype != '\0') {
        bson_append_string(&buf, "contentType", contenttype);
    }
    bson_from_buffer(&ret, &buf);
    mongo_insert(gfs->client, gfs->files_ns, &ret);
    return ret;
}
/* Authenticate `user` against database `db` using the getnonce/authenticate
 * handshake: the key sent to the server is md5(nonce + user + <pass digest>),
 * where the pass digest comes from mongo_pass_digest.
 * Returns MONGO_OK on success, MONGO_ERROR on failure.
 * NOTE(review): return type is bson_bool_t but MONGO_OK/MONGO_ERROR codes are
 * returned — confirm callers compare against the codes, not true/false. */
bson_bool_t mongo_cmd_authenticate(mongo_connection* conn, const char* db, const char* user, const char* pass){
    bson_buffer bb;
    bson from_db, auth_cmd;
    const char* nonce;
    bson_bool_t success = 0;
    mongo_md5_state_t st;
    mongo_md5_byte_t digest[16];
    char hex_digest[33];

    /* Ask the server for a nonce to fold into the key digest. */
    if( mongo_simple_int_command(conn, db, "getnonce", 1, &from_db) == MONGO_OK ) {
        bson_iterator it;
        bson_find(&it, &from_db, "nonce");
        nonce = bson_iterator_string(&it);
    }
    else {
        return MONGO_ERROR;
    }

    /* hex_digest first holds the password digest... */
    mongo_pass_digest(user, pass, hex_digest);

    /* ...then is overwritten with hex(md5(nonce + user + pass-digest)). */
    mongo_md5_init(&st);
    mongo_md5_append(&st, (const mongo_md5_byte_t*)nonce, strlen(nonce));
    mongo_md5_append(&st, (const mongo_md5_byte_t*)user, strlen(user));
    mongo_md5_append(&st, (const mongo_md5_byte_t*)hex_digest, 32);
    mongo_md5_finish(&st, digest);
    digest2hex(digest, hex_digest);

    /* Command: { authenticate: 1, user, nonce, key }. */
    bson_buffer_init(&bb);
    bson_append_int(&bb, "authenticate", 1);
    bson_append_string(&bb, "user", user);
    bson_append_string(&bb, "nonce", nonce);
    bson_append_string(&bb, "key", hex_digest);
    bson_from_buffer(&auth_cmd, &bb);

    /* The getnonce reply is no longer needed; from_db is reused below. */
    bson_destroy(&from_db);

    if( mongo_run_command(conn, db, &auth_cmd, &from_db) == MONGO_OK ) {
        bson_iterator it;
        if(bson_find(&it, &from_db, "ok"))
            success = bson_iterator_bool(&it);
    }

    bson_destroy(&from_db);
    bson_destroy(&auth_cmd);

    if( success )
        return MONGO_OK;
    else
        return MONGO_ERROR;
}
/* Find the newest GridFS file named `filename`; the TRUE/FALSE result is
 * whatever gridfs_find_query reports. */
int gridfs_find_filename(gridfs* gfs, const char* filename, gridfile* gfile)
{
    bson_buffer bb;
    bson by_name;
    int found;

    /* Query: { filename: <filename> }. */
    bson_buffer_init(&bb);
    bson_append_string(&bb, "filename", filename);
    bson_from_buffer(&by_name, &bb);

    found = gridfs_find_query(gfs, &by_name, gfile);
    bson_destroy(&by_name);
    return found;
}
/* Look up `user` in `collection` on host:port and copy the password-field
 * value into *value (allocated from `pool`). *value stays NULL when the user
 * or field is absent. Returns APR_EGENERAL on connection failure, otherwise
 * APR_SUCCESS. */
static apr_status_t fetch_mongodb_value(const char *host, int port, const char *userfield, const char *passwordfield, const char *collection, const char *user, char **value, apr_pool_t *pool)
{
    mongo_connection conn;
    mongo_connection_options *opts;
    mongo_conn_return mongo_status;
    bson query[1];
    bson *out;
    bson_buffer query_buf[1];
    bson_bool_t found;

    *value = NULL;

    opts = apr_palloc( pool, sizeof(mongo_connection_options));
    /* NOTE(review): unbounded copy — confirm `host` cannot exceed opts->host. */
    strcpy( opts->host, host);
    opts->port = port;

    mongo_status = mongo_connect( pool, &conn, opts );
    if ( mongo_status != mongo_conn_success) {
        char buf[120];
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,"couldn't connect to mongoDB - (%s)", mongo_strerror( mongo_status, buf,sizeof(buf) ));
        return APR_EGENERAL;
    }

    /* Query: { <userfield>: <user> }. */
    bson_buffer_init( pool, query_buf );
    bson_append_string( query_buf, userfield, user);
    bson_from_buffer( query, query_buf );

    out = apr_palloc(pool, sizeof(bson));
    found = mongo_find_one( &conn, collection, query, NULL, out );
    bson_destroy( query );

    if ( found ) {
        bson_iterator it;
        if (bson_find( &it, out, passwordfield )) {
            /* Copy into pool memory before the bson data is released. */
            *value = apr_pstrdup( pool,bson_iterator_string(&it));
        }
        /* BUG FIX: the result document's driver-allocated data was never
         * released, leaking on every successful lookup. */
        bson_destroy( out );
    }

    mongo_destroy( &conn );
    return APR_SUCCESS;
}
/* Build a medium benchmark document: scalar fields plus a two-element array. */
static void make_medium(bson * out, int i){
    bson_buffer buf;

    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");
    bson_append_int(&buf, "x", i);
    bson_append_int(&buf, "integer", 5);
    bson_append_double(&buf, "number", 5.05);
    bson_append_bool(&buf, "boolean", 0);

    bson_append_start_array(&buf, "array");
    bson_append_string(&buf, "0", "test");
    bson_append_string(&buf, "1", "benchmark");
    bson_append_finish_object(&buf);

    bson_from_buffer(out, &buf);
}
/* Allocate and build one GridFS chunk document:
 * { files_id: <id>, n: <chunkNumber>, data: <binary subtype 2> }.
 * Returns NULL when the bson wrapper cannot be allocated; the caller owns
 * the returned bson. */
static bson * chunk_new(bson_oid_t id, int chunkNumber, const char * data, int len)
{
    bson_buffer bb;
    bson * chunk = (bson *)malloc(sizeof(bson));

    if (chunk == NULL)
        return NULL;

    bson_buffer_init(&bb);
    bson_append_oid(&bb, "files_id", &id);
    bson_append_int(&bb, "n", chunkNumber);
    bson_append_binary(&bb, "data", 2, data, len);
    bson_from_buffer(chunk, &bb);
    return chunk;
}
/* Look up the RADIUS user document (optionally filtered by MAC and the
 * enable flag) and extract username/password options via find_in_array.
 * Returns 1 on success, 0 when the user is missing or the query throws. */
int find_radius_options(rlm_mongo_t *data, char *username, char *mac, char *password)
{
    bson_buffer bb;
    bson query;
    bson field;
    bson result;
    bson_iterator it;

    bson_buffer_init(&bb);
    bson_append_string(&bb, data->search_field, username);
    if (strcmp(data->mac_field, "") != 0) {
        bson_append_string(&bb, data->mac_field, mac);
    }
    if (strcmp(data->enable_field, "") != 0) {
        bson_append_bool(&bb, data->enable_field, 1);
    }
    bson_from_buffer(&query, &bb);
    bson_buffer_destroy(&bb);

    bson_empty(&field);
    bson_empty(&result);

    MONGO_TRY{
        if (mongo_find_one(conn, data->base, &query, &field, &result) == 0) {
            /* BUG FIX: `query` was leaked on every exit path. */
            bson_destroy(&query);
            return 0;
        }
    }MONGO_CATCH{
        bson_destroy(&query);
        /* reconnect after a driver exception, then report failure */
        mongo_start(data);
        return 0;
    }
    bson_destroy(&query);

    bson_iterator_init(&it, result.data);
    find_in_array(&it, data->username_field, username, data->password_field, password);
    bson_destroy(&result);
    return 1;
}
/* Add a new DLR entry to MongoDB */
/* Takes ownership of `entry`: it is destroyed on every path. */
static void dlr_mongodb_add(struct dlr_entry *entry)
{
    DBPoolConn *pconn;
    bson b;
    bson_buffer buf;
    mongo_connection *conn = NULL;

    /* Borrow a pooled connection; the entry must still be freed on failure. */
    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        dlr_entry_destroy(entry);
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    /* Map every DLR field into the document using the configured field names. */
    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");
    bson_append_string(&buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(entry->smsc));
    bson_append_string(&buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(entry->timestamp));
    bson_append_string(&buf, octstr_get_cstr(fields->field_src), octstr_get_cstr(entry->source));
    bson_append_string(&buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(entry->destination));
    bson_append_string(&buf, octstr_get_cstr(fields->field_serv), octstr_get_cstr(entry->service));
    bson_append_string(&buf, octstr_get_cstr(fields->field_url), octstr_get_cstr(entry->url));
    bson_append_string(&buf, octstr_get_cstr(fields->field_account), octstr_get_cstr(entry->account));
    bson_append_string(&buf, octstr_get_cstr(fields->field_binfo), octstr_get_cstr(entry->binfo));
    bson_append_int(&buf, octstr_get_cstr(fields->field_mask), entry->mask);
    bson_append_string(&buf, octstr_get_cstr(fields->field_boxc), octstr_get_cstr(entry->boxc_id));
    /* new entries start with status 0 */
    bson_append_int(&buf, octstr_get_cstr(fields->field_status), 0);
    bson_from_buffer(&b, &buf);

    /* TODO: namespace support */
    MONGO_TRY {
        mongo_insert(conn, mongodb_namespace, &b);
    } MONGO_CATCH {
        /* Log the driver exception; cleanup below still runs. */
        mongodb_error("dlr_mongodb_insert", conn->exception.type);
    }

    dbpool_conn_produce(pconn);
    bson_destroy(&b);
    dlr_entry_destroy(entry);
}
/* Fetch chunk `n` of `gfile` from the chunks collection, keyed on
 * { files_id: <file _id>, n }. The returned bson owns driver-allocated
 * data; the caller must destroy it. */
bson gridfile_get_chunk(gridfile* gfile, int n)
{
    bson query;
    bson out;
    bson_buffer buf;
    bson_iterator it;
    bson_oid_t id;
    int found;

    bson_buffer_init(&buf);
    bson_find(&it, gfile->meta, "_id");
    id = *bson_iterator_oid(&it);
    bson_append_oid(&buf, "files_id", &id);
    bson_append_int(&buf, "n", n);
    bson_from_buffer(&query, &buf);

    /* BUG FIX: the query used to run inside assert(), so compiling with
     * NDEBUG would silently skip the lookup and return garbage; it also
     * leaked `query`. */
    found = mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns, &query, NULL, &out);
    assert(found);
    bson_destroy(&query);
    return out;
}
/* Create an index on `ns` described by `key`, honoring the MONGO_INDEX_*
 * bits in `options`. The index name is built by concatenating the key field
 * names after a leading '_'. The spec is inserted into <db>.system.indexes
 * and the result of getLastError on that database is returned. */
int mongo_create_index(mongo_connection * conn, const char * ns, bson * key, int options, bson * out){
    bson_buffer bb;
    bson b;
    bson_iterator it;
    /* name[0] = '_'; remaining bytes are zero-initialized. */
    char name[255] = {'_'};
    int i = 1;
    char idxns[1024];

    /* Concatenate the key field names into the index name (capped at 255). */
    bson_iterator_init(&it, key->data);
    while(i < 255 && bson_iterator_next(&it)){
        strncpy(name + i, bson_iterator_key(&it), 255 - i);
        i += strlen(bson_iterator_key(&it));
    }
    /* guarantee NUL termination even if strncpy filled the tail */
    name[254] = '\0';

    /* Index spec: { key, ns, name, [unique], [dropDups], [background], [sparse] }. */
    bson_buffer_init(&bb);
    bson_append_bson(&bb, "key", key);
    bson_append_string(&bb, "ns", ns);
    bson_append_string(&bb, "name", name);
    if (options & MONGO_INDEX_UNIQUE)
        bson_append_bool(&bb, "unique", 1);
    if (options & MONGO_INDEX_DROP_DUPS)
        bson_append_bool(&bb, "dropDups", 1);
    if (options & MONGO_INDEX_BACKGROUND)
        bson_append_bool(&bb, "background", 1);
    if (options & MONGO_INDEX_SPARSE)
        bson_append_bool(&bb, "sparse", 1);
    bson_from_buffer(&b, &bb);

    /* Build "<db>.system.indexes" by replacing everything after the first '.'.
     * NOTE(review): assumes ns contains a '.'; strchr would return NULL and
     * crash otherwise — confirm callers always pass a fully-qualified ns. */
    strncpy(idxns, ns, 1024-16);
    strcpy(strchr(idxns, '.'), ".system.indexes");
    mongo_insert(conn, idxns, &b);
    bson_destroy(&b);

    *strchr(idxns, '.') = '\0'; /* just db not ns */
    return mongo_cmd_get_last_error(conn, idxns, out);
}
int main(){ mongo_connection conn[1]; bson_buffer bb; bson obj; bson cond; int i; bson_oid_t oid; const char* col = "c.update_test"; const char* ns = "test.c.update_test"; INIT_SOCKETS_FOR_WINDOWS; if (mongo_connect( conn , TEST_SERVER, 27017 )){ printf("failed to connect\n"); exit(1); } /* if the collection doesn't exist dropping it will fail */ if ( mongo_cmd_drop_collection(conn, "test", col, NULL) == MONGO_OK && mongo_find_one(conn, ns, bson_empty(&obj), bson_empty(&obj), NULL) != MONGO_OK ){ printf("failed to drop collection\n"); exit(1); } bson_oid_gen(&oid); { /* insert */ bson_buffer_init(&bb); bson_append_oid(&bb, "_id", &oid); bson_append_int(&bb, "a", 3 ); bson_from_buffer(&obj, &bb); mongo_insert(conn, ns, &obj); bson_destroy(&obj); } { /* insert */ bson op; bson_buffer_init(&bb); bson_append_oid(&bb, "_id", &oid); bson_from_buffer(&cond, &bb); bson_buffer_init(&bb); { bson_append_start_object(&bb, "$inc"); bson_append_int(&bb, "a", 2 ); bson_append_finish_object(&bb); } { bson_append_start_object(&bb, "$set"); bson_append_double(&bb, "b", -1.5 ); bson_append_finish_object(&bb); } bson_from_buffer(&op, &bb); for (i=0; i<5; i++) mongo_update(conn, ns, &cond, &op, 0); /* cond is used later */ bson_destroy(&op); } if( mongo_find_one(conn, ns, &cond, 0, &obj) != MONGO_OK ){ printf("Failed to find object\n"); exit(1); } else { int fields = 0; bson_iterator it; bson_iterator_init(&it, obj.data); bson_destroy(&cond); while(bson_iterator_next(&it)){ switch(bson_iterator_key(&it)[0]){ case '_': /* id */ ASSERT(bson_iterator_type(&it) == BSON_OID); ASSERT(!memcmp(bson_iterator_oid(&it)->bytes, oid.bytes, 12)); fields++; break; case 'a': ASSERT(bson_iterator_type(&it) == BSON_INT); ASSERT(bson_iterator_int(&it) == 3 + 5*2); fields++; break; case 'b': ASSERT(bson_iterator_type(&it) == BSON_DOUBLE); ASSERT(bson_iterator_double(&it) == -1.5); fields++; break; } } ASSERT(fields == 3); } bson_destroy(&obj); mongo_cmd_drop_db(conn, "test"); mongo_destroy(conn); return 
0; }
/* nginx content handler: maps the request URI tail onto a GridFS lookup key
 * (oid, int, or string per config) and streams the file back chunk by chunk. */
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    char* data;
    ngx_uint_t chunksize;
    ngx_uint_t numchunks;
    ngx_uint_t chunklength;
    char* contenttype;
    ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_buffer buf;
    bson_oid_t oid;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    location_name = core_conf->name;
    full_uri = request->uri;

    /* defensive */
    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Extract the value from the uri */
    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    /* URL Decoding */
    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    /* If no mongo connection, create a default connection */
    /* TODO: Find a better place for this logic */
    if (!gridfs_conf->gridfs_conn->connected) {
        switch (mongo_connect(gridfs_conf->gridfs_conn, NULL)) {
            case mongo_conn_success:
                break;
            case mongo_conn_bad_arg:
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo Exception: Bad Arguments");
                /* BUG FIX: every connect-error path below leaked `value` */
                free(value);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            case mongo_conn_no_socket:
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo Exception: No Socket");
                free(value);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            case mongo_conn_fail:
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo Exception: Connection Failure");
                free(value);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            case mongo_conn_not_master:
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo Exception: Not Master");
                free(value);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            default:
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo Exception: Unknown Error");
                free(value);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* Find the GridFile */
    gridfs_init(gridfs_conf->gridfs_conn,
                (const char*)gridfs_conf->gridfs_db.data,
                (const char*)gridfs_conf->gridfs_root_collection.data,
                &gfs);

    /* Build the lookup query on the configured field, typed per config. */
    bson_buffer_init(&buf);
    switch (gridfs_conf->gridfs_type) {
    case bson_oid:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&buf, (char*)gridfs_conf->gridfs_field.data, &oid);
        break;
    case bson_int:
        bson_append_int(&buf, (char*)gridfs_conf->gridfs_field.data, atoi(value));
        break;
    case bson_string:
        bson_append_string(&buf, (char*)gridfs_conf->gridfs_field.data, value);
        break;
    }
    bson_from_buffer(&query, &buf);

    if(!gridfs_find_query(&gfs, &query, &gfile)){
        bson_destroy(&query);
        free(value);
        return NGX_HTTP_NOT_FOUND;
    }
    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    chunksize = gridfile_get_chunksize(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    /* Set the headers */
    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else
        ngx_http_set_content_type(request);

    ngx_http_send_header(request);

    /* Read and serve chunk by chunk */
    /* NOTE(review): gfs/gfile are never torn down on any path — confirm the
     * gridfs API owns no resources that outlive this request. */
    for (i = 0; i < numchunks; i++) {

        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Allocate space for the buffer of data */
        data = ngx_pcalloc(request->pool, sizeof(char)*chunksize);
        if (data == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate buffer for data");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Set up the buffer chain */
        chunklength = gridfile_read(&gfile, chunksize, data);
        buffer->pos = (u_char*)data;
        buffer->last = (u_char*)data + chunklength;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks-1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return rc;
}
/* Initialize a gridfs handle over <dbname> with collection prefix <prefix>
 * (default "fs"), creating the standard indexes on the files and chunks
 * collections. Returns TRUE on success, FALSE on allocation or index
 * failure (in which case nothing is leaked). */
int gridfs_init(mongo_connection * client, const char * dbname, const char * prefix, gridfs* gfs)
{
    int options;
    bson_buffer bb;
    bson b;
    bson out;
    bson_bool_t success;

    gfs->client = client;

    /* Start every owned pointer at NULL so the single cleanup path below can
     * free unconditionally — free(NULL) is a no-op. This replaces the four
     * duplicated free-chains of the original. */
    gfs->dbname = NULL;
    gfs->prefix = NULL;
    gfs->files_ns = NULL;
    gfs->chunks_ns = NULL;

    /* Allocate space to own the dbname */
    gfs->dbname = (const char *)malloc(strlen(dbname)+1);
    if (gfs->dbname == NULL) goto fail;
    strcpy((char*)gfs->dbname, dbname);

    /* Allocate space to own the prefix */
    if (prefix == NULL) prefix = "fs";
    gfs->prefix = (const char *)malloc(strlen(prefix)+1);
    if (gfs->prefix == NULL) goto fail;
    strcpy((char *)gfs->prefix, prefix);

    /* files_ns = "<db>.<prefix>.files" */
    gfs->files_ns = (const char *) malloc (strlen(prefix)+strlen(dbname)+strlen(".files")+2);
    if (gfs->files_ns == NULL) goto fail;
    strcpy((char*)gfs->files_ns, dbname);
    strcat((char*)gfs->files_ns, ".");
    strcat((char*)gfs->files_ns, prefix);
    strcat((char*)gfs->files_ns, ".files");

    /* chunks_ns = "<db>.<prefix>.chunks" */
    gfs->chunks_ns = (const char *) malloc(strlen(prefix) + strlen(dbname) + strlen(".chunks") + 2);
    if (gfs->chunks_ns == NULL) goto fail;
    strcpy((char*)gfs->chunks_ns, dbname);
    strcat((char*)gfs->chunks_ns, ".");
    strcat((char*)gfs->chunks_ns, prefix);
    strcat((char*)gfs->chunks_ns, ".chunks");

    /* Index the files collection on filename for fast lookup. */
    bson_buffer_init(&bb);
    bson_append_int(&bb, "filename", 1);
    bson_from_buffer(&b, &bb);
    options = 0;
    success = mongo_create_index(gfs->client, gfs->files_ns, &b, options, &out);
    bson_destroy(&b);
    if (!success) goto fail;

    /* Unique index on (files_id, n) keeps chunks ordered and deduplicated. */
    bson_buffer_init(&bb);
    bson_append_int(&bb, "files_id", 1);
    bson_append_int(&bb, "n", 1);
    bson_from_buffer(&b, &bb);
    options = MONGO_INDEX_UNIQUE;
    success = mongo_create_index(gfs->client, gfs->chunks_ns, &b, options, &out);
    bson_destroy(&b);
    if (!success) goto fail;

    return TRUE;

fail:
    /* Centralized cleanup (CERT MEM12-C style). */
    free((char*)gfs->dbname);
    free((char*)gfs->prefix);
    free((char*)gfs->files_ns);
    free((char*)gfs->chunks_ns);
    return FALSE;
}
/**
 * Build perfSONAR data block.
 *
 * Returns a heap-allocated bson (caller releases with bson_destroy + free),
 * or NULL when allocation fails.
 *
 * NOTE(review): the `event` parameter is asserted non-NULL but otherwise
 * unused here — confirm that is intentional.
 */
bson *nlcali_psdata(T self, const char *event, const char *m_id, int32_t sample_num)
{
    struct timeval now;
    bson_buffer bb;
    bson *bp = NULL;

    assert(self && event && m_id);
    gettimeofday(&now, NULL);
    if (self->dirty) {
        nlcali_calc(self);
    }

    bson_buffer_init(&bb);
    bson_ensure_space(&bb, LOG_BUFSZ);
    bson_append_string(&bb, "mid", m_id);
    bson_append_start_array(&bb, "data");
    bson_append_double(&bb, "ts", now.tv_sec + now.tv_usec/1e6);
    bson_append_int(&bb, "_sample", sample_num);
    /* value stats */
    bson_append_double(&bb, "sum_v", self->vsm.sum);
    bson_append_double(&bb, "min_v", self->vsm.min);
    bson_append_double(&bb, "max_v", self->vsm.max);
    bson_append_double(&bb, "mean_v", self->vsm.mean);
    bson_append_double(&bb, "sd_v", self->vsm.sd);
    /* rate stats (NOTE(review): no mean_r is emitted — confirm intended) */
    bson_append_double(&bb, "sum_r", self->rsm.sum);
    bson_append_double(&bb, "min_r", self->rsm.min);
    bson_append_double(&bb, "max_r", self->rsm.max);
    bson_append_double(&bb, "sd_r", self->rsm.sd);
    /* gap stats */
    bson_append_double(&bb, "sum_g", self->gsm.sum);
    bson_append_double(&bb, "min_g", self->gsm.min);
    bson_append_double(&bb, "max_g", self->gsm.max);
    bson_append_double(&bb, "sd_g", self->gsm.sd);
    bson_append_int(&bb, "count", self->vsm.count);
    bson_append_double(&bb, "dur", self->dur);
    bson_append_double(&bb, "dur_inst", self->dur_sum);

    /* add histogram data, if being recorded */
    if (NL_HIST_HAS_DATA(self)) {
        int i;
        char idx[16];

        /* rate hist */
        bson_append_double(&bb, "h_rm", self->h_rmin);
        bson_append_double(&bb, "h_rw", self->h_rwidth);
        bson_append_start_array(&bb, "h_rd");
        for (i=0; i < self->h_num; i++) {
            sprintf(idx, "%d", i);
            bson_append_int(&bb, idx, self->h_rdata[i]);
        }
        bson_append_finish_object(&bb);

        /* gap hist */
        bson_append_double(&bb, "h_gm", self->h_gmin);
        bson_append_double(&bb, "h_gw", self->h_gwidth);
        bson_append_start_array(&bb, "h_gd");
        for (i=0; i < self->h_num; i++) {
            sprintf(idx, "%d", i);
            bson_append_int(&bb, idx, self->h_gdata[i]);
        }
        bson_append_finish_object(&bb);
    }
    bson_append_finish_object(&bb);

    /* BUG FIX: malloc was unchecked and the `error:` label was unreachable;
     * a failed allocation now takes the cleanup path instead of crashing
     * inside bson_from_buffer. */
    bp = malloc(sizeof(bson));
    if (bp == NULL)
        goto error;
    bson_from_buffer(bp, &bb);
    return(bp);
error:
    if (bp) {
        bson_destroy(bp);
        free(bp);
    }
    bson_buffer_destroy(&bb);
    return(NULL);
}
/* Integration test: insert five documents exercising every basic BSON type
 * into test.c.simple, then read them back and dump each field to stderr. */
int main(){
    mongo_connection conn[1];
    mongo_connection_options opts;
    bson_buffer bb;
    bson b;
    mongo_cursor * cursor;
    int i;
    char hex_oid[25];
    const char * col = "c.simple";
    const char * ns = "test.c.simple";

    INIT_SOCKETS_FOR_WINDOWS;

    /* Bounded host copy with explicit NUL termination. */
    strncpy(opts.host, TEST_SERVER, 255);
    opts.host[254] = '\0';
    opts.port = 27017;

    if (mongo_connect( conn , &opts )){
        printf("failed to connect\n");
        exit(1);
    }

    /* if the collection doesn't exist dropping it will fail */
    if (!mongo_cmd_drop_collection(conn, "test", col, NULL)
            && mongo_find_one(conn, ns, bson_empty(&b), bson_empty(&b), NULL)){
        printf("failed to drop collection\n");
        exit(1);
    }

    /* Insert five documents:
     * { _id, a: double, b: int, c: string, d: { i: int }, e: [ int, string ] } */
    for(i=0; i< 5; i++){
        bson_buffer_init( & bb );

        bson_append_new_oid( &bb, "_id" );
        bson_append_double( &bb , "a" , 17 );
        bson_append_int( &bb , "b" , 17 );
        bson_append_string( &bb , "c" , "17" );

        {
            bson_buffer * sub = bson_append_start_object(  &bb , "d" );
            bson_append_int( sub, "i", 71 );
            bson_append_finish_object(sub);
        }
        {
            bson_buffer * arr = bson_append_start_array(  &bb , "e" );
            bson_append_int( arr, "0", 71 );
            bson_append_string( arr, "1", "71" );
            bson_append_finish_object(arr);
        }

        bson_from_buffer(&b, &bb);
        mongo_insert( conn , ns , &b );
        bson_destroy(&b);
    }

    /* Read everything back and pretty-print each field by iterator type. */
    cursor = mongo_find( conn , ns , bson_empty(&b) , 0 , 0 , 0 , 0 );

    while (mongo_cursor_next(cursor)){
        bson_iterator it;
        bson_iterator_init(&it, cursor->current.data);
        while(bson_iterator_next(&it)){
            fprintf(stderr, " %s: ", bson_iterator_key(&it));

            switch(bson_iterator_type(&it)){
                case bson_double:
                    fprintf(stderr, "(double) %e\n", bson_iterator_double(&it));
                    break;
                case bson_int:
                    fprintf(stderr, "(int) %d\n", bson_iterator_int(&it));
                    break;
                case bson_string:
                    fprintf(stderr, "(string) \"%s\"\n", bson_iterator_string(&it));
                    break;
                case bson_oid:
                    bson_oid_to_string(bson_iterator_oid(&it), hex_oid);
                    fprintf(stderr, "(oid) \"%s\"\n", hex_oid);
                    break;
                case bson_object:
                    fprintf(stderr, "(subobject) {...}\n");
                    break;
                case bson_array:
                    fprintf(stderr, "(array) [...]\n");
                    break;
                default:
                    fprintf(stderr, "(type %d)\n", bson_iterator_type(&it));
                    break;
            }
        }
        fprintf(stderr, "\n");
    }

    mongo_cursor_destroy(cursor);
    mongo_cmd_drop_db(conn, "test");
    mongo_destroy( conn );
    return 0;
}