void mongoDateToBSON(const Object& value, const char* key, bson_t* bson)
{
    int64_t milli = (value->o_get("sec").toInt64() * 1000)
                  + (value->o_get("usec").toInt64() / 1000);

    bson_append_date_time(bson, key, -1, milli);
}
static struct select_doc* create_select(void)
/* BSON object indicating the fields to return */
{
    struct select_doc* s_doc;

    s_doc = malloc (sizeof (struct select_doc));
    s_doc->select = bson_new ();
    bson_append_utf8 (s_doc->select, "syslog_tag", 10, "s", 1);
    bson_append_utf8 (s_doc->select, "msg", 3, "ERROR", 5);
    bson_append_utf8 (s_doc->select, "sys", 3, "sys", 3);
    bson_append_date_time (s_doc->select, "time_rcvd", 9, 1ll);
    return s_doc;
}
static void
test_date_time (void)
{
   bson_t bcon, expected;

   bson_init (&bcon);
   bson_init (&expected);

   bson_append_date_time (&expected, "foo", -1, 10000);
   BCON_APPEND (&bcon, "foo", BCON_DATE_TIME (10000));

   bson_eq_bson (&bcon, &expected);
   bson_destroy (&bcon);
   bson_destroy (&expected);
}
/* {{{ MongoDriver\BSON\UTCDateTime */
void VariantToBsonConverter::_convertUTCDateTime(bson_t *bson, const char *key, Object v)
{
    int64_t milliseconds = v.o_get(
        s_MongoBsonUTCDateTime_milliseconds,
        false,
        s_MongoBsonUTCDateTime_className
    ).toInt64();

    bson_append_date_time(bson, key, -1, milliseconds);
}
/** save a gridfs file */
bool
mongoc_gridfs_file_save (mongoc_gridfs_file_t *file)
{
   bson_t *selector, *update, child;
   const char *md5;
   const char *filename;
   const char *content_type;
   const bson_t *aliases;
   const bson_t *metadata;
   bool r;

   ENTRY;

   if (!file->is_dirty) {
      return 1;
   }

   if (file->page && _mongoc_gridfs_file_page_is_dirty (file->page)) {
      _mongoc_gridfs_file_flush_page (file);
   }

   md5 = mongoc_gridfs_file_get_md5 (file);
   filename = mongoc_gridfs_file_get_filename (file);
   content_type = mongoc_gridfs_file_get_content_type (file);
   aliases = mongoc_gridfs_file_get_aliases (file);
   metadata = mongoc_gridfs_file_get_metadata (file);

   selector = bson_new ();
   bson_append_value (selector, "_id", -1, &file->files_id);

   update = bson_new ();
   bson_append_document_begin (update, "$set", -1, &child);
   bson_append_int64 (&child, "length", -1, file->length);
   bson_append_int32 (&child, "chunkSize", -1, file->chunk_size);
   bson_append_date_time (&child, "uploadDate", -1, file->upload_date);

   if (md5) {
      bson_append_utf8 (&child, "md5", -1, md5, -1);
   }

   if (filename) {
      bson_append_utf8 (&child, "filename", -1, filename, -1);
   }

   if (content_type) {
      bson_append_utf8 (&child, "contentType", -1, content_type, -1);
   }

   if (aliases) {
      bson_append_array (&child, "aliases", -1, aliases);
   }

   if (metadata) {
      bson_append_document (&child, "metadata", -1, metadata);
   }

   bson_append_document_end (update, &child);

   r = mongoc_collection_update (file->gridfs->files, MONGOC_UPDATE_UPSERT,
                                 selector, update, NULL, &file->error);

   file->failed = !r;

   bson_destroy (selector);
   bson_destroy (update);

   file->is_dirty = 0;

   RETURN (r);
}
static struct query_doc* create_query(struct queryopt* opt)
{
    struct query_doc* qu_doc;
    bson_t *query_what, *order_what, *msg_what, *date_what;
    struct tm tm;
    time_t t;
    int64_t ts;

    qu_doc = malloc (sizeof (struct query_doc));
    qu_doc->query = bson_new ();

    query_what = bson_new ();
    bson_init (query_what);
    bson_append_document_begin (qu_doc->query, "$query", 6, query_what);

    if (opt->bsever == 1) {
        bson_append_int32 (query_what, "syslog_sever", 12, opt->e_sever);
    }
    if (opt->blevel == 1) {
        bson_append_utf8 (query_what, "level", 5, opt->e_level, -1);
    }
    if (opt->bmsg == 1) {
        msg_what = bson_new ();
        bson_init (msg_what);
        bson_append_document_begin (query_what, "msg", 3, msg_what);
        bson_append_utf8 (msg_what, "$regex", 6, opt->e_msg, -1);
        bson_append_utf8 (msg_what, "$options", 8, "i", 1);
        bson_append_document_end (query_what, msg_what);
    }
    if (opt->bdate == 1) {
        date_what = bson_new ();
        bson_init (date_what);
        bson_append_document_begin (query_what, "time_rcvd", 9, date_what);
        if (opt->bdatef == 1) {
            tm.tm_isdst = -1;
            strptime (opt->e_date, "%d/%m/%Y-%H:%M:%S", &tm);
            tm.tm_hour = tm.tm_hour + 1;
            t = mktime (&tm);
            ts = 1000 * (int64_t) t;
            bson_append_date_time (date_what, "$gt", 3, ts);
        }
        if (opt->bdateu == 1) {
            tm.tm_isdst = -1;
            strptime (opt->e_dateu, "%d/%m/%Y-%H:%M:%S", &tm);
            tm.tm_hour = tm.tm_hour + 1;
            t = mktime (&tm);
            ts = 1000 * (int64_t) t;
            bson_append_date_time (date_what, "$lt", 3, ts);
        }
        bson_append_document_end (query_what, date_what);
    }
    if (opt->bsys == 1) {
        bson_append_utf8 (query_what, "sys", 3, opt->e_sys, -1);
    }
    bson_append_document_end (qu_doc->query, query_what);

    order_what = bson_new ();
    bson_init (order_what);
    bson_append_document_begin (qu_doc->query, "$orderby", 8, order_what);
    bson_append_date_time (order_what, "time_rcvd", 9, 1ll);
    bson_append_document_end (qu_doc->query, order_what);
    bson_free (order_what);

    return qu_doc;
}
void run_loader(int thread, char *filename)
{
    bson_t *record;
    mongoc_client_t *conn;
    mongoc_collection_t *collection;
    bson_error_t error;
    bson_t reply;
    FILE *infile;
    char *rptr;
    char rlinebuf[BUFSIZ];
    char *ritem;
    char *rlast = NULL;
    int rfcount = 0;
    int batchcount = 0;
    int total = 0;

    //Get the highest used value on that shard so far
    conn = mongoc_client_new(DEFAULT_URI);
    if (!conn) {
        fprintf(stderr, "Failed to parse URI.\n");
        exit(1);
    }

    collection = mongoc_client_get_collection(conn, DATA_DB, DATA_COLLECTION);
    long long chunkno = carve_chunk(conn, collection);

    printf("Thread %d reading %s\n", thread, filename);
    infile = fopen(filename, "r");
    if (infile == NULL) {
        perror("Opening results file");
        exit(1);
    }

    mongoc_bulk_operation_t *bulk =
        mongoc_collection_create_bulk_operation(collection, true, NULL);
    if (!bulk) {
        printf("Failed to create bulk op\n");
    }

    rptr = fgets(rlinebuf, BUFSIZ, infile);
    if (rptr) {
        rlinebuf[strlen(rlinebuf) - 1] = '\0'; //strip trailing newline
    }

    //Read the Results Line
    while (rptr) {
        total++;
        if (total % (INSERT_THREADS_PER_SHARD * nshards) == thread) {
            ritem = strtok_r(rptr, "|", &rlast);
            rfcount = 0;
            record = bson_new();

            //Two part ID - a loader (32 bits for that) and a one_up
            bson_append_int64(record, "_id", -1, (chunkno << 32) + total);

            while (ritem) {
                switch (resulttype[rfcount]) {
                case 0:
                    //printf("%s\n",ritem);
                    bson_append_utf8(record, resultfields[rfcount], -1, ritem, -1);
                    break;
                case 1:
                    bson_append_int32(record, resultfields[rfcount], -1, atoi(ritem));
                    break;
                case 2:
                    if (strncmp(ritem, "NULL", 4)) {
                        struct tm tm = {0}; //zero the fields strptime will not set
                        if (strptime(ritem, "%Y-%m-%d", &tm)) {
                            time_t t = mktime(&tm);
                            // t is now your desired time_t
                            bson_append_date_time(record, resultfields[rfcount], -1,
                                                  (long long) t * 1000);
                        }
                    }
                    break;
                default:
                    printf("Unknown type col %d = %d\n", rfcount, resulttype[rfcount]);
                }
                ritem = strtok_r(NULL, "|", &rlast);
                rfcount++;
            }

            mongoc_bulk_operation_insert(bulk, record);
            bson_destroy(record);

            if (batchcount == (BATCHSIZE - 1)) {
                int ret = mongoc_bulk_operation_execute(bulk, &reply, &error);
                if (!ret) {
                    printf("Error: %s\n", error.message);
                }
                if (thread == 0)
                    printf("%s %d\n", filename, total);
                bson_destroy(&reply);
                mongoc_bulk_operation_destroy(bulk);
                batchcount = 0;
                bulk = mongoc_collection_create_bulk_operation(collection, true, NULL);
            } else {
                batchcount++;
            }
        }

        //Read next line from file
        rptr = fgets(rlinebuf, BUFSIZ, infile);
        if (rptr) {
            rlinebuf[strlen(rlinebuf) - 1] = '\0';
        }
    }

    int ret = mongoc_bulk_operation_execute(bulk, &reply, &error);
    if (!ret) {
        fprintf(stderr, "Error: %s\n", error.message);
    }
    if (thread == 0)
        printf("%s %d\n", filename, total);

    bson_destroy(&reply);
    mongoc_collection_destroy(collection);
    mongoc_bulk_operation_destroy(bulk);
    mongoc_client_destroy(conn);
}
bool BsonAppendDate(BSON *b, const char *key, time_t v)
{
    /* bson_append_date_time() stores its value as milliseconds since the UNIX epoch. */
    return bson_append_date_time(b, key, strlen(key), v);
}
void core::append(const types::b_date& value) {
    stdx::string_view key = _impl->next_key();

    bson_append_date_time(_impl->back(), key.data(), key.length(), value.to_int64());
}
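For reference, a minimal standalone sketch (not taken from any of the projects above) that exercises the same call with nothing but the public libbson API: it appends the current time as a date_time field and prints the document as JSON. The include path and the conversion from time_t to milliseconds are assumptions; older libbson installs expose the header as <bson.h> instead of <bson/bson.h>.

#include <bson/bson.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    bson_t *doc = bson_new();

    /* bson_append_date_time() takes milliseconds since the UNIX epoch,
     * so a time_t from time() has to be scaled by 1000. */
    int64_t millis = (int64_t) time(NULL) * 1000;
    bson_append_date_time(doc, "created_at", -1, millis);

    char *json = bson_as_json(doc, NULL);
    printf("%s\n", json);

    bson_free(json);
    bson_destroy(doc);
    return 0;
}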