/* Recover the _id of an upserted document from the update spec itself.
 *
 * Versions of MongoDB before 2.6 don't return the _id for an upsert if _id
 * is not an ObjectId, so find it in the update document's query "q" or
 * update "u". It must be in one or both: if it were in neither the _id
 * would be server-generated, therefore an ObjectId, therefore returned and
 * we wouldn't call this function. If _id is in both the update document
 * *and* the query spec the update document _id takes precedence. */
static bool
get_upserted_id (const bson_t *update, bson_value_t *upserted_id)
{
   static const char *paths[] = {"u._id", "q._id"};
   bson_iter_t iter;
   bson_iter_t id_iter;
   size_t i;

   /* "u._id" is checked first so the update spec wins over the query spec. */
   for (i = 0; i < sizeof paths / sizeof paths[0]; i++) {
      bson_iter_init (&iter, update);
      if (bson_iter_find_descendant (&iter, paths[i], &id_iter)) {
         bson_value_copy (bson_iter_value (&id_iter), upserted_id);
         return true;
      }
   }

   /* server bug? */
   return false;
}
/* Recursively merge the fields behind @iter into @merged_aggregate_doc.
 *
 * For each field: "_id" is carried over verbatim from
 * @existing_aggregate_doc; a sub-document consisting of exactly one
 * {$op: "$path"} pair is evaluated as an aggregation expression via
 * _aggregate(); any other sub-document is recursed into with a dotted key
 * prefix; scalar fields are copied through unchanged.
 *
 * BUG FIX: the original sized the `new_key` VLA as the combined string
 * length WITHOUT the NUL terminator, so strcpy/strcat wrote one byte past
 * the end of the buffer. Also frees the heap copies returned by
 * _aggregate_get_value_at_key, which were previously leaked. */
void _aggregate_recurse_fill(bson_iter_t *iter, bson_t *new_doc, bson_t *existing_aggregate_doc, bson_t *merged_aggregate_doc, const char *key)
{
   bson_iter_t child_iter;
   bson_t child_doc;

   while (bson_iter_next (iter)) {
      const char *field = bson_iter_key (iter);

      /* Build the dotted path "<key>.<field>" (just "<field>" at top level).
       * +1 for the NUL terminator; +strlen(key)+1 covers "<key>.". */
      size_t new_key_length = strlen (field) + 1;
      if (strcmp ("", key) != 0) {
         new_key_length += strlen (key) + 1;
      }
      char new_key[new_key_length];
      if (strcmp ("", key) == 0) {
         snprintf (new_key, new_key_length, "%s", field);
      } else {
         snprintf (new_key, new_key_length, "%s.%s", key, field);
      }

      if (strcmp ("_id", new_key) == 0) {
         bson_value_t *existing_id = _aggregate_get_value_at_key (existing_aggregate_doc, "_id");
         if (existing_id) {
            /* bson_append_value deep-copies, so the temporary can be freed. */
            bson_append_value (merged_aggregate_doc, "_id", -1, existing_id);
            bson_value_destroy (existing_id);
            bson_free (existing_id);
         }
         continue;
      }

      if (BSON_ITER_HOLDS_DOCUMENT (iter)) {
         const char *agg_key = NULL;
         const bson_value_t *agg_field = NULL;

         if (bson_iter_recurse (iter, &child_iter)) {
            /* Detect a single-pair {$op: "$path"} aggregation expression. */
            if (bson_iter_next (&child_iter) && _aggregate_is_agg_operator (bson_iter_key (&child_iter))) {
               agg_key = bson_iter_key (&child_iter);
               agg_field = bson_iter_value (&child_iter);
            }
            if (agg_key && !bson_iter_next (&child_iter)) {
               bson_value_t *existing_value = _aggregate_get_value_at_key (existing_aggregate_doc, new_key);
               /* +1 skips the leading '$' of the referenced field path. */
               bson_value_t *new_doc_value = _aggregate_get_value_at_key (new_doc, agg_field->value.v_utf8.str + 1);
               bson_value_t *agg_result = _aggregate (existing_value, new_doc_value, agg_key);

               bson_append_value (merged_aggregate_doc, bson_iter_key (iter), -1, agg_result);

               /* Free the temporary copies (previously leaked). The value
                * was deep-copied into merged_aggregate_doc above.
                * NOTE(review): agg_result ownership is unclear from here —
                * it may still leak; confirm against _aggregate's contract. */
               if (existing_value) {
                  bson_value_destroy (existing_value);
                  bson_free (existing_value);
               }
               if (new_doc_value) {
                  bson_value_destroy (new_doc_value);
                  bson_free (new_doc_value);
               }
               continue;
            }
         }

         /* Plain sub-document: recurse with the extended key prefix. */
         bson_append_document_begin (merged_aggregate_doc, bson_iter_key (iter), -1, &child_doc);
         if (bson_iter_recurse (iter, &child_iter)) {
            _aggregate_recurse_fill (&child_iter, new_doc, existing_aggregate_doc, &child_doc, new_key);
         }
         bson_append_document_end (merged_aggregate_doc, &child_doc);
      } else {
         /* Scalar (or array): copy through unchanged. */
         bson_append_value (merged_aggregate_doc, bson_iter_key (iter), -1, bson_iter_value (iter));
      }
   }
}
/* Set @option to @value (UTF-8) in @options, replacing an existing entry
 * with the same key (compared case-insensitively) or appending it at the
 * end. All other entries are preserved in their original order. */
static void
mongoc_uri_bson_append_or_replace_key (bson_t *options, const char *option, const char *value)
{
   bson_iter_t iter;

   if (!bson_iter_init (&iter, options)) {
      return;
   }

   /* Rebuild options into a scratch document, substituting the new value
    * where the key matches. */
   bson_t rebuilt = BSON_INITIALIZER;
   bool replaced = false;

   while (bson_iter_next (&iter)) {
      const char *cur_key = bson_iter_key (&iter);

      if (strcasecmp (cur_key, option) == 0) {
         bson_append_utf8 (&rebuilt, option, -1, value, -1);
         replaced = true;
      } else {
         BSON_APPEND_VALUE (&rebuilt, cur_key, bson_iter_value (&iter));
      }
   }

   if (!replaced) {
      bson_append_utf8 (&rebuilt, option, -1, value, -1);
   }

   /* Swap the rebuilt document into place. */
   bson_destroy (options);
   bson_copy_to (&rebuilt, options);
   bson_destroy (&rebuilt);
}
dir_t* dir_getDirFromBSON(const bson_t *doc) { bson_iter_t iter; const bson_value_t *value; const char *key; dir_t *dir = dir_create(); if (bson_iter_init(&iter, doc)) { while (bson_iter_next(&iter)) { key = bson_iter_key(&iter); value = bson_iter_value(&iter); if (strcmp(key, "_id") == 0) { strcpy(dir->id, value->value.v_utf8.str); } else if (strcmp(key, "name") == 0) { dir->name = strdup(value->value.v_utf8.str); } else if (strcmp(key, "parentId") == 0) { strcpy(dir->parentId, value->value.v_utf8.str); } /* if (bson_iter_find(&iter, "_id")) strcpy(dir->id, bson_iter_utf8(&iter, NULL)); if (bson_iter_find(&iter, "name")) strcpy(dir->name, bson_iter_utf8(&iter, NULL)); if (bson_iter_find(&iter, "parentId")) strcpy(dir->parentId, bson_iter_utf8(&iter, NULL)); */ } } return dir; }
/*---------------------------------------------------------------------------
 *
 * _change_stream_init --
 *
 *       Called after @stream has the collection name, database name, read
 *       preferences, and read concern set. Creates the change streams
 *       cursor. On any failure, stream->err is set and the cursor is not
 *       created.
 *
 *--------------------------------------------------------------------------
 */
void
_change_stream_init (mongoc_change_stream_t *stream,
                     const bson_t *pipeline,
                     const bson_t *opts)
{
   BSON_ASSERT (pipeline);

   /* Negative sentinels mean "not set by the user". */
   stream->max_await_time_ms = -1;
   stream->batch_size = -1;
   bson_init (&stream->pipeline_to_append);
   bson_init (&stream->resume_token);
   bson_init (&stream->err_doc);

   /* Parse opts; on failure stream->err is already set — bail out early. */
   if (!_mongoc_change_stream_opts_parse (
          stream->client, opts, &stream->opts, &stream->err)) {
      return;
   }

   stream->full_document =
      BCON_NEW ("fullDocument", stream->opts.fullDocument);

   /* 11 == strlen ("resumeAfter"). */
   if (!bson_empty (&(stream->opts.resumeAfter))) {
      bson_append_document (
         &stream->resume_token, "resumeAfter", 11, &(stream->opts.resumeAfter));
   }

   _mongoc_timestamp_set (&stream->operation_time,
                          &(stream->opts.startAtOperationTime));

   stream->batch_size = stream->opts.batchSize;
   stream->max_await_time_ms = stream->opts.maxAwaitTimeMS;

   /* Accept two forms of user pipeline:
    * 1. A document like: { "pipeline": [...] }
    * 2. An array-like document: { "0": {}, "1": {}, ... }
    * If the passed pipeline is invalid, we pass it along and let the server
    * error instead. */
   if (!bson_empty (pipeline)) {
      bson_iter_t iter;
      if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
          BSON_ITER_HOLDS_ARRAY (&iter)) {
         if (!BSON_APPEND_VALUE (&stream->pipeline_to_append,
                                 "pipeline",
                                 bson_iter_value (&iter))) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      } else {
         if (!BSON_APPEND_ARRAY (
                &stream->pipeline_to_append, "pipeline", pipeline)) {
            CHANGE_STREAM_ERR ("pipeline");
         }
      }
   }

   /* Only create the server-side cursor if nothing above failed. */
   if (stream->err.code == 0) {
      (void) _make_cursor (stream);
   }
}
int main(int argc, char *argv[]) { mongoc_client_t *client; mongoc_collection_t *collection; mongoc_cursor_t *cursor; const bson_t *doc; bson_t *query; char *str; mongoc_init(); client = mongoc_client_new("mongodb://localhost:27017/"); collection = mongoc_client_get_collection(client, "stockopedia", "instruments"); query = bson_new(); bson_t *fields = bson_new(); BSON_APPEND_INT32(fields, "RIC", 1); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 0, 0, query, fields, NULL); bson_iter_t iter; const bson_value_t *value; while (mongoc_cursor_next(cursor, &doc)) { str = bson_as_json(doc, NULL); printf("%s\n", str); if (bson_iter_init(&iter, doc)) { while (bson_iter_next(&iter)) { printf("Found a field named: %s\n", bson_iter_key(&iter)); value = bson_iter_value(&iter); if (value->value_type == BSON_TYPE_UTF8) { printf("It's a UTF8 : '%s'\n", value->value.v_utf8.str); } } } //printf("Found element key : '%s'\n", bson_iter_key(&iter)); // if (bson_iter_init(&iter, doc)) { // // } bson_free(str); } //Now fetch quotes for each RIC bson_destroy(query); mongoc_cursor_destroy(cursor); mongoc_collection_destroy(collection); mongoc_client_destroy(client); return 0; }
/* Construct the aggregate command in cmd:
 * { aggregate: collname, pipeline: [], cursor: { batchSize: x } }
 * The $changeStream stage is always element "0" of the pipeline; any user
 * stages follow it, re-keyed sequentially from "1". */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t iter;
   bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
   bson_t change_stream_doc;
   bson_t pipeline;
   bson_t cursor_doc;

   bson_init (command);
   /* Explicit key lengths (9, 8, 13, 6, 9) are strlen of the literal keys. */
   bson_append_utf8 (command,
                     "aggregate",
                     9,
                     stream->coll->collection,
                     stream->coll->collectionlen);
   bson_append_array_begin (command, "pipeline", 8, &pipeline);

   /* Append the $changeStream stage */
   bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
   bson_append_document_begin (
      &change_stream_stage, "$changeStream", 13, &change_stream_doc);
   bson_concat (&change_stream_doc, &stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&change_stream_doc, &stream->resume_token);
   }
   bson_append_document_end (&change_stream_stage, &change_stream_doc);
   bson_append_document_end (&pipeline, &change_stream_stage);

   /* Append user pipeline if it exists */
   if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      bson_iter_t child_iter;
      uint32_t key_int = 1; /* "0" is taken by the $changeStream stage */
      char buf[16];
      const char *key_str;

      BSON_ASSERT (bson_iter_recurse (&iter, &child_iter));
      while (bson_iter_next (&child_iter)) {
         /* The user pipeline may consist of invalid stages or non-documents.
          * Append anyway, and rely on the server error. */
         size_t keyLen =
            bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
         bson_append_value (
            &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter));
         ++key_int;
      }
   }

   bson_append_array_end (command, &pipeline);

   /* Add batch size if needed */
   bson_append_document_begin (command, "cursor", 6, &cursor_doc);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_doc);
}
bool _mongoc_convert_bson_value_t (mongoc_client_t *client, const bson_iter_t *iter, bson_value_t *value, bson_error_t *error) { bson_value_copy (bson_iter_value ((bson_iter_t *) iter), value); return true; }
/* Look up the (possibly dotted) @key in @doc and return a heap-allocated
 * copy of its value, or NULL when the key is absent. The caller owns the
 * returned value: bson_value_destroy() then bson_free() it. */
bson_value_t *
_aggregate_get_value_at_key (bson_t *doc, char *key)
{
   bson_iter_t top;
   bson_iter_t found;
   bson_value_t *copy = NULL;

   if (bson_iter_init (&top, doc) &&
       bson_iter_find_descendant (&top, key, &found)) {
      copy = bson_malloc (sizeof *copy);
      bson_value_copy (bson_iter_value (&found), copy);
   }

   return copy;
}
/* Copy each document element of the array @iter points at into @dest,
 * re-keying with a running array index and shifting each embedded "index"
 * field by @offset (the write's position within the whole batch).
 * Returns the number of documents copied. */
int32_t
_mongoc_write_result_merge_arrays (uint32_t offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t *dest,                  /* IN */
                                   bson_iter_t *iter)             /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   /* Continue numbering after whatever dest already holds. */
   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         /* Non-document elements are silently skipped. */
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len =
               (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  /* Translate the server's per-command index into the
                   * client's per-batch index. */
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
/* Round-trip a Decimal128 value: build a document with BCON, copy the
 * value out with bson_value_copy, and append the copy to a second
 * document.
 *
 * BUG FIX: "&copy" had been mangled into the HTML entity rendering "©"
 * in three places (bson_value_copy, bson_append_value,
 * bson_value_destroy), so the function did not compile. Restored the
 * address-of expressions. */
static void
test_value_decimal128 (void)
{
   const bson_value_t *value;
   bson_value_t copy;
   bson_iter_t iter;
   bson_decimal128_t dec;
   bson_t other = BSON_INITIALIZER;
   bson_t *doc;

   assert (bson_decimal128_from_string ("123.5", &dec));
   doc = BCON_NEW ("decimal128", BCON_DECIMAL128 (&dec));
   assert (bson_iter_init (&iter, doc) && bson_iter_next (&iter));
   /* extra parens: the assignment inside assert is intentional */
   assert ((value = bson_iter_value (&iter)));
   bson_value_copy (value, &copy);
   assert (bson_append_value (&other, bson_iter_key (&iter), -1, &copy));
   bson_value_destroy (&copy);
   bson_destroy (doc);
   bson_destroy (&other);
}
/* Look up the contact document whose _id is @contactId in @coll and copy
 * its "mobile" string field into the caller-supplied @mobile buffer. If no
 * matching document (or no mobile field) exists, @mobile is left
 * untouched.
 *
 * BUG FIX: the cursor was never destroyed (leak); `doc` and `value` now
 * carry the const qualifiers required by mongoc_cursor_next /
 * bson_iter_value. Dead bson_as_json round-trip removed. */
void getPhone(mongoc_collection_t *coll, char *contactId, char *mobile)
{
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_t *query;
   bson_oid_t se_oid;

   query = bson_new ();
   bson_oid_init_from_string(&se_oid, contactId);
   BSON_APPEND_OID (query, "_id", &se_oid);

   cursor = mongoc_collection_find (coll, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL);

   while (mongoc_cursor_next (cursor, &doc)) {
      bson_iter_t iter;

      if (!bson_iter_init (&iter, doc)) {
         continue;
      }
      while (bson_iter_next (&iter)) {
         const bson_value_t *value = bson_iter_value (&iter);

         if (value->value_type == BSON_TYPE_UTF8 &&
             strcmp(bson_iter_key (&iter), "mobile") == 0) {
            /* NOTE(review): assumes the caller's mobile buffer is large
             * enough — TODO bound this copy. */
            strcpy(mobile, value->value.v_utf8.str);
         }
      }
   }

   mongoc_cursor_destroy (cursor); /* was leaked */
   bson_destroy (query);
}
/* Merge a write-command server @reply into @result. @offset is the index
 * of this command's first document within the whole client-side batch,
 * used to translate server-reported indexes into batch indexes. */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* A non-empty writeErrors array marks the whole result as failed. */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  /* Re-recurse so "_id" is found regardless of field
                   * order within the subdocument. */
                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (
                        result, offset + server_index, value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (
         result->n_writeConcernErrors, &key, str, sizeof str);

      if (!bson_append_document (
             &result->writeConcernErrors, key, -1, &write_concern_error)) {
         MONGOC_ERROR ("Error adding \"%s\" to writeConcernErrors.\n", key);
      }

      result->n_writeConcernErrors++;
   }

   /* inefficient if there are ever large numbers: for each label in each err,
    * we linear-search result->errorLabels to see if it's included yet */
   _mongoc_bson_array_copy_labels_to (reply, &result->errorLabels);

   EXIT;
}
/* Fetch the device document whose _id is @deviceId from @coll, read its
 * "lat"/"lon" doubles and "device_name" string, then reverse-geocode the
 * coordinates through the Baidu geocoder HTTP API and copy the returned
 * "formatted_address" value into @msg.
 *
 * NOTE(review): lat and lon are read uninitialized when the query matches
 * no document or the fields are missing — the request URL is then built
 * from indeterminate values. The cursor is never destroyed (leak), and
 * `doc` should be `const bson_t *` for mongoc_cursor_next. Left unchanged
 * here. */
static void getaddressbyID(char *msg, mongoc_collection_t *coll, char *deviceId, char *device_name)
{
   mongoc_cursor_t *cursor;
   bson_t *doc;
   bson_t *query;
   char *str;

   query = bson_new ();
   bson_oid_t de_oid;
   bson_oid_init_from_string(&de_oid, deviceId);
   BSON_APPEND_OID (query, "_id", &de_oid);
   cursor = mongoc_collection_find (coll, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL);

   double ivalue = 0; /* unused */
   int buse = 0;      /* unused */
   double lat;        /* NOTE(review): uninitialized until a doc supplies it */
   double lon;

   while (mongoc_cursor_next (cursor, &doc)) {
      /* JSON dump kept only as a debugging hook. */
      str = bson_as_json (doc, NULL);
      bson_free (str);

      bson_iter_t iter;
      bson_iter_t sub_iter; /* unused */
      if (bson_iter_init (&iter, doc)) {
         while (bson_iter_next (&iter)) {
            bson_value_t *value;
            value = bson_iter_value (&iter);

            if (value->value_type == BSON_TYPE_DOUBLE) {
               if (strcmp(bson_iter_key (&iter), "lat") == 0) {
                  (lat = value->value.v_double);
               }
               if (strcmp(bson_iter_key (&iter), "lon") == 0) {
                  (lon = value->value.v_double);
               }
            }
            if (value->value_type == BSON_TYPE_UTF8) {
               if (strcmp(bson_iter_key (&iter), "device_name") == 0) {
                  /* NOTE(review): assumes the caller's buffer is large
                   * enough for the stored name. */
                  strcpy(device_name, value->value.v_utf8.str);
               }
            }
         }
      }
   }
   bson_destroy (query);

   /* Reverse-geocode (lat, lon) via Baidu's HTTP API and parse the JSON
    * response by raw string scanning. */
   CURL *curl;
   CURLcode res;
   curl = curl_easy_init();
   if (curl) {
      /* static => not reentrant; also shadows the outer `str`. */
      static char str[20480];
      memset(str, '\0', 20480);
      char url[1028];
      memset(url, '\0', 1028);
      sprintf(url, "http://api.map.baidu.com/geocoder/v2/?ak=IDvNBsejl9oqMbPF316iKsXR&output=json&location=%.8f,%.8f", lat, lon);
      printf("%s\n", url);
      curl_easy_setopt(curl, CURLOPT_URL, url);
      curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
      curl_easy_setopt(curl, CURLOPT_WRITEDATA, str);
      res = curl_easy_perform(curl);
      printf("%s\n", str);

      /* Extract the quoted value following "formatted_address":
       * skip to the closing quote of the key, then copy up to the next
       * quote into msg. */
      char *s = strstr(str, "formatted_address");
      if (NULL != s) {
         s = strstr(s, "\"");
         s = strstr(s + 1, "\"");
         printf("\n%s\n", s);
         char *p = strstr(s + 1, "\"");
         int len = strlen(s) - strlen(p) - 1;
         strncpy(msg, s + 1, len);
         msg[len] = '\0';
         printf("msg:%s\n", msg);
         printf("\n\n");
      }
      curl_easy_cleanup(curl);
   }
}
/* Evaluate the sensor document @se_id against its alarm rule (@alarmType
 * is one of "val_above", "val_below", "switch_on", "offline") and, when
 * triggered, send an SMS to @contactId and record an alarm-log entry.
 * Re-alarms are suppressed for an hour after the previous alarm.
 *
 * NOTE(review): the cursor is never destroyed (leak); several printf
 * format strings below mismatch their arguments (flagged inline). Left
 * unchanged here. */
static void do_alarm(mongoc_collection_t *sensorcoll, char *se_id, char *alarmType, char *upperBoundC, char *lowerBoundC, char *duration, char *target, char *contactId, char *addr, mongoc_collection_t *alarmlogcoll, char *userId, char *device_name, char *de_id)
{
   mongoc_cursor_t *cursor;
   bson_t *doc;
   bson_t *query;
   char *str;

   query = bson_new ();
   bson_oid_t se_oid;
   bson_oid_init_from_string(&se_oid, se_id);
   BSON_APPEND_OID (query, "_id", &se_oid);
   cursor = mongoc_collection_find (sensorcoll, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL);

   double ivalue = 0;     /* current sensor reading */
   double dalarmtime = 0; /* epoch time of the last alarm */
   int interval = 0;
   int balarm = 0;        /* 1 when an alarm is already active */
   int buse = 0;          /* 1 once a usable "value" field was found */
   char name[1024];       /* sensor display name */
   memset(name, 0, 1024);

   while (mongoc_cursor_next (cursor, &doc)) {
      str = bson_as_json (doc, NULL);
      printf ("%s\n", str);
      bson_free (str);
      bson_iter_t iter;
      bson_iter_t sub_iter; /* unused */
      if (bson_iter_init (&iter, doc)) {
         while (bson_iter_next (&iter)) {
            bson_value_t *value;
            value = bson_iter_value (&iter);
            /* "value" may arrive as INT32, DOUBLE or UTF8 — handle all. */
            if (value->value_type == BSON_TYPE_INT32) {
               if (strcmp(bson_iter_key (&iter), "value") == 0) {
                  printf("value %d\n", value->value.v_int32);
                  /* NOTE(review): reads v_double of an INT32 value —
                   * presumably should be v_int32; ivalue gets an
                   * unrelated union member here. */
                  ivalue = value->value.v_double;
                  buse = 1;
               } else if (strcmp(bson_iter_key (&iter), "interval") == 0) {
                  interval = value->value.v_int32;
               } else if (strcmp(bson_iter_key (&iter), "balarm") == 0) {
                  balarm = value->value.v_int32;
               }
            }
            if (strcmp(bson_iter_key (&iter), "value") == 0) {
               printf("value type %d,%d\n", value->value_type, BSON_TYPE_INT32);
            }
            if (value->value_type == BSON_TYPE_DOUBLE) {
               if (strcmp(bson_iter_key (&iter), "value") == 0) {
                  buse = 1;
                  ivalue = value->value.v_double;
               }
               if (strcmp(bson_iter_key (&iter), "alarmtime") == 0) {
                  dalarmtime = value->value.v_double;
               }
            }
            if (value->value_type == BSON_TYPE_UTF8) {
               if (strcmp(bson_iter_key (&iter), "name") == 0) {
                  strcpy(name, value->value.v_utf8.str);
               }
               if (strcmp(bson_iter_key (&iter), "value") == 0) {
                  ivalue = atof(value->value.v_utf8.str);
                  buse = 1;
               }
            }
         }
      }
   }
   bson_destroy (query);

   printf("se_id:%s,buse:%d\n", se_id, buse);
   printf("alarmType:%s,%f : %s,%s", alarmType, ivalue, upperBoundC, lowerBoundC);

   time_t t;
   t = time(NULL);
   struct tm *lt; /* unused */
   int ii = time(&t);

   /* Suppress re-alarms fired within an hour of the previous one. */
   int bdoalarm = 1;
   if (balarm == 1) {
      bdoalarm = 1;
      if (dalarmtime > 0 && interval > 0) {
         if ((ii - dalarmtime) < 3600) {
            bdoalarm = 0;
         }
      }
   }

   if (buse == 1) {
      if (strcmp(alarmType, "val_above") == 0) {
         int iupper = atoi(upperBoundC);
         if (iupper < ivalue) {
            /* NOTE(review): one %d but two arguments passed. */
            printf("val_above %d value\n", iupper, ivalue);
            char msg[2048];
            memset(msg, 0, 2048);
            snprintf(msg, sizeof(msg) - 1, "故障报警:%s的%s数值高于%d,位置是%s,请及时处理!", device_name, name, iupper, addr);
            char buf[1024];
            URLEncodeGBK(msg, strlen(msg), buf, sizeof(buf));
            printf("%s\n", buf);
            printf("alarm info: %s\n", msg);
            if (bdoalarm > 0) {
               int code = sendsms_c(buf, contactId);
               saveAlarmLog(alarmlogcoll, se_id, code, contactId, msg, userId, device_name, name, de_id);
               updateAlarm(sensorcoll, se_id, interval + 1, 1);
               printf("send sms \n");
            }
         } else {
            /* back in range: clear the active alarm state */
            updateAlarm(sensorcoll, se_id, 0, 0);
         }
      } else if (strcmp(alarmType, "val_below") == 0) {
         int iupper = atoi(lowerBoundC);
         if (iupper > ivalue) {
            /* NOTE(review): %d used for the double ivalue — UB. */
            printf("val_below %d value %d\n", iupper, ivalue);
            char msg[2048];
            memset(msg, 0, 2048);
            snprintf(msg, sizeof(msg) - 1, "故障报警:%s的%s数值低于%d,位置是%s,请及时处理!", device_name, name, iupper, addr);
            char buf[1024];
            URLEncodeGBK(msg, strlen(msg), buf, sizeof(buf));
            printf("%s\n", buf);
            printf("alarm info: %s\n", msg);
            if (bdoalarm > 0) {
               int code = sendsms_c(buf, contactId);
               saveAlarmLog(alarmlogcoll, se_id, code, contactId, msg, userId, device_name, name, de_id);
               updateAlarm(sensorcoll, se_id, interval + 1, 1);
               printf("send sms \n");
            }
         } else {
            updateAlarm(sensorcoll, se_id, 0, 0);
         }
      } else if (strcmp(alarmType, "switch_on") == 0) {
         if (ivalue == 1) {
            char msg[2048];
            memset(msg, 0, 2048);
            snprintf(msg, sizeof(msg) - 1, "故障报警:%s,%s,位置是%s,请及时处理!", device_name, name, addr);
            char buf[1024];
            URLEncodeGBK(msg, strlen(msg), buf, sizeof(buf));
            printf("%s\n", buf);
            printf("alarm info: %s\n", msg);
            if (bdoalarm > 0) {
               int code = sendsms_c(buf, contactId);
               saveAlarmLog(alarmlogcoll, se_id, code, contactId, msg, userId, device_name, name, de_id);
               updateAlarm(sensorcoll, se_id, interval + 1, 1);
               printf("send sms \n");
            }
         } else {
            updateAlarm(sensorcoll, se_id, 0, 0);
         }
      } else if (strcmp(alarmType, "offline") == 0) {
         if (ivalue == 1) {
            char msg[2048];
            memset(msg, 0, 2048);
            snprintf(msg, sizeof(msg) - 1, "故障报警:%s,%s,位置是%s,请及时处理!", device_name, name, addr);
            char buf[1024];
            URLEncodeGBK(msg, strlen(msg), buf, sizeof(buf));
            printf("%s\n", buf);
            printf("alarm info: %s\n", msg);
            if (bdoalarm > 0) {
               int code = sendsms_c(buf, contactId);
               saveAlarmLog(alarmlogcoll, se_id, code, contactId, msg, userId, device_name, name, de_id);
               updateAlarm(sensorcoll, se_id, interval + 1, 1);
               printf("send sms \n");
            }
         } else {
            updateAlarm(sensorcoll, se_id, 0, 0);
         }
      }
   }
}
/* Merge a legacy (pre-write-command) server @reply into @result. @offset
 * is this command's position within the whole batch. The error domain
 * depends on @error_api_version; @default_code is used when the reply
 * carries an error message but no code. */
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,   /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t *reply,             /* IN */
                                   int32_t error_api_version,
                                   mongoc_error_code_t default_code,
                                   uint32_t offset)
{
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;
   mongoc_error_domain_t domain;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
               ? MONGOC_ERROR_SERVER
               : MONGOC_ERROR_COLLECTION;

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   /* Normalize the server's several duplicate-key codes to one. */
   if (_is_duplicate_key_error (code)) {
      code = MONGOC_ERROR_DUPLICATE_KEY;
   }

   if (code || err) {
      if (!err) {
         err = "unknown error";
      }

      /* A wtimeout failure is a write-concern error, not a write error. */
      if (bson_iter_init_find (&iter, reply, "wtimeout") &&
          bson_iter_as_bool (&iter)) {
         if (!code) {
            code = (int32_t) MONGOC_ERROR_WRITE_CONCERN_ERROR;
         }

         _append_write_concern_err_legacy (result, err, code);
      } else {
         if (!code) {
            code = (int32_t) default_code;
         }

         _append_write_err_legacy (result, err, domain, code, offset);
      }
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* "upserted" may be a single value (one _id) or an array of
       * {index, _id} subdocuments, depending on server version. */
      if (bson_iter_init_find (&iter, reply, "upserted") &&
          !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;

         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (
                     result, offset + upsert_idx, value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) && !bson_iter_bool (&iter)) {
         /* n == 1 but nothing matched: the single write was an upsert. */
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   /* Legacy replies never report nModified. */
   result->omit_nModified = true;

   EXIT;
}
/* Advance the change stream, storing the next notification in *bson.
 * Returns true when a document was produced; false on an empty batch or
 * error (check mongoc_change_stream_error_document). On a resumable error
 * the cursor is recreated once and the read retried. */
bool
mongoc_change_stream_next (mongoc_change_stream_t *stream, const bson_t **bson)
{
   bson_iter_t iter;
   bool ret = false;

   BSON_ASSERT (stream);
   BSON_ASSERT (bson);

   /* A previously recorded error makes the stream permanently dead. */
   if (stream->err.code != 0) {
      goto end;
   }

   BSON_ASSERT (stream->cursor);
   if (!mongoc_cursor_next (stream->cursor, bson)) {
      const bson_t *err_doc;
      bson_error_t err;
      bool resumable = false;

      if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) {
         /* no error occurred, just no documents left. */
         goto end;
      }

      resumable = _is_resumable_error (err_doc);

      if (resumable) {
         /* recreate the cursor. */
         mongoc_cursor_destroy (stream->cursor);
         stream->cursor = NULL;
         if (!_make_cursor (stream)) {
            goto end;
         }
         if (!mongoc_cursor_next (stream->cursor, bson)) {
            /* The retry either yielded nothing (fine) or failed again
             * (not resumable a second time). */
            resumable =
               !mongoc_cursor_error_document (stream->cursor, &err, &err_doc);
            if (resumable) {
               /* empty batch. */
               goto end;
            }
         }
      }

      if (!resumable) {
         /* Record the terminal error and its server document. */
         stream->err = err;
         bson_destroy (&stream->err_doc);
         bson_copy_to (err_doc, &stream->err_doc);
         goto end;
      }
   }

   /* we have received documents, either from the first call to next or after a
    * resume. */
   if (!bson_iter_init_find (&iter, *bson, "_id")) {
      bson_set_error (&stream->err,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN,
                      "Cannot provide resume functionality when the resume "
                      "token is missing");
      goto end;
   }

   /* copy the resume token. */
   bson_reinit (&stream->resume_token);
   BSON_APPEND_VALUE (
      &stream->resume_token, "resumeAfter", bson_iter_value (&iter));
   /* clear out the operation time, since we no longer need it to resume. */
   _mongoc_timestamp_clear (&stream->operation_time);
   ret = true;

end:
   /* Driver Sessions Spec: "When an implicit session is associated with a
    * cursor for use with getMore operations, the session MUST be returned to
    * the pool immediately following a getMore operation that indicates that the
    * cursor has been exhausted." */
   if (stream->implicit_session) {
      /* if creating the change stream cursor errored, it may be null. */
      if (!stream->cursor || stream->cursor->cursor_id == 0) {
         mongoc_client_session_destroy (stream->implicit_session);
         stream->implicit_session = NULL;
      }
   }
   return ret;
}
/* Merge a write-command server @reply into @result (variant without
 * error-label handling). @offset is this command's position within the
 * whole client-side batch, used to translate server indexes into batch
 * indexes. */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* A non-empty writeErrors array marks the whole result as failed. */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  /* Re-recurse so "_id" is found regardless of field
                   * order within the subdocument. */
                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (result,
                                                         offset + server_index,
                                                         value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      /*
       * SERVER-13001 - in a mixed sharded cluster a call to update could
       * return nModified (>= 2.6) or not (<= 2.4).  If any call does not
       * return nModified we can't report a valid final count so omit the
       * field completely.
       */
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      } else {
         /*
          * nModified could be BSON_TYPE_NULL, which should also be omitted.
          */
         result->omit_nModified = true;
      }
      break;
   default:
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (offset, result,
                                         &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (result->n_writeConcernErrors, &key,
                             str, sizeof str);

      bson_append_document (&result->writeConcernErrors, key, -1,
                            &write_concern_error);

      result->n_writeConcernErrors++;
   }

   EXIT;
}
/*
 * _mongoc_write_result_merge_legacy --
 *
 *    Fold a pre-2.6 getLastError-style reply into the accumulated result.
 *    "default_code" is used when the server reports an error message but
 *    no numeric code. "offset" is this batch's starting index in the
 *    overall bulk operation.
 *
 *    On error, synthesizes a modern writeErrors-shaped array so the same
 *    _mongoc_write_result_merge_arrays path can be reused.
 */
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,   /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t *reply,             /* IN */
                                   mongoc_error_code_t default_code,
                                   uint32_t offset)
{
   const bson_value_t *value;
   bson_t holder, write_errors, child;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   if (code || err) {
      /* normalize: a code without a message, or a message without a code. */
      if (!code) {
         code = default_code;
      }
      if (!err) {
         err = "unknown error";
      }
      bson_set_error (&result->error, MONGOC_ERROR_COLLECTION, code, "%s", err);
      result->failed = true;

      /* build {"0": [{"index": 0, "code": ..., "errmsg": ...}]} so the
       * iterator handed to merge_arrays points at a writeErrors array. */
      bson_init (&holder);
      bson_append_array_begin (&holder, "0", 1, &write_errors);
      bson_append_document_begin (&write_errors, "0", 1, &child);
      bson_append_int32 (&child, "index", 5, 0);
      bson_append_int32 (&child, "code", 4, code);
      bson_append_utf8 (&child, "errmsg", 6, err, -1);
      bson_append_document_end (&write_errors, &child);
      bson_append_array_end (&holder, &write_errors);
      bson_iter_init (&iter, &holder);
      bson_iter_next (&iter);

      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);

      bson_destroy (&holder);
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      /* legacy "upserted" may be a single value or an array of subdocs. */
      if (bson_iter_init_find (&iter, reply, "upserted") &&
          !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (
                     result, offset + upsert_idx, value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) && !bson_iter_bool (&iter)) {
         /* n == 1 with updatedExisting:false implies an upsert whose _id
          * the old server did not report. */
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   /* legacy replies never carry nModified; omit it from the final result. */
   result->omit_nModified = true;

   EXIT;
}
/* Build the aggregate command for a change stream into "command". The shape
 * depends on what the stream watches:
 *   collection: { aggregate: collname, pipeline: [...], cursor: { ... } }
 *   database:   { aggregate: 1, pipeline: [...], cursor: { ... } }
 *   client:     { aggregate: 1,
 *                 pipeline: [{$changeStream: {allChangesForCluster: true}},
 *                            ...],
 *                 cursor: { ... } }
 */
static void
_make_command (mongoc_change_stream_t *stream, bson_t *command)
{
   bson_iter_t pipeline_iter;
   bson_t stage;   /* { $changeStream: <cs_options> } */
   bson_t cs_options;
   bson_t pipeline_arr;
   bson_t cursor_opts;

   bson_init (command);

   /* collection streams name the collection; database- and client-level
    * streams aggregate over the integer 1. */
   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_COLLECTION) {
      bson_append_utf8 (
         command, "aggregate", 9, stream->coll, (int) strlen (stream->coll));
   } else {
      bson_append_int32 (command, "aggregate", 9, 1);
   }

   bson_append_array_begin (command, "pipeline", 8, &pipeline_arr);

   /* the $changeStream stage always goes first in the pipeline. */
   bson_append_document_begin (&pipeline_arr, "0", 1, &stage);
   bson_append_document_begin (&stage, "$changeStream", 13, &cs_options);
   bson_concat (&cs_options, stream->full_document);
   if (!bson_empty (&stream->resume_token)) {
      bson_concat (&cs_options, &stream->resume_token);
   }
   /* Change streams spec: "startAtOperationTime and resumeAfter are mutually
    * exclusive; if both startAtOperationTime and resumeAfter are set, the
    * server will return an error. Drivers MUST NOT throw a custom error, and
    * MUST defer to the server error." */
   if (!_mongoc_timestamp_empty (&stream->operation_time)) {
      _mongoc_timestamp_append (
         &stream->operation_time, &cs_options, "startAtOperationTime");
   }
   if (stream->change_stream_type == MONGOC_CHANGE_STREAM_CLIENT) {
      bson_append_bool (&cs_options, "allChangesForCluster", 20, true);
   }
   bson_append_document_end (&stage, &cs_options);
   bson_append_document_end (&pipeline_arr, &stage);

   /* append the user's pipeline stages, if any, after $changeStream. */
   if (bson_iter_init_find (
          &pipeline_iter, &stream->pipeline_to_append, "pipeline") &&
       BSON_ITER_HOLDS_ARRAY (&pipeline_iter)) {
      bson_iter_t stage_iter;
      uint32_t stage_idx = 1;
      char idx_buf[16];
      const char *idx_key;

      BSON_ASSERT (bson_iter_recurse (&pipeline_iter, &stage_iter));
      while (bson_iter_next (&stage_iter)) {
         /* the user pipeline may consist of invalid stages or non-documents.
          * append anyway, and rely on the server error. */
         size_t idx_key_len = bson_uint32_to_string (
            stage_idx, &idx_key, idx_buf, sizeof (idx_buf));
         bson_append_value (&pipeline_arr,
                            idx_key,
                            (int) idx_key_len,
                            bson_iter_value (&stage_iter));
         ++stage_idx;
      }
   }

   bson_append_array_end (command, &pipeline_arr);

   /* cursor options: include batchSize only when the caller set one. */
   bson_append_document_begin (command, "cursor", 6, &cursor_opts);
   if (stream->batch_size > 0) {
      bson_append_int32 (&cursor_opts, "batchSize", 9, stream->batch_size);
   }
   bson_append_document_end (command, &cursor_opts);
}
/* complete a write result, including only certain fields */ bool _mongoc_write_result_complete ( mongoc_write_result_t *result, /* IN */ int32_t error_api_version, /* IN */ const mongoc_write_concern_t *wc, /* IN */ mongoc_error_domain_t err_domain_override, /* IN */ bson_t *bson, /* OUT */ bson_error_t *error, /* OUT */ ...) { mongoc_error_domain_t domain; va_list args; const char *field; int n_args; bson_iter_t iter; bson_iter_t child; ENTRY; BSON_ASSERT (result); if (error_api_version >= MONGOC_ERROR_API_VERSION_2) { domain = MONGOC_ERROR_SERVER; } else if (err_domain_override) { domain = err_domain_override; } else if (result->error.domain) { domain = (mongoc_error_domain_t) result->error.domain; } else { domain = MONGOC_ERROR_COLLECTION; } /* produce either old fields like nModified from the deprecated Bulk API Spec * or new fields like modifiedCount from the CRUD Spec, which we partly obey */ if (bson && mongoc_write_concern_is_acknowledged (wc)) { n_args = 0; va_start (args, error); while ((field = va_arg (args, const char *))) { n_args++; if (!strcmp (field, "nInserted")) { BSON_APPEND_INT32 (bson, field, result->nInserted); } else if (!strcmp (field, "insertedCount")) { BSON_APPEND_INT32 (bson, field, result->nInserted); } else if (!strcmp (field, "nMatched")) { BSON_APPEND_INT32 (bson, field, result->nMatched); } else if (!strcmp (field, "matchedCount")) { BSON_APPEND_INT32 (bson, field, result->nMatched); } else if (!strcmp (field, "nModified")) { BSON_APPEND_INT32 (bson, field, result->nModified); } else if (!strcmp (field, "modifiedCount")) { BSON_APPEND_INT32 (bson, field, result->nModified); } else if (!strcmp (field, "nRemoved")) { BSON_APPEND_INT32 (bson, field, result->nRemoved); } else if (!strcmp (field, "deletedCount")) { BSON_APPEND_INT32 (bson, field, result->nRemoved); } else if (!strcmp (field, "nUpserted")) { BSON_APPEND_INT32 (bson, field, result->nUpserted); } else if (!strcmp (field, "upsertedCount")) { BSON_APPEND_INT32 (bson, field, 
result->nUpserted); } else if (!strcmp (field, "upserted") && !bson_empty0 (&result->upserted)) { BSON_APPEND_ARRAY (bson, field, &result->upserted); } else if (!strcmp (field, "upsertedId") && !bson_empty0 (&result->upserted) && bson_iter_init_find (&iter, &result->upserted, "0") && bson_iter_recurse (&iter, &child) && bson_iter_find (&child, "_id")) { /* "upsertedId", singular, for update_one() */ BSON_APPEND_VALUE (bson, "upsertedId", bson_iter_value (&child)); } } va_end (args); /* default: a standard result includes all Bulk API fields */ if (!n_args) { BSON_APPEND_INT32 (bson, "nInserted", result->nInserted); BSON_APPEND_INT32 (bson, "nMatched", result->nMatched); BSON_APPEND_INT32 (bson, "nModified", result->nModified); BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved); BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted); if (!bson_empty0 (&result->upserted)) { BSON_APPEND_ARRAY (bson, "upserted", &result->upserted); } } /* always append errors if there are any */ if (!n_args || !bson_empty (&result->writeErrors)) { BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors); } if (result->n_writeConcernErrors) { BSON_APPEND_ARRAY ( bson, "writeConcernErrors", &result->writeConcernErrors); } }
bool mongoc_gridfs_remove_by_filename (mongoc_gridfs_t *gridfs, const char *filename, bson_error_t *error) { mongoc_bulk_operation_t *bulk_files = NULL; mongoc_bulk_operation_t *bulk_chunks = NULL; mongoc_cursor_t *cursor = NULL; bson_error_t files_error; bson_error_t chunks_error; const bson_t *doc; const char *key; char keybuf[16]; int count = 0; bool chunks_ret; bool files_ret; bool ret = false; bson_iter_t iter; bson_t *files_q = NULL; bson_t *chunks_q = NULL; bson_t q = BSON_INITIALIZER; bson_t fields = BSON_INITIALIZER; bson_t ar = BSON_INITIALIZER; BSON_ASSERT (gridfs); if (!filename) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_INVALID_FILENAME, "A non-NULL filename must be specified."); return false; } /* * Find all files matching this filename. Hopefully just one, but not * strictly required! */ BSON_APPEND_UTF8 (&q, "filename", filename); BSON_APPEND_INT32 (&fields, "_id", 1); cursor = _mongoc_cursor_new (gridfs->client, gridfs->files->ns, MONGOC_QUERY_NONE, 0, 0, 0, false /* is command */, &q, &fields, NULL, NULL); BSON_ASSERT (cursor); while (mongoc_cursor_next (cursor, &doc)) { if (bson_iter_init_find (&iter, doc, "_id")) { const bson_value_t *value = bson_iter_value (&iter); bson_uint32_to_string (count, &key, keybuf, sizeof keybuf); BSON_APPEND_VALUE (&ar, key, value); } } if (mongoc_cursor_error (cursor, error)) { goto failure; } bulk_files = mongoc_collection_create_bulk_operation (gridfs->files, false, NULL); bulk_chunks = mongoc_collection_create_bulk_operation (gridfs->chunks, false, NULL); files_q = BCON_NEW ("_id", "{", "$in", BCON_ARRAY (&ar), "}"); chunks_q = BCON_NEW ("files_id", "{", "$in", BCON_ARRAY (&ar), "}"); mongoc_bulk_operation_remove (bulk_files, files_q); mongoc_bulk_operation_remove (bulk_chunks, chunks_q); files_ret = mongoc_bulk_operation_execute (bulk_files, NULL, &files_error); chunks_ret = mongoc_bulk_operation_execute (bulk_chunks, NULL, &chunks_error); if (error) { if (!files_ret) { memcpy (error, 
&files_error, sizeof *error); } else if (!chunks_ret) { memcpy (error, &chunks_error, sizeof *error); } } ret = (files_ret && chunks_ret); failure: if (cursor) { mongoc_cursor_destroy (cursor); } if (bulk_files) { mongoc_bulk_operation_destroy (bulk_files); } if (bulk_chunks) { mongoc_bulk_operation_destroy (bulk_chunks); } bson_destroy (&q); bson_destroy (&fields); bson_destroy (&ar); if (files_q) { bson_destroy (files_q); } if (chunks_q) { bson_destroy (chunks_q); } return ret; }
static void test_value_basic (void) { static const uint8_t raw[16] = { 0 }; const bson_value_t *value; bson_value_t copy; bson_iter_t iter; bson_oid_t oid; bson_t other = BSON_INITIALIZER; bson_t *doc; bson_t sub = BSON_INITIALIZER; bool r; int i; bson_oid_init (&oid, NULL); doc = BCON_NEW ("double", BCON_DOUBLE (123.4), "utf8", "this is my string", "document", BCON_DOCUMENT (&sub), "array", BCON_DOCUMENT (&sub), "binary", BCON_BIN (BSON_SUBTYPE_BINARY, raw, sizeof raw), "undefined", BCON_UNDEFINED, "oid", BCON_OID (&oid), "bool", BCON_BOOL (true), "datetime", BCON_DATE_TIME (12345678), "null", BCON_NULL, "regex", BCON_REGEX ("^hello", "i"), "dbpointer", BCON_DBPOINTER ("test.test", &oid), "code", BCON_CODE ("var a = function() {}"), "symbol", BCON_SYMBOL ("my_symbol"), "codewscope", BCON_CODEWSCOPE ("var a = 1;", &sub), "int32", BCON_INT32 (1234), "timestamp", BCON_TIMESTAMP (1234, 4567), "int64", BCON_INT32 (4321), "maxkey", BCON_MAXKEY, "minkey", BCON_MINKEY); r = bson_iter_init (&iter, doc); assert (r); for (i = 0; i < 20; i++) { r = bson_iter_next (&iter); assert (r); value = bson_iter_value (&iter); assert (value); bson_value_copy (value, ©); r = bson_append_value (&other, bson_iter_key (&iter), -1, ©); assert (r); bson_value_destroy (©); } r = bson_iter_next (&iter); assert (!r); bson_destroy (doc); bson_destroy (&other); }
static void alarm_send(void) { mongoc_client_t *curclient = mongoc_client_new ("mongodb://172.16.0.7:27017/"); mongoc_collection_t *collection; mongoc_collection_t *sensorcoll; mongoc_collection_t *alarmcoll; mongoc_collection_t *contactcoll; mongoc_collection_t *alarmlogcoll; collection = mongoc_client_get_collection (curclient, "smart_trash_development", "devices"); sensorcoll = mongoc_client_get_collection (curclient, "smart_trash_development", "sensors"); alarmcoll = mongoc_client_get_collection (curclient, "smart_trash_development", "alarms"); alarmlogcoll = mongoc_client_get_collection (curclient, "smart_trash_development", "alarmlogs"); contactcoll = mongoc_client_get_collection (curclient, "smart_trash_development", "contacts"); mongoc_cursor_t *cursor; bson_t *doc; bson_t *query; char *str; query = bson_new (); cursor = mongoc_collection_find (alarmcoll, MONGOC_QUERY_NONE, 0, 0, 0, query, NULL, NULL); bson_oid_t se_oid; char se_id[1024]; memset(se_id,0,1024); char de_id[1024]; memset(de_id,'\0',1024); char alarmType[1024]; memset(alarmType,0,1024); char lowerBoundC[1024]; memset(lowerBoundC,0,1024); char upperBoundC[1024]; memset(upperBoundC,0,1024); char duration[1024]; memset(duration,0,1024); char target[1024]; memset(target,0,1024); char contactId[1024]; memset(contactId,0,1024); char userId[1024]; memset(userId,0,1024); while (mongoc_cursor_next (cursor, &doc)) { str = bson_as_json (doc, NULL); bson_free (str); bson_iter_t iter; bson_iter_t sub_iter; if (bson_iter_init (&iter, doc)) { while (bson_iter_next (&iter)) { bson_value_t *value; value = bson_iter_value (&iter); if (value->value_type == BSON_TYPE_UTF8) { if(strcmp(bson_iter_key (&iter),"sensorId")==0){ strcpy(se_id,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"alarmType")==0){ strcpy(alarmType,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"lowerBoundC")==0){ strcpy(lowerBoundC,value->value.v_utf8.str); }else if (strcmp(bson_iter_key 
(&iter),"upperBoundC")==0){ strcpy(upperBoundC,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"duration")==0){ strcpy(duration,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"target")==0){ strcpy(target,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"contactId")==0){ strcpy(contactId,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"userId")==0){ strcpy(userId,value->value.v_utf8.str); }else if (strcmp(bson_iter_key (&iter),"deviceId")==0){ strcpy(de_id,value->value.v_utf8.str); } } } } char mobile[256]; char addr[1024]; char device_name[2048]; memset(mobile,'\0',256); memset(addr,'\0',1024); memset(device_name,'\0',2048); getPhone(contactcoll,contactId,mobile); printf("\ndev_id:%s,lowerBoundC:%s,upperBoundC:%s\n",de_id,lowerBoundC,upperBoundC); getaddressbyID(addr,collection,de_id,device_name); do_alarm(sensorcoll,se_id,alarmType,upperBoundC,lowerBoundC,duration,target,mobile,addr,alarmlogcoll,userId,device_name,de_id); } bson_destroy (query); mongoc_collection_destroy (collection); mongoc_collection_destroy (sensorcoll); mongoc_collection_destroy (alarmcoll); mongoc_collection_destroy (alarmlogcoll); mongoc_client_destroy (curclient); }
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 *    creates a gridfs file from a bson object
 *
 *    This is only really useful for instantiating a gridfs file from a
 *    server side object
 *
 *    Copies "data" into file->bson and pulls the standard GridFS metadata
 *    fields out of it. String/document fields (md5, filename, contentType,
 *    aliases, metadata) point into file->bson rather than being copied.
 *
 *    Returns a newly allocated file on success, or NULL when any field has
 *    an unexpected BSON type (all resources are released in that case).
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter) &&
             !BSON_ITER_HOLDS_DOUBLE (&iter)) {
            GOTO (failure);
         }
         /* chunk_size is int32_t; reject out-of-range server values. */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }
         file->chunk_size = (int32_t) bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)) {
            GOTO (failure);
         }
         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }
         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }
         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }
         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there are a minimal object we should be verifying that we
    *       actually have here? */

   RETURN (file);

failure:
   /* BUG FIX: the failure path destroyed file->bson but leaked the file
    * struct itself (and any _id value copied before the failing field). */
   if (file->files_id.value_type) {
      bson_value_destroy (&file->files_id);
   }
   bson_destroy (&file->bson);
   bson_free (file);

   RETURN (NULL);
}
/* fire command-succeeded event as if we'd used a modern write command.
 * note, cluster.request_id was incremented once for the write, again
 * for the getLastError, so cluster.request_id is no longer valid; used the
 * passed-in request_id instead.
 *
 * Translates a legacy getLastError reply ("gle") into a modern write-command
 * style reply document and delivers it to the client's APM "succeeded"
 * callback. Does nothing when no callback is registered.
 */
static void
_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
                                        int64_t duration,
                                        mongoc_write_command_t *command,
                                        const bson_t *gle,
                                        mongoc_server_stream_t *stream,
                                        int64_t request_id)
{
   bson_iter_t iter;
   bson_t doc;
   int64_t ok = 1;
   int64_t n = 0;
   uint32_t code = 8; /* default "unknown error" code */
   bool wtimeout = false;

   /* server error message */
   const char *errmsg = NULL;
   size_t errmsg_len = 0;

   /* server errInfo subdocument */
   bool has_errinfo = false;
   uint32_t len;
   const uint8_t *data;
   bson_t errinfo;

   /* server upsertedId value */
   bool has_upserted_id = false;
   bson_value_t upserted_id;

   /* server updatedExisting value */
   bool has_updated_existing = false;
   bool updated_existing = false;

   mongoc_apm_command_succeeded_t event;

   ENTRY;

   if (!client->apm_callbacks.succeeded) {
      EXIT;
   }

   /* first extract interesting fields from getlasterror response */
   if (gle) {
      bson_iter_init (&iter, gle);
      while (bson_iter_next (&iter)) {
         if (!strcmp (bson_iter_key (&iter), "ok")) {
            ok = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "n")) {
            n = bson_iter_as_int64 (&iter);
         } else if (!strcmp (bson_iter_key (&iter), "code")) {
            code = (uint32_t) bson_iter_as_int64 (&iter);
            if (code == 0) {
               /* server sent non-numeric error code? */
               code = 8;
            }
         } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
            has_upserted_id = true;
            bson_value_copy (bson_iter_value (&iter), &upserted_id);
         } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
            has_updated_existing = true;
            updated_existing = bson_iter_as_bool (&iter);
         } else if ((!strcmp (bson_iter_key (&iter), "err") ||
                     !strcmp (bson_iter_key (&iter), "errmsg")) &&
                    BSON_ITER_HOLDS_UTF8 (&iter)) {
            errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
         } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
                    BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            /* errinfo aliases gle's buffer; gle must outlive this scope. */
            bson_iter_document (&iter, &len, &data);
            bson_init_static (&errinfo, data, len);
            has_errinfo = true;
         } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
            wtimeout = true;
         }
      }
   }

   /* based on PyMongo's _convert_write_result() */
   bson_init (&doc);
   bson_append_int32 (&doc, "ok", 2, (int32_t) ok);

   if (errmsg && !wtimeout) {
      /* Failure, but pass to the success callback. Command Monitoring Spec:
       * "Commands that executed on the server and return a status of {ok: 1}
       * are considered successful commands and fire CommandSucceededEvent.
       * Commands that have write errors are included since the actual command
       * did succeed, only writes failed." */
      append_write_err (
         &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
   } else {
      /* Success, perhaps with a writeConcernError. */
      if (errmsg) {
         append_write_concern_err (&doc, errmsg, errmsg_len);
      }

      if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
         /* GLE result for insert is always 0 in most MongoDB versions. */
         n = command->n_documents;
      } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
         if (has_upserted_id) {
            append_upserted (&doc, &upserted_id);
         } else if (has_updated_existing && !updated_existing && n == 1) {
            /* an upsert whose _id the server did not report: recover the
             * _id from the update document itself (see get_upserted_id). */
            bson_t tmp;
            int32_t bson_len = 0;

            memcpy (&bson_len, command->payload.data, 4);
            bson_len = BSON_UINT32_FROM_LE (bson_len);
            bson_init_static (&tmp, command->payload.data, bson_len);
            has_upserted_id = get_upserted_id (&tmp, &upserted_id);

            if (has_upserted_id) {
               append_upserted (&doc, &upserted_id);
            }
         }
      }
   }

   bson_append_int32 (&doc, "n", 1, (int32_t) n);

   mongoc_apm_command_succeeded_init (
      &event,
      duration,
      &doc,
      _mongoc_command_type_to_name (command->type),
      request_id,
      command->operation_id,
      &stream->sd->host,
      stream->sd->id,
      client->apm_context);

   client->apm_callbacks.succeeded (&event);

   mongoc_apm_command_succeeded_cleanup (&event);
   bson_destroy (&doc);

   if (has_upserted_id) {
      bson_value_destroy (&upserted_id);
   }

   EXIT;
}