/* Run a database command against a collection and return the full reply.
 * ptr_col: external pointer wrapping a mongoc_collection_t
 * command: R object converted to a BSON command document via r2bson
 * Returns the server reply converted to an R list; raises an R error with
 * the server message on failure. */
SEXP R_mongo_collection_command_simple(SEXP ptr_col, SEXP command){
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *cmd = r2bson(command);
  bson_t reply;
  bson_error_t err;
  if(!mongoc_collection_command_simple(col, cmd, NULL, &reply, &err)){
    /* reply is initialized even on failure; destroy it before stop() longjmps */
    bson_destroy(&reply);
    stop(err.message);
  }
  SEXP out = PROTECT(bson2list(&reply));
  bson_destroy(&reply);
  UNPROTECT(1);
  return out;
}
/* Run an aggregation pipeline and expose the result as a cursor.
 * no_timeout: R logical; when TRUE the server-side cursor idle timeout is
 * disabled via MONGOC_QUERY_NO_CURSOR_TIMEOUT.
 * ptr_col is forwarded to cursor2r — consistent with R_mongo_collection_command —
 * so the collection external pointer stays protected from GC while the
 * cursor is alive. */
SEXP R_mongo_collection_aggregate(SEXP ptr_col, SEXP ptr_pipeline, SEXP no_timeout) {
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *pipeline = r2bson(ptr_pipeline);
  mongoc_query_flags_t flags = MONGOC_QUERY_NONE;
  if(asLogical(no_timeout))
    flags |= MONGOC_QUERY_NO_CURSOR_TIMEOUT;
  mongoc_cursor_t *c = mongoc_collection_aggregate(col, flags, pipeline, NULL, NULL);
  if(!c)
    stop("Error executing pipeline.");
  return cursor2r(c, ptr_col);
}
/* Execute a database command on this collection and return a cursor over
 * the result. The collection pointer is handed to cursor2r so it remains
 * protected for the lifetime of the cursor. */
SEXP R_mongo_collection_command(SEXP ptr_col, SEXP ptr_cmd, SEXP no_timeout){
  mongoc_collection_t *collection = r2col(ptr_col);
  bson_t *command = r2bson(ptr_cmd);
  /* optionally disable the server-side cursor idle timeout */
  mongoc_query_flags_t flags = Rf_asLogical(no_timeout) ?
    (MONGOC_QUERY_NONE + MONGOC_QUERY_NO_CURSOR_TIMEOUT) : MONGOC_QUERY_NONE;
  mongoc_cursor_t *cursor = mongoc_collection_command(collection, flags, 0, 0, 0, command, NULL, NULL);
  if(cursor == NULL)
    stop("Error executing command.");
  return cursor2r(cursor, ptr_col);
}
/* Execute a query and return a cursor over the matching documents.
 * skip/limit: R integers passed straight to the driver.
 * no_timeout: R logical disabling the server-side cursor idle timeout.
 * ptr_col is forwarded to cursor2r — consistent with R_mongo_collection_command —
 * so the collection external pointer cannot be GC'd while the cursor lives. */
SEXP R_mongo_collection_find(SEXP ptr_col, SEXP ptr_query, SEXP ptr_fields, SEXP skip, SEXP limit, SEXP no_timeout) {
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *query = r2bson(ptr_query);
  bson_t *fields = r2bson(ptr_fields);
  mongoc_query_flags_t flags = MONGOC_QUERY_NONE;
  if(asLogical(no_timeout))
    flags |= MONGOC_QUERY_NO_CURSOR_TIMEOUT;
  mongoc_cursor_t *c = mongoc_collection_find(col, flags, asInteger(skip), asInteger(limit), 0, query, fields, NULL);
  if(!c)
    stop("Error executing query.");
  return cursor2r(c, ptr_col);
}
/* Create an index on the collection from a BSON keys document.
 * The index name is derived from the keys via the driver helper.
 * Returns TRUE on success; raises an R error with the server message otherwise.
 * From: https://s3.amazonaws.com/mciuploads/mongo-c-driver/docs/latest/create-indexes.html */
SEXP R_mongo_collection_create_index(SEXP ptr_col, SEXP ptr_bson) {
  mongoc_collection_t *col = r2col(ptr_col);
  bson_t *keys = r2bson(ptr_bson);
  const char * collection_name = mongoc_collection_get_name(col);
  char * index_name = mongoc_collection_keys_to_index_string(keys);
  bson_error_t err;
  bson_t * command = BCON_NEW("createIndexes", BCON_UTF8(collection_name),
    "indexes", "[", "{",
      "key", BCON_DOCUMENT(keys),
      "name", BCON_UTF8(index_name),
    "}", "]");
  bool success = mongoc_collection_write_command_with_opts(col, command, NULL, NULL, &err);
  /* free driver allocations before a potential longjmp out of stop() */
  bson_free(index_name);
  bson_destroy(command);
  if(!success)
    stop(err.message);
  return Rf_ScalarLogical(1);
}
/* Count the documents matching 'ptr_query'.
 * no_timeout: R logical disabling the server-side cursor idle timeout.
 * Raises an R error on failure. */
SEXP R_mongo_collection_count (SEXP ptr, SEXP ptr_query, SEXP no_timeout){
  mongoc_collection_t *collection = r2col(ptr);
  bson_t *query = r2bson(ptr_query);
  mongoc_query_flags_t flags = MONGOC_QUERY_NONE;
  if(asLogical(no_timeout))
    flags += MONGOC_QUERY_NO_CURSOR_TIMEOUT;
  bson_error_t err;
  int64_t total = mongoc_collection_count(collection, flags, query, 0, 0, NULL, &err);
  if(total < 0)
    stop(err.message);
  /* R has no native int64: return the count as a double */
  return ScalarReal((double) total);
}
/* Restore a BSON dump (mongodump format) into a collection.
 * con:  R connection object, read through the bson_reader_feed callbacks
 * verb: R logical; print progress when TRUE
 * Documents are inserted in batches of up to 1000 via bulk operations
 * (default opts use {ordered:true}). Returns the number of documents
 * inserted as an R integer. */
SEXP R_mongo_restore(SEXP con, SEXP ptr_col, SEXP verb) {
  bool verbose = Rf_asLogical(verb);
  mongoc_collection_t *col = r2col(ptr_col);
  bson_reader_t *reader = bson_reader_new_from_handle(con, bson_reader_feed, bson_reader_finalize);
  mongoc_bulk_operation_t *bulk = NULL;
  const bson_t *b;
  bson_error_t err;
  int count = 0;
  int i = 0;
  bool done = false;
  bson_t reply;
  while(!done) {
    //note: default opts uses {ordered:true}
    bulk = mongoc_collection_create_bulk_operation_with_opts(col, NULL);
    for(i = 0; i < 1000; i++){
      if(!(b = bson_reader_read(reader, &done)))
        break;
      mongoc_bulk_operation_insert(bulk, b);
      count++;
    }
    if(i == 0)
      break; /* nothing queued this round; bulk freed below */
    bool success = mongoc_bulk_operation_execute(bulk, &reply, &err);
    /* reply is initialized even on failure and must always be destroyed;
     * destroy the bulk op here too — it used to leak once per batch */
    bson_destroy(&reply);
    mongoc_bulk_operation_destroy(bulk);
    bulk = NULL;
    if(!success){
      bson_reader_destroy(reader);
      /* never pass driver/server text as a format string */
      Rf_error("%s", err.message);
    }
    if(verbose)
      Rprintf("\rRestored %d records...", count);
  }
  if(verbose)
    Rprintf("\rDone! Inserted total of %d records.\n", count);
  if (!done)
    Rf_warning("Failed to read all documents.\n");
  bson_reader_destroy(reader);
  if(bulk != NULL)
    mongoc_bulk_operation_destroy(bulk);
  return Rf_ScalarInteger(count);
}
/* Update documents matching 'ptr_selector' with the 'ptr_update' document.
 * upsert:   insert the document when nothing matches
 * multiple: modify every match instead of only the first
 * Returns TRUE on success; raises an R error with the driver message otherwise. */
SEXP R_mongo_collection_update(SEXP ptr_col, SEXP ptr_selector, SEXP ptr_update, SEXP upsert, SEXP multiple){
  mongoc_collection_t *collection = r2col(ptr_col);
  bson_t *selector = r2bson(ptr_selector);
  bson_t *update = r2bson(ptr_update);
  /* assemble update flags */
  mongoc_update_flags_t flags = MONGOC_UPDATE_NONE;
  if(asLogical(upsert))
    flags |= MONGOC_UPDATE_UPSERT;
  if(asLogical(multiple))
    flags |= MONGOC_UPDATE_MULTI_UPDATE;
  bson_error_t err;
  if(!mongoc_collection_update(collection, flags, selector, update, NULL, &err))
    stop(err.message);
  return ScalarLogical(1);
}
/* Bulk-insert a character vector of JSON documents.
 * json_vec:      non-empty character vector, one JSON document per element
 * stop_on_error: R logical; TRUE -> ordered bulk (abort at first failure and
 *                raise an R error), FALSE -> unordered bulk (attempt all
 *                inserts, warn, and return the partial reply).
 * Returns the bulk-execute reply converted to an R list. */
SEXP R_mongo_collection_insert_page(SEXP ptr_col, SEXP json_vec, SEXP stop_on_error){
  if(!Rf_isString(json_vec) || !Rf_length(json_vec))
    stop("json_vec must be character string of at least length 1");

  //ordered means serial execution: stop at the first failed insert
  bool ordered = Rf_asLogical(stop_on_error);

  //create bulk operation; apply the requested ordering (previously the
  //opts were NULL, so the default {ordered:true} ignored stop_on_error)
  bson_error_t err;
  bson_t *b;
  bson_t reply;
  bson_t *opts = BCON_NEW("ordered", BCON_BOOL(ordered));
  mongoc_bulk_operation_t *bulk = mongoc_collection_create_bulk_operation_with_opts(r2col(ptr_col), opts);
  bson_destroy(opts);
  for(int i = 0; i < Rf_length(json_vec); i++){
    b = bson_new_from_json((uint8_t*) Rf_translateCharUTF8(Rf_asChar(STRING_ELT(json_vec, i))), -1, &err);
    if(!b){
      mongoc_bulk_operation_destroy(bulk);
      stop(err.message);
    }
    mongoc_bulk_operation_insert(bulk, b);
    bson_destroy(b);
    b = NULL;
  }

  //execute bulk operation
  bool success = mongoc_bulk_operation_execute(bulk, &reply, &err);
  mongoc_bulk_operation_destroy(bulk);

  //check for errors
  if(!success){
    if(ordered){
      /* destroy reply before the longjmp; err.message is a stack buffer
       * and must never be used as a format string */
      bson_destroy(&reply);
      Rf_errorcall(R_NilValue, "%s", err.message);
    } else {
      Rf_warningcall(R_NilValue, "Not all inserts were successful: %s\n", err.message);
    }
  }

  //get output
  SEXP out = PROTECT(bson2list(&reply));
  bson_destroy(&reply);
  UNPROTECT(1);
  return out;
}
/* Return the collection name as a UTF-8 encoded R string. */
SEXP R_mongo_collection_name (SEXP ptr){
  return mkStringUTF8(mongoc_collection_get_name(r2col(ptr)));
}