/* Create or update a user entry in <db>.system.users, storing the MD5
 * password digest under "pwd". Returns the mongo_update() result
 * (MONGO_OK on success). */
MONGO_EXPORT int mongo_cmd_add_user( mongo *conn, const char *db, const char *user, const char *pass ) {
    static const char suffix[] = ".system.users";
    bson query_obj;
    bson update_obj;
    char hex_digest[33];
    size_t db_len = strlen( db );
    char *ns = bson_malloc( db_len + sizeof( suffix ) );
    int result;

    /* Build the "<db>.system.users" namespace string (sizeof includes NUL). */
    memcpy( ns, db, db_len );
    memcpy( ns + db_len, suffix, sizeof( suffix ) );

    mongo_pass_digest( user, pass, hex_digest );

    /* Select the document by user name... */
    bson_init( &query_obj );
    bson_append_string( &query_obj, "user", user );
    bson_finish( &query_obj );

    /* ...and upsert the digest so the user is created if missing. */
    bson_init( &update_obj );
    bson_append_start_object( &update_obj, "$set" );
    bson_append_string( &update_obj, "pwd", hex_digest );
    bson_append_finish_object( &update_obj );
    bson_finish( &update_obj );

    result = mongo_update( conn, ns, &query_obj, &update_obj, MONGO_UPDATE_UPSERT );

    bson_free( ns );
    bson_destroy( &query_obj );
    bson_destroy( &update_obj );
    return result;
}
/* Persist a grown file size for inode e. Shrinking updates are ignored
 * (returns 0 without touching the database). Returns 0 on success or
 * -EIO if the update fails. */
int update_filesize(struct inode * e, off_t newsize) {
    bson query;
    bson update;
    mongo * conn = get_conn();
    int rc;

    /* Only ever grow the recorded size. */
    if(newsize < e->size)
        return 0;
    e->size = newsize;

    bson_init(&query);
    bson_append_oid(&query, "_id", &e->oid);
    bson_finish(&query);

    bson_init(&update);
    bson_append_start_object(&update, "$set");
    bson_append_long(&update, "size", newsize);
    bson_append_finish_object(&update);
    bson_finish(&update);

    rc = mongo_update(conn, inodes_name, &query, &update, 0, NULL);

    bson_destroy(&query);
    bson_destroy(&update);

    return rc != 0 ? -EIO : 0;
}
/* Increment the "value" counter stored for `key` in collection `db`,
 * inserting a zero-valued record first if the key does not exist yet.
 * Returns the new counter value, or -1 on bad arguments / lookup failure.
 * NOTE: the `value` parameter is only validated, never stored. */
int mongodb_update_key_stat(bot_t * bot, char *db, char *key, int value)
{
	bson b;
	bson_iterator i;
	mongo_cursor cursor;
	int ret = 0;

	if (!db || !key || value < 0) {
		return -1;
	}

	debug(bot, "mongodb_update_key_stat: Entered\n");

	/* Query document: { key: <key> } — also reused as the update selector. */
	bson_init(&b);
	bson_append_string(&b, "key", key);
	bson_finish(&b);

	mongo_cursor_init(&cursor, &gi->mongo_conn, db);
	mongo_cursor_set_query(&cursor, &b);

	if (mongo_cursor_next(&cursor) != MONGO_OK) {
		/* Key missing: seed it at 0, then query again. Destroy the
		 * first cursor before re-initializing it — re-initializing an
		 * active cursor leaks its buffers and any server-side state. */
		mongodb_insert_key_stat(bot, db, key, 0);
		mongo_cursor_destroy(&cursor);
		mongo_cursor_init(&cursor, &gi->mongo_conn, db);
		mongo_cursor_set_query(&cursor, &b);
		if (mongo_cursor_next(&cursor) != MONGO_OK)
			goto cleanup;
	}

	debug(bot, "mongodb_update_key_stat: Found!\n");

	if (bson_find(&i, mongo_cursor_bson(&cursor), "value")) {
		bson c;
		ret = (int)bson_iterator_int(&i);
		ret++;
		/* Replacement document with the incremented counter. */
		bson_init(&c);
		bson_append_string(&c, "key", key);
		bson_append_int(&c, "value", ret);
		bson_finish(&c);
		debug(bot, "mongodb_update_key_stat: updating to %i\n", ret);
		mongo_update(&gi->mongo_conn, db, &b, &c, 0);
		bson_destroy(&c);
		bson_destroy(&b);
		mongo_cursor_destroy(&cursor);
		return ret;
	}
	debug(bot, "mongodb_update_key_stat: Key not found\n");

 cleanup:
	bson_destroy(&b);
	mongo_cursor_destroy(&cursor);
	return -1;
}
// Writes newObject to the collection. When oldObject is supplied it is
// used as the update selector (replace-style update); otherwise newObject
// is inserted as a fresh document. Returns true iff the driver reported
// MONGO_OK; failures are logged with the connection's error code.
bool MongodbClient::SaveObject(MongodbObject *newObject, MongodbObject *oldObject)
{
    int32 status = MONGO_ERROR;
    if(IsConnected())
    {
        if(NULL == oldObject)
        {
            // No previous document known: plain insert.
            status = mongo_insert(clientData->connection, namespaceName.c_str(),
                                  (bson *)newObject->InternalObject(), NULL);
            if(MONGO_OK != status)
            {
                LogError(String("SaveObject, insert"), clientData->connection->err);
            }
        }
        else
        {
            // Replace the matched document with the new content.
            status = mongo_update(clientData->connection, namespaceName.c_str(),
                                  (bson *)oldObject->InternalObject(),
                                  (bson *)newObject->InternalObject(), 0, NULL);
            if(MONGO_OK != status)
            {
                LogError(String("SaveObject, update"), clientData->connection->err);
            }
        }
    }
    return (MONGO_OK == status);
}
// Saves `object` under its own object name: if a document with that name
// already exists it is replaced, otherwise the object is inserted.
// Returns true iff the driver reported MONGO_OK; failures are logged.
bool MongodbClient::SaveObject(MongodbObject *object)
{
    int32 status = MONGO_ERROR;
    if(IsConnected())
    {
        MongodbObject *existing = FindObjectByKey(object->GetObjectName());
        if(NULL == existing)
        {
            // First time we see this name: insert a new document.
            status = mongo_insert(clientData->connection, namespaceName.c_str(),
                                  (bson *)object->InternalObject(), NULL);
            if(MONGO_OK != status)
            {
                LogError(String("SaveObject, insert"), clientData->connection->err);
            }
        }
        else
        {
            // Replace the stored document with the new content.
            status = mongo_update(clientData->connection, namespaceName.c_str(),
                                  (bson *)existing->InternalObject(),
                                  (bson *)object->InternalObject(), 0, NULL);
            if(MONGO_OK != status)
            {
                LogError(String("SaveObject, update"), clientData->connection->err);
            }
            SafeRelease(existing);
        }
    }
    return (MONGO_OK == status);
}
/* Persist the node's block bitmap and block count:
 * { $set: { blocks: <binary bitmap>, blocksCount: <n> } }
 * selected by the node id. */
void mongo_node_updateBlocks(node_t *node)
{
    bson_t *query;
    bson_t *update;

    mongo_node_checkInit();

    query = BCON_NEW("_id", BCON_UTF8(node->id));
    update = BCON_NEW("$set", "{",
                      "blocks", BCON_BIN(BSON_SUBTYPE_BINARY,
                                         (const uint8_t *) node->blocks->bitarray,
                                         node->blocks->size),
                      "blocksCount", BCON_INT32(node->blocksCount),
                      "}");

    mongo_update(nodeCollection, query, update);

    /* BCON_NEW heap-allocates both documents; without these destroys
     * every call leaked two bson_t instances. */
    bson_destroy(query);
    bson_destroy(update);
}
bool TMongoDriver::updateMulti(const QString &ns, const QVariantMap &criteria, const QVariantMap &object) { mongo_clear_errors(mongoConnection); int status = mongo_update(mongoConnection, qPrintable(ns), (const bson *)TBson::toBson(criteria).data(), (const bson *)TBson::toBson(object).data(), MONGO_UPDATE_MULTI, 0); if (status != MONGO_OK) { tSystemError("MongoDB Error: %s", mongoConnection->lasterrstr); return false; } return true; }
// Async worker for update: runs mongo_update() on this collection's
// namespace and stores the driver status in retVal. Rejects synchronous
// invocation and a released database handle.
result_t MongoCollection::_update(const bson* cond, const bson* op, int flags, int32_t& retVal, AsyncEvent* ac)
{
    // This operation must execute on the worker thread.
    if (ac->isSync())
        return CHECK_ERROR(CALL_E_NOSYNC);

    obj_ptr<MongoDB> db(m_db);
    if (!db)
        return CHECK_ERROR(CALL_E_INVALID_CALL);

    retVal = mongo_update(db->m_conn, m_ns.c_str(), cond, op, flags, NULL);
    return 0;
}
/* R binding for mongo_update(): `flags` may be an integer vector whose
 * elements are OR-ed together into the driver flag word. Returns an R
 * logical scalar: TRUE iff the update succeeded. */
SEXP rmongo_update(SEXP mongo_conn, SEXP ns, SEXP cond, SEXP op, SEXP flags) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* _ns = CHAR(STRING_ELT(ns, 0));
    bson* _cond = _checkBSON(cond);
    bson* _op = _checkBSON(op);
    int _flags = 0;
    int count = LENGTH(flags);
    int idx;
    SEXP ret;

    /* Combine every flag element supplied from R. */
    for (idx = 0; idx < count; ++idx)
        _flags |= INTEGER(flags)[idx];

    PROTECT(ret = allocVector(LGLSXP, 1));
    LOGICAL(ret)[0] = (mongo_update(conn, _ns, _cond, _op, _flags) == MONGO_OK);
    UNPROTECT(1);
    return ret;
}
int commit_inode(struct inode * e) { bson cond, doc; mongo * conn = get_conn(); char istr[4]; struct dirent * cde = e->dirents; int res; bson_init(&doc); bson_append_start_object(&doc, "$set"); bson_append_start_array(&doc, "dirents"); res = 0; while(cde) { bson_numstr(istr, res++); bson_append_string(&doc, istr, cde->path); cde = cde->next; } bson_append_finish_array(&doc); bson_append_int(&doc, "mode", e->mode); bson_append_long(&doc, "owner", e->owner); bson_append_long(&doc, "group", e->group); bson_append_long(&doc, "size", e->size); bson_append_time_t(&doc, "created", e->created); bson_append_time_t(&doc, "modified", e->modified); if(e->data && e->datalen > 0) bson_append_string_n(&doc, "data", e->data, e->datalen); bson_append_finish_object(&doc); bson_finish(&doc); bson_init(&cond); bson_append_oid(&cond, "_id", &e->oid); bson_finish(&cond); res = mongo_update(conn, inodes_name, &cond, &doc, MONGO_UPDATE_UPSERT, NULL); bson_destroy(&cond); bson_destroy(&doc); if(res != MONGO_OK) { fprintf(stderr, "Error committing inode %s\n", mongo_get_server_err_string(conn)); return -EIO; } return 0; }
/* Tutorial: increment the "visits" counter of the person named Joe,
 * age 33, in tutorial.persons using a $inc modifier. */
static void tutorial_update( mongo *conn ) {
    bson query[1];
    bson modifier[1];

    /* Selector: { name: "Joe", age: 33 } */
    bson_init( query );
    bson_append_string( query, "name", "Joe");
    bson_append_int( query, "age", 33);
    bson_finish( query );

    /* Modifier: { $inc: { visits: 1 } } */
    bson_init( modifier );
    bson_append_start_object( modifier, "$inc" );
    bson_append_int( modifier, "visits", 1 );
    bson_append_finish_object( modifier );
    bson_finish( modifier );

    mongo_update( conn, "tutorial.persons", query, modifier, MONGO_UPDATE_BASIC, 0 );

    bson_destroy( query );
    bson_destroy( modifier );
}
/* Update DLR */ static void dlr_mongodb_update(const Octstr *smsc, const Octstr *ts, const Octstr *dst, int status) { DBPoolConn *pconn; bson cond, op; bson_buffer cond_buf, op_buf; mongo_connection *conn = NULL; pconn = dbpool_conn_consume(pool); if (pconn == NULL) { return; } conn = (mongo_connection*)pconn->conn; bson_buffer_init(&cond_buf); bson_append_string(&cond_buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(smsc)); bson_append_string(&cond_buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(ts)); if (dst) { bson_append_string(&cond_buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(dst)); } bson_from_buffer(&cond, &cond_buf); bson_buffer_init(&op_buf); { bson_buffer *sub = bson_append_start_object(&op_buf, "$set"); bson_append_int(sub, octstr_get_cstr(fields->field_status), status); bson_append_finish_object(sub); } bson_from_buffer(&op, &op_buf); MONGO_TRY { mongo_update(conn, mongodb_namespace, &cond, &op, 0); } MONGO_CATCH { mongodb_error("dlr_mongodb_update", conn->exception.type); } dbpool_conn_produce(pconn); bson_destroy(&cond); bson_destroy(&op); }
bool MongodbClient::SaveBinary(const String &key, uint8 *data, int32 dataSize) { int32 status = MONGO_ERROR; if(IsConnected()) { MongodbObject * binary = new MongodbObject(); DVASSERT(binary); binary->SetObjectName(key); binary->AddInt32(String("DataSize").c_str(), dataSize); binary->AddData(String("Data").c_str(), data, dataSize); binary->Finish(); MongodbObject *foundObject = FindObjectByKey(key); if(foundObject) { status = mongo_update(clientData->connection, namespaceName.c_str(), (bson *)foundObject->InternalObject(), (bson *)binary->InternalObject(), 0, NULL); if(MONGO_OK != status) { LogError(String("SaveBinary, update"), clientData->connection->err); } SafeRelease(foundObject); } else { status = mongo_insert(clientData->connection, namespaceName.c_str(), (bson *)binary->InternalObject(), NULL); if(MONGO_OK != status) { LogError(String("SaveBinary, insert"), clientData->connection->err); } } SafeRelease(binary); } return (MONGO_OK == status); }
/* Driver validation test: exercises the BSON error flags (unfinished
 * documents, dotted keys, keys starting with '$', invalid UTF-8) and
 * verifies that insert/update/cursor operations refuse invalid or
 * unfinished documents. Requires a server at TEST_SERVER:27017. */
int main() {
    mongo conn[1];
    bson b, empty;
    mongo_cursor cursor[1];
    unsigned char not_utf8[3];
    int result = 0;
    const char *ns = "test.c.validate";
    int i=0, j=0;
    bson bs[BATCH_SIZE];
    bson *bp[BATCH_SIZE];

    /* 0xC0 0xC0 is an invalid UTF-8 byte sequence. */
    not_utf8[0] = 0xC0;
    not_utf8[1] = 0xC0;
    not_utf8[2] = '\0';

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Test checking for finished bson: inserting an unfinished document
     * must fail with MONGO_BSON_NOT_FINISHED. */
    bson_init( &b );
    bson_append_int( &b, "foo", 1 );
    ASSERT( mongo_insert( conn, "test.foo", &b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_NOT_FINISHED );
    bson_destroy( &b );

    /* Test valid keys. A '.' in a key only sets an error flag; the
     * append itself still reports BSON_OK. */
    bson_init( &b );
    result = bson_append_string( &b , "a.b" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );

    /* Don't set INIT dollar if deb ref fields are being used:
     * $id/$ref/$db are legitimate DBRef keys. */
    result = bson_append_string( &b , "$id" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );
    result = bson_append_string( &b , "$ref" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );
    result = bson_append_string( &b , "$db" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) );

    /* Any other $-prefixed key flags BSON_FIELD_INIT_DOLLAR. */
    result = bson_append_string( &b , "$ab" , "17" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );

    result = bson_append_string( &b , "ab" , "this is valid utf8" );
    ASSERT( result == BSON_OK );
    ASSERT( ! ( b.err & BSON_NOT_UTF8 ) );

    /* An invalid-UTF-8 *key* is a hard error. */
    result = bson_append_string( &b , ( const char * )not_utf8, "valid" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    /* Finishing must fail and preserve the accumulated error flags. */
    ASSERT( bson_finish( &b ) == BSON_ERROR );
    ASSERT( b.err & BSON_FIELD_HAS_DOT );
    ASSERT( b.err & BSON_FIELD_INIT_DOLLAR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    /* Insert, update, and query must all refuse the unfinished document. */
    result = mongo_insert( conn, ns, &b, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    result = mongo_update( conn, ns, bson_empty( &empty ), &b, 0, NULL );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err & MONGO_BSON_NOT_FINISHED );

    mongo_cursor_init( cursor, conn, "test.cursors" );
    mongo_cursor_set_query( cursor, &b );
    result = mongo_cursor_next( cursor );
    ASSERT( result == MONGO_ERROR );
    ASSERT( cursor->err & MONGO_CURSOR_BSON_ERROR );
    ASSERT( cursor->conn->err & MONGO_BSON_NOT_FINISHED );

    bson_destroy( &b );
    mongo_cursor_destroy( cursor );

    /* Test valid strings: invalid UTF-8 *values* are rejected too. */
    bson_init( &b );
    result = bson_append_string( &b , "foo" , "bar" );
    ASSERT( result == BSON_OK );
    ASSERT( b.err == 0 );

    result = bson_append_string( &b , "foo" , ( const char * )not_utf8 );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    /* Reset the error flags, then confirm regexes are validated as well. */
    b.err = 0;
    ASSERT( b.err == 0 );

    result = bson_append_regex( &b , "foo" , ( const char * )not_utf8, "s" );
    ASSERT( result == BSON_ERROR );
    ASSERT( b.err & BSON_NOT_UTF8 );

    /* Batch insert of invalid documents must fail with MONGO_BSON_INVALID. */
    for ( j=0; j < BATCH_SIZE; j++ )
        bp[j] = &bs[j];
    for ( j=0; j < BATCH_SIZE; j++ )
        make_small_invalid( &bs[j], i );

    result = mongo_insert_batch( conn, ns, (const bson **)bp, BATCH_SIZE, NULL, 0 );
    ASSERT( result == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_INVALID );

    for ( j=0; j < BATCH_SIZE; j++ )
        bson_destroy( &bs[j] );
    bson_destroy( &b );

    mongo_cmd_drop_db( conn, "test" );
    mongo_disconnect( conn );
    mongo_destroy( conn );
    return 0;
}
/* FUSE write handler: deduplicates the written block by SHA1, strips
 * leading/trailing zero bytes, snappy-compresses the remainder and
 * upserts it into the blocks collection keyed by the hash. The write is
 * then recorded in the inode's in-memory extent map and the file size is
 * grown if needed. Returns the number of bytes written or -errno. */
int mongo_write(const char *path, const char *buf, size_t size, off_t offset, struct fuse_file_info *fi)
{
    struct inode * e;
    int res;
    size_t reallen;
    int32_t realend = size, blk_offset = 0;
    const off_t write_end = size + offset;
    char * lock;
    bson doc, cond;
    mongo * conn = get_conn();
    uint8_t hash[20];
    time_t now = time(NULL);

    e = (struct inode*)fi->fh;
    if((res = get_cached_inode(path, e)) != 0)
        return res;
    if(e->mode & S_IFDIR)
        return -EISDIR;

    /* Uncomment this for incredibly slow length calculations.
    for(;realend >= 0 && buf[realend] == '\0'; realend--);
    realend++;
    for(blk_offset = 0; blk_offset < realend && buf[blk_offset] == 0; blk_offset++);
    blk_offset -= blk_offset > 0 ? 1 : 0;
    * The code below uses SSE4 instructions to find the first/last
    * zero bytes by doing 16-byte comparisons at a time. This should give
    * a ~16 speed boost on blocks with lots of zero bytes over the dumb
    * method above.
    */
    if(size >= 16) {
        __m128i zero = _mm_setzero_si128();
        /* Scan backwards 16 bytes at a time for the last non-zero byte. */
        lock = (char*)buf + size - 16;
        while(lock >= buf) {
            __m128i x = _mm_loadu_si128((__m128i*)lock);
            res = _mm_movemask_epi8(_mm_cmpeq_epi8(zero, x));
            if(res == 0xffff) {
                lock -= 16;
                continue;
            }
            realend = lock - buf + fls(res ^ 0xffff);
            break;
        }
        if(lock <= buf)
            realend = 0;
        /* Scan forwards for the first non-zero byte. */
        lock = (char*)buf;
        while(lock - buf < realend) {
            __m128i x = _mm_loadu_si128((__m128i*)lock);
            res = _mm_movemask_epi8(_mm_cmpeq_epi8(zero, x));
            if(res == 0xffff) {
                lock += 16;
                continue;
            }
            blk_offset = lock - buf + ffs(res ^ 0xffff) - 1;
            break;
        }
    }
    reallen = realend - blk_offset;
    if(reallen == 0) {
        /* All-zero block: record a hole, no data written to MongoDB. */
        pthread_mutex_lock(&e->wr_lock);
        res = insert_empty(&e->wr_extent, offset, size);
        goto end;
    }

    /* Content-address the block by the SHA1 of the full write. */
#ifdef __APPLE__
    CC_SHA1(buf, size, hash);
#else
    SHA1(buf, size, hash);
#endif

    bson_init(&cond);
    bson_append_binary(&cond, "_id", 0, (char*)hash, sizeof(hash));
    bson_finish(&cond);

    /* $setOnInsert: identical blocks upsert to the same document. */
    bson_init(&doc);
    bson_append_start_object(&doc, "$setOnInsert");
    char * comp_out = get_compress_buf();
    size_t comp_size = snappy_max_compressed_length(reallen);
    if((res = snappy_compress(buf + blk_offset, reallen,
        comp_out, &comp_size)) != SNAPPY_OK) {
        fprintf(stderr, "Error compressing input: %d\n", res);
        /* Release both documents; the early return used to leak them. */
        bson_destroy(&doc);
        bson_destroy(&cond);
        return -EIO;
    }
    bson_append_binary(&doc, "data", 0, comp_out, comp_size);
    bson_append_int(&doc, "offset", blk_offset);
    bson_append_int(&doc, "size", size);
    bson_append_time_t(&doc, "created", now);
    bson_append_finish_object(&doc);
    bson_finish(&doc);

    res = mongo_update(conn, blocks_name, &cond, &doc, MONGO_UPDATE_UPSERT, NULL);
    bson_destroy(&doc);
    bson_destroy(&cond);
    if(res != MONGO_OK) {
        fprintf(stderr, "Error committing block %s\n", conn->lasterrstr);
        return -EIO;
    }

    pthread_mutex_lock(&e->wr_lock);
    res = insert_hash(&e->wr_extent, offset, size, hash);

end:
    if(write_end > e->size)
        e->size = write_end;
    /* Periodically flush the extent map (at most every 3 seconds). */
    if(now - e->wr_age > 3) {
        res = serialize_extent(e, e->wr_extent);
        if(res != 0) {
            pthread_mutex_unlock(&e->wr_lock);
            return res;
        }
        e->wr_age = now;
    }
    pthread_mutex_unlock(&e->wr_lock);
    if(res != 0)
        return res;
    res = update_filesize(e, write_end);
    if(res != 0)
        return res;
    return size;
}
/* Driver update test (legacy bson_buffer API): inserts {_id, a:3},
 * applies {$inc:{a:2}, $set:{b:-1.5}} five times, then verifies the
 * resulting document is {_id, a:13, b:-1.5}. Requires a server at
 * TEST_SERVER:27017. */
int main(){
    mongo_connection conn[1];
    bson_buffer bb;
    bson obj;
    bson cond;
    int i;
    bson_oid_t oid;
    const char* col = "c.update_test";
    const char* ns = "test.c.update_test";

    INIT_SOCKETS_FOR_WINDOWS;

    if (mongo_connect( conn , TEST_SERVER, 27017 )){
        printf("failed to connect\n");
        exit(1);
    }

    /* if the collection doesn't exist dropping it will fail */
    if ( mongo_cmd_drop_collection(conn, "test", col, NULL) == MONGO_OK
            && mongo_find_one(conn, ns, bson_empty(&obj), bson_empty(&obj), NULL) != MONGO_OK ){
        printf("failed to drop collection\n");
        exit(1);
    }

    bson_oid_gen(&oid);

    { /* insert the starting document {_id: oid, a: 3} */
        bson_buffer_init(&bb);
        bson_append_oid(&bb, "_id", &oid);
        bson_append_int(&bb, "a", 3 );
        bson_from_buffer(&obj, &bb);
        mongo_insert(conn, ns, &obj);
        bson_destroy(&obj);
    }

    { /* update: apply {$inc:{a:2}, $set:{b:-1.5}} five times */
        bson op;
        bson_buffer_init(&bb);
        bson_append_oid(&bb, "_id", &oid);
        bson_from_buffer(&cond, &bb);

        bson_buffer_init(&bb);
        {
            bson_append_start_object(&bb, "$inc");
            bson_append_int(&bb, "a", 2 );
            bson_append_finish_object(&bb);
        }
        {
            bson_append_start_object(&bb, "$set");
            bson_append_double(&bb, "b", -1.5 );
            bson_append_finish_object(&bb);
        }
        bson_from_buffer(&op, &bb);

        for (i=0; i<5; i++)
            mongo_update(conn, ns, &cond, &op, 0);

        /* cond is used later */
        bson_destroy(&op);
    }

    /* Fetch the document back and check every field. */
    if( mongo_find_one(conn, ns, &cond, 0, &obj) != MONGO_OK ){
        printf("Failed to find object\n");
        exit(1);
    } else {
        int fields = 0;
        bson_iterator it;
        bson_iterator_init(&it, obj.data);

        bson_destroy(&cond);

        while(bson_iterator_next(&it)){
            switch(bson_iterator_key(&it)[0]){
                case '_': /* id: must still be the oid we inserted */
                    ASSERT(bson_iterator_type(&it) == BSON_OID);
                    ASSERT(!memcmp(bson_iterator_oid(&it)->bytes, oid.bytes, 12));
                    fields++;
                    break;
                case 'a': /* 3 incremented by 2, five times */
                    ASSERT(bson_iterator_type(&it) == BSON_INT);
                    ASSERT(bson_iterator_int(&it) == 3 + 5*2);
                    fields++;
                    break;
                case 'b': /* $set is idempotent across the five updates */
                    ASSERT(bson_iterator_type(&it) == BSON_DOUBLE);
                    ASSERT(bson_iterator_double(&it) == -1.5);
                    fields++;
                    break;
            }
        }
        ASSERT(fields == 3);
    }

    bson_destroy(&obj);
    mongo_cmd_drop_db(conn, "test");
    mongo_destroy(conn);
    return 0;
}
/** * \brief This function tries to update tag group in MongoDB. It adds new * version of data. */ int vs_mongo_taggroup_update(struct VS_CTX *vs_ctx, struct VSNode *node, struct VSTagGroup *tg) { bson cond, op; bson bson_version; int ret; /* TODO: delete old version, when there is too much versions: int old_saved_version = tg->saved_version; */ bson_init(&cond); { bson_append_oid(&cond, "_id", &tg->oid); /* To be sure that right tag group will be updated */ bson_append_int(&cond, "node_id", node->id); bson_append_int(&cond, "taggroup_id", tg->id); } bson_finish(&cond); bson_init(&op); { /* Update item current_version in document */ bson_append_start_object(&op, "$set"); { bson_append_int(&op, "current_version", tg->version); } bson_append_finish_object(&op); /* Create new bson object representing current version and add it to * the object versions */ bson_append_start_object(&op, "$set"); { bson_init(&bson_version); { vs_mongo_taggroup_save_version(tg, &bson_version, UINT32_MAX); } bson_finish(&bson_version); bson_append_bson(&op, "versions", &bson_version); } bson_append_finish_object(&op); } bson_finish(&op); ret = mongo_update(vs_ctx->mongo_conn, vs_ctx->mongo_tg_ns, &cond, &op, MONGO_UPDATE_BASIC, 0); bson_destroy(&bson_version); bson_destroy(&cond); bson_destroy(&op); if(ret != MONGO_OK) { v_print_log(VRS_PRINT_ERROR, "Unable to update tag group %d to MongoDB: %s, error: %s\n", tg->id, vs_ctx->mongo_tg_ns, mongo_get_server_err_string(vs_ctx->mongo_conn)); return 0; } return 1; }
/* We can test write concern for update
 * and remove by doing operations on a capped collection:
 * growth-updates and removes are forbidden there, so the failure only
 * becomes visible when a write concern (w:1) forces an acknowledgment. */
static void test_update_and_remove( mongo *conn ) {
    mongo_write_concern wc[1];
    bson *objs[5];
    bson query[1], update[1];
    int i;

    create_capped_collection( conn );

    /* Seed the capped collection with {n: 0..4}. */
    for( i=0; i<5; i++ ) {
        objs[i] = bson_alloc();
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }
    ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, "test", "wc", bson_shared_empty( ) ) == 5 );

    /* Selector: the document with n == 2. */
    bson_init( query );
    bson_append_int( query, "n", 2 );
    bson_finish( query );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* Modifier that would grow the document — illegal in a capped ns. */
    bson_init( update );
    bson_append_start_object( update, "$set" );
    bson_append_string( update, "n", "a big long string" );
    bson_append_finish_object( update );
    bson_finish( update );

    /* Update will appear to succeed with no write concern specified, but
     * doesn't: the document is still findable and unchanged. */
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* Remove will appear to succeed with no write concern specified, but
     * doesn't. */
    ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* With w:1 the server's error is surfaced to the client. */
    mongo_write_concern_init( wc );
    mongo_write_concern_set_w( wc, 1 );
    mongo_write_concern_finish( wc );

    mongo_clear_errors( conn );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "failing update: objects in a capped ns cannot grow" );

    mongo_clear_errors( conn );
    ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "can't remove from a capped collection" );

    mongo_write_concern_destroy( wc );
    bson_destroy( query );
    bson_destroy( update );
    for( i=0; i<5; i++ ) {
        bson_destroy( objs[i] );
        bson_dealloc( objs[i] );
    }
}