/* Connects to MongoDB over a UNIX domain socket, inserts a single
 * document, and drops the test collection before and after the insert. */
int main() {

    mongo conn[1];
    bson doc;
    const char *sock_path = "/tmp/mongodb-27017.sock";
    const char *ns = "test.c.unix_socket";
    const char *col = "c.unix_socket";

    /* A port of -1 makes the driver treat sock_path as a UNIX socket. */
    ASSERT( mongo_client( conn, sock_path, -1 ) == MONGO_OK );

    /* Start from a clean collection. */
    mongo_cmd_drop_collection( conn, "test", col, NULL );

    bson_init( &doc );
    bson_append_new_oid( &doc, "_id" );
    bson_append_string( &doc, "foo", "bar" );
    bson_append_int( &doc, "x", 1 );
    bson_finish( &doc );

    ASSERT( mongo_insert( conn, ns, &doc, NULL ) == MONGO_OK );

    /* Remove the collection created by the insert. */
    mongo_cmd_drop_collection( conn, "test", col, NULL );

    bson_destroy( &doc );
    mongo_destroy( conn );

    return 0;
}
Пример #2
0
/**
 * \brief This function tries to add new tag group to MongoDB
 *
 * Builds a BSON document from the tag group state and inserts it into
 * the tag-group namespace (vs_ctx->mongo_tg_ns). A fresh ObjectId is
 * generated for the tag group and stored in tg->oid.
 *
 * \param[in] vs_ctx  Server context holding the MongoDB connection.
 * \param[in] node    Node that owns the tag group.
 * \param[in] tg      Tag group to be stored.
 *
 * \return 1 on success, 0 when the insert failed.
 */
int vs_mongo_taggroup_add_new(struct VS_CTX *vs_ctx,
		struct VSNode *node,
		struct VSTagGroup *tg)
{
	bson bson_tg;
	int ret;
	bson_init(&bson_tg);

	/* Generate a new ObjectId and remember it in the tag group. */
	bson_oid_gen(&tg->oid);
	bson_append_oid(&bson_tg, "_id", &tg->oid);
	bson_append_int(&bson_tg, "node_id", node->id);
	bson_append_int(&bson_tg, "taggroup_id", tg->id);
	bson_append_int(&bson_tg, "custom_type", tg->custom_type);
	bson_append_int(&bson_tg, "current_version", tg->version);

	/* Embed the current version snapshot under "versions". */
	bson_append_start_object(&bson_tg, "versions");
	vs_mongo_taggroup_save_version(tg, &bson_tg, UINT32_MAX);
	bson_append_finish_object(&bson_tg);

	bson_finish(&bson_tg);

	ret = mongo_insert(vs_ctx->mongo_conn, vs_ctx->mongo_tg_ns, &bson_tg, 0);

	bson_destroy(&bson_tg);

	if(ret != MONGO_OK) {
		/* NOTE(review): the first %s receives the namespace, not an error
		 * string, so the message reads oddly -- verify intended wording. */
		v_print_log(VRS_PRINT_ERROR,
				"Unable to write tag group %d of node %d to MongoDB: %s, error: %s\n",
				tg->id, node->id, vs_ctx->mongo_tg_ns,
				mongo_get_server_err_string(vs_ctx->mongo_conn));
		return 0;
	}

	return 1;
}
Пример #3
0
/* Verifies that inserts into a namespace containing '$' are rejected
 * client-side (MONGO_NS_INVALID), for both single and batch inserts.
 * Returns 0 on success; exits on connection failure. */
int test_namespace_validation_on_insert( void ) {
    mongo conn[1];
    bson b[1], b2[1];
    bson *objs[2];

    INIT_SOCKETS_FOR_WINDOWS;

    if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    /* Single insert into an invalid namespace must fail validation. */
    bson_init( b );
    bson_append_int( b, "foo", 1 );
    bson_finish( b );

    ASSERT( mongo_insert( conn, "tet.fo$o", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );
    mongo_clear_errors( conn );

    /* Batch insert must be validated the same way. */
    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    ASSERT( mongo_insert_batch( conn, "tet.fo$o",
          (const bson **)objs, 2, NULL, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_NS_INVALID );
    ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 );

    /* Fix: release BSON buffers and the connection (previously leaked). */
    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Пример #4
0
/* Builds a BSON document from a NULL-terminated array of typed
 * arguments (one field per entry, keyed by arg name) and inserts it
 * into the namespace `db`.
 * Returns 0 on success, -1 on bad arguments or a failed insert. */
int mongodb_fmt_insert_arg(bot_t * bot, char *db, mongo_argument_t ** arg_array)
{
/* example: ^e |^mongotestfmt "%s=name %i=age %f=grade" Darqbot 50 99.9 */

	int i;
	int status;
	bson b;

	debug(bot, "mongodb_fmt_insert_arg: Entered\n");

	if (!db || !arg_array)
		return -1;

	bson_init(&b);

	/* Append one field per argument, dispatching on its declared type. */
	for (i = 0; arg_array[i] != NULL; i++) {
		switch (arg_array[i]->type) {
		case MONGODB_ARG_STRING:
			bson_append_string(&b, arg_array[i]->name,
					   arg_array[i]->arg_str);
			break;
		case MONGODB_ARG_INT:
			bson_append_int(&b, arg_array[i]->name,
					arg_array[i]->arg_int);
			break;
		case MONGODB_ARG_DOUBLE:
			bson_append_double(&b, arg_array[i]->name,
					   arg_array[i]->arg_double);
			break;
		case MONGODB_ARG_LONG:
			bson_append_long(&b, arg_array[i]->name,
					 arg_array[i]->arg_long);
			break;
		case MONGODB_ARG_OID:
			bson_append_oid(&b, arg_array[i]->name,
					&arg_array[i]->arg_oid);
			break;
		default:
			/* Unknown type: skip this argument. */
			break;
		}
	}

	bson_finish(&b);

	/* Fix: capture and propagate the insert status instead of
	 * unconditionally returning success. */
	status = mongo_insert(&gi->mongo_conn, db, &b);

	bson_destroy(&b);

	return (status == MONGO_OK) ? 0 : -1;
}
Пример #5
0
int main(int argc, char const *argv[])
{
  mongo conn[1];
  // int status = mongo_client( conn, "127.0.0.1", 27017 );
  int Age;
  char FirstName[25], LastName[25], Course[20];
/*
  if( status != MONGO_OK ) {
      switch ( conn->err ) {
        case MONGO_CONN_NO_SOCKET:  printf( "no socket\n" ); return 1;
        case MONGO_CONN_FAIL:       printf( "connection failed\n" ); return 1;
        case MONGO_CONN_NOT_MASTER: printf( "not master\n" ); return 1;
      }
  }
*/
  printf("Type student first name: ");
  scanf("%s", FirstName);
  printf("Type student last name: ");
  scanf("%s", LastName);
  printf("Type student course: ");
  scanf("%s", Course);
  printf("Type student age: ");
  scanf("%s", Age);
  // mongo insertion
  mongo_insert( conn, "students", FirstName, LastName, Age, Course);
  mongo_destroy( conn );
  return 0;
}
Пример #6
0
/* Test getaddrinfo() by successfully connecting to 'localhost'. */
int test_getaddrinfo( void ) {
    mongo conn[1];
    bson doc[1];
    const char *ns = "test.foo";
    const char *errmsg = "getaddrinfo failed";

    /* A non-resolvable host must fail with a getaddrinfo error. */
    if( mongo_client( conn, "badhost.example.com", 27017 ) == MONGO_OK ) {
        printf( "connected to bad host!\n" );
        exit( 1 );
    }
    ASSERT( strncmp( errmsg, conn->errstr, strlen( errmsg ) ) == 0 );

    /* 'localhost' must resolve and connect. */
    if( mongo_client( conn, "localhost", 27017 ) != MONGO_OK ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    mongo_cmd_drop_collection( conn, "test", "foo", NULL );

    /* Insert one document and confirm the count. */
    bson_init( doc );
    bson_append_int( doc, "foo", 17 );
    bson_finish( doc );

    mongo_insert( conn, ns, doc, NULL );

    ASSERT( mongo_count( conn, "test", "foo", NULL ) == 1 );

    bson_destroy( doc );
    mongo_destroy( conn );

    return 0;
}
Пример #7
0
/* Test read timeout by causing the
 * server to sleep for 10s on a query.
 * The op timeout is set to 1s, so the find must fail with an I/O
 * timeout. Returns 0 on success. */
int test_read_timeout( void ) {
    mongo conn[1];
    bson b, obj, out, fields;
    int res;

    CONN_CLIENT_TEST;

    /* Query whose $where clause sleeps server-side for 10 seconds. */
    bson_init( &b );
    bson_append_code( &b, "$where", "sleep( 10 * 1000 );");
    bson_finish( &b );

    bson_init( &obj );
    bson_append_string( &obj, "foo", "bar");
    bson_finish( &obj );

    /* Fix: the insert status was previously ignored. */
    res = mongo_insert( conn, "test.foo", &obj, NULL );
    ASSERT( res == MONGO_OK );

    /* Set the connection timeout here (1s, far below the 10s sleep). */
    if( mongo_set_op_timeout( conn, 1000 ) != MONGO_OK ) {
        printf("Could not set socket timeout!.");
        exit(1);
    }

    res = mongo_find_one( conn, "test.foo", &b, bson_empty(&fields), &out );
    ASSERT( res == MONGO_ERROR );

    ASSERT( conn->err == MONGO_IO_ERROR );
    ASSERT( conn->errcode == WSAETIMEDOUT );

    /* Fix: release BSON buffers and the connection (previously leaked). */
    bson_destroy( &b );
    bson_destroy( &obj );
    mongo_destroy( conn );

    return 0;
}
Пример #8
0
/* Saves newObject: when oldObject is given, the stored document that
 * matches it is replaced via update; otherwise newObject is inserted.
 * Returns true when the driver reported success. */
bool MongodbClient::SaveObject(MongodbObject *newObject, MongodbObject *oldObject)
{
    if(!IsConnected())
    {
        return false;
    }

    int32 status = MONGO_ERROR;

    if(oldObject)
    {
        /* Replace the existing document matching oldObject. */
        status = mongo_update(clientData->connection, namespaceName.c_str(),
                              (bson *)oldObject->InternalObject(),
                              (bson *)newObject->InternalObject(), 0, NULL);
        if(MONGO_OK != status)
        {
            LogError(String("SaveObject, update"), clientData->connection->err);
        }
    }
    else
    {
        status = mongo_insert(clientData->connection, namespaceName.c_str(),
                              (bson *)newObject->InternalObject(), NULL);
        if(MONGO_OK != status)
        {
            LogError(String("SaveObject, insert"), clientData->connection->err);
        }
    }

    return (MONGO_OK == status);
}
Пример #9
0
/* Upsert-by-name: if a document with the same object name already
 * exists it is replaced via update, otherwise the object is inserted.
 * Returns true when the driver reported success. */
bool MongodbClient::SaveObject(MongodbObject *object)
{
    if(!IsConnected())
    {
        return false;
    }

    int32 status = MONGO_ERROR;
    MongodbObject *stored = FindObjectByKey(object->GetObjectName());

    if(stored)
    {
        /* Replace the stored document with the new one. */
        status = mongo_update(clientData->connection, namespaceName.c_str(),
                              (bson *)stored->InternalObject(),
                              (bson *)object->InternalObject(), 0, NULL);
        if(MONGO_OK != status)
        {
            LogError(String("SaveObject, update"), clientData->connection->err);
        }

        SafeRelease(stored);
    }
    else
    {
        status = mongo_insert(clientData->connection, namespaceName.c_str(),
                              (bson *)object->InternalObject(), NULL);
        if(MONGO_OK != status)
        {
            LogError(String("SaveObject, insert"), clientData->connection->err);
        }
    }

    return (MONGO_OK == status);
}
Пример #10
0
/* Saves accounting information.
 * Walks the request's value pairs, builds a BSON document keyed by
 * attribute name, and inserts it when accounting should be recorded.
 * Returns RLM_MODULE_OK on success, RLM_MODULE_FAIL on insert error. */
static int mongo_account(void *instance, REQUEST *request)
{
	rlm_mongo_t *data = (rlm_mongo_t *)instance;
	bson b;
	bson_buffer buf;
	const char *attr;
	char value[MAX_STRING_LEN+1];
	VALUE_PAIR *vp = request->packet->vps;
        // shall we insert this packet or not
        int insert;

	bson_buffer_init(&buf);
	bson_append_new_oid(&buf, "_id");

	insert = 0;

	while (vp) {
		attr = vp->name;
		/* With only_stop configured, record only Stop (status 2)
		 * packets; any other status aborts the attribute walk. */
		if ((strcmp(attr, "Acct-Status-Type") == 0) && ((strcmp(data->only_stop, "") != 0))) {
			if ((vp->vp_integer & 0xffffff) != 2) {
				break;
			} else {
				insert = 1;
			}
		}
		switch (vp->type) {
			case PW_TYPE_INTEGER:
				bson_append_int(&buf, attr, vp->vp_integer & 0xffffff);
				break;
			case PW_TYPE_BYTE:
			case PW_TYPE_SHORT:
				bson_append_int(&buf, attr, vp->vp_integer);
				break;
			case PW_TYPE_DATE:
				bson_append_time_t(&buf, attr, vp->vp_date);
				break;
			default:
				/* Fall back to the printable representation. */
				vp_prints_value(value, sizeof(value), vp, 0);
				bson_append_string(&buf, attr, value);
				// akh
				RDEBUG("mongo default insert %s", value);
				break;
		}
		vp = vp->next;
	}
	bson_from_buffer(&b, &buf);

	MONGO_TRY {
		if (insert == 1) {
			mongo_insert(conn, data->acct_base, &b);
			RDEBUG("accounting record was inserted");
		}
	} MONGO_CATCH {
		/* Fix: release the BSON document before the early return
		 * (it was leaked on this error path). */
		bson_destroy(&b);
		radlog(L_ERR, "mongo_insert failed");
		return RLM_MODULE_FAIL;
	}

	bson_destroy(&b);
	return RLM_MODULE_OK;
}
Пример #11
0
/* Stores an in-memory buffer as a GridFS file: the buffer is split
 * into DEFAULT_CHUNK_SIZE chunks, each inserted into the chunks
 * namespace, then the file metadata document is inserted. */
int gridfs_store_buffer( gridfs *gfs, const char *data,
                          gridfs_offset length, const char *remotename,
                          const char *contenttype ) {

    bson_oid_t id;
    bson *oChunk;
    const char *cursor = data;
    const char *end = data + length;
    int chunkNumber = 0;

    /* Large files Assertion */
    assert( length <= 0xffffffff );

    /* Generate the shared ObjectId that links chunks to the file. */
    bson_oid_gen( &id );

    /* Insert the file's data chunk by chunk. */
    while ( cursor < end ) {
        unsigned int remaining = ( unsigned int )( end - cursor );
        int chunkLen = remaining < DEFAULT_CHUNK_SIZE ?
                       ( int )remaining : DEFAULT_CHUNK_SIZE;
        oChunk = chunk_new( id, chunkNumber, cursor, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk );
        chunk_free( oChunk );
        chunkNumber++;
        cursor += chunkLen;
    }

    /* Insert the file's metadata document. */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
Пример #12
0
/* Verifies that documents larger than the server's max BSON size are
 * rejected (single and batch insert) against a replica set.
 * Returns 0 on success or the connection error code. */
int test_insert_limits( const char *set_name ) {
    char version[10];
    mongo conn[1];
    mongo_write_concern wc[1];
    int i;
    char key[10];
    int res = 0;
    bson b[1], b2[1];
    bson *objs[2];

    mongo_write_concern_init( wc );
    wc->w = 1;
    mongo_write_concern_finish( wc );

    /* We'll perform the full test if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) {
        mongo_write_concern_destroy( wc );  /* Fix: was leaked on this path. */
        return 0;
    }

    mongo_replset_init( conn, set_name );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 );
    mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT );
    res = mongo_replset_connect( conn );

    if( res != MONGO_OK ) {
        res = conn->err;
        mongo_write_concern_destroy( wc );  /* Fix: was leaked on this path. */
        mongo_destroy( conn );
        return res;
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    /* Build a document guaranteed to exceed the server's max BSON size. */
    bson_init( b );
    for(i=0; i<1200000; i++) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );

    ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    /* A batch containing one oversized document must also be rejected. */
    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson**)objs, 2, wc, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_write_concern_destroy( wc );

    /* Fix: release BSON buffers and the connection (previously leaked). */
    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Пример #13
0
/* Benchmark: insert PER_TRIAL medium-sized documents one at a time. */
static void single_insert_medium_test() {
    bson doc;
    int trial;

    for ( trial = 0; trial < PER_TRIAL; trial++ ) {
        make_medium( &doc, trial );
        mongo_insert( conn, DB ".single.medium", &doc );
        bson_destroy( &doc );
    }
}
Пример #14
0
/* Benchmark: insert PER_TRIAL small documents one at a time. */
static void single_insert_small_test() {
    bson doc;
    int trial;

    for ( trial = 0; trial < PER_TRIAL; trial++ ) {
        make_small( &doc, trial );
        mongo_insert( conn, DB ".single.small", &doc );
        bson_destroy( &doc );
    }
}
/* Benchmark: insert PER_TRIAL large documents one at a time. */
static void single_insert_large_test( void ) {
    bson doc;
    int trial;

    for ( trial = 0; trial < PER_TRIAL; trial++ ) {
        make_large( &doc, trial );
        mongo_insert( conn, DB ".single.large", &doc, NULL );
        bson_destroy( &doc );
    }
}
Пример #16
0
/* Exercises write-concern argument handling on insert:
 *  - an unfinished write concern must be rejected client-side;
 *  - a failing connection-default write concern (w:2 without
 *    replication) must surface a write error;
 *  - a write concern passed per-call must override the default. */
static void test_write_concern_input( mongo *conn ) {
    mongo_write_concern wc[1], wcbad[1];
    bson b[1];

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );

    bson_init( b );
    bson_append_new_oid( b, "_id" );
    bson_finish( b );

    mongo_write_concern_init( wc );    
    mongo_write_concern_set_w( wc, 1 );

    /* Failure to finish write concern object. */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_CONCERN_INVALID );
    ASSERT_EQUAL_STRINGS( conn->errstr,
        "Must call mongo_write_concern_finish() before using *write_concern." );

    mongo_write_concern_finish( wc );

    /* Use a bad write concern. */
    mongo_clear_errors( conn );
    mongo_write_concern_init( wcbad );    
    mongo_write_concern_set_w( wcbad, 2 );
    mongo_write_concern_finish( wcbad );
    mongo_set_write_concern( conn, wcbad );
    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "norepl" );

    /* Ensure that supplied write concern overrides default. */
    /* NOTE(review): the E11000 below presumes the previous insert wrote
     * the document despite the norepl error -- verify against server. */
    mongo_clear_errors( conn );
    ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* Detach wcbad from conn before destroying it. */
    conn->write_concern = NULL;
    mongo_write_concern_destroy( wc );
    mongo_write_concern_destroy( wcbad );
    bson_destroy( b );
}
Пример #17
0
/* Benchmark: create an index on "x", then insert PER_TRIAL
 * medium-sized documents. */
static void index_insert_medium_test(){
    int i;
    bson b;
    /* NOTE(review): sibling tests assert `== MONGO_OK` (0) while this
     * asserts the raw return is truthy -- presumably an older driver
     * API returning a boolean. Verify against the driver version. */
    ASSERT(mongo_create_simple_index(conn, DB ".index.medium", "x", 0, NULL));
    for (i=0; i<PER_TRIAL; i++){
        make_medium(&b, i);
        mongo_insert(conn, DB ".index.medium", &b);
        bson_destroy(&b);
    }
}
/* Benchmark: create an index on "x", then insert PER_TRIAL large
 * documents. */
static void index_insert_large_test( void ) {
    bson doc;
    int trial;

    ASSERT( mongo_create_simple_index( conn, DB ".index.large", "x", 0, NULL ) == MONGO_OK );

    for ( trial = 0; trial < PER_TRIAL; trial++ ) {
        make_large( &doc, trial );
        mongo_insert( conn, DB ".index.large", &doc, NULL );
        bson_destroy( &doc );
    }
}
Пример #19
0
/* R binding: inserts BSON document `b` into namespace `ns` using the
 * mongo connection wrapped in `mongo_conn`.
 * Returns an R logical vector of length 1: TRUE on success. */
SEXP rmongo_insert(SEXP mongo_conn, SEXP ns, SEXP b) {
    mongo* conn = _checkMongo(mongo_conn);
    const char* _ns = CHAR(STRING_ELT(ns, 0));
    SEXP ret;
    /* PROTECT guards ret from R's garbage collector until UNPROTECT. */
    PROTECT(ret = allocVector(LGLSXP, 1));
    bson* _b = _checkBSON(b);
    LOGICAL(ret)[0] = (mongo_insert(conn, _ns, _b) == MONGO_OK);
    UNPROTECT(1);
    return ret;
}
bool TMongoDriver::insert(const QString &ns, const QVariantMap &object)
{
    mongo_clear_errors(mongoConnection);
    int status = mongo_insert(mongoConnection, qPrintable(ns),
                              (const bson *)TBson::toBson(object).constData(), 0);
    if (status != MONGO_OK) {
        tSystemError("MongoDB Error: %s", mongoConnection->lasterrstr);
        return false;
    }
    return true;
}
Пример #21
0
/* Drops the benchmark database, then recreates it by inserting an
 * empty document. Exits on drop failure. */
static void clean(){
    bson empty;

    if (!mongo_cmd_drop_db(conn, DB)){
        printf("failed to drop db\n");
        exit(1);
    }

    /* create the db */
    mongo_insert(conn, DB ".creation", bson_empty(&empty));
    ASSERT(!mongo_cmd_get_last_error(conn, DB, NULL));
}
/* Drops the benchmark database, then recreates it by inserting a
 * shared empty document. Exits on drop failure. */
static void clean( void ) {
    bson b;

    if ( mongo_cmd_drop_db( conn, DB ) != MONGO_OK ) {
        printf( "failed to drop db\n" );
        exit( 1 );
    }

    /* create the db */
    mongo_insert( conn, DB ".creation", bson_shared_empty(), NULL );
    ASSERT( !mongo_cmd_get_last_error( conn, DB, NULL ) );
}
Пример #23
0
/* Async worker: inserts `data` into this collection's namespace.
 * retVal receives the driver status from mongo_insert.
 * Returns 0, or an error when called synchronously or after the
 * owning database has gone away. */
result_t MongoCollection::_insert(const bson* data, int32_t& retVal, AsyncEvent* ac)
{
    /* This operation must run on the async worker, never synchronously. */
    if (ac->isSync())
        return CHECK_ERROR(CALL_E_NOSYNC);

    /* Take a strong reference to the owning database for the call. */
    obj_ptr<MongoDB> db(m_db);
    if (!db)
        return CHECK_ERROR(CALL_E_INVALID_CALL);

    retVal = mongo_insert(db->m_conn, m_ns.c_str(), data, NULL);
    return 0;
}
Пример #24
0
/* Verifies that documents larger than the server's max BSON size are
 * rejected (single and batch insert) on a direct connection.
 * Returns 0 on success; exits on connection failure. */
int test_insert_limits( void ) {
    char version[10];
    mongo conn[1];
    int i;
    char key[10];
    bson b[1], b2[1];
    bson *objs[2];

    /* Test the default max BSON size. */
    mongo_init( conn );
    ASSERT( conn->max_bson_size == MONGO_DEFAULT_MAX_BSON_SIZE );

    /* We'll perform the full test if we're running v2.0 or later. */
    if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) {
        mongo_destroy( conn );  /* Fix: was leaked on this path. */
        return 0;
    }

    if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) {
        printf( "failed to connect\n" );
        exit( 1 );
    }

    ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE );

    /* Build a document guaranteed to exceed the server's max BSON size. */
    bson_init( b );
    for(i=0; i<1200000; i++) {
        sprintf( key, "%d", i + 10000000 );
        bson_append_int( b, key, i );
    }
    bson_finish( b );

    ASSERT( bson_size( b ) > conn->max_bson_size );

    ASSERT( mongo_insert( conn, "test.foo", b, NULL ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    mongo_clear_errors( conn );
    ASSERT( conn->err == 0 );

    bson_init( b2 );
    bson_append_int( b2, "foo", 1 );
    bson_finish( b2 );

    objs[0] = b;
    objs[1] = b2;

    /* A batch containing one oversized document must also be rejected. */
    ASSERT( mongo_insert_batch( conn, "test.foo", (const bson **)objs, 2,
          NULL, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_BSON_TOO_LARGE );

    /* Fix: release BSON buffers and the connection (previously leaked). */
    bson_destroy( b );
    bson_destroy( b2 );
    mongo_destroy( conn );

    return 0;
}
Пример #25
0
/* Inserts n documents { a: 0 } .. { a: n-1 } into test.cursors. */
void insert_sample_data( mongo *conn, int n ) {
    int i;
    bson doc;

    for( i = 0; i < n; i++ ) {
        bson_init( &doc );
        bson_append_int( &doc, "a", i );
        bson_finish( &doc );
        mongo_insert( conn, "test.cursors", &doc, NULL );
        bson_destroy( &doc );
    }
}
Пример #26
0
/* Inserts a key/value/comment document into the namespace `db`,
 * unless a document with the same key already exists.
 * The optional printf-style fmt/... builds the comment field.
 * Returns 0 on success, -1 on bad arguments or duplicate key. */
int
mongodb_insert_key(bot_t * bot, char *db, char *key, char *value,
		   char *fmt, ...)
{
	bson b;
	mongo_cursor cursor;
	va_list ap;
	char buf[1024], *buf_ptr = "NULL";

	if (!db || !key || !value) {
		return -1;
	}

	debug(bot, "mongodb_insert_key: Entered\n");

	/* Format the optional comment text. */
	if (fmt) {
		bz(buf);
		va_start(ap, fmt);
		vsnprintf_buf(buf, fmt, ap);
		va_end(ap);
		buf_ptr = buf;
	}

	/* Query for an existing document with this key. */
	bson_init(&b);
	bson_append_string(&b, "key", key);
	bson_finish(&b);
	mongo_cursor_init(&cursor, &gi->mongo_conn, db);
	mongo_cursor_set_query(&cursor, &b);

	if (mongo_cursor_next(&cursor) == MONGO_OK) {
		debug(bot, "mongodb_insert_key: Key already exist\n");
		bson_destroy(&b);
		mongo_cursor_destroy(&cursor);
		return -1;
	}

	/* Fix: release the query document before reusing b for the
	 * insert; re-initializing without destroying leaked its buffer. */
	bson_destroy(&b);

	bson_init(&b);
	bson_append_string(&b, "key", key);
	bson_append_string(&b, "value", value);
	bson_append_string(&b, "comment", buf_ptr);

	bson_finish(&b);

	mongo_insert(&gi->mongo_conn, db, &b);

	bson_destroy(&b);
	mongo_cursor_destroy(&cursor);

	return 0;
}
Пример #27
0
/* Stores a local file (or stdin when filename is "-") as a GridFS
 * file: reads it in DEFAULT_CHUNK_SIZE pieces, inserts each chunk
 * into the chunks namespace, then inserts the metadata document.
 * Returns the result of gridfs_insert_file, or MONGO_ERROR when the
 * file cannot be opened. */
int gridfs_store_file( gridfs *gfs, const char *filename,
                       const char *remotename, const char *contenttype ) {

    char buffer[DEFAULT_CHUNK_SIZE];
    FILE *fd;
    bson_oid_t id;
    int chunkNumber = 0;
    gridfs_offset length = 0;
    gridfs_offset chunkLen = 0;
    bson *oChunk;

    /* Open the file and the correct stream */
    if ( strcmp( filename, "-" ) == 0 ) fd = stdin;
    else {
        fd = fopen( filename, "rb" );
        if (fd == NULL)
            return MONGO_ERROR;
    }

    /* Generate and append an oid*/
    bson_oid_gen( &id );

    /* Insert the file chunk by chunk */
    /* NOTE(review): the do-while runs once even when the first fread
     * returns 0, so an empty file stores one zero-length chunk --
     * verify whether that is intended. */
    chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    do {
        oChunk = chunk_new( id, chunkNumber, buffer, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL );
        chunk_free( oChunk );
        length += chunkLen;
        chunkNumber++;
        chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    }
    while ( chunkLen != 0 );

    /* Close the file stream */
    if ( fd != stdin ) fclose( fd );

    /* Large files Assertion */
    /* assert(length <= 0xffffffff); */

    /* Optional Remote Name */
    if ( remotename == NULL || *remotename == '\0' ) {
        remotename = filename;
    }

    /* Inserts file's metadata */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
Пример #28
0
/* Builds and inserts the GridFS "files" metadata document for the
 * chunks already stored under `id`: runs the server-side filemd5
 * command, then writes _id, filename, length, chunkSize, uploadDate,
 * md5, contentType, and the gridfile's metadata document.
 * Returns the driver status of the final insert (or of the failed
 * filemd5 command). */
static int gridfs_insert_file2( gridfs *gfs, const char *name,
                                const bson_oid_t id, gridfs_offset length,
                                const char *contenttype, gridfile* gfile ) {
    bson command;
    bson ret;
    bson res;
    bson_iterator it;
    int result;
    int64_t d;

    /* Check run md5 */
    bson_init( &command );
    bson_append_oid( &command, "filemd5", &id );
    bson_append_string( &command, "root", gfs->prefix );
    bson_finish( &command );
    result = mongo_run_command( gfs->client, gfs->dbname, &command, &res );
    bson_destroy( &command );
    if (result != MONGO_OK)
        return result;

    /* Create and insert BSON for file metadata */
    bson_init( &ret );
    bson_append_oid( &ret, "_id", &id );
    if ( name != NULL && *name != '\0' ) {
        bson_append_string( &ret, "filename", name );
    }
    bson_append_long( &ret, "length", length );
    bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE );
    /* uploadDate is milliseconds since the epoch. */
    d = ( bson_date_t )1000*time( NULL );
    bson_append_date( &ret, "uploadDate", d);
    /* NOTE(review): the bson_find result is not checked; if the filemd5
     * reply lacks "md5" the iterator is used unpositioned -- verify. */
    bson_find( &it, &res, "md5" );
    bson_append_string( &ret, "md5", bson_iterator_string( &it ) );
    bson_destroy( &res );
    if ( contenttype != NULL && *contenttype != '\0' ) {
        bson_append_string( &ret, "contentType", contenttype );
    }

    bson_append_bson(&ret, "metadata", gfile->meta);

    bson_finish( &ret );
    result = mongo_insert( gfs->client, gfs->files_ns, &ret );
    bson_destroy( &ret );

    return result;
}
Пример #29
0
/* Add a new DLR entry to MongoDB.
 * Takes a connection from the pool, builds a document from the entry's
 * fields (status initialized to 0), and inserts it into the configured
 * namespace. The entry is destroyed in all cases; the pool connection
 * is returned after the insert attempt. */
static void dlr_mongodb_add(struct dlr_entry *entry)
{
    DBPoolConn *pconn;
    bson b;
    bson_buffer buf;
    mongo_connection *conn = NULL;

    pconn = dbpool_conn_consume(pool);
    if (pconn == NULL) {
        /* No connection available: drop the entry without storing it. */
        dlr_entry_destroy(entry);
        return;
    }
    conn = (mongo_connection*)pconn->conn;

    bson_buffer_init(&buf);
    bson_append_new_oid(&buf, "_id");

    /* Field names come from the configurable field-name mapping. */
    bson_append_string(&buf, octstr_get_cstr(fields->field_smsc), octstr_get_cstr(entry->smsc));
    bson_append_string(&buf, octstr_get_cstr(fields->field_ts), octstr_get_cstr(entry->timestamp));
    bson_append_string(&buf, octstr_get_cstr(fields->field_src), octstr_get_cstr(entry->source));
    bson_append_string(&buf, octstr_get_cstr(fields->field_dst), octstr_get_cstr(entry->destination));
    bson_append_string(&buf, octstr_get_cstr(fields->field_serv), octstr_get_cstr(entry->service));
    bson_append_string(&buf, octstr_get_cstr(fields->field_url), octstr_get_cstr(entry->url));
    bson_append_string(&buf, octstr_get_cstr(fields->field_account), octstr_get_cstr(entry->account));
    bson_append_string(&buf, octstr_get_cstr(fields->field_binfo), octstr_get_cstr(entry->binfo));
    bson_append_int(&buf, octstr_get_cstr(fields->field_mask), entry->mask);
    bson_append_string(&buf, octstr_get_cstr(fields->field_boxc), octstr_get_cstr(entry->boxc_id));
    /* New entries start with delivery status 0 (not yet reported). */
    bson_append_int(&buf, octstr_get_cstr(fields->field_status), 0);

    bson_from_buffer(&b, &buf);

    /* TODO: namespace support */
    MONGO_TRY {
        mongo_insert(conn, mongodb_namespace, &b);
    } MONGO_CATCH {
        mongodb_error("dlr_mongodb_insert", conn->exception.type);
    }

    dbpool_conn_produce(pconn);

    bson_destroy(&b);
    dlr_entry_destroy(entry);
}
Пример #30
0
/* Flushes any buffered chunk data and writes the file's metadata
 * document, completing a gridfile writer session.
 * Returns the BSON produced by gridfs_insert_file. */
bson gridfile_writer_done( gridfile* gfile )
{

  /* write any remaining pending chunk data.
   * pending data will always take up less than one chunk
   */
  bson* oChunk;
  if( gfile->pending_data )
  {
    oChunk = chunk_new(gfile->id, gfile->chunk_num, gfile->pending_data, gfile->pending_len);
    mongo_insert(gfile->gfs->client, gfile->gfs->chunks_ns, oChunk);
    chunk_free(oChunk);
    free(gfile->pending_data);
    gfile->length += gfile->pending_len;
    /* Fix: clear the freed buffer so a repeated call cannot double
     * free or re-insert stale data. */
    gfile->pending_data = NULL;
    gfile->pending_len = 0;
  }

  /* insert into files collection */
  return gridfs_insert_file(gfile->gfs, gfile->remote_name, gfile->id,
      gfile->length, gfile->content_type);
}