Example #1
void bson_iterator_subobject(const bson_iterator * i, bson * sub){
    bson_init(sub, (char*)bson_iterator_value(i), 0);
}
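A minimal usage sketch of the same idea with the later legacy API (bson_iterator_subobject_init, as used in Example #8 below): with a copy flag of 0 the sub-object borrows the parent's buffer, so the parent bson must outlive it.

/* Sketch: extract the embedded "address" document without copying it. */
bson b, sub;
bson_iterator it;

bson_init( &b );
bson_append_start_object( &b, "address" );
bson_append_string( &b, "street", "59 18th St." );
bson_append_finish_object( &b );
bson_finish( &b );

bson_find( &it, &b, "address" );
bson_iterator_subobject_init( &it, &sub, 0 );  /* 0: borrow b's buffer */
bson_print( &sub );
bson_destroy( &sub );   /* safe: sub owns no data */
bson_destroy( &b );     /* destroy the owner last */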
Example #2
static void
test_mongoc_matcher_basic (void)
{
   bson_t matcher_query;
   bson_t *query;
   bson_t *to_match;
   bson_t *should_fail;
   bson_error_t error;

   bson_init(&matcher_query);

   query = BCON_NEW(
      "city", "New York",
      "state", "New York",
      "favorite color", "blue",
      "name", "{", "$not", "invalid", "}",
//      "zip", "{", "$in", "[", BCON_INT32(11201), BCON_INT32(90210), "]", "}",
      "$or", "[",
         "{", "age", "{", "$lt", BCON_INT32(18), "}", "}",
         "{", "age", "{", "$gt", BCON_INT32(45), "}", "}",
      "]"
   );

   mongoc_matcher_t * matcher = mongoc_matcher_new (query, &error);

   assert (matcher);

   _mongoc_matcher_op_to_bson(matcher->optree, &matcher_query);

#if 0
   {
      char *out = bson_as_json(&matcher_query, NULL);
      printf("bson: %s\n", out);
      bson_free(out);   /* strings from bson_as_json are freed with bson_free */
   }
#endif

   to_match = BCON_NEW(
      "city", "New York",
      "state", "New York",
      "favorite color", "blue",
      "zip", BCON_INT32(11201),
      "age", BCON_INT32(65)
   );

   assert(mongoc_matcher_match(matcher, to_match));

   should_fail = BCON_NEW(
      "city", "New York",
      "state", "New York",
      "favorite color", "blue",
      "zip", BCON_INT32(99999),
      "age", BCON_INT32(30)
   );

   assert(! mongoc_matcher_match(matcher, should_fail));

   bson_destroy(&matcher_query);
   bson_destroy(query);
   bson_destroy(to_match);
   bson_destroy(should_fail);

   mongoc_matcher_destroy(matcher);
}
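For reference, BCON_NEW builds a document from a token stream: bare strings alternate as keys and UTF-8 values, "{" and "}" delimit subdocuments, "[" and "]" delimit arrays, and BCON_INT32 and friends wrap typed values. A minimal sketch:

/* Equivalent to:
   { "name": "Babs", "address": { "zip": 10010 }, "tags": [ "a", "b" ] } */
bson_t *doc = BCON_NEW ("name", BCON_UTF8 ("Babs"),
                        "address", "{", "zip", BCON_INT32 (10010), "}",
                        "tags", "[", "a", "b", "]");
char *json = bson_as_json (doc, NULL);
printf ("%s\n", json);
bson_free (json);
bson_destroy (doc);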
Example #3
void
_mongoc_write_result_merge_legacy (mongoc_write_result_t  *result,  /* IN */
                                   mongoc_write_command_t *command, /* IN */
                                   const bson_t           *reply,   /* IN */
                                   mongoc_error_code_t     default_code,
                                   uint32_t                offset)
{
   const bson_value_t *value;
   bson_t holder, write_errors, child;
   bson_iter_t iter;
   bson_iter_t ar;
   bson_iter_t citer;
   const char *err = NULL;
   int32_t code = 0;
   int32_t n = 0;
   int32_t upsert_idx = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      n = bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, reply, "err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      err = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find (&iter, reply, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = bson_iter_int32 (&iter);
   }

   if (code || err) {
      if (!code) {
         code = default_code;
      }

      if (!err) {
         err = "unknown error";
      }

      bson_set_error (&result->error,
                      MONGOC_ERROR_COLLECTION,
                      code,
                      "%s", err);
      result->failed = true;

      bson_init(&holder);
      bson_append_array_begin(&holder, "0", 1, &write_errors);
      bson_append_document_begin(&write_errors, "0", 1, &child);
      bson_append_int32(&child, "index", 5, 0);
      bson_append_int32(&child, "code", 4, code);
      bson_append_utf8(&child, "errmsg", 6, err, -1);
      bson_append_document_end(&write_errors, &child);
      bson_append_array_end(&holder, &write_errors);
      bson_iter_init(&iter, &holder);
      bson_iter_next(&iter);

      _mongoc_write_result_merge_arrays (offset, result, &result->writeErrors,
                                         &iter);

      bson_destroy(&holder);
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      if (n) {
         result->nInserted += n;
      }
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += n;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:
      if (bson_iter_init_find (&iter, reply, "upserted") &&
         !BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         value = bson_iter_value (&iter);
         _mongoc_write_result_append_upsert (result, offset, value);
      } else if (bson_iter_init_find (&iter, reply, "upserted") &&
                 BSON_ITER_HOLDS_ARRAY (&iter)) {
         result->nUpserted += n;
         if (bson_iter_recurse (&iter, &ar)) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "_id")) {
                  value = bson_iter_value (&citer);
                  _mongoc_write_result_append_upsert (result,
                                                      offset + upsert_idx,
                                                      value);
                  upsert_idx++;
               }
            }
         }
      } else if ((n == 1) &&
                 bson_iter_init_find (&iter, reply, "updatedExisting") &&
                 BSON_ITER_HOLDS_BOOL (&iter) &&
                 !bson_iter_bool (&iter)) {
         result->nUpserted += n;
      } else {
         result->nMatched += n;
      }
      break;
   default:
      break;
   }

   result->omit_nModified = true;

   EXIT;
}
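For reference, the legacy reply parsed above is a getLastError-style document; the fields consulted have roughly this shape (values illustrative):

/* { "n"               : 1,
     "err"             : "E11000 duplicate key error index ...",
     "code"            : 11000,
     "updatedExisting" : false,
     "upserted"        : <an _id value, or an array of { "index", "_id" } docs> } */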
Example #4
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size =
      mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];


   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   bson_append_array_begin (&cmd,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;
      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead,
                                               key_len + len + 2 + ar.len,
                                               i,
                                               max_bson_obj_size,
                                               max_write_batch_size)) {
         has_more = true;
         break;
      }
      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      if (!mongoc_cmd_parts_set_write_concern (
             &parts,
             write_concern,
             server_stream->sd->max_wire_version,
             error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
      if (!mongoc_cmd_parts_append_opts (
             &parts, &iter, server_stream->sd->max_wire_version, error)) {
         bson_reader_destroy (reader);
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }
   bson_reader_destroy (reader);

   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);
   EXIT;
}
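The batch split above hinges on _mongoc_write_command_will_overflow, which is internal to the driver. The sketch below shows the kind of check it performs; the function name, parameter names, and the 16 KB slack are assumptions for illustration, not the driver's exact code.

/* Sketch: split the batch when the command would exceed the server's BSON
   size limit (plus slack for the command wrapper), or when the document
   count would exceed maxWriteBatchSize. */
static bool
will_overflow_sketch (uint32_t len_so_far,   /* bytes already in the array */
                      uint32_t document_len, /* length of the next document */
                      uint32_t n_written,    /* documents already appended */
                      int32_t  max_bson_size,
                      int32_t  max_batch_size)
{
   uint32_t max_cmd_size = (uint32_t) max_bson_size + 16 * 1024;

   return (len_so_far + document_len > max_cmd_size) ||
          (n_written + 1 > (uint32_t) max_batch_size);
}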
Example #5
INT32 main ( INT32 argc, CHAR **argv )
{
   // initialize local variables
   CHAR *pHostName                   = NULL ;
   CHAR *pServiceName                = NULL ;
   CHAR *pUsr                        = NULL ;
   CHAR *pPasswd                     = NULL ;
   // define a connection handle; used to connect to the database
   sdbConnectionHandle connection    = 0 ;
   // define a collection space handle
   sdbCSHandle collectionspace       = 0 ;
   // define a collection handle
   sdbCollectionHandle collection    = 0 ;
   // define a cursor handle for query
   sdbCursorHandle cursor            = 0 ;
   sdbCursorHandle cursor1           = 0 ;
   // define local variables
   // initialize them before use
   INT32 rc    = SDB_OK ;
   bson obj ;
   bson rule ;
   bson record ;
   bson updatecondition ;
   bson tmp ;
   bson_iterator it ;

   // verify syntax
   if ( 5 != argc )
   {
      displaySyntax ( (CHAR*)argv[0] ) ;
      exit ( 0 ) ;
   }
   // read argument
   pHostName    = (CHAR*)argv[1] ;
   pServiceName = (CHAR*)argv[2] ;
   pUsr         = (CHAR*)argv[3] ;
   pPasswd      = (CHAR*)argv[4] ;

   // connect to database
   rc = sdbConnect ( pHostName, pServiceName, pUsr, pPasswd, &connection ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to connet to database, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   // create collection space
   rc = sdbCreateCollectionSpace ( connection, COLLECTION_SPACE_NAME,
                                   SDB_PAGESIZE_4K, &collectionspace ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to create collection space, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }

   // create a collection in the specified collection space.
   rc = sdbCreateCollection ( collectionspace, COLLECTION_NAME, &collection ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to create collection, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   
   // prepare record
   bson_init( &record ) ;
   bson_append_int( &record, "age", 10 ) ;
   rc = bson_finish( &record ) ;
   CHECK_RC ( rc, "Failed to build bson" ) ;

   // insert record into database
   rc = sdbInsert( collection, &record ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to insert, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   bson_destroy( &record ) ;
   // query all the records in this collection
   rc = sdbQuery(collection, NULL, NULL, NULL, NULL, 0, -1, &cursor ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to query, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   // get the record from cursor
   bson_init(&obj) ;
   bson_init(&tmp) ;
   rc=sdbNext( cursor, &obj ) ;
   if ( rc!= SDB_OK )
   {
      printf("Failed to get next, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   rc = bson_copy( &tmp, &obj ) ;
   CHECK_RC ( rc, "Failed to copy bson" ) ;
   printf("Before update, the record is:\n") ;
   bson_print( &tmp ) ;
   // set the update condition using "_id"
   bson_find( &it, &obj, "_id" ) ;
   bson_init( &updatecondition ) ;
   bson_append_element( &updatecondition, NULL, &it ) ;
   rc = bson_finish( &updatecondition ) ;
   CHECK_RC ( rc, "Failed to build bson" ) ;
   // set the update rule
   bson_init( &rule ) ;
   bson_append_start_object ( &rule, "$set" ) ;
   bson_append_int ( &rule, "age", 99 ) ;
   bson_append_finish_object ( &rule ) ;
   rc = bson_finish ( &rule ) ;
   CHECK_RC ( rc, "Failed to build bson" ) ;

   // update
   rc = sdbUpdate(collection, &rule, &updatecondition, NULL ) ;
   if ( rc!=SDB_OK )
   {
      printf("Failed to update, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   bson_destroy(&obj) ;
   bson_destroy(&rule) ;
   bson_destroy(&updatecondition) ;
   // query all the records in this collection again
   rc = sdbQuery(collection, NULL, NULL, NULL, NULL, 0, -1, &cursor1 ) ;
   if( rc!=SDB_OK )
   {
      printf("Failed to query, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   // get record from cursor1
   bson_init(&obj) ;
   rc=sdbNext( cursor1, &obj ) ;
   if ( rc!= SDB_OK )
   {
      printf("Failed to get next, rc = %d" OSS_NEWLINE, rc ) ;
      goto error ;
   }
   printf("after update, the record is:\n") ;
   bson_print( &obj ) ;
   bson_destroy(&obj) ;

done:
   // drop collection space
   rc = sdbDropCollectionSpace( connection, COLLECTION_SPACE_NAME ) ;
   if ( rc != SDB_OK )
   {
     printf("Failed to drop collection space, rc = %d" OSS_NEWLINE, rc ) ;
   }
   // disconnect the connection
   sdbDisconnect ( connection ) ;
   // release the local variables
   sdbReleaseCursor ( cursor ) ;
   sdbReleaseCursor ( cursor1 ) ;
   sdbReleaseCollection ( collection ) ;
   sdbReleaseCS ( collectionspace ) ;
   sdbReleaseConnection ( connection ) ;
   return 0;
error:
   goto done ;
}
Example #6
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    // --------------- xulin add start -------------------
    char* ml_args;
    char* arg;
    unsigned int add_arg;
    unsigned int add_len; 
    // --------------- xulin add end -------------------
    char* value;
    ngx_http_mongo_connection_t *mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;

    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor ** cursors;
    gridfs_offset chunk_len;
    const char * chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE; 
    volatile ngx_uint_t ecounter = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    
    if ( !mongo_conn->conn.connected
         && (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
             || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
        if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
        return NGX_HTTP_SERVICE_UNAVAILABLE;
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1 + request->args.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    
    // ------------------------------------ xulin add start --------------------------------------
    if (request->args.len > 0)
    {  
        ml_args = (char*)malloc(sizeof(char) * (request->args.len + 1));
        if (ml_args == NULL) {
            free(value);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        memcpy(ml_args, request->args.data, request->args.len);
        ml_args[request->args.len] = '\0';
        
        add_len = full_uri.len - location_name.len;
        memcpy(value + add_len, "?", 1);
        add_len += 1;
        arg = strtok(ml_args, "&");
        while (arg != NULL)
        {
            add_arg = 1;
            if (strstr(arg, "xc_md5") != NULL)
            {
                 add_arg = 0;
            }
            else if (strstr(arg, "_xingcloud_t") != NULL)
            {     
                 add_arg = 0;
            }        
            
            if (add_arg == 1)
            {
                 memcpy(value + add_len, arg, strlen(arg));
                 add_len += strlen(arg);
                 memcpy(value + add_len, "&", 1);
                 add_len += 1;
            }
            
            arg = strtok(NULL, "&");
        }
        
        free(ml_args);
        if (value[add_len - 1] == '?' || value[add_len - 1] == '&')
        {
            value[add_len - 1] = '\0';
        }
        else
        {
            value[add_len] = '\0';
        }
    }
    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, "ml_url = [%s]", value);
    // ------------------------------------ xulin add end --------------------------------------


    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    do {
        e = FALSE;
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK) {
            e = TRUE; ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_init(&query);
    switch (gridfs_conf->type) {
    case  BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
        bson_append_int(&query, (char*)gridfs_conf->field.data, ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    status = gridfs_find_query(&gfs, &query, &gfile);
    
    bson_destroy(&query);
    free(value);

    if(status == MONGO_ERROR) {
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);
    contenttype = (char*)gridfile_get_contenttype(&gfile);

    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- SEND THE HEADERS ---------- //

    request->headers_out.status = NGX_HTTP_OK;
    request->headers_out.content_length_n = length;
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t *b;  
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);  
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }
    
    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified/1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if ( gridfile_get_boolean(&gfile,"gzipped") ) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "%s", gridfile_get_field(&gfile, "gzipped"));
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char *) "Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char *) "gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;

        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);

        return ngx_http_output_filter(request, &out);
    }
    
    cursors = (mongo_cursor **)ngx_pcalloc(request->pool, sizeof(mongo_cursor *) * numchunks);
    if (cursors == NULL) {
      gridfile_destroy(&gfile);
      gridfs_destroy(&gfs);
      return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    
    ngx_memzero( cursors, sizeof(mongo_cursor *) * numchunks);

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
      gridfile_destroy(&gfile);
      gridfs_destroy(&gfs);
      return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {

        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE; ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST 
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if (mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );

        /* Set up the buffer chain */
        buffer->pos = (u_char*)chunk_data;
        buffer->last = (u_char*)chunk_data + chunk_len;
        buffer->memory = 1;
        buffer->last_buf = (i == numchunks-1);
        out.buf = buffer;
        out.next = NULL;

        /* Serve the Chunk */
        rc = ngx_http_output_filter(request, &out);

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);

    return rc;
}
Example #7
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t           *client,
                    const char                *db_and_collection,
                    mongoc_query_flags_t       flags,
                    uint32_t                   skip,
                    uint32_t                   limit,
                    uint32_t                   batch_size,
                    bool                       is_command,
                    const bson_t              *query,
                    const bson_t              *fields,
                    const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_mode_t mode;
   mongoc_cursor_t *cursor;
   const bson_t *tags;
   const char *mode_str;
   bson_t child;

   ENTRY;

   BSON_ASSERT(client);
   BSON_ASSERT(db_and_collection);
   BSON_ASSERT(query);

   /* we can't have exhaust queries with limits */
   BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && limit));

   /* we can't have exhaust queries with sharded clusters */
   BSON_ASSERT (!((flags & MONGOC_QUERY_EXHAUST) && client->cluster.isdbgrid));

   /*
    * Cursors execute their query lazily. This sadly means that we must copy
    * some extra data around between the bson_t structures. This should be
    * small in most cases, so it reduces to a pure memcpy. The benefit to this
    * design is simplified error handling by API consumers.
    */

   cursor = bson_malloc0(sizeof *cursor);
   cursor->client = client;
   bson_strcpy_w_null(cursor->ns, db_and_collection, sizeof cursor->ns);
   cursor->nslen = (uint32_t)strlen(cursor->ns);
   cursor->flags = flags;
   cursor->skip = skip;
   cursor->limit = limit;
   cursor->batch_size = batch_size;

   cursor->is_command = is_command;

   if (!bson_has_field (query, "$query")) {
      bson_init (&cursor->query);
      bson_append_document (&cursor->query, "$query", 6, query);
   } else {
      bson_copy_to (query, &cursor->query);
   }

   if (read_prefs) {
      cursor->read_prefs = mongoc_read_prefs_copy (read_prefs);

      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);

      if (mode != MONGOC_READ_PRIMARY) {
         flags |= MONGOC_QUERY_SLAVE_OK;

         if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) {
            bson_append_document_begin (&cursor->query, "$readPreference",
                                        15, &child);
            mode_str = _mongoc_cursor_get_read_mode_string (mode);
            bson_append_utf8 (&child, "mode", 4, mode_str, -1);
            if (tags) {
               bson_append_array (&child, "tags", 4, tags);
            }
            bson_append_document_end (&cursor->query, &child);
         }
      }
   }

   if (fields) {
      bson_copy_to(fields, &cursor->fields);
   } else {
      bson_init(&cursor->fields);
   }

   _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL);

   mongoc_counter_cursors_active_inc();

   RETURN(cursor);
}
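For a non-primary read mode, the wrapped query built above ends up shaped roughly like this (illustrative only, not driver output):

/* { "$query"          : { ...the caller's query... },
     "$readPreference" : { "mode": "secondaryPreferred",
                           "tags": [ { "dc": "ny" } ] } } */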
Example #8
int main() {
    bson b, sub;
    bson_iterator it;
    mongo conn;
    mongo_cursor cursor;

    /* Create a rich document like this one:
     *
     * { _id: ObjectId("4d95ea712b752328eb2fc2cc"),
     *   user_id: ObjectId("4d95ea712b752328eb2fc2cd"),
     *
     *   items: [
     *     { sku: "col-123",
     *       name: "John Coltrane: Impressions",
     *       price: 1099,
     *     },
     *
     *     { sku: "young-456",
     *       name: "Larry Young: Unity",
     *       price: 1199
     *     }
     *   ],
     *
     *   address: {
     *     street: "59 18th St.",
     *     zip: 10010
     *   },
     *
     *   total: 2298
     * }
     */
    bson_init( &b );
    bson_append_new_oid( &b, "_id" );
    bson_append_new_oid( &b, "user_id" );

    bson_append_start_array( &b, "items" );
    bson_append_start_object( &b, "0" );
    bson_append_string( &b, "name", "John Coltrane: Impressions" );
    bson_append_int( &b, "price", 1099 );
    bson_append_finish_object( &b );

    bson_append_start_object( &b, "1" );
    bson_append_string( &b, "name", "Larry Young: Unity" );
    bson_append_int( &b, "price", 1199 );
    bson_append_finish_object( &b );
    bson_append_finish_object( &b );

    bson_append_start_object( &b, "address" );
    bson_append_string( &b, "street", "59 18th St." );
    bson_append_int( &b, "zip", 10010 );
    bson_append_finish_object( &b );

    bson_append_int( &b, "total", 2298 );

    /* Finish the BSON obj. */
    bson_finish( &b );
    printf("Here's the whole BSON object:\n");
    bson_print( &b );

    /* Advance to the 'items' array */
    bson_find( &it, &b, "items" );

    /* Get the subobject representing items */
    bson_iterator_subobject_init( &it, &sub, 0 );

    /* Now iterate that object */
    printf("And here's the inner sub-object by itself.\n");
    bson_print( &sub );
    bson_destroy( &sub );

    /* Now make a connection to MongoDB. */
    if( mongo_client( &conn, "127.0.0.1", 27017 ) != MONGO_OK ) {
      switch( conn.err ) {
        case MONGO_CONN_SUCCESS:
          break;
        case MONGO_CONN_NO_SOCKET:
          printf( "FAIL: Could not create a socket!\n" );
          break;
        case MONGO_CONN_FAIL:
          printf( "FAIL: Could not connect to mongod. Make sure it's listening at 127.0.0.1:27017.\n" );
          break;
        default:
          printf( "MongoDB connection error number %d.\n", conn.err );
          break;
      }

      exit( 1 );
    }

    /* Insert the sample BSON document. */
    if( mongo_insert( &conn, "test.records", &b, NULL ) != MONGO_OK ) {
      printf( "FAIL: Failed to insert document with error %d\n", conn.err );
      exit( 1 );
    }

    /* Query for the sample document. */
    mongo_cursor_init( &cursor, &conn, "test.records" );
    mongo_cursor_set_query( &cursor, bson_shared_empty() );
    if( mongo_cursor_next( &cursor ) != MONGO_OK ) {
      printf( "FAIL: Failed to find inserted document." );
      exit( 1 );
    }

    printf( "Found saved BSON object:\n" );
    bson_print( (bson *)mongo_cursor_bson( &cursor ) );

    mongo_cmd_drop_collection( &conn, "test", "records", NULL );
    mongo_cursor_destroy( &cursor );
    bson_destroy( &b );
    mongo_destroy( &conn );

    return 0;
}
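To walk the items array element by element instead of printing it whole, the legacy iterator helpers can be used; a minimal sketch (assumes bson_iterator_subiterator is available in this driver version, and that b is the document built above):

bson_iterator it, sub_it;

bson_find( &it, &b, "items" );
bson_iterator_subiterator( &it, &sub_it );   /* iterate the array in place */
while ( bson_iterator_next( &sub_it ) != BSON_EOO ) {
    bson item;
    bson_iterator_subobject_init( &sub_it, &item, 0 );
    bson_print( &item );   /* each element: { sku, name, price } */
    bson_destroy( &item );
}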
Example #9
BsonObj::BsonObj() {
	bson_init(this->obj);
	bson_finish(this->obj);
}
Example #10
/**
 * mongoc_database_add_user:
 * @database: A #mongoc_database_t.
 * @username: A string containing the username.
 * @password: (allow-none): A string containing password, or NULL.
 * @roles: (allow-none): An optional bson_t of roles.
 * @custom_data: (allow-none): An optional bson_t of data to store.
 * @error: (out) (allow-none): A location for a bson_error_t or %NULL.
 *
 * Creates a new user with access to @database.
 *
 * Returns: true if the user was created; otherwise false and @error is set.
 * Side effects: None.
 */
bool
mongoc_database_add_user (mongoc_database_t *database,
                          const char        *username,
                          const char        *password,
                          const bson_t      *roles,
                          const bson_t      *custom_data,
                          bson_error_t      *error)
{
   bson_error_t lerror;
   bson_t cmd;
   bson_t ar;
   char *input;
   char *hashed_password;
   bool ret = false;

   ENTRY;

   BSON_ASSERT (database);
   BSON_ASSERT (username);

   /*
    * CDRIVER-232:
    *
    * Perform a (slow and tedious) round trip to mongod to determine if
    * we can safely call createUser. Otherwise, we will fallback and
    * perform legacy insertion into users collection.
    */
   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "usersInfo", username);
   ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
   bson_destroy (&cmd);

   if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
      ret = mongoc_database_add_user_legacy (database, username, password, error);
   } else if (ret) {
      input = bson_strdup_printf ("%s:mongo:%s", username, password);
      hashed_password = _mongoc_hex_md5 (input);
      bson_free (input);

      bson_init (&cmd);
      BSON_APPEND_UTF8 (&cmd, "createUser", username);
      BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
      BSON_APPEND_BOOL (&cmd, "digestPassword", false);
      if (custom_data) {
         BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
      }
      if (roles) {
         BSON_APPEND_ARRAY (&cmd, "roles", roles);
      } else {
         bson_append_array_begin (&cmd, "roles", 5, &ar);
         bson_append_array_end (&cmd, &ar);
      }

      ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);

      if (!ret && error) fprintf (stderr, "%s\n", error->message);

      bson_destroy (&cmd);
   } else if (error) {
      memcpy (error, &lerror, sizeof *error);
   }

   RETURN (ret);
}
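A minimal usage sketch (assumes an initialized mongoc_client_t *client; the database name and credentials are illustrative):

mongoc_database_t *db = mongoc_client_get_database (client, "test");
bson_error_t error;

if (!mongoc_database_add_user (db, "alice", "s3cret", NULL, NULL, &error)) {
   fprintf (stderr, "add_user failed: %s\n", error.message);
}
mongoc_database_destroy (db);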
Example #11
mongoc_collection_t *
mongoc_database_create_collection (mongoc_database_t *database,
                                   const char        *name,
                                   const bson_t      *options,
                                   bson_error_t      *error)
{
   mongoc_collection_t *collection = NULL;
   bson_iter_t iter;
   bson_t cmd;
   bool capped = false;

   bson_return_val_if_fail (database, NULL);
   bson_return_val_if_fail (name, NULL);

   if (strchr (name, '$')) {
      bson_set_error (error,
                      MONGOC_ERROR_NAMESPACE,
                      MONGOC_ERROR_NAMESPACE_INVALID,
                      "The namespace \"%s\" is invalid.",
                      name);
      return NULL;
   }

   if (options) {
      if (bson_iter_init_find (&iter, options, "capped")) {
         if (!BSON_ITER_HOLDS_BOOL (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"capped\" must be a boolean.");
            return NULL;
         }
         capped = bson_iter_bool (&iter);
      }

      if (bson_iter_init_find (&iter, options, "autoIndexId") &&
          !BSON_ITER_HOLDS_BOOL (&iter)) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "The argument \"autoIndexId\" must be a boolean.");
         return NULL;
      }

      if (bson_iter_init_find (&iter, options, "size")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"size\" must be an integer.");
            return NULL;
         }
         if (!capped) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The \"size\" parameter requires {\"capped\": true}");
            return NULL;
         }
      }

      if (bson_iter_init_find (&iter, options, "max")) {
         if (!BSON_ITER_HOLDS_INT32 (&iter) &&
             !BSON_ITER_HOLDS_INT64 (&iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The argument \"max\" must be an integer.");
            return NULL;
         }
         if (!capped) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "The \"size\" parameter requires {\"capped\": true}");
            return NULL;
         }
      }
   }

   bson_init (&cmd);
   BSON_APPEND_UTF8 (&cmd, "create", name);

   if (options) {
      if (!bson_iter_init (&iter, options)) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "The argument \"options\" is corrupt or invalid.");
         bson_destroy (&cmd);
         return NULL;
      }

      while (bson_iter_next (&iter)) {
         if (!bson_append_iter (&cmd, bson_iter_key (&iter), -1, &iter)) {
            bson_set_error (error,
                            MONGOC_ERROR_COMMAND,
                            MONGOC_ERROR_COMMAND_INVALID_ARG,
                            "Failed to append \"options\" to create command.");
            bson_destroy (&cmd);
            return NULL;
         }
      }
   }

   if (mongoc_database_command_simple (database, &cmd, NULL, NULL, error)) {
      collection = _mongoc_collection_new (database->client,
                                           database->name,
                                           name,
                                           database->read_prefs,
                                           database->write_concern);
   }

   bson_destroy (&cmd);

   return collection;
}
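A minimal usage sketch creating a capped collection, which exercises the capped/size validation above (assumes an initialized mongoc_database_t *database; the collection name and size are illustrative):

bson_error_t error;
bson_t *opts = BCON_NEW ("capped", BCON_BOOL (true),
                         "size", BCON_INT64 (64 * 1024 * 1024));
mongoc_collection_t *coll =
   mongoc_database_create_collection (database, "events", opts, &error);

if (!coll) {
   fprintf (stderr, "create failed: %s\n", error.message);
} else {
   mongoc_collection_destroy (coll);
}
bson_destroy (opts);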
Example #12
static bool
mongoc_database_add_user_legacy (mongoc_database_t *database,
                                 const char        *username,
                                 const char        *password,
                                 bson_error_t      *error)
{
   mongoc_collection_t *collection;
   mongoc_cursor_t *cursor = NULL;
   const bson_t *doc;
   bool ret = false;
   bson_t query;
   bson_t user;
   char *input;
   char *pwd = NULL;

   ENTRY;

   bson_return_val_if_fail(database, false);
   bson_return_val_if_fail(username, false);
   bson_return_val_if_fail(password, false);

   /*
    * Users are stored in the <dbname>.system.users virtual collection.
    * However, this will likely change to a command soon.
    */
   collection = mongoc_client_get_collection(database->client,
                                             database->name,
                                             "system.users");
   BSON_ASSERT(collection);

   /*
    * Hash the users password.
    */
   input = bson_strdup_printf("%s:mongo:%s", username, password);
   pwd = _mongoc_hex_md5(input);
   bson_free(input);

   /*
    * Check to see if the user exists. If so, we will update the
    * password instead of inserting a new user.
    */
   bson_init(&query);
   bson_append_utf8(&query, "user", 4, username, -1);
   cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 1, 0,
                                   &query, NULL, NULL);
   if (!mongoc_cursor_next(cursor, &doc)) {
      if (mongoc_cursor_error(cursor, error)) {
         GOTO (failure);
      }
      bson_init(&user);
      bson_append_utf8(&user, "user", 4, username, -1);
      bson_append_bool(&user, "readOnly", 8, false);
      bson_append_utf8(&user, "pwd", 3, pwd, -1);
   } else {
      bson_copy_to_excluding(doc, &user, "pwd", (char *)NULL);
      bson_append_utf8(&user, "pwd", 3, pwd, -1);
   }

   if (!mongoc_collection_save(collection, &user, NULL, error)) {
      GOTO (failure_with_user);
   }

   ret = true;

failure_with_user:
   bson_destroy(&user);

failure:
   if (cursor) {
      mongoc_cursor_destroy(cursor);
   }
   mongoc_collection_destroy(collection);
   bson_destroy(&query);
   bson_free(pwd);

   RETURN (ret);
}
Example #13
static bool
txn_finish (mongoc_client_session_t *session,
            mongoc_txn_intent_t intent,
            bson_t *reply,
            bson_error_t *error)
{
   const char *cmd_name;
   bson_t cmd = BSON_INITIALIZER;
   bson_t opts = BSON_INITIALIZER;
   bson_error_t err_local;
   bson_error_t *err_ptr = error ? error : &err_local;
   bson_t reply_local = BSON_INITIALIZER;
   mongoc_write_err_type_t error_type;
   bool r = false;

   _mongoc_bson_init_if_set (reply);

   cmd_name = (intent == TXN_COMMIT ? "commitTransaction" : "abortTransaction");

   if (!mongoc_client_session_append (session, &opts, err_ptr)) {
      GOTO (done);
   }

   if (session->txn.opts.write_concern) {
      if (!mongoc_write_concern_append (session->txn.opts.write_concern,
                                        &opts)) {
         bson_set_error (err_ptr,
                         MONGOC_ERROR_TRANSACTION,
                         MONGOC_ERROR_TRANSACTION_INVALID_STATE,
                         "Invalid transaction write concern");
         GOTO (done);
      }
   }

   BSON_APPEND_INT32 (&cmd, cmd_name, 1);

   /* will be reinitialized by mongoc_client_write_command_with_opts */
   bson_destroy (&reply_local);
   r = mongoc_client_write_command_with_opts (
      session->client, "admin", &cmd, &opts, &reply_local, err_ptr);

   /* Transactions Spec: "Drivers MUST retry the commitTransaction command once
    * after it fails with a retryable error", same for abort */
   error_type = _mongoc_write_error_get_type (r, err_ptr, &reply_local);
   if (error_type == MONGOC_WRITE_ERR_RETRY) {
      bson_destroy (&reply_local);
      r = mongoc_client_write_command_with_opts (
         session->client, "admin", &cmd, &opts, &reply_local, err_ptr);

      error_type = _mongoc_write_error_get_type (r, err_ptr, &reply_local);
   }

   /* Transactions Spec: "add the UnknownTransactionCommitResult error label
    * when commitTransaction fails with a network error, server selection
    * error, or write concern failed / timeout." */
   if (intent == TXN_COMMIT && reply) {
      if ((!r && err_ptr->domain == MONGOC_ERROR_SERVER_SELECTION) ||
          error_type == MONGOC_WRITE_ERR_RETRY ||
          error_type == MONGOC_WRITE_ERR_WRITE_CONCERN) {
         bson_copy_to_excluding_noinit (
            &reply_local, reply, "errorLabels", NULL);
         copy_labels_plus_unknown_commit_result (&reply_local, reply);
      } else {
         /* maintain invariants: reply & reply_local are valid until the end */
         bson_destroy (reply);
         bson_steal (reply, &reply_local);
         bson_init (&reply_local);
      }

   } else if (intent == TXN_ABORT && !r) {
      /* we won't return an error from abortTransaction, so warn */
      MONGOC_WARNING ("Error in %s: %s", cmd_name, err_ptr->message);
   }

done:
   bson_destroy (&reply_local);
   bson_destroy (&cmd);
   bson_destroy (&opts);
   return r;
}
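From the caller's side, the UnknownTransactionCommitResult label added above is what makes a commit retry loop possible; a minimal sketch (assumes an initialized session with a started transaction):

bson_t reply;
bson_error_t error;

if (!mongoc_client_session_commit_transaction (session, &reply, &error)) {
   if (mongoc_error_has_label (&reply, "UnknownTransactionCommitResult")) {
      /* outcome unknown (network error, etc.): safe to retry the commit */
   }
}
bson_destroy (&reply);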
Example #14
int bson_from_buffer(bson * b, bson_buffer * buf){
    b->err = buf->err;
    bson_buffer_finish(buf);
    return bson_init(b, buf->buf, 1);
}
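A minimal usage sketch assuming the old bson_buffer builder API this helper belongs to (in that era, append calls took a bson_buffer rather than a bson):

bson_buffer buf;
bson b;

bson_buffer_init( &buf );
bson_append_int( &buf, "n", 1 );
bson_from_buffer( &b, &buf );   /* finishes buf and hands ownership to b */
/* ... use b ... */
bson_destroy( &b );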
Example #15
int be_mongo_aclcheck(void *conf, const char *clientid, const char *username, const char *topic, int acc)
{
	struct mongo_backend *handle = (struct mongo_backend *) conf;
	mongoc_collection_t *collection;
	mongoc_cursor_t *cursor;
	bson_error_t error;
	const bson_t *doc;
	bson_iter_t iter;

	bool check = false;
	int match = 0, foundFlag = 0;

	bson_t query;

	bson_init(&query);
	bson_append_utf8(&query, "username", -1, username, -1);

	collection = mongoc_client_get_collection(handle->client, dbName, colName);

	cursor = mongoc_collection_find(collection,
									MONGOC_QUERY_NONE,
									0,
									0,
									0,
									&query,
									NULL,
									NULL);
	
	while (!mongoc_cursor_error (cursor, &error) &&
			mongoc_cursor_more (cursor)) {
		if (foundFlag == 0 && mongoc_cursor_next (cursor, &doc)) {
				bson_iter_init(&iter, doc);
				bson_iter_find(&iter, topicLoc);

				int64_t topId = bson_iter_as_int64(&iter);

				bson_destroy(&query);
				mongoc_cursor_destroy(cursor);
				mongoc_collection_destroy(collection);
				
				bson_init(&query);
				bson_append_int64(&query, topicID, -1, topId);
				collection = mongoc_client_get_collection(handle->client, dbName, topicLoc);
				cursor = mongoc_collection_find(collection,
												MONGOC_QUERY_NONE,
												0,
												0,
												0,
												&query,
												NULL,
												NULL);		
				foundFlag = 1;
		}
		if (foundFlag == 1 && mongoc_cursor_next(cursor, &doc)) {
				
			bson_iter_init(&iter, doc);
			bson_iter_find(&iter, topicLoc);
			uint32_t len;
			const uint8_t *arr;
			bson_iter_array(&iter, &len, &arr);
			bson_t b;

			if (bson_init_static(&b, arr, len)) {
				bson_iter_init(&iter, &b);
				while (bson_iter_next(&iter)) {
			
					char *str = bson_iter_dup_utf8(&iter, &len);

					mosquitto_topic_matches_sub(str, topic, &check);
					if (check) {
							match = 1;
							bson_free(str);
							break;
					}
					bson_free(str);
				}
			}

		}

	}

	if (mongoc_cursor_error (cursor, &error)) {
		fprintf (stderr, "Cursor Failure: %s\n", error.message);
		bson_destroy(&query);
		mongoc_cursor_destroy(cursor);
		mongoc_collection_destroy(collection);
		return 0;
	}
	

	bson_destroy(&query);
	mongoc_cursor_destroy (cursor);
	mongoc_collection_destroy(collection);

	return match;
}
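The matching step relies on libmosquitto's topic matcher, which reports its result through an out-parameter; a minimal sketch:

/* Wildcard subscription matching with libmosquitto. */
bool matches = false;

mosquitto_topic_matches_sub("sensors/+/temp", "sensors/kitchen/temp", &matches);
/* matches is now true; '+' spans exactly one topic level */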
Example #16
/* We can test write concern for update
 * and remove by doing operations on a capped collection. */
static void test_update_and_remove( mongo *conn ) {
    mongo_write_concern wc[1];
    bson *objs[5];
    bson query[1], update[1];
    int i;

    create_capped_collection( conn );

    for( i=0; i<5; i++ ) {
        objs[i] = bson_alloc();
        bson_init( objs[i] );
        bson_append_int( objs[i], "n", i );
        bson_finish( objs[i] );
    }

    ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)objs, 5,
        NULL, 0 ) == MONGO_OK );

    ASSERT( mongo_count( conn, "test", "wc", bson_shared_empty( ) ) == 5 );

    bson_init( query );
    bson_append_int( query, "n", 2 );
    bson_finish( query );

    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    bson_init( update );
        bson_append_start_object( update, "$set" );
            bson_append_string( update, "n", "a big long string" );
        bson_append_finish_object( update );
    bson_finish( update );

    /* Update will appear to succeed with no write concern specified, but doesn't. */
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    /* Remove will appear to succeed with no write concern specified, but doesn't. */
    ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK );
    ASSERT( mongo_find_one( conn, "test.wc", query, bson_shared_empty( ), NULL ) == MONGO_OK );

    mongo_write_concern_init( wc );    
    mongo_write_concern_set_w( wc, 1 );
    mongo_write_concern_finish( wc );

    mongo_clear_errors( conn );
    ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "failing update: objects in a capped ns cannot grow" );

    mongo_clear_errors( conn );
    ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "can't remove from a capped collection" );

    mongo_write_concern_destroy( wc );
    bson_destroy( query );
    bson_destroy( update );
    for( i=0; i<5; i++ ) {
        bson_destroy( objs[i] );
        bson_dealloc( objs[i] );
    }
}
Example #17
static void
ngx_wirte_tracking_into_mongodb(ngx_http_request_t *r,
                                ngx_http_vesb_aio_log_main_conf_t *valcf,ngx_http_vesb_aio_log_loc_conf_t *vallcf,int timeuse)
{
    mongo                     conn[1];
    char                      *conn_str;
    int                       port,status,insertstatus;
    time_t                    timenow;
    ngx_list_part_t           *part = &r->headers_in.headers.part;
    ngx_table_elt_t           *header = part->elts;
    ngx_uint_t                i;

    if(r->headers_out.status == NGX_HTTP_OK)
    {
        conn_str = (char *)valcf->mongodb_conn_str.data;
        port = (int)valcf->mongodb_conn_port;
        status = mongo_client( conn, conn_str, port );
        if( status == MONGO_OK ) {
            mongo_set_op_timeout( conn, 10000 ); /* op timeout: 10000 ms */
            time ( &timenow );
            bson b[1];
            bson_init( b );
            bson_append_new_oid( b, "_id" );

            if( r->headers_in.x_forwarded_for == NULL )
            {
                bson_append_string_n( b, "realclientip" , (char *)r->connection->addr_text.data, r->connection->addr_text.len);
            }
            else
            {
                bson_append_string_n( b, "realclientip" , (char *)r->headers_in.x_forwarded_for->value.data,r->headers_in.x_forwarded_for->value.len);
            }

            bson_append_int( b,"statuscode",r->headers_out.status);
            bson_append_int( b,"usetime",timeuse);
            bson_append_long( b,"requestsize",r->request_length);
            bson_append_long( b,"responsesize",r->headers_out.content_length_n);
            bson_append_time_t( b,"invoketime",timenow );


            if(vallcf->app_name.data != NULL)
            {
                bson_append_string_n( b,"appname",(char *)vallcf->app_name.data,vallcf->app_name.len);
            }
            else
            {
                bson_append_string( b,"appname","undefine");
            }

            /*get method name*/
            for(i=0;/* void */; ++i)
            {
                if(i >= part->nelts)
                {
                    if(part->next == NULL)
                    {
                        break;
                    }

                    part = part->next;
                    header = part->elts;
                    i=0;
                }

                if(header[i].hash == 0)
                {
                    continue;
                }

                if(ngx_strstr(header[i].key.data,"SOAPAction") != NULL)
                {
                    bson_append_string_n( b,"SOAPAction",(char *)header[i].value.data,header[i].value.len);
                }
                else if(ngx_strstr(header[i].key.data,"Content-Type") != NULL)
                {
                    bson_append_string_n( b,"Content-Type",(char *)header[i].value.data,header[i].value.len);
                }
            }


            bson_finish( b );
            insertstatus = mongo_insert( conn,"vesb.tracking", b , NULL );

            if( insertstatus != MONGO_OK ) {
                ngx_log_error(NGX_LOG_NOTICE, r->connection->log, 0, "failed to insert tracking log into mongodb! (error:%d)",conn[0].err);
            }
            bson_destroy( b );
        }
        else
        {
            ngx_log_error(NGX_LOG_NOTICE, r->connection->log, 0, "could not connect to mongodb! (error:%d)",status);
        }

        mongo_destroy( conn );

    }
}
Example #18
static void test_insert( mongo *conn ) {
    mongo_write_concern wc0[1], wc1[1];
    bson b[1], b2[1], b3[1], b4[1];
    bson *objs[2];

    mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL );

    mongo_write_concern_init( wc0 );    
    mongo_write_concern_set_w( wc0, 0 );
    mongo_write_concern_finish( wc0 );
    mongo_write_concern_init( wc1 );    
    mongo_write_concern_set_w( wc1, 1 );
    mongo_write_concern_finish( wc1 );

    bson_init( b4 );
    bson_append_string( b4, "foo", "bar" );
    bson_finish( b4 );

    ASSERT( mongo_insert( conn, TEST_NS, b4, wc1 ) == MONGO_OK );

    ASSERT( mongo_remove( conn, TEST_NS, bson_shared_empty( ), wc1 ) == MONGO_OK );

    bson_init( b );
    bson_append_new_oid( b, "_id" );
    bson_finish( b );

    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK );

    /* This fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK ); /* no getLastError request */

    ASSERT( mongo_insert( conn, TEST_NS, b, wc1 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );
    mongo_clear_errors( conn );

    /* Still fails but returns OK with write concern w = 0 */
    ASSERT( mongo_insert( conn, TEST_NS, b, wc0 ) == MONGO_OK );

    /* But not when we set a default write concern on the conn. */
    mongo_set_write_concern( conn, wc1 );
    ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* Now test batch insert. */
    bson_init( b2 );
    bson_append_new_oid( b2, "_id" );
    bson_finish( b2 );

    bson_init( b3 );
    bson_append_new_oid( b3, "_id" );
    bson_finish( b3 );

    objs[0] = b2;
    objs[1] = b3;

    /* Insert two new documents by insert_batch. */
    conn->write_concern = NULL;
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 1 );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );
    ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_shared_empty( ) ) == 3 );

    /* This should definitely fail if we try again with write concern. */
    mongo_clear_errors( conn );
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, wc1, 0 ) == MONGO_ERROR );
    ASSERT( conn->err == MONGO_WRITE_ERROR );
    ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." );
    ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" );
    ASSERT( conn->lasterrcode == 11000 );

    /* But it will succeed without the write concern set. */
    ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK );

    bson_destroy( b );
    bson_destroy( b2 );
    bson_destroy( b3 );
    bson_destroy( b4 );
    mongo_write_concern_destroy( wc0 );
    mongo_write_concern_destroy( wc1 );
}
Example #19
INT32 main ( INT32 argc, CHAR **argv )
{
   // initialize local variables
   CHAR *pHostName                   = NULL ;
   CHAR *pServiceName                = NULL ;
   CHAR *pUsr                        = NULL ;
   CHAR *pPasswd                     = NULL ;
   // define a connection handle; used to connect to the database
   sdbConnectionHandle connection    = 0 ;
   // define a collection space handle
   sdbCSHandle collectionspace       = 0 ;
   // define a collection handle
   sdbCollectionHandle collection    = 0 ;
   // define a cursor handle for query
   sdbCursorHandle cursor            = 0 ;
   // define local variables
   // initialize them before use
   INT32 rc    = SDB_OK ;
   INT32 count = 0 ;
   bson obj ;
   bson rule ;
   bson objList [ NUM_RECORD ] ;

   // verify syntax
   if ( 5 != argc )
   {
      displaySyntax ( (CHAR*)argv[0] ) ;
      exit ( 0 ) ;
   }
   // read argument
   pHostName    = (CHAR*)argv[1] ;
   pServiceName = (CHAR*)argv[2] ;
   pUsr         = (CHAR*)argv[3] ;
   pPasswd      = (CHAR*)argv[4] ;

   // connect to database
   rc = sdbConnect ( pHostName, pServiceName, pUsr, pPasswd, &connection ) ;
   CHECK_RC ( rc, "Failed to connet to database" ) ;

   // create collection space
   rc = sdbCreateCollectionSpace ( connection, COLLECTION_SPACE_NAME,
                                   SDB_PAGESIZE_4K, &collectionspace ) ;
   CHECK_RC ( rc, "Failed to create collection space" ) ;

   // create a collection in the specified collection space.
   // Here, we create it in the collection space we just created.
   rc = sdbCreateCollection ( collectionspace, COLLECTION_NAME, &collection ) ;
   CHECK_RC ( rc, "Failed to create collection" ) ;

   // create record list using objList
   createRecordList ( &objList[0], NUM_RECORD ) ;

   // insert objects and free the memory allocated by createRecordList
   for ( count = 0; count < NUM_RECORD; count++ )
   {
      // all the contents of inserted record are the same except _id
      rc = sdbInsert ( collection, &objList[count] ) ;
      if ( rc )
      {
         printf ( "Failed to insert record, rc = %d" OSS_NEWLINE, rc ) ;
      }
      bson_destroy ( &objList[count] ) ;
   }

   // query all the records in this collection
   // and return the result via the cursor handle
   rc = sdbQuery(collection, NULL, NULL, NULL, NULL, 0, -1, &cursor ) ;
   CHECK_RC ( rc, "Failed to query" ) ;

   // get all the queried records
   bson_init(&obj);
   while( !( rc=sdbNext( cursor, &obj ) ) )
   {
      bson_print( &obj ) ;
      bson_destroy(&obj) ;
      bson_init(&obj);
   }
   bson_destroy(&obj) ;
   if( rc==SDB_DMS_EOC )
   {
      printf("All the record had been list." OSS_NEWLINE ) ;
   }
   else if( rc!=SDB_OK )
   {
      CHECK_RC ( rc, "Failed to get the record" ) ;
   }

   // drop the specified collection
   rc = sdbDropCollection( collectionspace,COLLECTION_NAME ) ;
   CHECK_RC ( rc, "Failed to drop the specified collection" ) ;

   // drop the specified collection space
   rc = sdbDropCollectionSpace( connection,COLLECTION_SPACE_NAME ) ;
   CHECK_RC ( rc, "Failed to drop the specified collection space" ) ;

done:
   // disconnect the connection
   sdbDisconnect ( connection ) ;
   // release the local variables
   sdbReleaseCursor ( cursor ) ;
   sdbReleaseCollection ( collection ) ;
   sdbReleaseCS ( collectionspace ) ;
   sdbReleaseConnection ( connection ) ;
   return 0;
error:
   goto done ;
}
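
This example leans on helpers defined elsewhere in the SequoiaDB samples (displaySyntax, createRecordList, NUM_RECORD, OSS_NEWLINE, CHECK_RC). As a hedge against the missing context, here is one plausible reconstruction of CHECK_RC that matches the done/error labels used above; the shipped definition may differ:

/* Hypothetical reconstruction: report a failure and jump to the error
 * label (which falls through to done for cleanup) when rc != SDB_OK. */
#define CHECK_RC( rc, msg )                                          \
   do                                                                \
   {                                                                 \
      if ( SDB_OK != ( rc ) )                                        \
      {                                                              \
         printf ( "%s, rc = %d" OSS_NEWLINE, ( msg ), ( rc ) ) ;     \
         goto error ;                                                \
      }                                                              \
   } while ( 0 )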
Example No. 20
static
void test_large( void )
{
    mongo conn[1];
    gridfs gfs[1];
    gridfile gfile[1];
    FILE *fd;
    size_t i, n;
    char *buffer = (char*)bson_malloc( LARGE );
    char *read_buf = (char*)bson_malloc( LARGE );
    gridfs_offset filesize = ( int64_t )1024 * ( int64_t )LARGE;
    mongo_write_concern wc;    
    bson lastError;
    bson lastErrorCmd;
    
    srand( (unsigned int) time( NULL ) );

    INIT_SOCKETS_FOR_WINDOWS;
    CONN_CLIENT_TEST;
    
    mongo_write_concern_init(&wc);
    wc.j = 1;
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    GFS_INIT;

    fd = fopen( "bigfile", "r" );
    if( fd ) {
      fclose( fd );
    } else {
      /* Create a very large file */
      fill_buffer_randomly( buffer, ( int64_t )LARGE );
      fd = fopen( "bigfile", "w" );
      for( i=0; i<1024; i++ ) {
        fwrite( buffer, 1, LARGE, fd );
      }
      fclose( fd );
    }

    /* Now read the file into GridFS */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_store_file( gfs, "bigfile", "bigfile", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS);

    gridfs_find_filename( gfs, "bigfile", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    
    fd = fopen( "bigfile", "r" );

    while( ( n = fread( buffer, 1, MEDIUM, fd ) ) != 0 ) {
      ASSERT( gridfile_read_buffer( gfile, read_buf, MEDIUM ) == n );
      ASSERT( memcmp( buffer, read_buf, n ) == 0 );
    }

    fclose( fd );
    gridfile_destroy( gfile );

    /* Read the file using the streaming interface */
    gridfs_remove_filename( gfs, "bigfile" );
    gridfs_remove_filename( gfs, "bigfile-stream" );
    gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html", GRIDFILE_NOMD5 | GRIDFILE_COMPRESS );

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 0; /* Let's reset write concern j field to zero, we will manually call getLastError with j = 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fd = fopen( "bigfile", "r" );
    i = 0;
    while( ( n = fread( buffer, 1, READ_WRITE_BUF_SIZE, fd ) ) != 0 ) {
        ASSERT( gridfile_write_buffer( gfile, buffer, n ) == n );     
        if(i++ % 10 == 0) {
          bson_init( &lastErrorCmd );
          bson_append_int( &lastErrorCmd, "getLastError", 1);
          bson_append_int( &lastErrorCmd, "j", 1);
          bson_finish( &lastErrorCmd );

          bson_init( &lastError );
          mongo_run_command( conn, "test", &lastErrorCmd, &lastError );

          bson_destroy( &lastError );
          bson_destroy( &lastErrorCmd );
        }
    }

    mongo_write_concern_destroy( &wc );
    mongo_write_concern_init(&wc);
    wc.j = 1; /* Let's reset write concern j field to 1 */
    mongo_write_concern_finish(&wc);
    mongo_set_write_concern(conn, &wc);

    fclose( fd );
    gridfile_writer_done( gfile );

    gridfs_find_filename( gfs, "bigfile-stream", gfile );

    ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 );
    ASSERT( gridfile_get_contentlength( gfile ) ==  filesize );
    gridfs_remove_filename( gfs, "bigfile-stream" );

    gridfs_destroy( gfs );
    mongo_disconnect( conn );
    mongo_destroy( conn );

    bson_free( buffer );
    bson_free( read_buf );
    mongo_write_concern_destroy( &wc );
}
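
The journal-flush block inside the write loop above is self-contained enough to factor out. A sketch, assuming the same legacy API and the "test" database the example already targets:

/* Sketch: ask the server to confirm journaled writes by running
 * { getLastError: 1, j: 1 }. The reply is discarded here. */
static void flush_journaled( mongo *conn ) {
    bson cmd, reply;

    bson_init( &cmd );
    bson_append_int( &cmd, "getLastError", 1 );
    bson_append_int( &cmd, "j", 1 );
    bson_finish( &cmd );

    bson_init( &reply );
    mongo_run_command( conn, "test", &cmd, &reply );

    bson_destroy( &reply );
    bson_destroy( &cmd );
}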
Example No. 21
static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;
   uint32_t payload_batch_size = 0;
   uint32_t payload_total_offset = 0;
   bool ship_it = false;
   int document_count = 0;
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB has an extra allowance to allow updating a 16 MB document,
 * as the update operators would otherwise overflow the 16 MB object limit
 */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   _mongoc_write_command_init (&cmd, command, collection);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;
   if (!mongoc_cmd_parts_set_write_concern (
          &parts, write_concern, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (parts.assembled.is_acknowledged) {
      mongoc_cmd_parts_set_session (&parts, cs);
   }

   /* Write commands that include multi-document operations are not retryable.
    * Set this explicitly so that mongoc_cmd_parts_assemble does not need to
    * inspect the command body later. */
   parts.allow_txn_number =
      (command->flags.has_multi_write || !parts.assembled.is_acknowledged)
         ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
         : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts));
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 bytes
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4 bytes payload size
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */

   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      memcpy (&len,
              command->payload.data + payload_batch_size + payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under max batch size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload = command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails or
          * the selected server does not support retryable writes, fall through
          * and allow the original error to be reported. */
         if (!ret && is_retryable &&
             (error->domain == MONGOC_ERROR_STREAM ||
              _mongoc_write_is_retryable_error (&reply))) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               bson_destroy (&reply);
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document
          * so it can rewrite the error message which contains the relative
          * document index per batch
          */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
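
Aside from the oversized-document abort, the batching rule in the loop above reduces to three outcomes: ship the current batch before adding the document, add it and ship at once (count cap reached or payload exhausted), or add it and keep going. A standalone sketch of that decision with the driver types reduced to plain integers (illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

enum batch_action {
   ADD_ONLY,      /* document fits; keep accumulating */
   ADD_AND_SHIP,  /* document fits, but the batch is now complete */
   SHIP_FIRST     /* document would overflow; send the batch first */
};

/* Sketch of the OP_MSG batching decision for one document. */
static enum batch_action
classify_document (uint32_t batch_bytes,   /* payload bytes already batched */
                   uint32_t header_bytes,  /* fixed opcode + command overhead */
                   int32_t doc_len,        /* size of the next document */
                   int32_t max_msg_size,
                   int doc_count,          /* documents already batched */
                   int32_t max_doc_count,
                   bool at_end_of_payload) /* no documents after this one */
{
   if (batch_bytes + header_bytes + (uint32_t) doc_len > (uint32_t) max_msg_size) {
      return SHIP_FIRST;
   }
   if (doc_count + 1 == max_doc_count || at_end_of_payload) {
      return ADD_AND_SHIP;
   }
   return ADD_ONLY;
}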
Example No. 22
static switch_status_t my_on_reporting(switch_core_session_t *session)
{
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	switch_channel_t *channel = switch_core_session_get_channel(session);
	switch_event_header_t *hi;
	switch_caller_profile_t *caller_profile;
	switch_app_log_t *app_log;
	bson cdr;
	int is_b;
	char *tmp;

	if (globals.shutdown) {
		return SWITCH_STATUS_SUCCESS;
	}

	is_b = channel && switch_channel_get_originator_caller_profile(channel);
	if (!globals.log_b && is_b) {
		const char *force_cdr = switch_channel_get_variable(channel, SWITCH_FORCE_PROCESS_CDR_VARIABLE);
		if (!switch_true(force_cdr)) {
			return SWITCH_STATUS_SUCCESS;
		}
	}

	bson_init(&cdr);

	/* Channel data */
	bson_append_start_object(&cdr, "channel_data");
	bson_append_string(&cdr, "state", switch_channel_state_name(switch_channel_get_state(channel)));
	bson_append_string(&cdr, "direction", switch_channel_direction(channel) == SWITCH_CALL_DIRECTION_OUTBOUND ? "outbound" : "inbound");
	bson_append_int(&cdr, "state_number", switch_channel_get_state(channel));

	if ((tmp = switch_channel_get_flag_string(channel))) {
		bson_append_string(&cdr, "flags", tmp);
		free(tmp);
	}

	if ((tmp = switch_channel_get_cap_string(channel))) {
		bson_append_string(&cdr, "caps", tmp);
		free(tmp);
	}
	bson_append_finish_object(&cdr);				/* channel_data */


	/* Channel variables */
	bson_append_start_object(&cdr, "variables");

	if ((hi = switch_channel_variable_first(channel))) {
		for (; hi; hi = hi->next) {
			if (!zstr(hi->name) && !zstr(hi->value)) {
				bson_append_string(&cdr, hi->name, hi->value);
			}
		}
		switch_channel_variable_last(channel);
	}

	bson_append_finish_object(&cdr);				/* variables */


	/* App log */
	if ((app_log = switch_core_session_get_app_log(session))) {
		switch_app_log_t *ap;

		bson_append_start_object(&cdr, "app_log");
		for (ap = app_log; ap; ap = ap->next) {
			bson_append_start_object(&cdr, "application");
			bson_append_string(&cdr, "app_name", ap->app);
			bson_append_string(&cdr, "app_data", switch_str_nil(ap->arg));
			bson_append_long(&cdr, "app_stamp", ap->stamp);
			bson_append_finish_object(&cdr);		/* application */
		}

		bson_append_finish_object(&cdr);			/* app_log */
	}


	/* Callflow */
	caller_profile = switch_channel_get_caller_profile(channel);

	while (caller_profile) {
		bson_append_start_object(&cdr, "callflow");

		if (!zstr(caller_profile->dialplan)) {
			bson_append_string(&cdr, "dialplan", caller_profile->dialplan);
		}

		if (!zstr(caller_profile->profile_index)) {
			bson_append_string(&cdr, "profile_index", caller_profile->profile_index);
		}

		if (caller_profile->caller_extension) {
			switch_caller_application_t *ap;

			bson_append_start_object(&cdr, "extension");

			bson_append_string(&cdr, "name", caller_profile->caller_extension->extension_name);
			bson_append_string(&cdr, "number", caller_profile->caller_extension->extension_number);

			if (caller_profile->caller_extension->current_application) {
				bson_append_string(&cdr, "current_app", caller_profile->caller_extension->current_application->application_name);
			}

			for (ap = caller_profile->caller_extension->applications; ap; ap = ap->next) {
				bson_append_start_object(&cdr, "application");
				if (ap == caller_profile->caller_extension->current_application) {
					bson_append_bool(&cdr, "last_executed", 1);
				}
				bson_append_string(&cdr, "app_name", ap->application_name);
				bson_append_string(&cdr, "app_data", switch_str_nil(ap->application_data));
				bson_append_finish_object(&cdr);
			}

			if (caller_profile->caller_extension->children) {
				switch_caller_profile_t *cp = NULL;

				for (cp = caller_profile->caller_extension->children; cp; cp = cp->next) {

					if (!cp->caller_extension) {
						continue;
					}

					bson_append_start_object(&cdr, "sub_extensions");
					bson_append_start_object(&cdr, "extension");

					bson_append_string(&cdr, "name", cp->caller_extension->extension_name);
					bson_append_string(&cdr, "number", cp->caller_extension->extension_number);
					bson_append_string(&cdr, "dialplan", cp->dialplan);
					if (cp->caller_extension->current_application) {
						bson_append_string(&cdr, "current_app", cp->caller_extension->current_application->application_name);
					}

					for (ap = cp->caller_extension->applications; ap; ap = ap->next) {
						bson_append_start_object(&cdr, "application");
						if (ap == cp->caller_extension->current_application) {
							bson_append_bool(&cdr, "last_executed", 1);
						}
						bson_append_string(&cdr, "app_name", ap->application_name);
						bson_append_string(&cdr, "app_data", switch_str_nil(ap->application_data));
						bson_append_finish_object(&cdr);
					}

					bson_append_finish_object(&cdr);	/* extension */
					bson_append_finish_object(&cdr);	/* sub_extensions */
				}
			}

			bson_append_finish_object(&cdr);			/* extension */
		}

		bson_append_start_object(&cdr, "caller_profile");
		set_bson_profile_data(&cdr, caller_profile);

		if (caller_profile->origination_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			bson_append_start_object(&cdr, "origination");
			for (cp = caller_profile->origination_caller_profile; cp; cp = cp->next) {
				bson_append_start_object(&cdr, "origination_caller_profile");
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);			/* origination */
		}

		if (caller_profile->originator_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			bson_append_start_object(&cdr, "originator");
			for (cp = caller_profile->originator_caller_profile; cp; cp = cp->next) {
				bson_append_start_object(&cdr, "originator_caller_profile");
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);			/* originator */
		}

		if (caller_profile->originatee_caller_profile) {
			switch_caller_profile_t *cp = NULL;

			bson_append_start_object(&cdr, "originatee");
			for (cp = caller_profile->originatee_caller_profile; cp; cp = cp->next) {
				bson_append_start_object(&cdr, "originatee_caller_profile");
				set_bson_profile_data(&cdr, cp);
				bson_append_finish_object(&cdr);
			}
			bson_append_finish_object(&cdr);			/* originatee */
		}

		bson_append_finish_object(&cdr);				/* caller_profile */

		/* Timestamps */
		if (caller_profile->times) {
			bson_append_start_object(&cdr, "times");

			/* Insert timestamps as long ints (microseconds) to preserve accuracy */
			bson_append_long(&cdr, "created_time", caller_profile->times->created);
			bson_append_long(&cdr, "profile_created_time", caller_profile->times->profile_created);
			bson_append_long(&cdr, "progress_time", caller_profile->times->progress);
			bson_append_long(&cdr, "progress_media_time", caller_profile->times->progress_media);
			bson_append_long(&cdr, "answered_time", caller_profile->times->answered);
			bson_append_long(&cdr, "bridged_time", caller_profile->times->bridged);
			bson_append_long(&cdr, "last_hold_time", caller_profile->times->last_hold);
			bson_append_long(&cdr, "hold_accum_time", caller_profile->times->hold_accum);
			bson_append_long(&cdr, "hangup_time", caller_profile->times->hungup);
			bson_append_long(&cdr, "resurrect_time", caller_profile->times->resurrected);
			bson_append_long(&cdr, "transfer_time", caller_profile->times->transferred);
			bson_append_finish_object(&cdr);			/* times */
		}

		bson_append_finish_object(&cdr);				/* callflow */
		caller_profile = caller_profile->next;
	}

	bson_finish(&cdr);

	switch_mutex_lock(globals.mongo_mutex);

	if (mongo_insert(globals.mongo_conn, globals.mongo_namespace, &cdr) != MONGO_OK) {
		if (globals.mongo_conn->err == MONGO_IO_ERROR) {
			mongo_error_t db_status;
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "MongoDB connection failed; attempting reconnect...\n");
			db_status = mongo_reconnect(globals.mongo_conn);

			if (db_status != MONGO_OK) {
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "MongoDB reconnect failed with error code %d\n", db_status);
				status = SWITCH_STATUS_FALSE;
			} else {
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "MongoDB connection re-established.\n");
				if (mongo_insert(globals.mongo_conn, globals.mongo_namespace, &cdr) != MONGO_OK) {
					switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_insert: error code %d\n", globals.mongo_conn->err);
					status = SWITCH_STATUS_FALSE;
				}
			}

		} else {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "mongo_insert: error code %d\n", globals.mongo_conn->err);
			status = SWITCH_STATUS_FALSE;
		}
	}

	switch_mutex_unlock(globals.mongo_mutex);
	bson_destroy(&cdr);

	return status;
}
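
set_bson_profile_data() is defined elsewhere in the module. A hypothetical minimal stub showing the shape it must have; the field list here is illustrative, and the real module serializes far more of switch_caller_profile_t:

/* Hypothetical stub: append a handful of caller profile fields to the
 * currently open BSON object. */
static void set_bson_profile_data(bson *b, switch_caller_profile_t *cp)
{
	bson_append_string(b, "username", switch_str_nil(cp->username));
	bson_append_string(b, "caller_id_name", switch_str_nil(cp->caller_id_name));
	bson_append_string(b, "caller_id_number", switch_str_nil(cp->caller_id_number));
	bson_append_string(b, "destination_number", switch_str_nil(cp->destination_number));
	bson_append_string(b, "uuid", switch_str_nil(cp->uuid));
}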
Example No. 23
static void
ha_replica_set_configure (ha_replica_set_t *replica_set,
                          ha_node_t        *primary)
{
   mongoc_database_t *database;
   mongoc_client_t *client;
   mongoc_cursor_t *cursor;
   const bson_t *doc;
   bson_error_t error;
   bson_iter_t iter;
   ha_node_t *node;
   bson_t ar;
   bson_t cmd;
   bson_t config;
   bson_t member;
   char *str;
   char *uristr;
   char hoststr[32];
   char key[8];
   int i = 0;

   uristr = bson_strdup_printf("mongodb://127.0.0.1:%hu/", primary->port);
   client = mongoc_client_new(uristr);
#ifdef MONGOC_ENABLE_SSL
   if (replica_set->ssl_opt) {
      mongoc_client_set_ssl_opts(client, replica_set->ssl_opt);
   }
#endif
   bson_free(uristr);

   bson_init(&cmd);
   bson_append_document_begin(&cmd, "replSetInitiate", -1, &config);
   bson_append_utf8(&config, "_id", 3, replica_set->name, -1);
   bson_append_array_begin(&config, "members", -1, &ar);
   for (node = replica_set->nodes; node; node = node->next) {
      snprintf(key, sizeof key, "%u", i);
      key[sizeof key - 1] = '\0';

      snprintf(hoststr, sizeof hoststr, "127.0.0.1:%hu", node->port);
      hoststr[sizeof hoststr - 1] = '\0';

      bson_append_document_begin(&ar, key, -1, &member);
      bson_append_int32(&member, "_id", -1, i);
      bson_append_utf8(&member, "host", -1, hoststr, -1);
      bson_append_bool(&member, "arbiterOnly", -1, node->is_arbiter);
      bson_append_document_end(&ar, &member);

      i++;
   }
   bson_append_array_end(&config, &ar);
   bson_append_document_end(&cmd, &config);

   str = bson_as_json(&cmd, NULL);
   MONGOC_DEBUG("Config: %s", str);
   bson_free(str);

   database = mongoc_client_get_database(client, "admin");

again:
   cursor = mongoc_database_command(database,
                                    MONGOC_QUERY_NONE,
                                    0,
                                    1,
                                    &cmd,
                                    NULL,
                                    NULL);

   while (mongoc_cursor_next(cursor, &doc)) {
      str = bson_as_json(doc, NULL);
      MONGOC_DEBUG("Reply: %s", str);
      bson_free(str);
      if (bson_iter_init_find(&iter, doc, "ok") &&
          bson_iter_as_bool(&iter)) {
         goto cleanup;
      }
   }

   if (mongoc_cursor_error(cursor, &error)) {
      mongoc_cursor_destroy(cursor);
      MONGOC_WARNING("%s: Retrying in 1 second.", error.message);
      sleep(1);
      goto again;
   }

cleanup:
   mongoc_cursor_destroy(cursor);
   mongoc_database_destroy(database);
   mongoc_client_destroy(client);
   bson_destroy(&cmd);
}
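
For reference, the replSetInitiate command assembled above serializes to JSON of roughly this shape (a hypothetical two-node set; name and ports are illustrative):

/* { "replSetInitiate" : { "_id" : "repltest",
 *                         "members" : [
 *                            { "_id" : 0, "host" : "127.0.0.1:27017", "arbiterOnly" : false },
 *                            { "_id" : 1, "host" : "127.0.0.1:27018", "arbiterOnly" : true } ] } } */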
Example No. 24
/**
 * \brief This function tries to update a tag group in MongoDB. It adds a new
 * version of the data.
 */
int vs_mongo_taggroup_update(struct VS_CTX *vs_ctx,
		struct VSNode *node,
		struct VSTagGroup *tg)
{
	bson cond, op;
	bson bson_version;
	int ret;

	/* TODO: delete old versions when there are too many versions:
	int old_saved_version = tg->saved_version;
	*/

	bson_init(&cond);
	{
		bson_append_oid(&cond, "_id", &tg->oid);
		/* To be sure that the right tag group will be updated */
		bson_append_int(&cond, "node_id", node->id);
		bson_append_int(&cond, "taggroup_id", tg->id);
	}
	bson_finish(&cond);

	bson_init(&op);
	{
		/* Update item current_version in document */
		bson_append_start_object(&op, "$set");
		{
			bson_append_int(&op, "current_version", tg->version);
		}
		bson_append_finish_object(&op);
		/* Create a new bson object representing the current version and add
		 * it to the versions object */
		bson_append_start_object(&op, "$set");
		{
			bson_init(&bson_version);
			{
				vs_mongo_taggroup_save_version(tg, &bson_version, UINT32_MAX);
			}
			bson_finish(&bson_version);
			bson_append_bson(&op, "versions", &bson_version);
		}
		bson_append_finish_object(&op);
	}
	bson_finish(&op);

	ret = mongo_update(vs_ctx->mongo_conn, vs_ctx->mongo_tg_ns, &cond, &op,
			MONGO_UPDATE_BASIC, 0);

	bson_destroy(&bson_version);
	bson_destroy(&cond);
	bson_destroy(&op);

	if(ret != MONGO_OK) {
		v_print_log(VRS_PRINT_ERROR,
				"Unable to update tag group %d to MongoDB: %s, error: %s\n",
				tg->id, vs_ctx->mongo_tg_ns,
				mongo_get_server_err_string(vs_ctx->mongo_conn));
		return 0;
	}

	return 1;
}
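
Spelled out as JSON, the condition and update documents built above look roughly like this (illustrative; the version payload comes from vs_mongo_taggroup_save_version):

/* cond: { "_id" : ObjectId(...), "node_id" : <id>, "taggroup_id" : <id> }
 * op:   { "$set" : { "current_version" : <version> },
 *         "$set" : { "versions" : { "<version>" : { ...tag data... } } } }
 *
 * Note that op carries two separate "$set" keys because the code opens
 * "$set" twice; a single merged $set object would be the safer form. */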
Example No. 25
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t           *client,
                    const char                *db_and_collection,
                    mongoc_query_flags_t       flags,
                    uint32_t                   skip,
                    uint32_t                   limit,
                    uint32_t                   batch_size,
                    bool                       is_command,
                    const bson_t              *query,
                    const bson_t              *fields,
                    const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_prefs_t *local_read_prefs = NULL;
   mongoc_read_mode_t mode;
   mongoc_cursor_t *cursor;
   const bson_t *tags;
   bson_iter_t iter;
   const char *key;
   const char *mode_str;
   bson_t child;
   bool found = false;
   int i;

   ENTRY;

   BSON_ASSERT (client);
   BSON_ASSERT (db_and_collection);
   BSON_ASSERT (query);

   if (!read_prefs) {
      read_prefs = client->read_prefs;
   }

   cursor = bson_malloc0 (sizeof *cursor);

   /*
    * CDRIVER-244:
    *
    * If this is a command, we need to verify we can send it to the location
    * specified by the read preferences. Otherwise, log a warning that we
    * are rerouting to the primary instance.
    */
   if (is_command &&
       read_prefs &&
       (mongoc_read_prefs_get_mode (read_prefs) != MONGOC_READ_PRIMARY) &&
       bson_iter_init (&iter, query) &&
       bson_iter_next (&iter) &&
       (key = bson_iter_key (&iter))) {
      for (i = 0; gSecondaryOkCommands [i]; i++) {
         if (0 == strcasecmp (key, gSecondaryOkCommands [i])) {
            found = true;
            break;
         }
      }
      if (!found) {
         cursor->redir_primary = true;
         local_read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
         read_prefs = local_read_prefs;
         MONGOC_INFO ("Database command \"%s\" rerouted to primary node", key);
      }
   }

   /*
    * Cursors execute their query lazily. This sadly means that we must copy
    * some extra data around between the bson_t structures. This should be
    * small in most cases, so it reduces to a pure memcpy. The benefit to this
    * design is simplified error handling by API consumers.
    */

   cursor->client = client;
   bson_strncpy (cursor->ns, db_and_collection, sizeof cursor->ns);
   cursor->nslen = (uint32_t)strlen(cursor->ns);
   cursor->flags = flags;
   cursor->skip = skip;
   cursor->limit = limit;
   cursor->batch_size = batch_size;
   cursor->is_command = is_command;

#define MARK_FAILED(c) \
   do { \
      (c)->failed = true; \
      (c)->done = true; \
      (c)->end_of_event = true; \
      (c)->sent = true; \
   } while (0)

   /* we can't have exhaust queries with limits */
   if ((flags & MONGOC_QUERY_EXHAUST) && limit) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST and set a limit.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /* we can't have exhaust queries with sharded clusters */
   if ((flags & MONGOC_QUERY_EXHAUST) &&
       (client->cluster.mode == MONGOC_CLUSTER_SHARDED_CLUSTER)) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot specify MONGOC_QUERY_EXHAUST with sharded cluster.");
      MARK_FAILED (cursor);
      GOTO (finish);
   }

   /*
    * Check types of various optional parameters.
    */
   if (!is_command) {
      if (bson_iter_init_find (&iter, query, "$explain") &&
          !(BSON_ITER_HOLDS_BOOL (&iter) || BSON_ITER_HOLDS_INT32 (&iter))) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$explain must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }

      if (bson_iter_init_find (&iter, query, "$snapshot") &&
          !BSON_ITER_HOLDS_BOOL (&iter) &&
          !BSON_ITER_HOLDS_INT32 (&iter)) {
         bson_set_error (&cursor->error,
                         MONGOC_ERROR_CURSOR,
                         MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                         "$snapshot must be a boolean.");
         MARK_FAILED (cursor);
         GOTO (finish);
      }
   }

   if (!cursor->is_command && !bson_has_field (query, "$query")) {
      bson_init (&cursor->query);
      bson_append_document (&cursor->query, "$query", 6, query);
   } else {
      bson_copy_to (query, &cursor->query);
   }

   if (read_prefs) {
      cursor->read_prefs = mongoc_read_prefs_copy (read_prefs);

      mode = mongoc_read_prefs_get_mode (read_prefs);
      tags = mongoc_read_prefs_get_tags (read_prefs);

      if (mode != MONGOC_READ_PRIMARY) {
         flags |= MONGOC_QUERY_SLAVE_OK;

         if ((mode != MONGOC_READ_SECONDARY_PREFERRED) || tags) {
            bson_append_document_begin (&cursor->query, "$readPreference",
                                        15, &child);
            mode_str = _mongoc_cursor_get_read_mode_string (mode);
            bson_append_utf8 (&child, "mode", 4, mode_str, -1);
            if (tags) {
               bson_append_array (&child, "tags", 4, tags);
            }
            bson_append_document_end (&cursor->query, &child);
         }
      }
   }

   if (fields) {
      bson_copy_to(fields, &cursor->fields);
   } else {
      bson_init(&cursor->fields);
   }

   _mongoc_buffer_init(&cursor->buffer, NULL, 0, NULL);

finish:
   mongoc_counter_cursors_active_inc();

   if (local_read_prefs) {
      mongoc_read_prefs_destroy (local_read_prefs);
   }

   RETURN (cursor);
}
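
For a non-primary read preference, the wrapping above produces a wire query of roughly this shape (illustrative values):

/* { "$query" : { ...caller's query... },
 *   "$readPreference" : { "mode" : "secondaryPreferred",
 *                         "tags" : [ { "dc" : "east" } ] } } */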
Example No. 26
/**
 * \brief This function tries to load a tag group from MongoDB
 *
 * \param[in] *vs_ctx		The verse server context
 * \param[in] *oid			The pointer to the ObjectID of the tag group in MongoDB
 * \param[in] *node			The node containing the tag group
 * \param[in] taggroup_id	The tag group ID that is requested from the database
 * \param[in] req_version	The version of the tag group that is requested
 * 							from the database. When the version is equal to -1,
 * 							the current version is loaded from MongoDB.
 *
 * \return This function returns a pointer to the tag group when it is found.
 * Otherwise it returns NULL.
 */
struct VSTagGroup *vs_mongo_taggroup_load_linked(struct VS_CTX *vs_ctx,
		bson_oid_t *oid,
		struct VSNode *node,
		uint16 taggroup_id,
		uint32 req_version)
{
	struct VSTagGroup *tg = NULL;
	bson query;
	mongo_cursor cursor;
	uint32 node_id = -1, tmp_taggroup_id = -1,
			current_version = -1, custom_type = -1;
	int found = 0;
	bson_iterator tg_data_iter;
	const bson *bson_tg;

	bson_init(&query);
	bson_append_oid(&query, "_id", oid);
	bson_finish(&query);

	mongo_cursor_init(&cursor, vs_ctx->mongo_conn, vs_ctx->mongo_tg_ns);
	mongo_cursor_set_query(&cursor, &query);

	/* ObjectID should be unique */
	while( mongo_cursor_next(&cursor) == MONGO_OK ) {
		bson_tg = mongo_cursor_bson(&cursor);

		/* Try to get node id */
		if( bson_find(&tg_data_iter, bson_tg, "node_id") == BSON_INT ) {
			node_id = bson_iterator_int(&tg_data_iter);
		}

		/* Try to get tag group id */
		if( bson_find(&tg_data_iter, bson_tg, "taggroup_id") == BSON_INT ) {
			tmp_taggroup_id = bson_iterator_int(&tg_data_iter);
		}

		/* ObjectID is ALMOST unique, so check whether the node id and
		 * tag group id match */
		if(node_id == node->id && tmp_taggroup_id == taggroup_id) {
			found = 1;
			break;
		}
	}

	/* When the tag group was found, load the required data from MongoDB */
	if(found == 1) {

		/* Try to get current version of tag group */
		if( bson_find(&tg_data_iter, bson_tg, "current_version") == BSON_INT ) {
			current_version = bson_iterator_int(&tg_data_iter);
		}

		/* Try to get custom type of tag group */
		if( bson_find(&tg_data_iter, bson_tg, "custom_type") == BSON_INT ) {
			custom_type = bson_iterator_int(&tg_data_iter);
		}

		/* Create tag group with specific ID */
		if((int)current_version != -1 && (int)custom_type != -1) {
			tg = vs_taggroup_create(node, taggroup_id, custom_type);

			if(tg != NULL) {

				tg->state = ENTITY_CREATED;

				/* Save ObjectID to tag group */
				memcpy(&tg->oid, oid, sizeof(bson_oid_t));

				/* Try to get versions of tag group */
				if( bson_find(&tg_data_iter, bson_tg, "versions") == BSON_OBJECT ) {
					bson bson_versions;
					bson_iterator version_iter;
					char str_num[15];

					/* Initialize sub-object of versions */
					bson_iterator_subobject_init(&tg_data_iter, &bson_versions, 0);

					sprintf(str_num, "%u", req_version);

					/* Try to find required version of tag group */
					if( bson_find(&version_iter, &bson_versions, str_num) == BSON_OBJECT ) {
						bson bson_version;

						/* Set version of tag group */
						tg->version = tg->saved_version = current_version;

						bson_iterator_subobject_init(&version_iter, &bson_version, 0);

						/* Try to load tags */
						vs_mongo_taggroup_load_data(tg, &bson_version);
					}
				}
			}
		}
	}

	bson_destroy(&query);
	mongo_cursor_destroy(&cursor);

	return tg;
}
Example No. 27
static ngx_int_t ngx_http_gridfs_handler(ngx_http_request_t* request) {
    ngx_http_gridfs_loc_conf_t* gridfs_conf;
    ngx_http_core_loc_conf_t* core_conf;
    ngx_buf_t* buffer;
    ngx_chain_t out;
    ngx_str_t location_name;
    ngx_str_t full_uri;
    char* value;
    ngx_http_mongo_connection_t *mongo_conn;
    gridfs gfs;
    gridfile gfile;
    gridfs_offset length;
    ngx_uint_t numchunks;
    char* contenttype;
    char* md5;
    bson_date_t last_modified;

    volatile ngx_uint_t i;
    ngx_int_t rc = NGX_OK;
    bson query;
    bson_oid_t oid;
    mongo_cursor ** cursors;
    gridfs_offset chunk_len;
    const char * chunk_data;
    bson_iterator it;
    bson chunk;
    ngx_pool_cleanup_t* gridfs_cln;
    ngx_http_gridfs_cleanup_t* gridfs_clndata;
    int status;
    volatile ngx_uint_t e = FALSE;
    volatile ngx_uint_t ecounter = 0;
    uint64_t range_start = 0;
    uint64_t range_end   = 0;
    uint64_t current_buf_pos = 0;

    gridfs_conf = ngx_http_get_module_loc_conf(request, ngx_http_gridfs_module);
    core_conf = ngx_http_get_module_loc_conf(request, ngx_http_core_module);

    // ---------- ENSURE MONGO CONNECTION ---------- //

    mongo_conn = ngx_http_get_mongo_connection( gridfs_conf->mongo );
    if (mongo_conn == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Mongo Connection not found: \"%V\"", &gridfs_conf->mongo);
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    if (mongo_conn->conn.connected == 0) {
        if (ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Could not connect to mongo: \"%V\"", &gridfs_conf->mongo);
            if(mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
        if (ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to reauth to mongo: \"%V\"", &gridfs_conf->mongo);
            if(mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
            return NGX_HTTP_SERVICE_UNAVAILABLE;
        }
    }

    // ---------- RETRIEVE KEY ---------- //

    location_name = core_conf->name;
    full_uri = request->uri;

    if (full_uri.len < location_name.len) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Invalid location name or uri.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    value = (char*)malloc(sizeof(char) * (full_uri.len - location_name.len + 1));
    if (value == NULL) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Failed to allocate memory for value buffer.");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    memcpy(value, full_uri.data + location_name.len, full_uri.len - location_name.len);
    value[full_uri.len - location_name.len] = '\0';

    if (!url_decode(value)) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                      "Malformed request.");
        free(value);
        return NGX_HTTP_BAD_REQUEST;
    }

    // ---------- RETRIEVE GRIDFILE ---------- //

    bson_init(&query);
    switch (gridfs_conf->type) {
    case  BSON_OID:
        bson_oid_from_string(&oid, value);
        bson_append_oid(&query, (char*)gridfs_conf->field.data, &oid);
        break;
    case BSON_INT:
      bson_append_int(&query, (char*)gridfs_conf->field.data, ngx_atoi((u_char*)value, strlen(value)));
        break;
    case BSON_STRING:
        bson_append_string(&query, (char*)gridfs_conf->field.data, value);
        break;
    }
    bson_finish(&query);

    do {
        e = FALSE;
        if (gridfs_init(&mongo_conn->conn,
                        (const char*)gridfs_conf->db.data,
                        (const char*)gridfs_conf->root_collection.data,
                        &gfs) != MONGO_OK
            || ((status = gridfs_find_query(&gfs, &query, &gfile)) == MONGO_ERROR)) {
            e = TRUE; ecounter++;
            if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                              "Mongo connection dropped, could not reconnect");
                if(&mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                bson_destroy(&query);
                free(value);
                return NGX_HTTP_SERVICE_UNAVAILABLE;
            }
        }
    } while (e);

    bson_destroy(&query);
    free(value);

    /* Get information about the file */
    length = gridfile_get_contentlength(&gfile);
    numchunks = gridfile_get_numchunks(&gfile);

    // NaN workaround
    if (numchunks > INT_MAX)
    {
        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);
        return NGX_HTTP_NOT_FOUND;
    }

    contenttype = (char*)gridfile_get_contenttype(&gfile);

    md5 = (char*)gridfile_get_md5(&gfile);
    last_modified = gridfile_get_uploaddate(&gfile);

    // ---------- Partial Range
    // set follow-fork-mode child
    // attach (pid)
    // break ngx_http_gridfs_module.c:959

    if (request->headers_in.range) {
        gridfs_parse_range(request, &request->headers_in.range->value, &range_start, &range_end, length);
    }

    // ---------- SEND THE HEADERS ---------- //

    if (range_start == 0 && range_end == 0) {
        request->headers_out.status = NGX_HTTP_OK;
        request->headers_out.content_length_n = length;
    } else {
        request->headers_out.status = NGX_HTTP_PARTIAL_CONTENT;
        request->headers_out.content_length_n = length;
        //request->headers_out.content_range = range_end - range_start + 1;

        ngx_table_elt_t   *content_range;

        content_range = ngx_list_push(&request->headers_out.headers);
        if (content_range == NULL) {
            return NGX_ERROR;
        }

        request->headers_out.content_range = content_range;

        content_range->hash = 1;
        ngx_str_set(&content_range->key, "Content-Range");

        content_range->value.data = ngx_pnalloc(request->pool,sizeof("bytes -/") - 1 + 3 * NGX_OFF_T_LEN);
        if (content_range->value.data == NULL) {
            return NGX_ERROR;
        }

        /* "Content-Range: bytes SSSS-EEEE/TTTT" header */
        content_range->value.len = ngx_sprintf(content_range->value.data,
                                               "bytes %O-%O/%O",
                                               range_start, range_end,
                                               request->headers_out.content_length_n)
            - content_range->value.data;

        request->headers_out.content_length_n = range_end - range_start + 1;
    }
    if (contenttype != NULL) {
        request->headers_out.content_type.len = strlen(contenttype);
        request->headers_out.content_type.data = (u_char*)contenttype;
    }
    else ngx_http_set_content_type(request);

    // use md5 field as ETag if possible
    if (md5 != NULL) {
        request->headers_out.etag = ngx_list_push(&request->headers_out.headers);
        request->headers_out.etag->hash = 1;
        request->headers_out.etag->key.len = sizeof("ETag") - 1;
        request->headers_out.etag->key.data = (u_char*)"ETag";

        ngx_buf_t *b;
        b = ngx_create_temp_buf(request->pool, strlen(md5) + 2);
        b->last = ngx_sprintf(b->last, "\"%s\"", md5);
        request->headers_out.etag->value.len = strlen(md5) + 2;
        request->headers_out.etag->value.data = b->start;
    }

    // use uploadDate field as last_modified if possible
    if (last_modified) {
        request->headers_out.last_modified_time = (time_t)(last_modified/1000);
    }

    /* Determine if content is gzipped, set headers accordingly */
    if ( gridfile_get_boolean(&gfile,"gzipped") ) {
        ngx_log_error(NGX_LOG_ERR, request->connection->log, 0, gridfile_get_field(&gfile,"gzipped") );
        request->headers_out.content_encoding = ngx_list_push(&request->headers_out.headers);
        if (request->headers_out.content_encoding == NULL) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
        request->headers_out.content_encoding->hash = 1;
        request->headers_out.content_encoding->key.len = sizeof("Content-Encoding") - 1;
        request->headers_out.content_encoding->key.data = (u_char *) "Content-Encoding";
        request->headers_out.content_encoding->value.len = sizeof("gzip") - 1;
        request->headers_out.content_encoding->value.data = (u_char *) "gzip";
    }

    ngx_http_send_header(request);

    // ---------- SEND THE BODY ---------- //

    /* Empty file */
    if (numchunks == 0) {
        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        buffer->pos = NULL;
        buffer->last = NULL;
        buffer->memory = 1;
        buffer->last_buf = 1;
        out.buf = buffer;
        out.next = NULL;

        gridfile_destroy(&gfile);
        gridfs_destroy(&gfs);

        return ngx_http_output_filter(request, &out);
    }

    cursors = (mongo_cursor **)ngx_pcalloc(request->pool, sizeof(mongo_cursor *) * numchunks);
    if (cursors == NULL) {
      gridfile_destroy(&gfile);
      gridfs_destroy(&gfs);
      return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ngx_memzero( cursors, sizeof(mongo_cursor *) * numchunks);

    /* Hook in the cleanup function */
    gridfs_cln = ngx_pool_cleanup_add(request->pool, sizeof(ngx_http_gridfs_cleanup_t));
    if (gridfs_cln == NULL) {
      gridfile_destroy(&gfile);
      gridfs_destroy(&gfs);
      return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    gridfs_cln->handler = ngx_http_gridfs_cleanup;
    gridfs_clndata = gridfs_cln->data;
    gridfs_clndata->cursors = cursors;
    gridfs_clndata->numchunks = numchunks;

    /* Read and serve chunk by chunk */
    for (i = 0; i < numchunks; i++) {

        /* Allocate space for the response buffer */
        buffer = ngx_pcalloc(request->pool, sizeof(ngx_buf_t));
        if (buffer == NULL) {
            ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                          "Failed to allocate response buffer");
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* Fetch the chunk from mongo */
        do {
            e = FALSE;
            cursors[i] = gridfile_get_chunks(&gfile, i, 1);
            if (!(cursors[i] && mongo_cursor_next(cursors[i]) == MONGO_OK)) {
                e = TRUE; ecounter++;
                if (ecounter > MONGO_MAX_RETRIES_PER_REQUEST
                    || ngx_http_mongo_reconnect(request->connection->log, mongo_conn) == NGX_ERROR
                    || ngx_http_mongo_reauth(request->connection->log, mongo_conn) == NGX_ERROR) {
                    ngx_log_error(NGX_LOG_ERR, request->connection->log, 0,
                                  "Mongo connection dropped, could not reconnect");
                    if(mongo_conn->conn.connected) { mongo_disconnect(&mongo_conn->conn); }
                    gridfile_destroy(&gfile);
                    gridfs_destroy(&gfs);
                    return NGX_HTTP_SERVICE_UNAVAILABLE;
                }
            }
        } while (e);

        chunk = cursors[i]->current;
        bson_find(&it, &chunk, "data");
        chunk_len = bson_iterator_bin_len( &it ); // break ngx_http_gridfs_module.c:1099
        chunk_data = bson_iterator_bin_data( &it );

        if (range_start == 0 && range_end == 0) {
            /* <<no range request>> */
            /* Set up the buffer chain */
            buffer->pos = (u_char*)chunk_data;
            buffer->last = (u_char*)chunk_data + chunk_len;
            buffer->memory = 1;
            buffer->last_buf = (i == numchunks-1);
            out.buf = buffer;
            out.next = NULL;

            /* Serve the Chunk */
            rc = ngx_http_output_filter(request, &out);
        } else {
            /* <<range request>> */
            if ( range_start >= (current_buf_pos+chunk_len) ||
                 range_end <= current_buf_pos) {
                /* no output */
                ngx_pfree(request->pool, buffer);
            } else {
                if (range_start <= current_buf_pos) {
                    buffer->pos = (u_char*)chunk_data;
                } else {
                    buffer->pos = (u_char*)chunk_data + (range_start - current_buf_pos);
                }
                if (range_end < (current_buf_pos+chunk_len)) {
                    buffer->last = (u_char*)chunk_data + (range_end - current_buf_pos + 1);
                } else {
                    buffer->last = (u_char*)chunk_data + chunk_len;
                }
                if (buffer->pos == buffer->last) {
                    ngx_log_error(NGX_LOG_ALERT, request->connection->log, 0,
                                  "zero size buf in writer "
                                  "range_start:%d range_end:%d "
                                  "current_buf_pos:%d chunk_len:%d i:%d numchunk:%d",
                                  range_start,range_end,
                                  current_buf_pos, chunk_len,
                                  i,numchunks);
                }
                buffer->memory = 1;
                buffer->last_buf = (i == numchunks-1) || (range_end < (current_buf_pos+chunk_len));
                out.buf = buffer;
                out.next = NULL;

                /* Serve the Chunk */
                rc = ngx_http_output_filter(request, &out);
            }
        }

        current_buf_pos += chunk_len;

        /* TODO: More Codes to Catch? */
        if (rc == NGX_ERROR) {
            gridfile_destroy(&gfile);
            gridfs_destroy(&gfs);
            return NGX_ERROR;
        }
    }

    gridfile_destroy(&gfile);
    gridfs_destroy(&gfs);

    return rc;
}
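
The per-chunk range arithmetic in the loop above can be read as one small pure function that clips the inclusive byte range [range_start, range_end] against chunk i. A sketch that mirrors the module's comparisons exactly:

#include <stdint.h>

/* Sketch: compute the slice of a chunk that falls inside the requested
 * range. Returns 0 when the chunk produces no output at all. */
static int clip_chunk(uint64_t range_start, uint64_t range_end,
                      uint64_t chunk_pos,   /* current_buf_pos above */
                      uint64_t chunk_len,
                      uint64_t *slice_begin, uint64_t *slice_end)
{
    if (range_start >= chunk_pos + chunk_len || range_end <= chunk_pos) {
        return 0;  /* chunk lies entirely outside the range */
    }
    *slice_begin = (range_start <= chunk_pos) ? 0 : range_start - chunk_pos;
    *slice_end   = (range_end < chunk_pos + chunk_len)
                       ? range_end - chunk_pos + 1  /* range_end is inclusive */
                       : chunk_len;
    return 1;
}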
Example No. 28
/**
 * \brief This function saves the current version of a tag group to MongoDB
 */
static void vs_mongo_taggroup_save_version(struct VSTagGroup *tg,
		bson *bson_tg,
		uint32 version)
{
	bson bson_version;
	bson bson_tag;
	struct VBucket *bucket;
	struct VSTag *tag;
	char str_num[15];
	int item_id;

	bson_init(&bson_version);

	bson_append_int(&bson_version, "crc32", tg->crc32);

	bson_append_start_object(&bson_version, "tags");

	bucket = tg->tags.lb.first;
	while(bucket != NULL) {
		tag = (struct VSTag*)bucket->data;

		bson_init(&bson_tag);
		bson_append_int(&bson_tag, "data_type", tag->data_type);
		bson_append_int(&bson_tag, "count", tag->count);
		bson_append_int(&bson_tag, "custom_type", tag->custom_type);

		bson_append_start_array(&bson_tag, "data");
		switch(tag->data_type) {
		case VRS_VALUE_TYPE_UINT8:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint8*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT16:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint16*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT32:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_int(&bson_tag, str_num, ((uint32*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_UINT64:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_long(&bson_tag, str_num, ((uint64*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_REAL16:
			/* TODO */
			break;
		case VRS_VALUE_TYPE_REAL32:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_double(&bson_tag, str_num, ((float*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_REAL64:
			for(item_id = 0; item_id < tag->count; item_id++) {
				sprintf(str_num, "%d", item_id);
				bson_append_double(&bson_tag, str_num, ((double*)tag->value)[item_id]);
			}
			break;
		case VRS_VALUE_TYPE_STRING8:
			bson_append_string(&bson_tag, "0", (char*)tag->value);
			break;
		}
		bson_append_finish_array(&bson_tag);

		bson_finish(&bson_tag);

		sprintf(str_num, "%d", tag->id);
		bson_append_bson(&bson_version, str_num, &bson_tag);

		bucket = bucket->next;
	}

	bson_append_finish_object(&bson_version);

	bson_finish(&bson_version);

	sprintf(str_num, "%u", version);
	bson_append_bson(bson_tg, str_num, &bson_version);
}
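
The document appended to bson_tg above is keyed by the version number and has roughly this layout (illustrative):

/* "<version>" : { "crc32" : <crc>,
 *                 "tags"  : { "<tag_id>" : { "data_type"   : <type>,
 *                                            "count"       : <n>,
 *                                            "custom_type" : <type>,
 *                                            "data"        : [ ... ] } } } */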
Example No. 29
static
_mongoc_write_command(mongoc_write_command_t       *command,
                      mongoc_client_t              *client,
                      mongoc_server_stream_t       *server_stream,
                      const char                   *database,
                      const char                   *collection,
                      const mongoc_write_concern_t *write_concern,
                      uint32_t                      offset,
                      mongoc_write_result_t        *result,
                      bson_error_t                 *error)
{
   const uint8_t *data;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t tmp;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str [16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   int32_t min_wire_version;
   uint32_t key_len;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream);

   /*
    * If we have an unacknowledged write and the server supports the legacy
    * opcodes, then submit the legacy opcode so we don't need to wait for
    * a response from the server.
    */

   min_wire_version = server_stream->sd->min_wire_version;
   if ((min_wire_version == 0) &&
       !_mongoc_write_concern_needs_gle (write_concern)) {
      if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Cannot set bypassDocumentValidation for unacknowledged writes");
         EXIT;
      }
      gLegacyWriteOps[command->type] (command, client, server_stream, database,
                                      collection, write_concern, offset,
                                      result, error);
      EXIT;
   }

   if (!command->n_documents ||
       !bson_iter_init (&iter, command->documents) ||
       !bson_iter_next (&iter)) {
      _empty_error (command, error);
      result->failed = true;
      EXIT;
   }

again:
   bson_init (&cmd);
   has_more = false;
   i = 0;

   BSON_APPEND_UTF8 (&cmd, gCommandNames[command->type], collection);
   BSON_APPEND_DOCUMENT (&cmd, "writeConcern",
                         WRITE_CONCERN_DOC (write_concern));
   BSON_APPEND_BOOL (&cmd, "ordered", command->flags.ordered);
   if (command->flags.bypass_document_validation != MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
      BSON_APPEND_BOOL (&cmd, "bypassDocumentValidation",
                        !!command->flags.bypass_document_validation);
   }

   if (!_mongoc_write_command_will_overflow (0,
                                             command->documents->len,
                                             command->n_documents,
                                             max_bson_obj_size,
                                             max_write_batch_size)) {
      /* copy the whole documents buffer as e.g. "updates": [...] */
      BSON_APPEND_ARRAY (&cmd,
                         gCommandFields[command->type],
                         command->documents);
      i = command->n_documents;
   } else {
      bson_append_array_begin (&cmd, gCommandFields[command->type], -1, &ar);

      do {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            BSON_ASSERT (false);
         }

         bson_iter_document (&iter, &len, &data);
         key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);

         if (_mongoc_write_command_will_overflow (ar.len,
                                                  key_len + len + 2,
                                                  i,
                                                  max_bson_obj_size,
                                                  max_write_batch_size)) {
            has_more = true;
            break;
         }

         if (!bson_init_static (&tmp, data, len)) {
            BSON_ASSERT (false);
         }

         BSON_APPEND_DOCUMENT (&ar, key, &tmp);

         bson_destroy (&tmp);

         i++;
      } while (bson_iter_next (&iter));

      bson_append_array_end (&cmd, &ar);
   }

   if (!i) {
      too_large_error (error, i, len, max_bson_obj_size, NULL);
      result->failed = true;
      ret = false;
   } else {
      ret = mongoc_cluster_run_command (&client->cluster, server_stream->stream,
                                        MONGOC_QUERY_NONE, database, &cmd,
                                        &reply, error);

      if (!ret) {
         result->failed = true;
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
   }

   bson_destroy (&cmd);

   if (has_more && (ret || !command->flags.ordered)) {
      GOTO (again);
   }

   EXIT;
}
Example No. 30
bson * bson_empty(bson * obj){
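    /* A minimal BSON document is exactly 5 bytes: a little-endian int32
     * total length (0x05) followed by a single NUL terminating the
     * (empty) element list. */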
    static char * data = "\005\0\0\0\0";
    bson_init(obj, data, 0);
    return obj;
}