Example No. 1
/*{{{ create / destroy */
CacheMng *cache_mng_create (Application *app)
{
    CacheMng *cmng;

    cmng = g_new0 (CacheMng, 1);
    cmng->app = app;
    cmng->h_entries = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, cache_entry_destroy);
    cmng->q_lru = g_queue_new ();
    cmng->size = 0;
    cmng->check_time = time (NULL);
    cmng->max_size = conf_get_uint (application_get_conf (cmng->app), "filesystem.cache_dir_max_size");
    cmng->cache_dir = g_strdup_printf ("%s/%s", 
        conf_get_string (application_get_conf (cmng->app), "filesystem.cache_dir"), CACHE_MNGR_DIR);
    cmng->cache_hits = 0;
    cmng->cache_miss = 0;

    cache_mng_rm_cache_dir (cmng);
    if (g_mkdir_with_parents (cmng->cache_dir, 0700) != 0) {
        LOG_err (CMNG_LOG, "Failed to create directory: %s", cmng->cache_dir);
        cache_mng_destroy (cmng);
        return NULL;
    }

    return cmng;
}
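
The "create / destroy" fold above and the error path in cache_mng_create () imply a matching cache_mng_destroy (). Its body is not included in these examples; a minimal sketch, assuming only the fields initialized above need releasing, could look like this:

// assumed sketch, not the project's actual cache_mng_destroy ()
void cache_mng_destroy (CacheMng *cmng)
{
    if (!cmng)
        return;

    cache_mng_rm_cache_dir (cmng);           // wipe the on-disk cache directory
    g_hash_table_destroy (cmng->h_entries);  // entries are freed via cache_entry_destroy
    g_queue_free (cmng->q_lru);
    g_free (cmng->cache_dir);
    g_free (cmng);
}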
Example No. 2
void stats_srv_add_op_history (StatSrv *stat_srv, const gchar *str)
{
    // stats server is disabled
    if (!conf_get_boolean (application_get_conf (stat_srv->app), "statistics.enabled"))
        return;

    // maintain queue size
    while (g_queue_get_length (stat_srv->q_op_history) + 1 >= conf_get_uint (application_get_conf (stat_srv->app), "statistics.history_size")) {
        gchar *tmp = g_queue_pop_tail (stat_srv->q_op_history);
        g_free (tmp);
    }

    g_queue_push_head (stat_srv->q_op_history, g_strdup (str));
}
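
Because the string is copied with g_strdup () before being queued, the caller keeps ownership of its buffer. A hypothetical call site (the helper name and format string below are illustrative only):

// hypothetical caller sketch, not part of the project
static void log_read_op (StatSrv *stat_srv, const gchar *path, size_t buf_len)
{
    gchar *item = g_strdup_printf ("GET %s (%zu bytes)", path, buf_len);
    stats_srv_add_op_history (stat_srv, item);
    g_free (item);  // safe: stats_srv_add_op_history () stored its own copy
}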
Example No. 3
// got HttpConnection object
static void fileio_write_on_multipart_init_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileWriteData *wdata = (FileWriteData *) ctx;
    gboolean res;
    gchar *path;

    http_connection_acquire (con);

    path = g_strdup_printf ("%s?uploads", wdata->fop->fname);

    // send storage class with the init request
    http_connection_add_output_header (con, "x-amz-storage-class", conf_get_string (application_get_conf (con->app), "s3.storage_type"));

    res = http_connection_make_request (con,
        path, "POST", NULL, TRUE, NULL,
        fileio_write_on_multipart_init_cb,
        wdata
    );
    g_free (path);

    if (!res) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to create HTTP request !", INO_T (wdata->ino), con);
        http_connection_release (con);
        wdata->on_buffer_written_cb (wdata->fop, wdata->ctx, FALSE, 0);
        g_free (wdata);
        return;
    }
}
Example No. 4
static void fileio_simple_download_on_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileIOSimpleDownload *fsim = (FileIOSimpleDownload *) ctx;
    gboolean res;

    LOG_debug (FIO_LOG, CON_H"Downloading data.", con);

    http_connection_acquire (con);

    http_connection_add_output_header (con, "x-amz-storage-class", conf_get_string (application_get_conf (con->app), "s3.storage_type"));

    res = http_connection_make_request (con,
        fsim->fname, "GET", NULL, TRUE, NULL,
        fileio_simple_download_on_sent_cb,
        fsim
    );

    if (!res) {
        LOG_err (FIO_LOG, CON_H"Failed to create HTTP request !", con);
        http_connection_release (con);
        fsim->on_download_cb (fsim->ctx, FALSE, NULL, 0);
        fileio_simple_download_destroy (fsim);
        return;
    }
}
Example No. 5
// got HttpConnection object
static void fileio_release_on_update_headers_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileIO *fop = (FileIO *) ctx;
    gchar *path;
    gchar *cpy_path;
    gboolean res;
    unsigned char digest[16];
    gchar *md5str;
    size_t i;

    LOG_debug (FIO_LOG, INO_CON_H"Updating object's headers..", INO_T (fop->ino), con);

    http_connection_acquire (con);

    if (fop->content_type)
        http_connection_add_output_header (con, "Content-Type", fop->content_type);
    http_connection_add_output_header (con, "x-amz-metadata-directive", "REPLACE");
    http_connection_add_output_header (con, "x-amz-storage-class", conf_get_string (application_get_conf (con->app), "s3.storage_type"));

    MD5_Final (digest, &fop->md5);
    md5str = g_malloc (33);
    for (i = 0; i < 16; ++i)
        sprintf(&md5str[i*2], "%02x", (unsigned int)digest[i]);
    http_connection_add_output_header (con, "x-amz-meta-md5", md5str);
    g_free (md5str);

    cpy_path = g_strdup_printf ("%s%s", conf_get_string (application_get_conf (fop->app), "s3.bucket_name"), fop->fname);
    http_connection_add_output_header (con, "x-amz-copy-source", cpy_path);
    g_free (cpy_path);

    path = g_strdup (fop->fname);
    res = http_connection_make_request (con,
        path, "PUT", NULL, TRUE, NULL,
        fileio_release_on_update_header_cb,
        fop
    );
    g_free (path);

    if (!res) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to create HTTP request !", INO_T (fop->ino), con);
        http_connection_release (con);
        fileio_destroy (fop);
        return;
    }
}
Example No. 6
void fileio_write_buffer (FileIO *fop,
    const char *buf, size_t buf_size, off_t off, fuse_ino_t ino,
    FileIO_on_buffer_written_cb on_buffer_written_cb, gpointer ctx)
{
    FileWriteData *wdata;

    // XXX: only sequential writes are allowed:
    // the number of bytes already written must always match the offset
    if (off >= 0 && fop->current_size != (guint64)off) {
        LOG_err (FIO_LOG, INO_H"Write call with offset %"OFF_FMT" is not allowed !", INO_T (ino), off);
        on_buffer_written_cb (fop, ctx, FALSE, 0);
        return;
    }

    // add data to output buffer
    evbuffer_add (fop->write_buf, buf, buf_size);
    fop->current_size += buf_size;

    LOG_debug (FIO_LOG, INO_H"Write buf size: %zu", INO_T (ino), evbuffer_get_length (fop->write_buf));

    // CacheMng
    cache_mng_store_file_buf (application_get_cache_mng (fop->app),
        ino, buf_size, off, (unsigned char *) buf,
        NULL, NULL);

    // if current write buffer exceeds "part_size" - this is a multipart upload
    if (evbuffer_get_length (fop->write_buf) >= conf_get_uint (application_get_conf (fop->app), "s3.part_size")) {
        // init helper struct
        wdata = g_new0 (FileWriteData, 1);
        wdata->fop = fop;
        wdata->buf_size = buf_size;
        wdata->off = off;
        wdata->ino = ino;
        wdata->on_buffer_written_cb = on_buffer_written_cb;
        wdata->ctx = ctx;

        // init multipart upload
        if (!fop->multipart_initiated) {
            fileio_write_init_multipart (wdata);

        // else send the current part
        } else {
            fileio_write_send_part (wdata);
        }

    // or just notify client that we are ready for more data
    } else {
        on_buffer_written_cb (fop, ctx, TRUE, buf_size);
    }
}
Example No. 7
// create S3HttpConnection object
// establish HTTP connections to S3
gpointer s3http_connection_create (Application *app)
{
    S3HttpConnection *con;
    int port;
    AppConf *conf;

    con = g_new0 (S3HttpConnection, 1);
    if (!con) {
        LOG_err (CON_LOG, "Failed to create S3HttpConnection !");
        return NULL;
    }
    
    conf = application_get_conf (app);
    con->app = app;
    con->bucket_name = g_strdup (application_get_bucket_name (app));

    con->is_acquired = FALSE;

    port = application_get_port (app);
    // if no port is specified, libevent returns -1
    if (port == -1) {
        port = conf->http_port;
    }

    LOG_debug (CON_LOG, "Connecting to %s:%d", 
        application_get_host (app),
        port
    );

    // XXX: implement SSL
    con->evcon = evhttp_connection_base_new (
        application_get_evbase (app),
        application_get_dnsbase (app),
        application_get_host (app),
        port
    );

    if (!con->evcon) {
        LOG_err (CON_LOG, "Failed to create evhttp_connection !");
        g_free (con->bucket_name);
        g_free (con);
        return NULL;
    }
    
    evhttp_connection_set_timeout (con->evcon, conf->timeout);
    evhttp_connection_set_retries (con->evcon, conf->retries);

    evhttp_connection_set_closecb (con->evcon, s3http_connection_on_close, con);

    return (gpointer)con;
}
Example No. 8
static void fileio_release_update_headers (FileIO *fop)
{
    // update MD5 headers only if versioning is disabled
    if (conf_get_boolean (application_get_conf (fop->app), "s3.versioning")) {
        LOG_debug (FIO_LOG, INO_H"File uploaded !", INO_T (fop->ino));
        fileio_destroy (fop);
    } else {
        if (!client_pool_get_client (application_get_write_client_pool (fop->app),
            fileio_release_on_update_headers_con_cb, fop)) {
            LOG_err (FIO_LOG, INO_H"Failed to get HTTP client !", INO_T (fop->ino));
            fileio_destroy (fop);
            return;
        }
    }
}
Example No. 9
// prepare HTTP request
static void dir_tree_file_read_prepare_request (DirTreeFileOpData *op_data, S3HttpClient *http, off_t off, size_t size)
{
    gchar *auth_str;
    char time_str[100];
    time_t t = time (NULL);
    gchar auth_key[300];
    gchar *url;
    gchar range[300];
    AppConf *conf;

    s3http_client_request_reset (http);

    s3http_client_set_cb_ctx (http, op_data);
//    s3http_client_set_on_chunk_cb (http, dir_tree_file_read_on_chunk_cb);
    s3http_client_set_on_last_chunk_cb (http, dir_tree_file_read_on_last_chunk_cb);
    s3http_client_set_output_length (http, 0);
    
    strftime (time_str, sizeof (time_str), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t));

    auth_str = (gchar *)s3http_connection_get_auth_string (op_data->dtree->app, "GET", "", op_data->en->fullpath, time_str);
    snprintf (auth_key, sizeof (auth_key), "AWS %s:%s", application_get_access_key_id (op_data->dtree->app), auth_str);
    g_free (auth_str);
    snprintf (range, sizeof (range), "bytes=%"OFF_FMT"-%"OFF_FMT, off, off+size - 1);
    LOG_debug (DIR_TREE_LOG, "range: %s", range);

    s3http_client_add_output_header (http, "Authorization", auth_key);
    s3http_client_add_output_header (http, "Date", time_str);
    s3http_client_add_output_header (http, "Range", range);
    s3http_client_add_output_header (http, "Host", application_get_host_header (op_data->dtree->app));

    // XXX: HTTPS
    conf = application_get_conf (op_data->dtree->app);
    if (conf->path_style) {
        url = g_strdup_printf ("http://%s:%d/%s%s", application_get_host (op_data->dtree->app),
                                                    application_get_port (op_data->dtree->app),
                                                    application_get_bucket_name (op_data->dtree->app),
                                                    op_data->en->fullpath);
    } else {
        url = g_strdup_printf ("http://%s:%d%s", application_get_host (op_data->dtree->app),
                                                application_get_port (op_data->dtree->app),
                                                op_data->en->fullpath);
    }
    
    s3http_client_start_request (http, S3Method_get, url);

    g_free (url);
}
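
s3http_connection_get_auth_string () is not shown in these examples. Assuming it implements the classic AWS signature version 2 scheme (HMAC-SHA1 over a canonical string, base64-encoded), a simplified sketch could look like the following; the real signer would also have to fold in the x-amz-* headers and the bucket-qualified resource path:

#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <glib.h>

// simplified AWS signature v2 signer (assumed behaviour, not the project's actual implementation)
static gchar *sign_request_v2 (const gchar *secret_key, const gchar *method,
    const gchar *content_md5, const gchar *content_type,
    const gchar *date, const gchar *canonical_resource)
{
    gchar *string_to_sign;
    unsigned char digest[EVP_MAX_MD_SIZE];
    unsigned int digest_len = 0;
    gchar *signature;

    // StringToSign = Verb \n Content-MD5 \n Content-Type \n Date \n CanonicalizedResource
    string_to_sign = g_strdup_printf ("%s\n%s\n%s\n%s\n%s",
        method,
        content_md5 ? content_md5 : "",
        content_type ? content_type : "",
        date, canonical_resource);

    HMAC (EVP_sha1 (), secret_key, strlen (secret_key),
        (const unsigned char *) string_to_sign, strlen (string_to_sign),
        digest, &digest_len);
    g_free (string_to_sign);

    // the "Authorization: AWS AccessKeyId:Signature" header expects the base64 form
    signature = g_base64_encode (digest, digest_len);
    return signature;  // caller frees with g_free ()
}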
Example No. 10
// connect to the remote server
static void s3http_client_connect (S3HttpClient *http)
{
    int port;
    AppConf *conf;
    
    if (http->connection_state == S3C_connecting)
        return;

    conf = application_get_conf (http->app);

    if (http->bev)
        bufferevent_free (http->bev);

    http->bev = bufferevent_socket_new (http->evbase, -1, 0);
    if (!http->bev) {
        LOG_err (HTTP_LOG, "Failed to create HTTP object!");
        return;
    }
    // XXX: 
    // bufferevent_set_timeouts (http->bev, 


    port = evhttp_uri_get_port (http->http_uri);
    // if no port is specified, libevent returns -1
    if (port == -1) {
        port = conf->http_port;
    }
    
    LOG_debug (HTTP_LOG, "Connecting to %s:%d .. %p",
        evhttp_uri_get_host (http->http_uri),
        port, http
    );

    http->connection_state = S3C_connecting;
    
    bufferevent_enable (http->bev, EV_WRITE);
    bufferevent_setcb (http->bev, 
        NULL, NULL, s3http_client_connection_event_cb,
        http
    );

    bufferevent_socket_connect_hostname (http->bev, http->dns_base, 
        AF_UNSPEC,
        evhttp_uri_get_host (http->http_uri),
        port
    );
}
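
The connect call above completes asynchronously; success or failure is delivered to s3http_client_connection_event_cb, which is not part of these examples. A generic libevent event callback of that shape (an illustrative sketch, not the project's code) looks like this:

// illustrative bufferevent event callback sketch, not the project's s3http_client_connection_event_cb
static void on_connection_event (struct bufferevent *bev, short events, void *ctx)
{
    S3HttpClient *http = (S3HttpClient *) ctx;

    if (events & BEV_EVENT_CONNECTED) {
        // the asynchronous connect issued above has completed
        bufferevent_enable (bev, EV_READ | EV_WRITE);
        // ... http can now send the pending request
    } else if (events & (BEV_EVENT_ERROR | BEV_EVENT_EOF | BEV_EVENT_TIMEOUT)) {
        LOG_err (HTTP_LOG, "Connection error for client %p", http);
        // ... reset state so a later s3http_client_connect () can retry
    }
}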
Example No. 11
// got HttpConnection object
static void fileio_read_on_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileReadData *rdata = (FileReadData *) ctx;
    gboolean res;
    guint64 part_size;

    http_connection_acquire (con);

    part_size = conf_get_uint (application_get_conf (rdata->fop->app), "s3.part_size");

    // small file - get the whole file at once
    if (rdata->fop->file_size < part_size)
        rdata->request_offset = 0;

    // calculate offset
    else {
        gchar *range_hdr;

        if (part_size < rdata->size)
            part_size = rdata->size;

        rdata->request_offset = rdata->off;
        range_hdr = g_strdup_printf ("bytes=%"G_GUINT64_FORMAT"-%"G_GUINT64_FORMAT,
            (guint64)rdata->request_offset, (guint64)(rdata->request_offset + part_size));
        http_connection_add_output_header (con, "Range", range_hdr);
        g_free (range_hdr);
    }

    res = http_connection_make_request (con,
        rdata->fop->fname, "GET", NULL, TRUE, NULL,
        fileio_read_on_get_cb,
        rdata
    );

    if (!res) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to create HTTP request !", INO_T (rdata->ino), con);
        http_connection_release (con);
        rdata->on_buffer_read_cb (rdata->ctx, FALSE, NULL, 0);
        g_free (rdata);
        return;
    }
}
Example No. 12
DirTree *dir_tree_create (Application *app)
{
    DirTree *dtree;
    AppConf *conf;

    conf = application_get_conf (app);
    dtree = g_new0 (DirTree, 1);
    dtree->app = app;
    // children entries are destroyed by parent directory entries
    dtree->h_inodes = g_hash_table_new (g_direct_hash, g_direct_equal);
    dtree->max_ino = FUSE_ROOT_ID;
    dtree->current_age = 0;
    dtree->dir_cache_max_time = conf->dir_cache_max_time; //XXX
    dtree->current_write_ops = 0;

    dtree->root = dir_tree_add_entry (dtree, "/", DIR_DEFAULT_MODE, DET_dir, 0, 0, time (NULL));

    LOG_debug (DIR_TREE_LOG, "DirTree created");

    return dtree;
}
Example No. 13
static void fileio_simple_upload_on_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileIOSimpleUpload *fsim = (FileIOSimpleUpload *) ctx;
    time_t t;
    gchar str[10];
    char time_str[50];
    gboolean res;

    LOG_debug (FIO_LOG, CON_H"Uploading data. Size: %zu", con, evbuffer_get_length (fsim->write_buf));

    http_connection_acquire (con);

    snprintf (str, sizeof (str), "%d", fsim->mode);

    http_connection_add_output_header (con, "x-amz-storage-class", conf_get_string (application_get_conf (con->app), "s3.storage_type"));
    http_connection_add_output_header (con, "x-amz-meta-mode", str);

    t = time (NULL);
    if (strftime (time_str, sizeof (time_str), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t))) {
        http_connection_add_output_header (con, "x-amz-meta-date", time_str);
    }

    res = http_connection_make_request (con,
        fsim->fname, "PUT", fsim->write_buf, TRUE, NULL,
        fileio_simple_upload_on_sent_cb,
        fsim
    );

    if (!res) {
        LOG_err (FIO_LOG, CON_H"Failed to create HTTP request !", con);
        http_connection_release (con);
        fsim->on_upload_cb (fsim->ctx, FALSE);
        fileio_simple_upload_destroy (fsim);
        return;
    }
}
Example No. 14
// creates a connection pool object with client_count clients
// returns NULL on error
S3ClientPool *s3client_pool_create (Application *app, 
    gint client_count,
    S3ClientPool_client_create client_create, 
    S3ClientPool_client_destroy client_destroy, 
    S3ClientPool_client_set_on_released_cb client_set_on_released_cb,
    S3ClientPool_client_check_rediness client_check_rediness)
{
    S3ClientPool *pool;
    gint i;
    PoolClient *pc;
    AppConf *conf;

    conf = application_get_conf (app);

    pool = g_new0 (S3ClientPool, 1);
    pool->app = app;
    pool->evbase = application_get_evbase (app);
    pool->dns_base = application_get_dnsbase (app);
    pool->l_clients = NULL;
    pool->q_requests = g_queue_new ();
    pool->max_requests = conf->max_requests_per_pool;
   
    for (i = 0; i < client_count; i++) {
        pc = g_new0 (PoolClient, 1);
        pc->pool = pool;
        pc->client = client_create (app);
        pc->client_check_rediness = client_check_rediness;
        pc->client_destroy = client_destroy;
        // add to the list
        pool->l_clients = g_list_append (pool->l_clients, pc);
        // add callback
        client_set_on_released_cb (pc->client, s3client_pool_on_client_released, pc);
    }

    return pool;
}
Example No. 15
StatSrv *stat_srv_create (Application *app)
{
    StatSrv *stat_srv;
    
    stat_srv = g_new0 (StatSrv, 1);
    stat_srv->app = app;
    stat_srv->q_op_history = g_queue_new ();
    stat_srv->boot_time = time (NULL);

    // stats server is disabled
    if (!conf_get_boolean (application_get_conf (stat_srv->app), "statistics.enabled")) {
        return stat_srv;
    }

    stat_srv->http = evhttp_new (application_get_evbase (app));
    if (!stat_srv->http) {
        LOG_err (STAT_LOG, "Failed to create statistics server !");
        g_queue_free (stat_srv->q_op_history);
        g_free (stat_srv);
        return NULL;
    }

    // bind
    if (evhttp_bind_socket (stat_srv->http,
        conf_get_string (application_get_conf (stat_srv->app), "statistics.host"),
        conf_get_int (application_get_conf (stat_srv->app), "statistics.port")) == -1) {
        LOG_err (STAT_LOG, "Failed to bind statistics server to %s:%d",
            conf_get_string (application_get_conf (stat_srv->app), "statistics.host"),
            conf_get_int (application_get_conf (stat_srv->app), "statistics.port")
        );
        evhttp_free (stat_srv->http);
        g_queue_free (stat_srv->q_op_history);
        g_free (stat_srv);
        return NULL;
    }

    // install handlers
    evhttp_set_cb (stat_srv->http, conf_get_string (application_get_conf (stat_srv->app), "statistics.stats_path"), stat_srv_on_stats_cb, stat_srv);
    evhttp_set_gencb (stat_srv->http, stat_srv_on_gen_cb, stat_srv);

    return stat_srv;
}
Example No. 16
gboolean s3http_connection_make_request (S3HttpConnection *con, 
    const gchar *resource_path, const gchar *request_str,
    const gchar *http_cmd,
    struct evbuffer *out_buffer,
    S3HttpConnection_responce_cb responce_cb,
    S3HttpConnection_error_cb error_cb,
    gpointer ctx)
{
    gchar *auth_str;
    gchar *path = NULL;    // bucket-prefixed path, only allocated for path-style requests
    struct evhttp_request *req;
    gchar auth_key[300];
    time_t t;
    char time_str[50];
    RequestData *data;
    int res;
    enum evhttp_cmd_type cmd_type;
    AppConf *conf;

    data = g_new0 (RequestData, 1);
    data->responce_cb = responce_cb;
    data->error_cb = error_cb;
    data->ctx = ctx;
    data->con = con;
    
    if (!strcasecmp (http_cmd, "GET")) {
        cmd_type = EVHTTP_REQ_GET;
    } else if (!strcasecmp (http_cmd, "PUT")) {
        cmd_type = EVHTTP_REQ_PUT;
    } else if (!strcasecmp (http_cmd, "DELETE")) {
        cmd_type = EVHTTP_REQ_DELETE;
    } else if (!strcasecmp (http_cmd, "HEAD")) {
        cmd_type = EVHTTP_REQ_HEAD;
    } else {
        LOG_err (CON_LOG, "Unsupported HTTP method: %s", http_cmd);
        g_free (data);
        return FALSE;
    }
    
    t = time (NULL);
    strftime (time_str, sizeof (time_str), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t));
    auth_str = s3http_connection_get_auth_string (con->app, http_cmd, "", resource_path, time_str);
    snprintf (auth_key, sizeof (auth_key), "AWS %s:%s", application_get_access_key_id (con->app), auth_str);
    g_free (auth_str);

    req = evhttp_request_new (s3http_connection_on_responce_cb, data);
    if (!req) {
        LOG_err (CON_LOG, "Failed to create HTTP request object !");
        g_free (data);
        return FALSE;
    }

    evhttp_add_header (req->output_headers, "Authorization", auth_key);
    evhttp_add_header (req->output_headers, "Host", application_get_host_header (con->app));
	evhttp_add_header (req->output_headers, "Date", time_str);

    if (out_buffer) {
        evbuffer_add_buffer (req->output_buffer, out_buffer);
    }

    conf = application_get_conf (con->app);
    if (conf->path_style) {
        // prepend the bucket name for path-style requests; keep the pointer so it can be freed
        path = g_strdup_printf ("/%s%s", application_get_bucket_name (con->app), request_str);
        request_str = path;
    }

    LOG_debug (CON_LOG, "[%p] bucket: %s path: %s", con, application_get_bucket_name (con->app), request_str);

    res = evhttp_make_request (s3http_connection_get_evcon (con), req, cmd_type, request_str);
    g_free (path);

    if (res < 0)
        return FALSE;
    else
        return TRUE;
}
Example No. 17
// got HttpConnection object
static void fileio_release_on_part_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileIO *fop = (FileIO *) ctx;
    gchar *path;
    gboolean res;
    FileIOPart *part;
    size_t buf_len;
    const gchar *buf;

    LOG_debug (FIO_LOG, INO_CON_H"Releasing fop. Size: %zu", INO_T (fop->ino), con, evbuffer_get_length (fop->write_buf));

    // add part information to the list
    part = g_new0 (FileIOPart, 1);
    part->part_number = fop->part_number;
    buf_len = evbuffer_get_length (fop->write_buf);
    buf = (const gchar *)evbuffer_pullup (fop->write_buf, buf_len);

    // XXX: move to separate thread
    // 1. calculate MD5 of a part.
    get_md5_sum (buf, buf_len, &part->md5str, &part->md5b);
    // 2. calculate MD5 of multiple message blocks
    MD5_Update (&fop->md5, buf, buf_len);

    fop->l_parts = g_list_append (fop->l_parts, part);

    // if this is a multipart
    if (fop->multipart_initiated) {

        if (!fop->uploadid) {
            LOG_err (FIO_LOG, INO_CON_H"UploadID is not set, aborting operation !", INO_T (fop->ino), con);
            fileio_destroy (fop);
            return;
        }

        path = g_strdup_printf ("%s?partNumber=%u&uploadId=%s",
            fop->fname, fop->part_number, fop->uploadid);
        fop->part_number++;

    } else {
        path = g_strdup (fop->fname);
    }

#ifdef MAGIC_ENABLED
    // guess MIME type
    const gchar *mime_type = magic_buffer (application_get_magic_ctx (fop->app), buf, buf_len);
    if (mime_type) {
        LOG_debug (FIO_LOG, "Guessed MIME type of %s as %s", path, mime_type);
        fop->content_type = g_strdup (mime_type);
    } else {
        LOG_err (FIO_LOG, "Failed to guess MIME type of %s !", path);
    }
#endif

    http_connection_acquire (con);

    // add output headers
    http_connection_add_output_header (con, "Content-MD5", part->md5b);
    if (fop->content_type)
        http_connection_add_output_header (con, "Content-Type", fop->content_type);


    // if this is the full file
    if (!fop->multipart_initiated) {
        time_t t;
        gchar time_str[50];

        // Add current time
        t = time (NULL);
        if (strftime (time_str, sizeof (time_str), "%a, %d %b %Y %H:%M:%S GMT", gmtime(&t))) {
            http_connection_add_output_header (con, "x-amz-meta-date", time_str);
        }

        http_connection_add_output_header (con, "x-amz-storage-class", conf_get_string (application_get_conf (con->app), "s3.storage_type"));
    }

    res = http_connection_make_request (con,
        path, "PUT", fop->write_buf, TRUE, NULL,
        fileio_release_on_part_sent_cb,
        fop
    );
    g_free (path);

    if (!res) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to create HTTP request !", INO_T (fop->ino), con);
        http_connection_release (con);
        fileio_destroy (fop);
        return;
    }
}
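
get_md5_sum () above has to produce both digest forms that the upload path uses: md5b for the Content-MD5 request header (base64 of the raw digest) and md5str for the x-amz-meta-md5 comparison (a 32-character hex string, as checked in the HEAD handler below). A sketch under those assumptions, not the project's actual helper:

#include <stdio.h>
#include <openssl/md5.h>
#include <glib.h>

// assumed sketch of get_md5_sum (): hex digest for x-amz-meta-md5, base64 digest for Content-MD5
static void get_md5_sum_sketch (const gchar *buf, size_t buf_len, gchar **out_hex, gchar **out_b64)
{
    unsigned char digest[MD5_DIGEST_LENGTH];
    gchar *hex;
    size_t i;

    MD5 ((const unsigned char *) buf, buf_len, digest);

    hex = g_malloc (MD5_DIGEST_LENGTH * 2 + 1);
    for (i = 0; i < MD5_DIGEST_LENGTH; i++)
        sprintf (&hex[i * 2], "%02x", (unsigned int) digest[i]);

    *out_hex = hex;                                          // caller frees with g_free ()
    *out_b64 = g_base64_encode (digest, MD5_DIGEST_LENGTH);  // caller frees with g_free ()
}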
Example No. 18
static void stat_srv_on_stats_cb (struct evhttp_request *req, void *ctx)
{
    StatSrv *stat_srv = (StatSrv *) ctx;
    struct evbuffer *evb = NULL;
    gint ref = 0;
    GString *str;
    struct evhttp_uri *uri;
    guint32 total_inodes, file_num, dir_num;
    guint64 read_ops, write_ops, readdir_ops, lookup_ops;
    guint32 cache_entries;
    guint64 total_cache_size, cache_hits, cache_miss;
    struct tm *cur_p;
    struct tm cur;
    time_t now;
    char ts[50];

    uri = evhttp_uri_parse (evhttp_request_get_uri (req));
    LOG_debug (STAT_LOG, "Incoming request: %s from %s:%d", 
        evhttp_request_get_uri (req), req->remote_host, req->remote_port);

    if (uri) {
        const gchar *query;
        
        query = evhttp_uri_get_query (uri);
        if (query) {
            const gchar *refresh = NULL;
            struct evkeyvalq q_params;
            
            TAILQ_INIT (&q_params);
            evhttp_parse_query_str (query, &q_params);
            refresh = http_find_header (&q_params, "refresh");
            if (refresh)
                ref = atoi (refresh);

            evhttp_clear_headers (&q_params);
        }
        evhttp_uri_free (uri);
    }

    str = g_string_new (NULL);
    
    now = time (NULL);
    localtime_r (&now, &cur);
    cur_p = &cur;
    if (!strftime (ts, sizeof (ts), "%H:%M:%S", cur_p))
        ts[0] = '\0';

    g_string_append_printf (str, "RioFS version: %s Uptime: %u sec, Now: %s, Log level: %d, Dir cache time: %u sec<BR>", 
        VERSION, (guint32)(now - stat_srv->boot_time), ts, log_level,
        conf_get_uint (application_get_conf (stat_srv->app), "filesystem.dir_cache_max_time"));

    // DirTree
    dir_tree_get_stats (application_get_dir_tree (stat_srv->app), &total_inodes, &file_num, &dir_num);
    g_string_append_printf (str, "<BR>DirTree: <BR>-Total inodes: %u, Total files: %u, Total directories: %u<BR>",
        total_inodes, file_num, dir_num);

    // Fuse
    rfuse_get_stats (application_get_rfuse (stat_srv->app), &read_ops, &write_ops, &readdir_ops, &lookup_ops);
    g_string_append_printf (str, "<BR>Fuse: <BR>-Read ops: %"G_GUINT64_FORMAT", Write ops: %"G_GUINT64_FORMAT
        ", Readdir ops: %"G_GUINT64_FORMAT", Lookup ops: %"G_GUINT64_FORMAT"<BR>",
        read_ops, write_ops, readdir_ops, lookup_ops);

    // CacheMng
    cache_mng_get_stats (application_get_cache_mng (stat_srv->app), &cache_entries, &total_cache_size, &cache_hits, &cache_miss);
    g_string_append_printf (str, "<BR>CacheMng: <BR>-Total entries: %"G_GUINT32_FORMAT", Total cache size: %"G_GUINT64_FORMAT
        " bytes, Cache hits: %"G_GUINT64_FORMAT", Cache misses: %"G_GUINT64_FORMAT" <BR>", 
        cache_entries, total_cache_size, cache_hits, cache_miss);
    
    g_string_append_printf (str, "<BR>Read workers (%d): <BR>", 
        client_pool_get_client_count (application_get_read_client_pool (stat_srv->app)));
    client_pool_get_client_stats_info (application_get_read_client_pool (stat_srv->app), str, &print_format_http);
    
    g_string_append_printf (str, "<BR>Write workers (%d): <BR>",
        client_pool_get_client_count (application_get_write_client_pool (stat_srv->app)));
    client_pool_get_client_stats_info (application_get_write_client_pool (stat_srv->app), str, &print_format_http);
    
    g_string_append_printf (str, "<BR>Op workers (%d): <BR>",
        client_pool_get_client_count (application_get_ops_client_pool (stat_srv->app)));
    client_pool_get_client_stats_info (application_get_ops_client_pool (stat_srv->app), str, &print_format_http);

    g_string_append_printf (str, "<BR><BR>Operation history (%u max items): <BR>",
        conf_get_uint (application_get_conf (stat_srv->app), "statistics.history_size"));
    g_queue_foreach (stat_srv->q_op_history, (GFunc) stat_srv_print_history_item, str);

    evb = evbuffer_new ();

    evbuffer_add_printf (evb, "<HTML>");
    if (ref) {
        evbuffer_add_printf (evb, "<HEAD><meta http-equiv=\"refresh\" content=\"%d\"></HEAD>", ref);
    }
    evbuffer_add_printf (evb, "<BODY>");
    evbuffer_add (evb, str->str, str->len);
    evbuffer_add_printf (evb, "</BODY></HTML>");
    evhttp_send_reply (req, HTTP_OK, "OK", evb);
    evbuffer_free (evb);

    g_string_free (str, TRUE);
}
Example No. 19
static void fileio_read_on_head_cb (HttpConnection *con, void *ctx, gboolean success,
    G_GNUC_UNUSED const gchar *buf, G_GNUC_UNUSED size_t buf_len,
    struct evkeyvalq *headers)
{
    FileReadData *rdata = (FileReadData *) ctx;
    const char *content_len_header;
    DirTree *dtree;

    // release HttpConnection
    http_connection_release (con);

    if (!success) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to get HEAD from server !", INO_T (rdata->ino), con);
        rdata->on_buffer_read_cb (rdata->ctx, FALSE, NULL, 0);
        g_free (rdata);
        return;
    }

    rdata->fop->head_req_sent = TRUE;
    // update DirTree
    dtree = application_get_dir_tree (rdata->fop->app);
    dir_tree_set_entry_exist (dtree, rdata->ino);

    // consistency checking:

    // 1. check local and remote file sizes
    content_len_header = http_find_header (headers, "Content-Length");
    if (content_len_header) {
        guint64 local_size = 0;
        gint64 size = 0;

        size = strtoll ((char *)content_len_header, NULL, 10);
        if (size < 0) {
            LOG_err (FIO_LOG, INO_CON_H"Header contains incorrect file size!", INO_T (rdata->ino), con);
            size = 0;
        }

        rdata->fop->file_size = size;
        LOG_debug (FIO_LOG, INO_H"Remote file size: %"G_GUINT64_FORMAT, INO_T (rdata->ino), rdata->fop->file_size);

        local_size = cache_mng_get_file_length (application_get_cache_mng (rdata->fop->app), rdata->ino);
        if (local_size != rdata->fop->file_size) {
            LOG_debug (FIO_LOG, INO_H"Local and remote file sizes do not match, invalidating local cached file!",
                INO_T (rdata->ino));
            cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
        }
    }

    // 2. use one of the following ways to check that local and remote files are identical
    // if versioning is enabled: compare version IDs
    // if bucket has versioning disabled: compare MD5 sums
    if (conf_get_boolean (application_get_conf (rdata->fop->app), "s3.versioning")) {
        const char *versioning_header = http_find_header (headers, "x-amz-version-id");
        if (versioning_header) {
            const gchar *local_version_id = cache_mng_get_version_id (application_get_cache_mng (rdata->fop->app), rdata->ino);
            if (local_version_id && !strcmp (local_version_id, versioning_header)) {
                LOG_debug (FIO_LOG, INO_H"Both version IDs match, using local cached file!", INO_T (rdata->ino));
            } else {
                LOG_debug (FIO_LOG, INO_H"Version IDs do not match, invalidating local cached file!: %s %s",
                    INO_T (rdata->ino), local_version_id, versioning_header);
                cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
            }

        // header was not found
        } else {
            LOG_debug (FIO_LOG, INO_H"Versioning header was not found, invalidating local cached file!", INO_T (rdata->ino));
            cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
        }

    //check for MD5
    } else  {
        const char *md5_header = http_find_header (headers, "x-amz-meta-md5");
        if (md5_header) {
            gchar *md5str = NULL;

            // at this point we have both remote and local MD5 sums
            if (cache_mng_get_md5 (application_get_cache_mng (rdata->fop->app), rdata->ino, &md5str)) {
                if (!strncmp (md5_header, md5str, 32)) {
                    LOG_debug (FIO_LOG, INO_H"MD5 sums match, using local cached file!", INO_T (rdata->ino));
                } else {
                    LOG_debug (FIO_LOG, INO_H"MD5 sums do not match, invalidating local cached file!", INO_T (rdata->ino));
                    cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
                }
            } else {
                LOG_debug (FIO_LOG, INO_H"Failed to get local MD5 sum, invalidating local cached file!", INO_T (rdata->ino));
                cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
            }

            if (md5str)
                g_free (md5str);

        // header was not found
        } else {
            LOG_debug (FIO_LOG, INO_H"MD5 sum header was not found, invalidating local cached file!", INO_T (rdata->ino));
            cache_mng_remove_file (application_get_cache_mng (rdata->fop->app), rdata->ino);
        }
    }

    // resume downloading file
    fileio_read_get_buf (rdata);
}