// Clamp the requested [off, off+size) range to the file's size and
// fetch it from the local cache (cache miss falls through to the server
// inside fileio_read_on_cache_cb).
static void fileio_read_get_buf (FileReadData *rdata)
{
    // a request starting at or past EOF yields no data at all
    if ((guint64)rdata->off >= rdata->fop->file_size) {
        LOG_debug (FIO_LOG, INO_H"requested size is beyond the file size!", INO_T (rdata->ino));
        fileio_read_on_cache_cb (NULL, 0, TRUE, rdata);
        return;
    }

    if (rdata->fop->file_size == 0) {
        // zero-length file: nothing to read
        rdata->size = 0;
    } else if (rdata->off >= 0 &&
               rdata->fop->file_size > (guint64)rdata->off &&
               (guint64)(rdata->off + rdata->size) > rdata->fop->file_size) {
        // shrink the request so it ends exactly at EOF
        rdata->size = rdata->fop->file_size - rdata->off;
    }
    // otherwise the requested offset and size already fit the file

    LOG_debug (FIO_LOG, INO_H"requesting [%"OFF_FMT": %"G_GUINT64_FORMAT"], file size: %"G_GUINT64_FORMAT,
        INO_T (rdata->ino), rdata->off, rdata->size, rdata->fop->file_size);

    cache_mng_retrieve_file_buf (application_get_cache_mng (rdata->fop->app),
        rdata->ino, rdata->size, rdata->off,
        fileio_read_on_cache_cb, rdata);
}
// replies on bucket versioning information static void application_on_bucket_versioning_cb (gpointer ctx, gboolean success, const gchar *buf, size_t buf_len) { Application *app = (Application *)ctx; gchar *tmp; if (!success) { LOG_err (APP_LOG, "Failed to get bucket versioning!"); application_exit (app); return; } if (buf_len > 1) { tmp = (gchar *)buf; tmp[buf_len - 1] = '\0'; if (strstr (buf, "<Status>Enabled</Status>")) { LOG_debug (APP_LOG, "Bucket has versioning enabled !"); conf_set_boolean (app->conf, "s3.versioning", TRUE); } else { LOG_debug (APP_LOG, "Bucket has versioning disabled !"); conf_set_boolean (app->conf, "s3.versioning", FALSE); } } else { conf_set_boolean (app->conf, "s3.versioning", FALSE); } application_finish_initialization_and_run (app); }
// add client's callback to the awaiting queue // return TRUE if added, FALSE if list is full gboolean s3client_pool_get_client (S3ClientPool *pool, S3ClientPool_on_client_ready on_client_ready, gpointer ctx) { GList *l; RequestData *data; PoolClient *pc; // check if the awaiting queue is full if (g_queue_get_length (pool->q_requests) >= pool->max_requests) { LOG_debug (POOL, "Pool's client awaiting queue is full !"); return FALSE; } // check if there is a client which is ready to execute a new request for (l = g_list_first (pool->l_clients); l; l = g_list_next (l)) { pc = (PoolClient *) l->data; // check if client is ready if (pc->client_check_rediness (pc->client)) { on_client_ready (pc->client, ctx); return TRUE; } } LOG_debug (POOL, "all Pool's clients are busy ..ctx: %p", ctx); // add client to the end of queue data = g_new0 (RequestData, 1); data->on_client_ready = on_client_ready; data->ctx = ctx; g_queue_push_tail (pool->q_requests, data); return TRUE; }
// Chunk callback: append each received chunk to the accumulating
// evbuffer supplied via ctx.
void on_input_data_cb (HttpClient *http, struct evbuffer *input_buf, gpointer ctx)
{
    struct evbuffer *dest = (struct evbuffer *) ctx;

    LOG_debug (HTTP_TEST, "CLN: >>>> got %zd bytes! Total: %ld length.",
        evbuffer_get_length (input_buf), http_client_get_input_length (http));

    // drains input_buf into dest
    evbuffer_add_buffer (dest, input_buf);

    LOG_debug (HTTP_TEST, "CLN: Resulting buf: %zd", evbuffer_get_length (dest));
}
// add new chunk range to the chunks pending queue void dir_tree_file_read (DirTree *dtree, fuse_ino_t ino, size_t size, off_t off, DirTree_file_read_cb file_read_cb, fuse_req_t req, struct fuse_file_info *fi) { DirEntry *en; char full_name[1024]; DirTreeFileOpData *op_data; DirTreeFileRange *range; en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (ino)); // if entry does not exist // or it's not a directory type ? if (!en) { LOG_msg (DIR_TREE_LOG, "Entry (ino = %"INO_FMT") not found !", ino); file_read_cb (req, FALSE, NULL, 0); return; } op_data = (DirTreeFileOpData *) en->op_data; LOG_debug (DIR_TREE_LOG, "[%p %p] Read Object inode %"INO_FMT", size: %zd, off: %"OFF_FMT, req, op_data, ino, size, off); op_data->file_read_cb = file_read_cb; op_data->en = en; op_data->dtree = dtree; range = g_new0 (DirTreeFileRange, 1); range->off = off; range->size = size; range->c_req = req; g_queue_push_tail (op_data->q_ranges_requested, range); LOG_debug (DIR_TREE_LOG, "[%p] more data b: %zd", range->c_req, range->size); // already reading data if (op_data->op_in_progress) { return; } if (op_data->http) { LOG_debug (DIR_TREE_LOG, "Adding from main"); range = g_queue_pop_head (op_data->q_ranges_requested); if (range) { op_data->c_size = range->size; op_data->c_off = range->off; op_data->c_req = range->c_req; g_free (range); // perform the next chunk request op_data->op_in_progress = TRUE; dir_tree_file_read_prepare_request (op_data, op_data->http, op_data->c_off, op_data->c_size); } return; } }
// Upload the contents of fd to the given resource path with a PUT request.
// Returns FALSE (after invoking the error path, which owns `data`) when the
// file cannot be stat'ed/read or the request cannot be created.
gboolean s3http_connection_file_send (S3HttpConnection *con, int fd, const gchar *resource_path,
    S3HttpConnection_on_entry_sent_cb on_entry_sent_cb, gpointer ctx)
{
    gchar *req_path;
    gboolean res;
    FileSendData *data;
    struct evbuffer *output_buf = NULL;
    struct stat st;

    data = g_new0 (FileSendData, 1);
    data->on_entry_sent_cb = on_entry_sent_cb;
    data->ctx = ctx;

    LOG_debug (CON_SEND_LOG, "Sending file.. %p", data);

    if (fstat (fd, &st) < 0) {
        LOG_err (CON_SEND_LOG, "Failed to stat temp file !");
        s3http_connection_on_file_send_error (con, (void *) data);
        return FALSE;
    }

    output_buf = evbuffer_new ();
    if (!output_buf || evbuffer_add_file (output_buf, fd, 0, st.st_size) < 0) {
        LOG_err (CON_SEND_LOG, "Failed to read temp file !");
        s3http_connection_on_file_send_error (con, (void *) data);
        if (output_buf)
            evbuffer_free (output_buf);
        return FALSE;
    }

    // plain copy; was g_strdup_printf ("%s", ...) which needlessly parses a format
    req_path = g_strdup (resource_path);

    LOG_debug (CON_SEND_LOG, "[%p %p] Sending %s file, req: %s, %"OFF_FMT" buff: %zd",
        con, data, resource_path, req_path, st.st_size, evbuffer_get_length (output_buf));

    res = s3http_connection_make_request (con,
        resource_path, req_path, "PUT",
        output_buf,
        s3http_connection_on_file_send_done,
        s3http_connection_on_file_send_error,
        data
    );

    g_free (req_path);
    evbuffer_free (output_buf);

    if (!res) {
        LOG_err (CON_SEND_LOG, "Failed to create HTTP request !");
        s3http_connection_on_file_send_error (con, (void *) data);
        return FALSE;
    }

    return TRUE;
}
// Timer tick of the test server: for header tests (test_id < TID_body)
// dribble the canned response out one byte per tick; for body tests send
// the headers once and then the body in 100KB slices. Re-arms itself.
static void on_output_timer (evutil_socket_t fd, short event, void *ctx)
{
    OutData *out = (OutData *) ctx;
    struct timeval next;
    struct evbuffer *chunk;

    LOG_debug (HTTP_TEST, "SRV: on output timer ..");

    // header tests: every byte sent -> tear down and stop the event loop
    if (out->test_id < TID_body && out->timer_count >= evbuffer_get_length (out->out_buf)) {
        bufferevent_free (out->bev);
        evconnlistener_disable (out->listener);
        event_base_loopbreak (out->evbase);
        LOG_debug (HTTP_TEST, "SRV: All headers data sent !! ");
        return;
    }

    chunk = evbuffer_new ();

    if (out->test_id < TID_body) {
        // exactly one header byte per tick
        char *raw = (char *) evbuffer_pullup (out->out_buf, -1);
        char byte = raw[out->timer_count];
        evbuffer_add (chunk, &byte, sizeof (byte));
        out->timer_count++;
        LOG_debug (HTTP_TEST, "SRV: Sending %zd bytes:\n>>%s<<\n",
            evbuffer_get_length (chunk), evbuffer_pullup (chunk, -1));
    } else {
        // headers are flushed once, then the body streams out in slices
        if (!out->header_sent) {
            evbuffer_add_buffer (chunk, out->out_buf);
            out->header_sent = TRUE;
        }
        evbuffer_remove_buffer (out->in_file, chunk, 1024*100);
        LOG_debug (HTTP_TEST, "SRV: Sending BODY %zd bytes", evbuffer_get_length (chunk));
    }

    bufferevent_write_buffer (out->bev, chunk);
    evbuffer_free (chunk);

    // re-arm for the next tick
    evutil_timerclear (&next);
    next.tv_sec = 0;
    next.tv_usec = 500;
    event_add (out->timeout, &next);
}
// return directory buffer from the cache // or regenerate directory cache void dir_tree_fill_dir_buf (DirTree *dtree, fuse_ino_t ino, size_t size, off_t off, dir_tree_readdir_cb readdir_cb, fuse_req_t req) { DirEntry *en; DirTreeFillDirData *dir_fill_data; time_t t; LOG_debug (DIR_TREE_LOG, "Requesting directory buffer for dir ino %"INO_FMT", size: %zd, off: %"OFF_FMT, ino, size, off); en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (ino)); // if directory does not exist // or it's not a directory type ? if (!en || en->type != DET_dir) { LOG_msg (DIR_TREE_LOG, "Directory (ino = %"INO_FMT") not found !", ino); readdir_cb (req, FALSE, size, off, NULL, 0); return; } t = time (NULL); // already have directory buffer in the cache if (en->dir_cache_size && t >= en->dir_cache_created && t - en->dir_cache_created <= dtree->dir_cache_max_time) { LOG_debug (DIR_TREE_LOG, "Sending directory buffer (ino = %"INO_FMT") from cache !", ino); readdir_cb (req, TRUE, size, off, en->dir_cache, en->dir_cache_size); return; } LOG_debug (DIR_TREE_LOG, "cache time: %ld now: %ld", en->dir_cache_created, t); // reset dir cache if (en->dir_cache) g_free (en->dir_cache); en->dir_cache_size = 0; en->dir_cache_created = 0; dir_fill_data = g_new0 (DirTreeFillDirData, 1); dir_fill_data->dtree = dtree; dir_fill_data->ino = ino; dir_fill_data->size = size; dir_fill_data->off = off; dir_fill_data->readdir_cb = readdir_cb; dir_fill_data->req = req; dir_fill_data->en = en; if (!s3client_pool_get_client (application_get_ops_client_pool (dtree->app), dir_tree_fill_dir_on_http_ready, dir_fill_data)) { LOG_err (DIR_TREE_LOG, "Failed to get HTTP client !"); readdir_cb (req, FALSE, size, off, NULL, 0); g_free (dir_fill_data); } }
// read HTTP headers // return TRUE if all haders are read, FALSE if not enough data static gboolean s3http_client_parse_headers (S3HttpClient *http, struct evbuffer *input_buf) { size_t line_length = 0; char *line = NULL; S3HttpClientHeader *header; while ((line = evbuffer_readln (input_buf, &line_length, EVBUFFER_EOL_CRLF)) != NULL) { char *skey, *svalue; // the last line if (*line == '\0') { g_free (line); return TRUE; } // LOG_debug (HTTP_LOG, "HEADER line: %s\n", line); svalue = line; skey = strsep (&svalue, ":"); if (svalue == NULL) { LOG_debug (HTTP_LOG, "Wrong header data received !"); g_free (line); return FALSE; } svalue += strspn (svalue, " "); header = g_new0 (S3HttpClientHeader, 1); header->key = g_strdup (skey); header->value = g_strdup (svalue); http->l_input_headers = g_list_append (http->l_input_headers, header); if (!strcmp (skey, "Content-Length")) { char *endp; http->input_length = evutil_strtoll (svalue, &endp, 10); if (*svalue == '\0' || *endp != '\0') { LOG_debug (HTTP_LOG, "Illegal content length: %s", svalue); http->input_length = 0; } } g_free (line); } LOG_debug (HTTP_LOG, "Wrong header line: %s", line); // if we are here - not all headers have been received ! return FALSE; }
// create max_files and fill with random data // return list of {file name, content md5} static GList *populate_file_list (gint max_files, GList *l_files, gchar *in_dir) { gint i; gchar *out_dir; GError *error = NULL; FileData *fdata; gchar *name; FILE *f; out_dir = g_dir_make_tmp (NULL, &error); g_assert (out_dir); LOG_debug (POOL_TEST, "In dir: %s Out dir: %s", in_dir, out_dir); for (i = 0; i < max_files; i++) { char *bytes; size_t bytes_len; fdata = g_new0 (FileData, 1); fdata->checked = FALSE; bytes_len = g_random_int_range (100000, 1000000); bytes = g_malloc (bytes_len + 1); RAND_pseudo_bytes ((unsigned char *)bytes, bytes_len); *(bytes + bytes_len) = '\0'; name = get_random_string (15, TRUE); fdata->in_name = g_strdup_printf ("%s/%s", in_dir, name); f = fopen (fdata->in_name, "w"); fwrite (bytes, 1, bytes_len + 1, f); fclose (f); fdata->out_name = g_strdup_printf ("%s/%s", out_dir, name); get_md5_sum (bytes, bytes_len + 1, &fdata->md5, NULL); fdata->fout = fopen (fdata->out_name, "w"); g_assert (fdata->fout); fdata->url = g_strdup_printf ("http://127.0.0.1:8011/%s", name); g_assert (fdata->url); LOG_debug (POOL_TEST, "%s -> %s, size: %u", fdata->in_name, fdata->md5, bytes_len); l_files = g_list_append (l_files, fdata); } return l_files; }
// A pooled HTTP connection became available: issue the GET request for
// the file being downloaded. On failure the download context is reported
// failed and destroyed.
static void fileio_simple_download_on_con_cb (gpointer client, gpointer ctx)
{
    HttpConnection *con = (HttpConnection *) client;
    FileIOSimpleDownload *fsim = (FileIOSimpleDownload *) ctx;

    LOG_debug (FIO_LOG, CON_H"Downloading data.", con);

    http_connection_acquire (con);

    // NOTE(review): x-amz-storage-class is an upload attribute; sending it
    // on a GET looks unnecessary - confirm against the S3 API before removing
    http_connection_add_output_header (con, "x-amz-storage-class",
        conf_get_string (application_get_conf (con->app), "s3.storage_type"));

    if (!http_connection_make_request (con,
            fsim->fname, "GET", NULL, TRUE, NULL,
            fileio_simple_download_on_sent_cb, fsim)) {
        LOG_err (FIO_LOG, CON_H"Failed to create HTTP request !", con);
        http_connection_release (con);
        fsim->on_download_cb (fsim->ctx, FALSE, NULL, 0);
        fileio_simple_download_destroy (fsim);
    }
}
// remove file gboolean dir_tree_file_remove (DirTree *dtree, fuse_ino_t ino, DirTree_file_remove_cb file_remove_cb, fuse_req_t req) { DirEntry *en; FileRemoveData *data; LOG_debug (DIR_TREE_LOG, "Removing inode %"INO_FMT, ino); en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (ino)); // if entry does not exist // or it's not a directory type ? if (!en) { LOG_err (DIR_TREE_LOG, "Entry (ino = %"INO_FMT") not found !", ino); file_remove_cb (req, FALSE); return FALSE; } if (en->type != DET_file) { LOG_err (DIR_TREE_LOG, "Entry (ino = %"INO_FMT") is not a file !", ino); file_remove_cb (req, FALSE); return FALSE; } data = g_new0 (FileRemoveData, 1); data->dtree = dtree; data->ino = ino; data->en = en; data->file_remove_cb = file_remove_cb; data->req = req; s3client_pool_get_client (application_get_ops_client_pool (dtree->app), dir_tree_file_remove_on_http_client_cb, data); return TRUE; }
// Refresh an existing child entry's age and size under parent_ino, or
// create the entry with default permissions when it does not exist yet.
void dir_tree_update_entry (DirTree *dtree, const gchar *path, DirEntryType type,
    fuse_ino_t parent_ino, const gchar *entry_name, long long size)
{
    DirEntry *parent_en;
    DirEntry *en;

    // %lld matches the long long argument (the original %ld was a mismatch)
    LOG_debug (DIR_TREE_LOG, "Updating %s %lld", entry_name, size);

    // get parent
    parent_en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (parent_ino));
    if (!parent_en || parent_en->type != DET_dir) {
        LOG_err (DIR_TREE_LOG, "DirEntry is not a directory ! ino: %"INO_FMT, parent_ino);
        return;
    }

    // get child
    en = g_hash_table_lookup (parent_en->h_dir_tree, entry_name);
    if (en) {
        // refresh the existing entry
        en->age = dtree->current_age;
        en->size = size;
    } else {
        // pick default permissions by entry type and create it
        mode_t mode;

        if (type == DET_file)
            mode = FILE_DEFAULT_MODE;
        else
            mode = DIR_DEFAULT_MODE;

        dir_tree_add_entry (dtree, entry_name, mode, type, parent_ino, size, time (NULL));
    }
}
// Upload finished successfully: notify the caller, return the connection
// to the pool and free the send context.
static void s3http_connection_on_file_send_done (S3HttpConnection *con, void *ctx,
    const gchar *buf, size_t buf_len,
    G_GNUC_UNUSED struct evkeyvalq *headers)
{
    FileSendData *data = (FileSendData *) ctx;

    // single %p: the original format had two %p but only one argument (UB)
    LOG_debug (CON_SEND_LOG, "File is sent ! %p", data);

    if (data->on_entry_sent_cb)
        data->on_entry_sent_cb (data->ctx, TRUE);
    else
        LOG_debug (CON_SEND_LOG, "No callback function !");

    s3http_connection_release (con);
    g_free (data);
}
// file is closed, free context data void dir_tree_file_release (DirTree *dtree, fuse_ino_t ino, struct fuse_file_info *fi) { DirEntry *en; DirTreeFileOpData *op_data; LOG_debug (DIR_TREE_LOG, "dir_tree_file_release inode %d", ino); en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (ino)); // if entry does not exist // or it's not a directory type ? if (!en) { LOG_msg (DIR_TREE_LOG, "Entry (ino = %"INO_FMT") not found !", ino); //XXX return; } op_data = (DirTreeFileOpData *) en->op_data; // op_data->en = en; // op_data->ino = ino; if (op_data->http) s3http_client_release (op_data->http); // releasing written file if (op_data->tmp_write_fd) { if (!s3client_pool_get_client (application_get_write_client_pool (dtree->app), dir_tree_file_release_on_http_ready, op_data)) { LOG_err (DIR_TREE_LOG, "Failed to get S3HttpConnection from the pool !"); } } else { file_op_data_destroy (op_data); } }
// Create a new directory entry under parent_ino and report the result via
// mkdir_cb. (Removed unused local `DirTreeFileOpData *op_data`.)
void dir_tree_dir_create (DirTree *dtree, fuse_ino_t parent_ino, const char *name, mode_t mode,
     dir_tree_mkdir_cb mkdir_cb, fuse_req_t req)
{
    DirEntry *dir_en, *en;

    LOG_debug (DIR_TREE_LOG, "Creating dir: %s", name);

    dir_en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (parent_ino));

    // parent must exist and be a directory
    if (!dir_en || dir_en->type != DET_dir) {
        LOG_msg (DIR_TREE_LOG, "Directory (%"INO_FMT") not found !", parent_ino);
        mkdir_cb (req, FALSE, 0, 0, 0, 0);
        return;
    }

    // create a new entry
    en = dir_tree_add_entry (dtree, name, mode, DET_dir, parent_ino, 10, time (NULL));
    if (!en) {
        LOG_msg (DIR_TREE_LOG, "Failed to create dir: %s !", name);
        mkdir_cb (req, FALSE, 0, 0, 0, 0);
        return;
    }

    //XXX: set as new
    en->is_modified = FALSE;
    // do not delete it
    en->age = G_MAXUINT32;
    en->mode = DIR_DEFAULT_MODE;

    mkdir_cb (req, TRUE, en->ino, en->mode, en->size, en->ctime);
}
// add new file entry to directory, return new inode void dir_tree_file_create (DirTree *dtree, fuse_ino_t parent_ino, const char *name, mode_t mode, DirTree_file_create_cb file_create_cb, fuse_req_t req, struct fuse_file_info *fi) { DirEntry *dir_en, *en; DirTreeFileOpData *op_data; LOG_debug (DIR_TREE_LOG, "Adding new entry '%s' to directory ino: %"INO_FMT, name, parent_ino); dir_en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (parent_ino)); // entry not found if (!dir_en || dir_en->type != DET_dir) { LOG_msg (DIR_TREE_LOG, "Directory (%"INO_FMT") not found !", parent_ino); file_create_cb (req, FALSE, 0, 0, 0, fi); return; } // create a new entry en = dir_tree_add_entry (dtree, name, mode, DET_file, parent_ino, 0, time (NULL)); if (!en) { LOG_msg (DIR_TREE_LOG, "Failed to create file: %s !", name); file_create_cb (req, FALSE, 0, 0, 0, fi); return; } //XXX: set as new en->is_modified = TRUE; op_data = file_op_data_create (dtree, en->ino); op_data->en = en; op_data->ino = en->ino; en->op_data = (gpointer) op_data; file_create_cb (req, TRUE, en->ino, en->mode, en->size, fi); }
// existing file is opened, create context data gboolean dir_tree_file_open (DirTree *dtree, fuse_ino_t ino, struct fuse_file_info *fi, DirTree_file_open_cb file_open_cb, fuse_req_t req) { DirTreeFileOpData *op_data; DirEntry *en; op_data = file_op_data_create (dtree, ino); op_data->c_fi = fi; op_data->c_req = req; op_data->file_open_cb = file_open_cb; en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (ino)); // if entry does not exist // or it's not a directory type ? if (!en) { LOG_msg (DIR_TREE_LOG, "Entry (ino = %"INO_FMT") not found !", ino); file_open_cb (op_data->c_req, FALSE, op_data->c_fi); return FALSE; } op_data->en = en; op_data->en->op_data = (gpointer) op_data; LOG_debug (DIR_TREE_LOG, "[%p %p] dir_tree_open inode %"INO_FMT, op_data, fi, ino); if (!s3client_pool_get_client (application_get_read_client_pool (dtree->app), dir_tree_file_open_on_http_ready, op_data)) { LOG_err (DIR_TREE_LOG, "Failed to get S3HttpConnection from the pool !"); } return TRUE; }
/*{{{ GET request */
// Server GET completed: store the received range in the local cache,
// record the object's version id (when reported), then serve the original
// read request from the cache.
static void fileio_read_on_get_cb (HttpConnection *con, void *ctx, gboolean success,
    const gchar *buf, size_t buf_len,
    G_GNUC_UNUSED struct evkeyvalq *headers)
{
    FileReadData *rdata = (FileReadData *) ctx;
    const char *version;

    // the connection goes back to the pool regardless of the outcome
    http_connection_release (con);

    if (!success) {
        LOG_err (FIO_LOG, INO_CON_H"Failed to get file from server !", INO_T (rdata->ino), con);
        rdata->on_buffer_read_cb (rdata->ctx, FALSE, NULL, 0);
        g_free (rdata);
        return;
    }

    // keep a local copy so subsequent reads hit the cache
    cache_mng_store_file_buf (application_get_cache_mng (rdata->fop->app),
        rdata->ino, buf_len, rdata->request_offset, (unsigned char *) buf,
        NULL, NULL);

    // remember the object's version, when the server reports one
    version = http_find_header (headers, "x-amz-version-id");
    if (version)
        cache_mng_update_version_id (application_get_cache_mng (rdata->fop->app), rdata->ino, version);

    LOG_debug (FIO_LOG, INO_H"Storing [%"G_GUINT64_FORMAT" %zu]", INO_T(rdata->ino), rdata->request_offset, buf_len);

    // and read it
    fileio_read_get_buf (rdata);
}
// multipart is sent static void fileio_release_on_complete_cb (HttpConnection *con, void *ctx, gboolean success, G_GNUC_UNUSED const gchar *buf, G_GNUC_UNUSED size_t buf_len, G_GNUC_UNUSED struct evkeyvalq *headers) { FileIO *fop = (FileIO *) ctx; const gchar *versioning_header; http_connection_release (con); if (!success) { LOG_err (FIO_LOG, INO_CON_H"Failed to send Multipart data to the server !", INO_T (fop->ino), con); fileio_destroy (fop); return; } versioning_header = http_find_header (headers, "x-amz-version-id"); if (versioning_header) { cache_mng_update_version_id (application_get_cache_mng (fop->app), fop->ino, versioning_header); } // done LOG_debug (FIO_LOG, INO_CON_H"Multipart Upload is done !", INO_T (fop->ino), con); // fileio_destroy (fop); fileio_release_update_headers (fop); }
// Issue a HEAD request for the container and deliver the result via
// container_meta_cb. On failure the callback is invoked with FALSE and
// all resources are released.
void http_connection_get_container_meta (HttpConnection *con, HttpConnection_container_meta_cb container_meta_cb, gpointer ctx)
{
    gchar *req_path;
    gboolean res;
    ContainerMeta *meta;

    LOG_debug (CON_CONT, "Getting container meta for: %s", application_get_container_name (con->app));

    // acquire HTTP client
    http_connection_acquire (con);

    meta = g_new0 (ContainerMeta, 1);
    meta->ctx = ctx;
    meta->container_meta_cb = container_meta_cb;

    req_path = g_strdup_printf ("/%s", application_get_container_name (con->app));

    res = http_connection_make_request_to_storage_url (con,
        req_path, "HEAD", NULL,
        http_connection_on_container_meta_cb,
        meta
    );
    g_free (req_path);

    if (!res) {
        LOG_err (CON_CONT, "Failed to create HTTP request !");
        container_meta_cb (ctx, FALSE);
        http_connection_release (con);
        // was leaked: the response callback that owns meta will never run
        g_free (meta);
        return;
    }
}
// Drive one client/server request scenario for the given test id:
// start an embedded evhttp server on 127.0.0.1:8080, issue a GET for
// /index.html, run the event loop until a callback breaks it, then tear
// everything down.
// NOTE(review): out->http is never created here - the http_client_create
// call below is commented out, so out->http is NULL (from g_new0) when the
// http_client_* setters run; confirm this is the intended WIP state.
static void run_request_test (struct event_base *evbase, struct evdns_base *dns_base, TestID test_id)
{
    OutData *out;
    struct evbuffer *in_buf;
    char c = 'x';

    LOG_debug (HTTP_TEST, "===================== TEST ID : %d =======================", test_id);

    // per-test server-side state
    out = g_new0 (OutData, 1);
    out->evbase = evbase;
    out->test_id = test_id;

    out->evhttp = evhttp_new (evbase);
    evhttp_bind_socket (out->evhttp, "127.0.0.1", 8080);
    evhttp_set_gencb (out->evhttp, on_request_gencb, out);

    //out->http = http_client_create (evbase, dns_base);
    // in_buf accumulates everything the chunk callback receives
    in_buf = evbuffer_new ();
    http_client_set_cb_ctx (out->http, out);
    http_client_set_on_chunk_cb (out->http, on_input_data_cb);
    http_client_set_close_cb (out->http, on_http_close);

    //http_client_set_output_length (out->http, 1);
    //http_client_add_output_data (out->http, &c, 1);

    http_client_start_request_to_storage_url (out->http, Method_get, "/index.html", NULL, NULL);

    // run until a callback stops the loop
    event_base_dispatch (evbase);

    http_client_destroy (out->http);

    LOG_debug (HTTP_TEST, "Resulting buff: %zd", evbuffer_get_length (in_buf));
    evbuffer_free (in_buf);

    // tear down all per-test resources
    g_free (out->first_line);
    g_free (out->header_line);
    evconnlistener_free (out->listener);
    evtimer_del (out->timeout);
    event_free (out->timeout);
    evbuffer_free (out->out_buf);
    evbuffer_free (out->in_file);
    g_free (out);

    LOG_debug (HTTP_TEST, "===================== END TEST ID : %d =======================", test_id);
}
// removes file from local storage void cache_mng_remove_file (CacheMng *cmng, fuse_ino_t ino) { struct _CacheEntry *entry; char path[PATH_MAX]; entry = g_hash_table_lookup (cmng->h_entries, GUINT_TO_POINTER (ino)); if (entry) { cmng->size -= range_length (entry->avail_range); g_queue_delete_link (cmng->q_lru, entry->ll_lru); g_hash_table_remove (cmng->h_entries, GUINT_TO_POINTER (ino)); cache_mng_file_name (cmng, path, sizeof (path), ino); unlink (path); LOG_debug (CMNG_LOG, INO_H"Entry is removed", INO_T (ino)); } else { LOG_debug (CMNG_LOG, INO_H"Entry not found", INO_T (ino)); } }
// Final chunk of the current range arrived: deliver it to fuse and, if
// more ranges are queued, start the next chunk request; otherwise mark
// the operation idle.
static void dir_tree_file_read_on_last_chunk_cb (S3HttpClient *http, struct evbuffer *input_buf, gpointer ctx)
{
    gchar *buf = NULL;
    size_t buf_len;
    DirTreeFileOpData *op_data = (DirTreeFileOpData *) ctx;
    DirTreeFileRange *range;

    buf_len = evbuffer_get_length (input_buf);
    buf = (gchar *) evbuffer_pullup (input_buf, buf_len);

    op_data->total_read += buf_len;

    // %"OFF_FMT" for off_t en->size and %u for the guint queue length:
    // the original %zu specifiers did not match those argument types
    LOG_debug (DIR_TREE_LOG, "[%p %p] lTOTAL read: %zu (req: %zu), orig size: %"OFF_FMT", TOTAL: %"OFF_FMT", Qsize: %u",
        op_data->c_req, http, buf_len, op_data->c_size, op_data->en->size, op_data->total_read,
        g_queue_get_length (op_data->q_ranges_requested));

    // hand the data to fuse
    if (op_data->file_read_cb)
        op_data->file_read_cb (op_data->c_req, TRUE, buf, buf_len);

    evbuffer_drain (input_buf, buf_len);

    // if there are more pending chunk requests - start the next one
    if (g_queue_get_length (op_data->q_ranges_requested) > 0) {
        range = g_queue_pop_head (op_data->q_ranges_requested);
        LOG_debug (DIR_TREE_LOG, "[%p] more data: %zd", range->c_req, range->size);
        op_data->c_size = range->size;
        op_data->c_off = range->off;
        op_data->c_req = range->c_req;
        g_free (range);

        op_data->op_in_progress = TRUE;
        // perform the next chunk request
        dir_tree_file_read_prepare_request (op_data, http, op_data->c_off, op_data->c_size);
    } else {
        LOG_debug (DIR_TREE_LOG, "Done downloading !!");
        op_data->op_in_progress = FALSE;
    }
}
// callback: void dir_tree_fill_on_dir_buf_cb (gpointer callback_data, gboolean success) { DirTreeFillDirData *dir_fill_data = (DirTreeFillDirData *) callback_data; LOG_debug (DIR_TREE_LOG, "Dir fill callback: %s", success ? "SUCCESS" : "FAILED"); if (!success) { dir_fill_data->readdir_cb (dir_fill_data->req, FALSE, dir_fill_data->size, dir_fill_data->off, NULL, 0); } else { struct dirbuf b; // directory buffer GHashTableIter iter; gpointer value; // construct directory buffer // add "." and ".." memset (&b, 0, sizeof(b)); s3fuse_add_dirbuf (dir_fill_data->req, &b, ".", dir_fill_data->en->ino); s3fuse_add_dirbuf (dir_fill_data->req, &b, "..", dir_fill_data->en->ino); LOG_debug (DIR_TREE_LOG, "Entries in directory : %u", g_hash_table_size (dir_fill_data->en->h_dir_tree)); // get all directory items g_hash_table_iter_init (&iter, dir_fill_data->en->h_dir_tree); while (g_hash_table_iter_next (&iter, NULL, &value)) { DirEntry *tmp_en = (DirEntry *) value; // add only updated entries if (tmp_en->age >= dir_fill_data->dtree->current_age) s3fuse_add_dirbuf (dir_fill_data->req, &b, tmp_en->basename, tmp_en->ino); } // done, save as cache dir_fill_data->en->dir_cache_size = b.size; dir_fill_data->en->dir_cache = g_malloc (b.size); dir_fill_data->en->dir_cache_created = time (NULL); memcpy (dir_fill_data->en->dir_cache, b.p, b.size); // send buffer to fuse dir_fill_data->readdir_cb (dir_fill_data->req, TRUE, dir_fill_data->size, dir_fill_data->off, b.p, b.size); //free buffer g_free (b.p); } g_free (dir_fill_data); }
// create and add a new entry (file or dir) to DirTree static DirEntry *dir_tree_add_entry (DirTree *dtree, const gchar *basename, mode_t mode, DirEntryType type, fuse_ino_t parent_ino, off_t size, time_t ctime) { DirEntry *en; DirEntry *parent_en = NULL; en = g_new0 (DirEntry, 1); // get the parent, for inodes > 0 if (parent_ino) { parent_en = g_hash_table_lookup (dtree->h_inodes, GUINT_TO_POINTER (parent_ino)); if (!parent_en) { LOG_err (DIR_TREE_LOG, "Parent not found for ino: %llu !", parent_ino); return NULL; } // update directory buffer dir_tree_entry_modified (dtree, parent_en); if (parent_ino == 1) en->fullpath = g_strdup_printf ("/%s", basename); else en->fullpath = g_strdup_printf ("%s/%s", parent_en->fullpath, basename); } else { en->fullpath = g_strdup ("/"); } en->ino = dtree->max_ino++; en->age = dtree->current_age; en->basename = g_strdup (basename); en->mode = mode; en->size = size; en->parent_ino = parent_ino; en->type = type; en->ctime = ctime; en->is_modified = FALSE; // cache is empty en->dir_cache = NULL; en->dir_cache_size = 0; en->dir_cache_created = 0; LOG_debug (DIR_TREE_LOG, "Creating new DirEntry: %s, inode: %d, fullpath: %s, mode: %d", en->basename, en->ino, en->fullpath, en->mode); if (type == DET_dir) { en->h_dir_tree = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, dir_entry_destroy); } // add to global inode hash g_hash_table_insert (dtree->h_inodes, GUINT_TO_POINTER (en->ino), en); // add to the parent's hash if (parent_ino) g_hash_table_insert (parent_en->h_dir_tree, en->basename, en); return en; }
// Start the embedded test HTTP server on 127.0.0.1:8011; every request is
// dispatched to on_srv_gen_request with the input directory path as the
// callback context.
static void start_srv (struct event_base *base, gchar *in_dir)
{
    app->http_srv = evhttp_new (base);
    g_assert (app->http_srv);
    evhttp_bind_socket (app->http_srv, "127.0.0.1", 8011);
    evhttp_set_gencb (app->http_srv, on_srv_gen_request, in_dir);

    LOG_debug (HTTP_TEST, "SRV: started");
}
// Generic request handler of the test server: only "/storage/..." URIs are
// expected; anything else aborts the test.
static void on_srv_gen_request (struct evhttp_request *req, void *ctx)
{
    const char *uri = evhttp_request_get_uri (req);

    if (strstr (uri, "/storage/"))
        return;

    LOG_debug (HTTP_TEST, "%s", uri);
    g_assert_not_reached ();
}
// Free the per-open operation context together with any still-queued
// range requests.
static void file_op_data_destroy (DirTreeFileOpData *op_data)
{
    LOG_debug (DIR_TREE_LOG, "Destroying opdata !");

    // g_queue_free_full is a no-op on the elements of an empty queue,
    // so the original length-based branch was redundant
    g_queue_free_full (op_data->q_ranges_requested, g_free);

    g_free (op_data);
}
// Cache lookup finished: on a hit hand the buffer to the caller; on a
// miss fetch the range from the server through a pooled HTTP client.
static void fileio_read_on_cache_cb (unsigned char *buf, size_t size, gboolean success, void *ctx)
{
    FileReadData *rdata = (FileReadData *) ctx;

    // cache hit: deliver and finish
    if (success) {
        LOG_debug (FIO_LOG, INO_H"Reading from cache", INO_T (rdata->ino));
        rdata->on_buffer_read_cb (rdata->ctx, TRUE, (char *)buf, size);
        g_free (rdata);
        return;
    }

    // cache miss: go to the server
    LOG_debug (FIO_LOG, INO_H"Reading from server !", INO_T (rdata->ino));
    if (!client_pool_get_client (application_get_read_client_pool (rdata->fop->app),
            fileio_read_on_con_cb, rdata)) {
        LOG_err (FIO_LOG, INO_H"Failed to get HTTP client !", INO_T (rdata->ino));
        rdata->on_buffer_read_cb (rdata->ctx, FALSE, NULL, 0);
        g_free (rdata);
    }
}