static void delete_status_bucket(struct cloudmig_ctx *ctx) { dpl_status_t dplret; dpl_vec_t *objects = NULL; cloudmig_log(DEBUG_LVL, "[Deleting files]: Deleting status bucket...\n"); dplret = dpl_list_bucket(ctx->src_ctx, ctx->status.bucket_name, NULL, NULL, &objects, NULL); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not list bucket %s for deletion : %s\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); goto deletebucket; } dpl_object_t** cur_object = (dpl_object_t**)objects->array; for (int i = 0; i < objects->n_items; ++i, ++cur_object) delete_file(ctx, ctx->status.bucket_name, (*cur_object)->key); deletebucket: dpl_deletebucket(ctx->src_ctx, ctx->status.bucket_name); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not delete bucket %s : %s.\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); return ; } cloudmig_log(DEBUG_LVL, "[Deleting Source] Bucket %s deleted.\n", ctx->status.bucket_name); }
/*
 * Opens a "virtual directory" on a bucket: lists the bucket under the
 * given inode key using the context delimiter, so objects land in
 * dir->files and common prefixes in dir->directories, then returns the
 * opaque handle through dir_hdlp.  On failure everything allocated here
 * is released.
 */
static dpl_status_t
dpl_vdir_opendir(dpl_ctx_t *ctx,
                 char *bucket,
                 dpl_ino_t ino,
                 void **dir_hdlp)
{
  dpl_dir_t *dir;
  int ret, ret2;

  DPL_TRACE(ctx, DPL_TRACE_VDIR, "opendir bucket=%s ino=%s", bucket, ino.key);

  dir = malloc(sizeof (*dir));
  if (NULL == dir)
    {
      ret = DPL_FAILURE;
      goto end;
    }

  memset(dir, 0, sizeof (*dir));

  dir->ctx = ctx;
  dir->ino = ino;

  //AWS prefers NULL for listing the root dir
  ret2 = dpl_list_bucket(ctx, bucket,
                         !strcmp(ino.key, "") ? NULL : ino.key,
                         ctx->delim, &dir->files, &dir->directories);
  if (DPL_SUCCESS != ret2)
    {
      DPLERR(0, "list_bucket failed %s:%s", bucket, ino.key);
      ret = DPL_FAILURE;
      goto end;
    }

  //printf("%s:%s n_files=%d n_dirs=%d\n", bucket, ino.key, dir->files->n_items, dir->directories->n_items);

  if (NULL != dir_hdlp)
    *dir_hdlp = dir;

  DPL_TRACE(dir->ctx, DPL_TRACE_VDIR, "dir_hdl=%p", dir);

  ret = DPL_SUCCESS;

 end:

  if (DPL_SUCCESS != ret)
    {
      /*
       * Fix: check dir for NULL *before* touching its members.  The
       * original tested dir->files / dir->directories first, which
       * dereferenced a NULL pointer on the malloc-failure path.
       */
      if (NULL != dir)
        {
          if (NULL != dir->files)
            dpl_vec_objects_free(dir->files);

          if (NULL != dir->directories)
            dpl_vec_common_prefixes_free(dir->directories);

          free(dir);
        }
    }

  DPL_TRACE(ctx, DPL_TRACE_VDIR, "ret=%d", ret);

  return ret;
}
dpl_status_t ls_recurse(struct ls_data *ls_data, char *dir, int level) { int ret; if (1 == ls_data->aflag) { dpl_vec_t *objects = NULL; int i; //raw listing ret = dpl_list_bucket(ctx, ctx->cur_bucket, NULL, NULL, &objects, NULL); if (DPL_SUCCESS != ret) { fprintf(stderr, "listbucket failure %s (%d)\n", dpl_status_str(ret), ret); return ret; } for (i = 0;i < objects->n_items;i++) { dpl_object_t *obj = (dpl_object_t *) objects->array[i]; if (0 == ls_data->pflag) { if (ls_data->lflag) { struct tm *stm; stm = localtime(&obj->last_modified); printf("%12llu %04d-%02d-%02d %02d:%02d %s\n", (unsigned long long) obj->size, 1900 + stm->tm_year, 1 + stm->tm_mon, stm->tm_mday, stm->tm_hour, stm->tm_min, obj->key); } else { printf("%s\n", obj->key); } } ls_data->total_size += obj->size; } if (NULL != objects) dpl_vec_objects_free(objects); } else { void *dir_hdl; dpl_dirent_t entry; dpl_ino_t cur_ino; if (1 == ls_data->Rflag) { ret = dpl_chdir(ctx, dir); if (DPL_SUCCESS != ret) return ret; cur_ino = dpl_cwd(ctx, ctx->cur_bucket); printf("%s%s%s:\n", 0 == level ? 
"" : "\n", ctx->delim, cur_ino.key); ret = dpl_opendir(ctx, ".", &dir_hdl); if (DPL_SUCCESS != ret) return ret; } else { ret = dpl_opendir(ctx, dir, &dir_hdl); if (DPL_SUCCESS != ret) return ret; } while (!dpl_eof(dir_hdl)) { ret = dpl_readdir(dir_hdl, &entry); if (DPL_SUCCESS != ret) return ret; if (0 == ls_data->pflag) { if (ls_data->lflag) { struct tm *stm; stm = localtime(&entry.last_modified); printf("%12llu %04d-%02d-%02d %02d:%02d %s\n", (unsigned long long) entry.size, 1900 + stm->tm_year, 1 + stm->tm_mon, stm->tm_mday, stm->tm_hour, stm->tm_min, entry.name); } else { printf("%s\n", entry.name); } } ls_data->total_size += entry.size; if (1 == ls_data->Rflag && strcmp(entry.name, ".") && (DPL_FTYPE_DIR == entry.type)) { ret = ls_recurse(ls_data, entry.name, level + 1); if (DPL_SUCCESS != ret) return ret; } } dpl_closedir(dir_hdl); if (1 == ls_data->Rflag && level > 0) { ret = dpl_chdir(ctx, ".."); if (DPL_SUCCESS != ret) return ret; } } return DPL_SUCCESS; }
int status_retrieve_states(struct cloudmig_ctx* ctx) { assert(ctx != NULL); assert(ctx->status.bucket_name != NULL); assert(ctx->status.buckets == NULL); dpl_status_t dplret = DPL_SUCCESS; int ret = EXIT_FAILURE; dpl_vec_t *objects; size_t migstatus_size = 0; ctx->src_ctx->cur_bucket = ctx->status.bucket_name; cloudmig_log(INFO_LVL, "[Loading Status]: Retrieving status...\n"); // Retrieve the list of files for the buckets states dplret = dpl_list_bucket(ctx->src_ctx, ctx->status.bucket_name, NULL, NULL, -1, &objects, NULL); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not list status bucket's files: %s\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); goto err; } // Allocate enough room for each bucket. ctx->status.n_buckets = objects->n_items; ctx->status.cur_state = 0; // -1 cause we dont want to allocate an entry for ".cloudmig" ctx->status.buckets = calloc(objects->n_items, sizeof(*(ctx->status.buckets))); if (ctx->status.buckets == NULL) { PRINTERR("%s: Could not allocate state data for each bucket: %s\n", __FUNCTION__, strerror(errno)); goto err; } // Now fill each one of these structures int i_bucket = 0; for (int i=0; i < objects->n_items; ++i, ++i_bucket) { dpl_object_t* obj = (dpl_object_t*)(objects->items[i]->ptr); if (strcmp(".cloudmig", obj->path) == 0) { // save the file size migstatus_size = obj->size; // fix the n_buckets of the status ctx --ctx->status.n_buckets; // Now get to next entry without advancing in the buckets. ++i; if (i >= objects->n_items) break ; } ctx->status.buckets[i_bucket].filename = strdup(obj->path); if (ctx->status.buckets[i_bucket].filename == NULL) { PRINTERR("%s: Could not allocate state data for each bucket: %s\n", __FUNCTION__, strerror(errno)); goto err; } ctx->status.buckets[i_bucket].size = obj->size; ctx->status.buckets[i_bucket].next_entry_off = 0; // The buffer will be read/allocated when needed. 
// Otherwise, it may use up too much memory ctx->status.buckets[i_bucket].buf = NULL; } if (status_retrieve_associated_buckets(ctx, migstatus_size) == EXIT_FAILURE) { PRINTERR("%s: Could not associate status files to dest buckets.\n", __FUNCTION__); goto err; } ret = EXIT_SUCCESS; cloudmig_log(INFO_LVL, "[Loading Status]: Status data retrieved.\n"); err: if (ret == EXIT_FAILURE && ctx->status.buckets != NULL) { for (int i=0; i < ctx->status.n_buckets; ++i) { if (ctx->status.buckets[i].filename) free(ctx->status.buckets[i].filename); } free(ctx->status.buckets); ctx->status.buckets = NULL; } if (objects != NULL) dpl_vec_objects_free(objects); ctx->src_ctx->cur_bucket = NULL; return ret; }
/*
 * Resolves obj_name inside the virtual directory parent_ino of bucket.
 *
 * "." resolves to the parent itself; ".." strips the last path
 * component from the parent key (root resolves to itself).  Otherwise
 * the parent is listed with the context delimiter, and obj_name is
 * matched first against objects (files), then against common prefixes
 * (subdirectories).
 *
 * On success writes the resolved inode/type through obj_inop/obj_typep
 * (each may be NULL).  Returns DPL_SUCCESS, DPL_ENOENT when nothing
 * matches, or DPL_FAILURE on errors.
 */
static dpl_status_t
dpl_vdir_lookup(dpl_ctx_t *ctx,
                char *bucket,
                dpl_ino_t parent_ino,
                const char *obj_name,
                dpl_ino_t *obj_inop,
                dpl_ftype_t *obj_typep)
{
  int ret, ret2;
  dpl_vec_t *files = NULL;
  dpl_vec_t *directories = NULL;
  int i;
  dpl_ino_t obj_ino;
  dpl_ftype_t obj_type;
  int delim_len = strlen(ctx->delim);
  int obj_name_len = strlen(obj_name);

  memset(&obj_ino, 0, sizeof (obj_ino));

  DPL_TRACE(ctx, DPL_TRACE_VDIR, "lookup bucket=%s parent_ino=%s obj_name=%s", bucket, parent_ino.key, obj_name);

  if (!strcmp(obj_name, "."))
    {
      /* "." is the parent directory itself — no listing needed. */
      if (NULL != obj_inop)
        *obj_inop = parent_ino;
      if (NULL != obj_typep)
        *obj_typep = DPL_FTYPE_DIR;
      ret = DPL_SUCCESS;
      goto end;
    }
  else if (!strcmp(obj_name, ".."))
    {
      char *p, *p2;

      if (!strcmp(parent_ino.key, ""))
        {
          //silent success for root dir
          if (NULL != obj_inop)
            *obj_inop = DPL_ROOT_INO;
          if (NULL != obj_typep)
            *obj_typep = DPL_FTYPE_DIR;
          ret = DPL_SUCCESS;
          goto end;
        }

      /* Strip the last component: directory keys end with the delimiter,
       * so skip the trailing delimiter (p -= delim_len), then scan
       * backwards for the previous delimiter and truncate just after it. */
      obj_ino = parent_ino;
      p = dpl_strrstr(obj_ino.key, ctx->delim);
      if (NULL == p)
        {
          fprintf(stderr, "parent key shall contain delim %s\n", ctx->delim);
          ret = DPL_FAILURE;
          goto end;
        }
      p -= delim_len;
      for (p2 = p;p2 > obj_ino.key;p2--)
        {
          if (!strncmp(p2, ctx->delim, delim_len))
            {
              DPRINTF("found delim\n");
              p2 += delim_len;
              break ;
            }
        }
      /* If no earlier delimiter was found, p2 is at the key start and
       * this empties the key (i.e. the parent of a top-level dir is root). */
      *p2 = 0;
      if (NULL != obj_inop)
        *obj_inop = obj_ino;
      if (NULL != obj_typep)
        *obj_typep = DPL_FTYPE_DIR;
      ret = DPL_SUCCESS;
      goto end;
    }

  //AWS do not like "" as a prefix
  ret2 = dpl_list_bucket(ctx, bucket,
                         !strcmp(parent_ino.key, "") ? NULL : parent_ino.key,
                         ctx->delim, &files, &directories);
  if (DPL_SUCCESS != ret2)
    {
      DPLERR(0, "list_bucket failed %s:%s", bucket, parent_ino.key);
      ret = DPL_FAILURE;
      goto end;
    }

  /* Pass 1: match obj_name against the basename of each listed object. */
  for (i = 0;i < files->n_items;i++)
    {
      dpl_object_t *obj = (dpl_object_t *) files->array[i];
      int key_len;
      char *p;

      /* Basename = text after the last delimiter (or the whole key). */
      p = dpl_strrstr(obj->key, ctx->delim);
      if (NULL != p)
        p += delim_len;
      else
        p = obj->key;

      DPRINTF("cmp obj_key=%s obj_name=%s\n", p, obj_name);

      if (!strcmp(p, obj_name))
        {
          DPRINTF("ok\n");
          key_len = strlen(obj->key);
          if (key_len >= DPL_MAXNAMLEN)
            {
              DPLERR(0, "key is too long");
              ret = DPL_FAILURE;
              goto end;
            }
          memcpy(obj_ino.key, obj->key, key_len);
          obj_ino.key[key_len] = 0;
          /* A key that ends with the delimiter is a directory marker. */
          if (key_len >= delim_len &&
              !strcmp(obj->key + key_len - delim_len, ctx->delim))
            obj_type = DPL_FTYPE_DIR;
          else
            obj_type = DPL_FTYPE_REG;
          if (NULL != obj_inop)
            *obj_inop = obj_ino;
          if (NULL != obj_typep)
            *obj_typep = obj_type;
          ret = DPL_SUCCESS;
          goto end;
        }
    }

  /* Pass 2: match obj_name against the last component of each common
   * prefix (prefixes end with the delimiter, e.g. "a/b/"). */
  for (i = 0;i < directories->n_items;i++)
    {
      dpl_common_prefix_t *prefix = (dpl_common_prefix_t *) directories->array[i];
      int key_len;
      char *p, *p2;

      p = dpl_strrstr(prefix->prefix, ctx->delim);
      if (NULL == p)
        {
          fprintf(stderr, "prefix %s shall contain delim %s\n", prefix->prefix, ctx->delim);
          continue ;
        }

      DPRINTF("p='%s'\n", p);

      /* Skip the trailing delimiter, then scan back to the previous
       * delimiter (or the prefix start) to isolate the last component
       * [p2, p] inclusive. */
      p -= delim_len;

      for (p2 = p;p2 > prefix->prefix;p2--)
        {
          DPRINTF("p2='%s'\n", p2);

          if (!strncmp(p2, ctx->delim, delim_len))
            {
              DPRINTF("found delim\n");

              p2 += delim_len;
              break ;
            }
        }

      key_len = p - p2 + 1;

      DPRINTF("cmp (prefix=%s) prefix=%.*s obj_name=%s\n", prefix->prefix, key_len, p2, obj_name);

      if (key_len == obj_name_len && !strncmp(p2, obj_name, obj_name_len))
        {
          DPRINTF("ok\n");
          key_len = strlen(prefix->prefix);
          if (key_len >= DPL_MAXNAMLEN)
            {
              DPLERR(0, "key is too long");
              ret = DPL_FAILURE;
              goto end;
            }
          memcpy(obj_ino.key, prefix->prefix, key_len);
          obj_ino.key[key_len] = 0;
          obj_type = DPL_FTYPE_DIR;
          if (NULL != obj_inop)
            *obj_inop = obj_ino;
          if (NULL != obj_typep)
            *obj_typep = obj_type;
          ret = DPL_SUCCESS;
          goto end;
        }
    }

  ret = DPL_ENOENT;

 end:

  if (NULL != files)
    dpl_vec_objects_free(files);

  if (NULL != directories)
    dpl_vec_common_prefixes_free(directories);

  DPL_TRACE(ctx, DPL_TRACE_VDIR, "ret=%d", ret);

  return ret;
}
/*
 * restrest: exercises the droplet REST API end to end against the
 * default profile.  Sequence: create a folder, POST an object with
 * user metadata, rename (move) it, GET it back and verify data and
 * metadata, replace metadata server-side, HEAD and verify, list the
 * folder (files + subdirectories), then delete the object.
 *
 * Usage: restrest <folder>   (folder must end with '/')
 * Returns 0 on success, 1 on any failure; resources are released via
 * goto-based cleanup.
 */
int
main(int argc, char **argv)
{
  int ret;
  dpl_ctx_t *ctx;
  char *folder = NULL;
  int folder_len;
  dpl_dict_t *metadata = NULL;
  char *data_buf = NULL;
  size_t data_len;
  char *data_buf_returned = NULL;
  u_int data_len_returned;
  dpl_dict_t *metadata_returned = NULL;
  dpl_dict_t *metadata2_returned = NULL;
  dpl_dict_var_t *metadatum = NULL;
  dpl_sysmd_t sysmd;
  char new_path[MAXPATHLEN];
  dpl_vec_t *files = NULL;
  dpl_vec_t *sub_directories = NULL;
  int i;

  if (2 != argc)
    {
      fprintf(stderr, "usage: restrest folder\n");
      ret = 1;
      goto end;
    }

  folder = argv[1];
  folder_len = strlen(folder);
  if (folder_len < 1)
    {
      fprintf(stderr, "bad folder\n");
      ret = 1;
      goto end;
    }
  /* A trailing slash is required so "%su.1" below forms a child path. */
  if (folder[folder_len-1] != '/')
    {
      fprintf(stderr, "folder name must end with a slash\n");
      ret = 1;
      goto end;
    }

  ret = dpl_init();           //init droplet library
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_init failed\n");
      ret = 1;
      goto end;
    }

  //open default profile
  ctx = dpl_ctx_new(NULL,     //droplet directory, default: "~/.droplet"
                    NULL);    //droplet profile, default: "default"
  if (NULL == ctx)
    {
      fprintf(stderr, "dpl_ctx_new failed\n");
      ret = 1;
      goto free_dpl;
    }

  //ctx->trace_level = ~0;
  //ctx->trace_buffers = 1;

  /**/
  fprintf(stderr, "creating folder\n");

  ret = dpl_put(ctx,           //the context
                NULL,          //no bucket
                folder,        //the folder
                NULL,          //no option
                DPL_FTYPE_DIR, //directory
                NULL,          //no condition
                NULL,          //no range
                NULL,          //no metadata
                NULL,          //no sysmd
                NULL,          //object body
                0);            //object length
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_put failed: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  /* Build a 10000-byte payload of 'z' and a metadata dict {foo: bar,
   * foo2: qux} for the object creation below. */
  data_len = 10000;
  data_buf = malloc(data_len);
  if (NULL == data_buf)
    {
      fprintf(stderr, "alloc data failed\n");
      ret = 1;
      goto free_all;
    }

  memset(data_buf, 'z', data_len);

  metadata = dpl_dict_new(13);
  if (NULL == metadata)
    {
      fprintf(stderr, "dpl_dict_new failed\n");
      ret = 1;
      goto free_all;
    }

  ret = dpl_dict_add(metadata, "foo", "bar", 0);
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_dict_add failed\n");
      ret = 1;
      goto free_all;
    }

  ret = dpl_dict_add(metadata, "foo2", "qux", 0);
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_dict_add failed\n");
      ret = 1;
      goto free_all;
    }

  /**/
  fprintf(stderr, "atomic creation of an object+MD\n");

  ret = dpl_post(ctx,           //the context
                 NULL,          //no bucket
                 folder,        //the folder
                 NULL,          //no option
                 DPL_FTYPE_REG, //regular object
                 NULL,          //condition
                 NULL,          //range
                 metadata,      //the metadata
                 NULL,          //no sysmd
                 data_buf,      //object body
                 data_len,      //object length
                 NULL,          //no query params
                 &sysmd);       //the returned sysmd
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_post failed: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  /* POST names the resource server-side; we need the returned path. */
  if (!(sysmd.mask & DPL_SYSMD_MASK_PATH))
    {
      fprintf(stderr, "path is absent from sysmd\n");
      ret = 1;
      goto free_all;
    }

  fprintf(stderr, "resource path %s\n", sysmd.path);

  /* Rename the server-chosen path to a predictable one: <folder>u.1 */
  snprintf(new_path, sizeof (new_path), "%su.1", folder);

  ret = dpl_copy(ctx,
                 NULL,          //no src bucket
                 sysmd.path,    //the src resource
                 NULL,          //no dst bucket
                 new_path,      //dst resource
                 NULL,          //no option
                 DPL_FTYPE_REG, //regular file
                 DPL_COPY_DIRECTIVE_MOVE, //rename
                 NULL,          //no metadata
                 NULL,          //no sysmd
                 NULL);         //no server side condition
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_move %s to %s failed: %s (%d)\n", sysmd.path, new_path, dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  /**/
  fprintf(stderr, "getting object+MD\n");

  ret = dpl_get(ctx,           //the context
                NULL,          //no bucket
                new_path,      //the key
                NULL,          //no opion
                DPL_FTYPE_REG, //object type
                NULL,          //no condition
                NULL,          //no range
                &data_buf_returned, //data object
                &data_len_returned, //data object length
                &metadata_returned, //metadata
                NULL);              //sysmd
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "dpl_get_id failed: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  fprintf(stderr, "checking object\n");

  /* Round-trip check: returned body must match what was posted. */
  if (data_len != data_len_returned)
    {
      fprintf(stderr, "data lengths mismatch\n");
      ret = 1;
      goto free_all;
    }

  if (0 != memcmp(data_buf, data_buf_returned, data_len))
    {
      fprintf(stderr, "data content mismatch\n");
      ret = 1;
      goto free_all;
    }

  fprintf(stderr, "checking metadata\n");

  metadatum = dpl_dict_get(metadata_returned, "foo");
  if (NULL == metadatum)
    {
      fprintf(stderr, "missing metadatum\n");
      ret = 1;
      goto free_all;
    }

  assert(metadatum->val->type == DPL_VALUE_STRING);
  if (strcmp(metadatum->val->string, "bar"))
    {
      fprintf(stderr, "bad value in metadatum\n");
      ret = 1;
      goto free_all;
    }

  metadatum = dpl_dict_get(metadata_returned, "foo2");
  if (NULL == metadatum)
    {
      fprintf(stderr, "missing metadatum\n");
      ret = 1;
      goto free_all;
    }

  assert(metadatum->val->type == DPL_VALUE_STRING);
  if (strcmp(metadatum->val->string, "qux"))
    {
      fprintf(stderr, "bad value in metadatum\n");
      ret = 1;
      goto free_all;
    }

  /**/
  fprintf(stderr, "setting MD only\n");

  /* Overwrite foo=bar with foo=bar2 locally, then push the dict with a
   * same-key copy using the METADATA_REPLACE directive. */
  ret = dpl_dict_add(metadata, "foo", "bar2", 0);
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "error updating metadatum: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  ret = dpl_copy(ctx,           //the context
                 NULL,          //no src bucket
                 new_path,      //the key
                 NULL,          //no dst bucket
                 new_path,      //the same key
                 NULL,          //no option
                 DPL_FTYPE_REG, //object type
                 DPL_COPY_DIRECTIVE_METADATA_REPLACE, //tell server to replace metadata
                 metadata,      //the updated metadata
                 NULL,          //no sysmd
                 NULL);         //no condition
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "error updating metadata: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  /**/
  fprintf(stderr, "getting MD only\n");

  ret = dpl_head(ctx,      //the context
                 NULL,     //no bucket,
                 new_path, //the key
                 NULL,     //no option
                 DPL_FTYPE_UNDEF, //no matter the file type
                 NULL,     //no condition,
                 &metadata2_returned,
                 NULL);
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "error getting metadata: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  fprintf(stderr, "checking metadata\n");

  /* foo must now be "bar2"; foo2 must be untouched. */
  metadatum = dpl_dict_get(metadata2_returned, "foo");
  if (NULL == metadatum)
    {
      fprintf(stderr, "missing metadatum\n");
      ret = 1;
      goto free_all;
    }

  assert(metadatum->val->type == DPL_VALUE_STRING);
  if (strcmp(metadatum->val->string, "bar2"))
    {
      fprintf(stderr, "bad value in metadatum\n");
      ret = 1;
      goto free_all;
    }

  metadatum = dpl_dict_get(metadata2_returned, "foo2");
  if (NULL == metadatum)
    {
      fprintf(stderr, "missing metadatum\n");
      ret = 1;
      goto free_all;
    }

  assert(metadatum->val->type == DPL_VALUE_STRING);
  if (strcmp(metadatum->val->string, "qux"))
    {
      fprintf(stderr, "bad value in metadatum\n");
      ret = 1;
      goto free_all;
    }

  /**/
  fprintf(stderr, "listing of folder\n");

  ret = dpl_list_bucket(ctx, NULL, folder, "/", -1, &files, &sub_directories);
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "error listing folder: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  /* HEAD each listed file to print its size and mtime. */
  for (i = 0;i < files->n_items;i++)
    {
      dpl_object_t *obj = (dpl_object_t *) dpl_vec_get(files, i);
      dpl_sysmd_t obj_sysmd;
      dpl_dict_t *obj_md = NULL;

      ret = dpl_head(ctx,
                     NULL,      //no bucket
                     obj->path,
                     NULL,      //option
                     DPL_FTYPE_UNDEF, //no matter the file type
                     NULL,      //condition
                     &obj_md,   //user metadata
                     &obj_sysmd); //system metadata
      if (DPL_SUCCESS != ret)
        {
          fprintf(stderr, "getattr error on %s: %s (%d)\n", obj->path, dpl_status_str(ret), ret);
          ret = 1;
          goto free_all;
        }
      fprintf(stderr, "file %s: size=%ld mtime=%lu\n", obj->path, obj_sysmd.size, obj_sysmd.mtime);
      //dpl_dict_print(obj_md, stderr, 5);
      dpl_dict_free(obj_md);
    }

  for (i = 0;i < sub_directories->n_items;i++)
    {
      dpl_common_prefix_t *dir = (dpl_common_prefix_t *) dpl_vec_get(sub_directories, i);

      fprintf(stderr, "dir %s\n", dir->prefix);
    }

  /**/
  fprintf(stderr, "delete object+MD\n");

  ret = dpl_delete(ctx,      //the context
                   NULL,     //no bucket
                   new_path, //the key
                   NULL,     //no option
                   DPL_FTYPE_UNDEF, //no matter the file type
                   NULL);    //no condition
  if (DPL_SUCCESS != ret)
    {
      fprintf(stderr, "error deleting object: %s (%d)\n", dpl_status_str(ret), ret);
      ret = 1;
      goto free_all;
    }

  ret = 0;

 free_all:
  if (NULL != sub_directories)
    dpl_vec_common_prefixes_free(sub_directories);
  if (NULL != files)
    dpl_vec_objects_free(files);
  if (NULL != metadata2_returned)
    dpl_dict_free(metadata2_returned);
  if (NULL != metadata_returned)
    dpl_dict_free(metadata_returned);
  if (NULL != data_buf_returned)
    free(data_buf_returned);
  if (NULL != metadata)
    dpl_dict_free(metadata);
  if (NULL != data_buf)
    free(data_buf);
  dpl_ctx_free(ctx); //free context
 free_dpl:
  dpl_free();        //free droplet library
 end:
  return ret;
}