/*
 * Invokes migfunc(tinfo, filestate) until it succeeds, making at most
 * n_attempts calls in total.
 *
 * Each intermediate failure is logged before retrying; the final failure
 * is logged with a distinct message.
 *
 * Returns the last value returned by migfunc (EXIT_SUCCESS on success).
 */
static int
migrate_with_retries(struct cldmig_info *tinfo,
                     struct file_transfer_state *filestate,
                     int (*migfunc)(struct cldmig_info *,
                                    struct file_transfer_state *),
                     int n_attempts)
{
    int status;

    for (int attempt = 1; /* loop exits via break */; ++attempt)
    {
        status = migfunc(tinfo, filestate);
        if (status == EXIT_SUCCESS)
            break ;
        if (attempt >= n_attempts)
        {
            // Out of retries: report the definitive failure.
            cloudmig_log(ERR_LVL,
                         "[Migrating] : Could not migrate file %s\n",
                         filestate->obj_path);
            break ;
        }
        cloudmig_log(ERR_LVL,
                     "[Migrating] : failure, retrying migration of file %s\n",
                     filestate->obj_path);
    }
    return status;
}
static void delete_status_bucket(struct cloudmig_ctx *ctx) { dpl_status_t dplret; dpl_vec_t *objects = NULL; cloudmig_log(DEBUG_LVL, "[Deleting files]: Deleting status bucket...\n"); dplret = dpl_list_bucket(ctx->src_ctx, ctx->status.bucket_name, NULL, NULL, &objects, NULL); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not list bucket %s for deletion : %s\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); goto deletebucket; } dpl_object_t** cur_object = (dpl_object_t**)objects->array; for (int i = 0; i < objects->n_items; ++i, ++cur_object) delete_file(ctx, ctx->status.bucket_name, (*cur_object)->key); deletebucket: dpl_deletebucket(ctx->src_ctx, ctx->status.bucket_name); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not delete bucket %s : %s.\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); return ; } cloudmig_log(DEBUG_LVL, "[Deleting Source] Bucket %s deleted.\n", ctx->status.bucket_name); }
static void delete_source_bucket(struct cloudmig_ctx *ctx, struct transfer_state *bucket_state) { // Ptr used to change from bucket status filename to bucket name // ie: the '.' of "file.cloudmig" char *dotptr = strrchr(bucket_state->filename, '.'); struct file_state_entry *fste = NULL; dpl_status_t dplret; if (dotptr == NULL) // though it should never happen... return ; cloudmig_log(DEBUG_LVL, "[Deleting Source]: Deleting source bucket" " for status file '%s'...\n", bucket_state->filename); // Here the buffer should never be allocated, so map the bucket state. if (cloudmig_map_bucket_state(ctx, bucket_state) == EXIT_FAILURE) return ; *dotptr = '\0'; // loop on the bucket state for each entry, to delete the files. while (bucket_state->next_entry_off < bucket_state->size) { fste = (void*)(bucket_state->buf + bucket_state->next_entry_off); delete_file(ctx, bucket_state->filename, (char*)(fste+1)); // Next entry... bucket_state->next_entry_off += sizeof(*fste) + ntohl(fste->namlen); } free(bucket_state->buf); /* * Remove bucket now that all of its files were deleted. */ *dotptr = '\0'; dplret = dpl_deletebucket(ctx->src_ctx, bucket_state->filename); if (dplret != DPL_SUCCESS) { /* * In case of an http 409 error (EEXIST or ENOENT), * do not do anything. * Maybe files were added in the bucket in the meantime ? * The user will have to manage it himself, it's his fault. */ PRINTERR("%s: Could not remove bucket %s : %s.\n" "The bucket may have been tampered with" " since the migration's start.\n", __FUNCTION__, bucket_state->filename, dpl_status_str(dplret)); } else cloudmig_log(DEBUG_LVL, "[Deleting Source]: Source bucket '%s' deleted successfully.\n", bucket_state->filename); *dotptr = '.'; }
/* * Main migration function. * * It manages every step of the migration, and the deletion of old objects * if the migration was a success. */ int migrate(struct cloudmig_ctx* ctx) { int nb_failures = 0; int ret; cloudmig_log(DEBUG_LVL, "Starting migration...\n"); for (int i=0; i < ctx->options.nb_threads; ++i) { ctx->tinfos[i].stop = false; if (pthread_create(&ctx->tinfos[i].thr, NULL, (void*(*)(void*))migrate_worker_loop, &ctx->tinfos[i]) == -1) { PRINTERR("Could not start worker thread %i/%i", i, ctx->options.nb_threads); nb_failures = 1; // Stop all the already-running threads before attempting to join migration_stop(ctx); break ; } } /* * Join all the threads, and cumulate their error counts */ for (int i=0; i < ctx->options.nb_threads; i++) { int errcount; ret = pthread_join(ctx->tinfos[i].thr, (void**)&errcount); if (ret != 0) cloudmig_log(WARN_LVL, "Could not join thread %i: %s.\n", i, strerror(errno)); else nb_failures += errcount; } // In any case, attempt to update the status digest before doing anything else (void)status_digest_upload(ctx->status->digest); // Check if it was the end of the transfer by checking the number of failures if (nb_failures == 0) // 0 == number of failures that occured. { cloudmig_log(INFO_LVL, "Migration finished with success !\n"); if (ctx->tinfos[0].config_flags & DELETE_SOURCE_DATA) delete_source(ctx); } else { PRINTERR("An error occured during the migration." " At least one file could not be transfered\n", 0); goto err; } err: return nb_failures; }
/*
 * Deletes the whole migration source: every source bucket recorded in the
 * migration status, followed by the status bucket itself.
 */
void
delete_source(struct cloudmig_ctx *ctx)
{
    cloudmig_log(INFO_LVL,
        "[Deleting Source]: Starting deletion of the migration's source...\n");

    int nb_states = ctx->status.nb_states;
    for (int idx = 0; idx < nb_states; ++idx)
        delete_source_bucket(ctx, &ctx->status.bucket_states[idx]);
    delete_status_bucket(ctx);

    cloudmig_log(INFO_LVL,
        "[Deleting Source]: Deletion of the migration's source done.\n");
}
static int migrate_object(struct cldmig_info *tinfo, struct file_transfer_state* filestate) { int failures = 0; int ret = EXIT_FAILURE; int (*migfunc)(struct cldmig_info*, struct file_transfer_state*) = NULL; cloudmig_log(DEBUG_LVL, "[Migrating] : starting migration of file %s\n", filestate->obj_path); switch (filestate->fixed.type) { case DPL_FTYPE_DIR: migfunc = &create_directory; break ; case DPL_FTYPE_SYMLINK: migfunc = &create_symlink; break ; case DPL_FTYPE_REG: default: migfunc = &transfer_file; break ; } ret = migrate_with_retries(tinfo, filestate, migfunc, 3); if (ret != EXIT_SUCCESS) goto ret; status_store_entry_complete(tinfo->ctx, filestate); display_trigger_update(tinfo->ctx->display); cloudmig_log(INFO_LVL, "[Migrating] : file %s migrated.\n", filestate->obj_path); ret: return (failures == 3); }
/*
 * Deletes a single file from the given source bucket.
 * Failures are only logged: deletion of the source is best-effort.
 */
static void
delete_file(struct cloudmig_ctx *ctx, char *bucket, char *filename)
{
    dpl_status_t status;

    cloudmig_log(DEBUG_LVL, "[Deleting Source]\t Deleting file '%s'...\n",
                 filename);

    status = dpl_delete(ctx->src_ctx, bucket, filename, NULL);
    if (status == DPL_SUCCESS)
        return ;

    PRINTERR("%s: Could not delete the file %s"
             " from the bucket %s : %s",
             __FUNCTION__, filename, bucket, dpl_status_str(status));
}
/*
 * Downloads the general migration status file (".cloudmig") from the source,
 * decodes its header, then walks its entry list to associate each bucket
 * status file (ctx->status.buckets[i]) with its destination bucket name.
 *
 * Each entry is a struct cldmig_state_entry immediately followed by the
 * status filename (entry->file bytes) and the destination bucket name
 * (entry->bucket bytes); both length fields are stored in network byte
 * order.
 *
 * fsize: size of the ".cloudmig" file, recorded into the general status.
 * Returns EXIT_SUCCESS, or EXIT_FAILURE on download/allocation error.
 */
static int
status_retrieve_associated_buckets(struct cloudmig_ctx* ctx, size_t fsize)
{
    int                         ret = EXIT_FAILURE;
    dpl_status_t                dplret;
    dpl_dict_t                  *metadata = NULL;
    struct cldmig_state_entry   *entry = NULL;
    char                        *buffer = NULL;
    unsigned int                buflen = 0;

    cloudmig_log(INFO_LVL,
                 "[Loading Status]: Retrieving source/destination"
                 " buckets associations...\n");

    /*
     * NOTE(review): this guard fails when general.buf is NULL, yet the
     * buffer is only assigned below from dpl_fget -- so the caller is
     * presumably expected to have pre-set general.buf, or the test is
     * inverted. Confirm against the caller before changing it.
     */
    if (ctx->status.general.buf == NULL)
    {
        PRINTERR("%s: Could not allocate memory for migration status buffer.\n",
                 __FUNCTION__);
        goto end;
    }

    // Fetch the whole ".cloudmig" file into a freshly allocated buffer.
    dplret = dpl_fget(ctx->src_ctx, ".cloudmig",
                      NULL /*opt*/, NULL/*cond*/, NULL/*range*/,
                      &buffer, &buflen, &metadata, NULL/*sysmd*/);
    if (dplret != DPL_SUCCESS)
    {
        PRINTERR("%s: Could not read the general migration status file: %s.\n",
                 __FUNCTION__, dpl_status_str(dplret));
        goto end;
    }
    // Transfer buffer ownership to the general status (freed elsewhere).
    ctx->status.general.buf = buffer;
    ctx->status.general.size = fsize;
    buffer= NULL;

    /*
     * Now that we mapped the status file,
     * Let's read it and associate bucket status files with
     * the destination buckets.
     */
    // Switch from big endian 64 to host endian
    ctx->status.general.head.total_sz =
        be64toh(((struct cldmig_state_header*)ctx->status.general.buf)->total_sz);
    ctx->status.general.head.done_sz =
        be64toh(((struct cldmig_state_header*)ctx->status.general.buf)->done_sz);
    ctx->status.general.head.nb_objects =
        be64toh(((struct cldmig_state_header*)ctx->status.general.buf)->nb_objects);
    ctx->status.general.head.done_objects =
        be64toh(((struct cldmig_state_header*)ctx->status.general.buf)->done_objects);

    // Now map the matching buckets
    // (each iteration advances by the entry header plus both of its
    // variable-length strings; lengths are in network byte order).
    for (entry = (void*)ctx->status.general.buf
                 + sizeof(struct cldmig_state_header);
         (long int)entry < (long int)(ctx->status.general.buf
                                      + ctx->status.general.size);
         entry = (void*)((char*)(entry) + sizeof(*entry)
                         + ntohl(entry->file) + ntohl(entry->bucket)))
    {
        cloudmig_log(DEBUG_LVL,
            "[Loading Status]: searching match for status file %.*s.\n",
            ntohl(entry->file), (char*)(entry+1));
        // Match the current entry with the right bucket.
        for (int i=0; i < ctx->status.n_buckets; ++i)
        {
            // Is it the right one ?
            if (!strcmp((char*)(entry+1), ctx->status.buckets[i].filename))
            {
                // copy the destination bucket name
                // (it is stored right after the status filename).
                ctx->status.buckets[i].dest_bucket =
                    strdup((char*)entry + sizeof(*entry) + ntohl(entry->file));
                if (ctx->status.buckets[i].dest_bucket == NULL)
                {
                    PRINTERR("%s: Could not allocate memory while"
                             " loading status...\n", __FUNCTION__);
                    goto end;
                }
                cloudmig_log(DEBUG_LVL,
                    "[Loading Status]: matched status file %s to dest bucket %s.\n",
                    ctx->status.buckets[i].filename,
                    ctx->status.buckets[i].dest_bucket);
                break ;
            }
        }
    }

    ret = EXIT_SUCCESS;
    cloudmig_log(INFO_LVL, "[Loading Status]: Source/Destination"
                 " buckets associations done.\n");
end:
    // buffer is NULL here unless dpl_fget succeeded but ownership was
    // not transferred (cannot happen above); freed defensively.
    if (buffer)
        free(buffer);
    if (metadata)
        dpl_dict_free(metadata);
    return ret;
}
int status_retrieve_states(struct cloudmig_ctx* ctx) { assert(ctx != NULL); assert(ctx->status.bucket_name != NULL); assert(ctx->status.buckets == NULL); dpl_status_t dplret = DPL_SUCCESS; int ret = EXIT_FAILURE; dpl_vec_t *objects; size_t migstatus_size = 0; ctx->src_ctx->cur_bucket = ctx->status.bucket_name; cloudmig_log(INFO_LVL, "[Loading Status]: Retrieving status...\n"); // Retrieve the list of files for the buckets states dplret = dpl_list_bucket(ctx->src_ctx, ctx->status.bucket_name, NULL, NULL, -1, &objects, NULL); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not list status bucket's files: %s\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); goto err; } // Allocate enough room for each bucket. ctx->status.n_buckets = objects->n_items; ctx->status.cur_state = 0; // -1 cause we dont want to allocate an entry for ".cloudmig" ctx->status.buckets = calloc(objects->n_items, sizeof(*(ctx->status.buckets))); if (ctx->status.buckets == NULL) { PRINTERR("%s: Could not allocate state data for each bucket: %s\n", __FUNCTION__, strerror(errno)); goto err; } // Now fill each one of these structures int i_bucket = 0; for (int i=0; i < objects->n_items; ++i, ++i_bucket) { dpl_object_t* obj = (dpl_object_t*)(objects->items[i]->ptr); if (strcmp(".cloudmig", obj->path) == 0) { // save the file size migstatus_size = obj->size; // fix the n_buckets of the status ctx --ctx->status.n_buckets; // Now get to next entry without advancing in the buckets. ++i; if (i >= objects->n_items) break ; } ctx->status.buckets[i_bucket].filename = strdup(obj->path); if (ctx->status.buckets[i_bucket].filename == NULL) { PRINTERR("%s: Could not allocate state data for each bucket: %s\n", __FUNCTION__, strerror(errno)); goto err; } ctx->status.buckets[i_bucket].size = obj->size; ctx->status.buckets[i_bucket].next_entry_off = 0; // The buffer will be read/allocated when needed. 
// Otherwise, it may use up too much memory ctx->status.buckets[i_bucket].buf = NULL; } if (status_retrieve_associated_buckets(ctx, migstatus_size) == EXIT_FAILURE) { PRINTERR("%s: Could not associate status files to dest buckets.\n", __FUNCTION__); goto err; } ret = EXIT_SUCCESS; cloudmig_log(INFO_LVL, "[Loading Status]: Status data retrieved.\n"); err: if (ret == EXIT_FAILURE && ctx->status.buckets != NULL) { for (int i=0; i < ctx->status.n_buckets; ++i) { if (ctx->status.buckets[i].filename) free(ctx->status.buckets[i].filename); } free(ctx->status.buckets); ctx->status.buckets = NULL; } if (objects != NULL) dpl_vec_objects_free(objects); ctx->src_ctx->cur_bucket = NULL; return ret; }