void cloud_print_error()
{
    /* Always print the status name; statuses at or above
     * S3StatusErrorAccessDenied also carry server-side error details. */
    fprintf(stderr, "Return status: %s\n", S3_get_status_name(statusG));
    if (statusG >= S3StatusErrorAccessDenied)
        fprintf(stderr, "%s\n", errorDetailsG);
}
static void printError()
{
    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(statusG));
    if (statusG >= S3StatusErrorAccessDenied)
        fprintf(stderr, "%s\n", errorDetailsG);
}
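/*
 * The two helpers above assume libs3's usual sample wiring: a completion
 * callback stores the final status in the global statusG and formats any
 * server-supplied error into errorDetailsG. A minimal sketch of that wiring
 * follows (the buffer size is an arbitrary choice for this sketch):
 */
static S3Status statusG;
static char errorDetailsG[4096];

static void responseCompleteCallback(S3Status status,
                                     const S3ErrorDetails *error,
                                     void *callbackData)
{
    (void) callbackData;

    statusG = status;
    int len = 0;
    if (error && error->message)
        len += snprintf(&errorDetailsG[len], sizeof(errorDetailsG) - len,
                        "  Message: %s\n", error->message);
    if (error && error->resource)
        len += snprintf(&errorDetailsG[len], sizeof(errorDetailsG) - len,
                        "  Resource: %s\n", error->resource);
    if (error && error->furtherDetails)
        len += snprintf(&errorDetailsG[len], sizeof(errorDetailsG) - len,
                        "  Further details: %s\n", error->furtherDetails);
}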
int myS3Error(int status, int irodsErrorCode)
{
    if (status < 0)
        return status;
    rodsLogError(LOG_ERROR, irodsErrorCode, "myS3Error: error:%s",
                 S3_get_status_name((S3Status) status));
    return (irodsErrorCode - status);
}
static void AbortMultipartUploadCompleteCallback(S3Status requestStatus,
                                                 const S3ErrorDetails *s3ErrorDetails,
                                                 void *callbackData)
{
    (void) callbackData;
    (void) s3ErrorDetails;
    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(requestStatus));
}
static int get_s3_object(char *objectName, get_object_callback_data *data,
                         S3GetObjectHandler *getObjectHandler)
{
    assert(objectName && data && getObjectHandler);

    memset(data, 0, sizeof(get_object_callback_data));

    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    getBucketName(sizeof(bucket_name), bucket_name, objectName);

    // Get a local copy of the general bucketContext, then overwrite the
    // pointer to the bucket_name
    S3BucketContext localbucketContext;
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    data->buffer_offset = 0;
    data->buffer = NULL;

    double before_s3_get = ct_now();
    int retry_count = RETRYCOUNT;
    do {
        S3_get_object(&localbucketContext, objectName, NULL, 0, 0, NULL,
                      getObjectHandler, data);
    } while (S3_status_is_retryable(data->status) && should_retry(&retry_count));
    CT_TRACE("S3 get of %s took %fs", objectName, ct_now() - before_s3_get);

    if (data->buffer == NULL)
        return -ENOMEM;

    if (data->status != S3StatusOK) {
        CT_ERROR(-EIO, "S3Error %s", S3_get_status_name(data->status));
        return -EIO;
    }

    // Verify the payload against the MD5 recorded with the object
    double before_checksum = ct_now();
    unsigned char md5[MD5_DIGEST_LENGTH];
    char md5_s[MD5_ASCII];
    MD5_CTX mdContext;
    MD5_Init(&mdContext);
    MD5_Update(&mdContext, data->buffer, data->contentLength);
    MD5_Final(md5, &mdContext);
    int i;
    for (i = 0; i < MD5_DIGEST_LENGTH; i++)
        sprintf(&md5_s[i * 2], "%02x", md5[i]);
    if (strcmp(md5_s, data->md5) != 0) {
        CT_ERROR(-EIO, "Bad MD5 checksum for %s, computed %s, expected %s",
                 objectName, md5_s, data->md5);
        return -EIO;
    }
    CT_TRACE("Checksum of %s took %fs", objectName, ct_now() - before_checksum);

    return 0;
}
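/*
 * The copytool snippets in this section rely on a get_object_callback_data
 * structure and a should_retry() helper that are defined elsewhere. The
 * following is a plausible sketch, with fields inferred from how the code
 * uses them; the exact definitions in the original source may differ.
 */
typedef struct {
    S3Status status;         /* final status of the last S3 request */
    char *buffer;            /* object body, allocated by the data callback */
    uint64_t buffer_offset;  /* number of bytes of buffer filled so far */
    uint64_t contentLength;  /* Content-Length of the S3 object */
    uint64_t totalLength;    /* "totallength" metadata: size of the whole file */
    uint64_t chunk_size;     /* "chunksize" metadata: bytes per chunk */
    char md5[MD5_ASCII];     /* hex MD5 from the response headers */
} get_object_callback_data;

/* Decrement the caller's retry budget, sleeping briefly between attempts
 * (the fixed one-second backoff is an assumption of this sketch). */
static int should_retry(int *retry_count)
{
    if (*retry_count > 0) {
        (*retry_count)--;
        sleep(1);
        return 1;
    }
    return 0;
}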
int main(int argc, char **argv)
{
    if (2 > argc) {
        usage();
        return 1;
    }

    // Split "bucket/key" into bucket name and key
    char *slash = argv[1];
    while (*slash && (*slash != '/'))
        slash++;
    *slash++ = 0;
    const char *bucketName = argv[1];
    const char *key = slash;

    S3Protocol protocolG = S3ProtocolHTTP;
    const char *accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
    const char *secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
    S3Status status;
    const char *hostname = getenv("S3_HOSTNAME");

    if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }

    S3BucketContext bucketContext = {
        0, bucketName, protocolG, S3UriStylePath,
        accessKeyIdG, secretAccessKeyG, 0
    };
    S3ResponseHandler responseHandler = { 0, &responseCompleteCallback };

    //---------------------- delete object -----------------//
    S3_delete_object(&bucketContext, key, 0, &responseHandler, 0);

    S3_deinitialize();
    return 0;
}
static void S3_init()
{
    S3Status status;
    const char *hostname = getenv("S3_HOSTNAME");

    if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }
}
static void s3_init()
{
    S3Status status;
    char host[STR] = "";
    get_host(host);

    if ((status = S3_initialize("s3", S3_INIT_ALL, host)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }
}
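/*
 * get_host() is not shown; given how s3_init() uses it, a minimal sketch
 * would copy the S3_HOSTNAME environment variable into the caller's buffer
 * (STR is the buffer size constant used above):
 */
static void get_host(char *host)
{
    const char *env = getenv("S3_HOSTNAME");
    snprintf(host, STR, "%s", env ? env : "");
}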
static void responseCompleteCallback(S3Status status,
                                     const S3ErrorDetails *error,
                                     void *callbackData)
{
    const char *statusName = S3_get_status_name(status);
    printf("Response status code is: %s\n", statusName);
    if (status == S3StatusOK)
        printf("The object exists.\n");
    else
        printf("The object was not found (or the request failed).\n");
}
int main(int argc, char **args)
{
    if (3 > argc) {
        usage();
        return 1;
    }

    const char *bucketName = args[1];
    const char *key = args[2];

    S3Protocol protocolG = S3ProtocolHTTP;
    const char *accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
    const char *secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
    S3Status status;
    const char *hostname = getenv("S3_HOSTNAME");

    if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }

    S3BucketContext bucketContext = {
        0, bucketName, protocolG, S3UriStylePath,
        accessKeyIdG, secretAccessKeyG, 0
    };
    S3ResponseHandler responseHandler = {
        &responsePropertiesCallback, &responseCompleteCallback
    };

    //---------------------- check whether the object exists -----------------//
    S3_head_object(&bucketContext, key, 0, &responseHandler, 0);

    S3_deinitialize();
    return 0;
}
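/*
 * The head-object example also needs a properties callback; for a pure
 * existence check it can simply accept the response headers. A minimal
 * sketch:
 */
static S3Status responsePropertiesCallback(const S3ResponseProperties *properties,
                                           void *callbackData)
{
    (void) properties;
    (void) callbackData;
    return S3StatusOK;  /* nothing to record for an existence check */
}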
static void print_error_details()
{
    fprintf(stderr, "\nERROR: %s\n", S3_get_status_name(RequestStatus));
    fprintf(stderr, "%s\n", RequestErrDetails);
}
int putFileIntoS3(char *fileName, char *s3ObjName)
{
    S3Status status;
    char *key;
    struct stat statBuf;
    uint64_t fileSize;
    FILE *fd;
    char *accessKeyId;
    char *secretAccessKey;
    put_object_callback_data data;

    accessKeyId = getenv("S3_ACCESS_KEY_ID");
    if (accessKeyId == NULL) {
        printf("S3_ACCESS_KEY_ID environment variable is undefined\n");
        return (-1);
    }

    secretAccessKey = getenv("S3_SECRET_ACCESS_KEY");
    if (secretAccessKey == NULL) {
        printf("S3_SECRET_ACCESS_KEY environment variable is undefined\n");
        return (-1);
    }

    // Split "bucket/key": the leading part of s3ObjName is the bucket name
    key = (char *) strchr(s3ObjName, '/');
    if (key == NULL) {
        printf("S3 key for the object not defined\n");
        return (-1);
    }
    *key = '\0';
    key++;

    if (stat(fileName, &statBuf) == -1) {
        printf("Unable to stat input file\n");
        return (-1);
    }
    fileSize = statBuf.st_size;

    fd = fopen(fileName, "r");
    if (fd == NULL) {
        printf("Unable to open input file\n");
        return (-1);
    }
    data.infile = fd;

    // Note: this snippet uses the older libs3 API, where S3BucketContext has
    // no hostName member and S3_initialize() takes only two arguments. The
    // 1 and 0 in the original initializer are spelled out as enum values here.
    S3BucketContext bucketContext = {
        s3ObjName, S3ProtocolHTTP, S3UriStyleVirtualHost,
        accessKeyId, secretAccessKey
    };
    S3PutObjectHandler putObjectHandler = {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &putObjectDataCallback
    };

    if ((status = S3_initialize("s3", S3_INIT_ALL)) != S3StatusOK) {
        printf("Failed to initialize libs3: %s\n", S3_get_status_name(status));
        fclose(fd);
        return (-1);
    }

    S3_put_object(&bucketContext, key, fileSize, NULL, 0,
                  &putObjectHandler, &data);

    if (statusG != S3StatusOK) {
        printf("Put failed: %i\n", statusG);
        S3_deinitialize();
        fclose(fd);
        return (-1);
    }

    S3_deinitialize();
    fclose(fd);
    return (0);
}
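/*
 * putFileIntoS3() assumes a put-side data callback that streams the open
 * file to libs3. A sketch following the libs3 sample pattern, assuming
 * put_object_callback_data carries the FILE * and the bytes left to send:
 */
static int putObjectDataCallback(int bufferSize, char *buffer,
                                 void *callbackData)
{
    put_object_callback_data *data = (put_object_callback_data *) callbackData;
    int ret = 0;

    if (data->contentLength) {
        // Hand libs3 at most bufferSize bytes read from the input file
        int toRead = ((data->contentLength > (unsigned) bufferSize) ?
                      (unsigned) bufferSize : data->contentLength);
        ret = fread(buffer, 1, toRead, data->infile);
    }
    data->contentLength -= ret;

    return ret;  /* returning 0 tells libs3 the body is complete */
}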
int main(int argc, char **argv)
{
    if (3 > argc) {
        usage();
        return 1;
    }

    // Split "bucket/key" into bucket name and key
    char *slash = argv[1];
    while (*slash && (*slash != '/'))
        slash++;
    if (!*slash || !*(slash + 1)) {
        fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", argv[1]);
        usage();
        exit(-1);
    }
    *slash++ = 0;
    const char *bucketName = argv[1];
    const char *key = slash;

    const char *uploadId = 0;
    const char *filename = argv[2];
    uint64_t contentLength = 0;
    const char *cacheControl = 0, *contentType = 0, *md5 = 0;
    const char *contentDispositionFilename = 0, *contentEncoding = 0;
    int64_t expires = -1;
    S3CannedAcl cannedAcl = S3CannedAclPrivate;
    int metaPropertiesCount = 0;
    S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
    char useServerSideEncryption = 0;
    int noStatus = 0;

    put_object_callback_data data;
    data.infile = 0;
    data.gb = 0;
    data.noStatus = noStatus;

    if (filename) {
        if (!contentLength) {
            struct stat statbuf;
            // Stat the file to get its length
            if (stat(filename, &statbuf) == -1) {
                fprintf(stderr, "\nERROR: Failed to stat file %s: ", filename);
                perror(0);
                exit(-1);
            }
            contentLength = statbuf.st_size;
        }
        // Open the file
        if (!(data.infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
            fprintf(stderr, "\nERROR: Failed to open input file %s: ", filename);
            perror(0);
            exit(-1);
        }
    }
    else {
        usage();
        return 1;
    }
    data.contentLength = data.originalContentLength = contentLength;

    S3Protocol protocolG = S3ProtocolHTTP;
    const char *accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
    const char *secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
    S3Status status;
    const char *hostname = getenv("S3_HOSTNAME");

    if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }

    S3BucketContext bucketContext = {
        0, bucketName, protocolG, S3UriStylePath,
        accessKeyIdG, secretAccessKeyG, 0
    };
    S3PutProperties putProperties = {
        contentType, md5, cacheControl, contentDispositionFilename,
        contentEncoding, expires, cannedAcl,
        metaPropertiesCount, metaProperties, useServerSideEncryption
    };
    S3PutObjectHandler putObjectHandler = {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &putObjectDataCallback
    };

    //---------------------- put object -----------------//
    S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
                  &putObjectHandler, &data);

    S3_deinitialize();
    return 0;
}
static int ct_restore_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int dst_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64 file_offset = hai->hai_extent.offset;
    struct stat dst_st;
    __u64 write_total = 0;
    __u64 length = hai->hai_extent.length;
    time_t last_report_time;
    time_t now;
    int rc = 0;
    double start_ct_now = ct_now();

    // Restore a file from the object store back to Lustre
    CT_TRACE("Restoring %s to %s", src, dst);

    if (fstat(dst_fd, &dst_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", dst);
        return rc;
    }

    if (!S_ISREG(dst_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", dst);
        return rc;
    }

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed", src, dst);
        goto out;
    }

    errno = 0;

    last_report_time = time(NULL);

    // Will be assigned the correct value based on the metadata
    long long int object_chunk_size = chunk_size;

    do {
        // Downloading from the object store
        char src_chunk_s[S3_MAX_KEY_SIZE];
        S3GetObjectHandler getObjectHandler = {
            getResponseHandler,
            &getObjectDataCallback
        };

        if (length == -1) {
            // Discover length and chunk size from the first object's metadata
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.0", src);

            if (file_offset == 0) {
                // Download data and metadata from the first chunk
                get_object_callback_data data;
                rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
                if (rc < 0)
                    goto out;

                length = data.totalLength;
                object_chunk_size = data.chunk_size;

                char *uncompress_buf = NULL;
                uncompress_buf = malloc(object_chunk_size);
                if (uncompress_buf == NULL) {
                    rc = -ENOMEM;
                    goto out;
                }

                double before_decompression = ct_now();
                int decompressed_size = LZ4_decompress_safe(data.buffer,
                                                            uncompress_buf,
                                                            data.contentLength,
                                                            object_chunk_size);
                if (decompressed_size < 0) {
                    rc = -1;
                    CT_ERROR(rc, "Decompression error");
                    goto out;
                }
                CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs "
                         "and the uncompressed size is %i bytes",
                         src, data.contentLength,
                         ct_now() - before_decompression, decompressed_size);

                double before_lustre_write = ct_now();
                pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
                CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu to "
                         "lustre took %fs",
                         src_chunk_s, object_chunk_size, file_offset,
                         ct_now() - before_lustre_write);

                if (uncompress_buf != NULL)
                    free(uncompress_buf);
                if (data.buffer != NULL)
                    free(data.buffer);

                write_total = decompressed_size;
                file_offset += decompressed_size;

                he.offset = file_offset;
                he.length = data.contentLength;
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }

                if (write_total == length) {
                    // Completed the full write with the first object
                    rc = 0;
                    break;
                }
            }
            else {
                // Only make a head request to get the metadata of the first object
                get_object_callback_data data;

                char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
                getBucketName(sizeof(bucket_name), bucket_name, src_chunk_s);

                // Get a local copy of the general bucketContext, then overwrite
                // the pointer to the bucket_name
                S3BucketContext localbucketContext;
                memcpy(&localbucketContext, &bucketContext,
                       sizeof(S3BucketContext));
                localbucketContext.bucketName = bucket_name;

                int retry_count = RETRYCOUNT;
                do {
                    S3_head_object(&localbucketContext, src_chunk_s, NULL,
                                   &headResponseHandler, &data);
                } while (S3_status_is_retryable(data.status) &&
                         should_retry(&retry_count));
                if (data.status != S3StatusOK) {
                    rc = -EIO;
                    CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
                    goto out;
                }

                object_chunk_size = data.chunk_size;
                length = data.totalLength;
            }
        }
        else {
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.%llu",
                     src, file_offset / object_chunk_size);

            long long unsigned int chunk;
            if (length - write_total > object_chunk_size) {
                // upper bound is the chunk_size
                chunk = object_chunk_size;
            }
            else {
                // limited by the file
                chunk = length - write_total;
            }

            get_object_callback_data data;
            rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
            if (rc < 0)
                goto out;

            char *uncompress_buf = NULL;
            uncompress_buf = malloc(object_chunk_size);
            if (uncompress_buf == NULL) {
                rc = -ENOMEM;
                goto out;
            }

            double before_decompression = ct_now();
            int decompressed_size = LZ4_decompress_safe(data.buffer,
                                                        uncompress_buf,
                                                        data.contentLength,
                                                        object_chunk_size);
            if (decompressed_size < 0) {
                rc = -1;
                CT_ERROR(rc, "Decompression error");
                goto out;
            }
            CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs and "
                     "the uncompressed size is %i",
                     src, data.contentLength, ct_now() - before_decompression,
                     decompressed_size);

            double before_lustre_write = ct_now();
            pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
            CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu to "
                     "lustre took %fs",
                     src_chunk_s, chunk, file_offset,
                     ct_now() - before_lustre_write);

            if (uncompress_buf != NULL)
                free(uncompress_buf);
            if (data.buffer != NULL)
                free(data.buffer);

            now = time(NULL);
            if (now >= last_report_time + ct_opt.o_report_int) {
                last_report_time = now;
                CT_TRACE("sending progress report for restoring %s", src);
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }
            }

            write_total += decompressed_size;
            file_offset += decompressed_size;
        }
        rc = 0;
    } while (file_offset < length);

    if (hai->hai_action == HSMA_RESTORE) {
        /*
         * Truncate the restored file. The size is taken from the archive;
         * this is done to support restore after a force release, which leaves
         * the file with the wrong size (it can be bigger than the new size).
         * Make sure the file is on disk before reporting success.
         */
        rc = ftruncate(dst_fd, length);
        if (rc < 0) {
            rc = -errno;
            CT_ERROR(rc, "cannot truncate '%s' to size %llu", dst, length);
            err_major++;
        }
    }

out:
    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);
    return rc;
}
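/*
 * ct_restore_data() assumes a get-side data callback that appends each piece
 * of the body into data->buffer. A plausible sketch; allocating the buffer
 * from contentLength (assumed to have been recorded earlier by the
 * properties callback) is a choice made for this sketch:
 */
static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
                                      void *callbackData)
{
    get_object_callback_data *data = (get_object_callback_data *) callbackData;

    if (data->buffer == NULL) {
        data->buffer = malloc(data->contentLength);
        if (data->buffer == NULL)
            return S3StatusAbortedByCallback;
    }
    // Append this piece of the body at the current offset
    memcpy(data->buffer + data->buffer_offset, buffer, bufferSize);
    data->buffer_offset += bufferSize;

    return S3StatusOK;
}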
static int ct_archive_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int src_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64 file_offset = hai->hai_extent.offset;
    struct stat src_st;
    char *uncompress_buf = NULL;
    char *compress_buf = NULL;
    __u64 write_total = 0;
    __u64 length = hai->hai_extent.length;
    time_t last_report_time;
    int rc = 0;
    double start_ct_now = ct_now();
    time_t now;
    int compression_bound = LZ4_compressBound(chunk_size);

    // Archiving a file from Lustre to the object store
    CT_TRACE("Archiving %s to %s", src, dst);

    if (fstat(src_fd, &src_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", src);
        return rc;
    }

    if (!S_ISREG(src_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", src);
        return rc;
    }

    if (hai->hai_extent.offset > (__u64)src_st.st_size) {
        rc = -EINVAL;
        CT_ERROR(rc, "Trying to start reading past end ("LPU64" > "
                 "%jd) of '%s' source file",
                 hai->hai_extent.offset, (intmax_t)src_st.st_size, src);
        return rc;
    }

    strippingInfo stripping_params;
    stripping_params.lmm_stripe_count = 1;
    stripping_params.lmm_stripe_size = ONE_MB;

    if (ct_save_stripe(src_fd, src, &stripping_params))
        return -1;

    /* Don't read beyond a given extent */
    if (length > src_st.st_size - hai->hai_extent.offset)
        length = src_st.st_size - hai->hai_extent.offset;

    last_report_time = time(NULL);

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed", src, dst);
        goto out;
    }

    errno = 0;

    uncompress_buf = malloc(chunk_size);
    if (uncompress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    compress_buf = malloc(compression_bound);
    if (compress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    int chunk_id = -1;

    const char totalLength[] = "totallength";
    const char chunksize[] = "chunksize";
    const char stripe_size[] = "stripesize";
    const char stripe_count[] = "stripecount";
    const char path[] = "path";
    const char uid[] = "uid";
    const char gid[] = "gid";
    char totalLength_s[TOTALLENGTH];
    char chunksize_s[TOTALLENGTH];
    char stripe_size_s[TOTALLENGTH];
    char stripe_count_s[TOTALLENGTH];
    char path_s[PATH_MAX];
    char uid_s[TOTALLENGTH];
    char gid_s[TOTALLENGTH];
    snprintf(totalLength_s, sizeof(totalLength_s), "%llu", length);
    snprintf(chunksize_s, sizeof(chunksize_s), "%i", chunk_size);
    snprintf(stripe_size_s, sizeof(stripe_size_s), "%i",
             stripping_params.lmm_stripe_size);
    snprintf(stripe_count_s, sizeof(stripe_count_s), "%i",
             stripping_params.lmm_stripe_count);
    snprintf(path_s, sizeof(path_s), "%s", src); // FIXME should use fid2path to get the normal path
    snprintf(uid_s, sizeof(uid_s), "%i", src_st.st_uid);
    snprintf(gid_s, sizeof(gid_s), "%i", src_st.st_gid);

    // Saving some metadata for disaster recovery
    S3NameValue metadata[7] = {
        { totalLength, totalLength_s },
        { chunksize, chunksize_s },
        { stripe_size, stripe_size_s },
        { stripe_count, stripe_count_s },
        { path, path_s },
        { uid, uid_s },
        { gid, gid_s }
    };

    S3PutProperties putProperties = {
        // application/x-lz4 does not officially exist
        "application/x-lz4",                     // contentType
        NULL,                                    // md5
        NULL,                                    // cacheControl
        NULL,                                    // contentDispositionFilename
        NULL,                                    // contentEncoding
        -1,                                      // expires
        0,                                       // cannedAcl
        sizeof(metadata) / sizeof(S3NameValue),  // metaDataCount
        metadata,                                // S3NameValue *metaData
        0,                                       // useServerSideEncryption
    };

    do {
        // Uploading to object store
        if (chunk_id == -1)
            CT_TRACE("start copy of "LPU64" bytes from '%s' to '%s'",
                     length, src, dst);

        // size of the current chunk, limited by chunk_size
        long long unsigned int chunk;
        if (length - write_total > chunk_size) {
            // upper bound is the chunk_size
            chunk = chunk_size;
        }
        else {
            // limited by the file
            chunk = length - write_total;
        }
        chunk_id = file_offset / chunk_size;

        put_object_callback_data data;
        data.buffer_offset = 0;

        double before_lustre_read = ct_now();
        pread(src_fd, uncompress_buf, chunk, file_offset);
        CT_TRACE("Reading a chunk from %s of %llu bytes offset %llu from "
                 "lustre took %fs",
                 src, chunk, file_offset, ct_now() - before_lustre_read);

        double before_compression = ct_now();
        int compressed_size = LZ4_compress_default(uncompress_buf, compress_buf,
                                                   chunk, compression_bound);
        CT_TRACE("Compressing a chunk from %s took %fs and the compressed "
                 "size is %i bytes",
                 src, ct_now() - before_compression, compressed_size);
        if (compressed_size <= 0) {
            rc = -1;
            CT_ERROR(rc, "Compression error");
            goto out;
        }

        data.contentLength = compressed_size;
        data.buffer = compress_buf;

        S3PutObjectHandler putObjectHandler = {
            putResponseHandler,
            &putObjectDataCallback
        };

        char dst_chunk_s[S3_MAX_KEY_SIZE];
        snprintf(dst_chunk_s, sizeof(dst_chunk_s), "%s.%i", dst, chunk_id);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_chunk_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        double before_s3_put = ct_now();
        int retry_count = RETRYCOUNT;
        do {
            S3_put_object(&localbucketContext, dst_chunk_s, compressed_size,
                          &putProperties, NULL, &putObjectHandler, &data);
        } while (S3_status_is_retryable(data.status) &&
                 should_retry(&retry_count));
        CT_TRACE("S3 put of %s took %fs",
                 dst_chunk_s, ct_now() - before_s3_put);

        if (data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
            goto out;
        }

        he.offset = file_offset;
        he.length = chunk;

        now = time(NULL);
        if (now >= last_report_time + ct_opt.o_report_int) {
            last_report_time = now;
            CT_TRACE("sending progress report for archiving %s", src);
            rc = llapi_hsm_action_progress(hcp, &he, length, 0);
            if (rc < 0) {
                /* Action has been canceled or something wrong
                 * is happening. Stop copying data. */
                CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                         src, dst);
                goto out;
            }
        }

        write_total += chunk;
        file_offset += chunk;
    } while (file_offset < length);
    rc = 0;

    // We need to delete every chunk with a higher chunk_id if it exists;
    // this can happen if the new file is smaller
    // TODO only delete objects if this is a dirty write
    chunk_id += 1;
    do {
        char dst_s[S3_MAX_KEY_SIZE];
        int retry_count;

        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk_id);

        get_object_callback_data head_data;
        get_object_callback_data delete_data;

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        CT_TRACE("Checking if chunk %i exists", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_head_object(&localbucketContext, dst_s, NULL,
                           &headResponseHandler, &head_data);
        } while (S3_status_is_retryable(head_data.status) &&
                 should_retry(&retry_count));
        if (head_data.status == S3StatusHttpErrorNotFound) {
            // The object does not exist, so we stop deleting chunks
            CT_TRACE("Chunk %i does not exist", chunk_id);
            break;
        }
        if (head_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(head_data.status));
            goto out;
        }

        CT_TRACE("Deleting chunk %i", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL,
                             &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) &&
                 should_retry(&retry_count));
        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto out;
        }

        chunk_id++;
    } while (true);

out:
    if (uncompress_buf != NULL)
        free(uncompress_buf);
    if (compress_buf != NULL)
        free(compress_buf);

    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);
    return rc;
}
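/*
 * ct_archive_data() streams each compressed chunk from memory, so its
 * put-side data callback reads from data->buffer rather than a file. A
 * plausible sketch based on the buffer, buffer_offset and contentLength
 * fields the code sets up (the exact definition is an assumption):
 */
static int putObjectDataCallback(int bufferSize, char *buffer,
                                 void *callbackData)
{
    put_object_callback_data *data = (put_object_callback_data *) callbackData;

    // Bytes still to hand to libs3 from the compressed chunk
    uint64_t remaining = data->contentLength - data->buffer_offset;
    int toSend = (remaining > (unsigned) bufferSize) ?
                 bufferSize : (int) remaining;

    memcpy(buffer, data->buffer + data->buffer_offset, toSend);
    data->buffer_offset += toSend;

    return toSend;  /* returning 0 ends the upload body */
}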
int ct_remove(const struct hsm_action_item *hai, const long hal_flags)
{
    struct hsm_copyaction_private *hcp = NULL;
    char dst[PATH_MAX];
    int rc;
    int retry_count;
    char dst_s[S3_MAX_KEY_SIZE];

    rc = ct_begin(&hcp, hai);
    if (rc < 0)
        goto end_ct_remove;

    ct_path_archive(dst, sizeof(dst), &hai->hai_fid);

    CT_TRACE("removing file '%s'", dst);

    if (ct_opt.o_dry_run) {
        rc = 0;
        goto end_ct_remove;
    }

    // Get the metadata from the first object to get the number of chunks
    get_object_callback_data data;
    snprintf(dst_s, sizeof(dst_s), "%s.0", dst);

    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    getBucketName(sizeof(bucket_name), bucket_name, dst_s);

    // Get a local copy of the general bucketContext, then overwrite the
    // pointer to the bucket_name
    S3BucketContext localbucketContext;
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    retry_count = RETRYCOUNT;
    do {
        S3_head_object(&localbucketContext, dst_s, NULL,
                       &headResponseHandler, &data);
    } while (S3_status_is_retryable(data.status) && should_retry(&retry_count));
    if (data.status != S3StatusOK) {
        rc = -EIO;
        CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
        goto end_ct_remove;
    }

    // Delete the chunks from the highest chunk id down to chunk 0
    int chunk;
    for (chunk = data.totalLength / data.chunk_size; chunk >= 0; chunk--) {
        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk);

        get_object_callback_data delete_data;

        CT_TRACE("Deleting chunk '%s'", dst_s);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL,
                             &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) &&
                 should_retry(&retry_count));
        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto end_ct_remove;
        }
    }
    rc = 0;

end_ct_remove:
    rc = ct_action_done(&hcp, hai, 0, rc);
    return rc;
}
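/*
 * getBucketName() maps an object key to the bucket holding it. Its
 * definition is not shown in these snippets; one plausible implementation
 * spreads chunks across a fixed set of buckets by hashing the key. The
 * bucket prefix and count below are invented for this sketch and do not
 * reflect the copytool's actual mapping.
 */
#define SKETCH_BUCKET_COUNT  16
#define SKETCH_BUCKET_PREFIX "hsm"  /* hypothetical bucket name prefix */

static void getBucketName(size_t bucket_name_size, char *bucket_name,
                          const char *key)
{
    unsigned long hash = 5381;
    const unsigned char *p;

    /* djb2 string hash over the object key */
    for (p = (const unsigned char *) key; *p; p++)
        hash = hash * 33 + *p;

    snprintf(bucket_name, bucket_name_size, "%s%lu",
             SKETCH_BUCKET_PREFIX, hash % SKETCH_BUCKET_COUNT);
}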