int main(int argc, char **args)
{
    if (argc < 3) {
        usage();
        return 1;
    }

    const char *bucketName = args[1];
    const char *key = args[2];
    S3Protocol protocolG = S3ProtocolHTTP;
    const char *accessKeyIdG = getenv("S3_ACCESS_KEY_ID");
    const char *secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY");
    const char *hostname = getenv("S3_HOSTNAME");
    S3Status status;

    if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) {
        fprintf(stderr, "Failed to initialize libs3: %s\n",
                S3_get_status_name(status));
        exit(-1);
    }

    S3BucketContext bucketContext = {
        0,                  // hostName (use the default endpoint)
        bucketName,
        protocolG,
        S3UriStylePath,
        accessKeyIdG,
        secretAccessKeyG,
        0                   // securityToken
    };

    S3ResponseHandler responseHandler = {
        &responsePropertiesCallback,
        &responseCompleteCallback
    };

    // Check whether the object exists
    S3_head_object(&bucketContext, key, 0, &responseHandler, 0);

    S3_deinitialize();
    return 0;
}
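/*
 * The two callbacks referenced by responseHandler above are not part of this
 * listing. Below is a minimal sketch, assuming the standard libs3
 * S3ResponsePropertiesCallback / S3ResponseCompleteCallback signatures; the
 * global statusG used to remember the final request status is a hypothetical
 * helper, not taken from the original code.
 */
static S3Status statusG = S3StatusOK;

static S3Status responsePropertiesCallback(const S3ResponseProperties *properties,
                                           void *callbackData)
{
    // Nothing to inspect here; accept the response headers as-is.
    return S3StatusOK;
}

static void responseCompleteCallback(S3Status status,
                                     const S3ErrorDetails *error,
                                     void *callbackData)
{
    // Remember the final status so the caller can check whether the
    // head request (and therefore the object) succeeded.
    statusG = status;
    if (error != NULL && error->message != NULL)
        fprintf(stderr, "S3 error: %s\n", error->message);
}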
static int ct_archive_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int src_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64 file_offset = hai->hai_extent.offset;
    struct stat src_st;
    char *uncompress_buf = NULL;
    char *compress_buf = NULL;
    __u64 write_total = 0;
    __u64 length = hai->hai_extent.length;
    time_t last_report_time;
    int rc = 0;
    double start_ct_now = ct_now();
    time_t now;
    int compression_bound = LZ4_compressBound(chunk_size);

    // Archiving a file from Lustre to the object store
    CT_TRACE("Archiving %s to %s", src, dst);

    if (fstat(src_fd, &src_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", src);
        return rc;
    }

    if (!S_ISREG(src_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", src);
        return rc;
    }

    if (hai->hai_extent.offset > (__u64)src_st.st_size) {
        rc = -EINVAL;
        CT_ERROR(rc, "trying to start reading past end ("LPU64" > "
                 "%jd) of '%s' source file",
                 hai->hai_extent.offset, (intmax_t)src_st.st_size, src);
        return rc;
    }

    strippingInfo stripping_params;
    stripping_params.lmm_stripe_count = 1;
    stripping_params.lmm_stripe_size = ONE_MB;

    if (ct_save_stripe(src_fd, src, &stripping_params))
        return -1;

    /* Don't read beyond a given extent */
    if (length > src_st.st_size - hai->hai_extent.offset)
        length = src_st.st_size - hai->hai_extent.offset;

    last_report_time = time(NULL);

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed", src, dst);
        goto out;
    }

    errno = 0;

    uncompress_buf = malloc(chunk_size);
    if (uncompress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }
    compress_buf = malloc(compression_bound);
    if (compress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    int chunk_id = -1;

    const char totalLength[] = "totallength";
    const char chunksize[] = "chunksize";
    const char stripe_size[] = "stripesize";
    const char stripe_count[] = "stripecount";
    const char path[] = "path";
    const char uid[] = "uid";
    const char gid[] = "gid";

    char totalLength_s[TOTALLENGTH];
    char chunksize_s[TOTALLENGTH];
    char stripe_size_s[TOTALLENGTH];
    char stripe_count_s[TOTALLENGTH];
    char path_s[PATH_MAX];
    char uid_s[TOTALLENGTH];
    char gid_s[TOTALLENGTH];

    snprintf(totalLength_s, sizeof(totalLength_s), "%llu", length);
    snprintf(chunksize_s, sizeof(chunksize_s), "%i", chunk_size);
    snprintf(stripe_size_s, sizeof(stripe_size_s), "%i",
             stripping_params.lmm_stripe_size);
    snprintf(stripe_count_s, sizeof(stripe_count_s), "%i",
             stripping_params.lmm_stripe_count);
    snprintf(path_s, sizeof(path_s), "%s", src); // FIXME should use fid2path to get the normal path
    snprintf(uid_s, sizeof(uid_s), "%i", src_st.st_uid);
    snprintf(gid_s, sizeof(gid_s), "%i", src_st.st_gid);

    // Save some metadata for disaster recovery
    S3NameValue metadata[7] = {
        { totalLength, totalLength_s },
        { chunksize, chunksize_s },
        { stripe_size, stripe_size_s },
        { stripe_count, stripe_count_s },
        { path, path_s },
        { uid, uid_s },
        { gid, gid_s },
    };

    S3PutProperties putProperties = {
        // application/x-lz4 does not officially exist
        "application/x-lz4",                    // contentType
        NULL,                                   // md5
        NULL,                                   // cacheControl
        NULL,                                   // contentDispositionFilename
        NULL,                                   // contentEncoding
        -1,                                     // expires
        0,                                      // cannedAcl
        sizeof(metadata) / sizeof(S3NameValue), // metaDataCount
        metadata,                               // S3NameValue *metaData
        0,                                      // useServerSideEncryption
    };

    do {
        // Uploading to the object store
        if (chunk_id == -1)
            CT_TRACE("start copy of "LPU64" bytes from '%s' to '%s'",
                     length, src, dst);

        // Size of the current chunk, limited by chunk_size
        long long unsigned int chunk;
        if (length - write_total > chunk_size)
            // upper bound is the chunk_size
            chunk = chunk_size;
        else
            // limited by the file
            chunk = length - write_total;

        chunk_id = file_offset / chunk_size;

        put_object_callback_data data;
        data.buffer_offset = 0;

        double before_lustre_read = ct_now();
        pread(src_fd, uncompress_buf, chunk, file_offset);
        CT_TRACE("Reading a chunk from %s of %llu bytes offset %llu "
                 "from lustre took %fs",
                 src, chunk, file_offset, ct_now() - before_lustre_read);

        double before_compression = ct_now();
        int compressed_size = LZ4_compress_default(uncompress_buf, compress_buf,
                                                   chunk, compression_bound);
        CT_TRACE("Compressing a chunk from %s took %fs "
                 "and the compressed size is %i bytes",
                 src, ct_now() - before_compression, compressed_size);

        if (compressed_size <= 0) {
            rc = -1;
            CT_ERROR(rc, "Compression error");
            goto out;
        }

        data.contentLength = compressed_size;
        data.buffer = compress_buf;

        S3PutObjectHandler putObjectHandler = {
            putResponseHandler,
            &putObjectDataCallback
        };

        char dst_chunk_s[S3_MAX_KEY_SIZE];
        snprintf(dst_chunk_s, sizeof(dst_chunk_s), "%s.%i", dst, chunk_id);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_chunk_s);

        // Get a local copy of the general bucketContext,
        // then overwrite the pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        double before_s3_put = ct_now();
        int retry_count = RETRYCOUNT;
        do {
            S3_put_object(&localbucketContext, dst_chunk_s, compressed_size,
                          &putProperties, NULL, &putObjectHandler, &data);
        } while (S3_status_is_retryable(data.status) &&
                 should_retry(&retry_count));
        CT_TRACE("S3 put of %s took %fs",
                 dst_chunk_s, ct_now() - before_s3_put);

        if (data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
            goto out;
        }

        he.offset = file_offset;
        he.length = chunk;

        now = time(NULL);
        if (now >= last_report_time + ct_opt.o_report_int) {
            last_report_time = now;
            CT_TRACE("sending progress report for archiving %s", src);
            rc = llapi_hsm_action_progress(hcp, &he, length, 0);
            if (rc < 0) {
                /* Action has been canceled or something wrong
                 * is happening. Stop copying data. */
                CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                         src, dst);
                goto out;
            }
        }

        write_total += chunk;
        file_offset += chunk;
    } while (file_offset < length);
    rc = 0;

    // We need to delete every chunk with a higher chunk_id if it exists;
    // this can happen if the new file is smaller.
    // TODO: only delete objects if this is a dirty write
    chunk_id += 1;
    do {
        char dst_s[S3_MAX_KEY_SIZE];
        int retry_count;

        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk_id);

        get_object_callback_data head_data;
        get_object_callback_data delete_data;

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext,
        // then overwrite the pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        CT_TRACE("Checking if chunk %i exists", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_head_object(&localbucketContext, dst_s, NULL,
                           &headResponseHandler, &head_data);
        } while (S3_status_is_retryable(head_data.status) &&
                 should_retry(&retry_count));

        if (head_data.status == S3StatusHttpErrorNotFound) {
            // Object does not exist, so stop deleting chunks
            CT_TRACE("Chunk %i does not exist", chunk_id);
            break;
        }
        if (head_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(head_data.status));
            goto out;
        }

        CT_TRACE("Deleting chunk %i", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL,
                             &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) &&
                 should_retry(&retry_count));

        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto out;
        }

        chunk_id++;
    } while (true);

out:
    if (uncompress_buf != NULL)
        free(uncompress_buf);
    if (compress_buf != NULL)
        free(compress_buf);

    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);

    return rc;
}
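/*
 * The put_object_callback_data struct and putObjectDataCallback used by
 * ct_archive_data() are not shown in this listing. Below is a minimal
 * sketch, assuming the standard libs3 S3PutObjectDataCallback signature;
 * the field names mirror how the struct is used above, everything else is
 * an assumption rather than the original implementation.
 */
typedef struct {
    char     *buffer;          // compressed chunk to upload
    uint64_t  contentLength;   // total bytes to send for this object
    uint64_t  buffer_offset;   // bytes already handed to libs3
    S3Status  status;          // final status, set by the complete callback
} put_object_callback_data;

static int putObjectDataCallback(int bufferSize, char *buffer,
                                 void *callbackData)
{
    put_object_callback_data *data = callbackData;

    // Copy at most bufferSize bytes of the remaining payload into the
    // buffer provided by libs3 and advance the offset; returning 0 tells
    // libs3 the upload body is complete.
    int toCopy = data->contentLength - data->buffer_offset;
    if (toCopy > bufferSize)
        toCopy = bufferSize;
    if (toCopy > 0) {
        memcpy(buffer, data->buffer + data->buffer_offset, toCopy);
        data->buffer_offset += toCopy;
    }
    return toCopy;
}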
static int ct_restore_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int dst_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64 file_offset = hai->hai_extent.offset;
    struct stat dst_st;
    __u64 write_total = 0;
    __u64 length = hai->hai_extent.length;
    time_t last_report_time;
    time_t now;
    int rc = 0;
    double start_ct_now = ct_now();

    // Restore a file from the object store back to Lustre
    CT_TRACE("Restoring %s to %s", src, dst);

    if (fstat(dst_fd, &dst_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", dst);
        return rc;
    }

    if (!S_ISREG(dst_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", dst);
        return rc;
    }

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed", src, dst);
        goto out;
    }

    errno = 0;
    last_report_time = time(NULL);

    // Will be assigned the correct value based on the metadata
    long long int object_chunk_size = chunk_size;

    do {
        // Downloading from the object store
        char src_chunk_s[S3_MAX_KEY_SIZE];

        S3GetObjectHandler getObjectHandler = {
            getResponseHandler,
            &getObjectDataCallback
        };

        if (length == -1) {
            // Discover length and chunk size from the first object's metadata
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.0", src);

            if (file_offset == 0) {
                // Download data and metadata from the first chunk
                get_object_callback_data data;
                rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
                if (rc < 0)
                    goto out;

                length = data.totalLength;
                object_chunk_size = data.chunk_size;

                char *uncompress_buf = NULL;
                uncompress_buf = malloc(object_chunk_size);
                if (uncompress_buf == NULL) {
                    rc = -ENOMEM;
                    goto out;
                }

                double before_decompression = ct_now();
                int decompressed_size = LZ4_decompress_safe(data.buffer,
                                                            uncompress_buf,
                                                            data.contentLength,
                                                            object_chunk_size);
                if (decompressed_size < 0) {
                    rc = -1;
                    CT_ERROR(rc, "Decompression error");
                    goto out;
                }
                CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs "
                         "and the uncompressed size is %i bytes",
                         src, data.contentLength,
                         ct_now() - before_decompression, decompressed_size);

                double before_lustre_write = ct_now();
                pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
                CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu "
                         "to lustre took %fs",
                         src_chunk_s, object_chunk_size, file_offset,
                         ct_now() - before_lustre_write);

                if (uncompress_buf != NULL)
                    free(uncompress_buf);
                if (data.buffer != NULL)
                    free(data.buffer);

                write_total = decompressed_size;
                file_offset += decompressed_size;

                he.offset = file_offset;
                he.length = data.contentLength;
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }

                if (write_total == length) {
                    // Completed the full write with the first object
                    rc = 0;
                    break;
                }
            } else {
                // Only make a head request to get the metadata of the first object
                get_object_callback_data data;

                char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
                getBucketName(sizeof(bucket_name), bucket_name, src_chunk_s);

                // Get a local copy of the general bucketContext,
                // then overwrite the pointer to the bucket_name
                S3BucketContext localbucketContext;
                memcpy(&localbucketContext, &bucketContext,
                       sizeof(S3BucketContext));
                localbucketContext.bucketName = bucket_name;

                int retry_count = RETRYCOUNT;
                do {
                    S3_head_object(&localbucketContext, src_chunk_s, NULL,
                                   &headResponseHandler, &data);
                } while (S3_status_is_retryable(data.status) &&
                         should_retry(&retry_count));

                if (data.status != S3StatusOK) {
                    rc = -EIO;
                    CT_ERROR(rc, "S3Error %s",
                             S3_get_status_name(data.status));
                    goto out;
                }

                object_chunk_size = data.chunk_size;
                length = data.totalLength;
            }
        } else {
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.%llu",
                     src, file_offset / object_chunk_size);

            long long unsigned int chunk;
            if (length - write_total > object_chunk_size)
                // upper bound is the chunk_size
                chunk = object_chunk_size;
            else
                // limited by the file
                chunk = length - write_total;

            get_object_callback_data data;
            rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
            if (rc < 0)
                goto out;

            char *uncompress_buf = NULL;
            uncompress_buf = malloc(object_chunk_size);
            if (uncompress_buf == NULL) {
                rc = -ENOMEM;
                goto out;
            }

            double before_decompression = ct_now();
            int decompressed_size = LZ4_decompress_safe(data.buffer,
                                                        uncompress_buf,
                                                        data.contentLength,
                                                        object_chunk_size);
            if (decompressed_size < 0) {
                rc = -1;
                CT_ERROR(rc, "Decompression error");
                goto out;
            }
            CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs "
                     "and the uncompressed size is %i",
                     src, data.contentLength,
                     ct_now() - before_decompression, decompressed_size);

            double before_lustre_write = ct_now();
            pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
            CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu "
                     "to lustre took %fs",
                     src_chunk_s, chunk, file_offset,
                     ct_now() - before_lustre_write);

            if (uncompress_buf != NULL)
                free(uncompress_buf);
            if (data.buffer != NULL)
                free(data.buffer);

            now = time(NULL);
            if (now >= last_report_time + ct_opt.o_report_int) {
                last_report_time = now;
                CT_TRACE("sending progress report for restoring %s", src);
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }
            }

            write_total += decompressed_size;
            file_offset += decompressed_size;
        }
        rc = 0;
    } while (file_offset < length);

    if (hai->hai_action == HSMA_RESTORE) {
        /*
         * Truncate the restored file. The size is taken from the archive;
         * this is done to support restore after a force release, which
         * leaves the file with the wrong size (it can be bigger than the
         * new size). Make sure the file is on disk before reporting success.
         */
        rc = ftruncate(dst_fd, length);
        if (rc < 0) {
            rc = -errno;
            CT_ERROR(rc, "cannot truncate '%s' to size %llu", dst, length);
            err_major++;
        }
    }

out:
    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);

    return rc;
}
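/*
 * get_s3_object() is used by ct_restore_data() but not shown in this listing.
 * Below is a minimal sketch of what such a helper could look like, assuming
 * the get_object_callback_data fields used elsewhere in this code (buffer,
 * contentLength, totalLength, chunk_size, status) and the same global
 * bucketContext, getBucketName() and should_retry() helpers; it is not the
 * original implementation. The registered data callback is expected to
 * allocate data->buffer as the object body arrives.
 */
static int get_s3_object(const char *key, get_object_callback_data *data,
                         const S3GetObjectHandler *getObjectHandler)
{
    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    S3BucketContext localbucketContext;
    int retry_count = RETRYCOUNT;

    memset(data, 0, sizeof(*data));

    // Each chunk may live in a different bucket, so resolve it first.
    getBucketName(sizeof(bucket_name), bucket_name, key);
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    do {
        // Fetch the whole object (startByte 0, byteCount 0 means "all").
        S3_get_object(&localbucketContext, key, NULL, 0, 0, NULL,
                      getObjectHandler, data);
    } while (S3_status_is_retryable(data->status) &&
             should_retry(&retry_count));

    if (data->status != S3StatusOK) {
        int rc = -EIO;
        CT_ERROR(rc, "S3Error %s", S3_get_status_name(data->status));
        return rc;
    }

    return 0;
}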
int ct_remove(const struct hsm_action_item *hai, const long hal_flags)
{
    struct hsm_copyaction_private *hcp = NULL;
    char dst[PATH_MAX];
    char dst_s[S3_MAX_KEY_SIZE];
    int rc;
    int retry_count;

    rc = ct_begin(&hcp, hai);
    if (rc < 0)
        goto end_ct_remove;

    ct_path_archive(dst, sizeof(dst), &hai->hai_fid);

    CT_TRACE("removing file '%s'", dst);

    if (ct_opt.o_dry_run) {
        rc = 0;
        goto end_ct_remove;
    }

    // Get the metadata from the first object to get the number of chunks
    get_object_callback_data data;
    snprintf(dst_s, sizeof(dst_s), "%s.0", dst);

    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    getBucketName(sizeof(bucket_name), bucket_name, dst_s);

    // Get a local copy of the general bucketContext,
    // then overwrite the pointer to the bucket_name
    S3BucketContext localbucketContext;
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    retry_count = RETRYCOUNT;
    do {
        S3_head_object(&localbucketContext, dst_s, NULL,
                       &headResponseHandler, &data);
    } while (S3_status_is_retryable(data.status) &&
             should_retry(&retry_count));

    if (data.status != S3StatusOK) {
        rc = -EIO;
        CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
        goto end_ct_remove;
    }

    int chunk;
    for (chunk = data.totalLength / data.chunk_size; chunk >= 0; chunk--) {
        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk);

        get_object_callback_data delete_data;

        CT_TRACE("Deleting chunk '%s'", dst_s);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext,
        // then overwrite the pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL,
                             &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) &&
                 should_retry(&retry_count));

        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto end_ct_remove;
        }
    }
    rc = 0;

end_ct_remove:
    rc = ct_action_done(&hcp, hai, 0, rc);

    return rc;
}
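/*
 * The retry loops above rely on a small helper that bounds the number of
 * attempts. A minimal sketch, assuming RETRYCOUNT is the number of retries
 * allowed per request; the fixed one-second back-off is an assumption, not
 * taken from the original code.
 */
static int should_retry(int *retry_count)
{
    if (*retry_count > 0) {
        (*retry_count)--;
        // Simple fixed back-off before the next attempt.
        sleep(1);
        return 1;
    }
    return 0;
}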