/*
 * Worker-pool task: execute one asynchronous S3 request (GET or PUT)
 * described by the BlueSkyStoreAsync in `a` against the S3Store in `s`,
 * then mark the request complete and drop the task's reference.
 */
static void s3store_task(gpointer a, gpointer s)
{
    BlueSkyStoreAsync *async = (BlueSkyStoreAsync *)a;
    S3Store *store = (S3Store *)s;

    async->status = ASYNC_RUNNING;
    async->exec_time = bluesky_now_hires();

    if (async->op == STORE_OP_GET) {
        struct get_info get_ctx;
        get_ctx.buf = g_string_new("");
        get_ctx.success = 0;

        struct S3GetObjectHandler get_handler;
        get_handler.responseHandler.propertiesCallback = s3store_properties_callback;
        get_handler.responseHandler.completeCallback = s3store_response_callback;
        get_handler.getObjectDataCallback = s3store_get_handler;

        S3_get_object(&store->bucket, async->key, NULL,
                      async->start, async->len, NULL, &get_handler, &get_ctx);
        async->range_done = TRUE;

        if (get_ctx.success) {
            /* Hand the accumulated bytes over to the async request. */
            async->data = bluesky_string_new_from_gstring(get_ctx.buf);
            async->result = 0;
        } else {
            g_string_free(get_ctx.buf, TRUE);
        }
    } else if (async->op == STORE_OP_PUT) {
        struct put_info put_ctx;
        put_ctx.success = 0;
        put_ctx.val = async->data;
        put_ctx.offset = 0;

        struct S3PutObjectHandler put_handler;
        put_handler.responseHandler.propertiesCallback = s3store_properties_callback;
        put_handler.responseHandler.completeCallback = s3store_response_callback;
        put_handler.putObjectDataCallback = s3store_put_handler;

        S3_put_object(&store->bucket, async->key, async->data->len,
                      NULL, NULL, &put_handler, &put_ctx);

        if (put_ctx.success)
            async->result = 0;
        else
            g_warning("Error completing S3 put operation; client must retry!");
    }

    bluesky_store_async_mark_complete(async);
    bluesky_store_async_unref(async);
}
/*
 * Upload an object of contentLength bytes to bucketName/key; the body is
 * produced on demand by `filler`.  Returns the libs3 status recorded by
 * the response-complete callback in the global statusG.
 */
S3Status cloud_put_object(const char *bucketName, const char *key,
                          uint64_t contentLength, put_filler_t filler)
{
    S3BucketContext bucketContext = {
        0, bucketName, protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG
    };
    S3PutProperties putProperties = {
        NULL, NULL, NULL, NULL, NULL, -1, cannedAcl, 0, NULL
    };
    S3PutObjectHandler putObjectHandler = {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &putObjectDataCallback
    };

    /* Per-request state consumed by putObjectDataCallback. */
    put_object_callback_data ctx;
    ctx.offset = 0;
    ctx.contentLength = ctx.remainingLength = contentLength;
    ctx.filler = filler;
    ctx.noStatus = 0;

    S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
                  &putObjectHandler, &ctx);

    return statusG;
}
/*
 * putFileIntoS3 - upload a local file into S3 under the bucket/key encoded
 * in s3ObjName ("bucket/key", split by parseS3Path).
 *
 * fileName  - path of the local file to read
 * s3ObjName - "bucket/key" destination
 * fileSize  - number of bytes to upload
 *
 * Returns 0 on success or a negative iRODS error code on failure.
 */
int putFileIntoS3 (char *fileName, char *s3ObjName, rodsLong_t fileSize)
{
#if 0
    S3Status status;
#else
    int status;
#endif
    char key[MAX_NAME_LEN], myBucket[MAX_NAME_LEN];
    callback_data_t data;
    S3BucketContext bucketContext;

    bzero (&data, sizeof (data));
    if ((status = parseS3Path (s3ObjName, myBucket, key)) < 0)
        return status;

    data.fd = fopen (fileName, "r");
    if (data.fd == NULL) {
        status = UNIX_FILE_OPEN_ERR - errno;
        rodsLog (LOG_ERROR,
                 "putFileIntoS3: open error for fileName %s, status = %d",
                 fileName, status);
        return status;
    }
    data.contentLength = data.originalContentLength = fileSize;

    if ((status = myS3Init ()) != S3StatusOK) {
        /* BUGFIX: previously leaked the open FILE* on init failure. */
        fclose (data.fd);
        return (status);
    }

    /* XXXXX using S3UriStyleVirtualHost causes operation containing
     * the sub-string "S3" to fail. use S3UriStylePath instead */
    /* Aggregate initialization of S3BucketContext broke across libs3
     * versions (hostName member added), so fill fields explicitly. */
    bzero (&bucketContext, sizeof (bucketContext));
    bucketContext.bucketName = myBucket;
    bucketContext.protocol = S3ProtocolHTTPS;
    bucketContext.uriStyle = S3UriStylePath;
    bucketContext.accessKeyId = S3Auth.accessKeyId;
    bucketContext.secretAccessKey = S3Auth.secretAccessKey;

    S3PutObjectHandler putObjectHandler = {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &putObjectDataCallback
    };

    S3_put_object(&bucketContext, key, fileSize, NULL, 0,
                  &putObjectHandler, &data);
    /* data.status is filled in by the response-complete callback. */
    if (data.status != S3StatusOK) {
        status = myS3Error (data.status, S3_PUT_ERROR);
    }

    /* S3_deinitialize(); */
    fclose (data.fd);
    return (status);
}
ssize_t __s3fs_put_object(const char *bucketName, const char *key, const uint8_t *buf, ssize_t contentLength) { const char *cacheControl = 0, *contentType = 0, *md5 = 0; const char *contentDispositionFilename = 0, *contentEncoding = 0; int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; int noStatus = 0; put_object_callback_data data; memset(&data, 0, sizeof(put_object_callback_data)); data.data = buf; // data.gb = 0; data.noStatus = noStatus; data.contentLength = data.originalContentLength = contentLength; S3_init(); S3BucketContext bucketContext = { 0, bucketName, protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG }; S3PutProperties putProperties = { contentType, md5, cacheControl, contentDispositionFilename, contentEncoding, expires, cannedAcl, metaPropertiesCount, metaProperties }; S3PutObjectHandler putObjectHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &putObjectDataCallback }; do { S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, &putObjectHandler, &data); } while (S3_status_is_retryable(statusG) && should_retry()); int result = data.written; if (statusG != S3StatusOK) { printError(); result = -1; } else if (data.contentLength) { fprintf(stderr, "\nERROR: Failed to read remaining %llu bytes from " "input\n", (unsigned long long) data.contentLength); } S3_deinitialize(); return result; }
int putFileIntoS3(char *fileName, char *s3ObjName) { S3Status status; char *key; struct stat statBuf; uint64_t fileSize; FILE *fd; char *accessKeyId; char *secretAccessKey; put_object_callback_data data; accessKeyId = getenv("S3_ACCESS_KEY_ID"); if (accessKeyId == NULL) { printf("S3_ACCESS_KEY_ID environment variable is undefined"); return(-1); } secretAccessKey = getenv("S3_SECRET_ACCESS_KEY"); if (secretAccessKey == NULL) { printf("S3_SECRET_ACCESS_KEY environment variable is undefined"); return(-1); } key = (char *) strchr(s3ObjName, '/'); if (key == NULL) { printf("S3 Key for the Object Not defined\n"); return(-1); } *key = '\0'; key++; if (stat(fileName, &statBuf) == -1) { printf("Unknown input file"); return(-1); } fileSize = statBuf.st_size; fd = fopen(fileName, "r" ); if (fd == NULL) { printf("Unable to open input file"); return(-1); } data.infile = fd; S3BucketContext bucketContext = {s3ObjName, 1, 0, accessKeyId, secretAccessKey}; S3PutObjectHandler putObjectHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &putObjectDataCallback }; if ((status = S3_initialize("s3", S3_INIT_ALL)) != S3StatusOK) { printf("Failed to initialize libs3: %s\n",S3_get_status_name(status)); return(-1); } S3_put_object(&bucketContext, key, fileSize, NULL, 0, &putObjectHandler, &data); if (statusG != S3StatusOK) { printf("Put failed: %i\n", statusG); S3_deinitialize(); return(-1); } S3_deinitialize(); fclose(fd); return(0); }
int main(int argc, char** argv) { if (3 > argc){ usage(); return 1; } char *slash = argv[1]; while (*slash && (*slash != '/')) { slash++; } if (!*slash || !*(slash + 1)) { fprintf(stderr, "\nERROR: Invalid bucket/key name: %s\n", argv[1]); usage(); exit(-1); } *slash++ = 0; const char* bucketName = argv[1]; const char *key = slash; const char *uploadId = 0; const char *filename = argv[2]; uint64_t contentLength = 0; const char *cacheControl = 0, *contentType = 0, *md5 = 0; const char *contentDispositionFilename = 0, *contentEncoding = 0; int64_t expires = -1; S3CannedAcl cannedAcl = S3CannedAclPrivate; int metaPropertiesCount = 0; S3NameValue metaProperties[S3_MAX_METADATA_COUNT]; char useServerSideEncryption = 0; int noStatus = 0; put_object_callback_data data; data.infile = 0; data.gb = 0; data.noStatus = noStatus; if (filename) { if (!contentLength) { struct stat statbuf; // Stat the file to get its length if (stat(filename, &statbuf) == -1) { fprintf(stderr, "\nERROR: Failed to stat file %s: ", filename); perror(0); exit(-1); } contentLength = statbuf.st_size; } // Open the file if (!(data.infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) { fprintf(stderr, "\nERROR: Failed to open input file %s: ", filename); perror(0); exit(-1); } } else{ usage(); } data.contentLength = data.originalContentLength = contentLength; S3Protocol protocolG = S3ProtocolHTTP; const char* accessKeyIdG = getenv("S3_ACCESS_KEY_ID"); const char* secretAccessKeyG = getenv("S3_SECRET_ACCESS_KEY"); S3Status status; const char* hostname = getenv("S3_HOSTNAME"); if ((status = S3_initialize("s3", S3_INIT_ALL, hostname)) != S3StatusOK) { fprintf(stdout, "Failed to initialize libs3: %s\n", S3_get_status_name(status)); exit(-1); } S3BucketContext bucketContext = { 0, bucketName, protocolG, S3UriStylePath, accessKeyIdG, secretAccessKeyG, 0 }; S3PutProperties putProperties = { contentType, md5, cacheControl, contentDispositionFilename, contentEncoding, expires, cannedAcl, metaPropertiesCount, 
metaProperties, useServerSideEncryption }; S3PutObjectHandler putObjectHandler = { { &responsePropertiesCallback, &responseCompleteCallback }, &putObjectDataCallback }; //----------------------create bucket-----------------// S3_put_object(&bucketContext, key, contentLength, &putProperties, 0, &putObjectHandler, &data); S3_deinitialize(); return 0; }
/*
 * ct_archive_data - archive the extent of a Lustre file described by the
 * HSM action item into the object store, as a sequence of LZ4-compressed
 * chunks stored under keys "<dst>.<chunk_id>", then delete any stale
 * higher-numbered chunks left over from a previous, larger version.
 *
 * hcp       - copytool action handle, used for progress reporting
 * src       - Lustre-side path of the file being archived
 * dst       - base object key; each chunk becomes "<dst>.<chunk_id>"
 * src_fd    - open file descriptor for src
 * hai       - HSM action item carrying the extent (offset/length) to copy
 * hal_flags - action flags (not used in this function)
 *
 * Returns 0 on success or a negative errno-style code on failure.
 * Relies on file-scope state defined elsewhere: chunk_size, bucketContext,
 * ct_opt, the libs3 response handlers and RETRYCOUNT.
 */
static int ct_archive_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int src_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64 file_offset = hai->hai_extent.offset;
    struct stat src_st;
    char *uncompress_buf = NULL;   /* raw chunk read from Lustre */
    char *compress_buf = NULL;     /* LZ4-compressed chunk sent to S3 */
    __u64 write_total = 0;
    __u64 length = hai->hai_extent.length;
    time_t last_report_time;
    int rc = 0;
    double start_ct_now = ct_now();
    time_t now;
    /* Worst-case compressed size for a chunk_size input. */
    int compression_bound = LZ4_compressBound(chunk_size);

    // Archiving a file from Lustre to the object store
    CT_TRACE("Archiving %s to %s", src, dst);

    if (fstat(src_fd, &src_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", src);
        return rc;
    }

    if (!S_ISREG(src_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", src);
        return rc;
    }

    if (hai->hai_extent.offset > (__u64)src_st.st_size) {
        rc = -EINVAL;
        CT_ERROR(rc, "Trying to start reading past end ("LPU64" > "
                 "%jd) of '%s' source file",
                 hai->hai_extent.offset, (intmax_t)src_st.st_size, src);
        return rc;
    }

    /* Record striping parameters so they can be restored on retrieval. */
    strippingInfo stripping_params;
    stripping_params.lmm_stripe_count = 1;
    stripping_params.lmm_stripe_size = ONE_MB;

    if (ct_save_stripe(src_fd, src, &stripping_params)) {
        return -1;
    }

    /* Don't read beyond a given extent */
    if (length > src_st.st_size - hai->hai_extent.offset)
        length = src_st.st_size - hai->hai_extent.offset;

    last_report_time = time(NULL);

    /* Initial zero-length progress report; also detects cancellation. */
    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed", src, dst);
        goto out;
    }

    errno = 0;
    uncompress_buf = malloc(chunk_size);
    if (uncompress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }
    compress_buf = malloc(compression_bound);
    if (compress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    int chunk_id = -1;

    /* Metadata keys and their stringified values, attached to each chunk. */
    const char totalLength[] = "totallength";
    const char chunksize[] = "chunksize";
    const char stripe_size[] = "stripesize";
    const char stripe_count[] = "stripecount";
    const char path[] = "path";
    const char uid[] = "uid";
    const char gid[] = "gid";
    char totalLength_s[TOTALLENGTH];
    char chunksize_s[TOTALLENGTH];
    char stripe_size_s[TOTALLENGTH];
    char stripe_count_s[TOTALLENGTH];
    char path_s[PATH_MAX];
    char uid_s[TOTALLENGTH];
    char gid_s[TOTALLENGTH];

    snprintf(totalLength_s, sizeof(totalLength_s), "%llu", length);
    snprintf(chunksize_s, sizeof(chunksize_s), "%i", chunk_size);
    snprintf(stripe_size_s, sizeof(stripe_size_s), "%i",
             stripping_params.lmm_stripe_size);
    snprintf(stripe_count_s, sizeof(stripe_count_s), "%i",
             stripping_params.lmm_stripe_count);
    // FIXME should use fid2path to get the normal path
    snprintf(path_s, sizeof(path_s), "%s", src);
    snprintf(uid_s, sizeof(uid_s), "%i", src_st.st_uid);
    snprintf(gid_s, sizeof(gid_s), "%i", src_st.st_gid);

    // Saving some metadata for disaster recovery
    S3NameValue metadata[7] = {
        { totalLength, totalLength_s, },
        { chunksize, chunksize_s, },
        { stripe_size, stripe_size_s, },
        { stripe_count, stripe_count_s, },
        { path, path_s },
        { uid, uid_s },
        { gid, gid_s }
    };

    S3PutProperties putProperties = {
        // application/x-lz4 does not officially exist
        "application/x-lz4",                      // contentType
        NULL,                                     // md5
        NULL,                                     // cacheControl
        NULL,                                     // contentDispositionFilename
        NULL,                                     // contentEncoding
        -1,                                       // expires
        0,                                        // cannedAcl
        sizeof(metadata) / sizeof(S3NameValue),   // metaDataCount
        metadata,                                 // S3NameValue *metaData
        0,                                        // useServerSideEncryption
    };

    do { // Uploading to object store
        if (chunk_id == -1) {
            CT_TRACE("start copy of "LPU64" bytes from '%s' to '%s'",
                     length, src, dst);
        }

        // size of the current chunk, limited by chunk_size
        long long unsigned int chunk;
        if (length - write_total > chunk_size) {
            // upper bound is the chunk_size
            chunk = chunk_size;
        } else {
            // limited by the file
            chunk = length - write_total;
        }
        /* Chunk ids are derived from the absolute file offset. */
        chunk_id = file_offset / chunk_size;

        put_object_callback_data data;
        data.buffer_offset = 0;

        double before_lustre_read = ct_now();
        /* NOTE(review): pread()'s return value is unchecked; a short or
         * failed read would compress stale buffer contents — verify. */
        pread(src_fd, uncompress_buf, chunk, file_offset);
        CT_TRACE("Reading a chunk from %s of %llu bytes offset %llu from lustre took %fs",
                 src, chunk, file_offset, ct_now() - before_lustre_read);

        double before_compression = ct_now();
        int compressed_size = LZ4_compress_default(uncompress_buf, compress_buf,
                                                   chunk, compression_bound);
        CT_TRACE("Compressing a chunk from %s took %fs and the compressed size is %i bytes",
                 src, ct_now() - before_compression, compressed_size);
        if (compressed_size <= 0) {
            rc = -1;
            CT_ERROR(rc, "Compression error");
            goto out;
        }

        data.contentLength = compressed_size;
        data.buffer = compress_buf;

        S3PutObjectHandler putObjectHandler = {
            putResponseHandler,
            &putObjectDataCallback
        };

        /* Each chunk is stored under the key "<dst>.<chunk_id>". */
        char dst_chunk_s[S3_MAX_KEY_SIZE];
        snprintf(dst_chunk_s, sizeof(dst_chunk_s), "%s.%i", dst, chunk_id);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_chunk_s);

        // Get a local copy of the general bucketContext than overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        double before_s3_put = ct_now();
        int retry_count = RETRYCOUNT;
        /* data.status is set by the put response handler on completion. */
        do {
            S3_put_object(&localbucketContext, dst_chunk_s, compressed_size,
                          &putProperties, NULL, &putObjectHandler, &data);
        } while (S3_status_is_retryable(data.status) &&
                 should_retry(&retry_count));
        CT_TRACE("S3 put of %s took %fs",
                 dst_chunk_s, ct_now() - before_s3_put);

        if (data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
            goto out;
        }

        he.offset = file_offset;
        he.length = chunk;
        now = time(NULL);
        /* Progress reports are throttled to one per o_report_int seconds. */
        if (now >= last_report_time + ct_opt.o_report_int) {
            last_report_time = now;
            CT_TRACE("sending progress report for archiving %s", src);
            rc = llapi_hsm_action_progress(hcp, &he, length, 0);
            if (rc < 0) {
                /* Action has been canceled or something wrong
                 * is happening. Stop copying data. */
                CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                         src, dst);
                goto out;
            }
        }

        write_total += chunk;
        file_offset += chunk;
    } while (file_offset < length);
    rc = 0;

    // We need to delete every chunk of higher chunk_id if they
    // exists, this can happen if the new file is smaller
    // TODO only delete objects if this is a dirty write
    chunk_id += 1;
    do {
        char dst_s[S3_MAX_KEY_SIZE];
        int retry_count;

        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk_id);

        /* NOTE(review): head_data/delete_data are read before any callback
         * necessarily runs; presumably the handlers always set .status —
         * confirm against the response handler implementations. */
        get_object_callback_data head_data;
        get_object_callback_data delete_data;

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext than overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        CT_TRACE("Checking if chunk %i exists", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_head_object(&localbucketContext, dst_s, NULL,
                           &headResponseHandler, &head_data);
        } while (S3_status_is_retryable(head_data.status) &&
                 should_retry(&retry_count));

        if (head_data.status == S3StatusHttpErrorNotFound) {
            // Object do not exist, this mean we stop deleting chunks
            CT_TRACE("Chunk %i do not exists", chunk_id);
            break;
        }
        if (head_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(head_data.status));
            goto out;
        }

        CT_TRACE("Deleting chunk %i", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL,
                             &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) &&
                 should_retry(&retry_count));
        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s",
                     S3_get_status_name(delete_data.status));
            goto out;
        }
        chunk_id++;
    } while (true);

out:
    if (uncompress_buf != NULL)
        free(uncompress_buf);
    if (compress_buf != NULL)
        free(compress_buf);
    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);
    return rc;
}