void verify_retry_results(wa::storage::retry_policy policy, web::http::status_code primary_status_code, web::http::status_code secondary_status_code, wa::storage::location_mode mode, std::function<std::chrono::milliseconds (int)> allowed_delta, std::vector<wa::storage::retry_info> expected_retry_info_list)
{
    auto initial_location = get_initial_location(mode);
    auto next_location = get_next_location(mode, initial_location);

    wa::storage::operation_context op_context;
    wa::storage::request_result result(utility::datetime::utc_now(),
        initial_location,
        web::http::http_response(initial_location == wa::storage::storage_location::secondary ? secondary_status_code : primary_status_code),
        false);

    int retry_count = 0;
    for (auto iter = expected_retry_info_list.cbegin(); iter != expected_retry_info_list.cend(); ++iter)
    {
        auto retry_info = policy.evaluate(wa::storage::retry_context(retry_count++, result, next_location, mode), op_context);

        CHECK(retry_info.should_retry());
        CHECK(iter->target_location() == retry_info.target_location());
        CHECK(iter->updated_location_mode() == retry_info.updated_location_mode());
        CHECK_CLOSE(iter->retry_interval().count(), retry_info.retry_interval().count(), allowed_delta(retry_count).count());

        std::this_thread::sleep_for(retry_info.retry_interval());

        result = wa::storage::request_result(utility::datetime::utc_now(),
            retry_info.target_location(),
            web::http::http_response(retry_info.target_location() == wa::storage::storage_location::secondary ? secondary_status_code : primary_status_code),
            false);
        mode = retry_info.updated_location_mode();
        next_location = get_next_location(mode, next_location);
    }

    auto retry_info = policy.evaluate(wa::storage::retry_context(retry_count++, result, next_location, mode), op_context);
    CHECK(!retry_info.should_retry());
}
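The test above leans on two helpers that are not shown in this excerpt. A minimal sketch of what they plausibly look like, assuming the usual mapping of wa::storage::location_mode onto storage locations (primary_only/secondary_only pin a single location, while the *_then_* modes alternate between the two on successive attempts):

wa::storage::storage_location get_initial_location(wa::storage::location_mode mode)
{
    switch (mode)
    {
    case wa::storage::location_mode::primary_only:
    case wa::storage::location_mode::primary_then_secondary:
        return wa::storage::storage_location::primary;
    case wa::storage::location_mode::secondary_only:
    case wa::storage::location_mode::secondary_then_primary:
        return wa::storage::storage_location::secondary;
    default:
        throw std::invalid_argument("mode");
    }
}

wa::storage::storage_location get_next_location(wa::storage::location_mode mode, wa::storage::storage_location current)
{
    switch (mode)
    {
    case wa::storage::location_mode::primary_only:
        return wa::storage::storage_location::primary;
    case wa::storage::location_mode::secondary_only:
        return wa::storage::storage_location::secondary;
    default:
        // The alternating modes flip the target location on every retry.
        return current == wa::storage::storage_location::primary
            ? wa::storage::storage_location::secondary
            : wa::storage::storage_location::primary;
    }
}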
Example #2
int __s3fs_remove_object(const char *bucketName, const char *key) {
    S3_init();
    S3BucketContext bucketContext =
    {
        0,                  // hostName (0 selects the default S3 endpoint)
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG
    };

    S3ResponseHandler responseHandler =
    {
        0,                  // no properties callback needed for a delete
        &responseCompleteCallback
    };

    do {
        S3_delete_object(&bucketContext, key, 0, &responseHandler, 0);
    } while (S3_status_is_retryable(statusG) && should_retry());

    int result = statusG == S3StatusOK ? 0 : -1;

    if ((statusG != S3StatusOK) &&
        (statusG != S3StatusErrorPreconditionFailed)) {
        printError();
    }

    S3_deinitialize();

    return result;    
}
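The statusG/should_retry() pair used throughout these examples follows the retry idiom of libs3's sample client (s3.c): the response-complete callback records the final status in a global, and should_retry() sleeps and counts down a retry budget. A minimal sketch under that assumption (the globals and the linear backoff are not shown in this excerpt):

#include <unistd.h>
#include "libs3.h"

static S3Status statusG;   // final status of the last request
static int retriesG = 5;   // attempts left before giving up

static void responseCompleteCallback(S3Status status,
                                     const S3ErrorDetails *error,
                                     void *callbackData)
{
    (void) error;
    (void) callbackData;
    statusG = status;
}

static int should_retry(void)
{
    if (retriesG--) {
        // Simple linear backoff: sleep one second longer after each failure.
        static int retrySleepInterval = 1;
        sleep(retrySleepInterval++);
        return 1;
    }
    return 0;
}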
Example #3
ssize_t __s3fs_get_object(const char *bucketName, const char *key, uint8_t **buf, 
                        ssize_t start_byte, ssize_t byte_count) {

    int64_t ifModifiedSince = -1, ifNotModifiedSince = -1;
    const char *ifMatch = 0, *ifNotMatch = 0;
    uint64_t startByte = start_byte, byteCount = byte_count;

    S3_init();

    struct get_callback_data get_context;
    get_context.buf = NULL;
    get_context.bytes_read = 0;
    
    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG
    };

    S3GetConditions getConditions =
    {
        ifModifiedSince,
        ifNotModifiedSince,
        ifMatch,
        ifNotMatch
    };

    S3GetObjectHandler getObjectHandler =
    {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &getObjectDataCallback
    };

    do {
        S3_get_object(&bucketContext, key, &getConditions, startByte,
                      byteCount, 0, &getObjectHandler, &get_context);
    } while (S3_status_is_retryable(statusG) && should_retry());

    ssize_t status = get_context.bytes_read;
    if (statusG != S3StatusOK) {
        status = -1;
        if (get_context.buf) {
            free(get_context.buf);
        }
        printError();
    } else {
        *buf = get_context.buf; 
    }

    S3_deinitialize();

    return status;
}
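getObjectDataCallback is not shown above; a plausible minimal implementation, assuming the get_callback_data fields used in the example (buf, bytes_read) and a grow-on-demand buffer strategy:

static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
                                      void *callbackData)
{
    struct get_callback_data *ctx = (struct get_callback_data *) callbackData;

    // Grow the destination buffer and append this chunk of the object body.
    uint8_t *grown = (uint8_t *) realloc(ctx->buf, ctx->bytes_read + bufferSize);
    if (grown == NULL) {
        return S3StatusAbortedByCallback;   // tells libs3 to abort the request
    }
    ctx->buf = grown;
    memcpy(ctx->buf + ctx->bytes_read, buffer, bufferSize);
    ctx->bytes_read += bufferSize;
    return S3StatusOK;
}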
Example #4
static int get_s3_object(char *objectName,
                         get_object_callback_data *data,
                         S3GetObjectHandler *getObjectHandler){

    assert(objectName && data && getObjectHandler);
    memset(data, 0, sizeof(get_object_callback_data));

    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    getBucketName(sizeof(bucket_name), bucket_name, objectName);

    // Get a local copy of the general bucketContext, then overwrite the
    // pointer to the bucket_name
    S3BucketContext localbucketContext;
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    data->buffer_offset = 0;
    data->buffer = NULL;

    double before_s3_get = ct_now();
    int retry_count = RETRYCOUNT;

    do {
        S3_get_object(&localbucketContext, objectName, NULL, 0, 0, NULL, getObjectHandler, data);
    } while (S3_status_is_retryable(data->status) && should_retry(&retry_count));

    CT_TRACE("S3 get of %s took %fs", objectName, ct_now() - before_s3_get);

    if (data->status != S3StatusOK) {
        CT_ERROR(-EIO, "S3Error %s", S3_get_status_name(data->status));
        // Free any partially downloaded data before bailing out.
        if (data->buffer != NULL) {
            free(data->buffer);
            data->buffer = NULL;
        }
        return -EIO;
    }
    if (data->buffer == NULL) {
        return -ENOMEM;
    }

    double before_checksum = ct_now();
    unsigned char md5[MD5_DIGEST_LENGTH];
    char md5_s[MD5_ASCII];
    MD5_CTX mdContext;
    MD5_Init (&mdContext);
    MD5_Update (&mdContext, data->buffer, data->contentLength);
    MD5_Final (md5, &mdContext);
    int i;

    for(i = 0; i < MD5_DIGEST_LENGTH; i++){
        sprintf(&md5_s[i*2], "%02x", md5[i]);
    }

    if (strcmp(md5_s, data->md5) != 0) {
        CT_ERROR(-EIO, "Bad MD5 checksum for %s, computed %s, expected %s",
            objectName, md5_s, data->md5);
        free(data->buffer);
        data->buffer = NULL;
        return -EIO;
    }
    CT_TRACE("Checksum of %s took %fs", objectName, ct_now() - before_checksum);
    return 0;
}
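The get_object_callback_data structure itself is not part of this excerpt; a plausible layout, inferred from the fields the copytool code reads and writes (the exact types are assumptions):

typedef struct get_object_callback_data {
    S3Status  status;          // final status, recorded by the complete callback
    char     *buffer;          // downloaded (compressed) object body
    uint64_t  buffer_offset;   // bytes stored into buffer so far
    uint64_t  contentLength;   // object size, from the response properties
    uint64_t  totalLength;     // "totallength" metadata: size of the whole file
    uint64_t  chunk_size;      // "chunksize" metadata: bytes per chunk object
    char      md5[MD5_ASCII];  // expected checksum, from the response headers
} get_object_callback_data;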
Example #5
File: lister.cpp Project: ephemerr/s3bar
void list_bucket(const char *bucketName, const char *prefix,
                        const char *marker, const char *delimiter,
                        int maxkeys, const char* ak, const char* sk)
{

    S3_init();
    savedCommonPrefixes.clear();
    savedContents.clear();
    savedKeys.clear();

    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        S3ProtocolHTTPS,
        S3UriStyleVirtualHost,
        ak,
        sk
    };

    S3ListBucketHandler listBucketHandler =
    {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &listBucketCallback
    };

    list_bucket_callback_data data;

    snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker ? marker : "");
    data.keyCount = 0;
    data.allDetails = 0;

    do {
        data.isTruncated = 0;
        do {
            S3_list_bucket(&bucketContext, prefix, data.nextMarker,
                           delimiter, maxkeys, 0, &listBucketHandler, &data);
        } while (S3_status_is_retryable((S3Status)statusG) && should_retry());
        if (statusG != S3StatusOK) {
            break;
        }
    } while (data.isTruncated && (!maxkeys || (data.keyCount < maxkeys)));

    if (statusG != S3StatusOK) {
        printError();
    }

    S3_deinitialize();
}
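listBucketCallback is the piece that fills savedKeys and friends; a minimal sketch of it, assuming the standard libs3 list callback signature and that the saved* globals are STL containers of strings (their exact types, and the handling of savedContents, are not shown in this excerpt):

static S3Status listBucketCallback(int isTruncated, const char *nextMarker,
                                   int contentsCount,
                                   const S3ListBucketContent *contents,
                                   int commonPrefixesCount,
                                   const char **commonPrefixes,
                                   void *callbackData)
{
    list_bucket_callback_data *data = (list_bucket_callback_data *) callbackData;

    data->isTruncated = isTruncated;
    // Remember where the next page starts; fall back to the last key seen.
    if (!nextMarker && contentsCount) {
        nextMarker = contents[contentsCount - 1].key;
    }
    if (nextMarker) {
        snprintf(data->nextMarker, sizeof(data->nextMarker), "%s", nextMarker);
    }

    for (int i = 0; i < contentsCount; i++) {
        savedKeys.push_back(contents[i].key);   // deep-copies the key string
    }
    for (int i = 0; i < commonPrefixesCount; i++) {
        savedCommonPrefixes.push_back(commonPrefixes[i]);
    }

    data->keyCount += contentsCount;
    return S3StatusOK;
}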
Example #6
    retry_info basic_linear_retry_policy::evaluate(const retry_context& retry_context, operation_context context)
    {
        auto result = basic_common_retry_policy::evaluate(retry_context, context);
        
        if (result.should_retry())
        {
            result.set_retry_interval(m_delta_backoff);
            align_retry_interval(result);
        }

        return result;
    }
Example #7
    retry_info basic_exponential_retry_policy::evaluate(const retry_context& retry_context, operation_context context)
    {
        auto result = basic_common_retry_policy::evaluate(retry_context, context);

        if (result.should_retry())
        {
            auto random_backoff = m_rand_distribution(m_rand_engine);
            std::chrono::milliseconds increment(static_cast<std::chrono::milliseconds::rep>((std::pow(2, retry_context.current_retry_count()) - 1) * random_backoff * 1000));
            auto interval = increment < std::chrono::milliseconds::zero() ? max_exponential_retry_interval : min_exponential_retry_interval + increment;
            result.set_retry_interval(std::min(interval, max_exponential_retry_interval));
            align_retry_interval(result);
        }

        return result;
    }
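For reference, the interval computed above grows as min_exponential_retry_interval + (2^n - 1) * r, where n is the current retry count and r (in seconds) is drawn from the random distribution: with r = 4 s the successive increments are 0 s, 4 s, 12 s, 28 s, and so on, capped at max_exponential_retry_interval. The increment < zero() test catches the point where (2^n - 1) * r * 1000 overflows the milliseconds representation and wraps negative, in which case the interval is pinned to the maximum.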
Example #8
static int bio_stm_read(BIO *h, char *buf, int size)
{
  uint64_t nread;
  ph_stream_t *stm = h->ptr;

  if (buf == NULL || size == 0 || stm == NULL) {
    return 0;
  }

  BIO_clear_retry_flags(h);
  if (ph_stm_read(stm, buf, size, &nread)) {
    return (int)nread;
  }

  if (should_retry(stm)) {
    BIO_set_retry_read(h);
  }

  return -1;
}
Example #9
static int bio_stm_write(BIO *h, const char *buf, int size)
{
  uint64_t nwrote;
  ph_stream_t *stm = h->ptr;

  if (buf == NULL || size == 0 || stm == NULL) {
    return 0;
  }

  BIO_clear_retry_flags(h);
  if (ph_stm_write(stm, buf, size, &nwrote)) {
    return (int)nwrote;
  }

  if (should_retry(stm)) {
    BIO_set_retry_write(h);
  }

  return -1;
}
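To be usable with TLS these two callbacks still have to be packaged into a BIO_METHOD. With OpenSSL 1.1.0 and later that goes through the opaque accessor API; a sketch under that assumption (the direct h->ptr access above suggests the original targets the older static BIO_METHOD struct, where the pointer would instead be assigned by hand):

#include <openssl/bio.h>

static BIO_METHOD *bio_stm_method(void)
{
    static BIO_METHOD *method = NULL;
    if (method == NULL) {
        method = BIO_meth_new(BIO_get_new_index() | BIO_TYPE_SOURCE_SINK,
                              "phenom-stream");
        BIO_meth_set_read(method, bio_stm_read);
        BIO_meth_set_write(method, bio_stm_write);
    }
    return method;
}

// Usage: wrap an existing ph_stream_t in a BIO and hand it to SSL_set_bio().
// Under the opaque API the callbacks would fetch the stream with
// BIO_get_data(h) rather than reading h->ptr directly.
//
//     BIO *b = BIO_new(bio_stm_method());
//     BIO_set_data(b, stm);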
Example #10
int __s3fs_test_bucket(const char *bucketName)
{
    S3_init();

    S3ResponseHandler responseHandler =
    {
        &responsePropertiesCallback, &responseCompleteCallback
    };

    char locationConstraint[64];
    do {
        S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
                       0, bucketName, sizeof(locationConstraint),
                       locationConstraint, 0, &responseHandler, 0);
    } while (S3_status_is_retryable(statusG) && should_retry());

    const char *reason = "Unknown";
    int result = statusG == S3StatusOK ? 1 : 0;

    switch (statusG) {
    case S3StatusOK:
        // bucket exists
        reason = locationConstraint[0] ? locationConstraint : "USA";
        break;
    case S3StatusErrorNoSuchBucket:
        reason = "Does Not Exist";
        break;
    case S3StatusErrorAccessDenied:
        reason = "Access Denied";
        break;
    default:
        break;
    }

    fprintf(stderr, "S3 test_bucket: %s\n", reason);

    S3_deinitialize();

    return result;
}
Example #11
File: wcd.c Project: flsafe/Webdir
static int bucket_exists(char *bname) {

    s3_init();

    if (!load_settings()) {
        printf("Configure your ~/.webdir-settings file!\n");
        exit(1);
    }

    char skey[STR];
    get_secret_key(skey);
    char akey[STR];
    get_access_key(akey);
    char hostname[STR];
    get_host(hostname);

    S3ResponseHandler res_handler = {
        &res_properties, &res_complete /* Can properties be null? */
    };

    char loc_constraint[STR];
    do {
        S3_test_bucket(S3ProtocolHTTPS,
                       S3UriStylePath,
                       akey,
                       skey,
                       hostname,
                       bname,
                       sizeof(loc_constraint),
                       loc_constraint,
                       0,
                       &res_handler,
                       0);
    } while (S3_status_is_retryable(RequestStatus) && should_retry());

    S3_deinitialize();

    return RequestStatus == S3StatusOK; // Bucket exists
}
Example #12
ssize_t __s3fs_put_object(const char *bucketName, const char *key, const uint8_t *buf, ssize_t contentLength)
{
    const char *cacheControl = 0, *contentType = 0, *md5 = 0;
    const char *contentDispositionFilename = 0, *contentEncoding = 0;
    int64_t expires = -1;
    S3CannedAcl cannedAcl = S3CannedAclPrivate;
    int metaPropertiesCount = 0;
    S3NameValue metaProperties[S3_MAX_METADATA_COUNT];
    int noStatus = 0;

    put_object_callback_data data;
    memset(&data, 0, sizeof(put_object_callback_data));
    data.data = buf;
    // data.gb = 0;
    data.noStatus = noStatus;

    data.contentLength = data.originalContentLength = contentLength;

    S3_init();
    
    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG
    };

    S3PutProperties putProperties =
    {
        contentType,
        md5,
        cacheControl,
        contentDispositionFilename,
        contentEncoding,
        expires,
        cannedAcl,
        metaPropertiesCount,
        metaProperties
    };

    S3PutObjectHandler putObjectHandler =
    {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &putObjectDataCallback
    };

    do {
        S3_put_object(&bucketContext, key, contentLength, &putProperties, 0,
                      &putObjectHandler, &data);
    } while (S3_status_is_retryable(statusG) && should_retry());

    ssize_t result = data.written;

    if (statusG != S3StatusOK) {
        printError();
        result = -1;
    }
    else if (data.contentLength) {
        fprintf(stderr, "\nERROR: Failed to read remaining %llu bytes from "
                "input\n", (unsigned long long) data.contentLength);
        result = -1;
    }

    S3_deinitialize();
    return result;
}
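putObjectDataCallback is the mirror image of the get callback: libs3 calls it each time it needs more request body. A minimal sketch, assuming the fields used above (data points at the caller's buffer, written doubles as the copy offset, and contentLength counts down to zero):

static int putObjectDataCallback(int bufferSize, char *buffer,
                                 void *callbackData)
{
    put_object_callback_data *data = (put_object_callback_data *) callbackData;

    int toCopy = 0;
    if (data->contentLength > 0) {
        // Copy the next slice of the source buffer into libs3's buffer.
        toCopy = data->contentLength > bufferSize ? bufferSize
                                                  : (int) data->contentLength;
        memcpy(buffer, data->data + data->written, toCopy);
        data->written += toCopy;
        data->contentLength -= toCopy;
    }
    return toCopy;   // returning 0 signals the end of the body
}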
Example #13
int __s3fs_clear_bucket(const char *bucketName) {
    S3_init();

    const char *prefix = 0, *marker = 0, *delimiter = 0;
    int maxkeys = 0, allDetails = 0;
    
    S3BucketContext bucketContext =
    {
        0,
        bucketName,
        protocolG,
        uriStyleG,
        accessKeyIdG,
        secretAccessKeyG
    };

    S3ListBucketHandler listBucketHandler =
    {
        { &responsePropertiesCallback, &responseCompleteCallback },
        &traverseBucketCallback
    };

    traverse_bucket_callback_data data;

    snprintf(data.nextMarker, sizeof(data.nextMarker), "%s", marker ? marker : "");
    data.keyCount = 0;
    data.keylist = NULL;
    data.allDetails = allDetails;

    do {
        data.isTruncated = 0;
        do {
            S3_list_bucket(&bucketContext, prefix, data.nextMarker,
                           delimiter, maxkeys, 0, &listBucketHandler, &data);
        } while (S3_status_is_retryable(statusG) && should_retry());
        if (statusG != S3StatusOK) {
            break;
        }
    } while (data.isTruncated && (!maxkeys || (data.keyCount < maxkeys)));

    int rv = statusG == S3StatusOK ? 0 : -1;

    S3_deinitialize();

    struct node *klist = data.keylist;

    // try to remove objects
    if (rv == 0) {
        while (klist) {
            struct node *el = klist;
            int thisrv = __s3fs_remove_object(bucketName, el->key);
            if (thisrv < 0) {
                rv = -1;
            }
            klist = klist->next;
        }
    }

    // free keylist
    klist = data.keylist;
    while (klist) {
        struct node *el = klist;
        klist = klist->next;
        free(el->key);
        free(el);
    }

    return rv;
}
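The keylist built by traverseBucketCallback is a plain singly linked list; its node type is not shown, but from the way the list is walked and freed it is presumably something like:

struct node {
    char        *key;    // heap-allocated object key, freed by the caller
    struct node *next;
};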
Example #14
static int ct_restore_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int dst_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64             file_offset = hai->hai_extent.offset;
    struct stat       dst_st;
    __u64             write_total = 0;
    __u64             length = hai->hai_extent.length;
    time_t            last_report_time;
    time_t            now;
    int               rc = 0;
    double            start_ct_now = ct_now();

    // Restore a file from the object store back to Lustre

    CT_TRACE("Restoring %s to %s", src, dst);
    if (fstat(dst_fd, &dst_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", dst);
        return rc;
    }

    if (!S_ISREG(dst_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", dst);
        return rc;
    }

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                 src, dst);
        goto out;
    }

    errno = 0;

    last_report_time = time(NULL);

    long long int object_chunk_size = chunk_size; // will be assigned the correct value based on the metadata

    do {
        // Downloading from the object store

        char src_chunk_s[S3_MAX_KEY_SIZE];

        S3GetObjectHandler getObjectHandler =
        {
            getResponseHandler,
            &getObjectDataCallback
        };

        if (length == -1) {
            // Discover length and chunk size from the first object's metadata
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.0", src);
            if (file_offset == 0) {
                // Download data and metadata from the first chunk
                get_object_callback_data data;
                rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
                if(rc < 0){
                    goto out;
                }

                length = data.totalLength;
                object_chunk_size = data.chunk_size;

                char *uncompress_buf = NULL;
                uncompress_buf = malloc(object_chunk_size);
                if (uncompress_buf == NULL) {
                    rc = -ENOMEM;
                    goto out;
                }

                double before_decompression = ct_now();
                int decompressed_size = LZ4_decompress_safe(data.buffer, uncompress_buf, data.contentLength, object_chunk_size);
                if (decompressed_size < 0) {
                    rc = -1;
                    CT_ERROR(rc, "Decompression error");
                    goto out;
                }
                CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs and the uncompressed size is %i bytes",
                    src, data.contentLength, ct_now() - before_decompression, decompressed_size);

                double before_lustre_write = ct_now();
                pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
                CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu to lustre took %fs",
                    src_chunk_s, object_chunk_size, file_offset, ct_now() - before_lustre_write);

                if (uncompress_buf != NULL)
                    free(uncompress_buf);
                if (data.buffer != NULL)
                    free(data.buffer);

                write_total = decompressed_size;
                file_offset += decompressed_size;

                he.offset = file_offset;
                he.length = data.contentLength;
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }

                if (write_total == length) {
                    // Completed the full write with the first object
                    rc = 0;
                    break;
                }
            }
            else {
                // Only make a head request to get the metadata of the first object
                get_object_callback_data data;

                char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
                getBucketName(sizeof(bucket_name), bucket_name, src_chunk_s);

                // Get a local copy of the general bucketContext, then overwrite the
                // pointer to the bucket_name
                S3BucketContext localbucketContext;
                memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
                localbucketContext.bucketName = bucket_name;

                int retry_count = RETRYCOUNT;
                do {
                    S3_head_object(&localbucketContext, src_chunk_s, NULL, &headResponseHandler, &data);
                } while (S3_status_is_retryable(data.status) && should_retry(&retry_count));

                if (data.status != S3StatusOK) {
                    rc = -EIO;
                    CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
                    goto out;
                }
                object_chunk_size = data.chunk_size;
                length = data.totalLength;
            }
        }
        else {
            snprintf(src_chunk_s, sizeof(src_chunk_s), "%s.%llu", src, file_offset / object_chunk_size);

            long long unsigned int chunk;
            if (length - write_total > object_chunk_size) {
                // upper bound is the chunk_size
                chunk = object_chunk_size;
            }
            else {
                // limited by the file
                chunk = length - write_total;
            }

            get_object_callback_data data;
            rc = get_s3_object(src_chunk_s, &data, &getObjectHandler);
            if(rc < 0){
                goto out;
            }

            char *uncompress_buf = NULL;
            uncompress_buf = malloc(object_chunk_size);
            if (uncompress_buf == NULL) {
                rc = -ENOMEM;
                goto out;
            }

            double before_decompression = ct_now();
            int decompressed_size = LZ4_decompress_safe(data.buffer, uncompress_buf, data.contentLength, object_chunk_size);
            if (decompressed_size < 0) {
                rc = -1;
                CT_ERROR(rc, "Decompression error");
                goto out;
            }
            CT_TRACE("Decompressing a chunk from %s of %llu bytes took %fs and the uncompressed size is %i",
                src, data.contentLength, ct_now() - before_decompression, decompressed_size);

            double before_lustre_write = ct_now();
            pwrite(dst_fd, uncompress_buf, decompressed_size, file_offset);
            CT_TRACE("Writing a chunk from %s of %llu bytes offset %llu to lustre took %fs",
                src_chunk_s, chunk, file_offset, ct_now() - before_lustre_write);

            if (uncompress_buf != NULL)
                free(uncompress_buf);
            if (data.buffer != NULL)
                free(data.buffer);

            now = time(NULL);
            if (now >= last_report_time + ct_opt.o_report_int) {
                last_report_time = now;
                CT_TRACE("sending progress report for restoring %s", src);
                rc = llapi_hsm_action_progress(hcp, &he, length, 0);
                if (rc < 0) {
                    /* Action has been canceled or something wrong
                     * is happening. Stop copying data. */
                    CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                             src, dst);
                    goto out;
                }
            }

            write_total += decompressed_size;
            file_offset += decompressed_size;
        }
        rc = 0;
    } while (file_offset < length);

    if (hai->hai_action == HSMA_RESTORE) {
        /*
         * Truncate the restored file to the size recorded in the
         * archive. This supports restore after a force release, which
         * leaves the file with the wrong size (it can be bigger than
         * the new size), and makes sure the file is on disk before
         * reporting success.
         */
        rc = ftruncate(dst_fd, length);
        if (rc < 0) {
            rc = -errno;
            CT_ERROR(rc, "cannot truncate '%s' to size %llu",
                     dst, length);
            err_major++;
        }
    }

out:
    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);

    return rc;
}
Example #15
static int ct_archive_data(struct hsm_copyaction_private *hcp, const char *src,
                           const char *dst, int src_fd,
                           const struct hsm_action_item *hai, long hal_flags)
{
    struct hsm_extent he;
    __u64             file_offset = hai->hai_extent.offset;
    struct stat       src_st;
    char              *uncompress_buf = NULL;
    char              *compress_buf = NULL;
    __u64             write_total = 0;
    __u64             length = hai->hai_extent.length;
    time_t            last_report_time;
    int               rc = 0;
    double            start_ct_now = ct_now();
    time_t            now;
    int               compression_bound = LZ4_compressBound(chunk_size);

    // Archiving a file from Lustre to the object store
    CT_TRACE("Archiving %s to %s", src, dst);
    if (fstat(src_fd, &src_st) < 0) {
        rc = -errno;
        CT_ERROR(rc, "cannot stat '%s'", src);
        return rc;
    }

    if (!S_ISREG(src_st.st_mode)) {
        rc = -EINVAL;
        CT_ERROR(rc, "'%s' is not a regular file", src);
        return rc;
    }

    if (hai->hai_extent.offset > (__u64)src_st.st_size) {
        rc = -EINVAL;
        CT_ERROR(rc, "Trying to start reading past end ("LPU64" > "
                 "%jd) of '%s' source file", hai->hai_extent.offset,
                 (intmax_t)src_st.st_size, src);
        return rc;
    }

    strippingInfo stripping_params;
    stripping_params.lmm_stripe_count = 1;
    stripping_params.lmm_stripe_size = ONE_MB;

    if (ct_save_stripe(src_fd, src, &stripping_params)) {
        return -1;
    }

    /* Don't read beyond a given extent */
    if (length > src_st.st_size - hai->hai_extent.offset)
        length = src_st.st_size - hai->hai_extent.offset;

    last_report_time = time(NULL);

    he.offset = file_offset;
    he.length = 0;
    rc = llapi_hsm_action_progress(hcp, &he, length, 0);
    if (rc < 0) {
        /* Action has been canceled or something wrong
         * is happening. Stop copying data. */
        CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                 src, dst);
        goto out;
    }

    errno = 0;

    uncompress_buf = malloc(chunk_size);
    if (uncompress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    compress_buf = malloc(compression_bound);
    if (compress_buf == NULL) {
        rc = -ENOMEM;
        goto out;
    }

    int chunk_id = -1;

    const char totalLength[] = "totallength";
    const char chunksize[] = "chunksize";
    const char stripe_size[] = "stripesize";
    const char stripe_count[] = "stripecount";
    const char path[] = "path";
    const char uid[] = "uid";
    const char gid[] = "gid";

    char totalLength_s[TOTALLENGTH];
    char chunksize_s[TOTALLENGTH];
    char stripe_size_s[TOTALLENGTH];
    char stripe_count_s[TOTALLENGTH];
    char path_s[PATH_MAX];
    char uid_s[TOTALLENGTH];
    char gid_s[TOTALLENGTH];

    snprintf(totalLength_s, sizeof(totalLength_s), "%llu", length);
    snprintf(chunksize_s, sizeof(chunksize_s), "%i", chunk_size);
    snprintf(stripe_size_s, sizeof(stripe_size_s), "%i", stripping_params.lmm_stripe_size);
    snprintf(stripe_count_s, sizeof(stripe_count_s), "%i", stripping_params.lmm_stripe_count);
    snprintf(path_s, sizeof(path_s), "%s", src); // FIXME should use fid2path to get the normal path
    snprintf(uid_s, sizeof(uid_s), "%i", src_st.st_uid);
    snprintf(gid_s, sizeof(gid_s), "%i", src_st.st_gid);

    // Saving some metadata for disaster recovery
    S3NameValue metadata[7] =
    {
        {
            totalLength,
            totalLength_s,
        },
        {
            chunksize,
            chunksize_s,
        },
        {
            stripe_size,
            stripe_size_s,
        },
        {
            stripe_count,
            stripe_count_s,
        },
        {
            path,
            path_s
        },
        {
            uid,
            uid_s
        },
        {
            gid,
            gid_s
        }
    };

    S3PutProperties putProperties =
    {
        // application/x-lz4 does not officially exist
        "application/x-lz4", // contentType
        NULL, // md5
        NULL, // cacheControl
        NULL, // contentDispositionFilename
        NULL, // contentEncoding
        -1, // expires
        0, // cannedAcl
        sizeof(metadata) / sizeof(S3NameValue), // metaDataCount
        metadata, // S3NameValue *metaData
        0, // useServerSideEncryption
    };

    do {
        // Uploading to object store

        if (chunk_id == -1) {
            CT_TRACE("start copy of "LPU64" bytes from '%s' to '%s'",
                     length, src, dst);
        }

        // size of the current chunk, limited by chunk_size
        long long unsigned int  chunk;

        if (length - write_total > chunk_size) {
            // upper bound is the chunk_size
            chunk = chunk_size;
        }
        else {
            // limited by the file
            chunk = length - write_total;
        }

        chunk_id = file_offset / chunk_size;

        put_object_callback_data data;

        data.buffer_offset = 0;
        double before_lustre_read = ct_now();
        pread(src_fd, uncompress_buf, chunk, file_offset);
        CT_TRACE("Reading a chunk from %s of %llu bytes offset %llu from lustre took %fs",
            src, chunk, file_offset, ct_now() - before_lustre_read);

        double before_compression = ct_now();
        int compressed_size = LZ4_compress_default(uncompress_buf, compress_buf, chunk, compression_bound);
        CT_TRACE("Compressing a chunk from %s took %fs and the compressed size is %i bytes",
            src,  ct_now() - before_compression, compressed_size);

        if (compressed_size <= 0) {
            rc = -1;
            CT_ERROR(rc, "Compression error");
            goto out;
        }
        data.contentLength = compressed_size;
        data.buffer = compress_buf;

        S3PutObjectHandler putObjectHandler =
        {
            putResponseHandler,
            &putObjectDataCallback
        };

        char dst_chunk_s[S3_MAX_KEY_SIZE];
        snprintf(dst_chunk_s, sizeof(dst_chunk_s), "%s.%i", dst, chunk_id);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_chunk_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        double before_s3_put = ct_now();
        int retry_count = RETRYCOUNT;
        do {
            S3_put_object(&localbucketContext, dst_chunk_s, compressed_size, &putProperties, NULL, &putObjectHandler, &data);
        } while (S3_status_is_retryable(data.status) && should_retry(&retry_count));
        CT_TRACE("S3 put of %s took %fs",
            dst_chunk_s, ct_now() - before_s3_put);

        if (data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
            goto out;
        }

        he.offset = file_offset;
        he.length = chunk;

        now = time(NULL);
        if (now >= last_report_time + ct_opt.o_report_int) {
            last_report_time = now;
            CT_TRACE("sending progress report for archiving %s", src);
            rc = llapi_hsm_action_progress(hcp, &he, length, 0);
            if (rc < 0) {
                /* Action has been canceled or something wrong
                 * is happening. Stop copying data. */
                CT_ERROR(rc, "progress ioctl for copy '%s'->'%s' failed",
                         src, dst);
                goto out;
            }
        }

        write_total += chunk;
        file_offset += chunk;
    } while (file_offset < length);
    rc = 0;

    // We need to delete any leftover chunks with a higher chunk_id;
    // they can exist if the new version of the file is smaller.
    // TODO only delete objects if this is a dirty write

    chunk_id += 1;
    do {
        char dst_s[S3_MAX_KEY_SIZE];
        int retry_count;

        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk_id);
        get_object_callback_data head_data;
        get_object_callback_data delete_data;

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        CT_TRACE("Checking if chunk %i exists", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_head_object(&localbucketContext, dst_s, NULL, &headResponseHandler, &head_data);
        } while (S3_status_is_retryable(head_data.status) && should_retry(&retry_count));

        if (head_data.status == S3StatusHttpErrorNotFound) {
            // The object does not exist, so there are no more chunks to delete.
            CT_TRACE("Chunk %i does not exist", chunk_id);
            break;
        }

        if (head_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(head_data.status));
            goto out;
        }

        CT_TRACE("Deleting chunk %i", chunk_id);
        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL, &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) && should_retry(&retry_count));

        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto out;
        }

        chunk_id++;
    } while (true);

out:
    if (uncompress_buf != NULL)
        free(uncompress_buf);
    if (compress_buf != NULL)
        free(compress_buf);

    CT_TRACE("copied "LPU64" bytes in %f seconds",
             length, ct_now() - start_ct_now);

    return rc;
}
Example #16
int ct_remove(const struct hsm_action_item *hai, const long hal_flags)
{
    struct hsm_copyaction_private *hcp = NULL;
    char dst[PATH_MAX];
    int  rc;
    int  retry_count;
    char dst_s[S3_MAX_KEY_SIZE];

    rc = ct_begin(&hcp, hai);
    if (rc < 0)
        goto end_ct_remove;

    ct_path_archive(dst, sizeof(dst), &hai->hai_fid);

    CT_TRACE("removing file '%s'", dst);

    if (ct_opt.o_dry_run) {
        rc = 0;
        goto end_ct_remove;
    }

    // Get the metadata from the first object to get the number of chunks
    get_object_callback_data data;

    snprintf(dst_s, sizeof(dst_s), "%s.0", dst);

    char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
    getBucketName(sizeof(bucket_name), bucket_name, dst_s);

    // Get a local copy of the general bucketContext, then overwrite the
    // pointer to the bucket_name
    S3BucketContext localbucketContext;
    memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
    localbucketContext.bucketName = bucket_name;

    retry_count = RETRYCOUNT;
    do {
        S3_head_object(&localbucketContext, dst_s, NULL, &headResponseHandler, &data);
    } while (S3_status_is_retryable(data.status) && should_retry(&retry_count));

    if (data.status != S3StatusOK) {
        rc = -EIO;
        CT_ERROR(rc, "S3Error %s", S3_get_status_name(data.status));
        goto end_ct_remove;
    }

    int chunk;
    for (chunk = data.totalLength / data.chunk_size; chunk >= 0; chunk--) {
        snprintf(dst_s, sizeof(dst_s), "%s.%i", dst, chunk);
        get_object_callback_data delete_data;

        CT_TRACE("Deleting chunk '%s'", dst_s);

        char bucket_name[S3_MAX_BUCKET_NAME_SIZE];
        getBucketName(sizeof(bucket_name), bucket_name, dst_s);

        // Get a local copy of the general bucketContext, then overwrite the
        // pointer to the bucket_name
        S3BucketContext localbucketContext;
        memcpy(&localbucketContext, &bucketContext, sizeof(S3BucketContext));
        localbucketContext.bucketName = bucket_name;

        retry_count = RETRYCOUNT;
        do {
            S3_delete_object(&localbucketContext, dst_s, NULL, &deleteResponseHandler, &delete_data);
        } while (S3_status_is_retryable(delete_data.status) && should_retry(&retry_count));

        if (delete_data.status != S3StatusOK) {
            rc = -EIO;
            CT_ERROR(rc, "S3Error %s", S3_get_status_name(delete_data.status));
            goto end_ct_remove;
        }
    }
    rc = 0;

end_ct_remove:
    rc = ct_action_done(&hcp, hai, 0, rc);

    return rc;
}