static void delete_status_bucket(struct cloudmig_ctx *ctx) { dpl_status_t dplret; dpl_vec_t *objects = NULL; cloudmig_log(DEBUG_LVL, "[Deleting files]: Deleting status bucket...\n"); dplret = dpl_list_bucket(ctx->src_ctx, ctx->status.bucket_name, NULL, NULL, &objects, NULL); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not list bucket %s for deletion : %s\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); goto deletebucket; } dpl_object_t** cur_object = (dpl_object_t**)objects->array; for (int i = 0; i < objects->n_items; ++i, ++cur_object) delete_file(ctx, ctx->status.bucket_name, (*cur_object)->key); deletebucket: dpl_deletebucket(ctx->src_ctx, ctx->status.bucket_name); if (dplret != DPL_SUCCESS) { PRINTERR("%s: Could not delete bucket %s : %s.\n", __FUNCTION__, ctx->status.bucket_name, dpl_status_str(dplret)); return ; } cloudmig_log(DEBUG_LVL, "[Deleting Source] Bucket %s deleted.\n", ctx->status.bucket_name); }
int dfs_mkdir(const char *path, mode_t mode) { dpl_status_t rc; int ret; tpath_entry *pe = NULL; char *key = NULL; LOG(LOG_DEBUG, "path=%s, mode=0x%x", path, (int)mode); rc = dfs_mkdir_timeout(ctx, path); if (DPL_SUCCESS != rc) { LOG(LOG_ERR, "dfs_mkdir_timeout: %s", dpl_status_str(rc)); ret = -1; goto err; } pe = g_hash_table_lookup(hash, path); if (! pe) { if (-1 == populate_hash(hash, path, FILE_DIR, &pe)) { LOG(LOG_ERR, "populate with path %s failed", path); ret = -1; goto err; } LOG(LOG_DEBUG, "added a new dir entry in hashtable: %s", path); } pe->filetype = FILE_DIR; ret = 0; err: LOG(LOG_DEBUG, "path=%s ret=%s", path, dpl_status_str(ret)); return ret; }
/*
 * Refresh the cached metadata of one path entry (g_list foreach callback).
 * Resolves the path, re-fetches its attributes, recurses over directory
 * entries, and stores the new metadata under the entry's md lock.
 */
static void update_md(gpointer data, gpointer user_data)
{
    pentry_t *pe = NULL;
    char *path = NULL;
    /* BUGFIX: 'type' was logged via ftype_to_str() before checking 'rc';
     * if dfs_namei_timeout() failed it was read uninitialized (UB). */
    dpl_ftype_t type = DPL_FTYPE_REG;
    dpl_ino_t ino;
    dpl_status_t rc;
    dpl_dict_t *metadata = NULL;
    struct list *dirent = NULL;

    (void)user_data;

    pe = data;
    path = pentry_get_path(pe);
    LOG(LOG_DEBUG, "path=%s", path);

    ino = dpl_cwd(ctx, ctx->cur_bucket);

    rc = dfs_namei_timeout(ctx, path, ctx->cur_bucket, ino,
                           NULL, NULL, &type);
    LOG(LOG_DEBUG, "path=%s, dpl_namei: %s, type=%s",
        path, dpl_status_str(rc), ftype_to_str(type));
    if (DPL_SUCCESS != rc) {
        LOG(LOG_NOTICE, "dfs_namei_timeout: %s", dpl_status_str(rc));
        goto end;
    }

    /* DPL_EISDIR is tolerated: directories carry no regular attributes. */
    rc = dfs_getattr_timeout(ctx, path, &metadata);
    if (DPL_SUCCESS != rc && DPL_EISDIR != rc) {
        LOG(LOG_ERR, "dfs_getattr_timeout: %s", dpl_status_str(rc));
        goto end;
    }

    /* If this is a directory, update its entries' metadata */
    if (DPL_FTYPE_DIR == type) {
        dirent = pentry_get_dirents(pe);
        if (dirent)
            list_map(dirent, cb_map_dirents, pe);
    }

    /* trylock: skip the update rather than stall the whole sweep. */
    if (pentry_md_trylock(pe))
        goto end;

    if (metadata)
        pentry_set_metadata(pe, metadata);
    pentry_set_atime(pe);

    (void)pentry_md_unlock(pe);
end:
    if (metadata)
        dpl_dict_free(metadata);
}
static void cb_map_dirents(void *elem, void *cb_arg) { char *path = NULL; dpl_dict_t *metadata = NULL; dpl_status_t rc; dpl_ftype_t type; dpl_ino_t ino, parent_ino, obj_ino; pentry_t *pe_dirent = NULL; pentry_t *pe = NULL; path = elem; pe = cb_arg; LOG(LOG_DEBUG, "path='%s', dirent='%s'", path, pentry_get_path(pe)); pe_dirent = g_hash_table_lookup(hash, path); if (! pe_dirent) { LOG(LOG_ERR, "'%s' is not an entry anymore in '%s'", path, pentry_get_path(pe)); goto end; } rc = dfs_namei_timeout(ctx, path, ctx->cur_bucket, ino, &parent_ino, &obj_ino, &type); LOG(LOG_DEBUG, "path=%s, dpl_namei: %s, type=%s, parent_ino=%s, obj_ino=%s", path, dpl_status_str(rc), ftype_to_str(type), parent_ino.key, obj_ino.key); if (DPL_SUCCESS != rc) { LOG(LOG_NOTICE, "dfs_namei_timeout: %s", dpl_status_str(rc)); goto end; } rc = dfs_getattr_timeout(ctx, path, &metadata); if (DPL_SUCCESS != rc && DPL_EISDIR != rc) { LOG(LOG_ERR, "dfs_getattr_timeout: %s", dpl_status_str(rc)); goto end; } if (pentry_md_trylock(pe_dirent)) goto end; if (metadata) pentry_set_metadata(pe_dirent, metadata); pentry_set_atime(pe_dirent); (void)pentry_md_unlock(pe_dirent); end: if (metadata) dpl_dict_free(metadata); }
int dfs_readlink(const char *path, char *buf, size_t bufsiz) { dpl_dict_t *dict = NULL; dpl_status_t rc; int ret; char *dest = NULL; size_t dest_size = 0; rc = dfs_getattr_timeout(ctx, path, &dict); if (DPL_SUCCESS != rc) { LOG(LOG_ERR, "dfs_getattr_timeout: %s", dpl_status_str(rc)); ret = -1; goto err; } if (! dict) { LOG(LOG_ERR, "dpl_getattr: %s", dpl_status_str(rc)); ret = -1; goto err; } dest = dpl_dict_get_value(dict, "symlink"); if (! dest) { LOG(LOG_ERR, "empty link path"); ret = -1; goto err; } dest_size = strlen(dest); if (dest_size > bufsiz) { LOG(LOG_NOTICE, "link length too big: '%s'", dest); dest_size = bufsiz; } if (! strncpy(buf, dest, dest_size)) { LOG(LOG_ERR, "path=%s: strcpy: %s", path, strerror(errno)); ret = -1; goto err; } ret = 0; err: if (dict) dpl_dict_free(dict); LOG(LOG_DEBUG, "%s", path); return 0; }
/*
 * Pump the whole content of file descriptor 'fd' into the droplet
 * virtual file 'vfile', block by block.
 * Returns 0 on success (EOF reached), -1 on read or write error.
 */
int read_write_all_vfile(int fd, dpl_vfile_t *vfile)
{
    const int chunk = WRITE_BLOCK_SIZE;
    char *block = alloca(chunk);

    LOG(LOG_DEBUG, "fd=%d", fd);

    for (;;) {
        int nread = read(fd, block, chunk);

        if (-1 == nread) {
            LOG(LOG_ERR, "read (fd=%d): %s", fd, strerror(errno));
            return -1;
        }

        /* EOF: everything has been forwarded. */
        if (0 == nread)
            return 0;

        dpl_status_t st = dpl_write(vfile, block, nread);
        if (DPL_SUCCESS != st) {
            LOG(LOG_ERR, "dpl_write: %s (%d)", dpl_status_str(st), st);
            return -1;
        }
    }
}
/*
 * FUSE readdir handler: enumerate the entries of 'path' through 'fill'.
 *
 * If the caller already opened the directory, its handle is carried in
 * info->fh; otherwise the directory is opened here on the fly.
 * 'offset' is ignored: the whole directory is always filled in one pass.
 * Returns 0 on success, a dpl status on open failure.
 */
int dfs_readdir(const char *path, void *data, fuse_fill_dir_t fill, off_t offset, struct fuse_file_info *info)
{
    void *dir_hdl = NULL;
    tfs_ctx *ctx = fuse_get_context()->private_data;
    dpl_dirent_t dirent;
    dpl_status_t rc = DPL_FAILURE;
    int ret;

    LOG(LOG_DEBUG, "path=%s, data=%p, fill=%p, offset=%lld, info=%p",
        path, data, (void *) fill, (long long) offset, (void *) info);

    /* Reuse the handle stashed by opendir, if any. */
    if (info)
        dir_hdl = (void *) info->fh;

    LOG(LOG_DEBUG, "dir_hdl=%p", dir_hdl);

    if (! dir_hdl) {
        rc = dfs_opendir_timeout(ctx, path, &dir_hdl);
        if (DPL_SUCCESS != rc) {
            LOG(LOG_ERR, "dfs_opendir_timeout: %s", dpl_status_str(rc));
            /* NOTE(review): 'rc' is a positive dpl status, but FUSE
             * expects a negative errno here — verify the mapping. */
            ret = rc;
            goto err;
        }
        /* NOTE(review): a handle opened here is apparently never closed
         * nor stored back into info->fh — possible handle leak; confirm
         * against the matching releasedir implementation. */
    }

    while (DPL_SUCCESS == dpl_readdir(dir_hdl, &dirent)) {
        /* Skip anonymous entries. */
        if (! strcmp("", dirent.name))
            continue;
        LOG(LOG_DEBUG, "dirent.name=%s", dirent.name);
        /* fill() returning non-zero means the buffer is full. */
        if (0 != fill(data, dirent.name, NULL, 0))
            break;
    }

    ret = 0;
err:
    /* NOTE(review): 'ret' is 0 or a dpl status; dpl_status_str(0) prints
     * the success string, so this log is only approximate. */
    LOG(LOG_DEBUG, "path=%s ret=%s", path, dpl_status_str(ret));
    return ret;
}
/*
 * Delete one migrated source bucket and all the files recorded in its
 * status file. The bucket name is derived in place from the status
 * filename ("bucket.cloudmig" -> "bucket") by temporarily replacing the
 * dot with a NUL terminator; the dot is restored before returning.
 */
static void delete_source_bucket(struct cloudmig_ctx *ctx,
                                 struct transfer_state *bucket_state)
{
    // Ptr used to change from bucket status filename to bucket name
    // ie: the '.' of "file.cloudmig"
    char *dotptr = strrchr(bucket_state->filename, '.');
    struct file_state_entry *fste = NULL;
    dpl_status_t dplret;

    if (dotptr == NULL) // though it should never happen...
        return ;

    cloudmig_log(DEBUG_LVL,
                 "[Deleting Source]: Deleting source bucket"
                 " for status file '%s'...\n",
                 bucket_state->filename);

    // Here the buffer should never be allocated, so map the bucket state.
    if (cloudmig_map_bucket_state(ctx, bucket_state) == EXIT_FAILURE)
        return ;

    // Truncate the filename at the dot to obtain the bare bucket name.
    *dotptr = '\0';

    // loop on the bucket state for each entry, to delete the files.
    // Each record is a file_state_entry header immediately followed by
    // the file name; namlen is stored in network byte order.
    while (bucket_state->next_entry_off < bucket_state->size)
    {
        fste = (void*)(bucket_state->buf + bucket_state->next_entry_off);
        delete_file(ctx, bucket_state->filename, (char*)(fste+1));
        // Next entry...
        bucket_state->next_entry_off += sizeof(*fste) + ntohl(fste->namlen);
    }
    free(bucket_state->buf);
    // NOTE(review): bucket_state->buf is not reset to NULL after free —
    // confirm no later code re-reads it.

    /*
     * Remove bucket now that all of its files were deleted.
     */
    *dotptr = '\0'; // already NUL from above; harmless re-assignment
    dplret = dpl_deletebucket(ctx->src_ctx, bucket_state->filename);
    if (dplret != DPL_SUCCESS)
    {
        /*
         * In case of an http 409 error (EEXIST or ENOENT),
         * do not do anything.
         * Maybe files were added in the bucket in the meantime ?
         * The user will have to manage it himself, it's his fault.
         */
        PRINTERR("%s: Could not remove bucket %s : %s.\n"
                 "The bucket may have been tampered with"
                 " since the migration's start.\n",
                 __FUNCTION__, bucket_state->filename,
                 dpl_status_str(dplret));
    }
    else
        cloudmig_log(DEBUG_LVL,
        "[Deleting Source]: Source bucket '%s' deleted successfully.\n",
        bucket_state->filename);

    // Restore the dot so the status filename is intact for the caller.
    *dotptr = '.';
}
int cmd_setattr(int argc, char **argv) { int ret; char opt; char *path = NULL; dpl_dict_t *metadata = NULL; var_set("status", "1", VAR_CMD_SET, NULL); optind = 0; while ((opt = getopt(argc, argv, usage_getoptstr(setattr_usage))) != -1) switch (opt) { case 'm': metadata = dpl_parse_metadata(optarg); if (NULL == metadata) { fprintf(stderr, "error parsing metadata\n"); return SHELL_CONT; } break ; case '?': default: usage_help(&setattr_cmd); return SHELL_CONT; } argc -= optind; argv += optind; if (1 != argc) { usage_help(&setattr_cmd); return SHELL_CONT; } path = argv[0]; ret = dpl_setattr(ctx, path, metadata); if (DPL_SUCCESS != ret) { fprintf(stderr, "status: %s (%d)\n", dpl_status_str(ret), ret); goto end; } var_set("status", "0", VAR_CMD_SET, NULL); end: if (NULL != metadata) dpl_dict_free(metadata); return SHELL_CONT; }
/*
 * Fetch all the HTTP headers of 'path' into a freshly allocated dict.
 * The caller owns *headersp and has to free it with dpl_dict_free().
 * Returns 0 on success, non-zero on failure (*headersp untouched then).
 */
static int download_headers(char *path, dpl_dict_t **headersp)
{
    int ret;
    dpl_status_t rc;
    dpl_ino_t ino, obj_ino;
    dpl_dict_t *dict = NULL;

    if (! headersp) {
        ret = -1;
        goto err;
    }

    /* BUGFIX: 'ino' was passed uninitialized to dfs_namei_timeout();
     * start path resolution at the cwd like the other callers do. */
    ino = dpl_cwd(ctx, ctx->cur_bucket);

    rc = dfs_namei_timeout(ctx, path, ctx->cur_bucket, ino,
                           NULL, &obj_ino, NULL);

    /* ENOENT is expected for fresh files: log it quietly. */
    if (DPL_ENOENT == rc ) {
        LOG(LOG_INFO, "dfs_namei_timeout: %s", dpl_status_str(rc));
        ret = -1;
        goto err;
    }

    if (DPL_SUCCESS != rc) {
        LOG(LOG_ERR, "dfs_namei_timeout: %s", dpl_status_str(rc));
        ret = -1;
        goto err;
    }

    rc = dfs_head_all_timeout(ctx, ctx->cur_bucket, obj_ino.key,
                              NULL, NULL, &dict);
    if (DPL_SUCCESS != rc) {
        LOG(LOG_ERR, "dpl_head_all_timeout: %s", dpl_status_str(rc));
        /* NOTE(review): returns 1 here but -1 above — confirm callers
         * only test for non-zero before unifying. */
        ret = 1;
        goto err;
    }

    *headersp = dict;
    ret = 0;
err:
    return ret;
}
/*
 * Entry point: print the capabilities of the backend selected by the
 * droplet profile given with -p (default profile otherwise).
 */
int main(int argc, char **argv)
{
    int ret;
    dpl_ctx_t *ctx;
    /* BUGFIX: getopt() returns an int; a char makes the '!= -1' test
     * unreliable on unsigned-char platforms. */
    int opt;
    char *profile = NULL;

    while ((opt = getopt(argc, argv, "p:")) != -1)
        switch (opt) {
        case 'p':
            profile = strdup(optarg);
            assert(NULL != profile);
            break ;
        case '?':
        default:
            usage();
        }

    argc -= optind;
    argv += optind;

    if (argc != 0)
        usage();

    ret = dpl_init();
    if (DPL_SUCCESS != ret) {
        fprintf(stderr, "dpl_init failed\n");
        exit(1);
    }

    ctx = dpl_ctx_new(NULL, profile);
    if (NULL == ctx) {
        fprintf(stderr, "dpl_ctx_new failed\n");
        exit(1);
    }

    ret = dpl_print_capabilities(ctx);
    if (DPL_SUCCESS != ret) {
        fprintf(stderr, "get cap failed: %s (%d)\n",
                dpl_status_str(ret), ret);
        exit(1);
    }

    dpl_ctx_free(ctx);
    dpl_free();
    free(profile);

    exit(0);
}
/*
 * Upload 'blob_buf' (blob_size bytes) to 'path' with a private canned
 * ACL, retrying up to 3 times on failure.
 * 'buffered' and 'block_size' are accepted but currently ignored.
 * Returns DPL_SUCCESS or DPL_FAILURE.
 */
dpl_status_t dpltest_upload_file(dpl_ctx_t *ctx, char *path,
                                 char *blob_buf, int blob_size,
                                 int buffered, int block_size)
{
    dpl_status_t ret, ret2;
    dpl_canned_acl_t canned_acl = DPL_CANNED_ACL_PRIVATE;
    dpl_dict_t *metadata = NULL;
    int retries = 0;
    dpl_sysmd_t sysmd;

    memset(&sysmd, 0, sizeof (sysmd));
    sysmd.mask = DPL_SYSMD_MASK_CANNED_ACL;
    sysmd.canned_acl = canned_acl;

retry:
    if (retries >= 3) {
        /* BUGFIX: this message used to print 'ret', which is only set on
         * the DPL_ENOENT path and was otherwise read uninitialized (UB).
         * 'ret2' always holds the last operation's status here. */
        fprintf(stderr, "too many retries: %s (%d)\n",
                dpl_status_str(ret2), ret2);
        ret = DPL_FAILURE;
        goto end;
    }

    retries++;

    //XXX buffered and block_size ignored for now
    ret2 = dpl_fput(ctx, path, NULL, NULL, NULL, metadata, &sysmd,
                    blob_buf, blob_size);
    if (DPL_SUCCESS != ret2) {
        if (DPL_ENOENT == ret2) {
            ret = DPL_ENOENT;
        }
        goto retry;
    }

    ret = DPL_SUCCESS;

end:
    if (NULL != metadata)
        dpl_dict_free(metadata);

    return ret;
}
void copy_nameless_object_with_new_md() { dpl_async_task_t *atask = NULL; dpl_buf_t *buf = NULL; dpl_status_t ret; dpl_dict_t *metadata = NULL; banner("11 - copy nameless object with new metadata"); metadata = dpl_dict_new(13); if (NULL == metadata) { ret = DPL_ENOMEM; exit(1); } ret = dpl_dict_add(metadata, "bar", "qux", 0); if (DPL_SUCCESS != ret) { fprintf(stderr, "error updating metadatum: %s (%d)\n", dpl_status_str(ret), ret); exit(1); } /* * note: With Dewpoint, it would be possible to copy nameless object into another nameless object. * Does it make sense ? for now we copy it into a named object */ atask = (dpl_async_task_t *) dpl_copy_id_async_prepare(ctx, NULL, //no src bucket id1, //the src resource NULL, //no dst bucket file1_path, //dst resource NULL, //no option DPL_FTYPE_REG, //regular file DPL_COPY_DIRECTIVE_COPY, //rename metadata, //metadata NULL, //no sysmd NULL); //no server side condition if (NULL == atask) { fprintf(stderr, "error preparing task\n"); exit(1); } atask->cb_func = cb_copy_nameless_object_with_new_md; atask->cb_arg = atask; dpl_task_pool_put(pool, (dpl_task_t *) atask); }
/*
 * Delete one file from 'bucket' on the source storage. Failures are
 * logged and otherwise ignored (best effort).
 */
static void delete_file(struct cloudmig_ctx *ctx, char *bucket,
                        char *filename)
{
    dpl_status_t dplret;

    cloudmig_log(DEBUG_LVL, "[Deleting Source]\t Deleting file '%s'...\n",
                 filename);

    dplret = dpl_delete(ctx->src_ctx, bucket, filename, NULL);
    if (dplret != DPL_SUCCESS)
    {
        /* BUGFIX: the format string lacked the trailing newline every
         * sibling PRINTERR message has, gluing this error to the next
         * line of output. */
        PRINTERR("%s: Could not delete the file %s"
                 " from the bucket %s : %s\n",
                 __FUNCTION__, filename, bucket, dpl_status_str(dplret));
    }
}
void update_metadata_named_object() { dpl_async_task_t *atask = NULL; dpl_status_t ret; dpl_option_t option; dpl_dict_t *metadata = NULL; banner("6 - append metadata to existing named object"); metadata = dpl_dict_new(13); if (NULL == metadata) { ret = DPL_ENOMEM; exit(1); } ret = dpl_dict_add(metadata, "foo", "bar", 0); if (DPL_SUCCESS != ret) { fprintf(stderr, "error updating metadatum: %s (%d)\n", dpl_status_str(ret), ret); exit(1); } option.mask = DPL_OPTION_APPEND_METADATA; atask = (dpl_async_task_t *) dpl_put_async_prepare(ctx, NULL, //no bucket file3_path, //the id &option, //option DPL_FTYPE_REG, //regular object NULL, //condition NULL, //range metadata, //the metadata NULL, //no sysmd NULL); //object body dpl_dict_free(metadata); if (NULL == atask) { fprintf(stderr, "error preparing task\n"); exit(1); } atask->cb_func = cb_update_metadata_named_object; atask->cb_arg = atask; dpl_task_pool_put(pool, (dpl_task_t *) atask); }
/*
 * Completion callback for the mkdir test step: abort on failure,
 * otherwise free the task and chain to the next step.
 */
void cb_make_folder(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_SUCCESS != task->ret) {
        fprintf(stderr, "make dir failed: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    dpl_async_task_free(task);

    add_nameless_object();
}
/*
 * Completion callback: the named object creation must have succeeded;
 * abort otherwise, then chain to the "add existing object" step.
 */
void cb_add_nonexisting_named_object(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_SUCCESS != task->ret) {
        fprintf(stderr, "add named object failed: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    dpl_async_task_free(task);

    add_existing_named_object();
}
/*
 * Completion callback: this step is EXPECTED to fail the precondition
 * (object already exists); any other status aborts the test run.
 */
void cb_add_existing_named_object_no_precond(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_EPRECOND != task->ret) {
        fprintf(stderr, "abnormal answer: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    dpl_async_task_free(task);

    add_existing_named_object();
}
/*
 * Completion callback for the append test step: abort on failure,
 * otherwise free the task and chain to the metadata-update step.
 */
void cb_append_to_nonexisting_named_object(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_SUCCESS != task->ret) {
        fprintf(stderr, "abnormal answer: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    dpl_async_task_free(task);

    update_metadata_named_object();
}
/*
 * Completion callback for the rename test step: abort on failure,
 * otherwise free the task and chain to the get-object step.
 */
void cb_rename_object(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_SUCCESS != task->ret) {
        fprintf(stderr, "rename object failed: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    dpl_async_task_free(task);

    get_object();
}
/*
 * Completion callback for the "copy named object with new metadata"
 * step: abort on failure, otherwise free the task and finish the run.
 */
void cb_copy_named_object_with_new_md(void *handle)
{
    dpl_async_task_t *atask = (dpl_async_task_t *) handle;

    if (DPL_SUCCESS != atask->ret) {
        /* BUGFIX: the message said "rename object failed" although this
         * callback completes a COPY (DPL_COPY_DIRECTIVE_COPY) task. */
        fprintf(stderr, "copy object failed: %s (%d)\n",
                dpl_status_str(atask->ret), atask->ret);
        exit(1);
    }

    dpl_async_task_free(atask);

    free_all();
}
int dfs_rename(const char *src, const char *dst) { dpl_status_t rc; dpl_ftype_t type; char *p = NULL; int ret = 0; tfs_ctx *ctx = fuse_get_context()->private_data; LOG(LOG_DEBUG, "src=%s dst=%s", src, dst); if (0 == strcmp(dst, ".")) { p = strrchr(src, '/'); dst = p ? p + 1 : src; } rc = dfs_getattr_timeout(ctx, src, NULL, &type); if (! DPL_SUCCESS == rc && (DPL_ENOENT != rc)) { LOG(LOG_ERR, "dpl_getattr_timeout: %s", dpl_status_str(rc)); ret = -1; goto err; } rc = dfs_rename_timeout(ctx, src, dst, type); if (DPL_SUCCESS != rc) { LOG(LOG_ERR, "dpl_rename_timeout: %s", dpl_status_str(rc)); ret = rc; goto err; } ret = 0; err: LOG(LOG_DEBUG, "src=%s dst=%s ret=%s", src, dst, dpl_status_str(ret)); return ret; }
void copy_named_object_with_new_md() { dpl_async_task_t *atask = NULL; dpl_buf_t *buf = NULL; dpl_status_t ret; dpl_dict_t *metadata = NULL; banner("12 - copy named object with new metadata"); metadata = dpl_dict_new(13); if (NULL == metadata) { ret = DPL_ENOMEM; exit(1); } ret = dpl_dict_add(metadata, "qux", "baz", 0); if (DPL_SUCCESS != ret) { fprintf(stderr, "error updating metadatum: %s (%d)\n", dpl_status_str(ret), ret); exit(1); } atask = (dpl_async_task_t *) dpl_copy_async_prepare(ctx, NULL, //no src bucket file1_path, //the src resource NULL, //no dst bucket file4_path, //dst resource NULL, //no option DPL_FTYPE_REG, //regular file DPL_COPY_DIRECTIVE_COPY, //rename metadata, //metadata NULL, //no sysmd NULL); //no server side condition if (NULL == atask) { fprintf(stderr, "error preparing task\n"); exit(1); } atask->cb_func = cb_copy_named_object_with_new_md; atask->cb_arg = atask; dpl_task_pool_put(pool, (dpl_task_t *) atask); }
void cb_get_metadata(void *handle) { dpl_async_task_t *atask = (dpl_async_task_t *) handle; dpl_dict_var_t *metadatum = NULL; if (DPL_SUCCESS != atask->ret) { fprintf(stderr, "error getting metadata: %s (%d)\n", dpl_status_str(atask->ret), atask->ret); exit(1); } fprintf(stderr, "checking metadata\n"); metadatum = dpl_dict_get(atask->u.head.metadata, "foo"); if (NULL == metadatum) { fprintf(stderr, "missing metadatum\n"); exit(1); } assert(metadatum->val->type == DPL_VALUE_STRING); if (strcmp(dpl_sbuf_get_str(metadatum->val->string), "bar2")) { fprintf(stderr, "bad value in metadatum\n"); exit(1); } metadatum = dpl_dict_get(atask->u.head.metadata, "foo2"); if (NULL == metadatum) { fprintf(stderr, "missing metadatum\n"); exit(1); } assert(metadatum->val->type == DPL_VALUE_STRING); if (strcmp(dpl_sbuf_get_str(metadatum->val->string), "qux")) { fprintf(stderr, "bad value in metadatum\n"); exit(1); } dpl_async_task_free(atask); list_bucket(); }
/*
 * Completion callback for the nameless-object POST: record the id the
 * backend returned into the global 'id1', then chain to the next step.
 */
void cb_add_nameless_object(void *handle)
{
    dpl_async_task_t *task = handle;

    if (DPL_SUCCESS != task->ret) {
        fprintf(stderr, "add object failed: %s (%d)\n",
                dpl_status_str(task->ret), task->ret);
        exit(1);
    }

    fprintf(stderr, "id=%s path=%s\n",
            task->u.post.sysmd_returned.id,
            task->u.post.sysmd_returned.path);

    /* NOTE(review): unbounded strcpy — assumes id1 is at least as large
     * as sysmd id buffers; confirm against the declaration of id1. */
    strcpy(id1, task->u.post.sysmd_returned.id);

    dpl_async_task_free(task);

    add_nonexisting_named_object();
}
/*
 * Completion callback for the head step: dump the object's metadata to
 * stdout, then chain to the nameless-object copy step.
 */
void cb_get_object_meta(void *handle)
{
    dpl_async_task_t *atask = (dpl_async_task_t *) handle;

    if (DPL_SUCCESS != atask->ret) {
        fprintf(stderr, "dpl_get failed: %s (%d)\n",
                dpl_status_str(atask->ret), atask->ret);
        exit(1);
    }

    printf("metadata:\n");
    dpl_dict_print(atask->u.head.metadata, stdout, 0);

    dpl_async_task_free(atask);

    /* Cleanup: dropped the unused locals 'i' and 'metadatum'. */
    copy_nameless_object_with_new_md();
}
/*
 * Write data to a volume using libdroplet.
 *
 * Returns the number of bytes written on success; on droplet failure the
 * status is mapped to a system errno-style return. Returns -1 with
 * errno = EBADF when no virtual file is open.
 */
ssize_t object_store_device::d_write(int fd, const void *buffer, size_t count)
{
   if (m_vfd) {
      dpl_status_t status;

      status = dpl_pwrite(m_vfd, (char *)buffer, count, m_offset);
      switch (status) {
      case DPL_SUCCESS:
         m_offset += count;
         return count;
      default:
         /* BUGFIX: the message blamed dpl_write() although the call made
          * here is dpl_pwrite(). */
         Mmsg2(errmsg, _("Failed to write %s using dpl_pwrite(): ERR=%s.\n"),
               getVolCatName(), dpl_status_str(status));
         return droplet_errno_to_system_errno(status);
      }
   } else {
      errno = EBADF;
      return -1;
   }
}
void cb_head_object(void *handle) { dpl_async_task_t *atask = (dpl_async_task_t *) handle; if (DPL_SUCCESS != atask->ret) { fprintf(stderr, "getattr error on %s: %s (%d)\n", atask->u.head.resource, dpl_status_str(atask->ret), atask->ret); exit(1); } fprintf(stderr, "file %s: size=%ld mtime=%lu\n", atask->u.head.resource, atask->u.head.sysmd.size, atask->u.head.sysmd.mtime); //dpl_dict_print(atask->u.head.metadata, stderr, 5); dpl_async_task_free(atask); pthread_mutex_lock(&list_lock); n_ok++; pthread_cond_signal(&list_cond); pthread_mutex_unlock(&list_lock); }
/*
 * Completion callback for the partial (ranged) GET step: print the
 * received length and metadata, then chain to the get-object-meta step.
 */
void cb_get_named_object_partially(void *handle)
{
    dpl_async_task_t *atask = (dpl_async_task_t *) handle;

    if (DPL_SUCCESS != atask->ret) {
        fprintf(stderr, "dpl_get failed: %s (%d)\n",
                dpl_status_str(atask->ret), atask->ret);
        exit(1);
    }

    printf("data len: %d\n", dpl_buf_size(atask->u.get.buf));
    printf("metadata:\n");
    dpl_dict_print(atask->u.get.metadata, stdout, 0);

    dpl_async_task_free(atask);

    /* Cleanup: dropped the unused locals 'i' and 'metadatum'. */
    get_object_meta();
}
void cb_get_nameless_object(void *handle) { dpl_async_task_t *atask = (dpl_async_task_t *) handle; int i; dpl_dict_var_t *metadatum = NULL; if (DPL_SUCCESS != atask->ret) { fprintf(stderr, "dpl_get failed: %s (%d)\n", dpl_status_str(atask->ret), atask->ret); exit(1); } printf("data:\n"); write(1, dpl_buf_ptr(atask->u.get.buf), MIN(dpl_buf_size(atask->u.get.buf), 10)); printf("...\nmetadata:\n"); dpl_dict_print(atask->u.get.metadata, stdout, 0); dpl_async_task_free(atask); get_named_object(); }