/**
 * _export_dir_deinit -- Free memory used by an export dir
 *
 * @dir : Pointer to the export directory to free
 *
 * Not for external use.
 */
static void
_export_dir_deinit(struct export_dir *dir)
{
    GF_VALIDATE_OR_GOTO(GF_EXP, dir, out);
    GF_FREE(dir->dir_name);

    _exp_dict_destroy(dir->netgroups);
    dict_unref(dir->netgroups);

    _exp_dict_destroy(dir->hosts);
    dict_unref(dir->hosts);

    GF_FREE(dir);
out:
    return;
}
/**
 * _exports_file_init -- Initialize an exports file structure.
 *
 * @return : success: Pointer to an allocated exports file struct
 *           failure: NULL
 *
 * Not for external use.
 */
struct exports_file *
_exports_file_init()
{
    struct exports_file *file = NULL;

    file = GF_CALLOC(1, sizeof(*file), gf_common_mt_nfs_exports);
    if (!file) {
        gf_msg(GF_EXP, GF_LOG_CRITICAL, ENOMEM, NFS_MSG_NO_MEMORY,
               "Failed to allocate file struct!");
        goto out;
    }

    file->exports_dict = dict_new();
    file->exports_map = dict_new();
    if (!file->exports_dict || !file->exports_map) {
        gf_msg(GF_EXP, GF_LOG_CRITICAL, ENOMEM, NFS_MSG_NO_MEMORY,
               "Failed to allocate dict!");
        goto free_and_out;
    }

    goto out;

free_and_out:
    /* Drop whichever dict did get allocated before freeing the struct. */
    if (file->exports_dict)
        dict_unref(file->exports_dict);
    if (file->exports_map)
        dict_unref(file->exports_map);
    GF_FREE(file);
    file = NULL;
out:
    return file;
}
static dict_t *
make_seq_dict (int argc, char **argv)
{
        char index[] = "4294967296"; // 1<<32
        int i = 0;
        int ret = 0;
        dict_t *dict = dict_new ();

        if (!dict)
                return NULL;

        for (i = 0; i < argc; i++) {
                snprintf(index, sizeof(index), "%d", i);
                ret = dict_set_str (dict, index, argv[i]);
                if (ret == -1)
                        break;
        }

        if (ret) {
                dict_unref (dict);
                dict = NULL;
        }

        return dict;
}
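/*
 * Illustrative only: a minimal, hypothetical caller of make_seq_dict(),
 * assuming the stock GlusterFS dict API (dict_get_str, dict_unref) and the
 * CLI's cli_out() helper. The function name and the key "0" used here are
 * illustrative; make_seq_dict() keys entries by their decimal position
 * ("0", "1", ...).
 */
static void
make_seq_dict_usage_sketch (int argc, char **argv)
{
        char   *value = NULL;
        dict_t *dict  = make_seq_dict (argc, argv);

        if (!dict)
                return;

        /* Entries are keyed by index, so "0" is the first argument. */
        if (dict_get_str (dict, "0", &value) == 0)
                cli_out ("first entry: %s", value);

        /* make_seq_dict() returned the only reference; release it here. */
        dict_unref (dict);
}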
/*
 * Make a copy of dict "orig".  Shallow if "deep" is FALSE.
 * The refcount of the new dict is set to 1.
 * See item_copy() for "copyID".
 * Returns NULL when out of memory.
 */
dict_T *
dict_copy(dict_T *orig, int deep, int copyID)
{
    dict_T	*copy;
    dictitem_T	*di;
    int		todo;
    hashitem_T	*hi;

    if (orig == NULL)
	return NULL;

    copy = dict_alloc();
    if (copy != NULL)
    {
	if (copyID != 0)
	{
	    orig->dv_copyID = copyID;
	    orig->dv_copydict = copy;
	}
	todo = (int)orig->dv_hashtab.ht_used;
	for (hi = orig->dv_hashtab.ht_array; todo > 0 && !got_int; ++hi)
	{
	    if (!HASHITEM_EMPTY(hi))
	    {
		--todo;

		di = dictitem_alloc(hi->hi_key);
		if (di == NULL)
		    break;
		if (deep)
		{
		    if (item_copy(&HI2DI(hi)->di_tv, &di->di_tv, deep,
							       copyID) == FAIL)
		    {
			vim_free(di);
			break;
		    }
		}
		else
		    copy_tv(&HI2DI(hi)->di_tv, &di->di_tv);
		if (dict_add(copy, di) == FAIL)
		{
		    dictitem_free(di);
		    break;
		}
	    }
	}

	++copy->dv_refcount;
	if (todo > 0)
	{
	    dict_unref(copy);
	    copy = NULL;
	}
    }

    return copy;
}
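/*
 * Illustrative only: a hedged sketch of taking a deep copy of a Vim dict and
 * dropping it again with dict_unref().  It assumes only Vim's internal dict
 * API as used above; a copyID of 0 simply skips the cycle tracking that
 * dict_copy() performs for non-zero IDs.  The function name is hypothetical.
 */
static void
dict_copy_usage_sketch(dict_T *orig)
{
    dict_T *copy = dict_copy(orig, TRUE, 0);	/* deep copy, no copyID */

    if (copy == NULL)
	return;		/* out of memory */

    /* ... use "copy" here ... */

    dict_unref(copy);	/* releases the reference dict_copy() returned */
}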
int
cli_cmd_volume_remove_brick_cbk (struct cli_state *state,
                                 struct cli_cmd_word *word, const char **words,
                                 int wordcount)
{
        int                      ret = -1;
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        dict_t                  *options = NULL;
        gf_answer_t              answer = GF_ANSWER_NO;
        int                      sent = 0;
        int                      parse_error = 0;
        const char *question = "Removing brick(s) can result in data loss. "
                               "Do you want to Continue?";

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame)
                goto out;

        ret = cli_cmd_volume_remove_brick_parse (words, wordcount, &options);
        if (ret) {
                cli_usage_out (word->pattern);
                parse_error = 1;
                goto out;
        }

        answer = cli_cmd_get_confirmation (state, question);
        if (GF_ANSWER_NO == answer) {
                ret = 0;
                goto out;
        }

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK];

        if (proc->fn) {
                ret = proc->fn (frame, THIS, options);
        }

out:
        if (ret) {
                cli_cmd_sent_status_get (&sent);
                if ((sent == 0) && (parse_error == 0))
                        cli_out ("Volume remove-brick failed");
        }

        if (options)
                dict_unref (options);

        return ret;
}
/**
 * exp_file_deinit -- Free memory used by an export file
 *
 * @expfile : Pointer to the exports file to free
 *
 * Externally usable.
 */
void
exp_file_deinit(struct exports_file *expfile)
{
    if (!expfile)
        goto out;

    if (expfile->exports_dict) {
        dict_foreach(expfile->exports_dict, _exp_file_dict_destroy, NULL);
        dict_unref(expfile->exports_dict);
    }

    if (expfile->exports_map) {
        dict_foreach(expfile->exports_map, _exp_file_dict_destroy, NULL);
        dict_unref(expfile->exports_map);
    }

    GF_FREE(expfile->filename);
    GF_FREE(expfile);
out:
    return;
}
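/*
 * Illustrative only: a hedged sketch of the intended pairing between
 * _exports_file_init() and exp_file_deinit().  The parsing that would
 * normally populate exports_dict/exports_map is elided; only the
 * allocate-then-release lifecycle is shown, and the function name here is
 * hypothetical.
 */
static void
exports_file_lifecycle_sketch(void)
{
    struct exports_file *file = _exports_file_init();

    if (!file)
        return;

    /* ... parse an exports file and fill file->exports_dict here ... */

    /* exp_file_deinit() destroys both dicts, the filename and the struct. */
    exp_file_deinit(file);
}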
int
cli_cmd_quota_cbk (struct cli_state *state, struct cli_cmd_word *word,
                   const char **words, int wordcount)
{
        int                      ret       = 0;
        int                      parse_err = 0;
        int32_t                  type      = 0;
        rpc_clnt_procedure_t    *proc      = NULL;
        call_frame_t            *frame     = NULL;
        dict_t                  *options   = NULL;
        gf_answer_t              answer    = GF_ANSWER_NO;
        const char *question = "Disabling quota will delete all the quota "
                               "configuration. Do you want to continue?";

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_QUOTA];
        if (proc == NULL) {
                ret = -1;
                goto out;
        }

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame) {
                ret = -1;
                goto out;
        }

        ret = cli_cmd_quota_parse (words, wordcount, &options);
        if (ret < 0) {
                cli_usage_out (word->pattern);
                parse_err = 1;
                goto out;
        } else if (dict_get_int32 (options, "type", &type) == 0 &&
                   type == GF_QUOTA_OPTION_TYPE_DISABLE) {
                answer = cli_cmd_get_confirmation (state, question);
                if (answer == GF_ANSWER_NO)
                        goto out;
        }

        if (proc->fn)
                ret = proc->fn (frame, THIS, options);

out:
        if (options)
                dict_unref (options);

        if (ret && parse_err == 0)
                cli_out ("Quota command failed");

        return ret;
}
compound_args_t*
compound_fop_alloc (int length, glusterfs_compound_fop_t fop, dict_t *xdata)
{
        compound_args_t *args = NULL;

        /* Allocate the struct itself, not just the size of the pointer. */
        args = GF_CALLOC (1, sizeof (*args), gf_mt_compound_req_t);

        if (!args)
                return NULL;

        /* fop_enum can be used by xlators to see which fops are
         * included as part of compound fop. This will help in checking
         * for compatibility or support without going through the entire
         * fop list packed.
         */
        args->fop_enum = fop;
        args->fop_length = length;

        args->enum_list = GF_CALLOC (length, sizeof (*args->enum_list),
                                     gf_common_mt_int);
        if (!args->enum_list)
                goto out;

        args->req_list = GF_CALLOC (length, sizeof (*args->req_list),
                                    gf_mt_default_args_t);
        if (!args->req_list)
                goto out;

        if (xdata) {
                args->xdata = dict_copy_with_ref (xdata, args->xdata);
                if (!args->xdata)
                        goto out;
        }

        return args;
out:
        if (args->xdata)
                dict_unref (args->xdata);

        if (args->req_list)
                GF_FREE (args->req_list);

        if (args->enum_list)
                GF_FREE (args->enum_list);

        if (args)
                GF_FREE (args);

        return NULL;
}
static void
cluster_marker_unwind (call_frame_t *frame, char *key, void *value,
                       size_t size, dict_t *dict)
{
        xl_marker_local_t *local    = frame->local;
        int                ret      = 0;
        int32_t            op_ret   = 0;
        int32_t            op_errno = 0;
        gf_boolean_t       unref    = _gf_false;

        frame->local = local->xl_local;

        if (local->count[MCNT_FOUND]) {
                if (!dict) {
                        dict = dict_new();
                        if (dict) {
                                unref = _gf_true;
                        } else {
                                op_ret = -1;
                                op_errno = ENOMEM;
                                goto out;
                        }
                }

                ret = dict_set_static_bin (dict, key, value, size);
                if (ret) {
                        op_ret = -1;
                        op_errno = ENOMEM;
                        goto out;
                }
        }

        op_errno = evaluate_marker_results (local->gauge, local->count);
        if (op_errno)
                op_ret = -1;

out:
        if (local->xl_specf_unwind) {
                local->xl_specf_unwind (frame, op_ret, op_errno, dict, NULL);
        } else {
                STACK_UNWIND_STRICT (getxattr, frame, op_ret, op_errno,
                                     dict, NULL);
        }

        GF_FREE (local);

        if (unref)
                dict_unref (dict);
}
/**
 * auth_cache_purge -- Purge the dict in the cache and create a new empty one.
 *
 * @cache: Cache to purge
 *
 */
void
auth_cache_purge (struct auth_cache *cache)
{
        dict_t *new_cache_dict = NULL;
        dict_t *old_cache_dict = NULL;

        /* Validate the cache before dereferencing it. */
        if (!cache)
                goto out;

        new_cache_dict = dict_new ();
        if (!new_cache_dict)
                goto out;

        old_cache_dict = cache->cache_dict;

        /* Atomically swap in the fresh dict, then drop the old one. */
        (void)__sync_lock_test_and_set (&cache->cache_dict, new_cache_dict);

        dict_unref (old_cache_dict);
out:
        return;
}
void
stripe_local_wipe (stripe_local_t *local)
{
        if (!local)
                goto out;

        loc_wipe (&local->loc);
        loc_wipe (&local->loc2);

        if (local->fd)
                fd_unref (local->fd);

        if (local->inode)
                inode_unref (local->inode);

        if (local->xattr)
                dict_unref (local->xattr);

        if (local->xdata)
                dict_unref (local->xdata);

out:
        return;
}
int
cli_cmd_volume_replace_brick_cbk (struct cli_state *state,
                                  struct cli_cmd_word *word,
                                  const char **words,
                                  int wordcount)
{
        int                      ret = -1;
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        dict_t                  *options = NULL;
        int                      sent = 0;
        int                      parse_error = 0;

#ifdef GF_SOLARIS_HOST_OS
        cli_out ("Command not supported on Solaris");
        goto out;
#endif
        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK];

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame)
                goto out;

        ret = cli_cmd_volume_replace_brick_parse (words, wordcount, &options);
        if (ret) {
                cli_usage_out (word->pattern);
                parse_error = 1;
                goto out;
        }

        if (proc->fn) {
                ret = proc->fn (frame, THIS, options);
        }

out:
        if (options)
                dict_unref (options);

        if (ret) {
                cli_cmd_sent_status_get (&sent);
                if ((sent == 0) && (parse_error == 0))
                        cli_out ("Volume replace-brick failed");
        }

        return ret;
}
int
cli_cmd_uuid_get_cbk (struct cli_state *state, struct cli_cmd_word *word,
                      const char **words, int wordcount)
{
        int                      ret = -1;
        int                      sent = 0;
        int                      parse_error = 0;
        dict_t                  *dict = NULL;
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        cli_local_t             *local = NULL;
        xlator_t                *this = NULL;

        this = THIS;
        if (wordcount != 3) {
                cli_usage_out (word->pattern);
                parse_error = 1;
                goto out;
        }

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_UUID_GET];
        frame = create_frame (this, this->ctx->pool);
        if (!frame)
                goto out;

        dict = dict_new ();
        if (!dict)
                goto out;

        CLI_LOCAL_INIT (local, words, frame, dict);
        if (proc->fn)
                ret = proc->fn (frame, this, dict);

out:
        if (ret) {
                cli_cmd_sent_status_get (&sent);
                if ((sent == 0) && (parse_error == 0))
                        cli_out ("uuid get failed");
        }

        if (dict)
                dict_unref (dict);

        CLI_STACK_DESTROY (frame);
        return ret;
}
int
cli_cmd_volume_top_cbk (struct cli_state *state, struct cli_cmd_word *word,
                        const char **words, int wordcount)
{
        int                      ret = -1;
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        dict_t                  *options = NULL;
        int                      sent = 0;
        int                      parse_error = 0;

        ret = cli_cmd_volume_top_parse (words, wordcount, &options);
        if (ret) {
                parse_error = 1;
                cli_usage_out (word->pattern);
                goto out;
        }

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_TOP_VOLUME];

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame)
                goto out;

        if (proc->fn) {
                ret = proc->fn (frame, THIS, options);
        }

out:
        if (options)
                dict_unref (options);

        if (ret) {
                cli_cmd_sent_status_get (&sent);
                if ((sent == 0) && (parse_error == 0))
                        cli_out ("Volume top failed");
        }

        return ret;
}
int
glfsh_get_index_dir_loc (loc_t *rootloc, xlator_t *xl, loc_t *dirloc,
                         int32_t *op_errno)
{
        void        *index_gfid = NULL;
        int          ret = 0;
        dict_t      *xattr = NULL;
        struct iatt  iattr = {0};
        struct iatt  parent = {0};

        ret = syncop_getxattr (xl, rootloc, &xattr, GF_XATTROP_INDEX_GFID);
        if (ret < 0) {
                *op_errno = -ret;
                goto out;
        }

        ret = dict_get_ptr (xattr, GF_XATTROP_INDEX_GFID, &index_gfid);
        if (ret < 0) {
                *op_errno = EINVAL;
                goto out;
        }

        uuid_copy (dirloc->gfid, index_gfid);
        dirloc->path = "";
        dirloc->inode = inode_new (rootloc->inode->table);

        ret = syncop_lookup (xl, dirloc, NULL, &iattr, NULL, &parent);
        dirloc->path = NULL;
        if (ret < 0) {
                *op_errno = -ret;
                goto out;
        }

        ret = glfsh_link_inode_update_loc (dirloc, &iattr);
        if (ret)
                goto out;

        glfs_loc_touchup (dirloc);

        ret = 0;
out:
        if (xattr)
                dict_unref (xattr);

        return ret;
}
int
cli_cmd_umount_cbk (struct cli_state *state, struct cli_cmd_word *word,
                    const char **words, int wordcount)
{
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        int                      ret = -1;
        dict_t                  *dict = NULL;

        if (!(wordcount == 3 ||
              (wordcount == 4 && strcmp (words[3], "lazy") == 0))) {
                cli_usage_out (word->pattern);
                goto out;
        }

        dict = dict_new ();
        if (!dict)
                goto out;
        ret = dict_set_str (dict, "path", (char *)words[2]);
        if (ret != 0)
                goto out;
        ret = dict_set_int32 (dict, "lazy", wordcount == 4);
        if (ret != 0)
                goto out;

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_UMOUNT];
        if (proc && proc->fn) {
                frame = create_frame (THIS, THIS->ctx->pool);
                if (!frame)
                        goto out;
                ret = proc->fn (frame, THIS, dict);
        }

out:
        if (dict)
                dict_unref (dict);

        if (!proc && ret)
                cli_out ("Umount command failed");

        return ret;
}
int
cli_cmd_pmap_b2p_cbk (struct cli_state *state, struct cli_cmd_word *word,
                      const char **words, int wordcount)
{
        int                      ret = -1;
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        dict_t                  *dict = NULL;

        frame = create_frame (THIS, THIS->ctx->pool);
        if (!frame)
                goto out;

        dict = dict_new ();
        if (!dict)
                goto out;

        if (wordcount != 4) {
                cli_usage_out (word->pattern);
                goto out;
        }

        ret = dict_set_str (dict, "brick", (char *)words[3]);
        if (ret)
                goto out;

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PMAP_PORTBYBRICK];

        if (proc->fn) {
                ret = proc->fn (frame, THIS, dict);
        }

out:
        /* Release our reference in every case, not only on the error path. */
        if (dict)
                dict_unref (dict);

        if (!proc && ret) {
                if (wordcount > 1)
                        cli_out ("Fetching port for brick %s failed",
                                 (char *)words[3]);
        }

        return ret;
}
int
cli_cmd_mount_cbk (struct cli_state *state, struct cli_cmd_word *word,
                   const char **words, int wordcount)
{
        rpc_clnt_procedure_t    *proc = NULL;
        call_frame_t            *frame = NULL;
        int                      ret = -1;
        dict_t                  *dict = NULL;
        void                    *dataa[] = {NULL, NULL};

        if (wordcount < 4) {
                cli_usage_out (word->pattern);
                goto out;
        }

        dict = make_seq_dict (wordcount - 3, (char **)words + 3);
        if (!dict)
                goto out;

        dataa[0] = (void *)words[2];
        dataa[1] = dict;

        proc = &cli_rpc_prog->proctable[GLUSTER_CLI_MOUNT];
        if (proc && proc->fn) {
                frame = create_frame (THIS, THIS->ctx->pool);
                if (!frame)
                        goto out;
                ret = proc->fn (frame, THIS, dataa);
        }

out:
        if (dict)
                dict_unref (dict);

        if (!proc && ret)
                cli_out ("Mount command failed");

        return ret;
}
/// Free fmark_T item
void free_fmark(fmark_T fm)
{
  dict_unref(fm.additional_data);
}
static int
glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
{
        int32_t                  ret = -1;
        gd1_mgmt_v3_post_val_req op_req = {{0},};
        xlator_t                *this = NULL;
        char                    *op_errstr = NULL;
        dict_t                  *dict = NULL;
        dict_t                  *rsp_dict = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (req);

        ret = xdr_to_generic (req->msg[0], &op_req,
                              (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL,
                        "Failed to decode post validation "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (op_req.uuid));
                ret = -1;
                goto out;
        }

        dict = dict_new ();
        if (!dict)
                goto out;

        ret = dict_unserialize (op_req.dict.dict_val,
                                op_req.dict.dict_len, &dict);
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_DICT_UNSERIALIZE_FAIL,
                        "failed to unserialize the dictionary");
                goto out;
        }

        rsp_dict = dict_new ();
        if (!rsp_dict) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_DICT_CREATE_FAIL,
                        "Failed to get new dictionary");
                return -1;
        }

        ret = gd_mgmt_v3_post_validate_fn (op_req.op, op_req.op_ret, dict,
                                           &op_errstr, rsp_dict);

        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_POST_VALIDATION_FAIL,
                        "Post Validation failed on operation %s",
                        gd_op_list[op_req.op]);
        }

        ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
                                                        ret, op_errstr,
                                                        rsp_dict);
        if (ret) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_MGMTV3_OP_RESP_FAIL,
                        "Failed to send Post Validation "
                        "response for operation %s",
                        gd_op_list[op_req.op]);
                goto out;
        }

out:
        if (op_errstr && (strcmp (op_errstr, "")))
                GF_FREE (op_errstr);

        free (op_req.dict.dict_val);

        if (dict)
                dict_unref (dict);

        if (rsp_dict)
                dict_unref (rsp_dict);

        /* Return 0 from handler to avoid double deletion of req obj */
        return 0;
}
static int
glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
{
        gd1_mgmt_v3_unlock_req  lock_req = {{0},};
        int32_t                 ret = -1;
        glusterd_op_lock_ctx_t *ctx = NULL;
        xlator_t               *this = NULL;
        gf_boolean_t            is_synctasked = _gf_false;
        gf_boolean_t            free_ctx = _gf_false;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (req);

        ret = xdr_to_generic (req->msg[0], &lock_req,
                              (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode unlock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        gf_msg_debug (this->name, 0, "Received volume unlock req "
                      "from uuid: %s", uuid_utoa (lock_req.uuid));

        if (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (lock_req.uuid));
                ret = -1;
                goto out;
        }

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
        if (!ctx) {
                ret = -1;
                goto out;
        }

        gf_uuid_copy (ctx->uuid, lock_req.uuid);
        ctx->req = req;

        ctx->dict = dict_new ();
        if (!ctx->dict) {
                ret = -1;
                goto out;
        }

        ret = dict_unserialize (lock_req.dict.dict_val,
                                lock_req.dict.dict_len, &ctx->dict);
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_DICT_UNSERIALIZE_FAIL,
                        "failed to unserialize the dictionary");
                goto out;
        }

        is_synctasked = dict_get_str_boolean (ctx->dict,
                                              "is_synctasked", _gf_false);
        if (is_synctasked) {
                ret = glusterd_syctasked_mgmt_v3_unlock (req, &lock_req, ctx);
                if (ret) {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_UNLOCK_FAIL,
                                "Failed to release mgmt_v3_locks");
                        /* Ignore the return code, as it shouldn't be
                         * propagated from the handler function so as to
                         * avoid double deletion of the req
                         */
                        ret = 0;
                }
                /* The above function does not take ownership of ctx.
                 * Therefore we need to free the ctx explicitly.
                 */
                free_ctx = _gf_true;
        } else {
                /* Shouldn't ignore the return code here, and it should
                 * be propagated from the handler function as in failure
                 * case it doesn't delete the req object
                 */
                ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
                                                                ctx);
                if (ret)
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_UNLOCK_FAIL,
                                "Failed to release mgmt_v3_locks");
        }

out:
        if (ctx && (ret || free_ctx)) {
                if (ctx->dict)
                        dict_unref (ctx->dict);
                GF_FREE (ctx);
        }

        free (lock_req.dict.dict_val);

        gf_msg_trace (this->name, 0, "Returning %d", ret);
        return ret;
}
int
quotad_aggregator_getlimit (rpcsvc_request_t *req)
{
        call_frame_t              *frame = NULL;
        gf_cli_req                 cli_req = {{0},};
        gf_cli_rsp                 cli_rsp = {0};
        gfs3_lookup_req            args  = {{0,},};
        gfs3_lookup_rsp            rsp   = {0,};
        quotad_aggregator_state_t *state = NULL;
        xlator_t                  *this  = NULL;
        dict_t                    *dict  = NULL;
        int                        ret   = -1, op_errno = 0;
        char                      *gfid_str = NULL;
        uuid_t                     gfid = {0};

        GF_VALIDATE_OR_GOTO ("quotad-aggregator", req, err);

        this = THIS;

        ret = xdr_to_generic (req->msg[0], &cli_req,
                              (xdrproc_t)xdr_gf_cli_req);
        if (ret < 0) {
                //failed to decode msg;
                gf_log ("", GF_LOG_ERROR, "xdr decoding error");
                req->rpc_err = GARBAGE_ARGS;
                goto err;
        }

        if (cli_req.dict.dict_len) {
                dict = dict_new ();
                ret = dict_unserialize (cli_req.dict.dict_val,
                                        cli_req.dict.dict_len, &dict);
                if (ret < 0) {
                        gf_log (this->name, GF_LOG_ERROR, "Failed to "
                                "unserialize req-buffer to dictionary");
                        goto err;
                }
        }

        ret = dict_get_str (dict, "gfid", &gfid_str);
        if (ret) {
                goto err;
        }

        uuid_parse ((const char *)gfid_str, gfid);

        frame = quotad_aggregator_get_frame_from_req (req);
        if (frame == NULL) {
                rsp.op_errno = ENOMEM;
                goto err;
        }
        state = frame->root->state;
        state->xdata = dict;

        ret = dict_set_int32 (state->xdata, QUOTA_LIMIT_KEY, 42);
        if (ret)
                goto err;

        ret = dict_set_int32 (state->xdata, QUOTA_SIZE_KEY, 42);
        if (ret)
                goto err;

        ret = dict_set_int32 (state->xdata, GET_ANCESTRY_PATH_KEY, 42);
        if (ret)
                goto err;

        memcpy (&args.gfid, &gfid, 16);

        args.bname           = alloca (req->msg[0].iov_len);
        args.xdata.xdata_val = alloca (req->msg[0].iov_len);

        ret = qd_nameless_lookup (this, frame, &args, state->xdata,
                                  quotad_aggregator_getlimit_cbk);
        if (ret) {
                rsp.op_errno = ret;
                goto err;
        }

        return ret;
err:
        cli_rsp.op_ret = -1;
        cli_rsp.op_errno = op_errno;
        cli_rsp.op_errstr = "";

        quotad_aggregator_getlimit_cbk (this, frame, &cli_rsp);
        dict_unref (dict);
        return ret;
}
struct glfs_object *
glfs_h_mknod (struct glfs *fs, struct glfs_object *parent, const char *path,
              mode_t mode, dev_t dev, struct stat *stat)
{
        int                 ret = -1;
        xlator_t           *subvol = NULL;
        inode_t            *inode = NULL;
        loc_t               loc = {0, };
        struct iatt         iatt = {0, };
        uuid_t              gfid;
        dict_t             *xattr_req = NULL;
        struct glfs_object *object = NULL;

        /* validate in args */
        if ((fs == NULL) || (parent == NULL) || (path == NULL)) {
                errno = EINVAL;
                return NULL;
        }

        __glfs_entry_fs (fs);

        /* get the active volume */
        subvol = glfs_active_subvol (fs);
        if (!subvol) {
                ret = -1;
                errno = EIO;
                goto out;
        }

        /* get/refresh the in arg objects inode in correlation to the xlator */
        inode = glfs_resolve_inode (fs, subvol, parent);
        if (!inode) {
                errno = ESTALE;
                goto out;
        }

        xattr_req = dict_new ();
        if (!xattr_req) {
                ret = -1;
                errno = ENOMEM;
                goto out;
        }

        uuid_generate (gfid);
        ret = dict_set_static_bin (xattr_req, "gfid-req", gfid, 16);
        if (ret) {
                ret = -1;
                errno = ENOMEM;
                goto out;
        }

        GLFS_LOC_FILL_PINODE (inode, loc, ret, errno, out, path);

        /* fop/op */
        ret = syncop_mknod (subvol, &loc, mode, dev, xattr_req, &iatt);
        DECODE_SYNCOP_ERR (ret);

        /* populate out args */
        if (ret == 0) {
                ret = glfs_loc_link (&loc, &iatt);
                if (ret != 0) {
                        goto out;
                }

                if (stat)
                        glfs_iatt_to_stat (fs, &iatt, stat);

                ret = glfs_create_object (&loc, &object);
        }
out:
        if (ret && object != NULL) {
                glfs_h_close (object);
                object = NULL;
        }

        loc_wipe(&loc);

        if (inode)
                inode_unref (inode);

        if (xattr_req)
                dict_unref (xattr_req);

        glfs_subvol_done (fs, subvol);

        return object;
}
static int
glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
{
        gd1_mgmt_v3_unlock_req  lock_req = {{0},};
        int32_t                 ret = -1;
        glusterd_op_lock_ctx_t *ctx = NULL;
        glusterd_peerinfo_t    *peerinfo = NULL;
        xlator_t               *this = NULL;
        gf_boolean_t            is_synctasked = _gf_false;
        gf_boolean_t            free_ctx = _gf_false;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (req);

        ret = xdr_to_generic (req->msg[0], &lock_req,
                              (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
        if (ret < 0) {
                gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
                "from uuid: %s", uuid_utoa (lock_req.uuid));

        if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (lock_req.uuid));
                ret = -1;
                goto out;
        }

        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
        if (!ctx) {
                ret = -1;
                goto out;
        }

        uuid_copy (ctx->uuid, lock_req.uuid);
        ctx->req = req;

        ctx->dict = dict_new ();
        if (!ctx->dict) {
                ret = -1;
                goto out;
        }

        ret = dict_unserialize (lock_req.dict.dict_val,
                                lock_req.dict.dict_len, &ctx->dict);
        if (ret) {
                gf_log (this->name, GF_LOG_WARNING,
                        "failed to unserialize the dictionary");
                goto out;
        }

        is_synctasked = dict_get_str_boolean (ctx->dict,
                                              "is_synctasked", _gf_false);
        if (is_synctasked) {
                ret = glusterd_syctasked_mgmt_v3_unlock (req, &lock_req, ctx);
                /* The above function does not take ownership of ctx.
                 * Therefore we need to free the ctx explicitly.
                 */
                free_ctx = _gf_true;
        } else {
                ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
                                                                ctx);
        }

out:
        /* Only touch ctx if it was actually allocated; the early error
         * paths above reach here with ctx still NULL.
         */
        if (ctx && (ret || free_ctx)) {
                if (ctx->dict)
                        dict_unref (ctx->dict);
                GF_FREE (ctx);
        }

        free (lock_req.dict.dict_val);

        gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
        return ret;
}
static int
glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
{
        int32_t                  ret = -1;
        gd1_mgmt_v3_post_val_req op_req = {{0},};
        glusterd_peerinfo_t     *peerinfo = NULL;
        xlator_t                *this = NULL;
        char                    *op_errstr = NULL;
        dict_t                  *dict = NULL;
        dict_t                  *rsp_dict = NULL;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (req);

        ret = xdr_to_generic (req->msg[0], &op_req,
                              (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
        if (ret < 0) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Failed to decode post validation "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
                gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (op_req.uuid));
                ret = -1;
                goto out;
        }

        dict = dict_new ();
        if (!dict)
                goto out;

        ret = dict_unserialize (op_req.dict.dict_val,
                                op_req.dict.dict_len, &dict);
        if (ret) {
                gf_log (this->name, GF_LOG_WARNING,
                        "failed to unserialize the dictionary");
                goto out;
        }

        rsp_dict = dict_new ();
        if (!rsp_dict) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Failed to get new dictionary");
                return -1;
        }

        ret = gd_mgmt_v3_post_validate_fn (op_req.op, op_req.op_ret, dict,
                                           &op_errstr, rsp_dict);

        if (ret) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Post Validation failed on operation %s",
                        gd_op_list[op_req.op]);
        }

        ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
                                                        ret, op_errstr,
                                                        rsp_dict);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Failed to send Post Validation "
                        "response for operation %s",
                        gd_op_list[op_req.op]);
                goto out;
        }

out:
        if (op_errstr && (strcmp (op_errstr, "")))
                GF_FREE (op_errstr);

        free (op_req.dict.dict_val);

        if (dict)
                dict_unref (dict);

        if (rsp_dict)
                dict_unref (rsp_dict);

        gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
        return ret;
}
static int
luaV_dict_gc (lua_State *L)
{
    dict_unref(luaV_unbox(L, luaV_Dict, 1));
    return 0;
}
struct glfs_object *
glfs_h_creat (struct glfs *fs, struct glfs_object *parent, const char *path,
              int flags, mode_t mode, struct stat *stat)
{
        int                 ret = -1;
        struct glfs_fd     *glfd = NULL;
        xlator_t           *subvol = NULL;
        inode_t            *inode = NULL;
        loc_t               loc = {0, };
        struct iatt         iatt = {0, };
        uuid_t              gfid;
        dict_t             *xattr_req = NULL;
        struct glfs_object *object = NULL;

        /* validate in args */
        if ((fs == NULL) || (parent == NULL) || (path == NULL)) {
                errno = EINVAL;
                return NULL;
        }

        __glfs_entry_fs (fs);

        /* get the active volume */
        subvol = glfs_active_subvol (fs);
        if (!subvol) {
                ret = -1;
                errno = EIO;
                goto out;
        }

        /* get/refresh the in arg objects inode in correlation to the xlator */
        inode = glfs_resolve_inode (fs, subvol, parent);
        if (!inode) {
                errno = ESTALE;
                goto out;
        }

        xattr_req = dict_new ();
        if (!xattr_req) {
                ret = -1;
                errno = ENOMEM;
                goto out;
        }

        uuid_generate (gfid);
        ret = dict_set_static_bin (xattr_req, "gfid-req", gfid, 16);
        if (ret) {
                ret = -1;
                errno = ENOMEM;
                goto out;
        }

        GLFS_LOC_FILL_PINODE (inode, loc, ret, errno, out, path);

        glfd = glfs_fd_new (fs);
        if (!glfd)
                goto out;

        glfd->fd = fd_create (loc.inode, getpid());
        if (!glfd->fd) {
                ret = -1;
                errno = ENOMEM;
                goto out;
        }

        /* fop/op */
        ret = syncop_create (subvol, &loc, flags, mode, glfd->fd,
                             xattr_req, &iatt);
        DECODE_SYNCOP_ERR (ret);

        /* populate out args */
        if (ret == 0) {
                /* TODO: If the inode existed in the cache (say file already
                   exists), then the glfs_loc_link will not update the
                   loc.inode, as a result we will have a 0000 GFID that we
                   would copy out to the object, this needs to be fixed.
                */
                ret = glfs_loc_link (&loc, &iatt);
                if (ret != 0) {
                        goto out;
                }

                if (stat)
                        glfs_iatt_to_stat (fs, &iatt, stat);

                ret = glfs_create_object (&loc, &object);
        }
out:
        if (ret && object != NULL) {
                glfs_h_close (object);
                object = NULL;
        }

        loc_wipe(&loc);

        if (inode)
                inode_unref (inode);

        if (xattr_req)
                dict_unref (xattr_req);

        if (glfd) {
                glfs_fd_destroy (glfd);
                glfd = NULL;
        }

        glfs_subvol_done (fs, subvol);

        return object;
}
int
glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout,
                   glusterd_conn_notify_t notify)
{
    int ret = -1;
    dict_t *options = NULL;
    struct rpc_clnt *rpc = NULL;
    xlator_t *this = THIS;
    glusterd_svc_t *svc = NULL;

    if (!this)
        goto out;

    options = dict_new();
    if (!options)
        goto out;

    svc = glusterd_conn_get_svc_object(conn);
    if (!svc) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
               "Failed to get the service");
        goto out;
    }

    ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout);
    if (ret)
        goto out;

    ret = dict_set_int32n(options, "transport.socket.ignore-enoent",
                          SLEN("transport.socket.ignore-enoent"), 1);
    if (ret)
        goto out;

    /* @options is free'd by rpc_transport when destroyed */
    rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
    if (!rpc) {
        ret = -1;
        goto out;
    }

    ret = rpc_clnt_register_notify(rpc, glusterd_conn_common_notify, conn);
    if (ret)
        goto out;

    ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
    if (ret < 0)
        goto out;
    else
        ret = 0;

    conn->frame_timeout = frame_timeout;
    conn->rpc = rpc;
    conn->notify = notify;

out:
    if (options)
        dict_unref(options);
    if (ret) {
        if (rpc) {
            rpc_clnt_unref(rpc);
            rpc = NULL;
        }
    }
    return ret;
}