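/* glusterd_handle_mgmt_v3_lock_fn -- RPC handler for an incoming
 * mgmt_v3 lock request. Decodes the XDR payload, verifies that the
 * sender belongs to the cluster, builds an op-lock context, and then
 * acquires the lock either on a synctask or through the op state
 * machine, depending on how the originator flagged the request.
 */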
static int
glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
{
        gd1_mgmt_v3_lock_req    lock_req        = {{0},};
        int32_t                 ret             = -1;
        glusterd_op_lock_ctx_t *ctx             = NULL;
        xlator_t               *this            = NULL;
        gf_boolean_t            is_synctasked   = _gf_false;
        gf_boolean_t            free_ctx        = _gf_false;

        this = THIS;
        GF_ASSERT (this);
        GF_ASSERT (req);

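        /* Decode the XDR-encoded lock request carried in the RPC
         * message; a malformed payload is rejected as GARBAGE_ARGS. */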
        ret = xdr_to_generic (req->msg[0], &lock_req,
                              (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
        if (ret < 0) {
                gf_msg (this->name, GF_LOG_ERROR, 0,
                        GD_MSG_REQ_DECODE_FAIL, "Failed to decode lock "
                        "request received from peer");
                req->rpc_err = GARBAGE_ARGS;
                goto out;
        }

        gf_msg_debug (this->name, 0, "Received mgmt_v3 lock req "
                "from uuid: %s", uuid_utoa (lock_req.uuid));

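        /* Only accept lock requests from nodes that are already part
         * of the trusted storage pool. */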
        if (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_PEER_NOT_FOUND, "%s doesn't "
                        "belong to the cluster. Ignoring request.",
                        uuid_utoa (lock_req.uuid));
                ret = -1;
                goto out;
        }

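        /* Build the op-lock context that carries the originator's uuid,
         * the RPC request, and the unserialized dictionary. */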
        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
        if (!ctx) {
                ret = -1;
                goto out;
        }

        gf_uuid_copy (ctx->uuid, lock_req.uuid);
        ctx->req = req;

        ctx->dict = dict_new ();
        if (!ctx->dict) {
                ret = -1;
                goto out;
        }

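        /* Rebuild the dictionary shipped with the request from its
         * serialized form. */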
        ret = dict_unserialize (lock_req.dict.dict_val,
                                lock_req.dict.dict_len, &ctx->dict);
        if (ret) {
                gf_msg (this->name, GF_LOG_WARNING, 0,
                        GD_MSG_DICT_UNSERIALIZE_FAIL,
                        "failed to unserialize the dictionary");
                goto out;
        }

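        /* The originator indicates through the "is_synctasked" key
         * whether this request must be served on a synctask or routed
         * through the op state machine. */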
        is_synctasked = dict_get_str_boolean (ctx->dict,
                                              "is_synctasked", _gf_false);
        if (is_synctasked) {
                ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
                if (ret) {
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_LOCK_GET_FAIL,
                                "Failed to acquire mgmt_v3_locks");
                        /* Ignore the return code; propagating a failure
                         * out of this handler would result in a double
                         * deletion of the req.
                         */
                        ret = 0;
                }

                /* The above function does not take ownership of ctx.
                 * Therefore we need to free the ctx explicitly. */
                free_ctx = _gf_true;
        } else {
                /* Do not ignore the return code here; it must be
                 * propagated from the handler, since on failure this
                 * path does not delete the req object.
                 */
                ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
                                                              ctx);
                if (ret)
                        gf_msg (this->name, GF_LOG_ERROR, 0,
                                GD_MSG_MGMTV3_LOCK_GET_FAIL,
                                "Failed to acquire mgmt_v3_locks");
        }

out:

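        /* Release the context on failure, or when the synctask path
         * did not take ownership of it. */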
        if (ctx && (ret || free_ctx)) {
                if (ctx->dict)
                        dict_unref (ctx->dict);

                GF_FREE (ctx);
        }

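        /* dict_val was allocated by the XDR routines, so release it
         * with plain free() rather than GF_FREE(). */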
        free (lock_req.dict.dict_val);

        gf_msg_trace (this->name, 0, "Returning %d", ret);
        return ret;
}