Example No. 1
0
void next_granted_cookie(struct granted_cookie *cookie)
{
	PTHREAD_MUTEX_lock(&granted_mutex);
	granted_cookie.gc_cookie++;
	*cookie = granted_cookie;
	PTHREAD_MUTEX_unlock(&granted_mutex);
}
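The function above relies on a single idiom: lock, bump the shared counter, and copy the result out while still holding the lock. Copying under the mutex is what guarantees each caller a unique, fully formed cookie. Below is a minimal, self-contained sketch of the same idiom, using hypothetical names and plain pthread calls in place of Ganesha's PTHREAD_MUTEX_* wrappers:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-ins for the shared cookie state above. */
struct cookie { uint64_t value; };

static struct cookie shared_cookie;
static pthread_mutex_t cookie_mutex = PTHREAD_MUTEX_INITIALIZER;

static void next_cookie(struct cookie *out)
{
	pthread_mutex_lock(&cookie_mutex);
	shared_cookie.value++;	/* bump the shared counter */
	*out = shared_cookie;	/* snapshot it before dropping the lock */
	pthread_mutex_unlock(&cookie_mutex);
}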
Example No. 2
0
/**
 * @brief Initialize an NFS4 open owner object
 *
 * @param[in] owner The owner record
 *
 */
static void init_nfs4_owner(state_owner_t *owner)
{
	glist_init(&owner->so_owner.so_nfs4_owner.so_state_list);

	/* Increment refcount on related owner */
	if (owner->so_owner.so_nfs4_owner.so_related_owner != NULL)
		inc_state_owner_ref(owner->so_owner.so_nfs4_owner.
				    so_related_owner);

	/* Increment reference count for clientid record */
	inc_client_id_ref(owner->so_owner.so_nfs4_owner.so_clientrec);

	PTHREAD_MUTEX_lock(&owner->so_owner.so_nfs4_owner.so_clientrec
			   ->cid_mutex);

	if (owner->so_type == STATE_OPEN_OWNER_NFSV4) {
		/* If open owner, add to clientid open owner list */
		glist_add_tail(&owner->so_owner.so_nfs4_owner.so_clientrec->
			       cid_openowners,
			       &owner->so_owner.so_nfs4_owner.so_perclient);
	} else if (owner->so_type == STATE_LOCK_OWNER_NFSV4) {
		/* If lock owner, add to clientid lock owner list */
		glist_add_tail(&owner->so_owner.so_nfs4_owner.so_clientrec->
			       cid_lockowners,
			       &owner->so_owner.so_nfs4_owner.so_perclient);
	}

	PTHREAD_MUTEX_unlock(&owner->so_owner.so_nfs4_owner.so_clientrec
			     ->cid_mutex);
}
Example No. 3
0
int32_t dec_session_ref(nfs41_session_t *session)
{
    int i;
    int32_t refcnt = atomic_dec_int32_t(&session->refcount);

    if (refcnt == 0) {

        /* Unlink the session from the client's list of
           sessions */
        PTHREAD_MUTEX_lock(&session->clientid_record->cid_mutex);
        glist_del(&session->session_link);
        PTHREAD_MUTEX_unlock(&session->clientid_record->cid_mutex);

        /* Decrement our reference to the clientid record */
        dec_client_id_ref(session->clientid_record);
        /* Destroy this session's mutexes and condition variable */

        for (i = 0; i < NFS41_NB_SLOTS; i++)
            PTHREAD_MUTEX_destroy(&session->slots[i].lock);

        PTHREAD_COND_destroy(&session->cb_cond);
        PTHREAD_MUTEX_destroy(&session->cb_mutex);

        /* Destroy the session's back channel (if any) */
        if (session->flags & session_bc_up)
            nfs_rpc_destroy_chan(&session->cb_chan);

        /* Free the memory for the session */
        pool_free(nfs41_session_pool, session);
    }

    return refcnt;
}
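dec_session_ref depends on atomic_dec_int32_t returning the post-decrement value, so exactly one caller observes zero and performs the teardown without holding any lock. A self-contained sketch of that decrement-and-test idiom using C11 atomics (hypothetical object type, not the Ganesha session structure):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct object {
	atomic_int_fast32_t refcount;
	/* ... payload ... */
};

/* Drop one reference and return the new count; the single caller
 * that sees 0 owns destruction. */
static int32_t object_put(struct object *obj)
{
	int32_t refcnt = (int32_t)atomic_fetch_sub(&obj->refcount, 1) - 1;

	if (refcnt == 0) {
		/* No other reference can exist, so no lock is needed. */
		free(obj);
	}

	return refcnt;
}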
Example No. 4
0
/**
 * @brief Check for expired TCP DRCs.
 */
static inline void drc_free_expired(void)
{
	drc_t *drc;
	time_t now = time(NULL);
	struct rbtree_x_part *t;
	struct opr_rbtree_node *odrc = NULL;

	DRC_ST_LOCK();

	if ((drc_st->tcp_drc_recycle_qlen < 1) ||
	    (now - drc_st->last_expire_check) < 600) /* 10m */
		goto unlock;

	do {
		drc = TAILQ_FIRST(&drc_st->tcp_drc_recycle_q);
		if (drc && (drc->d_u.tcp.recycle_time > 0)
		    && ((now - drc->d_u.tcp.recycle_time) >
			drc_st->expire_delta) && (drc->refcnt == 0)) {
			LogFullDebug(COMPONENT_DUPREQ,
				     "remove expired drc %p from recycle queue",
				     drc);
			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc->d_u.tcp.hk);

			odrc =
			    opr_rbtree_lookup(&t->t, &drc->d_u.tcp.recycle_k);
			if (!odrc) {
				LogCrit(COMPONENT_DUPREQ,
					"BUG: asked to dequeue DRC not on queue");
			} else {
				(void)opr_rbtree_remove(&t->t,
							&drc->d_u.tcp.
							recycle_k);
			}
			TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q, drc,
				     d_u.tcp.recycle_q);
			--(drc_st->tcp_drc_recycle_qlen);
			/* expect DRC to be reachable from some xprt(s) */
			PTHREAD_MUTEX_lock(&drc->mtx);
			drc->flags &= ~DRC_FLAG_RECYCLE;
			/* but if not, dispose it */
			if (drc->refcnt == 0) {
				PTHREAD_MUTEX_unlock(&drc->mtx);
				free_tcp_drc(drc);
				continue;
			}
			PTHREAD_MUTEX_unlock(&drc->mtx);
		} else {
			LogFullDebug(COMPONENT_DUPREQ,
				     "unexpired drc %p in recycle queue expire check (nothing happens)",
				     drc);
			drc_st->last_expire_check = now;
			break;
		}

	} while (1);

 unlock:
	DRC_ST_UNLOCK();
}
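drc_free_expired keeps its cost bounded in two ways: the scan is skipped entirely unless the queue is non-empty and at least ten minutes have passed since the last check, and the walk starts at the head of the recycle queue, where the oldest entries live, stopping at the first entry that is too new or still referenced. A reduced, self-contained sketch of that rate-limited expiry scan, with hypothetical types and a single mutex standing in for DRC_ST_LOCK():

#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical recycle-queue entry. */
struct entry {
	TAILQ_ENTRY(entry) q;
	time_t recycle_time;
	int refcnt;
};

static TAILQ_HEAD(, entry) recycle_q = TAILQ_HEAD_INITIALIZER(recycle_q);
static pthread_mutex_t recycle_lock = PTHREAD_MUTEX_INITIALIZER;
static time_t last_expire_check;

static void free_expired(time_t expire_delta)
{
	time_t now = time(NULL);
	struct entry *e;

	pthread_mutex_lock(&recycle_lock);

	/* Rate-limit the scan to once every 10 minutes. */
	if (now - last_expire_check < 600)
		goto unlock;

	/* Oldest entries sit at the head; stop at the first live one. */
	while ((e = TAILQ_FIRST(&recycle_q)) != NULL &&
	       now - e->recycle_time > expire_delta &&
	       e->refcnt == 0) {
		TAILQ_REMOVE(&recycle_q, e, q);
		free(e);
	}

	last_expire_check = now;

 unlock:
	pthread_mutex_unlock(&recycle_lock);
}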
Example No. 5
0
int nfs4_op_renew(struct nfs_argop4 *op, compound_data_t *data,
		  struct nfs_resop4 *resp)
{
	RENEW4args * const arg_RENEW4 = &op->nfs_argop4_u.oprenew;
	RENEW4res * const res_RENEW4 = &resp->nfs_resop4_u.oprenew;
	nfs_client_id_t *clientid;
	int rc;

	/* Locks are not supported */
	memset(resp, 0, sizeof(struct nfs_resop4));
	resp->resop = NFS4_OP_RENEW;

	if (data->minorversion > 0) {
		res_RENEW4->status = NFS4ERR_NOTSUPP;
		return res_RENEW4->status;
	}

	/* Tell the admin what I am doing... */
	LogFullDebug(COMPONENT_CLIENTID,
		     "RENEW Client id = %" PRIx64,
		     arg_RENEW4->clientid);

	/* Is this an existing client id? */
	rc = nfs_client_id_get_confirmed(arg_RENEW4->clientid, &clientid);

	if (rc != CLIENT_ID_SUCCESS) {
		/* Unknown client id */
		res_RENEW4->status = clientid_error_to_nfsstat(rc);
		return res_RENEW4->status;
	}

	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	if (!reserve_lease(clientid)) {
		res_RENEW4->status = NFS4ERR_EXPIRED;
	} else {
		update_lease(clientid);
		/* update the lease, check the state of callback
		 * path and return correct error */
		if (nfs_param.nfsv4_param.allow_delegations &&
		    get_cb_chan_down(clientid) && clientid->curr_deleg_grants) {
			res_RENEW4->status =  NFS4ERR_CB_PATH_DOWN;
			/* Set the time for first PATH_DOWN response */
			if (clientid->first_path_down_resp_time == 0)
				clientid->first_path_down_resp_time =
								time(NULL);
		} else {
			res_RENEW4->status = NFS4_OK;
			/* Reset */
			clientid->first_path_down_resp_time = 0;
		}
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	dec_client_id_ref(clientid);

	return res_RENEW4->status;
}				/* nfs4_op_renew */
Example No. 6
0
/**
 *
 * @brief Remove an entry (request) from a duplicate request cache.
 *
 * The expected pattern is that nfs_rpc_execute shall delete requests only
 * in error conditions.  The refcnt of the corresponding duplicate request
 * entry is unchanged (i.e., the caller must still call nfs_dupreq_rele).
 *
 * We assert req->rq_u1 now points to the corresponding duplicate request
 * cache entry.
 *
 * @param[in] req The svc_req structure.
 *
 * @return DUPREQ_SUCCESS if successful.
 *
 */
dupreq_status_t nfs_dupreq_delete(struct svc_req *req)
{
	dupreq_entry_t *dv = (dupreq_entry_t *)req->rq_u1;
	dupreq_status_t status = DUPREQ_SUCCESS;
	struct rbtree_x_part *t;
	drc_t *drc;

	/* do nothing if req is marked no-cache */
	if (dv == (void *)DUPREQ_NOCACHE)
		goto out;

	/* do nothing if nfs_dupreq_start failed completely */
	if (dv == (void *)DUPREQ_BAD_ADDR1)
		goto out;

	PTHREAD_MUTEX_lock(&dv->mtx);
	drc = dv->hin.drc;
	dv->state = DUPREQ_DELETED;
	PTHREAD_MUTEX_unlock(&dv->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "deleting dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

	/* XXX dv holds a ref on drc */
	t = rbtx_partition_of_scalar(&drc->xt, dv->hk);

	PTHREAD_MUTEX_lock(&t->mtx);
	rbtree_x_cached_remove(&drc->xt, t, &dv->rbt_k, dv->hk);

	PTHREAD_MUTEX_unlock(&t->mtx);
	PTHREAD_MUTEX_lock(&drc->mtx);

	if (TAILQ_IS_ENQUEUED(dv, fifo_q))
		TAILQ_REMOVE(&drc->dupreq_q, dv, fifo_q);
	--(drc->size);

	/* release dv's ref and unlock */
	nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_LOCKED);
	/* !LOCKED */

 out:
	return status;
}
Example No. 7
0
static void handle_free(handle_pool_entry_t *p_handle)
{
	memset(p_handle, 0, sizeof(handle_pool_entry_t));

	PTHREAD_MUTEX_lock(&handle_pool_mutex);
	pool_free(handle_pool, p_handle);
	PTHREAD_MUTEX_unlock(&handle_pool_mutex);
}
Example No. 8
0
static void digest_free(digest_pool_entry_t *p_digest)
{
	memset(p_digest, 0, sizeof(digest_pool_entry_t));

	PTHREAD_MUTEX_lock(&digest_pool_mutex);
	pool_free(digest_pool, p_digest);
	PTHREAD_MUTEX_unlock(&digest_pool_mutex);
}
Example No. 9
0
static digest_pool_entry_t *digest_alloc()
{
	digest_pool_entry_t *p_new;

	PTHREAD_MUTEX_lock(&digest_pool_mutex);
	p_new = pool_alloc(digest_pool);
	PTHREAD_MUTEX_unlock(&digest_pool_mutex);

	return p_new;
}
Example No. 10
0
static handle_pool_entry_t *handle_alloc()
{
	handle_pool_entry_t *p_new;

	PTHREAD_MUTEX_lock(&handle_pool_mutex);
	p_new = pool_alloc(handle_pool);
	PTHREAD_MUTEX_unlock(&handle_pool_mutex);

	return p_new;
}
Example No. 11
0
static handle_pool_entry_t *handle_alloc()
{
	handle_pool_entry_t *p_new;

	PTHREAD_MUTEX_lock(&handle_pool_mutex);
	p_new = pool_alloc(handle_pool, NULL);
	PTHREAD_MUTEX_unlock(&handle_pool_mutex);

	memset(p_new, 0, sizeof(handle_pool_entry_t));

	return p_new;
}
Example No. 12
0
static digest_pool_entry_t *digest_alloc()
{
	digest_pool_entry_t *p_new;

	PTHREAD_MUTEX_lock(&digest_pool_mutex);
	p_new = pool_alloc(digest_pool, NULL);
	PTHREAD_MUTEX_unlock(&digest_pool_mutex);

	memset(p_new, 0, sizeof(digest_pool_entry_t));

	return p_new;
}
Example No. 13
0
void nlm_signal_async_resp(void *key)
{
	PTHREAD_MUTEX_lock(&nlm_async_resp_mutex);

	if (resp_key == key) {
		resp_key = NULL;
		pthread_cond_signal(&nlm_async_resp_cond);
		LogFullDebug(COMPONENT_NLM, "Signaled condition variable");
	} else {
		LogFullDebug(COMPONENT_NLM, "Didn't signal condition variable");
	}

	PTHREAD_MUTEX_unlock(&nlm_async_resp_mutex);
}
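nlm_signal_async_resp is the signalling half of a condition-variable handshake: a waiter elsewhere holds the same mutex and loops until the shared key has been consumed, which is why the signaller only clears resp_key and signals when the keys match. A self-contained sketch of both halves with hypothetical names (this is not the actual NLM async caller):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t resp_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resp_cond = PTHREAD_COND_INITIALIZER;
static void *resp_key;

/* Waiter: publish the key, then sleep until the signaller clears it. */
static void wait_for_response(void *key)
{
	pthread_mutex_lock(&resp_mutex);
	resp_key = key;
	/* The predicate loop guards against spurious wakeups. */
	while (resp_key == key)
		pthread_cond_wait(&resp_cond, &resp_mutex);
	pthread_mutex_unlock(&resp_mutex);
}

/* Signaller: mirrors nlm_signal_async_resp() above. */
static void signal_response(void *key)
{
	pthread_mutex_lock(&resp_mutex);
	if (resp_key == key) {
		resp_key = NULL;
		pthread_cond_signal(&resp_cond);
	}
	pthread_mutex_unlock(&resp_mutex);
}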
Example No. 14
0
void uid2grp_release_group_data(struct group_data *gdata)
{
	unsigned int refcount;

	PTHREAD_MUTEX_lock(&gdata->lock);
	refcount = --gdata->refcount;
	PTHREAD_MUTEX_unlock(&gdata->lock);

	if (refcount == 0) {
		gsh_free(gdata->groups);
		gsh_free(gdata);
	} else if (refcount == (unsigned int)-1) {
		LogAlways(COMPONENT_IDMAPPER, "negative refcount on gdata: %p",
			  gdata);
	}
}
Example No. 15
0
void state_del_locked(state_t *state)
{
	char str[LOG_BUFF_LEN];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	bool str_valid = false;
	struct fsal_obj_handle *obj;
	struct gsh_export *export;
	state_owner_t *owner;

	if (isDebug(COMPONENT_STATE)) {
		display_stateid(&dspbuf, state);
		str_valid = true;
	}

	/* Remove the entry from the HashTable. If it fails, we have lost the
	 * race with another caller of state_del/state_del_locked.
	 */
	if (!nfs4_State_Del(state)) {
		if (str_valid)
			LogDebug(COMPONENT_STATE,
				 "Racing to delete %s", str);
		return;
	}

	if (str_valid)
		LogFullDebug(COMPONENT_STATE, "Deleting %s", str);

	/* Protect extraction of all the referenced objects. We don't
	 * actually need to test them or take references because we assure
	 * that there is exactly one state_del_locked call that proceeds
	 * this far, and thus if the references were non-NULL, they must still
	 * be good. Holding the mutex is not strictly necessary for this
	 * reason, however, static and dynamic code analysis have no way of
	 * knowing this reference is safe.
	 */
	PTHREAD_MUTEX_lock(&state->state_mutex);
	obj = get_state_obj_ref(state);

	if (obj == NULL) {
		LogDebug(COMPONENT_STATE,
			 "Entry for state is stale");
		PTHREAD_MUTEX_unlock(&state->state_mutex);
		return;
	}

	export = state->state_export;
Example No. 16
0
/**
 * @brief Release previously-ref'd DRC.
 *
 * Release previously-ref'd DRC.  If its refcnt drops to 0, the DRC
 * is queued for later recycling.
 *
 * @param[in] xprt  The SVCXPRT associated with DRC, if applicable
 * @param[in] drc   The DRC
 * @param[in] flags Control flags
 */
void nfs_dupreq_put_drc(SVCXPRT *xprt, drc_t *drc, uint32_t flags)
{
	if (!(flags & DRC_FLAG_LOCKED))
		PTHREAD_MUTEX_lock(&drc->mtx);
	/* drc LOCKED */

	if (drc->refcnt == 0) {
		LogCrit(COMPONENT_DUPREQ,
			"drc %p refcnt will underrun refcnt=%u", drc,
			drc->refcnt);
	}

	nfs_dupreq_unref_drc(drc);

	LogFullDebug(COMPONENT_DUPREQ, "drc %p refcnt==%u", drc, drc->refcnt);

	switch (drc->type) {
	case DRC_UDP_V234:
		/* do nothing */
		break;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		if (drc->refcnt == 0) {
			if (!(drc->flags & DRC_FLAG_RECYCLE)) {
				/* note t's lock order wrt drc->mtx is
				 * the opposite of drc->xt[*].lock */
				drc->d_u.tcp.recycle_time = time(NULL);
				drc->flags |= DRC_FLAG_RECYCLE;
				PTHREAD_MUTEX_unlock(&drc->mtx); /* !LOCKED */
				DRC_ST_LOCK();
				TAILQ_INSERT_TAIL(&drc_st->tcp_drc_recycle_q,
						  drc, d_u.tcp.recycle_q);
				++(drc_st->tcp_drc_recycle_qlen);
				LogFullDebug(COMPONENT_DUPREQ,
					     "enqueue drc %p for recycle", drc);
				DRC_ST_UNLOCK();
				return;
			}
		}
		/* fall through */
	default:
		break;
	};

	PTHREAD_MUTEX_unlock(&drc->mtx); /* !LOCKED */
}
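One detail worth noting in the recycle path above: drc->mtx is released before DRC_ST_LOCK() is taken, and the comment about lock order suggests the global state lock must never be acquired while a per-DRC lock is held. A minimal sketch of that release-then-acquire discipline, with hypothetical locks and a flag assignment standing in for the TAILQ insertion:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;	/* per-object */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* global list */
static bool obj_queued;

/* Convention assumed here: list_lock is never taken while obj_lock is
 * held, so the object lock is dropped first and only then is the list
 * lock acquired to enqueue the object. */
static void queue_for_recycle(void)
{
	pthread_mutex_lock(&obj_lock);
	/* ... decide the object is idle and mark it for recycling ... */
	pthread_mutex_unlock(&obj_lock);

	pthread_mutex_lock(&list_lock);
	obj_queued = true;
	pthread_mutex_unlock(&list_lock);
}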
Example No. 17
0
void free_nfs4_owner(state_owner_t *owner)
{
	state_nfs4_owner_t *nfs4_owner = &owner->so_owner.so_nfs4_owner;

	if (nfs4_owner->so_related_owner != NULL)
		dec_state_owner_ref(nfs4_owner->so_related_owner);

	/* Release the saved response. */
	nfs4_Compound_FreeOne(&nfs4_owner->so_resp);

	/* Remove the owner from the owners per clientid list. */
	PTHREAD_MUTEX_lock(&nfs4_owner->so_clientrec->cid_mutex);

	glist_del(&nfs4_owner->so_perclient);

	PTHREAD_MUTEX_unlock(&nfs4_owner->so_clientrec->cid_mutex);

	dec_client_id_ref(nfs4_owner->so_clientrec);
}
Example No. 18
0
static ssize_t tcp_conn_send(struct _9p_conn *conn, const void *buf, size_t len,
			     int flags)
{
	ssize_t ret;

	PTHREAD_MUTEX_lock(&conn->sock_lock);
	ret = send(conn->trans_data.sockfd, buf, len, flags);
	PTHREAD_MUTEX_unlock(&conn->sock_lock);

	if (ret < 0)
		server_stats_transport_done(conn->client,
					    0, 0, 0,
					    0, 0, 1);
	else
		server_stats_transport_done(conn->client,
					    0, 0, 0,
					    ret, 1, 0);
	return ret;
}
Example No. 19
0
/**
 * @brief Decrement the call path refcnt on a cache entry.
 *
 * We assert req->rq_u1 now points to the corresponding duplicate request
 * cache entry (dv).
 *
 * In the common case, a refcnt of 0 indicates that dv is cached.  If
 * also dv->state == DUPREQ_DELETED, the request entry has been discarded
 * and should be destroyed here.
 *
 * @param[in] req  The svc_req structure.
 * @param[in] func The function descriptor for this request type
 */
void nfs_dupreq_rele(struct svc_req *req, const nfs_function_desc_t *func)
{
	dupreq_entry_t *dv = (dupreq_entry_t *) req->rq_u1;

	/* no-cache cleanup */
	if (dv == (void *)DUPREQ_NOCACHE) {
		LogFullDebug(COMPONENT_DUPREQ, "releasing no-cache res %p",
			     req->rq_u2);
		func->free_function(req->rq_u2);
		free_nfs_res(req->rq_u2);
		goto out;
	}

	PTHREAD_MUTEX_lock(&dv->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "releasing dv=%p xid=%u on DRC=%p state=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, dv->hin.drc,
		     dupreq_state_table[dv->state], dv->refcnt);

	(dv->refcnt)--;
	if (dv->refcnt == 0) {
		if (dv->state == DUPREQ_DELETED) {
			PTHREAD_MUTEX_unlock(&dv->mtx);
			/* deep free */
			nfs_dupreq_free_dupreq(dv);
			return;
		}
	}
	PTHREAD_MUTEX_unlock(&dv->mtx);

 out:
	/* dispose RPC header */
	if (req->rq_auth)
		SVCAUTH_RELEASE(req->rq_auth, req);

	(void)free_rpc_msg(req->rq_msg);
}
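Together with nfs_dupreq_delete() earlier, this implements a two-phase teardown: delete unhashes the entry and marks it DUPREQ_DELETED while the caller still holds a reference, and the rele that later drops the refcount to zero on a deleted entry performs the actual free. A compact, self-contained sketch of that protocol (hypothetical entry type; a plain enum in place of the dupreq state table):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

enum entry_state { ENTRY_CACHED, ENTRY_DELETED };

struct entry {
	pthread_mutex_t mtx;
	int refcnt;
	enum entry_state state;
};

/* Phase 1: unhash the entry (not shown) and mark it deleted.
 * The caller still holds a reference, so nothing is freed here. */
static void entry_delete(struct entry *e)
{
	pthread_mutex_lock(&e->mtx);
	e->state = ENTRY_DELETED;
	pthread_mutex_unlock(&e->mtx);
}

/* Phase 2: drop the call-path reference; the caller that takes the
 * count to zero on a deleted entry destroys it. */
static void entry_rele(struct entry *e)
{
	bool destroy;

	pthread_mutex_lock(&e->mtx);
	e->refcnt--;
	destroy = (e->refcnt == 0 && e->state == ENTRY_DELETED);
	pthread_mutex_unlock(&e->mtx);

	if (destroy) {
		pthread_mutex_destroy(&e->mtx);
		free(e);
	}
}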
Example No. 20
0
/**
 * @brief GPFS FSAL up-call thread
 *
 * @param[in] Arg Opaque pointer to the struct gpfs_filesystem being monitored
 *
 */
void *GPFSFSAL_UP_Thread(void *Arg)
{
	struct gpfs_filesystem *gpfs_fs = Arg;
	struct fsal_up_vector *event_func;
	char thr_name[16];
	int rc = 0;
	struct pnfs_deviceid devid;
	struct stat buf;
	struct glock fl;
	struct callback_arg callback;
	struct gpfs_file_handle handle;
	int reason = 0;
	int flags = 0;
	unsigned int *fhP;
	int retry = 0;
	struct gsh_buffdesc key;
	uint32_t expire_time_attr = 0;
	uint32_t upflags;
	int errsv = 0;
	fsal_status_t fsal_status = {0,};

#ifdef _VALGRIND_MEMCHECK
	memset(&handle, 0, sizeof(handle));
	memset(&buf, 0, sizeof(buf));
	memset(&fl, 0, sizeof(fl));
	memset(&devid, 0, sizeof(devid));
#endif

	snprintf(thr_name, sizeof(thr_name),
		 "fsal_up_%"PRIu64".%"PRIu64,
		 gpfs_fs->fs->dev.major, gpfs_fs->fs->dev.minor);
	SetNameFunction(thr_name);

	LogFullDebug(COMPONENT_FSAL_UP,
		     "Initializing FSAL Callback context for %d.",
		     gpfs_fs->root_fd);

	/* wait for nfs init completion to get general_fridge
	 * initialized which is needed for processing some upcall events
	 */
	nfs_init_wait();

	/* Start querying for events and processing. */
	while (1) {
		LogFullDebug(COMPONENT_FSAL_UP,
			     "Requesting event from FSAL Callback interface for %d.",
			     gpfs_fs->root_fd);

		handle.handle_size = GPFS_MAX_FH_SIZE;
		handle.handle_key_size = OPENHANDLE_KEY_LEN;
		handle.handle_version = OPENHANDLE_VERSION;

		callback.interface_version =
		    GPFS_INTERFACE_VERSION + GPFS_INTERFACE_SUB_VER;

		callback.mountdirfd = gpfs_fs->root_fd;
		callback.handle = &handle;
		callback.reason = &reason;
		callback.flags = &flags;
		callback.buf = &buf;
		callback.fl = &fl;
		callback.dev_id = &devid;
		callback.expire_attr = &expire_time_attr;

		rc = gpfs_ganesha(OPENHANDLE_INODE_UPDATE, &callback);
		errsv = errno;

		if (rc != 0) {
			rc = -(rc);
			if (rc > GPFS_INTERFACE_VERSION) {
				LogFatal(COMPONENT_FSAL_UP,
					 "Ganesha version %d mismatch GPFS version %d.",
					 callback.interface_version, rc);
				return NULL;
			}

			if (errsv == EINTR)
				continue;

			LogCrit(COMPONENT_FSAL_UP,
				"OPENHANDLE_INODE_UPDATE failed for %d. rc %d, errno %d (%s) reason %d",
				gpfs_fs->root_fd, rc, errsv,
				strerror(errsv), reason);

			/* @todo 1000 retry logic will go away once the
			 * OPENHANDLE_INODE_UPDATE ioctl separates EINTR
			 * and EUNATCH.
			 */
			if (errsv == EUNATCH && ++retry > 1000)
				LogFatal(COMPONENT_FSAL_UP,
					 "GPFS file system %d has gone away.",
					 gpfs_fs->root_fd);

			continue;
		}

		retry = 0;

		/* flags is int, but only the least significant 2 bytes
		 * are valid.  We are getting random bits into the upper
		 * 2 bytes! Work around this until the kernel module
		 * gets fixed.
		 */
		flags = flags & 0xffff;

		LogDebug(COMPONENT_FSAL_UP,
			 "inode update: rc %d reason %d update ino %"
			 PRId64 " flags:%x",
			 rc, reason, callback.buf->st_ino, flags);

		LogFullDebug(COMPONENT_FSAL_UP,
			     "inode update: flags:%x callback.handle:%p handle size = %u handle_type:%d handle_version:%d key_size = %u handle_fsid=%X.%X f_handle:%p expire: %d",
			     *callback.flags, callback.handle,
			     callback.handle->handle_size,
			     callback.handle->handle_type,
			     callback.handle->handle_version,
			     callback.handle->handle_key_size,
			     callback.handle->handle_fsid[0],
			     callback.handle->handle_fsid[1],
			     callback.handle->f_handle, expire_time_attr);

		callback.handle->handle_version = OPENHANDLE_VERSION;

		fhP = (int *)&(callback.handle->f_handle[0]);
		LogFullDebug(COMPONENT_FSAL_UP,
			     " inode update: handle %08x %08x %08x %08x %08x %08x %08x",
			     fhP[0], fhP[1], fhP[2], fhP[3], fhP[4], fhP[5],
			     fhP[6]);

		/* Here is where we decide what type of event this is
		 * ... open,close,read,...,invalidate? */
		key.addr = &handle;
		key.len = handle.handle_key_size;

		LogDebug(COMPONENT_FSAL_UP, "Received event to process for %d",
			 gpfs_fs->root_fd);

		/* We need valid up_vector while processing some of the
		 * events below. Setup up vector and hold the mutex while
		 * processing the event for the entire duration.
		 */
		PTHREAD_MUTEX_lock(&gpfs_fs->upvector_mutex);
		if (!setup_up_vector(gpfs_fs)) {
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			return NULL;
		}
		event_func = gpfs_fs->up_vector;

		switch (reason) {
		case INODE_LOCK_GRANTED:	/* Lock Event */
		case INODE_LOCK_AGAIN:	/* Lock Event */
			{
				LogMidDebug(COMPONENT_FSAL_UP,
					    "%s: owner %p pid %d type %d start %lld len %lld",
					    reason ==
					    INODE_LOCK_GRANTED ?
					    "inode lock granted" :
					    "inode lock again", fl.lock_owner,
					    fl.flock.l_pid, fl.flock.l_type,
					    (long long)fl.flock.l_start,
					    (long long)fl.flock.l_len);

				fsal_lock_param_t lockdesc = {
					.lock_sle_type = FSAL_POSIX_LOCK,
					.lock_type = fl.flock.l_type,
					.lock_start = fl.flock.l_start,
					.lock_length = fl.flock.l_len
				};
				if (reason == INODE_LOCK_AGAIN)
					fsal_status = up_async_lock_avail(
							 general_fridge,
							 event_func,
							 &key,
							 fl.lock_owner,
							 &lockdesc, NULL, NULL);
				else
					fsal_status = up_async_lock_grant(
							 general_fridge,
							 event_func,
							 &key,
							 fl.lock_owner,
							 &lockdesc, NULL, NULL);
			}
			break;

		case BREAK_DELEGATION:	/* Delegation Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "delegation recall: flags:%x ino %" PRId64,
				 flags, callback.buf->st_ino);
			fsal_status = up_async_delegrecall(general_fridge,
						  event_func,
						  &key, NULL, NULL);
			break;

		case LAYOUT_FILE_RECALL:	/* Layout file recall Event */
			{
				struct pnfs_segment segment = {
					.offset = 0,
					.length = UINT64_MAX,
					.io_mode = LAYOUTIOMODE4_ANY
				};
				LogDebug(COMPONENT_FSAL_UP,
					 "layout file recall: flags:%x ino %"
					 PRId64, flags, callback.buf->st_ino);

				fsal_status = up_async_layoutrecall(
							general_fridge,
							event_func,
							&key,
							LAYOUT4_NFSV4_1_FILES,
							false, &segment,
							NULL, NULL, NULL,
							NULL);
			}
			break;

		case LAYOUT_RECALL_ANY:	/* Recall all layouts Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "layout recall any: flags:%x ino %" PRId64,
				 flags, callback.buf->st_ino);

	    /**
	     * @todo This functionality needs to be implemented as a
	     * bulk FSID CB_LAYOUTRECALL.  RECALL_ANY isn't suitable
	     * since it can't be restricted to just one FSAL.  Also
	     * an FSID LAYOUTRECALL lets you have multiple
	     * filesystems exported from one FSAL and not yank layouts
	     * on all of them when you only need to recall them for one.
	     */
			break;

		case LAYOUT_NOTIFY_DEVICEID:	/* Device update Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "layout dev update: flags:%x ino %"
				 PRId64 " seq %d fd %d fsid 0x%" PRIx64,
				 flags,
				callback.buf->st_ino,
				devid.device_id2,
				devid.device_id4,
				devid.devid);

			memset(&devid, 0, sizeof(devid));
			devid.fsal_id = FSAL_ID_GPFS;

			fsal_status = up_async_notify_device(general_fridge,
						event_func,
						NOTIFY_DEVICEID4_DELETE_MASK,
						LAYOUT4_NFSV4_1_FILES,
						&devid,
						true, NULL,
						NULL);
			break;

		case INODE_UPDATE:	/* Update Event */
			{
				struct attrlist attr;

				LogMidDebug(COMPONENT_FSAL_UP,
					    "inode update: flags:%x update ino %"
					    PRId64 " n_link:%d",
					    flags, callback.buf->st_ino,
					    (int)callback.buf->st_nlink);

				/** @todo: This notification is completely
				 * asynchronous.  If we happen to change some
				 * of the attributes later, we end up
				 * overwriting those with these possibly stale
				 * values as we don't know when we get to
				 * update with these up call values. We should
				 * probably use time stamp or let the up call
				 * always provide UP_TIMES flag in which case
				 * we can compare the current ctime vs up call
				 * provided ctime before updating the
				 * attributes.
				 *
				 * For now, we think size attribute is more
				 * important than others, so invalidate the
				 * attributes and let ganesha fetch attributes
				 * as needed if this update includes a size
				 * change. We are less careful with other attribute
				 * changes, and we may end up with stale values
				 * until this gets fixed!
				 */
				if (flags & (UP_SIZE | UP_SIZE_BIG)) {
					fsal_status = event_func->invalidate(
						event_func, &key,
						FSAL_UP_INVALIDATE_CACHE);
					break;
				}

				/* Check for accepted flags, any other changes
				   just invalidate. */
				if (flags &
				    ~(UP_SIZE | UP_NLINK | UP_MODE | UP_OWN |
				     UP_TIMES | UP_ATIME | UP_SIZE_BIG)) {
					fsal_status = event_func->invalidate(
						event_func, &key,
						FSAL_UP_INVALIDATE_CACHE);
				} else {
					/* buf may not have all attributes set.
					 * Set the mask to what is changed
					 */
					attr.valid_mask = 0;
					attr.acl = NULL;
					upflags = 0;
					if (flags & UP_SIZE)
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_SIZE | ATTR_SPACEUSED;
					if (flags & UP_SIZE_BIG) {
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_SIZE | ATTR_SPACEUSED;
						upflags |=
						   fsal_up_update_filesize_inc |
						   fsal_up_update_spaceused_inc;
					}
					if (flags & UP_MODE)
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_MODE;
					if (flags & UP_OWN)
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_OWNER | ATTR_GROUP |
						   ATTR_MODE;
					if (flags & UP_TIMES)
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_ATIME | ATTR_CTIME |
						    ATTR_MTIME;
					if (flags & UP_ATIME)
						attr.valid_mask |=
						   ATTR_CHGTIME | ATTR_CHANGE |
						   ATTR_ATIME;
					if (flags & UP_NLINK)
						attr.valid_mask |=
							ATTR_NUMLINKS;
					attr.request_mask = attr.valid_mask;

					attr.expire_time_attr =
					    expire_time_attr;

					posix2fsal_attributes(&buf, &attr);
					fsal_status = event_func->update(
							event_func, &key,
							&attr, upflags);

					if ((flags & UP_NLINK)
					    && (attr.numlinks == 0)) {
						upflags = fsal_up_nlink;
						attr.valid_mask = 0;
						attr.request_mask = 0;
						fsal_status = up_async_update
						    (general_fridge,
						     event_func,
						     &key, &attr,
						     upflags, NULL, NULL);
					}
				}
			}
			break;

		case THREAD_STOP:  /* We wanted to terminate this thread */
			LogDebug(COMPONENT_FSAL_UP,
				"Terminating the GPFS up call thread for %d",
				gpfs_fs->root_fd);
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			return NULL;

		case INODE_INVALIDATE:
			LogMidDebug(COMPONENT_FSAL_UP,
				    "inode invalidate: flags:%x update ino %"
				    PRId64, flags, callback.buf->st_ino);

			upflags = FSAL_UP_INVALIDATE_CACHE;
			fsal_status = event_func->invalidate_close(
						event_func,
						&key,
						upflags);
			break;

		case THREAD_PAUSE:
			/* File system image is probably going away, but
			 * we don't need to do anything here as we
			 * eventually get other errors that stop this
			 * thread.
			 */
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			continue; /* get next event */

		default:
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			LogWarn(COMPONENT_FSAL_UP, "Unknown event: %d", reason);
			continue;
		}

		PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);

		if (FSAL_IS_ERROR(fsal_status) &&
		    fsal_status.major != ERR_FSAL_NOENT) {
			LogWarn(COMPONENT_FSAL_UP,
				"Event %d could not be processed for fd %d rc %s",
				reason, gpfs_fs->root_fd,
				fsal_err_txt(fsal_status));
		}
	}

	return NULL;
}				/* GPFSFSAL_UP_Thread */
int nfs4_op_release_lockowner(struct nfs_argop4 *op, compound_data_t *data,
			      struct nfs_resop4 *resp)
{
	RELEASE_LOCKOWNER4args * const arg_RELEASE_LOCKOWNER4 =
	    &op->nfs_argop4_u.oprelease_lockowner;
	RELEASE_LOCKOWNER4res * const res_RELEASE_LOCKOWNER4 =
	    &resp->nfs_resop4_u.oprelease_lockowner;
	nfs_client_id_t *nfs_client_id;
	state_owner_t *lock_owner;
	state_nfs4_owner_name_t owner_name;
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 RELEASE_LOCKOWNER handler ----------------------");

	resp->resop = NFS4_OP_RELEASE_LOCKOWNER;
	res_RELEASE_LOCKOWNER4->status = NFS4_OK;

	if (data->minorversion > 0) {
		res_RELEASE_LOCKOWNER4->status = NFS4ERR_NOTSUPP;
		return res_RELEASE_LOCKOWNER4->status;
	}

	/* Check clientid */
	rc = nfs_client_id_get_confirmed(arg_RELEASE_LOCKOWNER4->lock_owner.
					 clientid,
					 &nfs_client_id);

	if (rc != CLIENT_ID_SUCCESS) {
		res_RELEASE_LOCKOWNER4->status = clientid_error_to_nfsstat(rc);
		goto out2;
	}

	PTHREAD_MUTEX_lock(&nfs_client_id->cid_mutex);

	if (!reserve_lease(nfs_client_id)) {
		PTHREAD_MUTEX_unlock(&nfs_client_id->cid_mutex);

		dec_client_id_ref(nfs_client_id);

		res_RELEASE_LOCKOWNER4->status = NFS4ERR_EXPIRED;
		goto out2;
	}

	PTHREAD_MUTEX_unlock(&nfs_client_id->cid_mutex);

	/* look up the lock owner and see if we can find it */
	convert_nfs4_lock_owner(&arg_RELEASE_LOCKOWNER4->lock_owner,
				&owner_name);

	/* If this lock owner is not known yet, allocate
	 * and set up a new one
	 */
	lock_owner = create_nfs4_owner(&owner_name,
				       nfs_client_id,
				       STATE_LOCK_OWNER_NFSV4,
				       NULL,
				       0,
				       NULL,
				       CARE_NOT);

	if (lock_owner == NULL) {
		/* the owner doesn't exist, we are done */
		LogDebug(COMPONENT_NFS_V4_LOCK, "lock owner does not exist");
		res_RELEASE_LOCKOWNER4->status = NFS4_OK;
		goto out1;
	}

	res_RELEASE_LOCKOWNER4->status = release_lock_owner(lock_owner);

	/* Release the reference to the lock owner acquired
	 * via create_nfs4_owner
	 */
	dec_state_owner_ref(lock_owner);

 out1:

	/* Update the lease before exit */
	PTHREAD_MUTEX_lock(&nfs_client_id->cid_mutex);

	update_lease(nfs_client_id);

	PTHREAD_MUTEX_unlock(&nfs_client_id->cid_mutex);

	dec_client_id_ref(nfs_client_id);

 out2:

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Leaving NFS v4 RELEASE_LOCKOWNER handler -----------------------");

	return res_RELEASE_LOCKOWNER4->status;
}				/* nfs4_op_release_lock_owner */
Example No. 22
0
/* group_data has a reference counter. If it goes to zero, it implies
 * that it is out of the cache (AVL trees) and should be freed. The
 * reference count is 1 when we put it into AVL trees. We decrement when
 * we take it out of AVL trees. It is also incremented when we pass it to
 * outsiders (uid2grp and friends) and decremented when they are done
 * (in uid2grp_unref()).
 *
 * When a group_data needs to be removed or expired after a certain
 * timeout, we take it out of the cache (AVL trees). When everyone using
 * the group_data is done, the refcount will go to zero, at which point
 * we free the group_data as well as the buffer holding the supplementary
 * groups.
 */
void uid2grp_hold_group_data(struct group_data *gdata)
{
	PTHREAD_MUTEX_lock(&gdata->lock);
	gdata->refcount++;
	PTHREAD_MUTEX_unlock(&gdata->lock);
}
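The hold/release protocol described in the comment reduces to a mutex-protected counter: the cache insertion and every hand-out to a caller each take a reference, and whichever thread drops the count to zero frees both the structure and the buffer holding the supplementary groups (compare uid2grp_release_group_data in Example No. 14). A self-contained sketch of the pair, with a hypothetical layout and free() in place of gsh_free():

#include <pthread.h>
#include <stdlib.h>
#include <sys/types.h>

struct gdata_sketch {
	pthread_mutex_t lock;
	unsigned int refcount;
	gid_t *groups;		/* supplementary group buffer */
};

static void gdata_hold(struct gdata_sketch *gd)
{
	pthread_mutex_lock(&gd->lock);
	gd->refcount++;
	pthread_mutex_unlock(&gd->lock);
}

static void gdata_release(struct gdata_sketch *gd)
{
	unsigned int refcount;

	pthread_mutex_lock(&gd->lock);
	refcount = --gd->refcount;	/* read the new count under the lock */
	pthread_mutex_unlock(&gd->lock);

	/* Only the thread that dropped the last reference frees things. */
	if (refcount == 0) {
		pthread_mutex_destroy(&gd->lock);
		free(gd->groups);
		free(gd);
	}
}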
int nfs4_op_create_session(struct nfs_argop4 *op, compound_data_t *data,
                           struct nfs_resop4 *resp)
{
    /* Result of looking up the clientid in the confirmed ID
       table */
    nfs_client_id_t *conf = NULL;	/* XXX these are not good names */
    /* Result of looking up the clientid in the unconfirmed ID
       table */
    nfs_client_id_t *unconf = NULL;
    /* The found clientid (either one of the preceding) */
    nfs_client_id_t *found = NULL;
    /* The found client record */
    nfs_client_record_t *client_record;
    /* The created session */
    nfs41_session_t *nfs41_session = NULL;
    /* Client supplied clientid */
    clientid4 clientid = 0;
    /* The client address as a string, for gratuitous logging */
    const char *str_client_addr = "(unknown)";
    /* The client name, for gratuitous logging */
    char str_client[CLIENTNAME_BUFSIZE];
    /* Display buffer for client name */
    struct display_buffer dspbuf_client = {
        sizeof(str_client), str_client, str_client
    };
    /* The clientid4 broken down into fields */
    char str_clientid4[DISPLAY_CLIENTID_SIZE];
    /* Display buffer for clientid4 */
    struct display_buffer dspbuf_clientid4 = {
        sizeof(str_clientid4), str_clientid4, str_clientid4
    };
    /* Return code from clientid calls */
    int i, rc = 0;
    /* Component for logging */
    log_components_t component = COMPONENT_CLIENTID;
    /* Abbreviated alias for arguments */
    CREATE_SESSION4args * const arg_CREATE_SESSION4 =
        &op->nfs_argop4_u.opcreate_session;
    /* Abbreviated alias for response */
    CREATE_SESSION4res * const res_CREATE_SESSION4 =
        &resp->nfs_resop4_u.opcreate_session;
    /* Abbreviated alias for successful response */
    CREATE_SESSION4resok * const res_CREATE_SESSION4ok =
        &res_CREATE_SESSION4->CREATE_SESSION4res_u.csr_resok4;

    /* Make sure str_client is always printable even
     * if log level changes midstream.
     */
    display_printf(&dspbuf_client, "(unknown)");
    display_reset_buffer(&dspbuf_client);

    if (op_ctx->client != NULL)
        str_client_addr = op_ctx->client->hostaddr_str;

    if (isDebug(COMPONENT_SESSIONS))
        component = COMPONENT_SESSIONS;

    resp->resop = NFS4_OP_CREATE_SESSION;
    res_CREATE_SESSION4->csr_status = NFS4_OK;
    clientid = arg_CREATE_SESSION4->csa_clientid;

    display_clientid(&dspbuf_clientid4, clientid);

    if (data->minorversion == 0)
        return res_CREATE_SESSION4->csr_status = NFS4ERR_INVAL;

    LogDebug(component,
             "CREATE_SESSION client addr=%s clientid=%s -------------------",
             str_client_addr, str_clientid4);

    /* First try to look up unconfirmed record */
    rc = nfs_client_id_get_unconfirmed(clientid, &unconf);

    if (rc == CLIENT_ID_SUCCESS) {
        client_record = unconf->cid_client_record;
        found = unconf;
    } else {
        rc = nfs_client_id_get_confirmed(clientid, &conf);
        if (rc != CLIENT_ID_SUCCESS) {
            /* No record whatsoever of this clientid */
            LogDebug(component,
                     "%s clientid=%s",
                     clientid_error_to_str(rc), str_clientid4);

            if (rc == CLIENT_ID_EXPIRED)
                rc = CLIENT_ID_STALE;

            res_CREATE_SESSION4->csr_status =
                clientid_error_to_nfsstat_no_expire(rc);

            return res_CREATE_SESSION4->csr_status;
        }

        client_record = conf->cid_client_record;
        found = conf;
    }

    PTHREAD_MUTEX_lock(&client_record->cr_mutex);

    inc_client_record_ref(client_record);

    if (isFullDebug(component)) {
        char str[LOG_BUFF_LEN];
        struct display_buffer dspbuf = {sizeof(str), str, str};

        display_client_record(&dspbuf, client_record);

        LogFullDebug(component,
                     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
                     str,
                     client_record->cr_confirmed_rec,
                     client_record->cr_unconfirmed_rec);
    }

    /* At this point one and only one of conf and unconf is
     * non-NULL, and found also references the single clientid
     * record that was found.
     */

    LogDebug(component,
             "CREATE_SESSION clientid=%s csa_sequence=%" PRIu32
             " clientid_cs_seq=%" PRIu32 " data_oppos=%d data_use_drc=%d",
             str_clientid4, arg_CREATE_SESSION4->csa_sequence,
             found->cid_create_session_sequence, data->oppos,
             data->use_drc);

    if (isFullDebug(component)) {
        char str[LOG_BUFF_LEN];
        struct display_buffer dspbuf = {sizeof(str), str, str};

        display_client_id_rec(&dspbuf, found);
        LogFullDebug(component, "Found %s", str);
    }

    data->use_drc = false;

    if (data->oppos == 0) {
        /* Special case: the request was issued without a preceding
         * OP_SEQUENCE
         */
        if ((arg_CREATE_SESSION4->csa_sequence + 1 ==
                found->cid_create_session_sequence)
                && (found->cid_create_session_slot.cache_used)) {
            data->use_drc = true;
            data->cached_res =
                &found->cid_create_session_slot.cached_result;

            res_CREATE_SESSION4->csr_status = NFS4_OK;

            dec_client_id_ref(found);

            LogDebug(component,
                     "CREATE_SESSION replay=%p special case",
                     data->cached_res);

            goto out;
        } else if (arg_CREATE_SESSION4->csa_sequence !=
                   found->cid_create_session_sequence) {
            res_CREATE_SESSION4->csr_status =
                NFS4ERR_SEQ_MISORDERED;

            dec_client_id_ref(found);

            LogDebug(component,
                     "CREATE_SESSION returning NFS4ERR_SEQ_MISORDERED");

            goto out;
        }

    }

    if (unconf != NULL) {
        /* First must match principal */
        if (!nfs_compare_clientcred(&unconf->cid_credential,
                                    &data->credential)) {
            if (isDebug(component)) {
                char *unconfirmed_addr = "(unknown)";

                if (unconf->gsh_client != NULL)
                    unconfirmed_addr =
                        unconf->gsh_client->hostaddr_str;

                LogDebug(component,
                         "Unconfirmed ClientId %s->'%s': Principals do not match... unconfirmed addr=%s Return NFS4ERR_CLID_INUSE",
                         str_clientid4,
                         str_client_addr,
                         unconfirmed_addr);
            }

            dec_client_id_ref(unconf);
            res_CREATE_SESSION4->csr_status = NFS4ERR_CLID_INUSE;
            goto out;
        }
    }

    if (conf != NULL) {
        if (isDebug(component) && conf != NULL)
            display_clientid_name(&dspbuf_client, conf);

        /* First must match principal */
        if (!nfs_compare_clientcred(&conf->cid_credential,
                                    &data->credential)) {
            if (isDebug(component)) {
                char *confirmed_addr = "(unknown)";

                if (conf->gsh_client != NULL)
                    confirmed_addr =
                        conf->gsh_client->hostaddr_str;

                LogDebug(component,
                         "Confirmed ClientId %s->%s addr=%s: Principals do not match... confirmed addr=%s Return NFS4ERR_CLID_INUSE",
                         str_clientid4, str_client,
                         str_client_addr, confirmed_addr);
            }

            /* Release our reference to the confirmed clientid. */
            dec_client_id_ref(conf);
            res_CREATE_SESSION4->csr_status = NFS4ERR_CLID_INUSE;
            goto out;
        }

        /* In this case, the record was confirmed; proceed with
           CREATE_SESSION */
    }

    /* We don't need to do any further principal checks, we can't
     * have a confirmed clientid record with a different principal
     * than the unconfirmed record.
     */

    /* At this point, we need to try and create the session before
     * we modify the confirmed and/or unconfirmed clientid
     * records.
     */

    /* Check flags value (test CSESS15) */
    if (arg_CREATE_SESSION4->csa_flags &
            ~(CREATE_SESSION4_FLAG_PERSIST |
              CREATE_SESSION4_FLAG_CONN_BACK_CHAN |
              CREATE_SESSION4_FLAG_CONN_RDMA)) {
        LogDebug(component,
                 "Invalid create session flags %" PRIu32,
                 arg_CREATE_SESSION4->csa_flags);
        dec_client_id_ref(found);
        res_CREATE_SESSION4->csr_status = NFS4ERR_INVAL;
        goto out;
    }

    /* Record session related information at the right place */
    nfs41_session = pool_alloc(nfs41_session_pool);

    if (nfs41_session == NULL) {
        LogCrit(component, "Could not allocate memory for a session");
        dec_client_id_ref(found);
        res_CREATE_SESSION4->csr_status = NFS4ERR_SERVERFAULT;
        goto out;
    }

    nfs41_session->clientid = clientid;
    nfs41_session->clientid_record = found;
    nfs41_session->refcount = 2;	/* sentinel ref + call path ref */
    nfs41_session->fore_channel_attrs =
        arg_CREATE_SESSION4->csa_fore_chan_attrs;
    nfs41_session->back_channel_attrs =
        arg_CREATE_SESSION4->csa_back_chan_attrs;
    nfs41_session->xprt = data->req->rq_xprt;
    nfs41_session->flags = false;
    nfs41_session->cb_program = 0;
    PTHREAD_MUTEX_init(&nfs41_session->cb_mutex, NULL);
    PTHREAD_COND_init(&nfs41_session->cb_cond, NULL);
    for (i = 0; i < NFS41_NB_SLOTS; i++)
        PTHREAD_MUTEX_init(&nfs41_session->slots[i].lock, NULL);

    /* Take reference to clientid record on behalf the session. */
    inc_client_id_ref(found);

    /* add to head of session list (encapsulate?) */
    PTHREAD_MUTEX_lock(&found->cid_mutex);
    glist_add(&found->cid_cb.v41.cb_session_list,
              &nfs41_session->session_link);
    PTHREAD_MUTEX_unlock(&found->cid_mutex);

    /* Set ca_maxrequests */
    nfs41_session->fore_channel_attrs.ca_maxrequests = NFS41_NB_SLOTS;
    nfs41_Build_sessionid(&clientid, nfs41_session->session_id);

    res_CREATE_SESSION4ok->csr_sequence = arg_CREATE_SESSION4->csa_sequence;

    /* return the input for want of something better (will
     * change in later versions)
     */
    res_CREATE_SESSION4ok->csr_fore_chan_attrs =
        nfs41_session->fore_channel_attrs;
    res_CREATE_SESSION4ok->csr_back_chan_attrs =
        nfs41_session->back_channel_attrs;
    res_CREATE_SESSION4ok->csr_flags = 0;

    memcpy(res_CREATE_SESSION4ok->csr_sessionid,
           nfs41_session->session_id,
           NFS4_SESSIONID_SIZE);

    /* Create Session replay cache */
    data->cached_res = &found->cid_create_session_slot.cached_result;
    found->cid_create_session_slot.cache_used = true;

    LogDebug(component, "CREATE_SESSION replay=%p", data->cached_res);

    if (!nfs41_Session_Set(nfs41_session)) {
        LogDebug(component, "Could not insert session into table");

        /* Release the session resources by dropping our reference
         * and the sentinel reference.
         */
        dec_session_ref(nfs41_session);
        dec_session_ref(nfs41_session);

        /* Decrement our reference to the clientid record */
        dec_client_id_ref(found);

        /* Maybe a more precise status would be better */
        res_CREATE_SESSION4->csr_status = NFS4ERR_SERVERFAULT;
        goto out;
    }

    /* Make sure we have a reference to the confirmed clientid
       record if any */
    if (conf == NULL) {
        conf = client_record->cr_confirmed_rec;

        if (isDebug(component) && conf != NULL)
            display_clientid_name(&dspbuf_client, conf);

        /* Need a reference to the confirmed record for below */
        if (conf != NULL)
            inc_client_id_ref(conf);
    }

    if (conf != NULL && conf->cid_clientid != clientid) {
        /* Old confirmed record - need to expire it */
        if (isDebug(component)) {
            char str[LOG_BUFF_LEN];
            struct display_buffer dspbuf = {sizeof(str), str, str};

            display_client_id_rec(&dspbuf, conf);
            LogDebug(component, "Expiring %s", str);
        }

        /* Expire clientid and release our reference. */
        nfs_client_id_expire(conf, false);
        dec_client_id_ref(conf);
        conf = NULL;
    }

    if (conf != NULL) {
        /* At this point we are updating the confirmed
         * clientid.  Update the confirmed record from the
         * unconfirmed record.
         */
        display_clientid(&dspbuf_clientid4, conf->cid_clientid);
        LogDebug(component,
                 "Updating clientid %s->%s cb_program=%u",
                 str_clientid4, str_client,
                 arg_CREATE_SESSION4->csa_cb_program);

        if (unconf != NULL) {
            /* unhash the unconfirmed clientid record */
            remove_unconfirmed_client_id(unconf);
            /* Release our reference to the unconfirmed entry */
            dec_client_id_ref(unconf);
        }

        if (isDebug(component)) {
            char str[LOG_BUFF_LEN];
            struct display_buffer dspbuf = {sizeof(str), str, str};

            display_client_id_rec(&dspbuf, conf);
            LogDebug(component, "Updated %s", str);
        }
    } else {
        /* This is a new clientid */
        if (isFullDebug(component)) {
            char str[LOG_BUFF_LEN];
            struct display_buffer dspbuf = {sizeof(str), str, str};

            display_client_id_rec(&dspbuf, unconf);
            LogFullDebug(component, "Confirming new %s", str);
        }

        rc = nfs_client_id_confirm(unconf, component);

        if (rc != CLIENT_ID_SUCCESS) {
            res_CREATE_SESSION4->csr_status =
                clientid_error_to_nfsstat_no_expire(rc);

            /* Need to destroy the session */
            if (!nfs41_Session_Del(nfs41_session->session_id))
                LogDebug(component,
                         "Oops nfs41_Session_Del failed");

            /* Release our reference to the unconfirmed record */
            dec_client_id_ref(unconf);
            goto out;
        }
        nfs4_chk_clid(unconf);

        conf = unconf;
        unconf = NULL;

        if (isDebug(component)) {
            char str[LOG_BUFF_LEN];
            struct display_buffer dspbuf = {sizeof(str), str, str};

            display_client_id_rec(&dspbuf, conf);
            LogDebug(component, "Confirmed %s", str);
        }
    }
    conf->cid_create_session_sequence++;

    /* Bump the lease timer */
    conf->cid_last_renew = time(NULL);

    /* Release our reference to the confirmed record */
    dec_client_id_ref(conf);

    if (isFullDebug(component)) {
        char str[LOG_BUFF_LEN];
        struct display_buffer dspbuf = {sizeof(str), str, str};

        display_client_record(&dspbuf, client_record);
        LogFullDebug(component,
                     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
                     str,
                     client_record->cr_confirmed_rec,
                     client_record->cr_unconfirmed_rec);
    }

    /* Handle the creation of the back channel, if the client
       requested one. */
    if (arg_CREATE_SESSION4->csa_flags &
            CREATE_SESSION4_FLAG_CONN_BACK_CHAN) {
        nfs41_session->cb_program = arg_CREATE_SESSION4->csa_cb_program;
        if (nfs_rpc_create_chan_v41(
                    nfs41_session,
                    arg_CREATE_SESSION4->csa_sec_parms.csa_sec_parms_len,
                    arg_CREATE_SESSION4->csa_sec_parms.csa_sec_parms_val)
                == 0) {
            res_CREATE_SESSION4ok->csr_flags |=
                CREATE_SESSION4_FLAG_CONN_BACK_CHAN;
        }
    }

    if (isDebug(component)) {
        char str[LOG_BUFF_LEN];
        struct display_buffer dspbuf = {sizeof(str), str, str};

        display_session(&dspbuf, nfs41_session);

        LogDebug(component,
                 "success %s csa_flags 0x%X csr_flags 0x%X",
                 str, arg_CREATE_SESSION4->csa_flags,
                 res_CREATE_SESSION4ok->csr_flags);
    }

    /* Release our reference to the session */
    dec_session_ref(nfs41_session);

    /* Successful exit */
    res_CREATE_SESSION4->csr_status = NFS4_OK;

out:
    PTHREAD_MUTEX_unlock(&client_record->cr_mutex);
    /* Release our reference to the client record and return */
    dec_client_record_ref(client_record);
    return res_CREATE_SESSION4->csr_status;
}
int nfs4_op_setclientid_confirm(struct nfs_argop4 *op, compound_data_t *data,
				struct nfs_resop4 *resp)
{
	SETCLIENTID_CONFIRM4args * const arg_SETCLIENTID_CONFIRM4 =
	    &op->nfs_argop4_u.opsetclientid_confirm;
	SETCLIENTID_CONFIRM4res * const res_SETCLIENTID_CONFIRM4 =
	    &resp->nfs_resop4_u.opsetclientid_confirm;
	nfs_client_id_t *conf = NULL;
	nfs_client_id_t *unconf = NULL;
	nfs_client_record_t *client_record;
	clientid4 clientid = 0;
	char str_verifier[NFS4_VERIFIER_SIZE * 2 + 1];
	const char *str_client_addr = "(unknown)";
	/* The client name, for gratuitous logging */
	char str_client[CLIENTNAME_BUFSIZE];
	/* Display buffer for client name */
	struct display_buffer dspbuf_client = {
		sizeof(str_client), str_client, str_client};
	/* The clientid4 broken down into fields */
	char str_clientid4[DISPLAY_CLIENTID_SIZE];
	/* Display buffer for clientid4 */
	struct display_buffer dspbuf_clientid4 = {
		sizeof(str_clientid4), str_clientid4, str_clientid4};
	int rc;

	/* Make sure str_client is always printable even
	 * if log level changes midstream.
	 */
	display_printf(&dspbuf_client, "(unknown)");
	display_reset_buffer(&dspbuf_client);

	resp->resop = NFS4_OP_SETCLIENTID_CONFIRM;
	res_SETCLIENTID_CONFIRM4->status = NFS4_OK;
	clientid = arg_SETCLIENTID_CONFIRM4->clientid;

	display_clientid(&dspbuf_clientid4, clientid);

	if (data->minorversion > 0) {
		res_SETCLIENTID_CONFIRM4->status = NFS4ERR_NOTSUPP;
		return res_SETCLIENTID_CONFIRM4->status;
	}

	if (op_ctx->client != NULL)
		str_client_addr = op_ctx->client->hostaddr_str;

	if (isDebug(COMPONENT_CLIENTID)) {
		sprint_mem(str_verifier,
			   arg_SETCLIENTID_CONFIRM4->setclientid_confirm,
			   NFS4_VERIFIER_SIZE);
	} else {
		str_verifier[0] = '\0';
	}

	LogDebug(COMPONENT_CLIENTID,
		 "SETCLIENTID_CONFIRM client addr=%s clientid=%s setclientid_confirm=%s",
		 str_client_addr, str_clientid4, str_verifier);

	/* First try to look up unconfirmed record */
	rc = nfs_client_id_get_unconfirmed(clientid, &unconf);

	if (rc == CLIENT_ID_SUCCESS) {
		client_record = unconf->cid_client_record;

		if (isFullDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);
			LogFullDebug(COMPONENT_CLIENTID, "Found %s", str);
		}
	} else {
		rc = nfs_client_id_get_confirmed(clientid, &conf);

		if (rc != CLIENT_ID_SUCCESS) {
			/* No record whatsoever of this clientid */
			LogDebug(COMPONENT_CLIENTID,
				 "%s clientid = %s",
				 clientid_error_to_str(rc), str_clientid4);
			res_SETCLIENTID_CONFIRM4->status =
			    clientid_error_to_nfsstat_no_expire(rc);

			return res_SETCLIENTID_CONFIRM4->status;
		}

		client_record = conf->cid_client_record;

		if (isFullDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogFullDebug(COMPONENT_CLIENTID, "Found %s", str);
		}
	}

	PTHREAD_MUTEX_lock(&client_record->cr_mutex);

	inc_client_record_ref(client_record);

	if (isFullDebug(COMPONENT_CLIENTID)) {
		char str[LOG_BUFF_LEN] = "\0";
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_record(&dspbuf, client_record);

		LogFullDebug(COMPONENT_CLIENTID,
			     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
			     str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	/* At this point one and only one of conf and unconf is non-NULL */

	if (unconf != NULL) {
		/* First must match principal */
		if (!nfs_compare_clientcred(&unconf->cid_credential,
					    &data->credential)
		    || op_ctx->client == NULL
		    || unconf->gsh_client == NULL
		    || op_ctx->client != unconf->gsh_client) {
			if (isDebug(COMPONENT_CLIENTID)) {
				char *unconfirmed_addr = "(unknown)";

				if (unconf->gsh_client != NULL)
					unconfirmed_addr =
					    unconf->gsh_client->hostaddr_str;

				LogDebug(COMPONENT_CLIENTID,
					 "Unconfirmed ClientId %s->'%s': Principals do not match... unconfirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 str_clientid4,
					 str_client_addr,
					 unconfirmed_addr);
			}

			res_SETCLIENTID_CONFIRM4->status = NFS4ERR_CLID_INUSE;
			dec_client_id_ref(unconf);
			goto out;
		} else if (unconf->cid_confirmed == CONFIRMED_CLIENT_ID &&
			   memcmp(unconf->cid_verifier,
				  arg_SETCLIENTID_CONFIRM4->setclientid_confirm,
				  NFS4_VERIFIER_SIZE) == 0) {
			/* We must have raced with another
			   SETCLIENTID_CONFIRM */
			if (isDebug(COMPONENT_CLIENTID)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
					sizeof(str), str, str};

				display_client_id_rec(&dspbuf, unconf);
				LogDebug(COMPONENT_CLIENTID,
					 "Race against confirm for %s", str);
			}

			res_SETCLIENTID_CONFIRM4->status = NFS4_OK;
			dec_client_id_ref(unconf);

			goto out;
		} else if (unconf->cid_confirmed != UNCONFIRMED_CLIENT_ID) {
			/* We raced with another thread that dealt
			 * with this unconfirmed record.  Release our
			 * reference, and pretend we didn't find a
			 * record.
			 */
			if (isDebug(COMPONENT_CLIENTID)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
					sizeof(str), str, str};

				display_client_id_rec(&dspbuf, unconf);

				LogDebug(COMPONENT_CLIENTID,
					 "Race against expire for %s", str);
			}

			res_SETCLIENTID_CONFIRM4->status =
			    NFS4ERR_STALE_CLIENTID;

			dec_client_id_ref(unconf);

			goto out;
		}
	}

	if (conf != NULL) {
		if (isDebug(COMPONENT_CLIENTID) && conf != NULL)
			display_clientid_name(&dspbuf_client, conf);

		/* First must match principal */
		if (!nfs_compare_clientcred(&conf->cid_credential,
					    &data->credential)
		    || op_ctx->client == NULL
		    || conf->gsh_client == NULL
		    || op_ctx->client != conf->gsh_client) {
			if (isDebug(COMPONENT_CLIENTID)) {
				char *confirmed_addr = "(unknown)";

				if (conf->gsh_client != NULL)
					confirmed_addr =
					    conf->gsh_client->hostaddr_str;

				LogDebug(COMPONENT_CLIENTID,
					 "Confirmed ClientId %s->%s addr=%s: Principals do not match...  confirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 str_clientid4,
					 str_client,
					 str_client_addr,
					 confirmed_addr);
			}

			res_SETCLIENTID_CONFIRM4->status = NFS4ERR_CLID_INUSE;
		} else if (memcmp(
				conf->cid_verifier,
				arg_SETCLIENTID_CONFIRM4->setclientid_confirm,
				NFS4_VERIFIER_SIZE) == 0) {
			/* In this case, the record was confirmed and
			 * we have received a retry
			 */
			if (isDebug(COMPONENT_CLIENTID)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
					sizeof(str), str, str};

				display_client_id_rec(&dspbuf, conf);
				LogDebug(COMPONENT_CLIENTID,
					 "Retry confirm for %s", str);
			}

			res_SETCLIENTID_CONFIRM4->status = NFS4_OK;
		} else {
			/* This is a case not covered... Return
			 * NFS4ERR_CLID_INUSE
			 */
			if (isDebug(COMPONENT_CLIENTID)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
					sizeof(str), str, str};
				char str_conf_verifier[NFS4_VERIFIER_SIZE * 2 +
						       1];

				sprint_mem(str_conf_verifier,
					   conf->cid_verifier,
					   NFS4_VERIFIER_SIZE);

				display_client_id_rec(&dspbuf, conf);

				LogDebug(COMPONENT_CLIENTID,
					 "Confirm verifier=%s doesn't match verifier=%s for %s",
					 str_conf_verifier, str_verifier, str);
			}

			res_SETCLIENTID_CONFIRM4->status = NFS4ERR_CLID_INUSE;
		}

		/* Release our reference to the confirmed clientid. */
		dec_client_id_ref(conf);
		goto out;
	}

	/* We don't need to do any further principal checks, we can't
	 * have a confirmed clientid record with a different principal
	 * than the unconfirmed record.  Also, at this point, we have
	 * a matching unconfirmed clientid (unconf != NULL and conf
	 * == NULL).
	 */

	/* Make sure we have a reference to the confirmed clientid
	 * record if any
	 */
	if (conf == NULL) {
		conf = client_record->cr_confirmed_rec;

		if (isDebug(COMPONENT_CLIENTID) && conf != NULL)
			display_clientid_name(&dspbuf_client, conf);

		/* Need a reference to the confirmed record for below */
		if (conf != NULL)
			inc_client_id_ref(conf);
	}

	if (conf != NULL && conf->cid_clientid != clientid) {
		/* Old confirmed record - need to expire it */
		if (isDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogDebug(COMPONENT_CLIENTID, "Expiring %s", str);
		}

		/* Expire clientid and release our reference. */
		nfs_client_id_expire(conf, false);

		dec_client_id_ref(conf);

		conf = NULL;
	}

	if (conf != NULL) {
		/* At this point we are updating the confirmed
		 * clientid.  Update the confirmed record from the
		 * unconfirmed record.
		 */
		if (isFullDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);
			LogFullDebug(COMPONENT_CLIENTID, "Updating from %s",
				     str);
		}

		/* Copy callback information into confirmed clientid record */
		memcpy(conf->cid_cb.v40.cb_client_r_addr,
		       unconf->cid_cb.v40.cb_client_r_addr,
		       sizeof(conf->cid_cb.v40.cb_client_r_addr));

		conf->cid_cb.v40.cb_addr = unconf->cid_cb.v40.cb_addr;
		conf->cid_cb.v40.cb_program = unconf->cid_cb.v40.cb_program;

		conf->cid_cb.v40.cb_callback_ident =
		    unconf->cid_cb.v40.cb_callback_ident;

		nfs_rpc_destroy_chan(&conf->cid_cb.v40.cb_chan);

		memcpy(conf->cid_verifier, unconf->cid_verifier,
		       NFS4_VERIFIER_SIZE);

		/* unhash the unconfirmed clientid record */
		remove_unconfirmed_client_id(unconf);

		/* Release our reference to the unconfirmed entry */
		dec_client_id_ref(unconf);

		if (isDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogDebug(COMPONENT_CLIENTID, "Updated %s", str);
		}
		/* Check and update call back channel state */
		if (nfs_param.nfsv4_param.allow_delegations &&
		    nfs_test_cb_chan(conf) != RPC_SUCCESS) {
			set_cb_chan_down(conf, true);
			LogCrit(COMPONENT_CLIENTID,
				"setclid confirm: Callback channel is down");
		} else {
			set_cb_chan_down(conf, false);
			LogDebug(COMPONENT_CLIENTID,
				"setclid confirm: Callback channel is UP");
		}

		/* Release our reference to the confirmed clientid. */
		dec_client_id_ref(conf);
	} else {
		/* This is a new clientid */
		if (isFullDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);
			LogFullDebug(COMPONENT_CLIENTID,
				     "Confirming new %s",
				     str);
		}

		rc = nfs_client_id_confirm(unconf, COMPONENT_CLIENTID);

		if (rc != CLIENT_ID_SUCCESS) {
			res_SETCLIENTID_CONFIRM4->status =
			    clientid_error_to_nfsstat_no_expire(rc);

			LogEvent(COMPONENT_CLIENTID,
				 "FAILED to confirm client");

			/* Release our reference to the unconfirmed record */
			dec_client_id_ref(unconf);

			goto out;
		}

		/* check if the client can perform reclaims */
		nfs4_chk_clid(unconf);

		if (isDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);

			LogDebug(COMPONENT_CLIENTID, "Confirmed %s", str);
		}

		/* Check and update callback channel state */
		if (nfs_param.nfsv4_param.allow_delegations &&
		    nfs_test_cb_chan(unconf) != RPC_SUCCESS) {
			set_cb_chan_down(unconf, true);
			LogCrit(COMPONENT_CLIENTID,
				"setclid confirm: Callback channel is down");
		} else {
			set_cb_chan_down(unconf, false);
			LogDebug(COMPONENT_CLIENTID,
				"setclid confirm: Callback channel is UP");
		}

		/* Release our reference to the now confirmed record */
		dec_client_id_ref(unconf);
	}

	if (isFullDebug(COMPONENT_CLIENTID)) {
		char str[LOG_BUFF_LEN] = "\0";
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_record(&dspbuf, client_record);
		LogFullDebug(COMPONENT_CLIENTID,
			     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
			     str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	/* Successful exit */
	res_SETCLIENTID_CONFIRM4->status = NFS4_OK;

 out:

	PTHREAD_MUTEX_unlock(&client_record->cr_mutex);
	/* Release our reference to the client record and return */
	dec_client_record_ref(client_record);
	return res_SETCLIENTID_CONFIRM4->status;
}
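
The confirm path above probes the callback channel with identical logic in both the update and new-clientid branches. A minimal sketch of factoring that probe into one helper, using only identifiers that appear in the handler above:

/* Hedged sketch only: the callback-channel probe repeated in both
 * branches of SETCLIENTID_CONFIRM, factored into a helper. */
static void setclid_probe_cb_chan(nfs_client_id_t *clid)
{
	if (nfs_param.nfsv4_param.allow_delegations &&
	    nfs_test_cb_chan(clid) != RPC_SUCCESS) {
		set_cb_chan_down(clid, true);
		LogCrit(COMPONENT_CLIENTID,
			"setclid confirm: Callback channel is down");
	} else {
		set_cb_chan_down(clid, false);
		LogDebug(COMPONENT_CLIENTID,
			 "setclid confirm: Callback channel is UP");
	}
}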
Ejemplo n.º 25
0
int nfs4_op_lockt(struct nfs_argop4 *op, compound_data_t *data,
		  struct nfs_resop4 *resp)
{
	/* Alias for arguments */
	LOCKT4args * const arg_LOCKT4 = &op->nfs_argop4_u.oplockt;
	/* Alias for response */
	LOCKT4res * const res_LOCKT4 = &resp->nfs_resop4_u.oplockt;
	/* Return code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Client id record */
	nfs_client_id_t *clientid = NULL;
	/* Lock owner name */
	state_nfs4_owner_name_t owner_name;
	/* Lock owner record */
	state_owner_t *lock_owner = NULL;
	/* Owner of conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* Description of lock to test */
	fsal_lock_param_t lock_desc = { FSAL_NO_LOCK, 0, 0 };
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* return code from id confirm calls */
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCKT handler ----------------------------");

	/* Initialize to sane default */
	resp->resop = NFS4_OP_LOCKT;

	res_LOCKT4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCKT4->status != NFS4_OK)
		return res_LOCKT4->status;


	/* Lock length should not be 0 */
	if (arg_LOCKT4->length == 0LL) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	if (nfs_in_grace()) {
		res_LOCKT4->status = NFS4ERR_GRACE;
		return res_LOCKT4->status;
	}

	/* Convert lock parameters to internal types */
	switch (arg_LOCKT4->locktype) {
	case READ_LT:
	case READW_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITE_LT:
	case WRITEW_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;
	default:
		LogDebug(COMPONENT_NFS_V4_LOCK,
			 "Invalid lock type");
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	lock_desc.lock_start = arg_LOCKT4->offset;

	if (arg_LOCKT4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCKT4->length;
	else
		lock_desc.lock_length = 0;

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64-bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */

	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	/* Check clientid */
	rc = nfs_client_id_get_confirmed(data->minorversion == 0 ?
						arg_LOCKT4->owner.clientid :
						data->session->clientid,
					 &clientid);

	if (rc != CLIENT_ID_SUCCESS) {
		res_LOCKT4->status = clientid_error_to_nfsstat(rc);
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	if (data->minorversion == 0 && !reserve_lease(clientid)) {
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
		dec_client_id_ref(clientid);
		res_LOCKT4->status = NFS4ERR_EXPIRED;
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	/* Is this lock_owner known ? */
	convert_nfs4_lock_owner(&arg_LOCKT4->owner, &owner_name);

	/* If this lock owner is not known yet, allocate and set up a new one */
	lock_owner = create_nfs4_owner(&owner_name,
				       clientid,
				       STATE_LOCK_OWNER_NFSV4,
				       NULL,
				       0,
				       NULL,
				       CARE_ALWAYS);

	LogStateOwner("Lock: ", lock_owner);

	if (lock_owner == NULL) {
		LogEvent(COMPONENT_NFS_V4_LOCK,
			 "LOCKT unable to create lock owner");
		res_LOCKT4->status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCKT",
		data->current_entry, lock_owner, &lock_desc);

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and test
	 * the lock in SAL (and FSAL).
	 */

	state_status = state_test(data->current_entry,
				  lock_owner,
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status == STATE_LOCK_CONFLICT) {
		/* A conflicting lock from a different lock_owner,
		 * returns NFS4ERR_DENIED
		 */
		LogStateOwner("Conflict: ", conflict_owner);

		Process_nfs4_conflict(&res_LOCKT4->LOCKT4res_u.denied,
				      conflict_owner,
				      &conflict_desc);
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	/* Release the lock owner reference */
	dec_state_owner_ref(lock_owner);

	/* Return result */
	res_LOCKT4->status = nfs4_Errno_state(state_status);

 out:

	/* Update the lease before exit */
	if (data->minorversion == 0) {
		PTHREAD_MUTEX_lock(&clientid->cid_mutex);
		update_lease(clientid);
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
	}

	dec_client_id_ref(clientid);

	return res_LOCKT4->status;
}				/* nfs4_op_lockt */
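
The overflow check in the LOCKT handler relies on the identity off + len > 2^64 - 1 being equivalent to len > (2^64 - 1) - off, which avoids computing a sum that cannot be represented in 64 bits. A self-contained sketch of the same arithmetic with plain uint64_t values (STATE_LOCK_OFFSET_EOF plays the role of UINT64_MAX in the handler):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the handler's check: reject when start + length would
 * exceed 2^64 - 1.  length == 0 means "to EOF" in the handlers above
 * and never overflows. */
static bool lock_range_overflows(uint64_t start, uint64_t length)
{
	if (length == 0)
		return false;
	return length > UINT64_MAX - start;
}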
Ejemplo n.º 26
0
/**
 * @brief Create an NFSv4 state owner
 *
 * @param[in]  name          Owner name
 * @param[in]  clientid      Client record
 * @param[in]  type          Owner type
 * @param[in]  related_owner For lock owners, the related open owner
 * @param[in]  init_seqid    The starting seqid (for NFSv4.0)
 * @param[out] pisnew        Whether the owner actually is new
 * @param[in]  care          Care flag (to unify v3/v4 owners?)
 *
 * @return A new state owner or NULL.
 */
state_owner_t *create_nfs4_owner(state_nfs4_owner_name_t *name,
				 nfs_client_id_t *clientid,
				 state_owner_type_t type,
				 state_owner_t *related_owner,
				 unsigned int init_seqid, bool_t *pisnew,
				 care_t care)
{
	state_owner_t key;
	state_owner_t *owner;
	bool_t isnew;

	/* set up the content of the open_owner */
	memset(&key, 0, sizeof(key));

	key.so_type = type;
	key.so_owner.so_nfs4_owner.so_seqid = init_seqid;
	key.so_owner.so_nfs4_owner.so_related_owner = related_owner;
	key.so_owner.so_nfs4_owner.so_clientid = clientid->cid_clientid;
	key.so_owner.so_nfs4_owner.so_clientrec = clientid;
	key.so_owner_len = name->son_owner_len;
	key.so_owner_val = name->son_owner_val;
	key.so_owner.so_nfs4_owner.so_resp.resop = NFS4_OP_ILLEGAL;
	key.so_owner.so_nfs4_owner.so_args.argop = NFS4_OP_ILLEGAL;
	key.so_refcount = 1;
#if 0
	/* WAITING FOR COMMUNITY FIX */
	/* setting lock owner confirmed */
	if (type == STATE_LOCK_OWNER_NFSV4)
		key.so_owner.so_nfs4_owner.so_confirmed = 1;
#endif

	if (isFullDebug(COMPONENT_STATE)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_owner(&dspbuf, &key);
		LogFullDebug(COMPONENT_STATE, "Key=%s", str);
	}

	owner = get_state_owner(care, &key, init_nfs4_owner, &isnew);

	if (owner != NULL && related_owner != NULL) {
		PTHREAD_MUTEX_lock(&owner->so_mutex);
		/* Check whether a related owner is already attached. */
		if (owner->so_owner.so_nfs4_owner.so_related_owner == NULL) {
			/* Attach related owner to owner now that we know it. */
			inc_state_owner_ref(related_owner);
			owner->so_owner.so_nfs4_owner.so_related_owner =
			    related_owner;
		} else if (owner->so_owner.so_nfs4_owner.so_related_owner !=
			   related_owner) {
			char str1[LOG_BUFF_LEN / 2];
			char str2[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf1 = {
						sizeof(str1), str1, str1};
			struct display_buffer dspbuf2 = {
						sizeof(str2), str2, str2};

			display_owner(&dspbuf1, related_owner);
			display_owner(&dspbuf2, owner);

			LogCrit(COMPONENT_NFS_V4_LOCK,
				"Related {%s} doesn't match for {%s}", str1,
				str2);
			PTHREAD_MUTEX_unlock(&owner->so_mutex);

			/* Release the reference to the owner. */
			dec_state_owner_ref(owner);

			return NULL;
		}
		PTHREAD_MUTEX_unlock(&owner->so_mutex);
	}

	if (!isnew && owner != NULL && pisnew != NULL) {
		if (isDebug(COMPONENT_STATE)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_owner(&dspbuf, owner);

			LogDebug(COMPONENT_STATE,
				 "Previously known owner {%s} is being reused",
				 str);
		}
	}

	if (pisnew != NULL)
		*pisnew = isnew;

	return owner;
}
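
A minimal usage sketch for create_nfs4_owner(), following the LOCKT call site earlier in this file; owner_name and clientid are assumed to be already populated, and the wrapper name is hypothetical:

/* Hedged usage sketch: arguments mirror the nfs4_op_lockt call above
 * (no related open owner, starting seqid 0, CARE_ALWAYS). */
static state_owner_t *lookup_or_create_lock_owner(
				state_nfs4_owner_name_t *owner_name,
				nfs_client_id_t *clientid)
{
	bool_t isnew;
	state_owner_t *owner;

	owner = create_nfs4_owner(owner_name, clientid,
				  STATE_LOCK_OWNER_NFSV4,
				  NULL,		/* related open owner */
				  0,		/* initial seqid */
				  &isnew,
				  CARE_ALWAYS);

	/* NULL means allocation failure or a related-owner mismatch;
	 * callers above map this to NFS4ERR_SERVERFAULT or
	 * NFS4ERR_RESOURCE.  The returned reference is dropped with
	 * dec_state_owner_ref() once the caller is done with it. */
	return owner;
}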
Ejemplo n.º 27
0
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t *data,
		 struct nfs_resop4 *resp)
{
	/* Shorter alias for arguments */
	LOCK4args * const arg_LOCK4 = &op->nfs_argop4_u.oplock;
	/* Shorter alias for response */
	LOCK4res * const res_LOCK4 = &resp->nfs_resop4_u.oplock;
	/* Status code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Data for lock state to be created */
	union state_data candidate_data;
	/* Status code for protocol functions */
	nfsstat4 nfs_status = 0;
	/* Created or found lock state */
	state_t *lock_state = NULL;
	/* Associated open state */
	state_t *state_open = NULL;
	/* The lock owner */
	state_owner_t *lock_owner = NULL;
	/* The open owner */
	state_owner_t *open_owner = NULL;
	/* The owner of a conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* The owner in which to store the response for NFSv4.0 */
	state_owner_t *resp_owner = NULL;
	/* Sequence ID, for NFSv4.0 */
	seqid4 seqid = 0;
	/* The client performing these operations */
	nfs_client_id_t *clientid = NULL;
	/* Name for the lock owner */
	state_nfs4_owner_name_t owner_name;
	/* Description of requested lock */
	fsal_lock_param_t lock_desc;
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* Whether to block */
	state_blocking_t blocking = STATE_NON_BLOCKING;
	/* Tracking data for the lock state */
	struct state_refer refer;
	/* Indicate whether we let the FSAL handle requests during grace. */
	bool_t fsal_grace = false;
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCK handler ----------------------");

	/* Initialize to sane starting values */
	resp->resop = NFS4_OP_LOCK;
	res_LOCK4->status = NFS4_OK;

	/* Record the sequence info */
	if (data->minorversion > 0) {
		memcpy(refer.session,
		       data->session->session_id,
		       sizeof(sessionid4));
		refer.sequence = data->sequence;
		refer.slot = data->slot;
	}

	res_LOCK4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCK4->status != NFS4_OK)
		return res_LOCK4->status;

	/* Convert lock parameters to internal types */
	switch (arg_LOCK4->locktype) {
	case READW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case READ_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITEW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case WRITE_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK,
			 "Invalid lock type");
		res_LOCK4->status = NFS4ERR_INVAL;
		return res_LOCK4->status;
	}

	lock_desc.lock_start = arg_LOCK4->offset;
	lock_desc.lock_sle_type = FSAL_POSIX_LOCK;
	lock_desc.lock_reclaim = arg_LOCK4->reclaim;

	if (arg_LOCK4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCK4->length;
	else
		lock_desc.lock_length = 0;

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Check stateid correctness and get pointer to state */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.open_owner.open_stateid,
			data->current_obj,
			&state_open,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.open_owner.open_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				open_owner = get_state_owner_ref(state_open);

				LogStateOwner("Open: ", open_owner);

				if (open_owner != NULL) {
					resp_owner = open_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.open_owner.open_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for open owner");
			return res_LOCK4->status;
		}

		open_owner = get_state_owner_ref(state_open);

		LogStateOwner("Open: ", open_owner);

		if (open_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		lock_state = NULL;
		lock_owner = NULL;
		resp_owner = open_owner;
		seqid = arg_LOCK4->locker.locker4_u.open_owner.open_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK New lock owner from open owner",
			data->current_obj, open_owner, &lock_desc);

		/* Check if the clientid is known or not */
		rc = nfs_client_id_get_confirmed(
			data->minorversion == 0 ? arg_LOCK4->locker.
				  locker4_u.open_owner.lock_owner.clientid
				: data->session->clientid,
			&clientid);

		if (rc != CLIENT_ID_SUCCESS) {
			res_LOCK4->status = clientid_error_to_nfsstat(rc);
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs_client_id_get");
			goto out2;
		}

		if (isDebug(COMPONENT_CLIENTID) && (clientid !=
		    open_owner->so_owner.so_nfs4_owner.so_clientrec)) {
			char str_open[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_open = {
				sizeof(str_open), str_open, str_open};
			char str_lock[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_lock = {
				sizeof(str_lock), str_lock, str_lock};

			display_client_id_rec(&dspbuf_open,
					      open_owner->so_owner
					      .so_nfs4_owner.so_clientrec);
			display_client_id_rec(&dspbuf_lock, clientid);

			LogDebug(COMPONENT_CLIENTID,
				 "Unexpected, new lock owner clientid {%s} doesn't match open owner clientid {%s}",
				 str_lock, str_open);
		}

		/* The related stateid is already stored in state_open */

		/* An open state has been found. Check its type */
		if (state_open->state_type != STATE_TYPE_SHARE) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed open stateid is not a SHARE");
			goto out2;
		}

		/* Is this lock_owner known ? */
		convert_nfs4_lock_owner(&arg_LOCK4->locker.locker4_u.open_owner.
					lock_owner, &owner_name);
		LogStateOwner("Lock: ", lock_owner);
	} else {
		/* Existing lock owner: find the lock stateid and, from
		 * that, get the open_owner.
		 *
		 * There was code here before to handle an all-0 stateid,
		 * but that really doesn't apply - when we handle
		 * temporary locks for I/O operations (which is where
		 * we will see all-0 or all-1 stateids), those will not
		 * come in through nfs4_op_lock.
		 *
		 * Check stateid correctness and get pointer to state.
		 */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.lock_owner.lock_stateid,
			data->current_obj,
			&lock_state,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				lock_owner = get_state_owner_ref(lock_state);

				LogStateOwner("Lock: ", lock_owner);

				if (lock_owner != NULL) {
					open_owner = lock_owner->so_owner
						.so_nfs4_owner.so_related_owner;
					inc_state_owner_ref(open_owner);
					resp_owner = lock_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.lock_owner.lock_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for existing lock owner");
			return res_LOCK4->status;
		}

		/* Check if lock state belongs to same export */
		if (!state_same_export(lock_state, op_ctx->ctx_export)) {
			LogEvent(COMPONENT_STATE,
				 "Lock Owner Export Conflict, Lock held for export %"
				 PRIu16" request for export %"PRIu16,
				 state_export_id(lock_state),
				 op_ctx->ctx_export->export_id);
			res_LOCK4->status = NFS4ERR_INVAL;
			goto out2;
		}

		/* A lock state has been found. Check its type */
		if (lock_state->state_type != STATE_TYPE_LOCK) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed existing lock owner,  state type is not LOCK");
			goto out2;
		}

		/* Get the old lockowner. We can do the following
		 * 'cast', in NFSv4 lock_owner4 and open_owner4 are
		 * different types but with the same definition
		 */
		lock_owner = get_state_owner_ref(lock_state);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		open_owner =
			lock_owner->so_owner.so_nfs4_owner.so_related_owner;
		LogStateOwner("Open: ", open_owner);
		inc_state_owner_ref(open_owner);
		state_open = lock_state->state_data.lock.openstate;
		inc_state_t_ref(state_open);
		resp_owner = lock_owner;
		seqid = arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK Existing lock owner", data->current_obj,
			lock_owner, &lock_desc);

		/* Get the client for this open owner */
		clientid = open_owner->so_owner.so_nfs4_owner.so_clientrec;
		inc_client_id_ref(clientid);
	}

 check_seqid:

	/* Check seqid (lock_seqid or open_seqid) */
	if (data->minorversion == 0) {
		if (!Check_nfs4_seqid(resp_owner,
				      seqid,
				      op,
				      data->current_obj,
				      resp,
				      lock_tag)) {
			/* Response is all setup for us and LogDebug
			 * told what was wrong
			 */
			goto out2;
		}
	}

	/* Lock length should not be 0 */
	if (arg_LOCK4->length == 0LL) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length == 0");
		goto out;
	}

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64-bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */

	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length overflow");
		goto out;
	}

	/* Check if open state has correct access for type of lock.
	 *
	 * Don't need to check for conflicting states since this open
	 * state assures there are no conflicting states.
	 */
	if (((arg_LOCK4->locktype == WRITE_LT ||
	      arg_LOCK4->locktype == WRITEW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_WRITE) == 0))
	    ||
	    ((arg_LOCK4->locktype == READ_LT ||
	      arg_LOCK4->locktype == READW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_READ) == 0))) {
		/* The open state doesn't allow access based on the
		 * type of lock
		 */
		LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, SHARE doesn't allow access",
			data->current_obj, lock_owner, &lock_desc);

		res_LOCK4->status = NFS4ERR_OPENMODE;

		goto out;
	}

	/* Do grace period checking (use resp_owner below since a new
	 * lock request with a new lock owner doesn't have a lock owner
	 * yet, but does have an open owner - resp_owner is always one or
	 * the other and non-NULL at this point - so makes for a better log).
	 */
	if (nfs_in_grace()) {
		if (op_ctx->fsal_export->exp_ops.
			fs_supports(op_ctx->fsal_export, fso_grace_method))
				fsal_grace = true;

		if (!fsal_grace && !arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, non-reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_GRACE;
			goto out;
		}
		if (!fsal_grace && arg_LOCK4->reclaim
		    && !clientid->cid_allow_reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, invalid reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	} else {
		if (arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, reclaim while not in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	}

	/* Test if this request is attempting to create a new lock owner */
	if (arg_LOCK4->locker.new_lock_owner) {
		bool_t isnew;

		/* A lock owner is always associated with a previously
		   made open which has itself a previously made
		   stateid */

		/* This lock owner is not known yet; allocate and set
		   up a new one */
		lock_owner = create_nfs4_owner(&owner_name,
					       clientid,
					       STATE_LOCK_OWNER_NFSV4,
					       open_owner,
					       0,
					       &isnew,
					       CARE_ALWAYS);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			res_LOCK4->status = NFS4ERR_RESOURCE;
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_EVENT,
				"LOCK failed to create new lock owner",
				data->current_obj, open_owner, &lock_desc);
			goto out2;
		}

		if (!isnew) {
			PTHREAD_MUTEX_lock(&lock_owner->so_mutex);
			/* Check lock_seqid if it has attached locks. */
			if (!glist_empty(&lock_owner->so_lock_list)
			    && (data->minorversion == 0)
			    && !Check_nfs4_seqid(lock_owner,
						 arg_LOCK4->locker.locker4_u.
						     open_owner.lock_seqid,
						 op,
						 data->current_obj,
						 resp,
						 lock_tag)) {
				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to create new lock owner, re-use",
					data->current_obj,
					open_owner, &lock_desc);
				dump_all_locks(
					"All locks (re-use of lock owner)");

				PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);
				/* Response is all setup for us and
				 * LogDebug told what was wrong
				 */
				goto out2;
			}

			PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);

			/* Lock owner is known, see if we also already have
			 * a stateid. Do this here since it's impossible for
			 * there to be such a state if the lock owner was
			 * previously unknown.
			 */
			lock_state = nfs4_State_Get_Obj(data->current_obj,
							  lock_owner);
		}

		if (lock_state == NULL) {
			/* Prepare state management structure */
			memset(&candidate_data, 0, sizeof(candidate_data));
			candidate_data.lock.openstate = state_open;

			/* Add the lock state to the lock table */
			state_status = state_add(data->current_obj,
						 STATE_TYPE_LOCK,
						 &candidate_data,
						 lock_owner,
						 &lock_state,
						 data->minorversion > 0 ?
							&refer : NULL);

			if (state_status != STATE_SUCCESS) {
				res_LOCK4->status = NFS4ERR_RESOURCE;

				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to add new stateid",
					data->current_obj, lock_owner,
					&lock_desc);

				goto out2;
			}

			glist_init(&lock_state->state_data.lock.state_locklist);

			/* Add lock state to the list of lock states belonging
			   to the open state */
			glist_add_tail(
				&state_open->state_data.share.share_lockstates,
				&lock_state->state_data.lock.state_sharelist);

		}
	}

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and push
	 * lock into SAL (and FSAL). */
	state_status = state_lock(data->current_obj,
				  lock_owner,
				  lock_state,
				  blocking,
				  NULL,	/* No block data for now */
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status != STATE_SUCCESS) {
		if (state_status == STATE_LOCK_CONFLICT) {
			/* A conflicting lock from a different
			   lock_owner, returns NFS4ERR_DENIED */
			Process_nfs4_conflict(&res_LOCK4->LOCK4res_u.denied,
					      conflict_owner,
					      &conflict_desc);
		}

		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed with status %s",
			 state_err_str(state_status));

		res_LOCK4->status = nfs4_Errno_state(state_status);

		/* Save the response in the lock or open owner */
		if (res_LOCK4->status != NFS4ERR_RESOURCE
		    && res_LOCK4->status != NFS4ERR_BAD_STATEID
		    && data->minorversion == 0) {
			Copy_nfs4_state_req(resp_owner,
					    seqid,
					    op,
					    data->current_obj,
					    resp,
					    lock_tag);
		}

		if (arg_LOCK4->locker.new_lock_owner) {
			/* Need to destroy new state */
			state_del(lock_state);
		}
		goto out2;
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_LOCK4->status = NFS4_OK;

	/* Handle stateid/seqid for success */
	update_stateid(lock_state,
		       &res_LOCK4->LOCK4res_u.resok4.lock_stateid,
		       data,
		       lock_tag);

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Also save the response in the lock owner */
		Copy_nfs4_state_req(lock_owner,
				    arg_LOCK4->locker.locker4_u.open_owner.
				    lock_seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);

	}

	if (isFullDebug(COMPONENT_NFS_V4_LOCK)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_stateid(&dspbuf, lock_state);

		LogFullDebug(COMPONENT_NFS_V4_LOCK, "LOCK stateid %s", str);
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCK applied",
		data->current_obj, lock_owner, &lock_desc);

 out:

	if (data->minorversion == 0) {
		/* Save the response in the lock or open owner */
		Copy_nfs4_state_req(resp_owner,
				    seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

 out2:

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	if (lock_state != NULL)
		dec_state_t_ref(lock_state);

	LogStateOwner("Open: ", open_owner);
	LogStateOwner("Lock: ", lock_owner);

	if (open_owner != NULL)
		dec_state_owner_ref(open_owner);

	if (lock_owner != NULL)
		dec_state_owner_ref(lock_owner);

	if (clientid != NULL)
		dec_client_id_ref(clientid);

	return res_LOCK4->status;
}				/* nfs4_op_lock */
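
The open-mode check in the LOCK handler reduces to matching the requested lock type against the share access bits of the open state. A compact sketch using the constants from the handler; the helper name and the plain int parameter are illustrative assumptions:

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch: a write lock needs OPEN4_SHARE_ACCESS_WRITE, a read
 * lock needs OPEN4_SHARE_ACCESS_READ.  locktype takes the
 * READ_LT/READW_LT/WRITE_LT/WRITEW_LT values used in the handler. */
static bool share_allows_lock(uint32_t share_access, int locktype)
{
	if (locktype == WRITE_LT || locktype == WRITEW_LT)
		return (share_access & OPEN4_SHARE_ACCESS_WRITE) != 0;
	return (share_access & OPEN4_SHARE_ACCESS_READ) != 0;
}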
Ejemplo n.º 28
0
/**
 * @brief Start a duplicate request transaction
 *
 * Finds any matching request entry in the cache, if one exists, else
 * creates one in the START state.  On any non-error return, the refcnt
 * of the corresponding entry is incremented.
 *
 * @param[in] reqnfs  The NFS request data
 * @param[in] req     The request to be cached
 *
 * @retval DUPREQ_SUCCESS if successful.
 * @retval DUPREQ_INSERT_MALLOC_ERROR if an error occurred during insertion.
 */
dupreq_status_t nfs_dupreq_start(nfs_request_t *reqnfs,
				 struct svc_req *req)
{
	dupreq_status_t status = DUPREQ_SUCCESS;
	dupreq_entry_t *dv, *dk = NULL;
	bool release_dk = true;
	nfs_res_t *res = NULL;
	drc_t *drc;

	/* Disabled? */
	if (nfs_param.core_param.drc.disabled) {
		req->rq_u1 = (void *)DUPREQ_NOCACHE;
		res = alloc_nfs_res();
		goto out;
	}

	req->rq_u1 = (void *)DUPREQ_BAD_ADDR1;
	req->rq_u2 = (void *)DUPREQ_BAD_ADDR1;

	drc = nfs_dupreq_get_drc(req);
	if (!drc) {
		status = DUPREQ_INSERT_MALLOC_ERROR;
		goto out;
	}

	switch (drc->type) {
	case DRC_TCP_V4:
		if (reqnfs->funcdesc->service_function == nfs4_Compound) {
			if (!nfs_dupreq_v4_cacheable(reqnfs)) {
				/* for such requests, we merely thread
				 * the request through for later
				 * cleanup--all v41 caching is handled
				 * by the v41 slot reply cache */
				req->rq_u1 = (void *)DUPREQ_NOCACHE;
				res = alloc_nfs_res();
				goto out;
			}
		}
		break;
	default:
		/* likewise for other protocol requests we may not cache,
		 * or choose not to cache */
		if (!(reqnfs->funcdesc->dispatch_behaviour & CAN_BE_DUP)) {
			req->rq_u1 = (void *)DUPREQ_NOCACHE;
			res = alloc_nfs_res();
			goto out;
		}
		break;
	}

	dk = alloc_dupreq();
	if (dk == NULL) {
		release_dk = false;
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	dk->hin.drc = drc;	/* trans. call path ref to dv */

	switch (drc->type) {
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		dk->hin.tcp.rq_xid = req->rq_xid;
		/* XXX needed? */
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	case DRC_UDP_V234:
		dk->hin.tcp.rq_xid = req->rq_xid;
		if (unlikely(!copy_xprt_addr(&dk->hin.addr, req->rq_xprt))) {
			status = DUPREQ_INSERT_MALLOC_ERROR;
			goto release_dk;
		}
		dk->hin.rq_prog = req->rq_prog;
		dk->hin.rq_vers = req->rq_vers;
		dk->hin.rq_proc = req->rq_proc;
		break;
	default:
		/* error */
		status = DUPREQ_ERROR;
		goto release_dk;
	}

	/* TI-RPC computed checksum */
	dk->hk = req->rq_cksum;

	dk->state = DUPREQ_START;
	dk->timestamp = time(NULL);

	{
		struct opr_rbtree_node *nv;
		struct rbtree_x_part *t =
		    rbtx_partition_of_scalar(&drc->xt, dk->hk);
		PTHREAD_MUTEX_lock(&t->mtx);	/* partition lock */
		nv = rbtree_x_cached_lookup(&drc->xt, t, &dk->rbt_k, dk->hk);
		if (nv) {
			/* cached request */
			dv = opr_containerof(nv, dupreq_entry_t, rbt_k);
			PTHREAD_MUTEX_lock(&dv->mtx);
			if (unlikely(dv->state == DUPREQ_START)) {
				status = DUPREQ_BEING_PROCESSED;
			} else {
				/* satisfy req from the DRC, incref,
				   extend window */
				res = dv->res;
				PTHREAD_MUTEX_lock(&drc->mtx);
				drc_inc_retwnd(drc);
				PTHREAD_MUTEX_unlock(&drc->mtx);
				status = DUPREQ_EXISTS;
				(dv->refcnt)++;
			}
			LogDebug(COMPONENT_DUPREQ,
				 "dupreq hit dk=%p, dk xid=%u cksum %" PRIu64
				 " state=%s", dk, dk->hin.tcp.rq_xid, dk->hk,
				 dupreq_state_table[dk->state]);
			req->rq_u1 = dv;
			PTHREAD_MUTEX_unlock(&dv->mtx);
		} else {
			/* new request */
			res = req->rq_u2 = dk->res = alloc_nfs_res();
			(void)rbtree_x_cached_insert(&drc->xt, t, &dk->rbt_k,
						     dk->hk);
			(dk->refcnt)++;
			/* add to q tail */
			PTHREAD_MUTEX_lock(&drc->mtx);
			TAILQ_INSERT_TAIL(&drc->dupreq_q, dk, fifo_q);
			++(drc->size);
			PTHREAD_MUTEX_unlock(&drc->mtx);
			req->rq_u1 = dk;
			release_dk = false;
			dv = dk;
		}
		PTHREAD_MUTEX_unlock(&t->mtx);
	}

	LogFullDebug(COMPONENT_DUPREQ,
		     "starting dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dk->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

 release_dk:
	if (release_dk)
		nfs_dupreq_free_dupreq(dk);

	nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_NONE);	/* dk ref */

 out:
	if (res)
		reqnfs->res_nfs = req->rq_u2 = res;

	return status;
}
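
A hedged sketch of how a dispatcher might drive the DRC around a single request. The status values and the reqnfs->res_nfs handling are those visible above; execute_request() and send_reply() are hypothetical placeholders, and the entry reference taken here is eventually dropped with nfs_dupreq_rele() as noted in the next example's comment:

	/* Illustrative only; the real Ganesha dispatch path differs. */
	dupreq_status_t st = nfs_dupreq_start(reqnfs, req);

	if (st == DUPREQ_SUCCESS) {
		/* Fresh entry in START state: run the operation, then
		 * cache the result for future retransmissions. */
		execute_request(reqnfs, req);		/* hypothetical */
		nfs_dupreq_finish(req, reqnfs->res_nfs);
		send_reply(req, reqnfs->res_nfs);	/* hypothetical */
	} else if (st == DUPREQ_EXISTS) {
		/* Retransmission of a completed request: replay the
		 * cached response placed in reqnfs->res_nfs above. */
		send_reply(req, reqnfs->res_nfs);	/* hypothetical */
	} else if (st == DUPREQ_BEING_PROCESSED) {
		/* The original request is still in flight; drop this copy. */
	}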
Ejemplo n.º 29
0
/**
 * @brief Completes a request in the cache
 *
 * Completes a cache insertion operation begun in nfs_dupreq_start.
 * The refcnt of the corresponding duplicate request entry is unchanged
 * (ie, the caller must still call nfs_dupreq_rele).
 *
 * In contrast with the prior DRC implementation, completing a request
 * in the current implementation may under normal conditions cause one
 * or more cached requests to be retired.  Requests are retired in the
 * order they were inserted.  The primary retire algorithm is a high
 * water mark, and a windowing heuristic.  One or more requests will be
 * retired if the water mark/timeout is exceeded, and if no duplicate
 * requests have been found in the cache in a configurable window of
 * immediately preceding requests.  A timeout may supplement the water mark,
 * in future.
 *
 * req->rq_u1 has either a magic value, or points to a duplicate request
 * cache entry allocated in nfs_dupreq_start.
 *
 * @param[in] req     The request
 * @param[in] res_nfs The response
 *
 * @return DUPREQ_SUCCESS if successful.
 * @return DUPREQ_INSERT_MALLOC_ERROR if an error occurred.
 */
dupreq_status_t nfs_dupreq_finish(struct svc_req *req, nfs_res_t *res_nfs)
{
	dupreq_entry_t *ov = NULL, *dv = (dupreq_entry_t *)req->rq_u1;
	dupreq_status_t status = DUPREQ_SUCCESS;
	struct rbtree_x_part *t;
	drc_t *drc = NULL;

	/* do nothing if req is marked no-cache */
	if (dv == (void *)DUPREQ_NOCACHE)
		goto out;

	/* do nothing if nfs_dupreq_start failed completely */
	if (dv == (void *)DUPREQ_BAD_ADDR1)
		goto out;

	PTHREAD_MUTEX_lock(&dv->mtx);
	dv->res = res_nfs;
	dv->timestamp = time(NULL);
	dv->state = DUPREQ_COMPLETE;
	drc = dv->hin.drc;
	PTHREAD_MUTEX_unlock(&dv->mtx);

	/* cond. remove from q head */
	PTHREAD_MUTEX_lock(&drc->mtx);

	LogFullDebug(COMPONENT_DUPREQ,
		     "completing dv=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
		     dv, dv->hin.tcp.rq_xid, drc,
		     dupreq_state_table[dv->state], dupreq_status_table[status],
		     dv->refcnt);

	/* ok, do the new retwnd calculation here.  then, put drc only if
	 * we retire an entry */
	if (drc_should_retire(drc)) {
		/* again: */
		ov = TAILQ_FIRST(&drc->dupreq_q);
		if (likely(ov)) {
			/* finished request count against retwnd */
			drc_dec_retwnd(drc);
			/* check refcnt */
			if (ov->refcnt > 0) {
				/* ov still in use, apparently */
				goto unlock;
			}
			/* remove q entry */
			TAILQ_REMOVE(&drc->dupreq_q, ov, fifo_q);
			--(drc->size);

			/* remove dict entry */
			t = rbtx_partition_of_scalar(&drc->xt, ov->hk);
			/* interlock */
			PTHREAD_MUTEX_unlock(&drc->mtx);
			PTHREAD_MUTEX_lock(&t->mtx);	/* partition lock */
			rbtree_x_cached_remove(&drc->xt, t, &ov->rbt_k, ov->hk);
			PTHREAD_MUTEX_unlock(&t->mtx);

			LogDebug(COMPONENT_DUPREQ,
				 "retiring ov=%p xid=%u on DRC=%p state=%s, status=%s, refcnt=%d",
				 ov, ov->hin.tcp.rq_xid,
				 ov->hin.drc, dupreq_state_table[dv->state],
				 dupreq_status_table[status], ov->refcnt);

			/* deep free ov */
			nfs_dupreq_free_dupreq(ov);
			goto out;
		}
	}

 unlock:
	PTHREAD_MUTEX_unlock(&drc->mtx);

 out:
	return status;
}
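
The retire heuristic described above can be read as: retire the oldest finished entry once the cache is past its high-water mark, unless recent duplicate hits have extended the retire window faster than retirements consume it (drc_inc_retwnd and drc_dec_retwnd above). A conceptual sketch only; this is not the actual drc_should_retire(), and the field names maxsize and retwnd are assumptions for illustration:

/* Conceptual sketch, not the real predicate: over the high-water mark
 * and the retire window is exhausted.  drc->size is used as above;
 * drc->maxsize and drc->retwnd are assumed field names. */
static inline bool drc_over_watermark_sketch(const drc_t *drc)
{
	return drc->size > drc->maxsize && drc->retwnd == 0;
}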
Ejemplo n.º 30
0
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * @param[in] req  The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req.
		 */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ, "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required.
			 */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc =
				    opr_containerof(ndrc, drc_t,
						    d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ", drc,
				     drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic.
			 */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

out:
	return drc;
}
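
Every reference returned by nfs_dupreq_get_drc() is balanced by a put once the caller is finished, as nfs_dupreq_start() does above. A minimal pairing sketch using only calls visible in this file:

	/* Hedged sketch: reference discipline only, no DRC internals. */
	drc_t *drc = nfs_dupreq_get_drc(req);

	if (drc != NULL) {
		/* ... hash the request, look up or insert its entry ... */

		/* Drop the call-path reference taken by nfs_dupreq_get_drc(). */
		nfs_dupreq_put_drc(req->rq_xprt, drc, DRC_FLAG_NONE);
	}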