/*
 * smb_process_node_notify_change_queue
 *
 * This function searches the notify change request queue and sends
 * a 'NODE MODIFIED' reply to all requests that are related to a
 * specific node.
 * WatchTree flag: we handle this flag in a special manner just for
 * DAVE clients.  When something changes, we notify all requests that
 * came from DAVE clients on the volume that was modified, regardless
 * of the tree they asked us to monitor: any change in any part of
 * the volume notifies every notify change request from a DAVE
 * client, wherever it sits in the volume hierarchy.
 */
void
smb_process_node_notify_change_queue(smb_node_t *node)
{
	smb_request_t	*sr;
	smb_request_t	*tmp;
	smb_node_t	*nc_node;
	boolean_t	sig = B_FALSE;

	ASSERT(node->n_magic == SMB_NODE_MAGIC);

	if (!(node->flags & NODE_FLAGS_NOTIFY_CHANGE))
		return;

	node->flags |= NODE_FLAGS_CHANGED;

	/*
	 * Requests waiting for a notify change event sit on smb_ncr_list;
	 * when an event occurs they are moved to smb_nce_list for the
	 * notify daemon to complete.  Both lists are locked for the scan.
	 */
	smb_slist_enter(&smb_ncr_list);
	smb_slist_enter(&smb_nce_list);
	sr = smb_slist_head(&smb_ncr_list);
	while (sr) {
		ASSERT(sr->sr_magic == SMB_REQ_MAGIC);
		tmp = smb_slist_next(&smb_ncr_list, sr);

		nc_node = sr->sr_ncr.nc_node;
		if (nc_node == node) {
			mutex_enter(&sr->sr_mutex);
			switch (sr->sr_state) {
			case SMB_REQ_STATE_WAITING_EVENT:
				smb_slist_obj_move(&smb_nce_list,
				    &smb_ncr_list, sr);
				smb_srqueue_waitq_enter(
				    sr->session->s_srqueue);
				sr->sr_state = SMB_REQ_STATE_EVENT_OCCURRED;
				sig = B_TRUE;
				break;
			default:
				ASSERT(0);
				break;
			}
			mutex_exit(&sr->sr_mutex);
		}
		sr = tmp;
	}
	smb_slist_exit(&smb_nce_list);
	smb_slist_exit(&smb_ncr_list);
	if (sig)
		smb_thread_signal(&smb_thread_notify_daemon);
}
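/*
 * Illustration only, not part of the original source: a hypothetical
 * caller of the function above.  After modifying a directory (for
 * example on create, rename or delete), the file system interface
 * code would invoke smb_process_node_notify_change_queue() on the
 * affected node to complete any pending notify change requests.
 */
static void
smb_node_modified_sketch(smb_node_t *dnode)
{
	ASSERT(dnode->n_magic == SMB_NODE_MAGIC);

	/* Wake all notify change requests watching this node. */
	smb_process_node_notify_change_queue(dnode);
}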
/*
 * This function is invoked when a share is disabled to disconnect trees
 * and close files.  Cleaning up may involve VOP and/or VFS calls, which
 * may conflict/deadlock with stuck threads if something is amiss with the
 * file system.  Queueing the request for asynchronous processing allows the
 * call to return immediately so that, if the unshare is being done in the
 * context of a forced unmount, the forced unmount will always be able to
 * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
 * processes to complete).
 *
 * The path lookup to find the root vnode of the VFS in question and the
 * release of this vnode are done synchronously prior to any associated
 * unmount.  Doing these asynchronously with respect to an associated
 * unmount would risk a spurious EBUSY for a standard unmount, or an
 * EIO during the path lookup due to a forced unmount finishing first.
 */
int
smb_kshare_unexport_list(smb_ioc_share_t *ioc)
{
	smb_server_t	*sv = NULL;
	smb_unshare_t	*ux;
	nvlist_t	*shrlist = NULL;
	nvpair_t	*nvp;
	boolean_t	unexport = B_FALSE;
	char		*shrname;
	int		rc;

	if ((rc = smb_server_lookup(&sv)) != 0)
		return (rc);

	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
		goto out;

	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
		/* Each share entry is an embedded nvlist keyed by its name. */
		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
			continue;

		shrname = nvpair_name(nvp);
		ASSERT(shrname);

		if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
			continue;

		ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);

		smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
		unexport = B_TRUE;
	}

	if (unexport)
		smb_thread_signal(&sv->sv_export.e_unexport_thread);
	rc = 0;

out:
	if (shrlist != NULL)
		nvlist_free(shrlist);
	smb_server_release(sv);
	return (rc);
}
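/*
 * Illustration only, not part of the original source: a minimal
 * sketch of how the packed share list consumed by
 * smb_kshare_unexport_list() might be built.  Each share is an
 * embedded nvlist whose nvpair name is the share name, which is
 * why the loop above skips any nvpair that is not DATA_TYPE_NVLIST.
 * The function name is hypothetical; the nvlist calls are the
 * standard kernel nvpair interfaces.
 */
static int
smb_kshare_pack_sketch(const char *shrname, char **bufp, size_t *lenp)
{
	nvlist_t	*shrlist = NULL;
	nvlist_t	*shr = NULL;
	int		rc;

	if ((rc = nvlist_alloc(&shrlist, NV_UNIQUE_NAME, KM_SLEEP)) != 0)
		return (rc);
	if ((rc = nvlist_alloc(&shr, NV_UNIQUE_NAME, KM_SLEEP)) != 0)
		goto out;

	/* The nvpair name carries the share name; the value is an nvlist. */
	if ((rc = nvlist_add_nvlist(shrlist, shrname, shr)) != 0)
		goto out;

	/*
	 * Pack into a contiguous buffer suitable for ioc->shr/ioc->shrlen.
	 * The caller frees the packed buffer (kmem_free in kernel context).
	 */
	rc = nvlist_pack(shrlist, bufp, lenp, NV_ENCODE_NATIVE, KM_SLEEP);
out:
	if (shr != NULL)
		nvlist_free(shr);
	nvlist_free(shrlist);
	return (rc);
}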
/*
 * smb_process_session_notify_change_queue
 *
 * This function traverses the notify change request queue and sends
 * cancel replies to all requests that are related to a specific
 * session (optionally restricted to a specific tree).
 */
void
smb_process_session_notify_change_queue(
    smb_session_t	*session,
    smb_tree_t		*tree)
{
	smb_request_t	*sr;
	smb_request_t	*tmp;
	boolean_t	sig = B_FALSE;

	smb_slist_enter(&smb_ncr_list);
	smb_slist_enter(&smb_nce_list);
	sr = smb_slist_head(&smb_ncr_list);
	while (sr) {
		ASSERT(sr->sr_magic == SMB_REQ_MAGIC);
		tmp = smb_slist_next(&smb_ncr_list, sr);
		if ((sr->session == session) &&
		    (tree == NULL || sr->tid_tree == tree)) {
			mutex_enter(&sr->sr_mutex);
			switch (sr->sr_state) {
			case SMB_REQ_STATE_WAITING_EVENT:
				smb_slist_obj_move(&smb_nce_list,
				    &smb_ncr_list, sr);
				smb_srqueue_waitq_enter(
				    sr->session->s_srqueue);
				sr->sr_state = SMB_REQ_STATE_CANCELED;
				sig = B_TRUE;
				break;
			default:
				ASSERT(0);
				break;
			}
			mutex_exit(&sr->sr_mutex);
		}
		sr = tmp;
	}
	smb_slist_exit(&smb_nce_list);
	smb_slist_exit(&smb_ncr_list);
	if (sig)
		smb_thread_signal(&smb_thread_notify_daemon);
}
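/*
 * smb_reply_specific_cancel_request
 *
 * This function searches the notify change request queue for the
 * pending request whose session, UID, PID, TID and MID match those
 * of the given cancel request (zsr) and cancels it.
 */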
void
smb_reply_specific_cancel_request(smb_request_t *zsr)
{
	smb_request_t	*sr;
	smb_request_t	*tmp;
	boolean_t	sig = B_FALSE;

	smb_slist_enter(&smb_ncr_list);
	smb_slist_enter(&smb_nce_list);
	sr = smb_slist_head(&smb_ncr_list);
	while (sr) {
		ASSERT(sr->sr_magic == SMB_REQ_MAGIC);
		tmp = smb_slist_next(&smb_ncr_list, sr);
		if ((sr->session == zsr->session) &&
		    (sr->smb_uid == zsr->smb_uid) &&
		    (sr->smb_pid == zsr->smb_pid) &&
		    (sr->smb_tid == zsr->smb_tid) &&
		    (sr->smb_mid == zsr->smb_mid)) {
			mutex_enter(&sr->sr_mutex);
			switch (sr->sr_state) {
			case SMB_REQ_STATE_WAITING_EVENT:
				smb_slist_obj_move(&smb_nce_list,
				    &smb_ncr_list, sr);
				smb_srqueue_waitq_enter(
				    sr->session->s_srqueue);
				sr->sr_state = SMB_REQ_STATE_CANCELED;
				sig = B_TRUE;
				break;
			default:
				ASSERT(0);
				break;
			}
			mutex_exit(&sr->sr_mutex);
		}
		sr = tmp;
	}
	smb_slist_exit(&smb_nce_list);
	smb_slist_exit(&smb_ncr_list);
	if (sig)
		smb_thread_signal(&smb_thread_notify_daemon);
}