Example #1
static void notify_deferred_opens(struct share_mode_lock *lck)
{
	int i;

	for (i=0; i<lck->num_share_modes; i++) {
		struct share_mode_entry *e = &lck->share_modes[i];

		if (!is_deferred_open_entry(e)) {
			continue;
		}

		if (procid_is_me(&e->pid)) {
			/*
			 * We need to notify ourself to retry the open.  Do
			 * this by finding the queued SMB record, moving it to
			 * the head of the queue and changing the wait time to
			 * zero.
			 */
			schedule_deferred_open_smb_message(e->op_mid);
		} else {
			char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE];

			share_mode_entry_to_message(msg, e);

			message_send_pid(e->pid, MSG_SMB_OPEN_RETRY,
					 msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE, True);
		}
	}
}
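
The share_mode_entry_to_message() helper used in both branches is not part of the excerpt. A minimal sketch of the idea, assuming MSG_SMB_SHARE_MODE_ENTRY_SIZE equals sizeof(struct share_mode_entry) and that the entry can safely be sent as a flat byte blob, could look like this (the real helper may marshal the fields individually):

/*
 * Sketch only: flatten a share_mode_entry into the fixed-size message
 * buffer that is later handed to message_send_pid() / messaging_send_buf().
 * Assumes MSG_SMB_SHARE_MODE_ENTRY_SIZE == sizeof(struct share_mode_entry)
 * and <string.h> (or Samba's includes.h) for memcpy().
 */
static void share_mode_entry_to_message(char *msg,
					const struct share_mode_entry *e)
{
	memcpy(msg, e, sizeof(struct share_mode_entry));
}

The receiving process would then reverse this with a matching copy back into a struct share_mode_entry before retrying the deferred open.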
Example #2
static void notify_deferred_opens(struct messaging_context *msg_ctx,
				  struct share_mode_lock *lck)
{
	int i;

	if (!should_notify_deferred_opens()) {
		return;
	}

	for (i=0; i<lck->num_share_modes; i++) {
		struct share_mode_entry *e = &lck->share_modes[i];

		if (!is_deferred_open_entry(e)) {
			continue;
		}

		if (procid_is_me(&e->pid)) {
			struct smbd_server_connection *sconn;
			/*
			 * We need to notify ourself to retry the open.  Do
			 * this by finding the queued SMB record, moving it to
			 * the head of the queue and changing the wait time to
			 * zero.
			 */
			sconn = msg_ctx_to_sconn(msg_ctx);
			if (sconn != NULL) {
				schedule_deferred_open_message_smb(
					sconn, e->op_mid);
			}
		} else {
			char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE];

			share_mode_entry_to_message(msg, e);

			messaging_send_buf(msg_ctx, e->pid, MSG_SMB_OPEN_RETRY,
					   (uint8 *)msg,
					   MSG_SMB_SHARE_MODE_ENTRY_SIZE);
		}
	}
}
Example #3
static void notify_deferred_opens(struct smbd_server_connection *sconn,
				  struct share_mode_lock *lck)
{
	uint32_t i, num_deferred;
	struct share_mode_entry *deferred;

	if (!should_notify_deferred_opens()) {
		return;
	}

	num_deferred = 0;
	for (i=0; i<lck->data->num_share_modes; i++) {
		if (is_deferred_open_entry(&lck->data->share_modes[i])) {
			num_deferred += 1;
		}
	}
	if (num_deferred == 0) {
		return;
	}

	deferred = talloc_array(talloc_tos(), struct share_mode_entry,
				num_deferred);
	if (deferred == NULL) {
		return;
	}

	num_deferred = 0;
	for (i=0; i<lck->data->num_share_modes; i++) {
		struct share_mode_entry *e = &lck->data->share_modes[i];
		if (is_deferred_open_entry(e)) {
			deferred[num_deferred] = *e;
			num_deferred += 1;
		}
	}

	/*
	 * We need to sort the notifications by initial request time. Imagine
	 * two opens come in asynchronously, both conflicting with the open we
	 * just close here. If we don't sort the notifications, the one that
	 * came in last might get the response before the one that came in
	 * first. This is demonstrated with the smbtorture4 raw.mux test.
	 *
	 * As long as we had the UNUSED_SHARE_MODE_ENTRY, we happened to
	 * survive this particular test. Without UNUSED_SHARE_MODE_ENTRY, we
	 * shuffle the share mode entries around a bit, so that we do not
	 * survive raw.mux anymore.
	 *
	 * We could have kept the ordering in del_share_mode, but as the
	 * ordering was never formalized I think it is better to do it here
	 * where it is necessary.
	 */

	qsort(deferred, num_deferred, sizeof(struct share_mode_entry),
	      compare_share_mode_times);

	for (i=0; i<num_deferred; i++) {
		struct share_mode_entry *e = &deferred[i];

		if (procid_is_me(&e->pid)) {
			/*
			 * We need to notify ourself to retry the open.  Do
			 * this by finding the queued SMB record, moving it to
			 * the head of the queue and changing the wait time to
			 * zero.
			 */
			schedule_deferred_open_message_smb(sconn, e->op_mid);
		} else {
			char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE];

			share_mode_entry_to_message(msg, e);

			messaging_send_buf(sconn->msg_ctx, e->pid,
					   MSG_SMB_OPEN_RETRY,
					   (uint8 *)msg,
					   MSG_SMB_SHARE_MODE_ENTRY_SIZE);
		}
	}
	TALLOC_FREE(deferred);
}
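
The compare_share_mode_times() comparator passed to qsort() above is likewise not shown. A minimal sketch, assuming struct share_mode_entry records the submission time of the deferred open in a struct timeval field named time and that Samba's timeval_compare() helper is available, is:

/*
 * Sketch only: order deferred-open entries by the time the original open
 * request was submitted, oldest first, so the earliest waiter is notified
 * (and therefore retried) first.  The field and helper names are assumed
 * from the surrounding Samba code and may differ between versions.
 */
static int compare_share_mode_times(const void *p1, const void *p2)
{
	const struct share_mode_entry *s1 =
		(const struct share_mode_entry *)p1;
	const struct share_mode_entry *s2 =
		(const struct share_mode_entry *)p2;

	/* timeval_compare() returns -1/0/1, matching qsort()'s contract. */
	return timeval_compare(&s1->time, &s2->time);
}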