static void notify_deferred_opens(struct messaging_context *msg_ctx, struct share_mode_lock *lck) { int i; if (!should_notify_deferred_opens()) { return; } for (i=0; i<lck->num_share_modes; i++) { struct share_mode_entry *e = &lck->share_modes[i]; if (!is_deferred_open_entry(e)) { continue; } if (procid_is_me(&e->pid)) { /* * We need to notify ourself to retry the open. Do * this by finding the queued SMB record, moving it to * the head of the queue and changing the wait time to * zero. */ schedule_deferred_open_message_smb(e->op_mid); } else { char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE]; share_mode_entry_to_message(msg, e); messaging_send_buf(msg_ctx, e->pid, MSG_SMB_OPEN_RETRY, (uint8 *)msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE); } } }
/*
 * Completion callback for an async open job. Invoked from the event
 * loop when the thread-pool's result fd becomes readable; matches the
 * finished jobid back to its private data and reschedules the deferred
 * SMB open request, or discards the result if no request is waiting.
 */
static void aio_open_handle_completion(struct event_context *event_ctx,
				       struct fd_event *event,
				       uint16 flags,
				       void *p)
{
	struct aio_open_private_data *opd = NULL;
	int jobid = 0;
	int ret;

	DEBUG(10, ("aio_open_handle_completion called with flags=%d\n",
		(int)flags));

	/* Only a readable fd signals a finished job. */
	if ((flags & EVENT_FD_READ) == 0) {
		return;
	}

	ret = pthreadpool_finished_job(open_pool, &jobid);
	if (ret) {
		/* A broken job pipe is unrecoverable internal state. */
		smb_panic("aio_open_handle_completion");
		/* notreached. */
		return;
	}

	opd = find_open_private_data_by_jobid(jobid);
	if (opd == NULL) {
		DEBUG(0, ("aio_open_handle_completion cannot find jobid %d\n",
			jobid));
		smb_panic("aio_open_handle_completion - no jobid");
		/* notreached. */
		return;
	}

	DEBUG(10,("aio_open_handle_completion: jobid %d mid %llu "
		"for file %s/%s completed\n",
		jobid, (unsigned long long)opd->mid,
		opd->dname, opd->fname));

	opd->in_progress = false;

	/* Find outstanding event and reschedule. */
	if (!schedule_deferred_open_message_smb(opd->sconn, opd->mid)) {
		/*
		 * Outstanding event didn't exist or was
		 * cancelled. Free up the fd and throw
		 * away the result.
		 */
		if (opd->ret_fd != -1) {
			close(opd->ret_fd);
			opd->ret_fd = -1;
		}
		TALLOC_FREE(opd);
	}
}
static void notify_deferred_opens(struct smbd_server_connection *sconn, struct share_mode_lock *lck) { uint32_t i, num_deferred; struct share_mode_entry *deferred; if (!should_notify_deferred_opens()) { return; } num_deferred = 0; for (i=0; i<lck->data->num_share_modes; i++) { if (is_deferred_open_entry(&lck->data->share_modes[i])) { num_deferred += 1; } } if (num_deferred == 0) { return; } deferred = talloc_array(talloc_tos(), struct share_mode_entry, num_deferred); if (deferred == NULL) { return; } num_deferred = 0; for (i=0; i<lck->data->num_share_modes; i++) { struct share_mode_entry *e = &lck->data->share_modes[i]; if (is_deferred_open_entry(e)) { deferred[num_deferred] = *e; num_deferred += 1; } } /* * We need to sort the notifications by initial request time. Imagine * two opens come in asyncronously, both conflicting with the open we * just close here. If we don't sort the notifications, the one that * came in last might get the response before the one that came in * first. This is demonstrated with the smbtorture4 raw.mux test. * * As long as we had the UNUSED_SHARE_MODE_ENTRY, we happened to * survive this particular test. Without UNUSED_SHARE_MODE_ENTRY, we * shuffle the share mode entries around a bit, so that we do not * survive raw.mux anymore. * * We could have kept the ordering in del_share_mode, but as the * ordering was never formalized I think it is better to do it here * where it is necessary. */ qsort(deferred, num_deferred, sizeof(struct share_mode_entry), compare_share_mode_times); for (i=0; i<num_deferred; i++) { struct share_mode_entry *e = &deferred[i]; if (procid_is_me(&e->pid)) { /* * We need to notify ourself to retry the open. Do * this by finding the queued SMB record, moving it to * the head of the queue and changing the wait time to * zero. 
*/ schedule_deferred_open_message_smb(sconn, e->op_mid); } else { char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE]; share_mode_entry_to_message(msg, e); messaging_send_buf(sconn->msg_ctx, e->pid, MSG_SMB_OPEN_RETRY, (uint8 *)msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE); } } TALLOC_FREE(deferred); }
/*
 * Completion callback for an async open job. Invoked from the tevent
 * loop when the thread-pool's result pipe becomes readable; matches the
 * finished jobid back to its private data and reschedules the deferred
 * SMB open request, or discards the result if no request is waiting.
 */
static void aio_open_handle_completion(struct tevent_context *event_ctx,
				       struct tevent_fd *event,
				       uint16_t flags,
				       void *p)
{
	struct aio_open_private_data *opd = NULL;
	int jobid = 0;
	int ret;
	struct smbXsrv_connection *xconn;

	DEBUG(10, ("aio_open_handle_completion called with flags=%d\n",
		(int)flags));

	/* Only a readable fd signals a finished job. */
	if ((flags & TEVENT_FD_READ) == 0) {
		return;
	}

	/* Reap exactly one finished job from the pipe. */
	ret = pthreadpool_pipe_finished_jobs(open_pool, &jobid, 1);
	if (ret != 1) {
		/* A broken job pipe is unrecoverable internal state. */
		smb_panic("aio_open_handle_completion");
		/* notreached. */
		return;
	}

	opd = find_open_private_data_by_jobid(jobid);
	if (opd == NULL) {
		DEBUG(0, ("aio_open_handle_completion cannot find jobid %d\n",
			jobid));
		smb_panic("aio_open_handle_completion - no jobid");
		/* notreached. */
		return;
	}

	DEBUG(10,("aio_open_handle_completion: jobid %d mid %llu "
		"for file %s/%s completed\n",
		jobid, (unsigned long long)opd->mid,
		opd->dname, opd->fname));

	opd->in_progress = false;

	/*
	 * TODO: In future we need a proper algorithm
	 * to find the correct connection for a fsp.
	 * For now we only have one connection, so this is correct...
	 */
	xconn = opd->sconn->client->connections;

	/* Find outstanding event and reschedule. */
	if (!schedule_deferred_open_message_smb(xconn, opd->mid)) {
		/*
		 * Outstanding event didn't exist or was
		 * cancelled. Free up the fd and throw
		 * away the result.
		 */
		if (opd->ret_fd != -1) {
			close(opd->ret_fd);
			opd->ret_fd = -1;
		}
		TALLOC_FREE(opd);
	}
}