static void notify_deferred_opens(struct share_mode_lock *lck) { int i; for (i=0; i<lck->num_share_modes; i++) { struct share_mode_entry *e = &lck->share_modes[i]; if (!is_deferred_open_entry(e)) { continue; } if (procid_is_me(&e->pid)) { /* * We need to notify ourself to retry the open. Do * this by finding the queued SMB record, moving it to * the head of the queue and changing the wait time to * zero. */ schedule_deferred_open_smb_message(e->op_mid); } else { char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE]; share_mode_entry_to_message(msg, e); message_send_pid(e->pid, MSG_SMB_OPEN_RETRY, msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE, True); } } }
static void notify_deferred_opens(struct messaging_context *msg_ctx, struct share_mode_lock *lck) { int i; if (!should_notify_deferred_opens()) { return; } for (i=0; i<lck->num_share_modes; i++) { struct share_mode_entry *e = &lck->share_modes[i]; if (!is_deferred_open_entry(e)) { continue; } if (procid_is_me(&e->pid)) { struct smbd_server_connection *sconn; /* * We need to notify ourself to retry the open. Do * this by finding the queued SMB record, moving it to * the head of the queue and changing the wait time to * zero. */ sconn = msg_ctx_to_sconn(msg_ctx); if (sconn != NULL) { schedule_deferred_open_message_smb( sconn, e->op_mid); } } else { char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE]; share_mode_entry_to_message(msg, e); messaging_send_buf(msg_ctx, e->pid, MSG_SMB_OPEN_RETRY, (uint8 *)msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE); } } }
bool serverid_exists(const struct server_id *id) { struct db_context *db; struct serverid_exists_state state; struct serverid_key key; TDB_DATA tdbkey; NTSTATUS status; if (procid_is_me(id)) { return true; } if (!process_exists(*id)) { return false; } if (id->unique_id == SERVERID_UNIQUE_ID_NOT_TO_VERIFY) { return true; } db = serverid_db(); if (db == NULL) { return false; } serverid_fill_key(id, &key); tdbkey = make_tdb_data((uint8_t *)&key, sizeof(key)); state.id = id; state.exists = false; status = dbwrap_parse_record(db, tdbkey, server_exists_parse, &state); if (!NT_STATUS_IS_OK(status)) { return false; } return state.exists; }
/*
 * Record a rename of the file covered by this share mode lock and tell
 * every other smbd with an open handle about it via MSG_SMB_FILE_RENAME.
 *
 * The message layout built here is: dev (8 bytes at offset 0), inode
 * (8 bytes at offset 8), then the NUL-terminated service path followed by
 * the NUL-terminated new filename starting at offset 16
 * (offset 16 presumably equals MSG_FILE_RENAMED_MIN_SIZE — confirm against
 * the header that defines it).
 *
 * Returns False on NULL lck or allocation failure, True otherwise.
 */
BOOL rename_share_filename(struct share_mode_lock *lck,
			   const char *servicepath,
			   const char *newname)
{
	size_t sp_len;
	size_t fn_len;
	size_t msg_len;
	char *frm = NULL;
	int i;

	if (!lck) {
		return False;
	}

	DEBUG(10, ("rename_share_filename: servicepath %s newname %s\n",
		   servicepath, newname));

	/*
	 * rename_internal_fsp() and rename_internals() add './' to
	 * head of newname if newname does not contain a '/'.
	 * Strip any such prefixes before storing the name.
	 */
	while (newname[0] && newname[1] && newname[0] == '.' &&
	       newname[1] == '/') {
		newname += 2;
	}

	/* Update the names stored in the lock record itself. */
	lck->servicepath = talloc_strdup(lck, servicepath);
	lck->filename = talloc_strdup(lck, newname);
	if (lck->filename == NULL || lck->servicepath == NULL) {
		DEBUG(0, ("rename_share_filename: talloc failed\n"));
		return False;
	}
	/* Mark the lock dirty so the change is written back on unlock. */
	lck->modified = True;

	sp_len = strlen(lck->servicepath);
	fn_len = strlen(lck->filename);

	/* Fixed header plus both strings, each with its terminating NUL. */
	msg_len = MSG_FILE_RENAMED_MIN_SIZE + sp_len + 1 + fn_len + 1;

	/* Set up the name changed message. */
	frm = TALLOC(lck, msg_len);
	if (!frm) {
		return False;
	}

	/* dev at offset 0, inode at offset 8. */
	SDEV_T_VAL(frm,0,lck->dev);
	SINO_T_VAL(frm,8,lck->ino);

	DEBUG(10,("rename_share_filename: msg_len = %u\n",
		  (unsigned int)msg_len ));

	/* Strings start right after the 16-byte binary header. */
	safe_strcpy(&frm[16], lck->servicepath, sp_len);
	safe_strcpy(&frm[16 + sp_len + 1], lck->filename, fn_len);

	/* Send the messages. */
	for (i=0; i<lck->num_share_modes; i++) {
		struct share_mode_entry *se = &lck->share_modes[i];
		if (!is_valid_share_mode_entry(se)) {
			continue;
		}
		/* But not to ourselves... */
		if (procid_is_me(&se->pid)) {
			continue;
		}
		DEBUG(10,("rename_share_filename: sending rename message to pid %u "
			  "dev %x, inode %.0f sharepath %s newname %s\n",
			  (unsigned int)procid_to_pid(&se->pid),
			  (unsigned int)lck->dev, (double)lck->ino,
			  lck->servicepath, lck->filename ));

		/* Send as root so the message cannot be dropped for
		 * privilege reasons. */
		become_root();
		message_send_pid(se->pid, MSG_SMB_FILE_RENAME, frm,
				 msg_len, True);
		unbecome_root();
	}

	return True;
}
static void notify_deferred_opens(struct smbd_server_connection *sconn, struct share_mode_lock *lck) { uint32_t i, num_deferred; struct share_mode_entry *deferred; if (!should_notify_deferred_opens()) { return; } num_deferred = 0; for (i=0; i<lck->data->num_share_modes; i++) { if (is_deferred_open_entry(&lck->data->share_modes[i])) { num_deferred += 1; } } if (num_deferred == 0) { return; } deferred = talloc_array(talloc_tos(), struct share_mode_entry, num_deferred); if (deferred == NULL) { return; } num_deferred = 0; for (i=0; i<lck->data->num_share_modes; i++) { struct share_mode_entry *e = &lck->data->share_modes[i]; if (is_deferred_open_entry(e)) { deferred[num_deferred] = *e; num_deferred += 1; } } /* * We need to sort the notifications by initial request time. Imagine * two opens come in asyncronously, both conflicting with the open we * just close here. If we don't sort the notifications, the one that * came in last might get the response before the one that came in * first. This is demonstrated with the smbtorture4 raw.mux test. * * As long as we had the UNUSED_SHARE_MODE_ENTRY, we happened to * survive this particular test. Without UNUSED_SHARE_MODE_ENTRY, we * shuffle the share mode entries around a bit, so that we do not * survive raw.mux anymore. * * We could have kept the ordering in del_share_mode, but as the * ordering was never formalized I think it is better to do it here * where it is necessary. */ qsort(deferred, num_deferred, sizeof(struct share_mode_entry), compare_share_mode_times); for (i=0; i<num_deferred; i++) { struct share_mode_entry *e = &deferred[i]; if (procid_is_me(&e->pid)) { /* * We need to notify ourself to retry the open. Do * this by finding the queued SMB record, moving it to * the head of the queue and changing the wait time to * zero. 
*/ schedule_deferred_open_message_smb(sconn, e->op_mid); } else { char msg[MSG_SMB_SHARE_MODE_ENTRY_SIZE]; share_mode_entry_to_message(msg, e); messaging_send_buf(sconn->msg_ctx, e->pid, MSG_SMB_OPEN_RETRY, (uint8 *)msg, MSG_SMB_SHARE_MODE_ENTRY_SIZE); } } TALLOC_FREE(deferred); }
/*
 * Bulk-check which of num_ids server_ids refer to live processes, writing
 * one bool per id into results[]. Returns true if the check ran to
 * completion (individual answers are in results[]), false on allocation
 * or ctdb communication failure.
 *
 * Phases:
 *   1. Classify each id: disconnected -> false; self -> true; local pid ->
 *      process_exists_by_pid, then either done or queued in verify_idx for
 *      a serverid-db check; otherwise (clustering only) queued in
 *      remote_idx.
 *   2. If the ctdb serverids-exist control is supported, verify the remote
 *      ids that carry a real unique_id via ctdb_serverids_exist; ids with
 *      SERVERID_UNIQUE_ID_NOT_TO_VERIFY stay in remote_idx for phase 3.
 *   3. Remaining remote ids: check mere process existence via
 *      ctdb_processes_exist; survivors with a real unique_id are queued
 *      for the serverid-db check.
 *   4. Verify queued ids against the local serverid tdb record.
 *
 * Note: remote_idx and the todo_* arrays are deliberately re-used and
 * re-compacted between phases; the statement order here is load-bearing.
 */
bool serverids_exist(const struct server_id *ids, int num_ids, bool *results)
{
	int *todo_idx = NULL;
	struct server_id *todo_ids = NULL;
	bool *todo_results = NULL;
	int todo_num = 0;
	int *remote_idx = NULL;
	int remote_num = 0;
	int *verify_idx = NULL;
	int verify_num = 0;
	int t, idx;
	bool result = false;
	struct db_context *db;

	db = serverid_db();
	if (db == NULL) {
		return false;
	}

	/* Scratch arrays sized for the worst case (all ids in one bucket). */
	todo_idx = talloc_array(talloc_tos(), int, num_ids);
	if (todo_idx == NULL) {
		goto fail;
	}
	todo_ids = talloc_array(talloc_tos(), struct server_id, num_ids);
	if (todo_ids == NULL) {
		goto fail;
	}
	todo_results = talloc_array(talloc_tos(), bool, num_ids);
	if (todo_results == NULL) {
		goto fail;
	}
	remote_idx = talloc_array(talloc_tos(), int, num_ids);
	if (remote_idx == NULL) {
		goto fail;
	}
	verify_idx = talloc_array(talloc_tos(), int, num_ids);
	if (verify_idx == NULL) {
		goto fail;
	}

	/* Phase 1: classify every id. */
	for (idx=0; idx<num_ids; idx++) {
		results[idx] = false;

		if (server_id_is_disconnected(&ids[idx])) {
			continue;
		}

		if (procid_is_me(&ids[idx])) {
			results[idx] = true;
			continue;
		}

		if (procid_is_local(&ids[idx])) {
			bool exists = process_exists_by_pid(ids[idx].pid);

			if (!exists) {
				continue;
			}

			if (ids[idx].unique_id ==
			    SERVERID_UNIQUE_ID_NOT_TO_VERIFY) {
				results[idx] = true;
				continue;
			}

			/* Local and alive: still confirm via serverid db. */
			verify_idx[verify_num] = idx;
			verify_num += 1;
			continue;
		}

		/* Non-local ids can only be checked in a cluster. */
		if (!lp_clustering()) {
			continue;
		}

		remote_idx[remote_num] = idx;
		remote_num += 1;
	}

	/* Phase 2: verify remote ids by (vnn, unique_id) where supported. */
	if (remote_num != 0 &&
	    ctdb_serverids_exist_supported(messaging_ctdbd_connection())) {
		int old_remote_num = remote_num;

		/*
		 * Re-compact remote_idx: ids that must not be verified by
		 * unique_id stay remote (for phase 3); the rest go into
		 * the todo_* batch for ctdb_serverids_exist.
		 */
		remote_num = 0;
		todo_num = 0;

		for (t=0; t<old_remote_num; t++) {
			idx = remote_idx[t];

			if (ids[idx].unique_id ==
			    SERVERID_UNIQUE_ID_NOT_TO_VERIFY) {
				remote_idx[remote_num] = idx;
				remote_num += 1;
				continue;
			}

			todo_idx[todo_num] = idx;
			todo_ids[todo_num] = ids[idx];
			todo_results[todo_num] = false;
			todo_num += 1;
		}

		/*
		 * Note: this only uses CTDB_CONTROL_CHECK_SRVIDS
		 * to verify that the server_id still exists,
		 * which means only the server_id.unique_id and
		 * server_id.vnn are verified, while server_id.pid
		 * is not verified at all.
		 *
		 * TODO: do we want to verify server_id.pid somehow?
		 */
		if (!ctdb_serverids_exist(messaging_ctdbd_connection(),
					  todo_ids, todo_num,
					  todo_results)) {
			goto fail;
		}

		for (t=0; t<todo_num; t++) {
			idx = todo_idx[t];

			results[idx] = todo_results[t];
		}
	}

	/* Phase 3: remaining remote ids — check bare process existence. */
	if (remote_num != 0) {
		todo_num = 0;

		for (t=0; t<remote_num; t++) {
			idx = remote_idx[t];
			todo_idx[todo_num] = idx;
			todo_ids[todo_num] = ids[idx];
			todo_results[todo_num] = false;
			todo_num += 1;
		}

		if (!ctdb_processes_exist(messaging_ctdbd_connection(),
					  todo_ids, todo_num,
					  todo_results)) {
			goto fail;
		}

		for (t=0; t<todo_num; t++) {
			idx = todo_idx[t];

			if (!todo_results[t]) {
				continue;
			}

			if (ids[idx].unique_id ==
			    SERVERID_UNIQUE_ID_NOT_TO_VERIFY) {
				results[idx] = true;
				continue;
			}

			/* Alive process: still confirm via serverid db. */
			verify_idx[verify_num] = idx;
			verify_num += 1;
		}
	}

	/* Phase 4: confirm queued ids against the serverid tdb record. */
	for (t=0; t<verify_num; t++) {
		struct serverid_exists_state state;
		struct serverid_key key;
		TDB_DATA tdbkey;
		NTSTATUS status;

		idx = verify_idx[t];
		serverid_fill_key(&ids[idx], &key);
		tdbkey = make_tdb_data((uint8_t *)&key, sizeof(key));

		state.id = &ids[idx];
		state.exists = false;

		status = dbwrap_parse_record(db, tdbkey, server_exists_parse,
					     &state);
		if (!NT_STATUS_IS_OK(status)) {
			results[idx] = false;
			continue;
		}
		results[idx] = state.exists;
	}

	result = true;
fail:
	/* Single cleanup path for success and failure alike. */
	TALLOC_FREE(verify_idx);
	TALLOC_FREE(remote_idx);
	TALLOC_FREE(todo_results);
	TALLOC_FREE(todo_ids);
	TALLOC_FREE(todo_idx);
	return result;
}