/*
 * Downgrade the oplock recorded for fsp in the locked share mode
 * record. An exclusive/batch entry is moved to FAKE_LEVEL_II_OPLOCK
 * (the intermediate internal state), anything else to NO_OPLOCK.
 * Marks the record modified so it gets written back.
 * Returns False if fsp has no entry in the record, True otherwise.
 */
bool remove_share_oplock(struct share_mode_lock *lck, files_struct *fsp)
{
	struct share_mode_entry entry, *e;

	/* Don't care about the pid owner being correct here - just a search. */
	fill_share_mode_entry(&entry, fsp, (uid_t)-1, 0, NO_OPLOCK);

	e = find_share_mode_entry(lck->data, &entry);
	if (e == NULL) {
		/* No matching entry for this fsp in the record. */
		return False;
	}

	if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
		/*
		 * Going from exclusive or batch,
		 * we always go through FAKE_LEVEL_II
		 * first.
		 */
		/* The in-memory fsp state must agree with the DB entry. */
		if (!EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
			smb_panic("remove_share_oplock: logic error");
		}
		e->op_type = FAKE_LEVEL_II_OPLOCK;
	} else {
		e->op_type = NO_OPLOCK;
	}

	/* Ensure the updated record is stored on unlock. */
	lck->data->modified = True;
	return True;
}
/*
 * Tear down all local oplock state held on fsp: tell the kernel
 * oplock backend (if any) the oplock is gone, fix up the
 * per-connection oplock counters, reset the fsp oplock fields,
 * flush and delete the write cache, and cancel any pending
 * oplock break timeout.
 */
static void release_file_oplock(files_struct *fsp)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct kernel_oplocks *koplocks = sconn->oplocks.kernel_ops;

	/* Inform the kernel backend before we forget the oplock locally. */
	if ((fsp->oplock_type != NO_OPLOCK) && koplocks) {
		koplocks->ops->release_oplock(koplocks, fsp, NO_OPLOCK);
	}

	/* Keep the per-connection open-oplock counters in sync. */
	if (fsp->oplock_type == LEVEL_II_OPLOCK) {
		sconn->oplocks.level_II_open--;
	} else if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
		sconn->oplocks.exclusive_open--;
	}
	/* The counters must never go negative. */
	SMB_ASSERT(sconn->oplocks.exclusive_open>=0);
	SMB_ASSERT(sconn->oplocks.level_II_open>=0);

	fsp->oplock_type = NO_OPLOCK;
	fsp->sent_oplock_break = NO_BREAK_SENT;

	/* The write cache only exists under an oplock - drop it. */
	flush_write_cache(fsp, SAMBA_OPLOCK_RELEASE_FLUSH);
	delete_write_cache(fsp);

	/* Cancel any outstanding break timeout event. */
	TALLOC_FREE(fsp->oplock_timeout);
}
/*
 * Recompute the number of read oplocks/leases from the share mode
 * entries in lck and store it in the byte range lock record for
 * fsp, so that brlock code can cheaply contend them.
 * Returns false only if the brlock record could not be fetched.
 */
bool update_num_read_oplocks(files_struct *fsp, struct share_mode_lock *lck)
{
	struct share_mode_data *d = lck->data;
	struct byte_range_lock *br_lck;
	uint32_t num_read_oplocks = 0;
	uint32_t i;

	if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
		/*
		 * If we're the only one, we don't need a brlock entry
		 */
		remove_stale_share_mode_entries(d);
		/* An exclusive holder must be the sole, exclusive entry. */
		SMB_ASSERT(d->num_share_modes == 1);
		SMB_ASSERT(EXCLUSIVE_OPLOCK_TYPE(d->share_modes[0].op_type));
		return true;
	}

	/* Count every entry whose lease grants READ. */
	for (i=0; i<d->num_share_modes; i++) {
		struct share_mode_entry *e = &d->share_modes[i];
		uint32_t e_lease_type = get_lease_type(d, e);

		if (e_lease_type & SMB2_LEASE_READ) {
			num_read_oplocks += 1;
		}
	}

	/* Cheap read-only check first to avoid a write transaction. */
	br_lck = brl_get_locks_readonly(fsp);
	if (br_lck == NULL) {
		return false;
	}
	if (brl_num_read_oplocks(br_lck) == num_read_oplocks) {
		/* Stored count already matches - nothing to write. */
		return true;
	}

	/* Re-fetch writable and update the stored count. */
	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}
	brl_set_num_read_oplocks(br_lck, num_read_oplocks);
	/* Freeing the writable record commits the change. */
	TALLOC_FREE(br_lck);
	return true;
}
/*
 * Validate a share mode entry's oplock classification. An entry is
 * valid when its op_type matches exactly one of the known classes:
 * no oplock, exclusive/batch, or level II. Asserts if more than one
 * class matches; returns False when none does.
 */
BOOL is_valid_share_mode_entry(const struct share_mode_entry *e)
{
	int matches = 0;

	if (e->op_type == NO_OPLOCK) {
		matches += 1;
	}
	if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}
	if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}

	/* The oplock classes are mutually exclusive. */
	SMB_ASSERT(matches <= 1);

	return (matches != 0);
}
/*
 * Translate an internal Samba oplock type into the SMB2 on-the-wire
 * oplock level, including the lease level. Unknown or internal-only
 * types map to SMB2_OPLOCK_LEVEL_NONE.
 */
static uint8_t map_samba_oplock_levels_to_smb2(int oplock_type)
{
	if (BATCH_OPLOCK_TYPE(oplock_type)) {
		return SMB2_OPLOCK_LEVEL_BATCH;
	}
	if (EXCLUSIVE_OPLOCK_TYPE(oplock_type)) {
		return SMB2_OPLOCK_LEVEL_EXCLUSIVE;
	}
	if (oplock_type == LEVEL_II_OPLOCK) {
		return SMB2_OPLOCK_LEVEL_II;
	}
	if (oplock_type == LEASE_OPLOCK) {
		return SMB2_OPLOCK_LEVEL_LEASE;
	}
	return SMB2_OPLOCK_LEVEL_NONE;
}
/*
 * Translate an internal Samba oplock type into the SMB2 on-the-wire
 * oplock level. Anything not explicitly mapped is reported as
 * SMB2_OPLOCK_LEVEL_NONE.
 */
static uint8_t map_samba_oplock_levels_to_smb2(int oplock_type)
{
	if (BATCH_OPLOCK_TYPE(oplock_type)) {
		return SMB2_OPLOCK_LEVEL_BATCH;
	}
	if (EXCLUSIVE_OPLOCK_TYPE(oplock_type)) {
		return SMB2_OPLOCK_LEVEL_EXCLUSIVE;
	}
	/*
	 * Deliberately compare against LEVEL_II_OPLOCK only and not
	 * the LEVEL_II_OPLOCK_TYPE() macro: the macro also matches
	 * FAKE_LEVEL_II_OPLOCKs, which are internal only and must
	 * never go on the wire.
	 */
	if (oplock_type == LEVEL_II_OPLOCK) {
		return SMB2_OPLOCK_LEVEL_II;
	}
	return SMB2_OPLOCK_LEVEL_NONE;
}
/*
 * Validate a share mode entry. Entries already marked stale are
 * never valid. Otherwise the op_type must match exactly one of the
 * known classes (no oplock, exclusive/batch, level II); if more than
 * one matches and the owning server process still exists, the record
 * is corrupt and we panic.
 */
bool is_valid_share_mode_entry(const struct share_mode_entry *e)
{
	int matches;

	if (e->stale) {
		/* Stale entries are never considered valid. */
		return false;
	}

	matches = (e->op_type == NO_OPLOCK) ? 1 : 0;
	if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}
	if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}

	/* Multiple matches are only fatal if the owner is still alive. */
	if ((matches > 1) && serverid_exists(&e->pid)) {
		smb_panic("Invalid share mode entry");
	}

	return (matches != 0);
}
/*
 * Validate a share mode entry. Unused slots are rejected outright;
 * otherwise op_type must match exactly one of the known classes
 * (no oplock, exclusive/batch, level II). Asserts on more than one
 * match; returns False when none matches.
 */
BOOL is_valid_share_mode_entry(const struct share_mode_entry *e)
{
	int matches = 0;

	if (e->op_type == UNUSED_SHARE_MODE_ENTRY) {
		/* cope with dead entries from the process not
		   existing. These should not be considered valid,
		   otherwise we end up doing zero timeout sharing
		   violation */
		return False;
	}

	if (e->op_type == NO_OPLOCK) {
		matches += 1;
	}
	if (EXCLUSIVE_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}
	if (LEVEL_II_OPLOCK_TYPE(e->op_type)) {
		matches += 1;
	}

	/* The oplock classes are mutually exclusive. */
	SMB_ASSERT(matches <= 1);

	return (matches != 0);
}
/*
 * Downgrade an exclusive/batch oplock on fsp to level II: notify the
 * kernel oplock backend (if any), move the per-connection counters
 * from exclusive to level II, reset the break state and cancel any
 * pending break timeout. A no-op (with a loud debug) if fsp does not
 * currently hold an exclusive-class oplock.
 */
static void downgrade_file_oplock(files_struct *fsp)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct kernel_oplocks *kops = sconn->oplocks.kernel_ops;

	if (!EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
		DEBUG(0, ("trying to downgrade an already-downgraded oplock!\n"));
		return;
	}

	/* Tell the kernel backend before changing local state. */
	if (kops != NULL) {
		kops->ops->release_oplock(kops, fsp, LEVEL_II_OPLOCK);
	}

	fsp->oplock_type = LEVEL_II_OPLOCK;

	/* One fewer exclusive holder, one more level II holder. */
	sconn->oplocks.exclusive_open--;
	sconn->oplocks.level_II_open++;

	fsp->sent_oplock_break = NO_BREAK_SENT;

	/* Cancel any outstanding break timeout event. */
	TALLOC_FREE(fsp->oplock_timeout);
}
/*
 * Grant the oplock type already recorded in fsp->oplock_type: pass
 * it down to the kernel oplock backend when kernel oplocks are
 * enabled, and bump the per-connection oplock counters.
 * Returns NT_STATUS_NOT_SUPPORTED if the kernel backend cannot do
 * level II, a mapped errno if the backend refuses the oplock, and
 * NT_STATUS_OK on success.
 */
NTSTATUS set_file_oplock(files_struct *fsp)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct kernel_oplocks *koplocks = sconn->oplocks.kernel_ops;
	/* Kernel oplocks are used only if configured AND a backend exists. */
	bool use_kernel = lp_kernel_oplocks(SNUM(fsp->conn)) && koplocks;

	if (fsp->oplock_type == LEVEL_II_OPLOCK) {
		/* Some kernel backends cannot express level II oplocks. */
		if (use_kernel &&
		    !(koplocks->flags & KOPLOCKS_LEVEL2_SUPPORTED)) {
			DEBUG(10, ("Refusing level2 oplock, kernel oplocks "
				   "don't support them\n"));
			return NT_STATUS_NOT_SUPPORTED;
		}
	}

	/* Hand the oplock to the kernel; map errno on failure. */
	if ((fsp->oplock_type != NO_OPLOCK) &&
	    use_kernel &&
	    !koplocks->ops->set_oplock(koplocks, fsp, fsp->oplock_type)) {
		return map_nt_error_from_unix(errno);
	}

	fsp->sent_oplock_break = NO_BREAK_SENT;

	/* Keep the per-connection open-oplock counters in sync. */
	if (fsp->oplock_type == LEVEL_II_OPLOCK) {
		sconn->oplocks.level_II_open++;
	} else if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
		sconn->oplocks.exclusive_open++;
	}

	DEBUG(5,("set_file_oplock: granted oplock on file %s, %s/%lu, "
		 "tv_sec = %x, tv_usec = %x\n",
		 fsp_str_dbg(fsp), file_id_string_tos(&fsp->file_id),
		 fsp->fh->gen_id, (int)fsp->open_time.tv_sec,
		 (int)fsp->open_time.tv_usec ));

	return NT_STATUS_OK;
}
ssize_t write_file(struct smb_request *req, files_struct *fsp, const char *data, SMB_OFF_T pos, size_t n) { write_cache *wcp = fsp->wcp; ssize_t total_written = 0; int write_path = -1; if (fsp->print_file) { uint32_t t; int ret; ret = print_spool_write(fsp, data, n, pos, &t); if (ret) { errno = ret; return -1; } return t; } if (!fsp->can_write) { errno = EPERM; return -1; } if (!fsp->modified) { fsp->modified = True; if (SMB_VFS_FSTAT(fsp, &fsp->fsp_name->st) == 0) { trigger_write_time_update(fsp); if (!fsp->posix_open && (lp_store_dos_attributes(SNUM(fsp->conn)) || MAP_ARCHIVE(fsp->conn))) { int dosmode = dos_mode(fsp->conn, fsp->fsp_name); if (!IS_DOS_ARCHIVE(dosmode)) { file_set_dosmode(fsp->conn, fsp->fsp_name, dosmode | FILE_ATTRIBUTE_ARCHIVE, NULL, false); } } /* * If this is the first write and we have an exclusive oplock then setup * the write cache. */ if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && !wcp) { setup_write_cache(fsp, fsp->fsp_name->st.st_ex_size); wcp = fsp->wcp; } } } #ifdef WITH_PROFILE DO_PROFILE_INC(writecache_total_writes); if (!fsp->oplock_type) { DO_PROFILE_INC(writecache_non_oplock_writes); } #endif /* * If this file is level II oplocked then we need * to grab the shared memory lock and inform all * other files with a level II lock that they need * to flush their read caches. We keep the lock over * the shared memory area whilst doing this. */ /* This should actually be improved to span the write. 
*/ contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE); contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE); #ifdef WITH_PROFILE if (profile_p && profile_p->writecache_total_writes % 500 == 0) { DEBUG(3,("WRITECACHE: initwrites=%u abutted=%u total=%u \ nonop=%u allocated=%u active=%u direct=%u perfect=%u readhits=%u\n", profile_p->writecache_init_writes, profile_p->writecache_abutted_writes, profile_p->writecache_total_writes, profile_p->writecache_non_oplock_writes, profile_p->writecache_allocated_write_caches, profile_p->writecache_num_write_caches, profile_p->writecache_direct_writes, profile_p->writecache_num_perfect_writes, profile_p->writecache_read_hits )); DEBUG(3,("WRITECACHE: Flushes SEEK=%d, READ=%d, WRITE=%d, READRAW=%d, OPLOCK=%d, CLOSE=%d, SYNC=%d\n", profile_p->writecache_flushed_writes[SEEK_FLUSH], profile_p->writecache_flushed_writes[READ_FLUSH], profile_p->writecache_flushed_writes[WRITE_FLUSH], profile_p->writecache_flushed_writes[READRAW_FLUSH], profile_p->writecache_flushed_writes[OPLOCK_RELEASE_FLUSH], profile_p->writecache_flushed_writes[CLOSE_FLUSH], profile_p->writecache_flushed_writes[SYNC_FLUSH] )); }
/*
 * Test whether the byte range [offset, offset+count) on fsp is
 * locked against the requested lock_type. Returns False (unlocked)
 * for zero-length ranges, when locking is disabled for the share, or
 * when strict locking is off. In "Auto" strict locking mode an
 * exclusive oplock (for read/write checks) or a level II oplock (for
 * read checks) lets us skip the byte range lock lookup entirely.
 */
BOOL is_locked(files_struct *fsp,
		SMB_BIG_UINT count,
		SMB_BIG_UINT offset,
		enum brl_type lock_type)
{
	int snum = SNUM(fsp->conn);
	int strict_locking = lp_strict_locking(snum);
	enum brl_flavour lock_flav = lp_posix_cifsu_locktype();
	BOOL ret = True;
	BOOL query_brl = True;

	if (count == 0) {
		/* A zero-length range can never conflict. */
		return False;
	}

	if (!lp_locking(snum) || !strict_locking) {
		return False;
	}

	if (strict_locking == Auto) {
		/* With a suitable oplock we know no other client can
		   hold a conflicting lock - skip the brlock lookup. */
		if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) &&
		    (lock_type == READ_LOCK || lock_type == WRITE_LOCK)) {
			DEBUG(10,("is_locked: optimisation - exclusive oplock on file %s\n", fsp->fsp_name ));
			ret = False;
			query_brl = False;
		} else if ((fsp->oplock_type == LEVEL_II_OPLOCK) &&
			   (lock_type == READ_LOCK)) {
			DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp->fsp_name ));
			ret = False;
			query_brl = False;
		}
	}

	if (query_brl) {
		struct byte_range_lock *br_lck = brl_get_locks(fsp);
		if (!br_lck) {
			return False;
		}
		ret = !brl_locktest(br_lck,
				global_smbpid,
				procid_self(),
				offset,
				count,
				lock_type,
				lock_flav);
		byte_range_lock_destructor(br_lck);
	}

	DEBUG(10,("is_locked: flavour = %s brl start=%.0f len=%.0f %s for fnum %d file %s\n",
			lock_flav_name(lock_flav),
			(double)offset,
			(double)count,
			ret ? "locked" : "unlocked",
			fsp->fnum,
			fsp->fsp_name ));

	return ret;
}
ssize_t write_file(files_struct *fsp, const char *data, SMB_OFF_T pos, size_t n) { write_cache *wcp = fsp->wcp; ssize_t total_written = 0; int write_path = -1; if (fsp->print_file) { #ifdef AVM_NO_PRINTING errno = EBADF; return -1; #else fstring sharename; uint32 jobid; if (!rap_to_pjobid(fsp->rap_print_jobid, sharename, &jobid)) { DEBUG(3,("write_file: Unable to map RAP jobid %u to jobid.\n", (unsigned int)fsp->rap_print_jobid )); errno = EBADF; return -1; } return print_job_write(SNUM(fsp->conn), jobid, data, pos, n); #endif /* AVM_NO_PRINTING */ } if (!fsp->can_write) { errno = EPERM; return(0); } if (!fsp->modified) { SMB_STRUCT_STAT st; fsp->modified = True; if (SMB_VFS_FSTAT(fsp,fsp->fh->fd,&st) == 0) { int dosmode = dos_mode(fsp->conn,fsp->fsp_name,&st); if ((lp_store_dos_attributes(SNUM(fsp->conn)) || MAP_ARCHIVE(fsp->conn)) && !IS_DOS_ARCHIVE(dosmode)) { file_set_dosmode(fsp->conn,fsp->fsp_name,dosmode | aARCH,&st, False); } /* * If this is the first write and we have an exclusive oplock then setup * the write cache. */ if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && !wcp) { setup_write_cache(fsp, st.st_size); wcp = fsp->wcp; } } } #ifdef WITH_PROFILE DO_PROFILE_INC(writecache_total_writes); if (!fsp->oplock_type) { DO_PROFILE_INC(writecache_non_oplock_writes); } #endif /* * If this file is level II oplocked then we need * to grab the shared memory lock and inform all * other files with a level II lock that they need * to flush their read caches. We keep the lock over * the shared memory area whilst doing this. 
*/ release_level_2_oplocks_on_change(fsp); #ifdef WITH_PROFILE if (profile_p && profile_p->writecache_total_writes % 500 == 0) { DEBUG(3,("WRITECACHE: initwrites=%u abutted=%u total=%u \ nonop=%u allocated=%u active=%u direct=%u perfect=%u readhits=%u\n", profile_p->writecache_init_writes, profile_p->writecache_abutted_writes, profile_p->writecache_total_writes, profile_p->writecache_non_oplock_writes, profile_p->writecache_allocated_write_caches, profile_p->writecache_num_write_caches, profile_p->writecache_direct_writes, profile_p->writecache_num_perfect_writes, profile_p->writecache_read_hits )); DEBUG(3,("WRITECACHE: Flushes SEEK=%d, READ=%d, WRITE=%d, READRAW=%d, OPLOCK=%d, CLOSE=%d, SYNC=%d\n", profile_p->writecache_flushed_writes[SEEK_FLUSH], profile_p->writecache_flushed_writes[READ_FLUSH], profile_p->writecache_flushed_writes[WRITE_FLUSH], profile_p->writecache_flushed_writes[READRAW_FLUSH], profile_p->writecache_flushed_writes[OPLOCK_RELEASE_FLUSH], profile_p->writecache_flushed_writes[CLOSE_FLUSH], profile_p->writecache_flushed_writes[SYNC_FLUSH] )); }
ssize_t write_file(struct smb_request *req, files_struct *fsp, const char *data, off_t pos, size_t n) { struct write_cache *wcp = fsp->wcp; ssize_t total_written = 0; int write_path = -1; if (fsp->print_file) { uint32_t t; int ret; ret = print_spool_write(fsp, data, n, pos, &t); if (ret) { errno = ret; return -1; } return t; } if (!fsp->can_write) { errno = EPERM; return -1; } /* * If this is the first write and we have an exclusive oplock * then setup the write cache. */ if (!fsp->modified && EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && (wcp == NULL)) { setup_write_cache(fsp, fsp->fsp_name->st.st_ex_size); wcp = fsp->wcp; } mark_file_modified(fsp); #ifdef WITH_PROFILE DO_PROFILE_INC(writecache_total_writes); if (!fsp->oplock_type) { DO_PROFILE_INC(writecache_non_oplock_writes); } #endif /* * If this file is level II oplocked then we need * to grab the shared memory lock and inform all * other files with a level II lock that they need * to flush their read caches. We keep the lock over * the shared memory area whilst doing this. */ /* This should actually be improved to span the write. 
*/ contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE); contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE); #ifdef WITH_PROFILE if (profile_p && profile_p->writecache_total_writes % 500 == 0) { DEBUG(3,("WRITECACHE: initwrites=%u abutted=%u total=%u \ nonop=%u allocated=%u active=%u direct=%u perfect=%u readhits=%u\n", profile_p->writecache_init_writes, profile_p->writecache_abutted_writes, profile_p->writecache_total_writes, profile_p->writecache_non_oplock_writes, profile_p->writecache_allocated_write_caches, profile_p->writecache_num_write_caches, profile_p->writecache_direct_writes, profile_p->writecache_num_perfect_writes, profile_p->writecache_read_hits )); DEBUG(3,("WRITECACHE: Flushes SEEK=%d, READ=%d, WRITE=%d, READRAW=%d, OPLOCK=%d, CLOSE=%d, SYNC=%d\n", profile_p->writecache_flushed_writes[SEEK_FLUSH], profile_p->writecache_flushed_writes[READ_FLUSH], profile_p->writecache_flushed_writes[WRITE_FLUSH], profile_p->writecache_flushed_writes[READRAW_FLUSH], profile_p->writecache_flushed_writes[OPLOCK_RELEASE_FLUSH], profile_p->writecache_flushed_writes[CLOSE_FLUSH], profile_p->writecache_flushed_writes[SYNC_FLUSH] )); }
/*
 * Default strict-lock check: decide whether the range described by
 * plock may be accessed on fsp. Returns True (access allowed) for
 * zero-length ranges, when locking is disabled for the share, or
 * when strict locking is off. In "Auto" strict locking mode an
 * exclusive oplock (for read/write) or a level II oplock (for read)
 * lets us skip the byte range lock lookup entirely; otherwise the
 * answer comes from brl_locktest().
 */
bool strict_lock_default(files_struct *fsp, struct lock_struct *plock)
{
	int strict_locking = lp_strict_locking(fsp->conn->params);
	bool ret = False;
	bool check_range = True;

	if (plock->size == 0) {
		/* A zero-length range can never conflict. */
		return True;
	}

	if (!lp_locking(fsp->conn->params) || !strict_locking) {
		return True;
	}

	if (strict_locking == Auto) {
		/* With a suitable oplock we know no other client can
		   hold a conflicting lock - skip the brlock lookup. */
		if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) &&
		    (plock->lock_type == READ_LOCK ||
		     plock->lock_type == WRITE_LOCK)) {
			DEBUG(10,("is_locked: optimisation - exclusive oplock on file %s\n", fsp_str_dbg(fsp)));
			ret = True;
			check_range = False;
		} else if ((fsp->oplock_type == LEVEL_II_OPLOCK) &&
			   (plock->lock_type == READ_LOCK)) {
			DEBUG(10,("is_locked: optimisation - level II oplock on file %s\n", fsp_str_dbg(fsp)));
			ret = True;
			check_range = False;
		}
	}

	if (check_range) {
		struct byte_range_lock *br_lck = brl_get_locks_readonly(fsp);
		if (!br_lck) {
			return True;
		}
		ret = brl_locktest(br_lck,
				plock->context.smblctx,
				plock->context.pid,
				plock->start,
				plock->size,
				plock->lock_type,
				plock->lock_flav);
	}

	DEBUG(10,("strict_lock_default: flavour = %s brl start=%.0f "
			"len=%.0f %s for fnum %llu file %s\n",
			lock_flav_name(plock->lock_flav),
			(double)plock->start,
			(double)plock->size,
			ret ? "unlocked" : "locked",
			(unsigned long long)plock->fnum,
			fsp_str_dbg(fsp)));

	return ret;
}