/*
 * Queue an SMB1 byte-range lock request that could not be granted
 * immediately onto the global blocking-lock queue.
 *
 * A PENDING_READ_LOCK/PENDING_WRITE_LOCK record is registered via
 * brl_lock() so the lock owner is notified, ownership of the smb_request
 * is moved onto the queue entry (talloc_move), and the brl timeout is
 * recalculated so the request can expire.
 *
 * br_lck           - byte-range lock context the pending record is added to.
 * req              - the blocked SMB request; on success ownership moves to
 *                    the queue entry and the caller's pointer is NULLed.
 * lock_timeout     - timeout in msec; -1 means never expire.
 * lock_num         - index of the failing lock within the request.
 * lock_pid / blocking_pid - lock owner and current holder identifiers.
 *
 * Returns True when queued, False on failure (chained request, allocation
 * failure, or brl_lock() refusing the pending record). On False the caller
 * still owns req.
 */
bool push_blocking_lock_request( struct byte_range_lock *br_lck,
		struct smb_request *req,
		files_struct *fsp,
		int lock_timeout,
		int lock_num,
		uint32_t lock_pid,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		uint64_t offset,
		uint64_t count,
		uint32_t blocking_pid)
{
	struct blocking_lock_record *blr;
	NTSTATUS status;

	/* Chained (AndX) requests cannot be suspended and replayed later. */
	if(req_is_in_chain(req)) {
		DEBUG(0,("push_blocking_lock_request: cannot queue a chained request (currently).\n"));
		return False;
	}

	/*
	 * Now queue an entry on the blocking lock queue. We setup
	 * the expiration time here.
	 */

	blr = talloc(NULL, struct blocking_lock_record);
	if (blr == NULL) {
		DEBUG(0,("push_blocking_lock_request: Malloc fail !\n" ));
		return False;
	}

	blr->next = NULL;
	blr->prev = NULL;

	blr->fsp = fsp;
	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		/* lock_timeout is in milliseconds; split into sec/usec. */
		blr->expire_time = timeval_current_ofs(lock_timeout/1000,
					(lock_timeout % 1000) * 1000);
	}
	blr->lock_num = lock_num;
	blr->lock_pid = lock_pid;
	blr->blocking_pid = blocking_pid;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(smbd_messaging_context(), br_lck,
			lock_pid,
			procid_self(),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			lock_timeout ? True : False, /* blocking_lock. */
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request: failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return False;
	}

	SMB_PERFCOUNT_DEFER_OP(&req->pcd, &req->pcd);
	/* Take ownership of the request; req is NULLed in the caller. */
	blr->req = talloc_move(blr, &req);

	DLIST_ADD_END(blocking_lock_queue, blr, struct blocking_lock_record *);
	/* Re-arm the queue timeout now that a new entry may expire sooner. */
	recalc_brl_timeout();

	/* Ensure we'll receive messages when this is unlocked. */
	if (!blocking_lock_unlock_state) {
		messaging_register(smbd_messaging_context(), NULL,
				MSG_SMB_UNLOCK,
				received_unlock_msg);
		blocking_lock_unlock_state = true;
	}

	DEBUG(3,("push_blocking_lock_request: lock request blocked with "
		"expiry time (%u sec. %u usec) (+%d msec) for fnum = %d, name = %s\n",
		(unsigned int)blr->expire_time.tv_sec,
		(unsigned int)blr->expire_time.tv_usec, lock_timeout,
		blr->fsp->fnum, blr->fsp->fsp_name ));

	return True;
}
NTSTATUS schedule_aio_write_and_X(connection_struct *conn, struct smb_request *smbreq, files_struct *fsp, const char *data, SMB_OFF_T startpos, size_t numtowrite) { struct aio_extra *aio_ex; SMB_STRUCT_AIOCB *a; size_t bufsize; size_t min_aio_write_size = lp_aio_write_size(SNUM(conn)); int ret; /* Ensure aio is initialized. */ if (!initialize_async_io_handler()) { return NT_STATUS_RETRY; } if (fsp->base_fsp != NULL) { /* No AIO on streams yet */ DEBUG(10, ("AIO on streams not yet supported\n")); return NT_STATUS_RETRY; } if ((!min_aio_write_size || (numtowrite < min_aio_write_size)) && !SMB_VFS_AIO_FORCE(fsp)) { /* Too small a write for aio request. */ DEBUG(10,("schedule_aio_write_and_X: write size (%u) too " "small for minimum aio_write of %u\n", (unsigned int)numtowrite, (unsigned int)min_aio_write_size )); return NT_STATUS_RETRY; } /* Only do this on non-chained and non-chaining writes not using the * write cache. */ if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) { return NT_STATUS_RETRY; } if (outstanding_aio_calls >= aio_pending_size) { DEBUG(3,("schedule_aio_write_and_X: Already have %d aio " "activities outstanding.\n", outstanding_aio_calls )); DEBUG(10,("schedule_aio_write_and_X: failed to schedule " "aio_write for file %s, offset %.0f, len = %u " "(mid = %u)\n", fsp_str_dbg(fsp), (double)startpos, (unsigned int)numtowrite, (unsigned int)smbreq->mid )); return NT_STATUS_RETRY; } bufsize = smb_size + 6*2; if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) { DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n")); return NT_STATUS_NO_MEMORY; } aio_ex->handle_completion = handle_aio_write_complete; aio_ex->write_through = BITSETW(smbreq->vwv+7,0); construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data); srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True); SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. 
*/ init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, (uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK, &aio_ex->lock); /* Take the lock until the AIO completes. */ if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { TALLOC_FREE(aio_ex); return NT_STATUS_FILE_LOCK_CONFLICT; } a = &aio_ex->acb; /* Now set up the aio record for the write call. */ a->aio_fildes = fsp->fh->fd; a->aio_buf = discard_const_p(char, data); a->aio_nbytes = numtowrite; a->aio_offset = startpos; a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; a->aio_sigevent.sigev_value.sival_ptr = aio_ex; ret = SMB_VFS_AIO_WRITE(fsp, a); if (ret == -1) { DEBUG(3,("schedule_aio_wrote_and_X: aio_write failed. " "Error %s\n", strerror(errno) )); SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); TALLOC_FREE(aio_ex); return NT_STATUS_RETRY; } outstanding_aio_calls++; aio_ex->smbreq = talloc_move(aio_ex, &smbreq); /* This should actually be improved to span the write. */ contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE); contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE); if (!aio_ex->write_through && !lp_syncalways(SNUM(fsp->conn)) && fsp->aio_write_behind) { /* Lie to the client and immediately claim we finished the * write. */ SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite); SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1); show_msg((char *)aio_ex->outbuf.data); if (!srv_send_smb(aio_ex->smbreq->sconn, (char *)aio_ex->outbuf.data, true, aio_ex->smbreq->seqnum+1, IS_CONN_ENCRYPTED(fsp->conn), &aio_ex->smbreq->pcd)) { exit_server_cleanly("schedule_aio_write_and_X: " "srv_send_smb failed."); } DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write " "behind for file %s\n", fsp_str_dbg(fsp))); }
/*
 * Queue a byte-range lock request that could not be granted immediately
 * onto the per-connection (sconn) blocking-lock queue.
 *
 * SMB2 requests are dispatched to push_blocking_lock_request_smb2().
 * For SMB1, a PENDING_READ_LOCK/PENDING_WRITE_LOCK record is registered
 * via brl_lock(), ownership of the smb_request moves to the queue entry
 * (talloc_move), and the brl timeout is recalculated so the request can
 * expire.
 *
 * br_lck            - byte-range lock context the pending record is added to.
 * req               - the blocked SMB request; on success ownership moves to
 *                     the queue entry and the caller's pointer is NULLed.
 * lock_timeout      - timeout in msec; -1 means never expire.
 * lock_num          - index of the failing lock within the request.
 * smblctx / blocking_smblctx - lock owner and current holder contexts.
 *
 * Returns True when queued, False on failure (chained request, allocation
 * failure, or brl_lock() refusing the pending record). On False the caller
 * still owns req.
 */
bool push_blocking_lock_request( struct byte_range_lock *br_lck,
		struct smb_request *req,
		files_struct *fsp,
		int lock_timeout,
		int lock_num,
		uint64_t smblctx,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		uint64_t offset,
		uint64_t count,
		uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = req->sconn;
	struct blocking_lock_record *blr;
	NTSTATUS status;

	/* SMB2 blocking locks are handled by their own queue. */
	if (req->smb2req) {
		return push_blocking_lock_request_smb2(br_lck,
				req,
				fsp,
				lock_timeout,
				lock_num,
				smblctx,
				lock_type,
				lock_flav,
				offset,
				count,
				blocking_smblctx);
	}

	/* Chained (AndX) requests cannot be suspended and replayed later. */
	if(req_is_in_chain(req)) {
		DEBUG(0,("push_blocking_lock_request: cannot queue a chained request (currently).\n"));
		return False;
	}

	/*
	 * Now queue an entry on the blocking lock queue. We setup
	 * the expiration time here.
	 */

	blr = talloc(NULL, struct blocking_lock_record);
	if (blr == NULL) {
		DEBUG(0,("push_blocking_lock_request: Malloc fail !\n" ));
		return False;
	}

	blr->next = NULL;
	blr->prev = NULL;

	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		/* lock_timeout is in milliseconds. */
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(req->sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(req->sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			True,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request: failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return False;
	}

	SMB_PERFCOUNT_DEFER_OP(&req->pcd, &req->pcd);
	/* Take ownership of the request; req is NULLed in the caller. */
	blr->req = talloc_move(blr, &req);

	DLIST_ADD_END(sconn->smb1.locks.blocking_lock_queue, blr,
			struct blocking_lock_record *);
	/* Re-arm the queue timeout now that a new entry may expire sooner. */
	recalc_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb1.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				MSG_SMB_UNLOCK,
				received_unlock_msg);
		sconn->smb1.locks.blocking_lock_unlock_state = true;
	}

	DEBUG(3,("push_blocking_lock_request: lock request blocked with "
		"expiry time (%u sec. %u usec) (+%d msec) for %s, name = %s\n",
		(unsigned int)blr->expire_time.tv_sec,
		(unsigned int)blr->expire_time.tv_usec, lock_timeout,
		fsp_fnum_dbg(blr->fsp), fsp_str_dbg(blr->fsp)));

	return True;
}
NTSTATUS schedule_aio_read_and_X(connection_struct *conn, struct smb_request *smbreq, files_struct *fsp, SMB_OFF_T startpos, size_t smb_maxcnt) { struct aio_extra *aio_ex; SMB_STRUCT_AIOCB *a; size_t bufsize; size_t min_aio_read_size = lp_aio_read_size(SNUM(conn)); int ret; /* Ensure aio is initialized. */ if (!initialize_async_io_handler()) { return NT_STATUS_RETRY; } if (fsp->base_fsp != NULL) { /* No AIO on streams yet */ DEBUG(10, ("AIO on streams not yet supported\n")); return NT_STATUS_RETRY; } if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size)) && !SMB_VFS_AIO_FORCE(fsp)) { /* Too small a read for aio request. */ DEBUG(10,("schedule_aio_read_and_X: read size (%u) too small " "for minimum aio_read of %u\n", (unsigned int)smb_maxcnt, (unsigned int)min_aio_read_size )); return NT_STATUS_RETRY; } /* Only do this on non-chained and non-chaining reads not using the * write cache. */ if (req_is_in_chain(smbreq) || (lp_write_cache_size(SNUM(conn)) != 0)) { return NT_STATUS_RETRY; } if (outstanding_aio_calls >= aio_pending_size) { DEBUG(10,("schedule_aio_read_and_X: Already have %d aio " "activities outstanding.\n", outstanding_aio_calls )); return NT_STATUS_RETRY; } /* The following is safe from integer wrap as we've already checked smb_maxcnt is 128k or less. Wct is 12 for read replies */ bufsize = smb_size + 12 * 2 + smb_maxcnt; if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) { DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n")); return NT_STATUS_NO_MEMORY; } aio_ex->handle_completion = handle_aio_read_complete; construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data); srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True); SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */ init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid, (uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK, &aio_ex->lock); /* Take the lock until the AIO completes. 
*/ if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) { TALLOC_FREE(aio_ex); return NT_STATUS_FILE_LOCK_CONFLICT; } a = &aio_ex->acb; /* Now set up the aio record for the read call. */ a->aio_fildes = fsp->fh->fd; a->aio_buf = smb_buf(aio_ex->outbuf.data); a->aio_nbytes = smb_maxcnt; a->aio_offset = startpos; a->aio_sigevent.sigev_notify = SIGEV_SIGNAL; a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO; a->aio_sigevent.sigev_value.sival_ptr = aio_ex; ret = SMB_VFS_AIO_READ(fsp, a); if (ret == -1) { DEBUG(0,("schedule_aio_read_and_X: aio_read failed. " "Error %s\n", strerror(errno) )); SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock); TALLOC_FREE(aio_ex); return NT_STATUS_RETRY; } outstanding_aio_calls++; aio_ex->smbreq = talloc_move(aio_ex, &smbreq); DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, " "offset %.0f, len = %u (mid = %u)\n", fsp_str_dbg(fsp), (double)startpos, (unsigned int)smb_maxcnt, (unsigned int)aio_ex->smbreq->mid )); return NT_STATUS_OK; }