/*
 * Build an SMB1 oplock-break notification into the caller-supplied buffer.
 *
 * The break is sent as an SMBlockingX request (8 words, no data) with the
 * LOCKING_ANDX_OPLOCK_RELEASE flag set.  The 0xFFFF pid/mid and 0xFF andx
 * byte mark it as a server-initiated, non-chained message.
 *
 * fsp    - file the break applies to (supplies tid and fnum)
 * cmd    - oplock level byte handed to the client (second byte of vwv3)
 * result - destination buffer, SMB1_BREAK_MESSAGE_LENGTH bytes
 */
static void new_break_message_smb1(files_struct *fsp, int cmd,
				   char result[SMB1_BREAK_MESSAGE_LENGTH])
{
	/* Start from a zeroed header, then mark it as an 8-word message. */
	memset(result, '\0', smb_size);
	srv_set_message(result, 8, 0, true);

	/* Header fields. */
	SCVAL(result, smb_com, SMBlockingX);
	SSVAL(result, smb_tid, fsp->conn->cnum);
	SSVAL(result, smb_pid, 0xFFFF);
	SSVAL(result, smb_uid, 0);
	SSVAL(result, smb_mid, 0xFFFF);

	/* Word vector: no andx chain, target fnum, break request + level. */
	SCVAL(result, smb_vwv0, 0xFF);
	SSVAL(result, smb_vwv2, fsp->fnum);
	SCVAL(result, smb_vwv3, LOCKING_ANDX_OPLOCK_RELEASE);
	SCVAL(result, smb_vwv3 + 1, cmd);
}
void reply_pipe_read_and_X(struct smb_request *req) { smb_np_struct *p = get_rpc_pipe_p(SVAL(req->inbuf,smb_vwv2)); int smb_maxcnt = SVAL(req->inbuf,smb_vwv5); int smb_mincnt = SVAL(req->inbuf,smb_vwv6); int nread = -1; char *data; bool unused; /* we don't use the offset given to use for pipe reads. This is deliberate, instead we always return the next lump of data on the pipe */ #if 0 uint32 smb_offs = IVAL(req->inbuf,smb_vwv3); #endif if (!p) { reply_doserror(req, ERRDOS, ERRbadfid); return; } reply_outbuf(req, 12, smb_maxcnt); data = smb_buf(req->outbuf); nread = read_from_pipe(p, data, smb_maxcnt, &unused); if (nread < 0) { reply_doserror(req, ERRDOS, ERRnoaccess); return; } srv_set_message((char *)req->outbuf, 12, nread, False); SSVAL(req->outbuf,smb_vwv5,nread); SSVAL(req->outbuf,smb_vwv6,smb_offset(data,req->outbuf)); SSVAL(smb_buf(req->outbuf),-2,nread); DEBUG(3,("readX-IPC pnum=%04x min=%d max=%d nread=%d\n", p->pnum, smb_mincnt, smb_maxcnt, nread)); chain_reply(req); }
/*
 * Try to schedule an asynchronous (POSIX AIO) write for a WriteAndX
 * request.  Returns NT_STATUS_RETRY to make the caller fall back to the
 * synchronous write path, NT_STATUS_OK once the aio has been queued, or
 * an error status.  Holds a strict byte-range lock for the duration of
 * the AIO; ownership of smbreq moves into the aio_extra on success.
 */
NTSTATUS schedule_aio_write_and_X(connection_struct *conn,
			     struct smb_request *smbreq,
			     files_struct *fsp,
			     const char *data,
			     SMB_OFF_T startpos,
			     size_t numtowrite)
{
	struct aio_extra *aio_ex;
	SMB_STRUCT_AIOCB *a;
	size_t bufsize;
	size_t min_aio_write_size = lp_aio_write_size(SNUM(conn));
	int ret;

	/* Ensure aio is initialized. */
	if (!initialize_async_io_handler()) {
		return NT_STATUS_RETRY;
	}

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	if ((!min_aio_write_size || (numtowrite < min_aio_write_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a write for aio request. */
		DEBUG(10,("schedule_aio_write_and_X: write size (%u) too "
			  "small for minimum aio_write of %u\n",
			  (unsigned int)numtowrite,
			  (unsigned int)min_aio_write_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining writes not using the
	 * write cache. */
	if (req_is_in_chain(smbreq) ||
	    (lp_write_cache_size(SNUM(conn)) != 0)) {
		return NT_STATUS_RETRY;
	}

	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(3,("schedule_aio_write_and_X: Already have %d aio "
			 "activities outstanding.\n",
			 outstanding_aio_calls ));
		DEBUG(10,("schedule_aio_write_and_X: failed to schedule "
			  "aio_write for file %s, offset %.0f, len = %u "
			  "(mid = %u)\n",
			  fsp_str_dbg(fsp), (double)startpos,
			  (unsigned int)numtowrite,
			  (unsigned int)smbreq->mid ));
		return NT_STATUS_RETRY;
	}

	/* WriteAndX reply is header + 6 words, no data bytes. */
	bufsize = smb_size + 6*2;

	if (!(aio_ex = create_aio_extra(NULL, fsp, bufsize))) {
		DEBUG(0,("schedule_aio_write_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}
	aio_ex->handle_completion = handle_aio_write_complete;
	/* Write-through bit is bit 0 of request word 7. */
	aio_ex->write_through = BITSETW(smbreq->vwv+7,0);

	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 6, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)numtowrite, WRITE_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	a = &aio_ex->acb;

	/* Now set up the aio record for the write call. */
	a->aio_fildes = fsp->fh->fd;
	a->aio_buf = discard_const_p(char, data);
	a->aio_nbytes = numtowrite;
	a->aio_offset = startpos;
	a->aio_sigevent.sigev_notify = SIGEV_SIGNAL;
	a->aio_sigevent.sigev_signo  = RT_SIGNAL_AIO;
	a->aio_sigevent.sigev_value.sival_ptr = aio_ex;

	ret = SMB_VFS_AIO_WRITE(fsp, a);
	if (ret == -1) {
		/* Fixed log text: previously misspelled as
		 * "schedule_aio_wrote_and_X". */
		DEBUG(3,("schedule_aio_write_and_X: aio_write failed. "
			 "Error %s\n", strerror(errno) ));
		/* Undo the strict lock taken above before bailing out. */
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	outstanding_aio_calls++;

	/* The request now lives as long as the aio context. */
	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	/* This should actually be improved to span the write. */
	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WRITE);
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WRITE);

	if (!aio_ex->write_through && !lp_syncalways(SNUM(fsp->conn))
	    && fsp->aio_write_behind) {
		/* Lie to the client and immediately claim we finished the
		 * write. */
		SSVAL(aio_ex->outbuf.data,smb_vwv2,numtowrite);
		SSVAL(aio_ex->outbuf.data,smb_vwv4,(numtowrite>>16)&1);
		show_msg((char *)aio_ex->outbuf.data);

		if (!srv_send_smb(aio_ex->smbreq->sconn,
				(char *)aio_ex->outbuf.data,
				true, aio_ex->smbreq->seqnum+1,
				IS_CONN_ENCRYPTED(fsp->conn),
				&aio_ex->smbreq->pcd)) {
			exit_server_cleanly("schedule_aio_write_and_X: "
					    "srv_send_smb failed.");
		}
		DEBUG(10,("schedule_aio_write_and_X: scheduled aio_write "
			  "behind for file %s\n", fsp_str_dbg(fsp)));
	}
/*
 * Try to schedule an asynchronous (POSIX AIO) read for a ReadAndX
 * request.  Returns NT_STATUS_RETRY to make the caller fall back to the
 * synchronous read path, NT_STATUS_OK once the aio has been queued, or
 * an error status.  On success, ownership of smbreq is moved into the
 * aio_extra and a strict READ byte-range lock is held until completion.
 */
NTSTATUS schedule_aio_read_and_X(connection_struct *conn,
			     struct smb_request *smbreq,
			     files_struct *fsp, SMB_OFF_T startpos,
			     size_t smb_maxcnt)
{
	struct aio_extra *aio_ex;
	SMB_STRUCT_AIOCB *a;
	size_t bufsize;
	size_t min_aio_read_size = lp_aio_read_size(SNUM(conn));
	int ret;

	/* Ensure aio is initialized. */
	if (!initialize_async_io_handler()) {
		return NT_STATUS_RETRY;
	}

	if (fsp->base_fsp != NULL) {
		/* No AIO on streams yet */
		DEBUG(10, ("AIO on streams not yet supported\n"));
		return NT_STATUS_RETRY;
	}

	/* Reads below the configured threshold are cheaper done
	 * synchronously, unless the VFS forces aio. */
	if ((!min_aio_read_size || (smb_maxcnt < min_aio_read_size))
	    && !SMB_VFS_AIO_FORCE(fsp)) {
		/* Too small a read for aio request. */
		DEBUG(10,("schedule_aio_read_and_X: read size (%u) too small "
			  "for minimum aio_read of %u\n",
			  (unsigned int)smb_maxcnt,
			  (unsigned int)min_aio_read_size ));
		return NT_STATUS_RETRY;
	}

	/* Only do this on non-chained and non-chaining reads not using the
	 * write cache. */
	if (req_is_in_chain(smbreq) ||
	    (lp_write_cache_size(SNUM(conn)) != 0)) {
		return NT_STATUS_RETRY;
	}

	/* Cap the number of simultaneously outstanding aio requests. */
	if (outstanding_aio_calls >= aio_pending_size) {
		DEBUG(10,("schedule_aio_read_and_X: Already have %d aio "
			  "activities outstanding.\n",
			  outstanding_aio_calls ));
		return NT_STATUS_RETRY;
	}

	/* The following is safe from integer wrap as we've already checked
	   smb_maxcnt is 128k or less. Wct is 12 for read replies */

	bufsize = smb_size + 12 * 2 + smb_maxcnt;

	if ((aio_ex = create_aio_extra(NULL, fsp, bufsize)) == NULL) {
		DEBUG(10,("schedule_aio_read_and_X: malloc fail.\n"));
		return NT_STATUS_NO_MEMORY;
	}
	aio_ex->handle_completion = handle_aio_read_complete;

	/* Pre-build the reply header; data length is filled in on
	 * completion. */
	construct_reply_common_req(smbreq, (char *)aio_ex->outbuf.data);
	srv_set_message((char *)aio_ex->outbuf.data, 12, 0, True);
	SCVAL(aio_ex->outbuf.data,smb_vwv0,0xFF); /* Never a chained reply. */

	init_strict_lock_struct(fsp, (uint64_t)smbreq->smbpid,
		(uint64_t)startpos, (uint64_t)smb_maxcnt, READ_LOCK,
		&aio_ex->lock);

	/* Take the lock until the AIO completes. */
	if (!SMB_VFS_STRICT_LOCK(conn, fsp, &aio_ex->lock)) {
		TALLOC_FREE(aio_ex);
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	a = &aio_ex->acb;

	/* Now set up the aio record for the read call. The read lands
	 * directly in the reply buffer's data area. */
	a->aio_fildes = fsp->fh->fd;
	a->aio_buf = smb_buf(aio_ex->outbuf.data);
	a->aio_nbytes = smb_maxcnt;
	a->aio_offset = startpos;
	a->aio_sigevent.sigev_notify = SIGEV_SIGNAL;
	a->aio_sigevent.sigev_signo = RT_SIGNAL_AIO;
	a->aio_sigevent.sigev_value.sival_ptr = aio_ex;

	ret = SMB_VFS_AIO_READ(fsp, a);
	if (ret == -1) {
		DEBUG(0,("schedule_aio_read_and_X: aio_read failed. "
			 "Error %s\n", strerror(errno) ));
		/* Release the strict lock taken above before bailing out. */
		SMB_VFS_STRICT_UNLOCK(conn, fsp, &aio_ex->lock);
		TALLOC_FREE(aio_ex);
		return NT_STATUS_RETRY;
	}
	outstanding_aio_calls++;

	/* The request now lives as long as the aio context; the
	 * completion handler sends the reply. */
	aio_ex->smbreq = talloc_move(aio_ex, &smbreq);

	DEBUG(10,("schedule_aio_read_and_X: scheduled aio_read for file %s, "
		  "offset %.0f, len = %u (mid = %u)\n",
		  fsp_str_dbg(fsp), (double)startpos,
		  (unsigned int)smb_maxcnt,
		  (unsigned int)aio_ex->smbreq->mid ));

	return NT_STATUS_OK;
}