Code example #1
ssize_t vfswrap_write(vfs_handle_struct *handle, files_struct *fsp, int fd, const void *data, size_t n)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_write, n);
	result = sys_write(fd, data, n);
	END_PROFILE(syscall_write);
	return result;
}
Code example #2
ssize_t vfswrap_read(vfs_handle_struct *handle, files_struct *fsp, int fd, void *data, size_t n)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_read, n);
	result = sys_read(fd, data, n);
	END_PROFILE(syscall_read);
	return result;
}
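Code examples #1 and #2 show the pattern every wrapper in this collection follows: bracket the underlying syscall with a byte-counting profiling macro pair and pass the result straight through. Below is a minimal sketch of how such a pair can work. The macro names mirror Samba's START_PROFILE_BYTES/END_PROFILE, but the bodies are illustrative assumptions; Samba's real profiling also records timing into a shared profiling area.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical per-operation counters. */
struct profile_stats {
	unsigned long count;	/* number of calls */
	unsigned long bytes;	/* total bytes requested */
};

static struct profile_stats stats_syscall_write;

/* Illustrative macro pair: tally the call and the requested byte count
 * on entry. A real implementation would also start a timer here and
 * stop it in END_PROFILE. */
#define START_PROFILE_BYTES(op, n) do { \
		stats_##op.count += 1; \
		stats_##op.bytes += (unsigned long)(n); \
	} while (0)
#define END_PROFILE(op) do { } while (0)

static ssize_t profiled_write(int fd, const void *data, size_t n)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_write, n);
	result = write(fd, data, n);
	END_PROFILE(syscall_write);
	return result;
}

int main(void)
{
	profiled_write(STDOUT_FILENO, "hello\n", 6);
	fprintf(stderr, "writes: %lu calls, %lu bytes\n",
		stats_syscall_write.count, stats_syscall_write.bytes);
	return 0;
}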
Code example #3
ssize_t vfswrap_sendfile(vfs_handle_struct *handle, int tofd, files_struct *fsp, int fromfd, const DATA_BLOB *hdr,
			SMB_OFF_T offset, size_t n)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_sendfile, n);
	result = sys_sendfile(tofd, fromfd, hdr, offset, n);
	END_PROFILE(syscall_sendfile);
	return result;
}
Code example #4
File: vfs_onefs.c Project: gojdic/samba
static ssize_t onefs_recvfile(vfs_handle_struct *handle, int fromfd,
			      files_struct *tofsp, SMB_OFF_T offset,
			      size_t count)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_recvfile, count);
	result = onefs_sys_recvfile(fromfd, tofsp->fh->fd, offset, count);
	END_PROFILE(syscall_recvfile);
	return result;
}
Code example #5
File: vfs_onefs.c Project: gojdic/samba
static ssize_t onefs_sendfile(vfs_handle_struct *handle, int tofd,
			      files_struct *fromfsp, const DATA_BLOB *header,
			      SMB_OFF_T offset, size_t count)
{
	ssize_t result;

	START_PROFILE_BYTES(syscall_sendfile, count);
	result = onefs_sys_sendfile(handle->conn, tofd, fromfsp->fh->fd,
				    header, offset, count);
	END_PROFILE(syscall_sendfile);
	return result;
}
Code example #6
ssize_t vfswrap_pread(vfs_handle_struct *handle, files_struct *fsp, int fd, void *data,
			size_t n, SMB_OFF_T offset)
{
	ssize_t result;

#if defined(HAVE_PREAD) || defined(HAVE_PREAD64)
	START_PROFILE_BYTES(syscall_pread, n);
	result = sys_pread(fd, data, n, offset);
	END_PROFILE(syscall_pread);
 
	if (result == -1 && errno == ESPIPE) {
		/* Maintain the fiction that pipes can be seeked (sought?) on. */
		result = SMB_VFS_READ(fsp, fd, data, n);
		fsp->fh->pos = 0;
	}

#else /* HAVE_PREAD */
	SMB_OFF_T   curr;
	int lerrno;
   
	curr = SMB_VFS_LSEEK(fsp, fd, 0, SEEK_CUR);
	if (curr == -1 && errno == ESPIPE) {
		/* Maintain the fiction that pipes can be seeked (sought?) on. */
		result = SMB_VFS_READ(fsp, fd, data, n);
		fsp->fh->pos = 0;
		return result;
	}

	if (SMB_VFS_LSEEK(fsp, fd, offset, SEEK_SET) == -1) {
		return -1;
	}

	errno = 0;
	result = SMB_VFS_READ(fsp, fd, data, n);
	lerrno = errno;

	SMB_VFS_LSEEK(fsp, fd, curr, SEEK_SET);
	errno = lerrno;

#endif /* HAVE_PREAD */

	return result;
}
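When the platform lacks pread(), the wrapper above emulates it in four steps: save the current offset, seek to the requested one, read, then seek back, preserving read()'s errno across the restoring lseek(). Unlike real pread(), the emulation is not atomic; another thread or process sharing the descriptor can observe the offset moving. Here is a standalone sketch of the same save/seek/read/restore technique using plain POSIX calls in place of the SMB_VFS_* macros (the function name is hypothetical, and unlike the Samba wrapper this sketch does not special-case pipes):

#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

/* Hypothetical pread() emulation using only lseek() and read(). */
ssize_t emulated_pread(int fd, void *buf, size_t n, off_t offset)
{
	off_t curr;
	ssize_t result;
	int saved_errno;

	/* Remember where the descriptor currently points; fails with
	 * ESPIPE on pipes, which cannot seek at all. */
	curr = lseek(fd, 0, SEEK_CUR);
	if (curr == -1) {
		return -1;
	}

	/* Move to the requested offset and read. */
	if (lseek(fd, offset, SEEK_SET) == -1) {
		return -1;
	}
	result = read(fd, buf, n);
	saved_errno = errno;

	/* Restore the original offset, preserving read()'s errno. */
	lseek(fd, curr, SEEK_SET);
	errno = saved_errno;

	return result;
}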
Code example #7
ssize_t vfswrap_pwrite(vfs_handle_struct *handle, files_struct *fsp, int fd, const void *data,
			size_t n, SMB_OFF_T offset)
{
	ssize_t result;

#if defined(HAVE_PWRITE) || defined(HAVE_PWRITE64)
	START_PROFILE_BYTES(syscall_pwrite, n);
	result = sys_pwrite(fd, data, n, offset);
	END_PROFILE(syscall_pwrite);

	if (result == -1 && errno == ESPIPE) {
		/* Maintain the fiction that pipes can be sought on. */
		result = SMB_VFS_WRITE(fsp, fd, data, n);
	}

#else /* HAVE_PWRITE */
	SMB_OFF_T   curr;
	int         lerrno;

	curr = SMB_VFS_LSEEK(fsp, fd, 0, SEEK_CUR);
	if (curr == -1) {
		return -1;
	}

	if (SMB_VFS_LSEEK(fsp, fd, offset, SEEK_SET) == -1) {
		return -1;
	}

	result = SMB_VFS_WRITE(fsp, fd, data, n);
	lerrno = errno;

	SMB_VFS_LSEEK(fsp, fd, curr, SEEK_SET);
	errno = lerrno;

#endif /* HAVE_PWRITE */

	return result;
}
Code example #8
File: onefs_system.c Project: berte/mediaplayer
/**
 * Handles the subtleties of using sendfile with CIFS.
 */
ssize_t onefs_sys_sendfile(connection_struct *conn, int tofd, int fromfd,
			   const DATA_BLOB *header, SMB_OFF_T offset,
			   size_t count)
{
	bool atomic = false;
	ssize_t ret = 0;

	START_PROFILE_BYTES(syscall_sendfile, count);

	if (lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
			 PARM_ATOMIC_SENDFILE,
			 PARM_ATOMIC_SENDFILE_DEFAULT)) {
		atomic = true;
	}

	/* Try the sendfile */
	ret = onefs_sys_do_sendfile(tofd, fromfd, header, offset, count,
				    atomic);

	/* If the sendfile wasn't atomic, we're done. */
	if (!atomic) {
		DEBUG(10, ("non-atomic sendfile read %ul bytes\n", ret));
		END_PROFILE(syscall_sendfile);
		return ret;
	}

	/*
	 * Atomic sendfile takes care to not write anything to the socket
	 * until all of the requested bytes have been read from the file.
	 * There are two atomic cases that need to be handled.
	 *
	 *  1. The file was truncated causing less data to be read than was
	 *     requested.  In this case, we return back to the caller to
	 *     indicate 0 bytes were written to the socket.  This should
	 *     prompt the caller to fallback to the standard read path: read
	 *     the data, create a header that indicates how many bytes were
	 *     actually read, and send the header/data back to the client.
	 *
	 *     This saves us from standard sendfile behavior of sending a
	 *     header promising more data than will actually be sent.  The
	 *     only two options are to close the socket and kill the client
	 *     connection, or write a bunch of 0s.  Closing the client
	 *     connection is bad because there could actually be multiple
	 *     sessions multiplexed from the same client that are all dropped
	 *     because of a truncate.  Writing the remaining data as 0s also
	 *     isn't good, because the client will have an incorrect version
	 *     of the file.  If the file is written back to the server, the 0s
	 *     will be written back.  Fortunately, atomic sendfile allows us
	 *     to avoid making this choice in most cases.
	 *
	 *  2. One downside of atomic sendfile is that there is a limit on
	 *     the number of bytes that can be sent atomically.  The kernel
	 *     has a limited amount of mbuf space that it can read file data
	 *     into without exhausting the system's mbufs, so a buffer of
	 *     length xfsize is used.  The xfsize at the time of writing this
	 *     is 64K.  xfsize bytes are read from the file, and subsequently
	 *     written to the socket.  This makes it impossible to do the
	 *     sendfile atomically for a byte count > xfsize.
	 *
	 *     To cope with large requests, atomic sendfile returns -1 with
	 *     errno set to E2BIG.  Since Windows maxes out at 64K writes,
	 *     this is currently only a concern with non-Windows clients.
	 *     POSIX extensions allow the full 24-bit byte count field to be
	 *     used in ReadAndX, and clients such as smbclient and the Linux
	 *     CIFS client can request up to 16MB reads!  There are a few
	 *     options for handling large sendfile requests.
	 *
	 *	a. Fall back to the standard read path.  This is unacceptable
	 *         because it would require prohibitively large mallocs.
	 *
	 *	b. Fall back to using samba's fake_send_file which emulates
	 *	   the kernel sendfile in userspace.  This still has the same
	 *	   problem of sending the header before all of the data has
	 *	   been read, so it doesn't buy us anything, and has worse
	 *	   performance than the kernel's zero-copy sendfile.
	 *
	 *	c. Use non-atomic sendfile syscall to attempt a zero copy
	 *	   read, and hope that there isn't a short read due to
	 *	   truncation.  In the case of a short read, there are two
	 *	   options:
	 *
	 *	    1. Kill the client connection
	 *
	 *	    2. Write zeros to the socket for the remaining bytes
	 *	       promised in the header.
	 *
	 *	   It is safer from a data corruption perspective to kill the
	 *	   client connection, so this is our default behavior, but if
	 *	   this causes problems this can be configured to write zeros
	 *	   via smb.conf.
	 */

	/* Handle case 1: short read -> truncated file. */
	if (ret == 0) {
		END_PROFILE(syscall_sendfile);
		return ret;
	}

	/* Handle case 2: large read. */
	if (ret == -1 && errno == E2BIG) {

		if (!lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
				 PARM_SENDFILE_LARGE_READS,
				 PARM_SENDFILE_LARGE_READS_DEFAULT)) {
			DEBUG(3, ("Not attempting non-atomic large sendfile: "
				  "%lu bytes\n", count));
			END_PROFILE(syscall_sendfile);
			return 0;
		}

		if (count < 0x10000) {
			DEBUG(0, ("Count < 2^16 and E2BIG was returned! %lu\n",
				  count));
		}

		DEBUG(10, ("attempting non-atomic large sendfile: %lu bytes\n",
			   count));

		/* Try a non-atomic sendfile. */
		ret = onefs_sys_do_sendfile(tofd, fromfd, header, offset,
					    count, false);
		/* Real error: kill the client connection. */
		if (ret == -1) {
			DEBUG(1, ("error on non-atomic large sendfile "
				  "(%lu bytes): %s\n", count,
				  strerror(errno)));
			END_PROFILE(syscall_sendfile);
			return ret;
		}

		/* Short read: kill the client connection. */
		if (ret != count + header->length) {
			DEBUG(1, ("short read on non-atomic large sendfile "
				  "(%lu of %lu bytes): %s\n", ret, count,
				  strerror(errno)));

			/*
			 * Returning ret here would cause us to drop into the
			 * codepath that calls sendfile_short_send, which
			 * sends the client a bunch of zeros instead.
			 * Returning -1 kills the connection.
			 */
			if (lp_parm_bool(SNUM(conn), PARM_ONEFS_TYPE,
				PARM_SENDFILE_SAFE,
				PARM_SENDFILE_SAFE_DEFAULT)) {
				END_PROFILE(syscall_sendfile);
				return -1;
			}

			END_PROFILE(syscall_sendfile);
			return ret;
		}

		DEBUG(10, ("non-atomic large sendfile successful\n"));
	}

	/* There was an error in the atomic sendfile. */
	if (ret == -1) {
		DEBUG(1, ("error on %s sendfile (%lu bytes): %s\n",
			  atomic ? "atomic" : "non-atomic",
			  count, strerror(errno)));
	}

	END_PROFILE(syscall_sendfile);
	return ret;
}
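The long comment in onefs_sys_sendfile amounts to a contract with its caller: a return of 0 means "fall back to the ordinary read path and build a header from the bytes actually read", while -1 means "treat this as fatal for the client connection". The following is a hedged sketch of a call site honoring that contract; it assumes Samba's internal headers for the types, and send_via_read_path() and kill_client_connection() are hypothetical stand-ins, not Samba functions.

/* Illustrative call site; send_via_read_path() and
 * kill_client_connection() are hypothetical helpers. */
static ssize_t send_file_to_client(connection_struct *conn, int sockfd,
				   int filefd, const DATA_BLOB *header,
				   SMB_OFF_T offset, size_t count)
{
	ssize_t ret = onefs_sys_sendfile(conn, sockfd, filefd, header,
					 offset, count);

	if (ret == -1) {
		/* Real error, or an unsafe short non-atomic send: drop
		 * the client connection rather than ship a corrupt file. */
		return kill_client_connection(conn);
	}

	if (ret == 0) {
		/* Atomic sendfile saw a truncated file (or large reads are
		 * disabled): read the data ourselves and send a header that
		 * reflects the byte count actually read. */
		return send_via_read_path(conn, sockfd, filefd, offset, count);
	}

	/* Success: header plus file data went out through sendfile. */
	return ret;
}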