Code Example #1
File: open.c Project: 3sOx/asuswrt-merlin
long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
{
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
			if (IS_ERR(f)) {
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
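
Every example in this listing follows the same two-phase shape: reserve a descriptor number first, create the struct file second, and only publish the pair with fd_install() once nothing else can fail, because fd_install() cannot be undone; put_unused_fd() is valid only for a descriptor that was reserved but never installed. The condensed sketch below is not taken from any of the projects above — the "[example]" name, my_fops and priv are placeholders for illustration.

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

/* Minimal sketch of the reserve-then-install pattern shared by the examples. */
static int example_install_fd(const struct file_operations *my_fops, void *priv)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* phase 1: reserve a number */
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("[example]", my_fops, priv, O_RDWR);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);		/* not installed yet, so release it */
		return PTR_ERR(filp);
	}

	fd_install(fd, filp);			/* phase 2: publish; no undo after this */
	return fd;
}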
Code Example #2
/*
 * Open a file descriptor on the autofs mount point corresponding
 * to the given path and device number (aka. new_encode_dev(sb->s_dev)).
 */
static int autofs_dev_ioctl_open_mountpoint(const char *path, dev_t devid)
{
	struct file *filp;
	struct nameidata nd;
	int err, fd;

	fd = get_unused_fd();
	if (likely(fd >= 0)) {
		/* Get nameidata of the parent directory */
		err = path_lookup(path, LOOKUP_PARENT, &nd);
		if (err)
			goto out;

		/*
		 * Search down, within the parent, looking for an
		 * autofs super block that has the device number
		 * corresponding to the autofs fs we want to open.
		 */
		err = autofs_dev_ioctl_find_super(&nd, devid);
		if (err) {
			path_put(&nd.path);
			goto out;
		}

		filp = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY,
				   current_cred());
		if (IS_ERR(filp)) {
			err = PTR_ERR(filp);
			goto out;
		}

		autofs_dev_ioctl_fd_install(fd, filp);
	}

	return fd;

out:
	put_unused_fd(fd);
	return err;
}
Code Example #3
File: ctrl_gk20a.c Project: JamesLinus/nvgpu
static int gk20a_ctrl_alloc_as(
		struct gk20a *g,
		struct nvgpu_alloc_as_args *args)
{
	struct platform_device *dev = g->dev;
	struct gk20a_as_share *as_share;
	int err;
	int fd;
	struct file *file;
	char *name;

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
			dev_name(&dev->dev), fd);

	file = anon_inode_getfile(name, g->as.cdev.ops, NULL, O_RDWR);
	kfree(name);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}
	fd_install(fd, file);

	err = gk20a_as_alloc_share(&g->as, args->big_page_size, &as_share);
	if (err)
		goto clean_up;

	file->private_data = as_share;

	args->as_fd = fd;
	return 0;

clean_up:
	put_unused_fd(fd);
	return err;
}
Code Example #4
File: pty.c Project: Lyude/linux
/**
 *	ptm_open_peer - open the peer of a pty
 *	@master: the open struct file of the ptmx device node
 *	@tty: the master of the pty being opened
 *	@flags: the flags for open
 *
 *	Provide a race free way for userspace to open the slave end of a pty
 *	(where they have the master fd and cannot access or trust the mount
 *	namespace /dev/pts was mounted inside).
 */
int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
	int fd = -1;
	struct file *filp;
	int retval = -EINVAL;
	struct path path;

	if (tty->driver != ptm_driver)
		return -EIO;

	fd = get_unused_fd_flags(flags);
	if (fd < 0) {
		retval = fd;
		goto err;
	}

	/* Compute the slave's path */
	path.mnt = devpts_mntget(master, tty->driver_data);
	if (IS_ERR(path.mnt)) {
		retval = PTR_ERR(path.mnt);
		goto err_put;
	}
	path.dentry = tty->link->driver_data;

	filp = dentry_open(&path, flags, current_cred());
	mntput(path.mnt);
	if (IS_ERR(filp)) {
		retval = PTR_ERR(filp);
		goto err_put;
	}

	fd_install(fd, filp);
	return fd;

err_put:
	put_unused_fd(fd);
err:
	return retval;
}
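
ptm_open_peer() is the kernel side of the TIOCGPTPEER ioctl on a pty master. A hedged userspace sketch of the counterpart follows; it assumes a kernel that implements TIOCGPTPEER, and the fallback define mirrors the value used by mainline uapi headers.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef TIOCGPTPEER
#define TIOCGPTPEER _IO('T', 0x41)	/* value used by mainline uapi headers */
#endif

int main(void)
{
	int master = posix_openpt(O_RDWR | O_NOCTTY);
	if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) {
		perror("ptmx");
		return 1;
	}

	/* Resolved by the kernel via ptm_open_peer(), not via a /dev/pts path. */
	int slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);
	if (slave < 0) {
		perror("TIOCGPTPEER");
		return 1;
	}

	printf("master fd %d, peer fd %d\n", master, slave);
	close(slave);
	close(master);
	return 0;
}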
Code Example #5
File: ctrl_gk20a.c Project: JamesLinus/nvgpu
static int gk20a_ctrl_open_tsg(struct gk20a *g,
			       struct nvgpu_gpu_open_tsg_args *args)
{
	struct platform_device *dev = g->dev;
	int err;
	int fd;
	struct file *file;
	char *name;

	err = get_unused_fd_flags(O_RDWR);
	if (err < 0)
		return err;
	fd = err;

	name = kasprintf(GFP_KERNEL, "nvgpu-%s-tsg%d",
			 dev_name(&dev->dev), fd);

	file = anon_inode_getfile(name, g->tsg.cdev.ops, NULL, O_RDWR);
	kfree(name);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto clean_up;
	}
	fd_install(fd, file);

	err = gk20a_tsg_open(g, file);
	if (err)
		goto clean_up_file;

	args->tsg_fd = fd;
	return 0;

clean_up_file:
	fput(file);
clean_up:
	put_unused_fd(fd);
	return err;
}
Code Example #6
File: open.c Project: dkati/Hulk-Kernel-V2
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	struct filename *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
Code Example #7
File: open.c Project: GREYFOXRGR/BPI-M3-bsp
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
#if (IO_TEST_DEBUG)
		if(!(flags & O_DIRECTORY)&& strstr(filename, "_quadrant_.tmp"))
		{
			if (flags&0x00000001)
			{
				io_w_test_count = (io_w_test_count + 1)%10;
				flags = 0x00000042;
			}
			else
			{
				flags = 0x00000002;
			}
		}
#endif
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
Code Example #8
File: lttng-abi.c Project: cbiancheri/lttng-modules
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
		const struct file_operations *fops)
{
	int stream_fd, ret;
	struct file *stream_file;

	stream_fd = lttng_get_unused_fd();
	if (stream_fd < 0) {
		ret = stream_fd;
		goto fd_error;
	}
	stream_file = anon_inode_getfile("[lttng_stream]", fops,
			stream_priv, O_RDWR);
	if (IS_ERR(stream_file)) {
		ret = PTR_ERR(stream_file);
		goto file_error;
	}
	/*
	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
	 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
	 * file descriptor, so we set FMODE_PREAD here.
	 */
	stream_file->f_mode |= FMODE_PREAD;
	fd_install(stream_fd, stream_file);
	/*
	 * The stream holds a reference to the channel within the generic ring
	 * buffer library, so no need to hold a refcount on the channel and
	 * session files here.
	 */
	return stream_fd;

file_error:
	put_unused_fd(stream_fd);
fd_error:
	return ret;
}
Code Example #9
/**
 * anon_inode_getfd - creates a new file instance by hooking it up to an
 *                    anonymous inode, and a dentry that describe the "class"
 *                    of the file
 *
 * @name:    [in]    name of the "class" of the new file
 * @fops:    [in]    file operations for the new file
 * @priv:    [in]    private data for the new file (will be file's private_data)
 * @flags:   [in]    flags
 *
 * Creates a new file by hooking it on a single inode. This is useful for files
 * that do not need to have a full-fledged inode in order to operate correctly.
 * All the files created with anon_inode_getfd() will share a single inode,
 * hence saving memory and avoiding code duplication for the file/inode/dentry
 * setup.  Returns new descriptor or an error code.
 */
int anon_inode_getfd(const char *name, const struct file_operations *fops,
		     void *priv, int flags)
{
	int error, fd;
	struct file *file;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		return error;
	fd = error;

	file = anon_inode_getfile(name, fops, priv, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);
	return error;
}
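
Compared with the hand-rolled sequences elsewhere in this listing, anon_inode_getfd() bundles the reserve/create/install steps into a single call that either returns a ready-to-use descriptor or leaves nothing behind. A minimal hypothetical caller (mydrv_obj_fops and the "[mydrv-obj]" name are illustrative, not taken from any driver above):

#include <linux/anon_inodes.h>
#include <linux/fs.h>

static const struct file_operations mydrv_obj_fops;	/* hypothetical fops */

static long mydrv_ioctl_create_obj(void *obj)
{
	int fd;

	fd = anon_inode_getfd("[mydrv-obj]", &mydrv_obj_fops, obj,
			      O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;	/* nothing was installed on failure */

	return fd;		/* usually handed back to userspace via the ioctl result */
}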
Code Example #10
File: eventfd.c Project: kenkit/AndromadusMod-New
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = eventfd_file_create(count, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}
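
From userspace, the descriptor installed above behaves like a 64-bit counter. A short sketch using glibc's eventfd() wrapper, which ends up in the eventfd2 syscall shown here:

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	uint64_t val = 3;
	int efd = eventfd(0, EFD_CLOEXEC);	/* initial count = 0 */
	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	write(efd, &val, sizeof(val));		/* counter += 3 */
	write(efd, &val, sizeof(val));		/* counter is now 6 */

	read(efd, &val, sizeof(val));		/* returns 6 and resets the counter */
	printf("counter was %llu\n", (unsigned long long)val);

	close(efd);
	return 0;
}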
Code Example #11
PVRSRV_ERROR OSSecureExport(CONNECTION_DATA *psConnection,
							IMG_PVOID pvData,
							IMG_SECURE_TYPE *phSecure,
							CONNECTION_DATA **ppsSecureConnection)
{
	ENV_CONNECTION_DATA *psEnvConnection;
	CONNECTION_DATA *psSecureConnection;
	struct file *connection_file;
	struct file *secure_file;
	struct dentry *secure_dentry;
	struct vfsmount *secure_mnt;
	int secure_fd;
	IMG_BOOL bPmrUnlocked = IMG_FALSE;
	PVRSRV_ERROR eError;

	/* Obtain the current connection's struct file */
	psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
	connection_file = LinuxFileFromEnvConnection(psEnvConnection);

	/* Allocate a fd number */
	secure_fd = get_unused_fd();
	if (secure_fd < 0)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e0;
	}

	/*
		Get a reference to the dentry so when close is called we don't
		drop the last reference too early and delete the file
	*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	secure_dentry = dget(connection_file->f_path.dentry);
	secure_mnt = mntget(connection_file->f_path.mnt);
#else
	secure_dentry = dget(connection_file->f_dentry);
	secure_mnt = mntget(connection_file->f_vfsmnt);
#endif

	/* PMR lock needs to be released before bridge lock to keep lock hierarchy
	* and avoid deadlock situation.
	* OSSecureExport() can be called from functions that are not acquiring
	* PMR lock (e.g. by PVRSRVSyncPrimServerSecureExportKM()) so we have to
	* check if PMR lock is locked. */
	if (PMRIsLockedByMe())
	{
		PMRUnlock();
		bPmrUnlocked = IMG_TRUE;
	}
	OSReleaseBridgeLock();

	/* Open our device (using the file information from our current connection) */
	secure_file = dentry_open(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
					  &connection_file->f_path,
#else
					  connection_file->f_dentry,
					  connection_file->f_vfsmnt,
#endif
					  connection_file->f_flags,
					  current_cred());

	OSAcquireBridgeLock();
	if (bPmrUnlocked)
		PMRLock();

	/* Bail if the open failed */
	if (IS_ERR(secure_file))
	{
		put_unused_fd(secure_fd);
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e0;
	}

	/* Bind our struct file with its fd number */
	fd_install(secure_fd, secure_file);

	/* Return the new services connection our secure data created */
#if defined(SUPPORT_DRM)
	psSecureConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(secure_file));
#else
	psSecureConnection = LinuxConnectionFromFile(secure_file);
#endif

	/* Save the private data */
	PVR_ASSERT(psSecureConnection->hSecureData == IMG_NULL);
	psSecureConnection->hSecureData = pvData;

	*phSecure = secure_fd;
	*ppsSecureConnection = psSecureConnection;
	return PVRSRV_OK;

e0:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
Code Example #12
File: kgsl_sync.c Project: Arunvasu/taoshan
int kgsl_add_fence_event(struct kgsl_device *device,
	u32 context_id, u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	struct kgsl_fence_event_priv *event;
	struct kgsl_timestamp_event_fence priv;
	struct kgsl_context *context;
	struct sync_pt *pt;
	struct sync_fence *fence = NULL;
	int ret = -EINVAL;

	if (len != sizeof(priv))
		return -EINVAL;

	context = kgsl_find_context(owner, context_id);
	if (context == NULL)
		return -EINVAL;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;
	event->context = context;
	event->timestamp = timestamp;
	kgsl_context_get(context);

	pt = kgsl_sync_pt_create(context->timeline, timestamp);
	if (pt == NULL) {
		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
		ret = -ENOMEM;
		goto fail_pt;
	}

	fence = sync_fence_create("kgsl-fence", pt);
	if (fence == NULL) {
		/* only destroy pt when not added to fence */
		kgsl_sync_pt_destroy(pt);
		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
		ret = -ENOMEM;
		goto fail_fence;
	}

	priv.fence_fd = get_unused_fd_flags(0);
	if (priv.fence_fd < 0) {
		KGSL_DRV_ERR(device, "invalid fence fd\n");
		ret = -EINVAL;
		goto fail_fd;
	}
	sync_fence_install(fence, priv.fence_fd);

	if (copy_to_user(data, &priv, sizeof(priv))) {
		ret = -EFAULT;
		goto fail_copy_fd;
	}

	ret = kgsl_add_event(device, context_id, timestamp,
			kgsl_fence_event_cb, event, owner);
	if (ret)
		goto fail_event;

	return 0;

fail_event:
fail_copy_fd:
	/* clean up sync_fence_install */
	put_unused_fd(priv.fence_fd);
fail_fd:
	/* clean up sync_fence_create */
	sync_fence_put(fence);
fail_fence:
fail_pt:
	kgsl_context_put(context);
	kfree(event);
	return ret;
}
Code Example #13
File: xfs_ioctl.c Project: Mr-Aloof/wl500g
STATIC int
xfs_open_by_handle(
	xfs_mount_t		*mp,
	void			__user *arg,
	struct file		*parfilp,
	struct inode		*parinode)
{
	int			error;
	int			new_fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	xfs_fsop_handlereq_t	hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
	if (error)
		return -error;

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		iput(inode);
		return -XFS_ERROR(EINVAL);
	}

#if BITS_PER_LONG != 32
	hreq.oflags |= O_LARGEFILE;
#endif
	/* Put open permission in namei format. */
	permflag = hreq.oflags;
	if ((permflag+1) & O_ACCMODE)
		permflag++;
	if (permflag & O_TRUNC)
		permflag |= 2;

	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		iput(inode);
		return -XFS_ERROR(EPERM);
	}

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		iput(inode);
		return -XFS_ERROR(EACCES);
	}

	/* Can't write directories. */
	if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		iput(inode);
		return -XFS_ERROR(EISDIR);
	}

	if ((new_fd = get_unused_fd()) < 0) {
		iput(inode);
		return new_fd;
	}

	dentry = d_obtain_alias(inode);
	if (IS_ERR(dentry)) {
		put_unused_fd(new_fd);
		return PTR_ERR(dentry);
	}

	/* Ensure umount returns EBUSY on umounts while this file is open. */
	mntget(parfilp->f_path.mnt);

	/* Create file pointer. */
	filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		return -XFS_ERROR(-PTR_ERR(filp));
	}
	if (inode->i_mode & S_IFREG) {
		/* invisible operation should not change atime */
		filp->f_flags |= O_NOATIME;
		filp->f_op = &xfs_invis_file_operations;
	}

	fd_install(new_fd, filp);
	return new_fd;
}
Code Example #14
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);

	resp.num_comp_vectors = file->device->num_comp_vectors;

	filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_free;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	put_unused_fd(resp.async_fd);
	fput(filp);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
Code Example #15
/* tid is the actual task/thread id (née pid, stored as ->pid),
   pid/tgid is that 2.6 thread group id crap (stored as ->tgid) */
asmlinkage long sys_vperfctr_open(int tid, int creat)
{
	struct file *filp;
	struct task_struct *tsk;
	struct vperfctr *perfctr;
	int err;
	int fd;

	if (!vperfctr_fs_init_done())
		return -ENODEV;
	filp = vperfctr_get_filp();
	if (!filp)
		return -ENOMEM;
	err = fd = get_unused_fd();
	if (err < 0)
		goto err_filp;
	perfctr = NULL;
	if (creat) {
		perfctr = get_empty_vperfctr(); /* may sleep */
		if (IS_ERR(perfctr)) {
			err = PTR_ERR(perfctr);
			goto err_fd;
		}
	}
	tsk = current;

	if (tid != 0 && tid != tsk->pid) { /* remote? */
		// tasklist_lock is to access the linked list of task_struct structures exclusively
		read_lock(&tasklist_lock);
		//tsk = find_task_by_pid(tid);
		tsk = find_task_by_pid_ns(tid, current->nsproxy->pid_ns);
		if (tsk)
			get_task_struct(tsk);
		read_unlock(&tasklist_lock);
		err = -ESRCH;
		if (!tsk)
			goto err_perfctr;
		err = ptrace_check_attach(tsk, 0);
		if (err < 0)
			goto err_tsk;
	}
	if (creat) {
		/* check+install must be atomic to prevent remote-control races */
		task_lock(tsk);
		if (!tsk->thread.perfctr) {
			perfctr->owner = tsk;
			tsk->thread.perfctr = perfctr;
			err = 0;
		} else
			err = -EEXIST;
		task_unlock(tsk);
		if (err)
			goto err_tsk;
	} else {
		perfctr = tsk->thread.perfctr;
		/* XXX: Old API needed to allow NULL perfctr here.
		   Do we want to keep or change that rule? */
	}
	filp->private_data = perfctr;
	if (perfctr)
		atomic_inc(&perfctr->count);
	if (tsk != current)
		put_task_struct(tsk);
	#if 0
	if (perfctr) {
    	printk ("sys_vperfctr_open(): fd = %d, perfctr is NOT null\n", fd);
	}
	else {
    	printk ("sys_vperfctr_open(): fd = %d, perfctr is null\n", fd);
	}
	#endif
	fd_install(fd, filp);
	return fd;
 err_tsk:
	if (tsk != current)
		put_task_struct(tsk);
 err_perfctr:
	if (perfctr)	/* can only occur if creat != 0 */
		put_vperfctr(perfctr);
 err_fd:
	put_unused_fd(fd);
 err_filp:
	fput(filp);
	return err;
}
Code Example #16
File: xfs_ioctl.c Project: 19Dan01/linux
int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EACCES;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
Code Example #17
int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = dentry->d_inode;

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -XFS_ERROR(EPERM);
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	/* Put open permission in namei format. */
	permflag = hreq->oflags;
	if ((permflag+1) & O_ACCMODE)
		permflag++;
	if (permflag & O_TRUNC)
		permflag |= 2;

	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -XFS_ERROR(EPERM);
		goto out_dput;
	}

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -XFS_ERROR(EACCES);
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		error = -XFS_ERROR(EISDIR);
		goto out_dput;
	}

	fd = get_unused_fd();
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	filp = dentry_open(dentry, mntget(parfilp->f_path.mnt),
			   hreq->oflags, cred);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (inode->i_mode & S_IFREG) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
Code Example #18
File: mars_aio.c Project: ZyanKLee/mars
static int aio_event_thread(void *data)
{
	struct aio_threadinfo *tinfo = data;
	struct aio_output *output = tinfo->output;
	struct aio_threadinfo *other = &output->tinfo[2];
	int err = -ENOMEM;
	
	MARS_DBG("event thread has started.\n");
	//set_user_nice(current, -20);

	use_fake_mm();
	if (!current->mm)
		goto err;

	err = aio_start_thread(output, &output->tinfo[2], aio_sync_thread, 'y');
	if (unlikely(err < 0))
		goto err;

	while (!brick_thread_should_stop() || atomic_read(&tinfo->queued_sum) > 0) {
		mm_segment_t oldfs;
		int count;
		int i;
		struct timespec timeout = {
			.tv_sec = 1,
		};
		struct io_event events[MARS_MAX_AIO_READ];

		oldfs = get_fs();
		set_fs(get_ds());
		/* TODO: don't timeout upon termination.
		 * Probably we should submit a dummy request.
		 */
		count = sys_io_getevents(output->ctxp, 1, MARS_MAX_AIO_READ, events, &timeout);
		set_fs(oldfs);

		if (likely(count > 0)) {
			atomic_sub(count, &output->submit_count);
		}

		for (i = 0; i < count; i++) {
			struct aio_mref_aspect *mref_a = (void*)events[i].data;
			struct mref_object *mref;
			int err = events[i].res;

			if (!mref_a) {
				continue; // this was a dummy request
			}
			mref = mref_a->object;

			MARS_IO("AIO done %p pos = %lld len = %d rw = %d\n", mref, mref->ref_pos, mref->ref_len, mref->ref_rw);

			mapfree_set(output->mf, mref->ref_pos, mref->ref_pos + mref->ref_len);

			if (output->brick->o_fdsync
			   && err >= 0 
			   && mref->ref_rw != READ
			   && !mref->ref_skip_sync
			   && !mref_a->resubmit++) {
				// workaround for non-implemented AIO FSYNC operation
				if (output->mf &&
				    output->mf->mf_filp &&
				    output->mf->mf_filp->f_op &&
				    !output->mf->mf_filp->f_op->aio_fsync) {
					mars_trace(mref, "aio_fsync");
					_enqueue(other, mref_a, mref->ref_prio, true);
					continue;
				}
				err = aio_submit(output, mref_a, true);
				if (likely(err >= 0))
					continue;
			}

			_complete(output, mref_a, err);

		}
	}
	err = 0;

 err:
	MARS_DBG("event thread has stopped, err = %d\n", err);

	aio_stop_thread(output, 2, false);

	unuse_fake_mm();

	tinfo->terminated = true;
	wake_up_interruptible_all(&tinfo->terminate_event);
	return err;
}

#if 1
/* This should go to fs/open.c (as long as vfs_submit() is not implemented)
 */
#include <linux/fdtable.h>
void fd_uninstall(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	MARS_DBG("fd = %d\n", fd);
	if (unlikely(fd < 0)) {
		MARS_ERR("bad fd = %d\n", fd);
		return;
	}
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(fd_uninstall);
#endif

static
atomic_t ioctx_count = ATOMIC_INIT(0);

static
void _destroy_ioctx(struct aio_output *output)
{
	if (unlikely(!output))
		goto done;

	aio_stop_thread(output, 1, true);

	use_fake_mm();

	if (likely(output->ctxp)) {
		mm_segment_t oldfs;
		int err;

		MARS_DBG("ioctx count = %d destroying %p\n", atomic_read(&ioctx_count), (void*)output->ctxp);
		oldfs = get_fs();
		set_fs(get_ds());
		err = sys_io_destroy(output->ctxp);
		set_fs(oldfs);
		atomic_dec(&ioctx_count);
		MARS_DBG("ioctx count = %d status = %d\n", atomic_read(&ioctx_count), err);
		output->ctxp = 0;
	}

	if (likely(output->fd >= 0)) {
		MARS_DBG("destroying fd %d\n", output->fd);
		fd_uninstall(output->fd);
		put_unused_fd(output->fd);
		output->fd = -1;
	}

 done:
	if (likely(current->mm)) {
		unuse_fake_mm();
	}
}

static
int _create_ioctx(struct aio_output *output)
{
	struct file *file;
	mm_segment_t oldfs;
	int err = -EINVAL;

	CHECK_PTR_NULL(output, done);
	CHECK_PTR_NULL(output->mf, done);
	file = output->mf->mf_filp;
	CHECK_PTR_NULL(file, done);

	/* TODO: this is provisional. We only need it for sys_io_submit()
	 * which uses userspace concepts like file handles.
	 * This should be accompanied by a future kernelspace vfs_submit() or
	 * do_submit() which currently does not exist :(
	 */
	err = get_unused_fd();
	MARS_DBG("file %p '%s' new fd = %d\n", file, output->mf->mf_name, err);
	if (unlikely(err < 0)) {
		MARS_ERR("cannot get fd, err=%d\n", err);
		goto done;
	}
	output->fd = err;
	fd_install(err, file);

	use_fake_mm();

	err = -ENOMEM;
	if (unlikely(!current->mm)) {
		MARS_ERR("cannot fake mm\n");
		goto done;
	}

	MARS_DBG("ioctx count = %d old = %p\n", atomic_read(&ioctx_count), (void*)output->ctxp);
	output->ctxp = 0;

	oldfs = get_fs();
	set_fs(get_ds());
	err = sys_io_setup(MARS_MAX_AIO, &output->ctxp);
	set_fs(oldfs);
	if (likely(output->ctxp))
		atomic_inc(&ioctx_count);
	MARS_DBG("ioctx count = %d new = %p status = %d\n", atomic_read(&ioctx_count), (void*)output->ctxp, err);
	if (unlikely(err < 0)) {
		MARS_ERR("io_setup failed, err=%d\n", err);
		goto done;
	}
	
	err = aio_start_thread(output, &output->tinfo[1], aio_event_thread, 'e');
	if (unlikely(err < 0)) {
		MARS_ERR("could not start event thread\n");
		goto done;
	}

 done:
	if (likely(current->mm)) {
		unuse_fake_mm();
	}
	return err;
}

static int aio_submit_thread(void *data)
{
	struct aio_threadinfo *tinfo = data;
	struct aio_output *output = tinfo->output;
	struct file *file;
	int err = -EINVAL;

	MARS_DBG("submit thread has started.\n");

	file = output->mf->mf_filp;

	use_fake_mm();

	while (!brick_thread_should_stop() || atomic_read(&output->read_count) + atomic_read(&output->write_count) + atomic_read(&tinfo->queued_sum) > 0) {
		struct aio_mref_aspect *mref_a;
		struct mref_object *mref;
		int sleeptime;
		int status;

		wait_event_interruptible_timeout(
			tinfo->event,
			atomic_read(&tinfo->queued_sum) > 0,
			HZ / 4);

		mref_a = _dequeue(tinfo);
		if (!mref_a) {
			continue;
		}

		mref = mref_a->object;
		status = -EINVAL;
		CHECK_PTR(mref, error);

		mapfree_set(output->mf, mref->ref_pos, -1);

		if (mref->ref_rw) {
			insert_dirty(output, mref_a);
		}

		// check for reads exactly at EOF (special case)
		if (mref->ref_pos == mref->ref_total_size &&
		   !mref->ref_rw &&
		   mref->ref_timeout > 0) {
			loff_t total_size = i_size_read(file->f_mapping->host);
			loff_t len = total_size - mref->ref_pos;
			if (len > 0) {
				mref->ref_total_size = total_size;
				mref->ref_len = len;
			} else {
				if (!mref_a->start_jiffies) {
					mref_a->start_jiffies = jiffies;
				}
				if ((long long)jiffies - mref_a->start_jiffies <= mref->ref_timeout) {
					if (atomic_read(&tinfo->queued_sum) <= 0) {
						atomic_inc(&output->total_msleep_count);
						brick_msleep(1000 * 4 / HZ);
					}
					_enqueue(tinfo, mref_a, MARS_PRIO_LOW, true);
					continue;
				}
				MARS_DBG("ENODATA %lld\n", len);
				_complete(output, mref_a, -ENODATA);
				continue;
			}
		}

		sleeptime = 1;
		for (;;) {
			status = aio_submit(output, mref_a, false);

			if (likely(status != -EAGAIN)) {
				break;
			}
			atomic_inc(&output->total_delay_count);
			brick_msleep(sleeptime);
			if (sleeptime < 100) {
				sleeptime++;
			}
		}
	error:
		if (unlikely(status < 0)) {
			MARS_IO("submit_count = %d status = %d\n", atomic_read(&output->submit_count), status);
			_complete_mref(output, mref, status);
		}
	}

	MARS_DBG("submit thread has stopped, status = %d.\n", err);

	if (likely(current->mm)) {
		unuse_fake_mm();
	}

	tinfo->terminated = true;
	wake_up_interruptible_all(&tinfo->terminate_event);
	return err;
}

static int aio_get_info(struct aio_output *output, struct mars_info *info)
{
	struct file *file;
	loff_t min;
	loff_t max;

	if (unlikely(!output ||
		     !output->mf ||
		     !(file = output->mf->mf_filp) ||
		     !file->f_mapping ||
		     !file->f_mapping->host))
		return -EINVAL;

	info->tf_align = 1;
	info->tf_min_size = 1;

	/* Workaround for races in the page cache.
	 *
	 * It appears that concurrent reads and writes seem to
	 * result in inconsistent reads in some very rare cases, due to
	 * races. Sometimes, the inode claims that the file has been already
	 * appended by a write operation, but the data has not actually hit
	 * the page cache, such that a concurrent read gets NULL blocks.
	 */
	min = i_size_read(file->f_mapping->host);
	max = 0;

	if (!output->brick->is_static_device) {
		get_dirty(output, &min, &max);
	}

	info->current_size = min;
	MARS_DBG("determined file size = %lld\n", info->current_size);

	return 0;
}

//////////////// informational / statistics ///////////////

static noinline
char *aio_statistics(struct aio_brick *brick, int verbose)
{
	struct aio_output *output = brick->outputs[0];
	char *res = brick_string_alloc(4096);
	char *sync = NULL;
	int pos = 0;
	if (!res)
		return NULL;

	pos += report_timing(&timings[0], res + pos, 4096 - pos);
	pos += report_timing(&timings[1], res + pos, 4096 - pos);
	pos += report_timing(&timings[2], res + pos, 4096 - pos);

	snprintf(res + pos, 4096 - pos,
		 "total "
		 "reads = %d "
		 "writes = %d "
		 "allocs = %d "
		 "submits = %d "
		 "again = %d "
		 "delays = %d "
		 "msleeps = %d "
		 "fdsyncs = %d "
		 "fdsync_waits = %d "
		 "map_free = %d | "
		 "flying reads = %d "
		 "writes = %d "
		 "allocs = %d "
		 "submits = %d "
		 "q0 = %d "
		 "q1 = %d "
		 "q2 = %d "
		 "| total "
		 "q0 = %d "
		 "q1 = %d "
		 "q2 = %d "
		 "%s\n",
		 atomic_read(&output->total_read_count),
		 atomic_read(&output->total_write_count),
		 atomic_read(&output->total_alloc_count),
		 atomic_read(&output->total_submit_count),
		 atomic_read(&output->total_again_count),
		 atomic_read(&output->total_delay_count),
		 atomic_read(&output->total_msleep_count),
		 atomic_read(&output->total_fdsync_count),
		 atomic_read(&output->total_fdsync_wait_count),
		 atomic_read(&output->total_mapfree_count),
		 atomic_read(&output->read_count),
		 atomic_read(&output->write_count),
		 atomic_read(&output->alloc_count),
		 atomic_read(&output->submit_count),
		 atomic_read(&output->tinfo[0].queued_sum),
		 atomic_read(&output->tinfo[1].queued_sum),
		 atomic_read(&output->tinfo[2].queued_sum),
		 atomic_read(&output->tinfo[0].total_enqueue_count),
		 atomic_read(&output->tinfo[1].total_enqueue_count),
		 atomic_read(&output->tinfo[2].total_enqueue_count),
		 sync ? sync : "");
	
	if (sync)
		brick_string_free(sync);

	return res;
}

static noinline
void aio_reset_statistics(struct aio_brick *brick)
{
	struct aio_output *output = brick->outputs[0];
	int i;
	atomic_set(&output->total_read_count, 0);
	atomic_set(&output->total_write_count, 0);
	atomic_set(&output->total_alloc_count, 0);
	atomic_set(&output->total_submit_count, 0);
	atomic_set(&output->total_again_count, 0);
	atomic_set(&output->total_delay_count, 0);
	atomic_set(&output->total_msleep_count, 0);
	atomic_set(&output->total_fdsync_count, 0);
	atomic_set(&output->total_fdsync_wait_count, 0);
	atomic_set(&output->total_mapfree_count, 0);
	for (i = 0; i < 3; i++) {
		struct aio_threadinfo *tinfo = &output->tinfo[i];
		atomic_set(&tinfo->total_enqueue_count, 0);
	}
}


//////////////// object / aspect constructors / destructors ///////////////

static int aio_mref_aspect_init_fn(struct generic_aspect *_ini)
{
	struct aio_mref_aspect *ini = (void*)_ini;
	INIT_LIST_HEAD(&ini->io_head);
	INIT_LIST_HEAD(&ini->dirty_head);
	return 0;
}

static void aio_mref_aspect_exit_fn(struct generic_aspect *_ini)
{
	struct aio_mref_aspect *ini = (void*)_ini;
	CHECK_HEAD_EMPTY(&ini->dirty_head);
	CHECK_HEAD_EMPTY(&ini->io_head);
}

MARS_MAKE_STATICS(aio);

////////////////////// brick constructors / destructors ////////////////////

static int aio_brick_construct(struct aio_brick *brick)
{
	return 0;
}

static int aio_switch(struct aio_brick *brick)
{
	static int index;
	struct aio_output *output = brick->outputs[0];
	const char *path = output->brick->brick_path;
	int flags = O_RDWR | O_LARGEFILE;
	int status = 0;

	MARS_DBG("power.button = %d\n", brick->power.button);
	if (!brick->power.button)
		goto cleanup;

	if (brick->power.led_on || output->mf)
		goto done;

	mars_power_led_off((void*)brick, false);

	if (brick->o_creat) {
		flags |= O_CREAT;
		MARS_DBG("using O_CREAT on %s\n", path);
	}
	if (brick->o_direct) {
		flags |= O_DIRECT;
		MARS_DBG("using O_DIRECT on %s\n", path);
	}

	output->mf = mapfree_get(path, flags);
	if (unlikely(!output->mf)) {
		MARS_ERR("could not open file = '%s' flags = %d\n", path, flags);
		status = -ENOENT;
		goto err;
	} 

	output->index = ++index;

	status = _create_ioctx(output);
	if (unlikely(status < 0)) {
		MARS_ERR("could not create ioctx, status = %d\n", status);
		goto err;
	}

	status = aio_start_thread(output, &output->tinfo[0], aio_submit_thread, 's');
	if (unlikely(status < 0)) {
		MARS_ERR("could not start theads, status = %d\n", status);
		goto err;
	}

	MARS_DBG("opened file '%s'\n", path);
	mars_power_led_on((void*)brick, true);

done:
	return 0;

err:
	MARS_ERR("status = %d\n", status);
cleanup:
	if (brick->power.led_off) {
		goto done;
	}

	mars_power_led_on((void*)brick, false);

	aio_stop_thread(output, 0, false);

	_destroy_ioctx(output);

	mars_power_led_off((void*)brick,
			  (output->tinfo[0].thread == NULL &&
			   output->tinfo[1].thread == NULL &&
			   output->tinfo[2].thread == NULL));

	MARS_DBG("switch off led_off = %d status = %d\n", brick->power.led_off, status);
	if (brick->power.led_off) {
		if (output->mf) {
			MARS_DBG("closing file = '%s'\n", output->mf->mf_name);
			mapfree_put(output->mf);
			output->mf = NULL;
		}
	}
	return status;
}

static int aio_output_construct(struct aio_output *output)
{
	INIT_LIST_HEAD(&output->dirty_anchor);
	spin_lock_init(&output->dirty_lock);
	init_waitqueue_head(&output->fdsync_event);
	output->fd = -1;
	return 0;
}
Code Example #19
File: xfs_ioctl.c Project: JBTech/ralink_rt5350
STATIC int
xfs_open_by_handle(
	xfs_mount_t		*mp,
	unsigned long		arg,
	struct file		*parfilp,
	struct inode		*parinode)
{
	int			error;
	int			new_fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	vnode_t			*vp;
	xfs_fsop_handlereq_t	hreq;

	error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
					sizeof(xfs_fsop_handlereq_t),
					&hreq, &vp, &inode);
	if (error)
		return -error;

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		iput(inode);
		return -XFS_ERROR(EINVAL);
	}

#if BITS_PER_LONG != 32
	hreq.oflags |= O_LARGEFILE;
#endif
	/* Put open permission in namei format. */
	permflag = hreq.oflags;
	if ((permflag+1) & O_ACCMODE)
		permflag++;
	if (permflag & O_TRUNC)
		permflag |= 2;

	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		iput(inode);
		return -XFS_ERROR(EPERM);
	}

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		iput(inode);
		return -XFS_ERROR(EACCES);
	}

	/* Can't write directories. */
	if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		iput(inode);
		return -XFS_ERROR(EISDIR);
	}

	if ((new_fd = get_unused_fd()) < 0) {
		iput(inode);
		return new_fd;
	}

	dentry = d_alloc_anon(inode);
	if (dentry == NULL) {
		iput(inode);
		put_unused_fd(new_fd);
		return -XFS_ERROR(ENOMEM);
	}

	/* Ensure umount returns EBUSY on umounts while this file is open. */
	mntget(parfilp->f_vfsmnt);

	/* Create file pointer. */
	filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		return -XFS_ERROR(-PTR_ERR(filp));
	}
	if (inode->i_mode & S_IFREG)
		filp->f_op = &linvfs_invis_file_operations;

	fd_install(new_fd, filp);
	return new_fd;
}
Code Example #20
File: kni_vhost.c Project: fleitner/dpdk
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	if (!(q = (struct kni_vhost_queue *)sk_alloc(
		      net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = (struct sk_buff*)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
			GFP_KERNEL);
	if (!q->cache)
		goto free_fd;

	fifo = (struct rte_kni_fifo*)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo)
		goto free_cache;

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void**)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		  "sk->sk_wq=0x%16llx",
		  q->sockfd, (uint64_t)q->sock->wq,
		  (uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		  "sk->sk_sleep=0x%16llx",
		  q->sockfd, (uint64_t)&q->sock->wait,
		  (uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);
	q->sock->ops = NULL;
	q->sock = NULL;

free_sk:
	sk_free((struct sock*)q);

	return err;
}
Code Example #21
File: memfd.c Project: RPajak/kdbus
/**
 * kdbus_memfd_new() - create and install a memfd and file descriptor
 * @name:		Name of the (deleted) file which shows up in
 *			/proc, used for debugging
 * @size:		Initial size of the file
 * @fd:			Installed file descriptor
 *
 * Return: 0 on success, negative errno on failure.
 */
int kdbus_memfd_new(const char *name, size_t size, int *fd)
{
	const char *shmem_name = NULL;
	const char *anon_name = NULL;
	struct kdbus_memfile *mf;
	struct file *shmemfp;
	struct file *fp;
	int f, ret;

	mf = kzalloc(sizeof(*mf), GFP_KERNEL);
	if (!mf)
		return -ENOMEM;

	mutex_init(&mf->lock);

	if (name) {
		mf->name = kstrdup(name, GFP_KERNEL);
		shmem_name = kasprintf(GFP_KERNEL,
				       KBUILD_MODNAME "-memfd:%s", name);
		anon_name = kasprintf(GFP_KERNEL,
				      "[" KBUILD_MODNAME "-memfd:%s]", name);
		if (!mf->name || !shmem_name || !anon_name) {
			ret = -ENOMEM;
			goto exit;
		}
	}

	/* allocate a new unlinked shmem file */
	shmemfp = shmem_file_setup(name ? shmem_name : KBUILD_MODNAME "-memfd",
				   size, 0);
	if (IS_ERR(shmemfp)) {
		ret = PTR_ERR(shmemfp);
		goto exit;
	}
	mf->fp = shmemfp;

	f = get_unused_fd_flags(O_CLOEXEC);
	if (f < 0) {
		ret = f;
		goto exit_shmem;
	}

	/*
	 * The anonymous exported inode ops cannot reach the otherwise
	 * invisible shmem inode. We rely on the fact that nothing else
	 * can create a new file for the shmem inode, like by opening the
	 * fd in /proc/$PID/fd/
	 */
	fp = anon_inode_getfile(name ? anon_name : "[" KBUILD_MODNAME "-memfd]",
				&kdbus_memfd_fops, mf, O_RDWR);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		goto exit_fd;
	}

	fp->f_mode |= FMODE_LSEEK|FMODE_PREAD|FMODE_PWRITE;
	fp->f_mapping = shmemfp->f_mapping;
	fd_install(f, fp);

	kfree(anon_name);
	kfree(shmem_name);
	*fd = f;
	return 0;

exit_fd:
	put_unused_fd(f);
exit_shmem:
	fput(shmemfp);
exit:
	kfree(anon_name);
	kfree(shmem_name);
	kfree(mf->name);
	kfree(mf);
	return ret;
}
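
A hypothetical in-kernel caller, based only on the signature and return convention documented in the comment above (the "example" name and the 4096-byte size are made up, and the kdbus declaration for kdbus_memfd_new() is assumed to be in scope):

static int example_make_memfd(void)
{
	int fd, ret;

	ret = kdbus_memfd_new("example", 4096, &fd);
	if (ret < 0)
		return ret;	/* negative errno, nothing was installed */

	return fd;		/* descriptor is already installed, O_CLOEXEC */
}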
Code Example #22
/**
 * anon_inode_getfd - creates a new file instance by hooking it up to an
 *                    anonymous inode, and a dentry that describe the "class"
 *                    of the file
 *
 * @pfd:     [out]   pointer to the file descriptor
 * @dpinode: [out]   pointer to the inode
 * @pfile:   [out]   pointer to the file struct
 * @name:    [in]    name of the "class" of the new file
 * @fops     [in]    file operations for the new file
 * @priv     [in]    private data for the new file (will be file's private_data)
 *
 * Creates a new file by hooking it on a single inode. This is useful for files
 * that do not need to have a full-fledged inode in order to operate correctly.
 * All the files created with anon_inode_getfd() will share a single inode,
 * hence saving memory and avoiding code duplication for the file/inode/dentry
 * setup.
 */
int anon_inode_getfd(int *pfd, struct inode **pinode, struct file **pfile,
		     const char *name, const struct file_operations *fops,
		     void *priv)
{
	struct qstr this;
	struct dentry *dentry;
	struct _dentry *_dentry;
	struct inode *inode;
	struct _inode *_inode;
	struct file *file;
	struct _file *_file;
	int error, fd;

	if (IS_ERR(anon_inode_inode))
		return -ENODEV;
	file = get_empty_filp();
	if (!file)
		return -ENFILE;

	_file = tx_cache_get_file(file);

	inode = igrab(anon_inode_inode);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto err_put_filp;
	}

	_inode = tx_cache_get_inode(inode);

	error = get_unused_fd();
	if (error < 0)
		goto err_iput;
	fd = error;

	/*
	 * Link the inode to a directory entry by creating a unique name
	 * using the inode sequence number.
	 */
	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0;
	dentry = d_alloc(tx_cache_get_dentry(anon_inode_mnt->mnt_sb->s_root), &this);
	if (!dentry)
		goto err_put_unused_fd;
	_dentry = tx_cache_get_dentry(dentry);
	_dentry->d_op = &anon_inodefs_dentry_operations;
	/* Do not publish this dentry inside the global dentry hash table */
	_dentry->d_flags &= ~DCACHE_UNHASHED;
	d_instantiate(_dentry, _inode);

	_file->f_path.mnt = mntget(anon_inode_mnt);
	_file->f_path.dentry = dentry;
	file->f_mapping = _inode->i_mapping;

	_file->f_pos = 0;
	_file->f_flags = O_RDWR;
	file->f_op = fops;
	_file->f_mode = FMODE_READ | FMODE_WRITE;
	_file->f_version = 0;
	file->private_data = priv;

	fd_install(fd, file);

	*pfd = fd;
	*pinode = inode;
	*pfile = file;
	return 0;

err_put_unused_fd:
	put_unused_fd(fd);
err_iput:
	iput(inode);
err_put_filp:
	put_filp(file);
	return error;
}
Code Example #23
File: ioctl.c Project: shinsec/linux-parrot
static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
{
	int err, fd;
	aufs_bindex_t wbi, bindex, bend;
	struct file *h_file;
	struct super_block *sb;
	struct dentry *root;
	struct au_branch *br;
	struct aufs_wbr_fd wbrfd = {
		.oflags	= au_dir_roflags,
		.brid	= -1
	};
	const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
		| O_NOATIME | O_CLOEXEC;

	AuDebugOn(wbrfd.oflags & ~valid);

	if (arg) {
		err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
		if (unlikely(err)) {
			err = -EFAULT;
			goto out;
		}

		err = -EINVAL;
		AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
		wbrfd.oflags |= au_dir_roflags;
		AuDbg("0%o\n", wbrfd.oflags);
		if (unlikely(wbrfd.oflags & ~valid))
			goto out;
	}

	fd = get_unused_fd_flags(0);
	err = fd;
	if (unlikely(fd < 0))
		goto out;

	h_file = ERR_PTR(-EINVAL);
	wbi = 0;
	br = NULL;
	sb = path->dentry->d_sb;
	root = sb->s_root;
	aufs_read_lock(root, AuLock_IR);
	bend = au_sbend(sb);
	if (wbrfd.brid >= 0) {
		wbi = au_br_index(sb, wbrfd.brid);
		if (unlikely(wbi < 0 || wbi > bend))
			goto out_unlock;
	}

	h_file = ERR_PTR(-ENOENT);
	br = au_sbr(sb, wbi);
	if (!au_br_writable(br->br_perm)) {
		if (arg)
			goto out_unlock;

		bindex = wbi + 1;
		wbi = -1;
		for (; bindex <= bend; bindex++) {
			br = au_sbr(sb, bindex);
			if (au_br_writable(br->br_perm)) {
				wbi = bindex;
				br = au_sbr(sb, wbi);
				break;
			}
		}
	}
	AuDbg("wbi %d\n", wbi);
	if (wbi >= 0)
		h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
				   /*force_wr*/0);

out_unlock:
	aufs_read_unlock(root, AuLock_IR);
	err = PTR_ERR(h_file);
	if (IS_ERR(h_file))
		goto out_fd;

	atomic_dec(&br->br_count); /* cf. au_h_open() */
	fd_install(fd, h_file);
	err = fd;
	goto out; /* success */

out_fd:
	put_unused_fd(fd);
out:
	AuTraceErr(err);
	return err;
}

/* ---------------------------------------------------------------------- */

long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
{
	long err;
	struct dentry *dentry;

	switch (cmd) {
	case AUFS_CTL_RDU:
	case AUFS_CTL_RDU_INO:
		err = au_rdu_ioctl(file, cmd, arg);
		break;

	case AUFS_CTL_WBR_FD:
		err = au_wbr_fd(&file->f_path, (void __user *)arg);
		break;

	case AUFS_CTL_IBUSY:
		err = au_ibusy_ioctl(file, arg);
		break;

	case AUFS_CTL_BRINFO:
		err = au_brinfo_ioctl(file, arg);
		break;

	case AUFS_CTL_FHSM_FD:
		dentry = file->f_path.dentry;
		if (IS_ROOT(dentry))
			err = au_fhsm_fd(dentry->d_sb, arg);
		else
			err = -ENOTTY;
		break;

	default:
		/* do not call the lower */
		AuDbg("0x%x\n", cmd);
		err = -ENOTTY;
	}

	AuTraceErr(err);
	return err;
}
Code Example #24
/*
 * Start requested lock.
 *
 * Allocates required memory, copies dma_buf_fd list from userspace,
 * acquires related KDS resources, and starts the lock.
 */
static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
{
	dma_buf_lock_resource *resource;
	int size;
	int fd;
	int i;
	int ret;

	if (NULL == request->list_of_dma_buf_fds)
	{
		return -EINVAL;
	}
	if (request->count <= 0)
	{
		return -EINVAL;
	}
	if (request->count > DMA_BUF_LOCK_BUF_MAX)
	{
		return -EINVAL;
	}
	if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
	    request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
	{
		return -EINVAL;
	}

	resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
	if (NULL == resource)
	{
		return -ENOMEM;
	}

	atomic_set(&resource->locked, 0);
	kref_init(&resource->refcount);
	INIT_LIST_HEAD(&resource->link);
	resource->count = request->count;

	/* Allocate space to store dma_buf_fds received from user space */
	size = request->count * sizeof(int);
	resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->list_of_dma_buf_fds)
	{
		kfree(resource);
		return -ENOMEM;
	}

	/* Allocate space to store dma_buf pointers associated with dma_buf_fds */
	size = sizeof(struct dma_buf *) * request->count;
	resource->dma_bufs = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->dma_bufs)
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}
	/* Allocate space to store kds_resources associated with dma_buf_fds */
	size = sizeof(struct kds_resource *) * request->count;
	resource->kds_resources = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->kds_resources)
	{
		kfree(resource->dma_bufs);
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}

	/* Copy requested list of dma_buf_fds from user space */
	size = request->count * sizeof(int);
	if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource->dma_bufs);
		kfree(resource->kds_resources);
		kfree(resource);
		return -ENOMEM;
	}
#if DMA_BUF_LOCK_DEBUG
	for (i = 0; i < request->count; i++)
	{
		printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
	}
#endif

	/* Add resource to global list */
	mutex_lock(&dma_buf_lock_mutex);

	list_add(&resource->link, &dma_buf_lock_resource_list);

	mutex_unlock(&dma_buf_lock_mutex);

	for (i = 0; i < request->count; i++)
	{
		/* Convert fd into dma_buf structure */
		resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]);

		if (IS_ERR_VALUE(PTR_ERR(resource->dma_bufs[i])))
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}

		/*Get kds_resource associated with dma_buf */
		resource->kds_resources[i] = get_dma_buf_kds_resource(resource->dma_bufs[i]);

		if (NULL == resource->kds_resources[i])
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}
#if DMA_BUF_LOCK_DEBUG
		printk("dma_buf_lock_dolock : dma_buf_fd %i dma_buf %X kds_resource %X\n", resource->list_of_dma_buf_fds[i],
		       (unsigned int)resource->dma_bufs[i], (unsigned int)resource->kds_resources[i]);
#endif	
	}

	kds_callback_init(&resource->cb, 1, dma_buf_lock_kds_callback);
	init_waitqueue_head(&resource->wait);

	kref_get(&resource->refcount);

	/* Create file descriptor associated with lock request */
	fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops, 
	                      (void *)resource, 0);
	if (fd < 0)
	{
		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);
		return fd;
	}

	resource->exclusive = request->exclusive;

	/* Start locking process */
	ret = kds_async_waitall(&resource->resource_set,KDS_FLAG_LOCKED_ACTION,
	                        &resource->cb, resource, NULL,
	                        request->count,  &resource->exclusive,
	                        resource->kds_resources);

	if (IS_ERR_VALUE(ret))
	{
		put_unused_fd(fd);

		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);

		return ret;
	}

#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_dolock : complete\n");
#endif
	mutex_lock(&dma_buf_lock_mutex);
	kref_put(&resource->refcount, dma_buf_lock_dounlock);
	mutex_unlock(&dma_buf_lock_mutex);

	return fd;
}
Code Example #25
File: scm.c Project: ANFS/ANFS-kernel
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
	struct cmsghdr __user *cm
		= (__force struct cmsghdr __user*)msg->msg_control;

	int fdmax = 0;
	int fdnum = scm->fp->count;
	struct file **fp = scm->fp->fp;
	int __user *cmfptr;
	int err = 0, i;

	if (MSG_CMSG_COMPAT & msg->msg_flags) {
		scm_detach_fds_compat(msg, scm);
		return;
	}

	if (msg->msg_controllen > sizeof(struct cmsghdr))
		fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr))
			 / sizeof(int));

	if (fdnum < fdmax)
		fdmax = fdnum;

	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
	     i++, cmfptr++)
	{
		int new_fd;
		err = security_file_receive(fp[i]);
		if (err)
			break;
		err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags
					  ? O_CLOEXEC : 0);
		if (err < 0)
			break;
		new_fd = err;
		err = put_user(new_fd, cmfptr);
		if (err) {
			put_unused_fd(new_fd);
			break;
		}
		/* Bump the usage count and install the file. */
		get_file(fp[i]);
		fd_install(new_fd, fp[i]);
	}

	if (i > 0)
	{
		int cmlen = CMSG_LEN(i*sizeof(int));
		err = put_user(SOL_SOCKET, &cm->cmsg_level);
		if (!err)
			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
		if (!err)
			err = put_user(cmlen, &cm->cmsg_len);
		if (!err) {
			cmlen = CMSG_SPACE(i*sizeof(int));
			msg->msg_control += cmlen;
			msg->msg_controllen -= cmlen;
		}
	}
	if (i < fdnum || (fdnum && fdmax <= 0))
		msg->msg_flags |= MSG_CTRUNC;

	/*
	 * All of the files that fit in the message have had their
	 * usage counts incremented, so we just free the list.
	 */
	__scm_destroy(scm);
}
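
scm_detach_fds() is the kernel half of SCM_RIGHTS descriptor passing: it installs the transferred files into the receiving process and writes the new fd numbers into the control message. A userspace sketch of the receiving side (sock is assumed to be a connected AF_UNIX socket):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int recv_fd(int sock)
{
	char data;
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int fd = -1;

	if (recvmsg(sock, &msg, 0) < 0)
		return -1;

	/* scm_detach_fds() filled in cmsg_level/cmsg_type/cmsg_len and already
	 * installed the passed file into this process's descriptor table. */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS) {
			memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
			break;
		}
	}
	return fd;
}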
Code Example #26
File: handle_ioctl.c Project: alangenfeld/cloud-nfs
long do_sys_open_by_handle(int mountdirfd, struct file_handle __user * ufh, int open_flag)
{
    int fd;
    long retval = 0;
    struct file *filp;
    struct dentry *dentry;
    struct file_handle f_handle;
    struct vfsmount *mnt = NULL;
    struct file_handle *handle = NULL;

    /* can't use O_CREATE with open_by_handle */
    if(open_flag & O_CREAT)
        {
            retval = -EINVAL;
            goto out_err;
        }
    if(copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
        {
            retval = -EFAULT;
            goto out_err;
        }
    if((f_handle.handle_size > MAX_HANDLE_SZ) || (f_handle.handle_size <= 0))
        {
            retval = -EINVAL;
            goto out_err;
        }
    handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_size, GFP_KERNEL);
    if(!handle)
        {
            retval = -ENOMEM;
            goto out_err;
        }
    /* copy the full handle */
    if(copy_from_user(handle, ufh, sizeof(struct file_handle) + f_handle.handle_size))
        {
            retval = -EFAULT;
            goto out_handle;
        }
    dentry = handle_to_dentry(mountdirfd, handle, &mnt);
    if(IS_ERR(dentry))
        {
            retval = PTR_ERR(dentry);
            goto out_handle;
        }
    retval = may_handle_open(dentry, open_flag);
    if(retval)
        goto out_handle;
    fd = get_unused_fd();
    if(fd < 0)
        {
            retval = fd;
            goto out_dentry;
        }
    filp = dentry_open(dentry, mnt, open_flag);
    if(IS_ERR(filp))
        {
            put_unused_fd(fd);
            retval = PTR_ERR(filp);
        }
    else
        {
            retval = fd;
            fsnotify_open(filp->f_dentry);
            fd_install(fd, filp);
        }
    kfree(handle);
    return retval;

out_dentry:
    dput(dentry);
    mntput(mnt);
out_handle:
    kfree(handle);
out_err:
    return retval;
}
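
This project's do_sys_open_by_handle() parallels the name_to_handle_at(2)/open_by_handle_at(2) pair that mainline exposes. The sketch below targets the mainline interface, not this project's own entry point; the 128-byte handle buffer is an assumption sized to cover MAX_HANDLE_SZ, and open_by_handle_at() needs CAP_DAC_READ_SEARCH.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	int mount_id, mount_fd, fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mount-dir> <path>\n", argv[0]);
		return 1;
	}

	fh = malloc(sizeof(*fh) + 128);
	fh->handle_bytes = 128;		/* assumed upper bound for the handle */

	if (name_to_handle_at(AT_FDCWD, argv[2], fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}

	mount_fd = open(argv[1], O_RDONLY | O_DIRECTORY);
	if (mount_fd < 0) {
		perror("open mount dir");
		return 1;
	}

	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);	/* no O_CREAT, as above */
	if (fd < 0) {
		perror("open_by_handle_at");
		return 1;
	}

	printf("opened fd %d from handle\n", fd);
	return 0;
}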
Code Example #27
/**
 * anon_inode_getfd - creates a new file instance by hooking it up to an
 *                    anonymous inode, and a dentry that describe the "class"
 *                    of the file
 *
 * @name:    [in]    name of the "class" of the new file
 * @fops:    [in]    file operations for the new file
 * @priv:    [in]    private data for the new file (will be file's private_data)
 * @flags:   [in]    flags
 *
 * Creates a new file by hooking it on a single inode. This is useful for files
 * that do not need to have a full-fledged inode in order to operate correctly.
 * All the files created with anon_inode_getfd() will share a single inode,
 * hence saving memory and avoiding code duplication for the file/inode/dentry
 * setup.  Returns new descriptor or -error.
 */
int anon_inode_getfd(const char *name, const struct file_operations *fops,
		     void *priv, int flags)
{
	struct qstr this;
	struct dentry *dentry;
	struct file *file;
	int error, fd;

	if (IS_ERR(anon_inode_inode))
		return -ENODEV;

	if (fops->owner && !try_module_get(fops->owner))
		return -ENOENT;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_module;
	fd = error;

	/*
	 * Link the inode to a directory entry by creating a unique name
	 * using the inode sequence number.
	 */
	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0;
	dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto err_put_unused_fd;

	/*
	 * We know the anon_inode inode count is always greater than zero,
	 * so we can avoid doing an igrab() and we can use an open-coded
	 * atomic_inc().
	 */
	atomic_inc(&anon_inode_inode->i_count);

	dentry->d_op = &anon_inodefs_dentry_operations;
	/* Do not publish this dentry inside the global dentry hash table */
	dentry->d_flags &= ~DCACHE_UNHASHED;
	d_instantiate(dentry, anon_inode_inode);

	error = -ENFILE;
	file = alloc_file(anon_inode_mnt, dentry,
			  FMODE_READ | FMODE_WRITE, fops);
	if (!file)
		goto err_dput;
	file->f_mapping = anon_inode_inode->i_mapping;

	file->f_pos = 0;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->f_version = 0;
	file->private_data = priv;

	fd_install(fd, file);

	return fd;

err_dput:
	dput(dentry);
err_put_unused_fd:
	put_unused_fd(fd);
err_module:
	module_put(fops->owner);
	return error;
}
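
As a usage illustration, the sketch below shows how a driver would typically hand a private object to userspace through this helper; struct my_ctx, my_ctx_release and my_ctx_create_fd are hypothetical names, not taken from any real driver.

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative context object, not from any real driver. */
struct my_ctx {
	int value;
};

static int my_ctx_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);	/* drop the context on last close */
	return 0;
}

static const struct file_operations my_ctx_fops = {
	.owner   = THIS_MODULE,
	.release = my_ctx_release,
};

/* Hand a freshly allocated context to userspace as a file descriptor. */
static int my_ctx_create_fd(void)
{
	struct my_ctx *ctx;
	int fd;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	fd = anon_inode_getfd("[my-ctx]", &my_ctx_fops, ctx, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kfree(ctx);	/* getfd failed, the file never took ownership */
	return fd;
}

On success the new file owns the context through private_data and my_ctx_release() frees it on the final close; the only failure the caller has to unwind itself is anon_inode_getfd() returning an error before the file took ownership.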
Code example #28
File: pipe.c  Project: Edwin-Edward/elks
int do_pipe(int *fd)
{
    struct inode *inode;
    register struct file *f1;
    register struct file *f2;
    int error = -ENFILE;
    int i, j;

    f1 = get_empty_filp();
    if (!f1)
	goto no_files;

    f2 = get_empty_filp();
    if (!f2)
	goto close_f1;

    inode = get_pipe_inode();
    if (!inode)
	goto close_f12;

    error = get_unused_fd();
    if (error < 0)
	goto close_f12_inode;

    i = error;
    current->files.fd[i] = f1;

    error = get_unused_fd();
    if (error < 0)
	goto close_f12_inode_i;

    j = error;
    f1->f_inode = f2->f_inode = inode;

    /* read file */
    f1->f_pos = f2->f_pos = 0;
    f1->f_flags = O_RDONLY;
    f1->f_op = &read_pipe_fops;
    f1->f_mode = 1;

    /* write file */
    f2->f_flags = O_WRONLY;
    f2->f_op = &write_pipe_fops;
    f2->f_mode = 2;

    current->files.fd[j] = f2;
    fd[0] = i;
    fd[1] = j;

    return 0;

  close_f12_inode_i:
#if 0
    put_unused_fd(i);		/* Not sure this is needed */
#endif

    current->files.fd[i] = NULL;

  close_f12_inode:
    inode->i_count--;
    iput(inode);

  close_f12:
    f2->f_count--;

  close_f1:
    f1->f_count--;

  no_files:
    return error;
}
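
A small userspace sketch, plain POSIX and nothing ELKS-specific assumed, confirming the convention the function establishes: fd[0] is the read end (f_mode = 1) and fd[1] the write end (f_mode = 2).

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd[2];
	char buf[6] = { 0 };

	if (pipe(fd) < 0) {
		perror("pipe");
		return 1;
	}
	if (write(fd[1], "hello", 5) != 5)	/* fd[1]: write end */
		return 1;
	if (read(fd[0], buf, 5) != 5)		/* fd[0]: read end */
		return 1;
	puts(buf);				/* prints "hello" */
	close(fd[0]);
	close(fd[1]);
	return 0;
}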
Code example #29
STATIC int balong_ade_overlay_commit(struct ade_compose_data_type    *ade_pri_data, void __user *p)
{
    struct overlay_compose_info comp_info;
    struct balong_fb_data_type *balongfd = NULL;
    int ret = 0;
    u32 struct_len = sizeof(struct ovly_hnd_info) * ADE_OVERLAY_MAX_LAYERS;
#if ADE_SYNC_SUPPORT
    int fenceId = 0;
    unsigned long flags;
#endif

    BUG_ON(ade_pri_data == NULL);

    balongfb_logi_display_debugfs("balong_ade_overlay_commit enter succ ! \n");

    if (copy_from_user(&comp_info, p, sizeof(comp_info))) {
        balongfb_loge("copy from user failed!\n");
        return -EFAULT;
    }

    balongfd = (struct balong_fb_data_type *)platform_get_drvdata(ade_pri_data->parent);
    BUG_ON(balongfd == NULL);
    if ((!balongfd->frame_updated) && lcd_pwr_status.panel_power_on) {
        balongfd->frame_updated = 1;
        lcd_pwr_status.lcd_dcm_pwr_status |= BIT(2);
        do_gettimeofday(&lcd_pwr_status.tvl_set_frame);
        time_to_tm(lcd_pwr_status.tvl_set_frame.tv_sec, 0, &lcd_pwr_status.tm_set_frame);
    }
    down(&balong_fb_blank_sem);
    if (!balongfd->ade_core_power_on) {
        up(&balong_fb_blank_sem);
        balongfb_logi("ade_core_power_on is false !\n");
        return -EPERM;
    }


    if (ADE_TRUE == comp_info.is_finished) {
        spin_lock_irqsave(&balongfd->refresh_lock, flags);
        balongfd->refresh++;
        spin_unlock_irqrestore(&balongfd->refresh_lock, flags);
        balongfd->timeline_max++;
        //release the reserved buffer of fb
        if((true == ade_pri_data->fb_reserved_flag) && (ade_pri_data->frame_count)) {
            balong_ion_free_mem_to_buddy();
            ade_pri_data->fb_reserved_flag = false;
        }

        if (PANEL_MIPI_VIDEO == ade_pri_data->lcd_type) {
            spin_lock_irqsave(&(balongfd->vsync_info.spin_lock), flags);
            set_LDI_INT_MASK_bit(balongfd->ade_base, LDI_ISR_FRAME_END_INT);
            balongfd->vsync_info.vsync_ctrl_expire_count = 0;
            spin_unlock_irqrestore(&(balongfd->vsync_info.spin_lock), flags);

            if (balongfd->vsync_info.vsync_ctrl_disabled_set) {
                if (balongfd->ldi_irq) {
                    enable_irq(balongfd->ldi_irq);
                    balongfd->vsync_info.vsync_ctrl_disabled_set = false;
                }
            }
        }

#ifndef PC_UT_TEST_ON
#ifdef CONFIG_TRACING
        trace_dot(SF, "7", 0);
#endif
#endif

#if ADE_DEBUG_LOG_ENABLE
        g_debug_frame_number = comp_info.frame_number;
#endif
    }

    ret = ade_overlay_commit(ade_pri_data, &comp_info);

    if (ADE_TRUE == comp_info.is_finished) {
        if (ret == 0) {
            set_LDI_INT_MASK_bit(ade_pri_data->ade_base, LDI_ISR_UNDER_FLOW_INT);
#if PARTIAL_UPDATE
            if ((PANEL_MIPI_CMD == ade_pri_data->lcd_type) && (true == balongfd->dirty_update)) {
                balongfb_set_display_region(balongfd);
                balongfd->dirty_update = false;
            }
#endif
            if (PANEL_MIPI_VIDEO == ade_pri_data->lcd_type) {
                if ((ade_pri_data->overlay_ctl.comp_info.compose_mode != OVERLAY_COMP_TYPE_ONLINE)
                    || (balongfd->vpu_power_on)){
                     /* ade_core_rate is default value (360M) */
                     balongfd->ade_set_core_rate = balongfd->ade_core_rate;
                }

                if (balongfd->last_ade_core_rate != balongfd->ade_set_core_rate) {
                    if (clk_set_rate(balongfd->ade_clk, balongfd->ade_set_core_rate) != 0) {
                          balongfb_loge("clk_set_rate ade_core_rate error \n");
                    }
                }

                balongfd->last_ade_core_rate = balongfd->ade_set_core_rate;

            }

            set_LDI_CTRL_ldi_en(ade_pri_data->ade_base, ADE_ENABLE);
            balongfd->ade_ldi_on = true;
            if (PANEL_MIPI_CMD == ade_pri_data->lcd_type) {
                set_LDI_INT_MASK_bit(balongfd->ade_base, LDI_ISR_DSI_TE0_PIN_INT);

                /* enable fake vsync timer */
                if (balongfd->frc_state != BALONG_FB_FRC_IDLE_PLAYING) {
                    balongfd->use_cmd_vsync = (balongfb_frc_get_fps(balongfd) < BALONG_FB_FRC_NORMAL_FPS ? true : false);
                }

                /* report vsync with timer */
                if (balongfd->use_cmd_vsync) {
                    hrtimer_restart(&balongfd->cmd_vsync_hrtimer);
                } else {
                    hrtimer_cancel(&balongfd->cmd_vsync_hrtimer);
                }
            }

#ifndef PC_UT_TEST_ON
#ifdef CONFIG_TRACING
            trace_dot(SF, "8", 0);
#endif
#endif

#if ADE_SYNC_SUPPORT
            /* In online/hybrid mode, ADE must create release fenceFd.
             * In offline mode, don't create release fenceFd
             * because ADE will read HAL's offline buffer instead of layer's buffer.
             */
            /*
               spin_lock_irqsave(&balongfd->refresh_lock, flags);
               balongfd->refresh++;
               spin_unlock_irqrestore(&balongfd->refresh_lock, flags);
               balongfd->timeline_max++;
             */
            if ((OVERLAY_COMP_TYPE_ONLINE == comp_info.compose_mode)
                    || (OVERLAY_COMP_TYPE_HYBRID == comp_info.compose_mode)) {

                fenceId = balong_ade_overlay_fence_create(balongfd->timeline, "ADE", balongfd->timeline_max);
                if (fenceId < 0) {
                    balongfb_loge("ADE failed to create fence!\n");
                }
                comp_info.release_fence = fenceId;
                if (copy_to_user((struct overlay_compose_info __user*)p, &comp_info, sizeof(struct overlay_compose_info))
                        && (fenceId >= 0)) {
                    fenceId = -EFAULT;
                    balongfb_loge("ADE failed to copy fence to user!\n");
                    put_unused_fd(comp_info.release_fence);

                    up(&balong_fb_blank_sem);
                    return fenceId;
                }
            }
#endif /* ADE_SYNC_SUPPORT */
            ade_overlay_handle_unlock(balongfd);
            memcpy(balongfd->locked_hnd, balongfd->locking_hnd, struct_len);
            memset(balongfd->locking_hnd, 0, struct_len);
        } else {
            ade_overlay_handle_unlock(balongfd);
            memcpy(balongfd->locked_hnd, balongfd->locking_hnd, struct_len);
            memset(balongfd->locking_hnd, 0, struct_len);
            up(&balong_fb_blank_sem);
            return ret;
        }
    }

    up(&balong_fb_blank_sem);

    balongfb_logi_display_debugfs("balong_ade_overlay_commit exit succ ! \n");
    return 0;
}
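
balong_ade_overlay_fence_create() itself is not part of this listing. As a hedged sketch, display drivers of this era usually wrapped the staging-android sw_sync API along these lines; the function name, headers and error handling below are illustrative, not the actual balong implementation.

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/types.h>
#include "sync.h"	/* drivers/staging/android era headers */
#include "sw_sync.h"

static int ade_fence_create_sketch(struct sw_sync_timeline *timeline,
				   const char *name, u32 value)
{
	struct sync_pt *pt;
	struct sync_fence *fence;
	int fd;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	pt = sw_sync_pt_create(timeline, value);	/* signals once timeline >= value */
	if (!pt) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fence = sync_fence_create(name, pt);
	if (!fence) {
		sync_pt_free(pt);
		put_unused_fd(fd);
		return -ENOMEM;
	}

	sync_fence_install(fence, fd);	/* performs the fd_install() */
	return fd;
}

Whether a later failure (such as the copy_to_user() case above) can still back the descriptor out with put_unused_fd() depends on whether the helper has already done the fd_install(); in this sketch it has.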
Code example #30
static int
MksckPageDescToFd(struct socket *sock,
		  struct msghdr *msg,
		  Mksck_PageDesc *pd,
		  uint32 pages)
{
	int retval;
	int newfd;
	struct socket *newsock;
	struct sock *newsk;
	struct sock *sk = sock->sk;
	struct MksckPageDescInfo **pmpdi, *mpdi;

	lock_sock(sk);

	if (sk->sk_user_data) {
		struct MksckPageDescInfo *mpdi2;

		/*
		 * A descriptor socket for this peer was created earlier and
		 * its fd number cached in sk_user_data; look that socket up
		 * again and reuse it.
		 */
		newfd = *((int *)sk->sk_user_data);

		newsock = sockfd_lookup(newfd, &retval);
		if (!newsock) {
			retval = -EINVAL;
			goto endProcessingReleaseSock;
		}

		newsk = newsock->sk;
		lock_sock(newsk);
		sockfd_put(newsock);

		/* The looked-up socket must still point back at sk. */
		if (((struct sock *)newsk->sk_user_data) != sk) {
			retval = -EINVAL;
			release_sock(newsk);
			goto endProcessingReleaseSock;
		}

		mpdi = kmalloc(sizeof(struct MksckPageDescInfo) +
			       pages*sizeof(Mksck_PageDesc), GFP_KERNEL);
		if (!mpdi) {
			retval = -ENOMEM;
			release_sock(newsk);
			goto endProcessingReleaseSock;
		}

		retval = put_cmsg(msg, SOL_DECNET, 0, sizeof(int), &newfd);
		if (retval < 0)
			goto endProcessingKFreeReleaseSock;

		release_sock(sk);

		mpdi2 = (struct MksckPageDescInfo *)newsk->sk_protinfo;
		while (mpdi2->next)
			mpdi2 = mpdi2->next;

		pmpdi = &(mpdi2->next);
	} else {
		retval = sock_create(sk->sk_family, sock->type, 0, &newsock);
		if (retval < 0)
			goto endProcessingReleaseSock;

		newsk = newsock->sk;
		lock_sock(newsk);
		newsk->sk_destruct = &MksckPageDescSkDestruct;
		newsk->sk_user_data = sk;
		sock_hold(sk);	/* newsk->sk_user_data keeps a reference to sk */
		newsock->ops = &mksckPageDescOps;

		mpdi = kmalloc(sizeof(struct MksckPageDescInfo) +
			       pages*sizeof(Mksck_PageDesc), GFP_KERNEL);
		if (!mpdi) {
			retval = -ENOMEM;
			goto endProcessingFreeNewSock;
		}

		sk->sk_user_data = sock_kmalloc(sk, sizeof(int), GFP_KERNEL);
		if (sk->sk_user_data == NULL) {
			retval = -ENOMEM;
			goto endProcessingKFreeAndNewSock;
		}

		newfd = sock_map_fd(newsock, O_CLOEXEC);
		if (newfd < 0) {
			retval = newfd;
			sock_kfree_s(sk, sk->sk_user_data, sizeof(int));
			sk->sk_user_data = NULL;
			goto endProcessingKFreeAndNewSock;
		}

		retval = put_cmsg(msg, SOL_DECNET, 0, sizeof(int), &newfd);
		if (retval < 0) {
			sock_kfree_s(sk, sk->sk_user_data, sizeof(int));
			sk->sk_user_data = NULL;
			kfree(mpdi);
			release_sock(newsk);
			sockfd_put(newsock);
			sock_release(newsock);
			put_unused_fd(newfd);
			goto endProcessingReleaseSock;
		}

		*(int *)sk->sk_user_data = newfd;
		release_sock(sk);
		pmpdi = (struct MksckPageDescInfo **)(&(newsk->sk_protinfo));
	}

	mpdi->next  = NULL;
	mpdi->flags = 0;
	mpdi->mapCounts = 0;
	mpdi->pages = pages;
	memcpy(mpdi->descs, pd, pages*sizeof(Mksck_PageDesc));

	*pmpdi = mpdi;	/* link the new descriptor block into the chain */
	release_sock(newsk);

	MksckPageDescManage(pd, pages, MANAGE_INCREMENT);
	return 0;

endProcessingKFreeAndNewSock:
	kfree(mpdi);
endProcessingFreeNewSock:
	release_sock(newsk);
	sock_release(newsock);
	release_sock(sk);
	return retval;

endProcessingKFreeReleaseSock:
	kfree(mpdi);
	release_sock(newsk);
endProcessingReleaseSock:
	release_sock(sk);
	return retval;
}
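
On the receiving side, userspace would pull the descriptor number back out of the ancillary data. Below is a minimal sketch, assuming only the level/type pair visible in the put_cmsg() calls above (SOL_DECNET, type 0) and a hypothetical already-connected monitor socket.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Returns the fd number carried in the first matching control message,
 * or -1 if none was present. */
static int recv_pagedesc_fd(int sock_fd, void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(int))];
	int newfd = -1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = buf;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(sock_fd, &msg, 0) < 0) {
		perror("recvmsg");
		return -1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_DECNET && cmsg->cmsg_type == 0) {
			memcpy(&newfd, CMSG_DATA(cmsg), sizeof(newfd));
			break;
		}
	}
	return newfd;
}

Unlike SCM_RIGHTS, no descriptor is duplicated at receive time: sock_map_fd() in the kernel code above has already installed the fd in the receiving process, and the control message only carries its number.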