/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 * Vendor addition: feeds every successful open into the sreadahead
 * boot-profiling hook after the fd is installed.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	/* getname() copies the path from userspace; error is carried in fd */
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
				/* record this open for boot-time readahead
				 * profiling (vendor hook) */
				sreadahead_prof( f, 0, 0);
			}
		}
		putname(tmp);
	}
	return fd;
}
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	/* getname() copies the path from userspace; error is carried in fd */
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
				/* LGE_CHANGE_S
				 * Profile read/mmap activity during boot so the
				 * data can be replayed as readahead arguments.
				 * (vendor hook, LGE 20120503)
				 */
				sreadahead_prof( f, 0, 0);
				/* LGE_CHANGE_E */
			}
		}
		putname(tmp);
	}
	return fd;
}
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 * Vendor addition: files opened under an "sdcard" mountpoint are
 * re-owned to gid 1015 after the fd is installed.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	/* getname() copies the path from userspace; error is carried in fd */
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
				/* ASUS_BSP +++ Jimmy,Josh "remove fuse"
				 * force gid on files under the sdcard mount;
				 * 1015 is presumably AID_SDCARD_RW — TODO
				 * confirm against the Android uid table */
				if(strcmp(f->f_vfsmnt->mnt_mountpoint->d_iname,"sdcard")==0){
					chown_common(&(f->f_path),-1,1015);
				}
				/* ASUS_BSP --- Jimmy,Josh "remove fuse" */
			}
		}
		putname(tmp);
	}
	return fd;
}
/*
 * siw_event_file_new - allocate the per-context event file.
 * @ctx:      siw user context the event file belongs to
 * @event_fd: out: reserved (not yet installed) fd number for the file
 *
 * Returns the new struct file, or an ERR_PTR on failure. On success the
 * fd at *event_fd is only reserved; the caller is presumably responsible
 * for fd_install()-ing the returned file — NOTE(review): confirm at the
 * call site.
 */
static struct file *siw_event_file_new(struct siw_ucontext *ctx, int *event_fd)
{
	struct file *filp;
	int rv;

	/* Create a file to communicate events between our userspace verbs
	 * library and this kernel verbs driver, which cannot be communicated
	 * (cleanly) using the uverbs interface.
	 */
	rv = get_unused_fd_flags(O_CLOEXEC);
	if (rv < 0) {
		goto out;
	}
	*event_fd = rv;

	ctx->event_file = kzalloc(sizeof(*ctx->event_file), GFP_KERNEL);
	if (!ctx->event_file) {
		rv = -ENOMEM;
		goto free_fd;
	}
	ctx->event_file->ctx = ctx;
	spin_lock_init(&ctx->event_file->lock);

	filp = anon_inode_getfile("[siwevent]", &siw_event_file_ops,
				  ctx->event_file, O_WRONLY|O_NONBLOCK);
	if (IS_ERR(filp)) {
		rv = PTR_ERR(filp);
		goto free_event_file;
	}
	return filp;

	/* unwind in reverse order of acquisition */
free_event_file:
	kfree(ctx->event_file);
free_fd:
	put_unused_fd(*event_fd);
out:
	return ERR_PTR(rv);
}
/*
 * Open a file descriptor on the autofs mount point corresponding
 * to the given path and device number (aka. new_encode_dev(sb->s_dev)).
 *
 * Returns the installed fd on success, or a negative errno. The fd is
 * opened O_RDONLY with O_CLOEXEC set.
 */
static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
{
	int err, fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (likely(fd >= 0)) {
		struct file *filp;
		struct path path;

		/* resolve @name to the autofs mount whose sb device
		 * number matches @devid */
		err = find_autofs_mount(name, &path, test_by_dev, &devid);
		if (err)
			goto out;

		/*
		 * Find autofs super block that has the device number
		 * corresponding to the autofs fs we want to open.
		 */
		filp = dentry_open(&path, O_RDONLY, current_cred());
		path_put(&path);
		if (IS_ERR(filp)) {
			err = PTR_ERR(filp);
			goto out;
		}

		fd_install(fd, filp);
	}
	return fd;

	/* error after the fd was reserved: release the slot */
out:
	put_unused_fd(fd);
	return err;
}
/*
 * do_handle_open - open a file identified by a file handle.
 * @mountdirfd: fd identifying the vfsmount the handle belongs to
 * @ufh:        userspace file handle
 * @open_flag:  open(2) flags for the new file
 *
 * Returns the new installed fd on success, or a negative errno.
 */
long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
		    int open_flag)
{
	long retval = 0;
	struct path path;
	struct file *file;
	int fd;

	/* decode the handle into a (mnt, dentry) pair; takes a path ref */
	retval = handle_to_path(mountdirfd, ufh, &path);
	if (retval)
		return retval;

	fd = get_unused_fd_flags(open_flag);
	if (fd < 0) {
		path_put(&path);
		return fd;
	}
	/* empty name: open the resolved dentry itself */
	file = file_open_root(path.dentry, path.mnt, "", open_flag);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		retval = PTR_ERR(file);
	} else {
		retval = fd;
		fsnotify_open(file);
		fd_install(fd, file);
	}
	/* drop the reference taken by handle_to_path() in all cases */
	path_put(&path);
	return retval;
}
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	/* build_open_flags() returns 0 or a negative errno; reuse fd for it */
	int fd = build_open_flags(flags, mode, &op);
	struct filename *tmp;

	if (fd)
		return fd;

	tmp = getname(filename);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	fd = get_unused_fd_flags(flags);
	if (fd >= 0) {
		struct file *f = do_filp_open(dfd, tmp, &op);
		if (IS_ERR(f)) {
			/* open failed: release the reserved fd slot */
			put_unused_fd(fd);
			fd = PTR_ERR(f);
		} else {
			fsnotify_open(f);
			fd_install(fd, f);
		}
	}
	putname(tmp);
	return fd;
}
/*
 * Get a struct file and fd for a context and attach the ops.
 * @ctx:  cxl context to back the file
 * @fops: optional caller-supplied file_operations; individual ops are
 *        patched over the default afu_fops (NULL selects the defaults)
 * @fd:   out: the reserved fd number
 *
 * Returns the new file or an ERR_PTR. NOTE(review): *fd is written even
 * when anon_inode_getfile() fails (after the slot was released) — callers
 * must check IS_ERR() on the return before trusting *fd; confirm.
 */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops. Needs to be careful that this is rentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		put_unused_fd(fdtmp);
	*fd = fdtmp;
	return file;
}
/*
 * eventfd_link_ioctl_copy2 - duplicate an fd from another process into
 * the calling process.
 * @arg: userspace pointer to a struct eventfd_copy2 (target pid, fd,
 *       and fd-allocation flags)
 *
 * Looks up the target task by pid, grabs the named fd from its file
 * table, and installs it on a newly allocated descriptor in the current
 * process. Returns the new fd on success, or a negative errno.
 */
static long eventfd_link_ioctl_copy2(unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct task_struct *task_target = NULL;
	struct file *file;
	struct files_struct *files;
	struct eventfd_copy2 eventfd_copy2;
	long ret = -EFAULT;

	if (copy_from_user(&eventfd_copy2, argp, sizeof(struct eventfd_copy2)))
		goto out;

	/*
	 * Find the task struct for the target pid
	 */
	ret = -ESRCH;
	task_target =
		get_pid_task(find_vpid(eventfd_copy2.pid), PIDTYPE_PID);
	if (task_target == NULL) {
		pr_info("Unable to find pid %d\n", eventfd_copy2.pid);
		goto out;
	}

	ret = -ESTALE;
	files = get_files_struct(task_target);
	if (files == NULL) {
		pr_info("Failed to get target files struct\n");
		goto out_task;
	}

	ret = -EBADF;
	/* takes a reference on the file; dropped only on error below */
	file = fget_from_files(files, eventfd_copy2.fd);
	put_files_struct(files);
	if (file == NULL) {
		pr_info("Failed to get fd %d from target\n", eventfd_copy2.fd);
		goto out_task;
	}

	/*
	 * Install the file struct from the target process into the
	 * newly allocated file desciptor of the source process.
	 */
	ret = get_unused_fd_flags(eventfd_copy2.flags);
	if (ret < 0) {
		fput(file);
		goto out_task;
	}
	/* fd_install consumes our reference on @file */
	fd_install(ret, file);

out_task:
	put_task_struct(task_target);
out:
	return ret;
}
/** * bus1_queue_entry_install() - install file descriptors * @entry: queue entry carrying file descriptors * @pool: parent pool of the queue entry * * This installs the file-descriptors that are carried by @entry into the * current process. If no file-descriptors are carried, this is a no-op. If * anything goes wrong, an error is returned without any file-descriptor being * installed (i.e., this operation either installs all, or none). * * The caller must make sure the queue-entry @entry has a linked slice with * enough trailing space to place the file-descriptors into. Furthermore, @pool * must point to the pool where that slice resides in. * * Return: 0 on success, negative error code on failure. */ int bus1_queue_entry_install(struct bus1_queue_entry *entry, struct bus1_pool *pool) { struct kvec vec; size_t i, n = 0; int r, *fds; /* bail out if no files are passed or if the entry is invalid */ if (entry->n_files == 0) return 0; if (WARN_ON(!entry->slice || entry->slice->size < entry->n_files * sizeof(*fds))) return -EFAULT; /* allocate temporary array to hold all FDs */ fds = kmalloc_array(entry->n_files, sizeof(*fds), GFP_TEMPORARY); if (!fds) return -ENOMEM; /* pre-allocate unused FDs */ for (i = 0; i < entry->n_files; ++i) { if (WARN_ON(!entry->files[i])) { fds[n++] = -1; } else { r = get_unused_fd_flags(O_CLOEXEC); if (r < 0) goto exit; fds[n++] = r; } } /* copy FD numbers into the slice */ vec.iov_base = fds; vec.iov_len = n * sizeof(*fds); r = bus1_pool_write_kvec(pool, entry->slice, entry->slice->size - n * sizeof(*fds), &vec, 1, vec.iov_len); if (r < 0) goto exit; /* all worked out fine, now install the actual files */ for (i = 0; i < n; ++i) if (fds[i] >= 0) fd_install(fds[i], get_file(entry->files[i])); r = 0; exit: if (r < 0) for (i = 0; i < n; ++i) put_unused_fd(fds[i]); kfree(fds); return r; }
/*
 * sync_file_ioctl_merge - SYNC_IOC_MERGE ioctl: merge two sync files.
 * @sync_file: first fence (the file the ioctl was issued on)
 * @arg:       userspace pointer to struct sync_merge_data
 *
 * Merges @sync_file with the fence named by data.fd2 into a new sync
 * file, installs it on a fresh O_CLOEXEC fd, and copies the fd back to
 * userspace in data.fence. Returns 0 on success, negative errno on error.
 */
static long sync_file_ioctl_merge(struct sync_file *sync_file,
				  unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_file *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	/* reject unknown flags / non-zero padding for forward compat */
	if (data.flags || data.pad) {
		err = -EINVAL;
		goto err_put_fd;
	}

	fence2 = sync_file_fdget(data.fd2);
	if (!fence2) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* ensure the user-supplied name is NUL-terminated */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_file_merge(data.name, sync_file, fence2);
	if (!fence3) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	/* point of no return: fd now owns fence3's file */
	fd_install(fd, fence3->file);
	fput(fence2->file);
	return 0;

err_put_fence3:
	fput(fence3->file);
err_put_fence2:
	fput(fence2->file);
err_put_fd:
	put_unused_fd(fd);
	return err;
}
/*
 * Create a /dev/tpm%d and 'server side' file descriptor pair
 *
 * Return value:
 *      Returns file pointer on success, an error value otherwise
 *
 * On success the reserved fd, device major/minor and TPM number are
 * reported back through @vtpm_new_dev; the caller is expected to
 * fd_install() the returned file — NOTE(review): confirm at call site.
 */
static struct file *vtpm_proxy_create_device(
				 struct vtpm_proxy_new_dev *vtpm_new_dev)
{
	struct proxy_dev *proxy_dev;
	int rc, fd;
	struct file *file;

	/* reject any flag bits we do not know about */
	if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
		return ERR_PTR(-EOPNOTSUPP);

	proxy_dev = vtpm_proxy_create_proxy_dev();
	if (IS_ERR(proxy_dev))
		return ERR_CAST(proxy_dev);

	proxy_dev->flags = vtpm_new_dev->flags;

	/* setup an anonymous file for the server-side */
	fd = get_unused_fd_flags(O_RDWR);
	if (fd < 0) {
		rc = fd;
		goto err_delete_proxy_dev;
	}

	file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
				  O_RDWR);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		goto err_put_unused_fd;
	}

	/* from now on we can unwind with put_unused_fd() + fput() */

	/* simulate an open() on the server side */
	vtpm_proxy_fops_open(file);

	if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
		proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;

	vtpm_proxy_work_start(proxy_dev);

	vtpm_new_dev->fd = fd;
	vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
	vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;

	return file;

err_put_unused_fd:
	put_unused_fd(fd);

err_delete_proxy_dev:
	vtpm_proxy_delete_proxy_dev(proxy_dev);

	return ERR_PTR(rc);
}
/*
 * scm_detach_fds_compat - deliver SCM_RIGHTS fds to a compat receiver.
 * @kmsg: destination message header (compat layout control buffer)
 * @scm:  scm cookie holding the passed struct file pointers
 *
 * Installs as many of the passed files as fit in the control buffer into
 * the current process, writes their fd numbers into the compat cmsg, and
 * sets MSG_CTRUNC if any files had to be dropped. Always consumes @scm.
 */
void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
{
	struct compat_cmsghdr __user *cm =
		(struct compat_cmsghdr __user *) kmsg->msg_control;
	/* how many fds the control buffer has room for */
	int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr))
		/ sizeof(int);
	int fdnum = scm->fp->count;
	struct file **fp = scm->fp->fp;
	int __user *cmfptr;
	int err = 0, i;

	if (fdnum < fdmax)
		fdmax = fdnum;

	for (i = 0, cmfptr = (int __user *) CMSG_COMPAT_DATA(cm); i < fdmax;
	     i++, cmfptr++) {
		int new_fd;
		err = security_file_receive(fp[i]);
		if (err)
			break;
		/* honour MSG_CMSG_CLOEXEC on the receiving side */
		err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & kmsg->msg_flags
					  ? O_CLOEXEC : 0);
		if (err < 0)
			break;
		new_fd = err;
		err = put_user(new_fd, cmfptr);
		if (err) {
			put_unused_fd(new_fd);
			break;
		}
		/* Bump the usage count and install the file. */
		get_file(fp[i]);
		fd_install(new_fd, fp[i]);
	}

	if (i > 0) {
		/* at least one fd delivered: fill in the cmsg header */
		int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
		err = put_user(SOL_SOCKET, &cm->cmsg_level);
		if (!err)
			err = put_user(SCM_RIGHTS, &cm->cmsg_type);
		if (!err)
			err = put_user(cmlen, &cm->cmsg_len);
		if (!err) {
			cmlen = CMSG_COMPAT_SPACE(i * sizeof(int));
			kmsg->msg_control += cmlen;
			kmsg->msg_controllen -= cmlen;
		}
	}
	/* some fds did not fit (or failed): tell the receiver */
	if (i < fdnum)
		kmsg->msg_flags |= MSG_CTRUNC;

	/*
	 * All of the files that fit in the message have had their
	 * usage counts incremented, so we just free the list.
	 */
	__scm_destroy(scm);
}
/*
 * kdbus_memfd_new - create a kdbus memfd and install it on a new fd.
 * @fd: out: the installed fd number
 *
 * Backs an anonymous [kdbus] file with an unlinked shmem file and
 * installs it O_CLOEXEC in the current process.
 * Returns 0 on success, negative errno on failure.
 */
int kdbus_memfd_new(int *fd)
{
	struct kdbus_memfile *mf;
	struct file *shmemfp;
	struct file *fp;
	int f;
	int ret;

	mf = kzalloc(sizeof(struct kdbus_memfile), GFP_KERNEL);
	if (!mf)
		return -ENOMEM;
	mutex_init(&mf->lock);

	/* allocate a new unlinked shmem file */
	shmemfp = shmem_file_setup("kdbus-memfd", 0, 0);
	if (IS_ERR(shmemfp)) {
		ret = PTR_ERR(shmemfp);
		goto exit;
	}
	mf->fp = shmemfp;

	f = get_unused_fd_flags(O_CLOEXEC);
	if (f < 0) {
		ret = f;
		goto exit_shmem;
	}

	/* The anonymous exported inode ops cannot reach the otherwise
	 * invisible shmem inode. We rely on the fact that nothing else
	 * can create a new file for the shmem inode, like by opening the
	 * fd in /proc/$PID/fd/ */
	fp = anon_inode_getfile("[kdbus]", &kdbus_memfd_fops, mf, O_RDWR);
	if (IS_ERR(fp)) {
		ret = PTR_ERR(fp);
		goto exit_fd;
	}

	/* seeking/pread/pwrite work through the shmem mapping */
	fp->f_mode |= FMODE_LSEEK|FMODE_PREAD|FMODE_PWRITE;
	fp->f_mapping = shmemfp->f_mapping;
	fd_install(f, fp);
	*fd = f;
	return 0;

	/* unwind in reverse order of acquisition */
exit_fd:
	put_unused_fd(f);
exit_shmem:
	fput(shmemfp);
exit:
	kfree(mf);
	return ret;
}
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 * Vendor addition: opens under /data and /system are timed and logged
 * through the IO logger when it is enabled.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);
#if IO_LOGGER_ENABLE
	unsigned long long time1 = 0,timeoffset = 0;
	bool add_trace_e = false;
#endif
	if (!IS_ERR(tmp)) {
#if IO_LOGGER_ENABLE
		/* only trace paths under /data or /system */
		if(unlikely(en_IOLogger())){
			if(!memcmp(tmp,"/data",5)||!memcmp(tmp,"/system",7)){
				add_trace_e = true;
				time1 = sched_clock();
				AddIOTrace(IO_LOGGER_MSG_VFS_OPEN_INTFS,do_sys_open,tmp);
			}
		}
#endif
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
			}
		}
#if IO_LOGGER_ENABLE
		/* log (and optionally dump) only if the open took longer
		 * than the configured thresholds */
		if(unlikely(en_IOLogger()) && add_trace_e){
			timeoffset = sched_clock() - time1;
			add_trace_e = false;
			if(BEYOND_TRACE_LOG_TIME(timeoffset))
			{
				AddIOTrace(IO_LOGGER_MSG_VFS_OPEN_INTFS_END,do_sys_open,tmp,timeoffset);
				if(BEYOND_DUMP_LOG_TIME(timeoffset))
					DumpIOTrace(timeoffset);
			}
		}
#endif
		putname(tmp);
	}
	return fd;
}
/*
 * kni_sock_map_fd - bind @sock to a newly allocated file descriptor.
 *
 * Reserves an fd, wraps the socket in a struct file and installs it.
 * Returns the fd on success or a negative errno; on failure the
 * reserved fd slot is released.
 */
static int
kni_sock_map_fd(struct socket *sock)
{
	struct file *sockfile;
	int fd;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	sockfile = sock_alloc_file(sock, 0, NULL);
	if (IS_ERR(sockfile)) {
		put_unused_fd(fd);
		return PTR_ERR(sockfile);
	}

	fd_install(fd, sockfile);
	return fd;
}
/*
 * mali_stream_create_fence - wrap a mali sync point in a fence and
 * expose it as a file descriptor.
 * @pt: sync point to wrap (freed here if fence creation fails)
 *
 * Returns the new fd (close-on-exec) on success, or a negative errno.
 * NOTE(review): -EFAULT for a failed sync_fence_create() looks like an
 * odd errno choice (-ENOMEM would be conventional) — kept as-is.
 */
int mali_stream_create_fence(mali_sync_pt *pt)
{
	struct sync_fence *fence;
	struct fdtable * fdt;
	struct files_struct * files;
	int fd = -1;

	fence = sync_fence_create("mali_fence", pt);
	if (!fence)
	{
		/* fence creation failed: the sync point is ours to free */
		sync_pt_free(pt);
		fd = -EFAULT;
		goto out;
	}

	/* create a fd representing the fence */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	/* modern kernels can reserve the fd with O_CLOEXEC directly */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
	{
		sync_fence_put(fence);
		goto out;
	}
#else
	/* pre-3.6: reserve the fd, then set close-on-exec by hand in the
	 * fdtable under file_lock */
	fd = get_unused_fd();
	if (fd < 0)
	{
		sync_fence_put(fence);
		goto out;
	}

	files = current->files;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
	__set_close_on_exec(fd, fdt);
#else
	FD_SET(fd, fdt->close_on_exec);
#endif
	spin_unlock(&files->file_lock);
#endif  /* Linux > 3.6 */

	/* bind fence to the new fd */
	sync_fence_install(fence, fd);

out:
	return fd;
}
/*
 * create_dummy_file - lazily give @process an fd for the global dummy file.
 *
 * A no-op when the process already has one. On success the fd (close on
 * exec) is recorded in process->dummyfd; a failed fd allocation leaves
 * dummyfd untouched. The outcome is traced either way.
 */
void create_dummy_file(struct w32process *process)
{
	int newfd;

	if (process->dummyfd != -1)
		return;

	newfd = get_unused_fd_flags(O_CLOEXEC);
	if (newfd >= 0) {
		/* take a reference for the table before installing */
		get_file(dummyfile);
		fd_install(newfd, dummyfile);
		process->dummyfd = newfd;
	}

	ktrace("process %p, dummyfd %d\n", process, process->dummyfd);
}
/*
 * turbotap_sock_map_fd - expose @sock to userspace as a file descriptor.
 * @sock:  socket to wrap
 * @flags: fd/file allocation flags
 *
 * Returns the installed fd on success, or a negative errno (releasing
 * the reserved fd slot on failure).
 */
static inline int turbotap_sock_map_fd(struct socket *sock, int flags)
{
	struct file *sockfile;
	int fd;

	fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	sockfile = sock_alloc_file(sock, flags, NULL);
	if (IS_ERR(sockfile)) {
		put_unused_fd(fd);
		return PTR_ERR(sockfile);
	}

	fd_install(fd, sockfile);
	return fd;
}
/* Get a struct file and fd for a context and attach the ops */ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops, int *fd) { struct file *file; int rc, flags, fdtmp; char *name = NULL; /* only allow one per context */ if (ctx->mapping) return ERR_PTR(-EEXIST); flags = O_RDWR | O_CLOEXEC; /* This code is similar to anon_inode_getfd() */ rc = get_unused_fd_flags(flags); if (rc < 0) return ERR_PTR(rc); fdtmp = rc; /* * Patch the file ops. Needs to be careful that this is rentrant safe. */ if (fops) { PATCH_FOPS(open); PATCH_FOPS(poll); PATCH_FOPS(read); PATCH_FOPS(release); PATCH_FOPS(unlocked_ioctl); PATCH_FOPS(compat_ioctl); PATCH_FOPS(mmap); } else /* use default ops */ fops = (struct file_operations *)&afu_fops; name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe); file = cxl_getfile(name, fops, ctx, flags); kfree(name); if (IS_ERR(file)) goto err_fd; cxl_context_set_mapping(ctx, file->f_mapping); *fd = fdtmp; return file; err_fd: put_unused_fd(fdtmp); return NULL; }
/*
 * sw_sync_ioctl_create_fence - SW_SYNC_IOC_CREATE_FENCE ioctl handler.
 * @obj: sw_sync timeline the fence will signal on
 * @arg: userspace pointer to struct sw_sync_create_fence_data
 *
 * Creates a sync point at data.value on @obj, wraps it in a fence named
 * data.name, installs it on a new O_CLOEXEC fd, and copies the fd back
 * to userspace. Returns 0 on success, negative errno on error.
 */
static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
				       unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_pt *pt;
	struct sync_fence *fence;
	struct sw_sync_create_fence_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err;
	}

	pt = sw_sync_pt_create(obj, data.value);
	if (pt == NULL) {
		err = -ENOMEM;
		goto err;
	}

	/* ensure the user-supplied name is NUL-terminated */
	data.name[sizeof(data.name) - 1] = '\0';
	fence = sync_fence_create(data.name, pt);
	if (fence == NULL) {
		/* the fence did not take ownership: free the point here */
		sync_pt_free(pt);
		err = -ENOMEM;
		goto err;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		sync_fence_put(fence);
		err = -EFAULT;
		goto err;
	}

	/* point of no return: fd now owns the fence */
	sync_fence_install(fence, fd);

	return 0;

err:
	put_unused_fd(fd);
	return err;
}
/*
 * ktapvm_ioctl - hand out a fresh [ktap] anonymous file descriptor.
 * @file: the ktapvm device file the ioctl was issued on
 * @cmd:  ioctl command (unused)
 * @arg:  ioctl argument (unused)
 *
 * Also clears @file's private_data before installing the new fd.
 * Returns the new fd on success, or a negative errno.
 */
static long ktapvm_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	struct file *vmfile;
	int fd;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	vmfile = anon_inode_getfile("[ktap]", &ktap_fops, NULL, O_RDWR);
	if (IS_ERR(vmfile)) {
		put_unused_fd(fd);
		return PTR_ERR(vmfile);
	}

	file->private_data = NULL;
	fd_install(fd, vmfile);

	return fd;
}
/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @timeline: Timeline to associate with fence
 * @name: Name for fence
 * @val: Timeline value at which to signal the fence
 * Return: File descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *timeline, const char *name, uint32_t val)
{
	struct sync_pt *sync_pt;
	struct sync_fence *fence;
	signed int fd = -EINVAL;

	if (!timeline) {
		SDE_ERROR("invalid timeline\n");
		goto exit;
	}

	/* fall back to a generic name when the caller gives none */
	if (!name)
		name = "sde_fence";

	/* create sync point */
	sync_pt = sw_sync_pt_create(timeline, val);
	if (sync_pt == NULL) {
		SDE_ERROR("failed to create sync point, %s\n", name);
		goto exit;
	}

	/* create fence; on success the fence owns the sync point */
	fence = sync_fence_create(name, sync_pt);
	if (fence == NULL) {
		/* fence did not take ownership: free the point here */
		sync_pt_free(sync_pt);
		SDE_ERROR("couldn't create fence, %s\n", name);
		goto exit;
	}

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n", name);
		sync_fence_put(fence);
		goto exit;
	}

	/* bind the fence to the fd; fd now owns the fence reference */
	sync_fence_install(fence, fd);

exit:
	return fd;
}
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 * (Older VFS API: do_filp_open() takes flags/mode directly and
 * fsnotify_open() takes the dentry.)
 */
long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
{
	/* getname() copies the path from userspace; error is carried in fd */
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f->f_path.dentry);
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
static int gk20a_ctrl_alloc_as( struct gk20a *g, struct nvgpu_alloc_as_args *args) { struct platform_device *dev = g->dev; struct gk20a_as_share *as_share; int err; int fd; struct file *file; char *name; err = get_unused_fd_flags(O_RDWR); if (err < 0) return err; fd = err; name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d", dev_name(&dev->dev), fd); file = anon_inode_getfile(name, g->as.cdev.ops, NULL, O_RDWR); kfree(name); if (IS_ERR(file)) { err = PTR_ERR(file); goto clean_up; } fd_install(fd, file); err = gk20a_as_alloc_share(&g->as, args->big_page_size, &as_share); if (err) goto clean_up; file->private_data = as_share; args->as_fd = fd; return 0; clean_up: put_unused_fd(fd); return err; }
/**
 * ptm_open_peer - open the peer of a pty
 * @master: the open struct file of the ptmx device node
 * @tty: the master of the pty being opened
 * @flags: the flags for open
 *
 * Provide a race free way for userspace to open the slave end of a pty
 * (where they have the master fd and cannot access or trust the mount
 * namespace /dev/pts was mounted inside).
 *
 * Returns the installed fd on success, or a negative errno.
 */
int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
{
	int fd = -1;
	struct file *filp;
	int retval = -EINVAL;
	struct path path;

	/* only meaningful on a ptmx master */
	if (tty->driver != ptm_driver)
		return -EIO;

	fd = get_unused_fd_flags(flags);
	if (fd < 0) {
		retval = fd;
		goto err;
	}

	/* Compute the slave's path */
	path.mnt = devpts_mntget(master, tty->driver_data);
	if (IS_ERR(path.mnt)) {
		retval = PTR_ERR(path.mnt);
		goto err_put;
	}
	path.dentry = tty->link->driver_data;

	filp = dentry_open(&path, flags, current_cred());
	/* dentry_open took its own mnt reference; drop ours */
	mntput(path.mnt);
	if (IS_ERR(filp)) {
		retval = PTR_ERR(filp);
		goto err_put;
	}

	fd_install(fd, filp);
	return fd;

err_put:
	put_unused_fd(fd);
err:
	return retval;
}
static int gk20a_ctrl_open_tsg(struct gk20a *g, struct nvgpu_gpu_open_tsg_args *args) { struct platform_device *dev = g->dev; int err; int fd; struct file *file; char *name; err = get_unused_fd_flags(O_RDWR); if (err < 0) return err; fd = err; name = kasprintf(GFP_KERNEL, "nvgpu-%s-tsg%d", dev_name(&dev->dev), fd); file = anon_inode_getfile(name, g->tsg.cdev.ops, NULL, O_RDWR); kfree(name); if (IS_ERR(file)) { err = PTR_ERR(file); goto clean_up; } fd_install(fd, file); err = gk20a_tsg_open(g, file); if (err) goto clean_up_file; args->tsg_fd = fd; return 0; clean_up_file: fput(file); clean_up: put_unused_fd(fd); return err; }
/*
 * do_sys_open - resolve a pathname and open it on a fresh file descriptor.
 * @dfd:      directory fd the lookup is relative to (or AT_FDCWD)
 * @filename: userspace pathname
 * @flags:    open(2) flags
 * @mode:     creation mode (used only with O_CREAT)
 *
 * Returns the new fd on success, or a negative errno.
 *
 * Fix vs. original: the IO_TEST_DEBUG hack called strstr() on @filename,
 * which is a __user pointer — dereferencing it directly in the kernel is
 * invalid (and an oops/security hole on SMAP/PAN hardware). The check now
 * uses @tmp, the kernel-space copy made by getname().
 * NOTE(review): the hack still rewrites @flags after build_open_flags()
 * already consumed the original flags — pre-existing vendor behavior,
 * left untouched.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	char *tmp = getname(filename);
	int fd = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
#if (IO_TEST_DEBUG)
		/* match against the kernel copy, never the __user pointer */
		if (!(flags & O_DIRECTORY) && strstr(tmp, "_quadrant_.tmp")) {
			if (flags & 0x00000001) {
				io_w_test_count = (io_w_test_count + 1) % 10;
				flags = 0x00000042;
			} else {
				flags = 0x00000002;
			}
		}
#endif
		fd = get_unused_fd_flags(flags);
		if (fd >= 0) {
			struct file *f = do_filp_open(dfd, tmp, &op, lookup);
			if (IS_ERR(f)) {
				/* open failed: release the reserved fd slot */
				put_unused_fd(fd);
				fd = PTR_ERR(f);
			} else {
				fsnotify_open(f);
				fd_install(fd, f);
			}
		}
		putname(tmp);
	}
	return fd;
}
/*
 * eventfd2 - create an eventfd file descriptor.
 * @count: initial counter value
 * @flags: EFD_* flags (EFD_CLOEXEC/EFD_NONBLOCK forwarded to the fd
 *         allocation via EFD_SHARED_FCNTL_FLAGS)
 *
 * Returns the new fd on success, or a negative errno.
 */
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = eventfd_file_create(count, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}
/**
 * anon_inode_getfd - creates a new file instance by hooking it up to an
 *                    anonymous inode, and a dentry that describe the "class"
 *                    of the file
 *
 * @name:  [in]    name of the "class" of the new file
 * @fops:  [in]    file operations for the new file
 * @priv:  [in]    private data for the new file (will be file's private_data)
 * @flags: [in]    flags
 *
 * Creates a new file by hooking it on a single inode. This is useful for files
 * that do not need to have a full-fledged inode in order to operate correctly.
 * All the files created with anon_inode_getfd() will share a single inode,
 * hence saving memory and avoiding code duplication for the file/inode/dentry
 * setup. Returns new descriptor or an error code.
 */
int anon_inode_getfd(const char *name, const struct file_operations *fops,
		     void *priv, int flags)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile(name, fops, priv, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	fd_install(fd, file);
	return fd;
}