static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
/**
 * Destroy the @a xnselect structure associated with a file descriptor.
 *
 * Any binding with a @a xnselector block is destroyed.
 *
 * @param select_block pointer to the @a xnselect structure associated with a
 * file descriptor
 */
void xnselect_destroy(struct xnselect *select_block)
{
	xnholder_t *holder;
	int resched = 0; /* must start at 0; only set when a wakeup occurred */
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	while ((holder = getq(&select_block->bindings))) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);
		selector = binding->selector;

		__FD_CLR(binding->bit_index,
			 &selector->fds[binding->type].expected);
		if (!__FD_ISSET(binding->bit_index,
				&selector->fds[binding->type].pending)) {
			__FD_SET(binding->bit_index,
				 &selector->fds[binding->type].pending);
			if (xnselect_wakeup(selector))
				resched = 1;
		}
		removeq(&selector->bindings, &binding->slink);

		/* Drop the lock around xnfree(), then re-acquire it. */
		xnlock_put_irqrestore(&nklock, s);
		xnfree(binding);
		xnlock_get_irqsave(&nklock, s);
	}
	if (resched)
		xnpod_schedule();
	xnlock_put_irqrestore(&nklock, s);
}
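/*
 * Hedged usage sketch (not part of the original source): a driver that
 * supports select() typically embeds one struct xnselect per wait
 * direction, initializes it with xnselect_init() at device creation, and
 * tears it down with xnselect_destroy() on removal. The my_dev structure
 * and its read_select field below are hypothetical.
 */
struct my_dev {
	struct xnselect read_select;
	/* ... other per-device state ... */
};

static void my_dev_init(struct my_dev *dev)
{
	xnselect_init(&dev->read_select);
}

static void my_dev_cleanup(struct my_dev *dev)
{
	/* Drops every remaining binding and wakes the bound selectors. */
	xnselect_destroy(&dev->read_select);
}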
/* Must be called with nklock locked, irqs off. */
int __xnselect_signal(struct xnselect *select_block, unsigned state)
{
	xnholder_t *holder;
	int resched;

	for (resched = 0, holder = getheadq(&select_block->bindings);
	     holder; holder = nextq(&select_block->bindings, holder)) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);
		selector = binding->selector;

		if (state) {
			if (!__FD_ISSET(binding->bit_index,
					&selector->fds[binding->type].pending)) {
				__FD_SET(binding->bit_index,
					 &selector->fds[binding->type].pending);
				if (xnselect_wakeup(selector))
					resched = 1;
			}
		} else
			__FD_CLR(binding->bit_index,
				 &selector->fds[binding->type].pending);
	}

	return resched;
}
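/*
 * Hedged usage sketch (not part of the original source): since
 * __xnselect_signal() requires nklock held with irqs off, a producer path
 * typically wraps it as below and reschedules if any selector was woken.
 * my_dev and its read_select field are hypothetical.
 */
static void my_notify_readable(struct my_dev *dev)
{
	spl_t s;
	int resched;

	xnlock_get_irqsave(&nklock, s);
	/* Mark the descriptor readable; returns nonzero if a wakeup happened. */
	resched = __xnselect_signal(&dev->read_select, 1);
	xnlock_put_irqrestore(&nklock, s);

	if (resched)
		xnpod_schedule();
}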
/**
 * Bind a file descriptor (represented by its @a xnselect structure) to a
 * selector block.
 *
 * @param select_block pointer to the @a struct @a xnselect to be bound;
 *
 * @param binding pointer to a newly allocated (using xnmalloc) @a struct
 * @a xnselect_binding;
 *
 * @param selector pointer to the selector structure;
 *
 * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
 * XNSELECT_EXCEPT);
 *
 * @param index index of the file descriptor (represented by @a select_block)
 * in the bit fields used by the @a selector structure;
 *
 * @param state current state of the file descriptor.
 *
 * @a select_block must have been initialized with xnselect_init(),
 * the @a xnselector structure must have been initialized with
 * xnselector_init(), @a binding may be uninitialized.
 *
 * This service must be called with nklock locked, irqs off. For this reason,
 * the @a binding parameter must have been allocated by the caller outside the
 * locking section.
 *
 * @retval -EINVAL if @a type or @a index is invalid;
 * @retval 0 otherwise.
 */
int xnselect_bind(struct xnselect *select_block,
		  struct xnselect_binding *binding,
		  struct xnselector *selector,
		  unsigned type,
		  unsigned index,
		  unsigned state)
{
	/* Valid bit indices run from 0 to __FD_SETSIZE - 1. */
	if (type >= XNSELECT_MAX_TYPES || index >= __FD_SETSIZE)
		return -EINVAL;

	binding->selector = selector;
	binding->fd = select_block;
	binding->type = type;
	binding->bit_index = index;
	inith(&binding->link);
	inith(&binding->slink);

	appendq(&selector->bindings, &binding->slink);
	appendq(&select_block->bindings, &binding->link);
	__FD_SET(index, &selector->fds[type].expected);
	if (state) {
		__FD_SET(index, &selector->fds[type].pending);
		if (xnselect_wakeup(selector))
			xnpod_schedule();
	} else
		__FD_CLR(index, &selector->fds[type].pending);

	return 0;
}
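/*
 * Hedged usage sketch (not part of the original source): a driver's select
 * handler allocates the binding with xnmalloc() outside the locked section,
 * as the documentation above requires, then binds under nklock. my_dev,
 * read_select and data_ready are hypothetical.
 */
static int my_select_bind(struct my_dev *dev, struct xnselector *selector,
			  unsigned type, unsigned index)
{
	struct xnselect_binding *binding;
	spl_t s;
	int err;

	if (type != XNSELECT_READ)
		return -EBADF;

	/* Allocate before taking nklock, never inside the locked section. */
	binding = xnmalloc(sizeof(*binding));
	if (binding == NULL)
		return -ENOMEM;

	xnlock_get_irqsave(&nklock, s);
	err = xnselect_bind(&dev->read_select, binding, selector,
			    type, index, dev->data_ready);
	xnlock_put_irqrestore(&nklock, s);

	if (err)
		xnfree(binding);
	return err;
}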
void fastcall __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);

	KSTM_BUG_ON(need_files_checkpoint());

	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
static inline void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	__FD_CLR(fd, files->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
/**
 * lookup_instantiate_filp - instantiates the open intent filp
 * @nd: pointer to nameidata
 * @dentry: pointer to dentry
 * @open: open callback
 *
 * Helper for filesystems that want to use lookup open intents and pass back
 * a fully instantiated struct file to the caller.
 * This function is meant to be called from within a filesystem's
 * lookup method.
 * Beware of calling it for non-regular files! Those ->open methods might
 * block (e.g. in fifo_open), leaving you with the parent locked (and in the
 * case of a fifo, leading to a deadlock: nobody can open that fifo anymore,
 * because any other process trying to open it will block on the locked
 * parent during lookup).
 * Note that in case of error, nd->intent.open.file is destroyed, but the
 * path information remains valid.
 * If the open callback is set to NULL, then the standard f_op->open()
 * filesystem callback is substituted.
 */
struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
		int (*open)(struct inode *, struct file *))
{
	struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
	const struct cred *cred = current_cred();

	if (IS_ERR(nd->intent.open.file))
		goto out;
	if (IS_ERR(dentry))
		goto out_err;
	nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
					     open, cred);
out:
	return nd->intent.open.file;
out_err:
	release_open_intent(nd);
	nd->intent.open.file = (struct file *)dentry;
	goto out;
}
EXPORT_SYMBOL_GPL(lookup_instantiate_filp);

/**
 * nameidata_to_filp - convert a nameidata to an open filp.
 * @nd: pointer to nameidata
 *
 * Note that this function destroys the original nameidata.
 */
struct file *nameidata_to_filp(struct nameidata *nd)
{
	const struct cred *cred = current_cred();
	struct file *filp;

	/* Pick up the filp from the open intent */
	filp = nd->intent.open.file;
	nd->intent.open.file = NULL;

	/* Has the filesystem initialised the file for us? */
	if (filp->f_path.dentry == NULL) {
		struct inode *inode = nd->path.dentry->d_inode;

		if (inode->i_op->open) {
			int flags = filp->f_flags;
			put_filp(filp);
			filp = inode->i_op->open(nd->path.dentry, flags, cred);
		} else {
			filp = __dentry_open(&nd->path, filp, NULL, cred);
		}
	}
	return filp;
}

/*
 * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an
 * error.
 */
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
			 const struct cred *cred)
{
	struct path path = { .dentry = dentry, .mnt = mnt };
	struct file *ret;

	/* We must always pass in a valid mount pointer. */
	BUG_ON(!mnt);

	ret = vfs_open(&path, flags, cred);
	path_put(&path);

	return ret;
}
EXPORT_SYMBOL(dentry_open);
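/*
 * Hedged usage sketch (not part of the original source): in this version,
 * dentry_open() consumes one reference on both @dentry and @mnt either way
 * (vfs_open() takes its own reference on success, and path_put() drops the
 * caller's), so callers in stacking filesystems usually pin their own
 * references first. open_lower() is a hypothetical helper name.
 */
static struct file *open_lower(struct dentry *dentry, struct vfsmount *mnt,
			       int flags, const struct cred *cred)
{
	struct file *filp;

	/* Take our own references; dentry_open() will consume them. */
	dget(dentry);
	mntget(mnt);
	filp = dentry_open(dentry, mnt, flags, cred);
	if (IS_ERR(filp))
		printk(KERN_ERR "lower open failed: %ld\n", PTR_ERR(filp));
	return filp;
}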
/**
 * vfs_open - open the file at the given path
 * @path: path to open
 * @flags: open flags
 * @cred: credentials to use
 *
 * Open the file. If successful, the returned file will have acquired
 * an additional reference for path.
 */
struct file *vfs_open(struct path *path, int flags, const struct cred *cred)
{
	struct file *f;
	struct inode *inode = path->dentry->d_inode;

	validate_creds(cred);

	if (inode->i_op->open)
		return inode->i_op->open(path->dentry, flags, cred);
	f = get_empty_filp();
	if (f == NULL)
		return ERR_PTR(-ENFILE);

	f->f_flags = flags;
	return __dentry_open(path, f, NULL, cred);
}
EXPORT_SYMBOL(vfs_open);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(fd_install);

static inline int build_open_flags(int flags, int mode, struct open_flags *op)
{
	int lookup_flags = 0;
	int acc_mode;

	if (!(flags & O_CREAT))
		mode = 0;
	op->mode = mode;

	/* Must never be set by userspace */
	flags &= ~FMODE_NONOTIFY;

	/*
	 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
	 * check for O_DSYNC if they need any syncing at all, we enforce that
	 * it is always set instead of having to deal with possibly weird
	 * behaviour for malicious applications setting only __O_SYNC.
	 */
	if (flags & __O_SYNC)
		flags |= O_DSYNC;

	/*
	 * If we have O_PATH in the open flag, then we cannot have anything
	 * other than the below set of flags.
	 */
	if (flags & O_PATH) {
		flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
		acc_mode = 0;
	} else {
		acc_mode = MAY_OPEN | ACC_MODE(flags);
	}

	op->open_flag = flags;

	/* O_TRUNC implies we need access checks for write permissions */
	if (flags & O_TRUNC)
		acc_mode |= MAY_WRITE;

	/*
	 * Allow the LSM permission hook to distinguish append access from
	 * general write access.
	 */
	if (flags & O_APPEND)
		acc_mode |= MAY_APPEND;

	op->acc_mode = acc_mode;

	op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;

	if (flags & O_CREAT) {
		op->intent |= LOOKUP_CREATE;
		if (flags & O_EXCL)
			op->intent |= LOOKUP_EXCL;
	}

	if (flags & O_DIRECTORY)
		lookup_flags |= LOOKUP_DIRECTORY;
	if (!(flags & O_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	return lookup_flags;
}
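/*
 * Hedged sketch (not part of the original source) of how these helpers
 * compose on the open(2) path in kernels of this vintage: build the
 * open_flags, reserve a descriptor, resolve and open the file, then publish
 * it with fd_install(), or release the slot with put_unused_fd() on failure.
 * do_filp_open() and get_unused_fd_flags() are assumed to have their usual
 * signatures from this era; open_sketch() is a hypothetical name.
 */
static long open_sketch(const char *pathname, int flags, int mode)
{
	struct open_flags op;
	int lookup = build_open_flags(flags, mode, &op);
	struct file *f;
	int fd;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	f = do_filp_open(AT_FDCWD, pathname, &op, lookup);
	if (IS_ERR(f)) {
		put_unused_fd(fd);	/* clear the open_fds bit again */
		return PTR_ERR(f);
	}

	fd_install(fd, f);		/* make the descriptor live */
	return fd;
}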
static int exa_select(struct exa_select *sel)
{
	int i;
	int one = 0;
	int ret = -EFAULT;

	/* First phase: register a callback on each queue. */
	for (i = 0; i < __FD_SETSIZE; i++) {
		__FD_CLR(i, &sel->result);
		if (__FD_ISSET(i, &sel->select)) {
			sel->elt[i].socket = exa_getsock(i);
			if (sel->elt[i].socket == NULL) {
				ret = -EINVAL;
				continue;
			}
			set_callbacks(sel->elt[i].socket, &sel->elt[i]);
			if (sel->operation == EXA_SELECT_IN) {
				if (sock_readable(sel->elt[i].socket) == 1)
					one = 1;
			}
			if (sel->operation == EXA_SELECT_OUT)
				if (sock_writable(sel->elt[i].socket) == 1)
					one = 1;
		}
	}

	/* Second phase: re-check the sockets, and sleep only if nothing has
	 * arrived yet. */
	if (one == 0) {
		int timeout = SELECT_TIMEOUT;

		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < __FD_SETSIZE; i++) {
			if (__FD_ISSET(i, &sel->select)
			    && (sel->elt[i].socket != NULL)) {
				if (sel->operation == EXA_SELECT_IN) {
					if (sock_readable(sel->elt[i].socket) == 1)
						one = 1;
				}
				if (sel->operation == EXA_SELECT_OUT) {
					if (sock_writable(sel->elt[i].socket) == 1)
						one = 1;
				}
			}
		}
		/* If some data is already pending, we must not wait (or a
		 * race can occur). */
		if (one == 0)
			timeout = schedule_timeout(timeout);
		set_current_state(TASK_RUNNING);
	}

	/* Third phase: find which sockets received/sent something and tear
	 * the callbacks back down. */
	for (i = __FD_SETSIZE - 1; i >= 0; i--) {
		if (__FD_ISSET(i, &sel->select)) {
			if (sel->elt[i].socket == NULL)
				continue;
			if (sel->operation == EXA_SELECT_IN) {
				if (sock_readable(sel->elt[i].socket) == 1)
					__FD_SET(i, &sel->result);
			}
			if (sel->operation == EXA_SELECT_OUT) {
				if (sock_writable(sel->elt[i].socket) == 1)
					__FD_SET(i, &sel->result);
			}
			if ((__FD_ISSET(i, &sel->result)) && (ret == -EFAULT))
				ret = 0;
			restore_callbacks(sel->elt[i].socket, &sel->elt[i]);
			fput(sel->elt[i].socket->file);
			sel->elt[i].socket = NULL;
		}
	}

	/* XXX this is not an error: -EFAULT is used here as the timeout
	 * return value...
	 * FIXME: use ETIME to make the timeout explicit. */
	if (ret == -EFAULT)
		__FD_ZERO(&sel->result);

	return ret;
}
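/*
 * Hedged sketch (not part of the original source): the second phase above
 * relies on the classic lost-wakeup-safe wait pattern: mark the task
 * sleeping *before* re-checking the condition, so that a wakeup arriving
 * between the check and schedule_timeout() merely resets the task state
 * instead of being lost. condition_ready() is a hypothetical stand-in for
 * the sock_readable()/sock_writable() checks.
 */
static long wait_pattern_sketch(long timeout)
{
	set_current_state(TASK_INTERRUPTIBLE);
	if (!condition_ready())
		timeout = schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	return timeout;
}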