/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
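/*
 * Editor's sketch (not from the source): what the -EINTR mapping above
 * implies for userspace. Because the fd table entry is cleared before
 * filp_close() runs, close(2) must never be retried on EINTR -- the
 * descriptor is already released, and a retry could close an unrelated
 * fd that was handed out in the meantime.
 */
#include <unistd.h>
#include <errno.h>

static int close_once(int fd)
{
	if (close(fd) == -1 && errno != EINTR)
		return -1;	/* real failure (e.g. EBADF, EIO) */
	return 0;		/* on EINTR the fd is gone anyway; do not retry */
}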
void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
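/*
 * Editor's sketch (assumed usage, not from the source): put_unused_fd()
 * is the error-path counterpart of get_unused_fd(). The canonical
 * pattern is to reserve a descriptor, try to produce a struct file,
 * and release the reserved slot if that fails before fd_install().
 */
static int example_open_dev_null(void)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();			/* reserve a slot */
	if (fd < 0)
		return fd;

	filp = filp_open("/dev/null", O_RDONLY, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);		/* undo the reservation */
		return PTR_ERR(filp);
	}

	fd_install(fd, filp);			/* publish the file */
	return fd;
}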
/** Close a file in the FAF daemon.
 *  @author Renaud Lottiaux
 *
 *  @param file    The file to close.
 */
int close_faf_file(struct file *file)
{
	struct files_struct *files = first_krgrpc->files;
	struct file *faf_file;
	struct fdtable *fdt;
	int fd = file->f_faf_srv_index;

	BUG_ON(!(file->f_flags & O_FAF_SRV));
	BUG_ON(file_count(file) != 1);

	/* Remove the file from the FAF server file table */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		BUG();
	faf_file = fdt->fd[fd];
	if (!faf_file)
		BUG();
	BUG_ON(faf_file != file);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);

	/* Clean up the Kerrighed flags, but keep the objid, so that we go
	 * through the regular kernel close path plus kh_put_file() only. */
	file->f_flags = file->f_flags & ~O_FAF_SRV;

	return filp_close(faf_file, files);
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	if (fd >= files->max_fds)
		goto out_unlock;
	filp = files->fd[fd];
	if (!filp)
		goto out_unlock;
	ltt_ev_file_system(LTT_EV_FILE_SYSTEM_CLOSE, fd, 0, NULL);
	files->fd[fd] = NULL;
	FD_CLR(fd, files->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(filp, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
/** Add a file to the FAF daemon.
 *  @author Renaud Lottiaux
 *
 *  @param file    The file to add to the FAF daemon.
 *
 *  @return 0 if everything is ok.
 *          Negative value otherwise.
 */
int setup_faf_file(struct file *file)
{
	int server_fd = 0;
	int res = 0;
	struct files_struct *files = first_krgrpc->files;

	/* Install the file in the destination task's file array */
	if (file->f_flags & O_FAF_SRV) {
		res = -EALREADY;
		goto out;
	}

	server_fd = __get_unused_fd(first_krgrpc);
	if (server_fd < 0) {
		res = server_fd;
		goto out;
	}

	spin_lock(&files->file_lock);
	if (unlikely(file->f_flags & O_FAF_SRV)) {
		__put_unused_fd(files, server_fd);
		res = -EALREADY;
	} else {
		file->f_flags |= O_FAF_SRV;
		get_file(file);
		file->f_faf_srv_index = server_fd;
		__fd_install(files, server_fd, file);
	}
	spin_unlock(&files->file_lock);

out:
	return res;
}
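/*
 * Editor's sketch of the intended FAF lifecycle, inferred only from
 * setup_faf_file() and close_faf_file() above; the surrounding
 * Kerrighed RPC plumbing and error handling are elided.
 */
static int faf_serve_file_example(struct file *file)
{
	int err;

	err = setup_faf_file(file);	/* pins the file, allocates a server fd */
	if (err)
		return err;

	/* ... serve remote requests against file->f_faf_srv_index ... */

	return close_faf_file(file);	/* drops the server fd and reference */
}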
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds) {
		pr_debug("[%s] fd %u exceeds max_fds %u (user: %s %d:%d)\n",
			 __func__, fd, fdt->max_fds,
			 current->comm, current->tgid, current->pid);
		goto out_unlock;
	}
	filp = fdt->fd[fd];
	if (!filp) {
		struct fdt_user *user = &fdt->user[fd];

		if (unlikely(user->remover && user->remover != current->pid)) {
			struct task_struct *task = find_task_by_vpid(user->remover);

			pr_warn("[%s] fd %u of %s %d:%d is"
				" already closed by thread %d (%s %d:%d)\n",
				__func__, fd, current->comm,
				current->tgid, current->pid, user->remover,
				task ? task->comm : "<unknown>",
				task ? task->tgid : -1,
				task ? task->pid : -1);
		}
		goto out_unlock;
	}
#ifdef CONFIG_HTC_FD_MONITOR
	if (in_fd_list(fd, 0) == 1) {
		printk("fd error: %s(%d) tries to close fd=%d illegally\n",
		       current->comm, current->pid, fd);
		spin_unlock(&files->file_lock);
		/* signal once, after dropping the lock */
		force_sig(SIGABRT, current);
		return 0xBADFD;
	}
#endif
	rcu_assign_pointer(fdt->fd[fd], NULL);
	fdt->user[fd].remover = current->pid;
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
void fastcall put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;

	if (need_files_checkpoint())
		checkpoint_files();

	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds) {
		pr_debug("[%s] fd %u exceeds max_fds %u (user: %s %d:%d)\n",
			 __func__, fd, fdt->max_fds,
			 current->comm, current->tgid, current->pid);
		goto out_unlock;
	}
	filp = fdt->fd[fd];
	if (!filp) {
		struct fdt_user *user = &fdt->user[fd];

		/*
		 * Detect a double close performed by another thread.
		 */
		if (unlikely(user->remover && user->remover != current->pid)) {
			struct task_struct *task = find_task_by_vpid(user->remover);

			pr_warn("[%s] fd %u of %s %d:%d is"
				" already closed by thread %d (%s %d:%d)\n",
				__func__, fd, current->comm,
				current->tgid, current->pid, user->remover,
				task ? task->comm : "<unknown>",
				task ? task->tgid : -1,
				task ? task->pid : -1);
		}
		goto out_unlock;
	}
	rcu_assign_pointer(fdt->fd[fd], NULL);
	fdt->user[fd].remover = current->pid;
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
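/*
 * Editor's sketch (not from the source): the userspace bug the remover
 * bookkeeping above is designed to catch. Two threads share an fd and
 * both close it; the loser gets EBADF, and with the instrumented kernel
 * the warning names the thread that closed it first.
 */
#include <pthread.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int shared_fd;

static void *closer(void *unused)
{
	if (close(shared_fd) == -1)
		perror("close");	/* second caller sees EBADF */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	shared_fd = open("/dev/null", O_RDONLY);
	pthread_create(&a, NULL, closer, NULL);
	pthread_create(&b, NULL, closer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}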
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct vms_fd *vms_fd;
	struct files_struct *files = current->files;
	struct _rabdef *rab;
	struct _fabdef *fab;
	struct _xabdatdef *dat;
	struct _xabfhcdef *fhc = 0;
	int cmu_close();

	write_lock(&files->file_lock);
	if (fd >= files->max_fds)
		goto out_unlock;
	vms_fd = files->fd[fd];
	if (!vms_fd)
		goto out_unlock;
	if (vms_fd->vfd$l_is_cmu) {
		/* drop the lock before delegating to the CMU close path */
		write_unlock(&files->file_lock);
		return cmu_close();
	}
	filp = vms_fd->vfd$l_fd_p;
	if (!filp)
		goto out_unlock;
	files->fd[fd] = NULL;
	FD_CLR(fd, files->close_on_exec);
	__put_unused_fd(files, fd);
	write_unlock(&files->file_lock);

#if 0
	if (fd < 3)
		return 0;		/* temp workaround */
	if (filp == files->fd[0])
		return 0;		/* another temp workaround */
#else
	vms_fd->vfd$l_refcnt--;
	if (vms_fd->vfd$l_refcnt)
		return 0;
#endif

	rab = (struct _rabdef *) filp;
	fab = rab->rab$l_fab;
	dat = fab->fab$l_xab;
	if (dat)
		fhc = dat->xab$l_nxt;
	exe$disconnect(rab);
	exe$close(fab);
	kfree(vms_fd);
	kfree(rab);
	kfree(fab);
	if (dat)
		kfree(dat);
	if (fhc)
		kfree(fhc);
	return 0;

out_unlock:
	write_unlock(&files->file_lock);
	return -EBADF;
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);

	/* A USB storage device may be unplugged right after a write
	 * completes, so flush the caches to disk on sys_close().
	 * Done after dropping file_lock, since sys_sync() sleeps.
	 * by Steven */
	if (filp->f_mode == (FMODE_WRITE | FMODE_LSEEK | FMODE_PREAD))
		sys_sync();

	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
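/*
 * Editor's sketch (not from the source): the portable way to get the
 * durability this vendor hack aims for is for userspace to fsync() the
 * file before closing it, rather than syncing every filesystem from
 * inside sys_close().
 */
#include <unistd.h>

static int write_then_close(int fd)
{
	int ret = fsync(fd);	/* flush this file's data and metadata */

	if (close(fd) == -1)
		return -1;
	return ret;
}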
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = current->files;

	write_lock(&files->file_lock);
	if (fd >= files->max_fds)
		goto out_unlock;
	filp = files->fd[fd];
	if (!filp)
		goto out_unlock;
	files->fd[fd] = NULL;
	FD_CLR(fd, files->close_on_exec);
	__put_unused_fd(files, fd);
	write_unlock(&files->file_lock);
	return filp_close(filp, files);

out_unlock:
	write_unlock(&files->file_lock);
	return -EBADF;
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

#ifdef CONFIG_SEC_DEBUG_ZERO_FD_CLOSE
	if (fd == 0 && strcmp(current->group_leader->comm, "mediaserver") == 0)
		panic("trying to close fd=0");
#endif

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(filp, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	if (need_files_checkpoint())
		checkpoint_files();

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	tx_cache_get_file(filp);		/* take the tx refcount on the file */
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);		/* XXX: check later */
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
/**
 * lookup_instantiate_filp - instantiates the open intent filp
 * @nd: pointer to nameidata
 * @dentry: pointer to dentry
 * @open: open callback
 *
 * Helper for filesystems that want to use lookup open intents and pass back
 * a fully instantiated struct file to the caller.
 * This function is meant to be called from within a filesystem's
 * lookup method.
 * Beware of calling it for non-regular files! Those ->open methods might block
 * (e.g. in fifo_open), leaving you with the parent locked (and, in the case of
 * a fifo, leading to a deadlock: nobody can open that fifo anymore, because
 * any other process trying to open it will block on the locked parent during
 * lookup).
 * Note that in case of error, nd->intent.open.file is destroyed, but the
 * path information remains valid.
 * If the open callback is set to NULL, then the standard f_op->open()
 * filesystem callback is substituted.
 */
struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
				     int (*open)(struct inode *, struct file *))
{
	struct path path = { .dentry = dentry, .mnt = nd->path.mnt };
	const struct cred *cred = current_cred();

	if (IS_ERR(nd->intent.open.file))
		goto out;
	if (IS_ERR(dentry))
		goto out_err;
	nd->intent.open.file = __dentry_open(&path, nd->intent.open.file,
					     open, cred);
out:
	return nd->intent.open.file;
out_err:
	release_open_intent(nd);
	nd->intent.open.file = ERR_CAST(dentry);
	goto out;
}
EXPORT_SYMBOL_GPL(lookup_instantiate_filp);

/**
 * nameidata_to_filp - convert a nameidata to an open filp.
 * @nd: pointer to nameidata
 *
 * Note that this function destroys the original nameidata.
 */
struct file *nameidata_to_filp(struct nameidata *nd)
{
	const struct cred *cred = current_cred();
	struct file *filp;

	/* Pick up the filp from the open intent */
	filp = nd->intent.open.file;

	/* Has the filesystem initialised the file for us? */
	if (filp->f_path.dentry != NULL) {
		nd->intent.open.file = NULL;
	} else {
		struct file *res;
		struct inode *inode = nd->path.dentry->d_inode;

		if (inode->i_op->open) {
			res = inode->i_op->open(nd->path.dentry, filp, cred);
			if (!IS_ERR(res))
				nd->intent.open.file = NULL;
			return res;
		}

		res = do_dentry_open(&nd->path, filp, NULL, cred);
		if (!IS_ERR(res)) {
			int error;

			nd->intent.open.file = NULL;
			BUG_ON(res != filp);

			error = open_check_o_direct(filp);
			if (error) {
				fput(filp);
				filp = ERR_PTR(error);
			}
		} else {
			/* Allow nd->intent.open.file to be recycled */
			filp = res;
		}
	}
	return filp;
}

/*
 * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an
 * error.
 */
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
			 const struct cred *cred)
{
	struct file *f;
	struct file *ret;
	struct path path = { .dentry = dentry, .mnt = mnt };

	validate_creds(cred);

	/* We must always pass in a valid mount pointer. */
	BUG_ON(!mnt);

	ret = ERR_PTR(-ENFILE);
	f = get_empty_filp();
	if (f != NULL) {
		f->f_flags = flags;
		ret = vfs_open(&path, f, cred);
	}
	path_put(&path);

	return ret;
}
EXPORT_SYMBOL(dentry_open);

/**
 * vfs_open - open the file at the given path
 * @path: path to open
 * @filp: newly allocated file with f_flag initialized
 * @cred: credentials to use
 *
 * Open the file. If successful, the returned file will have acquired
 * an additional reference for path.
 */
struct file *vfs_open(struct path *path, struct file *filp,
		      const struct cred *cred)
{
	struct inode *inode = path->dentry->d_inode;

	if (inode->i_op->open)
		return inode->i_op->open(path->dentry, filp, cred);
	else
		return __dentry_open(path, filp, NULL, cred);
}
EXPORT_SYMBOL(vfs_open);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;

	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad
 * things will follow.
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(fd_install);

static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
	int lookup_flags = 0;
	int acc_mode;

	if (flags & O_CREAT)
		op->mode = (mode & S_IALLUGO) | S_IFREG;
	else
		op->mode = 0;

	/* Must never be set by userspace */
	flags &= ~FMODE_NONOTIFY;

	/*
	 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
	 * check for O_DSYNC if they need any syncing at all, we enforce
	 * that it is always set, instead of having to deal with possibly
	 * weird behaviour for malicious applications setting only __O_SYNC.
	 */
	if (flags & __O_SYNC)
		flags |= O_DSYNC;

	/*
	 * If we have O_PATH in the open flags, then we cannot have
	 * anything other than the below set of flags.
	 */
	if (flags & O_PATH) {
		flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
		acc_mode = 0;
	} else {
		acc_mode = MAY_OPEN | ACC_MODE(flags);
	}

	op->open_flag = flags;

	/* O_TRUNC implies we need access checks for write permissions */
	if (flags & O_TRUNC)
		acc_mode |= MAY_WRITE;

	/* Allow the LSM permission hook to distinguish append
	   access from general write access. */
	if (flags & O_APPEND)
		acc_mode |= MAY_APPEND;

	op->acc_mode = acc_mode;

	op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;

	if (flags & O_CREAT) {
		op->intent |= LOOKUP_CREATE;
		if (flags & O_EXCL)
			op->intent |= LOOKUP_EXCL;
	}

	if (flags & O_DIRECTORY)
		lookup_flags |= LOOKUP_DIRECTORY;
	if (!(flags & O_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	return lookup_flags;
}
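/*
 * Editor's sketch (hypothetical caller, not from the source): a worked
 * example of what build_open_flags() above computes. For
 * O_WRONLY|O_CREAT|O_EXCL|O_TRUNC|O_APPEND it yields, per the code:
 * op->mode = (mode & S_IALLUGO) | S_IFREG,
 * acc_mode = MAY_OPEN | MAY_WRITE | MAY_APPEND, and
 * intent = LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_EXCL.
 */
static int example_build(umode_t mode, struct open_flags *op)
{
	int flags = O_WRONLY | O_CREAT | O_EXCL | O_TRUNC | O_APPEND;

	/* no O_DIRECTORY, no O_NOFOLLOW: returns LOOKUP_FOLLOW */
	return build_open_flags(flags, mode, op);
}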