int utime(char *name, struct utimbuf *times) { struct fs *fs; char *rest; int rc; char path[MAXPATH]; rc = canonicalize(name, path); if (rc < 0) return rc; rc = fslookup(path, 0, &fs, &rest); if (rc < 0) return rc; if (!times) return -EINVAL; if (!fs->ops->utime) return -ENOSYS; fs->locks++; if (lock_fs(fs, FSOP_UTIME) < 0) { fs->locks--; return -ETIMEOUT; } rc = fs->ops->utime(fs, rest, times); unlock_fs(fs, FSOP_UTIME); fs->locks--; return rc; }
//
// Device control operation on an open file.
//
// A few generic commands (non-blocking mode, TTY flag get/set) are
// handled directly on the handle; everything else is forwarded to the
// driver's ioctl handler under the fs lock. Returns 0 or a negative
// error code.
//
int ioctl(struct file *filp, int cmd, void *data, size_t size) {
  int rc;

  if (!filp) return -EINVAL;
  if (!data && size > 0) return -EINVAL;

  if (cmd == FIONBIO) {
    // Toggle non-blocking I/O on the handle itself.
    if (size != sizeof(int)) return -EINVAL;
    if (*(int *) data) {
      filp->flags |= O_NONBLOCK;
    } else {
      filp->flags &= ~O_NONBLOCK;
    }
    // NOTE(review): unlike the TTY cases below, this branch does not
    // return, so FIONBIO also falls through to the driver's ioctl
    // handler - confirm this pass-through is intentional.
  } else if (cmd == IOCTL_SET_TTY) {
    // Mark or unmark the handle as a TTY; handled entirely here.
    if (size != sizeof(int)) return -EINVAL;
    if (*(int *) data) {
      filp->flags |= F_TTY;
    } else {
      filp->flags &= ~F_TTY;
    }
    return 0;
  } else if (cmd == IOCTL_GET_TTY) {
    // Report whether the handle is marked as a TTY.
    if (size != sizeof(int)) return -EINVAL;
    *(int *) data = filp->flags & F_TTY ? 1 : 0;
    return 0;
  }

  if (!filp->fs->ops->ioctl) return -ENOSYS;
  if (lock_fs(filp->fs, FSOP_IOCTL) < 0) return -ETIMEOUT;
  rc = filp->fs->ops->ioctl(filp, cmd, data, size);
  unlock_fs(filp->fs, FSOP_IOCTL);
  return rc;
}
int link(char *oldname, char *newname) { struct fs *oldfs; struct fs *newfs; char *oldrest; char *newrest; int rc; char oldpath[MAXPATH]; char newpath[MAXPATH]; rc = canonicalize(oldname, oldpath); if (rc < 0) return rc; rc = fslookup(oldpath, 0, &oldfs, &oldrest); if (rc < 0) return rc; rc = canonicalize(newname, newpath); if (rc < 0) return rc; rc = fslookup(newpath, 0, &newfs, &newrest); if (rc < 0) return rc; if (oldfs != newfs) return -EXDEV; if (!oldfs->ops->link) return -ENOSYS; oldfs->locks++; if (lock_fs(oldfs, FSOP_LINK) < 0) { oldfs->locks--; return -ETIMEOUT; } rc = oldfs->ops->link(oldfs, oldrest, newrest); unlock_fs(oldfs, FSOP_LINK); oldfs->locks--; return rc; }
int chdir(char *name) { struct fs *fs; char *rest; int rc; char path[MAXPATH]; char newdir[MAXPATH]; struct stat64 buffer; rc = canonicalize(name, path); if (rc < 0) return rc; strcpy(newdir, path); rc = fslookup(path, 0, &fs, &rest); if (rc < 0) return rc; if (fs->ops->stat) { fs->locks++; if (lock_fs(fs, FSOP_STAT) < 0) { fs->locks--; return -ETIMEOUT; } rc = fs->ops->stat(fs, rest, &buffer); unlock_fs(fs, FSOP_STAT); fs->locks--; if (rc < 0) return rc; if ((buffer.st_mode & S_IFMT) != S_IFDIR) return -ENOTDIR; } strcpy(self()->curdir, newdir); return 0; }
//
// Reposition the file offset of an open file.
//
// 'origin' selects the reference point for 'offset'. Returns the new
// position, or a negative error code.
//
off64_t lseek(struct file *filp, off64_t offset, int origin) {
  off64_t newpos;

  if (!filp) return -EINVAL;
  if (!filp->fs->ops->lseek) return -ENOSYS;

  if (lock_fs(filp->fs, FSOP_LSEEK) < 0) return -ETIMEOUT;
  newpos = filp->fs->ops->lseek(filp, offset, origin);
  unlock_fs(filp->fs, FSOP_LSEEK);
  return newpos;
}
int fstat(struct file *filp, struct stat64 *buffer) { int rc; if (!filp) return -EINVAL; if (!filp->fs->ops->fstat) return -ENOSYS; if (lock_fs(filp->fs, FSOP_FSTAT) < 0) return -ETIMEOUT; rc = filp->fs->ops->fstat(filp, buffer); unlock_fs(filp->fs, FSOP_FSTAT); return rc; }
off64_t tell(struct file *filp) { off64_t rc; if (!filp) return -EINVAL; if (!filp->fs->ops->tell) return -ENOSYS; if (lock_fs(filp->fs, FSOP_TELL) < 0) return -ETIMEOUT; rc = filp->fs->ops->tell(filp); unlock_fs(filp->fs, FSOP_TELL); return rc; }
//
// Open a file and return a new file handle in '*retval'.
//
// The name is canonicalized and resolved to a mounted filesystem; the
// driver's open handler (if any) is invoked with the fs-relative rest
// of the path, and the access implied by the handle's flags is checked
// against the file's mode/owner/group.
//
// When a driver open handler runs and succeeds, fs->locks stays
// incremented for the lifetime of the handle (decremented again in
// close()). Returns 0 on success or a negative error code; on error
// the partially built handle is freed.
//
int open(char *name, int flags, int mode, struct file **retval) {
  struct fs *fs;
  struct file *filp;
  int rc;
  char *rest;
  char path[MAXPATH];

  rc = canonicalize(name, path);
  if (rc < 0) return rc;

  rc = fslookup(path, 0, &fs, &rest);
  if (rc < 0) return rc;

  // Allocate and initialize the handle for this fs/path/flags/mode.
  filp = newfile(fs, path, flags, mode);
  if (!filp) return -EMFILE;

  if (fs->ops->open) {
    fs->locks++;
    if (lock_fs(fs, FSOP_OPEN) < 0) {
      fs->locks--;
      kfree(filp->path);
      kfree(filp);
      return -ETIMEOUT;
    }
    rc = fs->ops->open(filp, rest);
    if (rc == 0) {
      // Translate the open flags into the access rights to verify.
      int access;
      if (filp->flags & O_RDWR) {
        access = S_IREAD | S_IWRITE;
      } else if (filp->flags & O_WRONLY) {
        access = S_IWRITE;
      } else {
        access = S_IREAD;
      }
      rc = check(filp->mode, filp->owner, filp->group, access);
    }
    unlock_fs(fs, FSOP_OPEN);
    if (rc != 0) {
      // Open or permission check failed: drop the lock count and handle.
      // NOTE(review): when only the permission check fails, the driver's
      // open has already succeeded but no close callback is issued -
      // confirm drivers do not leak state here.
      fs->locks--;
      kfree(filp->path);
      kfree(filp);
      return rc;
    }
  }

  *retval = filp;
  return 0;
}
//
// Truncate (or extend) an open file to 'size' bytes.
//
// The handle must have been opened with write access. Returns 0 on
// success or a negative error code.
//
int ftruncate(struct file *filp, off64_t size) {
  int rc;

  if (!filp) return -EINVAL;
  // BUG FIX: the previous test 'filp->flags & O_RDONLY' can never fire
  // when O_RDONLY is defined as 0 (the conventional value), so read-only
  // handles were not rejected. A handle lacks write access exactly when
  // neither write-mode bit is set.
  if ((filp->flags & (O_WRONLY | O_RDWR)) == 0) return -EACCES;
  if (!filp->fs->ops->ftruncate) return -ENOSYS;

  if (lock_fs(filp->fs, FSOP_FTRUNCATE) < 0) return -ETIMEOUT;
  rc = filp->fs->ops->ftruncate(filp, size);
  unlock_fs(filp->fs, FSOP_FTRUNCATE);
  return rc;
}
int fchown(struct file *filp, int owner, int group) { int rc; if (!filp) return -EINVAL; if (filp->flags & O_RDONLY) return -EACCES; if (!filp->fs->ops->fchown) return -ENOSYS; if (lock_fs(filp->fs, FSOP_FCHOWN) < 0) return -ETIMEOUT; rc = filp->fs->ops->fchown(filp, owner, group); unlock_fs(filp->fs, FSOP_FCHOWN); return rc; }
int fchmod(struct file *filp, int mode) { int rc; if (!filp) return -EINVAL; if (filp->flags & O_RDONLY) return -EACCES; if (!filp->fs->ops->fchmod) return -ENOSYS; if (lock_fs(filp->fs, FSOP_FCHMOD) < 0) return -ETIMEOUT; rc = filp->fs->ops->fchmod(filp, mode); unlock_fs(filp->fs, FSOP_FCHMOD); return rc; }
int readdir(struct file *filp, struct direntry *dirp, int count) { int rc; if (!filp) return -EINVAL; if (!dirp) return -EINVAL; if (!(filp->flags & F_DIR)) return -EINVAL; if (!filp->fs->ops->readdir) return -ENOSYS; if (lock_fs(filp->fs, FSOP_READDIR) < 0) return -ETIMEOUT; rc = filp->fs->ops->readdir(filp, dirp, count); unlock_fs(filp->fs, FSOP_READDIR); return rc; }
int futime(struct file *filp, struct utimbuf *times) { int rc; if (!filp) return -EINVAL; if (!times) return -EINVAL; if (filp->flags & O_RDONLY) return -EACCES; if (!filp->fs->ops->futime) return -ENOSYS; if (lock_fs(filp->fs, FSOP_FUTIME) < 0) return -ETIMEOUT; rc = filp->fs->ops->futime(filp, times); unlock_fs(filp->fs, FSOP_FUTIME); return rc; }
int fsync(struct file *filp) { int rc; if (!filp) return -EINVAL; if (!filp->fs->ops->fsync) return -ENOSYS; if (lock_fs(filp->fs, FSOP_FSYNC) < 0) return -ETIMEOUT; rc = filp->fs->ops->fsync(filp); unlock_fs(filp->fs, FSOP_FSYNC); return rc; }
//
// Write 'size' bytes at an explicit file offset, without moving the
// handle's current position.
//
// Text-mode handles are rejected (positional I/O bypasses newline
// translation). Returns the number of bytes written or a negative
// error code.
//
int pwrite(struct file *filp, void *data, size_t size, off64_t offset) {
  int rc;

  if (!filp) return -EINVAL;
  if ((!data && size > 0) || offset < 0) return -EINVAL;
  // BUG FIX: the previous test 'filp->flags == O_RDONLY' only matched
  // when no other flag bits (O_TEXT, O_NONBLOCK, ...) were set, so many
  // read-only handles slipped through; test the write-mode bits instead.
  if ((filp->flags & (O_WRONLY | O_RDWR)) == 0) return -EACCES;
  if (filp->flags & O_TEXT) return -ENXIO;
  if (!filp->fs->ops->write) return -ENOSYS;

  if (lock_fs(filp->fs, FSOP_WRITE) < 0) return -ETIMEOUT;
  rc = filp->fs->ops->write(filp, data, size, offset);
  unlock_fs(filp->fs, FSOP_WRITE);
  return rc;
}
//
// Read 'size' bytes from an explicit file offset, without moving the
// handle's current position.
//
// Text-mode handles are rejected (positional I/O bypasses newline
// translation). Returns the number of bytes read or a negative error
// code.
//
int pread(struct file *filp, void *data, size_t size, off64_t offset) {
  struct fs *fs;
  int rc;

  if (!filp) return -EINVAL;
  if ((!data && size > 0) || offset < 0) return -EINVAL;
  if (filp->flags & O_WRONLY) return -EACCES;
  if (filp->flags & O_TEXT) return -ENXIO;

  fs = filp->fs;
  if (!fs->ops->read) return -ENOSYS;

  if (lock_fs(fs, FSOP_READ) < 0) return -ETIMEOUT;
  rc = fs->ops->read(filp, data, size, offset);
  unlock_fs(fs, FSOP_READ);
  return rc;
}
int access(char *name, int mode) { struct fs *fs; char *rest; int rc; char path[MAXPATH]; rc = canonicalize(name, path); if (rc < 0) return rc; rc = fslookup(path, 0, &fs, &rest); if (rc < 0) return rc; if (!fs->ops->access) { struct thread *thread = self(); struct stat64 buf; rc = stat(name, &buf); if (rc < 0) return rc; if (mode == 0) return 0; if (thread->euid == 0) { if (mode == X_OK) { return buf.st_mode & 0111 ? 0 : -EACCES; } else { return 0; } } if (thread->euid == buf.st_uid) { mode <<= 6; } else if (thread->egid == buf.st_gid) { mode <<= 3; } if ((mode && buf.st_mode) == 0) return -EACCES; return 0; } fs->locks++; if (lock_fs(fs, FSOP_ACCESS) < 0) { fs->locks--; return -ETIMEOUT; } rc = fs->ops->access(fs, rest, mode); unlock_fs(fs, FSOP_ACCESS); fs->locks--; return rc; }
//
// Write 'size' bytes at the handle's current position, advancing the
// position by the number of bytes written.
//
// Text-mode handles go through write_translated() for newline
// expansion. Returns the number of bytes written or a negative error
// code.
//
int write(struct file *filp, void *data, size_t size) {
  int rc;

  if (!filp) return -EINVAL;
  if (!data && size > 0) return -EINVAL;
  // BUG FIX: the previous test 'filp->flags == O_RDONLY' failed to reject
  // read-only handles carrying extra flag bits (O_TEXT, O_NONBLOCK, ...);
  // test the write-mode bits instead, consistent with pwrite().
  if ((filp->flags & (O_WRONLY | O_RDWR)) == 0) return -EACCES;
  if (!filp->fs->ops->write) return -ENOSYS;

  if (lock_fs(filp->fs, FSOP_WRITE) < 0) return -ETIMEOUT;
  if (filp->flags & O_TEXT) {
    rc = write_translated(filp, data, size);
  } else {
    rc = filp->fs->ops->write(filp, data, size, filp->pos);
    if (rc > 0) filp->pos += rc;
  }
  unlock_fs(filp->fs, FSOP_WRITE);
  return rc;
}
//
// Read up to 'size' bytes at the handle's current position, advancing
// the position by the number of bytes read.
//
// Text-mode handles go through read_translated() for newline
// translation. Returns the number of bytes read or a negative error
// code.
//
int read(struct file *filp, void *data, size_t size) {
  int rc;

  if (!filp) return -EINVAL;
  if (!data && size > 0) return -EINVAL;
  if (filp->flags & O_WRONLY) return -EACCES;
  if (!filp->fs->ops->read) return -ENOSYS;

  if (lock_fs(filp->fs, FSOP_READ) < 0) return -ETIMEOUT;
  if (!(filp->flags & O_TEXT)) {
    // Binary mode: raw driver read at the current position.
    rc = filp->fs->ops->read(filp, data, size, filp->pos);
    if (rc > 0) filp->pos += rc;
  } else {
    rc = read_translated(filp, data, size);
  }
  unlock_fs(filp->fs, FSOP_READ);
  return rc;
}
int opendir(char *name, struct file **retval) { struct fs *fs; struct file *filp; int rc; char *rest; char path[MAXPATH]; rc = canonicalize(name, path); if (rc < 0) return rc; rc = fslookup(path, 1, &fs, &rest); if (rc < 0) return rc; if (!fs->ops->opendir) return -ENOSYS; filp = (struct file *) kmalloc(sizeof(struct file)); if (!filp) return -ENOMEM; init_ioobject(&filp->iob, OBJECT_FILE); filp->fs = fs; filp->flags = O_RDONLY | F_DIR; filp->pos = 0; filp->data = NULL; filp->path = strdup(path); fs->locks++; if (lock_fs(fs, FSOP_OPENDIR) < 0) { fs->locks--; kfree(filp->path); kfree(filp); return -ETIMEOUT; } rc = fs->ops->opendir(filp, rest); unlock_fs(fs, FSOP_OPENDIR); if (rc != 0) { fs->locks--; kfree(filp->path); kfree(filp); return rc; } *retval = filp; return 0; }
//
// Close a file or directory handle.
//
// Invokes the driver's close handler (if any) under the fs lock. On
// success fs->locks is decremented, balancing the increment done when
// the handle was opened. The handle's I/O object is detached, its
// path buffer freed, and the handle marked F_CLOSED.
//
// NOTE(review): if the driver's close fails, the handle is still
// detached, its path freed, and F_CLOSED set, but fs->locks is NOT
// decremented - confirm this lock-count behavior for failed closes is
// intentional.
//
int close(struct file *filp) {
  int rc;

  if (!filp) return -EINVAL;

  if (filp->fs->ops->close) {
    if (lock_fs(filp->fs, FSOP_CLOSE) < 0) return -ETIMEOUT;
    rc = filp->fs->ops->close(filp);
    unlock_fs(filp->fs, FSOP_CLOSE);
  } else {
    rc = 0;
  }

  // Drop the mount's lock count only when the driver close succeeded.
  if (rc == 0) filp->fs->locks--;

  detach_ioobject(&filp->iob);
  kfree(filp->path);
  filp->path = NULL;
  filp->flags |= F_CLOSED;
  return rc;
}
//
// Change the owner and group of the file named by 'name'.
//
// Returns 0 on success or a negative error code.
//
int chown(char *name, int owner, int group) {
  struct fs *fs;
  char *rest;
  int rc;
  char path[MAXPATH];

  rc = canonicalize(name, path);
  if (rc < 0) return rc;
  rc = fslookup(path, 0, &fs, &rest);
  if (rc < 0) return rc;
  // BUG FIX: the capability test probed the 'chmod' handler (copy/paste
  // error) while the call below invokes 'chown'; a filesystem providing
  // chmod but not chown would have crashed through a NULL handler.
  if (!fs->ops->chown) return -ENOSYS;

  fs->locks++;
  if (lock_fs(fs, FSOP_CHOWN) < 0) {
    fs->locks--;
    return -ETIMEOUT;
  }
  rc = fs->ops->chown(fs, rest, owner, group);
  unlock_fs(fs, FSOP_CHOWN);
  fs->locks--;
  return rc;
}
//
// Retrieve file status for the file named by 'name'.
//
// The driver's stat handler fills 'buffer'. Returns 0 (or a driver-
// defined non-negative value) on success, negative error code on
// failure.
//
int stat(char *name, struct stat64 *buffer) {
  struct fs *mount;
  char *subpath;
  int err;
  char fullpath[MAXPATH];

  err = canonicalize(name, fullpath);
  if (err < 0) return err;
  err = fslookup(fullpath, 0, &mount, &subpath);
  if (err < 0) return err;
  if (!mount->ops->stat) return -ENOSYS;

  mount->locks++;
  if (lock_fs(mount, FSOP_STAT) < 0) {
    mount->locks--;
    return -ETIMEOUT;
  }
  err = mount->ops->stat(mount, subpath, buffer);
  unlock_fs(mount, FSOP_STAT);
  mount->locks--;
  return err;
}
int mkdir(char *name, int mode) { struct fs *fs; char *rest; int rc; char path[MAXPATH]; rc = canonicalize(name, path); if (rc < 0) return rc; rc = fslookup(path, 0, &fs, &rest); if (rc < 0) return rc; if (!fs->ops->mkdir) return -ENOSYS; fs->locks++; if (lock_fs(fs, FSOP_MKDIR) < 0) { fs->locks--; return -ETIMEOUT; } rc = fs->ops->mkdir(fs, rest, mode & ~(peb ? peb->umaskval : 0)); unlock_fs(fs, FSOP_MKDIR); fs->locks--; return rc; }
//
// Remove the directory entry for 'name'.
//
// Returns 0 on success or a negative error code.
//
int unlink(char *name) {
  struct fs *fs;
  char *rest;
  int rc;
  char path[MAXPATH];

  rc = canonicalize(name, path);
  if (rc < 0) return rc;
  rc = fslookup(path, 0, &fs, &rest);
  if (rc < 0) return rc;
  if (!fs->ops->unlink) return -ENOSYS;

  fs->locks++;
  if (lock_fs(fs, FSOP_UNLINK) < 0) {
    fs->locks--;
    return -ETIMEOUT;
  }
  rc = fs->ops->unlink(fs, rest);
  // CONSISTENCY FIX: the lock count was decremented before unlock_fs(),
  // inverted relative to every sibling operation (utime, link, stat, ...).
  // Release the fs lock first so the count cannot reach zero while the
  // filesystem is still held.
  unlock_fs(fs, FSOP_UNLINK);
  fs->locks--;
  return rc;
}
//
// Fill 'buf' with statistics for the mounted filesystem 'fs'.
//
// Mount metadata (fs type, mount points) is always reported. Capacity
// figures come from the driver's statfs handler; when the driver has
// none, all capacity fields are set to -1 (unknown). Returns 0 on
// success or a negative error code.
//
static int get_fsstat(struct fs *fs, struct statfs *buf) {
  int rc;

  strcpy(buf->fstype, fs->fsys->name);
  strcpy(buf->mntto, fs->mntto);
  strcpy(buf->mntfrom, fs->mntfrom);

  if (!fs->ops->statfs) {
    // No driver support: mark all capacity fields as unknown.
    buf->bsize = -1;
    buf->iosize = -1;
    buf->blocks = -1;
    buf->bfree = -1;
    buf->files = -1;
    buf->ffree = -1;
    return 0;
  }

  if (lock_fs(fs, FSOP_STATFS) < 0) return -ETIMEOUT;
  rc = fs->ops->statfs(fs, buf);
  unlock_fs(fs, FSOP_STATFS);
  if (rc < 0) return rc;

  return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 *
 * Returns 0 on success; -EINVAL if already suspended, -ENOMEM if the
 * backing bdev cannot be pinned, -EINTR if a signal aborted the wait
 * for in-flight I/O. On any failure the device is left unsuspended.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
        struct dm_table *map = NULL;
        DECLARE_WAITQUEUE(wait, current);
        struct bio *def;
        int r = -EINVAL;

        down(&md->suspend_lock);

        if (dm_suspended(md))
                goto out;

        map = dm_get_table(md);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        /* Pin the whole-disk bdev for the duration of the suspend. */
        md->suspended_bdev = bdget_disk(md->disk, 0);
        if (!md->suspended_bdev) {
                DMWARN("bdget failed in dm_suspend");
                r = -ENOMEM;
                goto out;
        }

        /* Flush I/O to the device. */
        if (do_lockfs) {
                r = lock_fs(md);
                if (r)
                        goto out;
        }

        /*
         * First we set the BLOCK_IO flag so no more ios will be mapped.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO, &md->flags);

        add_wait_queue(&md->wait, &wait);
        up_write(&md->io_lock);

        /* unplug */
        if (map)
                dm_table_unplug_all(map);

        /*
         * Then we wait for the already mapped ios to
         * complete.
         */
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!atomic_read(&md->pending) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        down_write(&md->io_lock);
        remove_wait_queue(&md->wait, &wait);

        /* were we interrupted ? */
        r = -EINTR;
        if (atomic_read(&md->pending)) {
                /* Signal arrived: undo the block and resubmit deferred bios. */
                clear_bit(DMF_BLOCK_IO, &md->flags);
                def = bio_list_get(&md->deferred);
                __flush_deferred_io(md, def);
                up_write(&md->io_lock);
                unlock_fs(md);
                goto out;
        }
        up_write(&md->io_lock);

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

        r = 0;

out:
        if (r && md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        dm_table_put(map);
        up(&md->suspend_lock);
        return r;
}
// Lock the filesystem context of the current task; must be paired
// with a matching unlock by the caller.
static void lock_cfs(void) { lock_fs(current->fs_struct); }
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 *
 * suspend_flags: DM_SUSPEND_LOCKFS_FLAG freezes the filesystem before
 * suspending; DM_SUSPEND_NOFLUSH_FLAG suspends without flushing queued
 * I/O (bios are pushed back for resubmission on resume) and supersedes
 * LOCKFS, since lock_fs() itself has to flush I/O.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
        struct dm_table *map = NULL;
        unsigned long flags;
        DECLARE_WAITQUEUE(wait, current);
        struct bio *def;
        int r = -EINVAL;
        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

        down(&md->suspend_lock);

        if (dm_suspended(md))
                goto out_unlock;

        map = dm_get_table(md);

        /*
         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
         * This flag is cleared before dm_suspend returns.
         */
        if (noflush)
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        /* bdget() can stall if the pending I/Os are not flushed */
        if (!noflush) {
                md->suspended_bdev = bdget_disk(md->disk, 0);
                if (!md->suspended_bdev) {
                        DMWARN("bdget failed in dm_suspend");
                        r = -ENOMEM;
                        goto flush_and_out;
                }
        }

        /*
         * Flush I/O to the device.
         * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
         */
        if (do_lockfs && !noflush) {
                r = lock_fs(md);
                if (r)
                        goto out;
        }

        /*
         * First we set the BLOCK_IO flag so no more ios will be mapped.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO, &md->flags);

        add_wait_queue(&md->wait, &wait);
        up_write(&md->io_lock);

        /* unplug */
        if (map)
                dm_table_unplug_all(map);

        /*
         * Then we wait for the already mapped ios to
         * complete.
         */
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!atomic_read(&md->pending) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        down_write(&md->io_lock);
        remove_wait_queue(&md->wait, &wait);

        if (noflush) {
                /* Requeue pushed-back bios at the head of the deferred list. */
                spin_lock_irqsave(&md->pushback_lock, flags);
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
                bio_list_merge_head(&md->deferred, &md->pushback);
                bio_list_init(&md->pushback);
                spin_unlock_irqrestore(&md->pushback_lock, flags);
        }

        /* were we interrupted ? */
        r = -EINTR;
        if (atomic_read(&md->pending)) {
                clear_bit(DMF_BLOCK_IO, &md->flags);
                def = bio_list_get(&md->deferred);
                __flush_deferred_io(md, def);
                up_write(&md->io_lock);
                unlock_fs(md);
                goto out; /* pushback list is already flushed, so skip flush */
        }
        up_write(&md->io_lock);

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

        r = 0;

flush_and_out:
        if (r && noflush) {
                /*
                 * Because there may be already I/Os in the pushback list,
                 * flush them before return.
                 */
                down_write(&md->io_lock);

                spin_lock_irqsave(&md->pushback_lock, flags);
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
                bio_list_merge_head(&md->deferred, &md->pushback);
                bio_list_init(&md->pushback);
                spin_unlock_irqrestore(&md->pushback_lock, flags);

                def = bio_list_get(&md->deferred);
                __flush_deferred_io(md, def);
                up_write(&md->io_lock);
        }

out:
        if (r && md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        dm_table_put(map);

out_unlock:
        up(&md->suspend_lock);
        return r;
}
// Lock the filesystem context of the current task (read via pls);
// must be paired with a matching unlock by the caller.
static void lock_cfs(void) { lock_fs(pls_read(current)->fs_struct); }