/**
 * acct_auto_close_mnt - turn off a filesystem's accounting if it is on
 * @m: vfsmount being shut down
 *
 * If the accounting is turned on for a file in the subtree pointed
 * to by m, turn accounting off. Done when m is about to die.
 */
void acct_auto_close_mnt(struct vfsmount *m)
{
	spin_lock(&acct_globals.lock);
	/* Read the accounting file through its transactional read-only view
	 * and compare its mount against the dying one. */
	if (acct_globals.file &&
	    tx_cache_get_file_ro(acct_globals.file)->f_path.mnt == m)
		acct_file_reopen(NULL);	/* NULL = close, no replacement file */
	spin_unlock(&acct_globals.lock);
}
/*
 * Close the old accounting file (if currently open) and then replace
 * it with file (if non-NULL).
 *
 * NOTE: acct_globals.lock MUST be held on entry and exit.  The lock is
 * temporarily dropped around the final flush/close of the old file,
 * which may block; callers must not rely on the globals being stable
 * across this call.
 */
static void acct_file_reopen(struct file *file)
{
	struct file *old_acct = NULL;

	/* Detach the current accounting file, if any, and stop its
	 * periodic free-space check timer. */
	if (acct_globals.file) {
		old_acct = acct_globals.file;
		del_timer(&acct_globals.timer);
		acct_globals.active = 0;
		acct_globals.needcheck = 0;
		acct_globals.file = NULL;
	}
	/* Install the replacement file and (re)arm the check timer. */
	if (file) {
		acct_globals.file = file;
		acct_globals.needcheck = 0;
		acct_globals.active = 1;
		/* It's been deleted if it was used before so this is safe */
		init_timer(&acct_globals.timer);
		acct_globals.timer.function = acct_timeout;
		acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ;
		add_timer(&acct_globals.timer);
	}
	/* Flush one final accounting record for the old file and close it.
	 * These may sleep, so the spinlock is dropped around them and
	 * reacquired before returning (lock held on exit, per contract). */
	if (old_acct) {
		mnt_unpin(tx_cache_get_file_ro(old_acct)->f_path.mnt);
		spin_unlock(&acct_globals.lock);
		do_acct_process(old_acct);
		filp_close(old_acct, NULL);
		spin_lock(&acct_globals.lock);
	}
}
asmlinkage long sys_fchdir(unsigned int fd) { struct file *file; struct _file *_file; struct dentry *dentry; struct _inode *inode; struct vfsmount *mnt; struct fs_struct *fs; int error; error = -EBADF; file = fget(fd); if (!file) goto out; _file = tx_cache_get_file_ro(file); dentry = _file->f_path.dentry; mnt = _file->f_path.mnt; inode = dentry_get_inode(dentry); error = -ENOTDIR; if (!S_ISDIR(inode->i_mode)) goto out_putf; error = file_permission(file, MAY_EXEC); if (!error) { fs = tx_cache_get_fs(current); set_fs_pwd(fs, mnt, dentry); } out_putf: fput(file); out: return error; }
static int file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error; int block; struct _inode * inode = d_get_inode(file_get_dentry(filp)); int __user *p = (int __user *)arg; switch (cmd) { case FIBMAP: { struct address_space *mapping = filp->f_mapping; int res; /* do we support this mess? */ if (!mapping->a_ops->bmap) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if ((error = get_user(block, p)) != 0) return error; lock_kernel(); res = mapping->a_ops->bmap(mapping, block); unlock_kernel(); return put_user(res, p); } case FIGETBSZ: return put_user(inode->i_sb->s_blocksize, p); case FIONREAD: return put_user(i_size_read(inode) - tx_cache_get_file_ro(filp)->f_pos, p); } return do_ioctl(filp, cmd, arg); }
/*
 * Sync a file's dirty data (and metadata unless datasync) to disk.
 *
 * Inside a live transaction the sync is not performed now: a deferred
 * operation is queued on the inode's workset node and executed at
 * commit time instead, and 0 is returned immediately.
 */
long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;

	if (live_transaction()){
		/* DEP 5/27/10 - Defer fsync until commit. */
		struct deferred_object_operation *def_op;
		txobj_thread_list_node_t *list_node =
			workset_has_object(&file->f_mapping->host->xobj);

		/* If the inode isn't in our workset yet, touch the file and
		 * inode read-only to add it, then look the node up again. */
		if (!list_node) {
			tx_cache_get_file_ro(file);
			tx_cache_get_inode_ro(file->f_mapping->host);
			list_node = workset_has_object(&file->f_mapping->host->xobj);
		}

		/* NOTE(review): allocation result is used unchecked —
		 * presumably alloc_deferred_object_operation() cannot fail
		 * or panics internally; verify. */
		def_op = alloc_deferred_object_operation();
		INIT_LIST_HEAD(&def_op->list);
		def_op->type = DEFERRED_TYPE_FSYNC;
		def_op->u.fsync.datasync = datasync;
		def_op->u.fsync.file = file;

		/* DEP: Pin the file until the sync is executed */
		tx_atomic_inc_not_zero(&file->f_count);

		// XXX: Could probably use something finer grained here.
		WORKSET_LOCK(current->transaction);
		list_add(&def_op->list, &list_node->deferred_operations);
		WORKSET_UNLOCK(current->transaction);
		return 0;
	}

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

	/* Start writeback of dirty pages before taking i_mutex. */
	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().  During transaction commit the
	 * mutex is already held elsewhere, so skip taking it.
	 */
	if (!committing_transaction())
		mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file_get_dentry(file), datasync);
	if (!ret)
		ret = err;	/* first error wins */
	if (!committing_transaction())
		mutex_unlock(&mapping->host->i_mutex);

	/* Wait for the writeback started above to finish. */
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
/** * acct_auto_close - turn off a filesystem's accounting if it is on * @sb: super block for the filesystem * * If the accounting is turned on for a file in the filesystem pointed * to by sb, turn accounting off. */ void acct_auto_close(struct super_block *sb) { spin_lock(&acct_globals.lock); if (acct_globals.file && tx_cache_get_file_ro(acct_globals.file)->f_path.mnt->mnt_sb == sb) { acct_file_reopen(NULL); } spin_unlock(&acct_globals.lock); }
/**
 * nameidata_to_filp - convert a nameidata to an open filp.
 * @nd: pointer to nameidata
 * @flags: open flags
 *
 * Note that this function destroys the original nameidata
 */
struct file *nameidata_to_filp(struct nameidata *nd, int flags)
{
	/* Pick up the filp from the open intent */
	struct file *filp = nd->intent.open.file;

	/* If the filesystem already initialised the file (dentry set),
	 * just drop the path reference; otherwise finish the open here. */
	if (tx_cache_get_file_ro(filp)->f_path.dentry != NULL)
		path_release(nd);
	else
		filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL);

	return filp;
}
/*
 * Read the eventfd counter.  Copies the current 64-bit count to
 * userspace and resets it to zero.  A zero count either returns
 * -EAGAIN (O_NONBLOCK) or sleeps on the context waitqueue until the
 * count becomes non-zero or a signal arrives.
 */
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	/* Reads must be able to hold a full 64-bit counter. */
	if (count < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	ucnt = ctx->count;
	if (ucnt > 0)
		res = sizeof(ucnt);
	else if (!(tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK)) {
		/* Blocking read: wait for the counter to become non-zero.
		 * The wqh lock is dropped only around schedule(). */
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				ucnt = ctx->count;
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	/* Successful read consumes the counter and wakes any writers
	 * blocked waiting for room. */
	if (res > 0) {
		ctx->count = 0;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked(&ctx->wqh);
	}
	spin_unlock_irq(&ctx->wqh.lock);
	/* Copy out after dropping the lock; report -EFAULT on failure. */
	if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
		return -EFAULT;

	return res;
}
/*
 * Common implementation of ftruncate(2)/ftruncate64(2): truncate the
 * file behind fd to length.  @small is true when called from the
 * 32-bit entry point, which caps length at MAX_NON_LFS unless the file
 * was opened with O_LARGEFILE.
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct _inode * inode;
	struct _dentry *dentry;
	struct file * file;
	struct _file * _file;
	int error;

	error = -EINVAL;
	if (length < 0)
		goto out;
	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	_file = tx_cache_get_file_ro(file);

	/* explicitly opened as large or we are on 64-bit box */
	if (_file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = f_get_dentry(_file);
	inode = d_get_inode(dentry);
	/* Only regular files opened for writing may be truncated. */
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) || !(_file->f_mode & FMODE_WRITE))
		goto out_putf;

	error = -EINVAL;
	/* Cannot ftruncate over 2^31 bytes without large file support */
	if (small && length > MAX_NON_LFS)
		goto out_putf;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto out_putf;

	/* Check for conflicting mandatory locks before truncating. */
	error = locks_verify_truncate(parent(inode), file, length);
	if (!error)
		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
out_putf:
	fput(file);
out:
	return error;
}
/*
 * Read ACPI bus events as formatted text lines.  A received event is
 * rendered into a static buffer and handed out to readers across
 * possibly multiple read() calls via the static chars_remaining/ptr
 * cursor.
 *
 * NOTE(review): the static buffers and cursor are shared by all
 * openers with no visible locking here — presumably serialized by the
 * caller or by single-opener usage; verify.
 */
static ssize_t acpi_system_read_event(struct file *file, char __user * buffer,
				      size_t count, loff_t * ppos)
{
	int result = 0;
	struct acpi_bus_event event;
	static char str[ACPI_MAX_STRING];
	static int chars_remaining = 0;
	static char *ptr;

	/* No partially-delivered line pending: fetch the next event. */
	if (!chars_remaining) {
		memset(&event, 0, sizeof(struct acpi_bus_event));

		/* Non-blocking readers bail out when no event is queued. */
		if ((tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK)
		    && (list_empty(&acpi_bus_event_list)))
			return -EAGAIN;

		result = acpi_bus_receive_event(&event);
		if (result)
			return result;

		chars_remaining = sprintf(str, "%s %s %08x %08x\n",
					  event.device_class ? event.
					  device_class : "<unknown>",
					  event.bus_id ? event.
					  bus_id : "<unknown>", event.type,
					  event.data);
		ptr = str;
	}

	/* Clip the copy to what is left of the current line.
	 * NOTE(review): int vs size_t comparison — fine while
	 * chars_remaining is non-negative, as it is here. */
	if (chars_remaining < count) {
		count = chars_remaining;
	}

	if (copy_to_user(buffer, ptr, count))
		return -EFAULT;

	/* Advance the shared cursor past what was delivered. */
	*ppos += count;
	chars_remaining -= count;
	ptr += count;

	return count;
}
int gs_block_til_ready(void *port_, struct file * filp) { struct gs_port *port = port_; DECLARE_WAITQUEUE(wait, current); int retval; int do_clocal = 0; int CD; struct tty_struct *tty; unsigned long flags; func_enter (); if (!port) return 0; tty = port->tty; if (!tty) return 0; gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n"); /* * If the device is in the middle of being closed, then block * until it's done, and then try again. */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { interruptible_sleep_on(&port->close_wait); if (port->flags & ASYNC_HUP_NOTIFY) return -EAGAIN; else return -ERESTARTSYS; } gs_dprintk (GS_DEBUG_BTR, "after hung up\n"); /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. */ if ((tx_cache_get_file_ro(filp)->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { port->flags |= ASYNC_NORMAL_ACTIVE; return 0; } gs_dprintk (GS_DEBUG_BTR, "after nonblock\n"); if (C_CLOCAL(tty)) do_clocal = 1; /* * Block waiting for the carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, port->count is dropped by one, so that * rs_close() knows when to free things. We restore it upon * exit, either normal or abnormal. 
*/ retval = 0; add_wait_queue(&port->open_wait, &wait); gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n"); spin_lock_irqsave(&port->driver_lock, flags); if (!tty_hung_up_p(filp)) { port->count--; } spin_unlock_irqrestore(&port->driver_lock, flags); port->blocked_open++; while (1) { CD = port->rd->get_CD (port); gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD); set_current_state (TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) { if (port->flags & ASYNC_HUP_NOTIFY) retval = -EAGAIN; else retval = -ERESTARTSYS; break; } if (!(port->flags & ASYNC_CLOSING) && (do_clocal || CD)) break; gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n", (int)signal_pending (current), *(long*)(¤t->blocked)); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n", port->blocked_open); set_current_state (TASK_RUNNING); remove_wait_queue(&port->open_wait, &wait); if (!tty_hung_up_p(filp)) { port->count++; } port->blocked_open--; if (retval) return retval; port->flags |= ASYNC_NORMAL_ACTIVE; func_exit (); return 0; }
/* This is an inline function, we don't really care about a long
 * list of arguments.
 *
 * Build one NFULNL_MSG_PACKET netlink message for the logged skb into
 * inst->skb: netlink + nfgenmsg headers, then one NFULA_* attribute
 * per available piece of metadata, then (optionally) the payload.
 * Returns 0 on success, -1 when the message could not be built
 * (NLMSG_PUT/NFA_PUT goto the failure labels on insufficient room).
 */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			unsigned int pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	__be32 tmp_uint;
	/* Remember where this message starts so nlmsg_len can be fixed
	 * up at the end. */
	sk_buff_data_t old_tail = inst->skb->tail;

	UDEBUG("entered\n");

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NFA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physindev) {
				tmp_uint =
				    htonl(skb->nf_bridge->physindev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is a bridge group, we need to look
			 * for physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physoutdev) {
				tmp_uint =
				    htonl(skb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (skb->mark) {
		tmp_uint = htonl(skb->mark);
		NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
	}

	/* Source MAC address, when the device can parse it out. */
	if (indev && skb->dev && skb->dev->hard_header_parse) {
		struct nfulnl_msg_packet_hw phw;
		int len = skb->dev->hard_header_parse((struct sk_buff *)skb,
						      phw.hw_addr);
		phw.hw_addrlen = htons(len);
		NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			__be32 uid = htonl(tx_cache_get_file_ro(skb->sk->sk_socket->file)->f_uid);
			/* need to unlock here since NFA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ) {
		tmp_uint = htonl(inst->seq++);
		NFA_PUT(inst->skb, NFULA_SEQ, sizeof(tmp_uint), &tmp_uint);
	}
	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) {
		tmp_uint = htonl(atomic_inc_return(&global_seq));
		NFA_PUT(inst->skb, NFULA_SEQ_GLOBAL, sizeof(tmp_uint),
			&tmp_uint);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		/* Payload room was supposed to be reserved by the caller;
		 * re-check here because NFA_PUT cannot be used for it. */
		if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
		nfa->nfa_type = NFULA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	inst->lastnlh = nlh;
	return 0;

nlmsg_failure:
	UDEBUG("nlmsg_failure\n");
nfattr_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
/* * Called when an inode is about to be open. * We use this to disallow opening large files on 32bit systems if * the caller didn't specify O_LARGEFILE. On 64bit systems we force * on this flag in sys_open. */ int generic_file_open(struct _inode * inode, struct file * filp) { if (!(tx_cache_get_file_ro(filp)->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EFBIG; return 0; }
/*
 * Vet a SCSI command for SG_IO-style passthrough.  The static table
 * classifies opcodes as read-safe (anyone who can open the device) or
 * write-safe (requires a writable open); everything else needs
 * CAP_SYS_RAWIO.  Returns 0 when permitted, -EPERM otherwise.
 */
static int verify_command(struct file *file, unsigned char *cmd)
{
	static unsigned char cmd_type[256] = {

		/* Basic read-only commands */
		safe_for_read(TEST_UNIT_READY),
		safe_for_read(REQUEST_SENSE),
		safe_for_read(READ_6),
		safe_for_read(READ_10),
		safe_for_read(READ_12),
		safe_for_read(READ_16),
		safe_for_read(READ_BUFFER),
		safe_for_read(READ_DEFECT_DATA),
		safe_for_read(READ_LONG),
		safe_for_read(INQUIRY),
		safe_for_read(MODE_SENSE),
		safe_for_read(MODE_SENSE_10),
		safe_for_read(LOG_SENSE),
		safe_for_read(START_STOP),
		safe_for_read(GPCMD_VERIFY_10),
		safe_for_read(VERIFY_16),

		/* Audio CD commands */
		safe_for_read(GPCMD_PLAY_CD),
		safe_for_read(GPCMD_PLAY_AUDIO_10),
		safe_for_read(GPCMD_PLAY_AUDIO_MSF),
		safe_for_read(GPCMD_PLAY_AUDIO_TI),
		safe_for_read(GPCMD_PAUSE_RESUME),

		/* CD/DVD data reading */
		safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
		safe_for_read(GPCMD_READ_CD),
		safe_for_read(GPCMD_READ_CD_MSF),
		safe_for_read(GPCMD_READ_DISC_INFO),
		safe_for_read(GPCMD_READ_CDVD_CAPACITY),
		safe_for_read(GPCMD_READ_DVD_STRUCTURE),
		safe_for_read(GPCMD_READ_HEADER),
		safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
		safe_for_read(GPCMD_READ_SUBCHANNEL),
		safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
		safe_for_read(GPCMD_REPORT_KEY),
		safe_for_read(GPCMD_SCAN),
		safe_for_read(GPCMD_GET_CONFIGURATION),
		safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
		safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
		safe_for_read(GPCMD_GET_PERFORMANCE),
		safe_for_read(GPCMD_SEEK),
		safe_for_read(GPCMD_STOP_PLAY_SCAN),

		/* Basic writing commands */
		safe_for_write(WRITE_6),
		safe_for_write(WRITE_10),
		safe_for_write(WRITE_VERIFY),
		safe_for_write(WRITE_12),
		safe_for_write(WRITE_VERIFY_12),
		safe_for_write(WRITE_16),
		safe_for_write(WRITE_LONG),
		safe_for_write(WRITE_LONG_2),
		safe_for_write(ERASE),
		safe_for_write(GPCMD_MODE_SELECT_10),
		safe_for_write(MODE_SELECT),
		safe_for_write(LOG_SELECT),
		safe_for_write(GPCMD_BLANK),
		safe_for_write(GPCMD_CLOSE_TRACK),
		safe_for_write(GPCMD_FLUSH_CACHE),
		safe_for_write(GPCMD_FORMAT_UNIT),
		safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
		safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
		safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
		safe_for_write(GPCMD_SEND_EVENT),
		safe_for_write(GPCMD_SEND_KEY),
		safe_for_write(GPCMD_SEND_OPC),
		safe_for_write(GPCMD_SEND_CUE_SHEET),
		safe_for_write(GPCMD_SET_SPEED),
		safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
		safe_for_write(GPCMD_LOAD_UNLOAD),
		safe_for_write(GPCMD_SET_STREAMING),
	};
	unsigned char type = cmd_type[cmd[0]];
	int has_write_perm = 0;

	/* Anybody who can open the device can do a read-safe command */
	if (type & CMD_READ_SAFE)
		return 0;

	/*
	 * file can be NULL from ioctl_by_bdev()...
	 */
	if (file)
		has_write_perm = tx_cache_get_file_ro(file)->f_mode & FMODE_WRITE;

	/* Write-safe commands just require a writable open.. */
	if ((type & CMD_WRITE_SAFE) && has_write_perm)
		return 0;

	/* And root can do any command.. */
	if (capable(CAP_SYS_RAWIO))
		return 0;

	if (!type) {
		/* Warn once per unknown opcode so the table can be extended. */
		cmd_type[cmd[0]] = CMD_WARNED;
		printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
	}

	/* Otherwise fail it with an "Operation not permitted" */
	return -EPERM;
}
/*
 * N_TTY line-discipline write path: push nr bytes from buf to the
 * driver, applying output post-processing (opost) when enabled, and
 * sleeping on write_wait while the driver cannot accept more data.
 * Job-control, hangup and O_NONBLOCK checks are skipped while a
 * transaction commit is in progress.
 *
 * Returns the number of bytes consumed, or a negative errno when
 * nothing was written.
 */
static ssize_t write_chan(struct tty_struct * tty, struct file * file,
			  const unsigned char * buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	ssize_t retval = 0;
	int transaction_commit = committing_transaction();

	/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
	if ((!transaction_commit)
	    && L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
		retval = tty_check_change(tty);
		if (retval)
			return retval;
	}

	add_wait_queue(&tty->write_wait, &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		/* A hung-up tty or a dead pty peer means -EIO (unless we
		 * are flushing deferred writes at commit). */
		if ((!transaction_commit) &&
		    (tty_hung_up_p(file) || (tty->link && !tty->link->count))) {
			retval = -EIO;
			break;
		}
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
			/* Post-processing path: translate in bulk where
			 * possible, then character by character. */
			while (nr > 0) {
				ssize_t num = opost_block(tty, b, nr);
				if (num < 0) {
					if (num == -EAGAIN)
						break;
					retval = num;
					goto break_out;
				}
				b += num;
				nr -= num;
				if (nr == 0)
					break;
				c = *b;
				if (opost(c, tty) < 0)
					break;
				b++; nr--;
			}
			if (tty->driver->flush_chars)
				tty->driver->flush_chars(tty);
		} else {
			/* Raw path: hand bytes straight to the driver. */
			while (nr > 0) {
				c = tty->driver->write(tty, b, nr);
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;
			}
		}
		if (!nr)
			break;
		if ((!transaction_commit) &&
		    (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK)) {
			retval = -EAGAIN;
			break;
		}
		/* Driver full: sleep until woken by write_wait. */
		schedule();
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	/* Partial progress wins over a pending error code. */
	return (b - buf) ? b - buf : retval;
}
/*
 * N_TTY line-discipline read path.  Handles canonical and raw modes,
 * pty packet mode, VMIN/VTIME timeouts, O_NONBLOCK, throttling, and
 * the TTY_PUSH retry.  Reads are serialized on atomic_read_lock.
 *
 * Returns the number of bytes copied to buf, or a negative errno.
 */
static ssize_t read_chan(struct tty_struct *tty, struct file *file,
			 unsigned char __user *buf, size_t nr)
{
	unsigned char __user *b = buf;
	DECLARE_WAITQUEUE(wait, current);
	int c;
	int minimum, time;
	ssize_t retval = 0;
	ssize_t size;
	long timeout;
	unsigned long flags;

do_it_again:

	if (!tty->read_buf) {
		printk("n_tty_read_chan: called with read_buf == NULL?!?\n");
		return -EIO;
	}

	c = job_control(tty, file);
	if(c < 0)
		return c;

	/* Derive VMIN/VTIME semantics for non-canonical mode. */
	minimum = time = 0;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (!tty->icanon) {
		time = (HZ / 10) * TIME_CHAR(tty);
		minimum = MIN_CHAR(tty);
		if (minimum) {
			if (time)
				tty->minimum_to_wake = 1;
			else if (!waitqueue_active(&tty->read_wait) ||
				 (tty->minimum_to_wake > minimum))
				tty->minimum_to_wake = minimum;
		} else {
			timeout = 0;
			if (time) {
				timeout = time;
				time = 0;
			}
			tty->minimum_to_wake = minimum = 1;
		}
	}

	/*
	 *	Internal serialization of reads.
	 */
	if (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&tty->atomic_read_lock))
			return -EAGAIN;
	}
	else {
		if (mutex_lock_interruptible(&tty->atomic_read_lock))
			return -ERESTARTSYS;
	}

	add_wait_queue(&tty->read_wait, &wait);
	while (nr) {
		/* First test for status change. */
		if (tty->packet && tty->link->ctrl_status) {
			unsigned char cs;
			if (b != buf)
				break;
			cs = tty->link->ctrl_status;
			tty->link->ctrl_status = 0;
			if (put_user(cs, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
			break;
		}
		/* This statement must be first before checking for input
		   so that any interrupt will set the state back to
		   TASK_RUNNING. */
		set_current_state(TASK_INTERRUPTIBLE);

		if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
		    ((minimum - (b - buf)) >= 1))
			tty->minimum_to_wake = (minimum - (b - buf));

		if (!input_available_p(tty, 0)) {
			if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
				retval = -EIO;
				break;
			}
			if (tty_hung_up_p(file))
				break;
			if (!timeout)
				break;
			if (tx_cache_get_file_ro(file)->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			n_tty_set_room(tty);
			timeout = schedule_timeout(timeout);
			continue;
		}
		__set_current_state(TASK_RUNNING);

		/* Deal with packet mode. */
		if (tty->packet && b == buf) {
			if (put_user(TIOCPKT_DATA, b++)) {
				retval = -EFAULT;
				b--;
				break;
			}
			nr--;
		}

		if (tty->icanon) {
			/* N.B. avoid overrun if nr == 0 */
			while (nr && tty->read_cnt) {
				int eol;

				eol = test_and_clear_bit(tty->read_tail,
						tty->read_flags);
				c = tty->read_buf[tty->read_tail];
				spin_lock_irqsave(&tty->read_lock, flags);
				tty->read_tail = ((tty->read_tail+1) &
						  (N_TTY_BUF_SIZE-1));
				tty->read_cnt--;
				if (eol) {
					/* this test should be redundant:
					 * we shouldn't be reading data if
					 * canon_data is 0
					 */
					if (--tty->canon_data < 0)
						tty->canon_data = 0;
				}
				spin_unlock_irqrestore(&tty->read_lock, flags);

				if (!eol || (c != __DISABLED_CHAR)) {
					if (put_user(c, b++)) {
						retval = -EFAULT;
						b--;
						break;
					}
					nr--;
				}
				if (eol)
					break;
			}
			if (retval)
				break;
		} else {
			int uncopied;
			/* Copy twice: the circular buffer may wrap. */
			uncopied = copy_from_read_buf(tty, &b, &nr);
			uncopied += copy_from_read_buf(tty, &b, &nr);
			if (uncopied) {
				retval = -EFAULT;
				break;
			}
		}

		/* If there is enough space in the read buffer now, let the
		 * low-level driver know. We use n_tty_chars_in_buffer() to
		 * check the buffer, as it now knows about canonical mode.
		 * Otherwise, if the driver is throttled and the line is
		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
		 * we won't get any more characters.
		 */
		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
			n_tty_set_room(tty);
			check_unthrottle(tty);
		}

		if (b - buf >= minimum)
			break;
		if (time)
			timeout = time;
	}
	mutex_unlock(&tty->atomic_read_lock);
	remove_wait_queue(&tty->read_wait, &wait);

	if (!waitqueue_active(&tty->read_wait))
		tty->minimum_to_wake = minimum;

	__set_current_state(TASK_RUNNING);
	size = b - buf;
	if (size) {
		retval = size;
		if (nr)
			clear_bit(TTY_PUSH, &tty->flags);
	} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
		/* Data was pushed while we slept: retry from the top. */
		goto do_it_again;

	n_tty_set_room(tty);
	return retval;
}
/* * Called when an inode is released. Note that this is different * from ext2_open_file: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext2_release_file (struct _inode * inode, struct file * filp) { if (tx_cache_get_file_ro(filp)->f_mode & FMODE_WRITE) ext2_discard_prealloc (parent(inode)); return 0; }
/*
 * Check the amount of free space and suspend/resume accordingly.
 *
 * Returns whether accounting is active on exit.  The statfs call may
 * block, so acct_globals.lock is dropped around it and the accounting
 * file is re-validated afterwards.
 */
static int check_free_space(struct file *file)
{
	struct kstatfs sbuf;
	int res;
	int act;
	sector_t resume;
	sector_t suspend;

	spin_lock(&acct_globals.lock);
	res = acct_globals.active;
	/* Nothing to do until the timer marks a check as due. */
	if (!file || !acct_globals.needcheck)
		goto out;
	spin_unlock(&acct_globals.lock);

	/* May block */
	if (vfs_statfs(tx_cache_get_file_ro(file)->f_dentry, &sbuf))
		return res;
	suspend = sbuf.f_blocks * SUSPEND;
	resume = sbuf.f_blocks * RESUME;

	sector_div(suspend, 100);
	sector_div(resume, 100);

	/* act: -1 = too full, suspend; +1 = enough room, resume;
	 * 0 = between the thresholds, leave state alone. */
	if (sbuf.f_bavail <= suspend)
		act = -1;
	else if (sbuf.f_bavail >= resume)
		act = 1;
	else
		act = 0;

	/*
	 * If some joker switched acct_globals.file under us we'ld better be
	 * silent and _not_ touch anything.
	 */
	spin_lock(&acct_globals.lock);
	if (file != acct_globals.file) {
		if (act)
			res = act>0;
		goto out;
	}

	if (acct_globals.active) {
		if (act < 0) {
			acct_globals.active = 0;
			printk(KERN_INFO "Process accounting paused\n");
		}
	} else {
		if (act > 0) {
			acct_globals.active = 1;
			printk(KERN_INFO "Process accounting resumed\n");
		}
	}

	/* Re-arm the periodic check. */
	del_timer(&acct_globals.timer);
	acct_globals.needcheck = 0;
	acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ;
	add_timer(&acct_globals.timer);
	res = acct_globals.active;
out:
	spin_unlock(&acct_globals.lock);
	return res;
}