/*
 * rtlx_write - copy user data into the per-minor RTLX circular buffer.
 *
 * Blocks (unless O_NONBLOCK) until spacefree() reports room, then copies
 * up to `count` bytes in at most two chunks (tail of the buffer, then
 * wrap-around to the head).  Returns the number of bytes actually copied,
 * which may be short if copy_from_user() faults partway through.
 *
 * Fixes over the previous version:
 *  - TASK_INTERRUPTIBLE is re-armed on every loop iteration; setting it
 *    once before the loop meant that after the first schedule() returned
 *    (state back to TASK_RUNNING) further iterations busy-spun.
 *  - A pending signal now terminates the wait with -ERESTARTSYS instead
 *    of looping forever in a nominally interruptible sleep.
 */
static ssize_t rtlx_write(struct file *file, const char __user * buffer,
			  size_t count, loff_t * ppos)
{
	unsigned long failed;
	int minor;
	struct rtlx_channel *rt;
	size_t fl;
	DECLARE_WAITQUEUE(wait, current);

	minor = MINOR(file->f_dentry->d_inode->i_rdev);
	rt = &rtlx->channel[minor];

	/* any space left... */
	if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
		for (;;) {
			/* must be re-set each pass: schedule() returns
			 * with the task back in TASK_RUNNING */
			set_current_state(TASK_INTERRUPTIBLE);
			if (spacefree(rt->rt_read, rt->rt_write,
				      rt->buffer_size))
				break;
			if (signal_pending(current)) {
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&channel_wqs[minor].rt_queue,
						  &wait);
				return -ERESTARTSYS;
			}
			schedule();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
	}

	/* total number of bytes to copy */
	count = min(count,
		    (size_t)spacefree(rt->rt_read, rt->rt_write,
				      rt->buffer_size));

	/* first bit from write pointer to the end of the buffer, or count */
	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);

	failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
	if (failed) {
		/* report only the bytes that actually made it */
		count = fl - failed;
		goto out;
	}

	/* if there's any left copy to the beginning of the buffer */
	if (count - fl) {
		failed = copy_from_user(rt->rt_buffer, buffer + fl,
					count - fl);
		if (failed) {
			count -= failed;
			goto out;
		}
	}

out:
	rt->rt_write += count;
	rt->rt_write %= rt->buffer_size;

	return count;
}
static int citty_write_room(struct tty_struct *tty) { struct citty_port *citty = NULL; int index = tty->index; int room = -EINVAL; F_ENTER(); down(&sem_lock_tty[index]); citty = citty_table[index]; if (!citty) { up(&sem_lock_tty[index]); return -ENODEV; } if (!citty->port->count) { PDEBUG("citty_write_room: no port is open."); /* port was not opened */ goto exit; } /* calculate how much room is left in the device */ /* CHECKPOINT */ /* room = CITTY_BUF_SIZE * spacefree( &txCittyBuf ); */ room = CITTY_BUF_SIZE * spacefree(&txCittyBuf[tty->index]); exit: up(&sem_lock_tty[index]); F_LEAVE(); return room; }
/*
 * vmnet_skyeye_poll - poll/select hook for the vmnet device.
 *
 * Registers the caller on both wait queues, then reports POLLIN when
 * the receive list is non-empty and POLLOUT unconditionally (the
 * device is always considered writable).
 *
 * Cleanups over the previous version: the dead "#if 0" scull_pipe
 * fragment (which referenced a nonexistent `dev`) is removed, and
 * `mask` is no longer pre-initialized with the same POLLOUT flags it
 * is OR'd with later — the returned value is unchanged.
 */
static unsigned int vmnet_skyeye_poll(struct file *filp, poll_table *wait)
{
	vmnet_private *my_vmnet_private = (vmnet_private *)filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &my_vmnet_private->inq, wait);
	poll_wait(filp, &my_vmnet_private->outq, wait);

	spin_lock(&my_vmnet_private->vmnet_lock);
	if (my_vmnet_private->vmnet_data_head != NULL)
		mask |= POLLIN | POLLRDNORM;	/* readable */
	mask |= POLLOUT | POLLWRNORM;		/* always writable */
	spin_unlock(&my_vmnet_private->vmnet_lock);

	return mask;
}
/*
 * scull_p_write - write into the scull pipe's circular buffer
 * (LDD2-era variant using Scull_Pipe and in-line sleeping).
 *
 * Blocks until at least one byte of space is available (unless
 * O_NONBLOCK), then accepts at most one contiguous chunk: up to the end
 * of the buffer, or up to rp-1 when the write pointer has wrapped.  A
 * short write is therefore normal; callers are expected to retry.
 *
 * Returns the number of bytes accepted, -EAGAIN, -ERESTARTSYS on
 * signal, -EFAULT on a bad user buffer, or -ESPIPE for an lseek-style
 * position.
 */
ssize_t scull_p_write(struct file *filp, const char *buf, size_t count,
		      loff_t *f_pos)
{
	Scull_Pipe *dev = filp->private_data;

	/* pipes are not seekable: refuse pwrite-style calls that pass a
	 * position other than the file's own f_pos */
	if (f_pos != &filp->f_pos)
		return -ESPIPE;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* Make sure there's space to write */
	while (spacefree(dev) == 0) { /* full */
		/* drop the lock before sleeping so readers can drain */
		up(&dev->sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		PDEBUG("\"%s\" writing: going to sleep\n", current->comm);
		/* wait_event_interruptible re-checks the condition, so a
		 * wakeup racing with the up() above is not lost */
		if (wait_event_interruptible(dev->outq, spacefree(dev) > 0))
			return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
		if (down_interruptible(&dev->sem))
			return -ERESTARTSYS;
	}
	/* ok, space is there, accept something */
	count = min(count, spacefree(dev));
	if (dev->wp >= dev->rp)
		count = min(count, dev->end - dev->wp); /* up to end-of-buffer */
	else /* the write pointer has wrapped, fill up to rp-1 */
		count = min(count, dev->rp - dev->wp - 1);
	PDEBUG("Going to accept %li bytes to %p from %p\n",
	       (long)count, dev->wp, buf);
	if (copy_from_user(dev->wp, buf, count)) {
		up (&dev->sem);
		return -EFAULT;
	}
	dev->wp += count;
	if (dev->wp == dev->end)
		dev->wp = dev->buffer; /* wrapped */
	up(&dev->sem);

	/* finally, awake any reader */
	wake_up_interruptible(&dev->inq); /* blocked in read() and select() */

	/* and signal asynchronous readers, explained late in chapter 5 */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	PDEBUG("\"%s\" did write %li bytes\n", current->comm, (long)count);
	return count;
}
static int scull_getwritespace(struct scull_pipe *dev, struct file *filp) { while (spacefree(dev) == 0) { /* full */ DEFINE_WAIT(wait); up(&dev->sem); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; prepare_to_wait(&dev->outq, &wait, TASK_INTERRUPTIBLE); if (spacefree(dev) == 0) schedule(); finish_wait(&dev->outq, &wait); if (signal_pending(current)) return -ERESTARTSYS; /* signal: tell the fs layer to handle it */ if (down_interruptible(&dev->sem)) return -ERESTARTSYS; } return 0; }
/*
 * scull_p_write - write into the scull pipe's circular buffer.
 *
 * Takes dev->sem, waits for space via scull_getwritespace() (which
 * releases the semaphore itself on failure), then accepts at most one
 * contiguous chunk: up to end-of-buffer, or up to rp-1 when the write
 * pointer has wrapped.  Short writes are normal; callers retry.
 *
 * Returns bytes accepted, or -ERESTARTSYS / -EAGAIN (propagated from
 * scull_getwritespace) / -EFAULT on a bad user buffer.
 *
 * Fix: corrected the "Goint" typo in the trace message.
 */
static ssize_t scull_p_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *f_pos)
{
	struct scull_pipe *dev = filp->private_data;
	int result;

	if (down_interruptible(&dev->sem))
		return -ERESTARTSYS;

	/* Make sure there's space to write */
	result = scull_getwritespace(dev, filp);
	if (result)
		return result; /* scull_getwritespace called up(&dev->sem) */

	/* Space is there, accept something */
	count = min(count, (size_t)spacefree(dev));
	if (dev->wp >= dev->rp)
		count = min(count, (size_t)(dev->end - dev->wp)); /* to end-of-buf */
	else /* the write pointer has wrapped, fill up to rp-1 */
		count = min(count, (size_t)(dev->rp - dev->wp - 1));
	printk(KERN_WARNING "Going to accept %li bytes to %p from %p\n",
	       (long)count, dev->wp, buf);
	if (copy_from_user(dev->wp, buf, count)) {
		up(&dev->sem);
		return -EFAULT;
	}
	dev->wp += count;
	if (dev->wp == dev->end)
		dev->wp = dev->buffer; /* wrapped */
	up(&dev->sem);

	/* finally, awake any reader */
	wake_up_interruptible(&dev->inq); /* blocked in read() and select() */

	/* and signal asynchronous readers */
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	printk(KERN_WARNING "\"%s\" did write %li bytes\n",
	       current->comm, (long)count);
	return count;
}
/*
 * scull_p_poll - poll/select hook for the scull pipe.
 *
 * The buffer is circular: it is empty when rp == wp and full when wp
 * sits right behind rp, so readability and writability are derived
 * from the two pointers under dev->sem.
 */
static unsigned int scull_p_poll(struct file *filp, poll_table *wait)
{
	struct scull_pipe *dev = filp->private_data;
	unsigned int events = 0;

	down(&dev->sem);
	poll_wait(filp, &dev->inq, wait);
	poll_wait(filp, &dev->outq, wait);
	if (dev->rp != dev->wp)
		events |= POLLIN | POLLRDNORM;	/* data waiting */
	if (spacefree(dev))
		events |= POLLOUT | POLLWRNORM;	/* room to write */
	up(&dev->sem);

	return events;
}
/*
 * rtlx_poll - poll/select hook for an RTLX channel minor.
 *
 * Registers on both per-minor wait queues, then reports POLLIN when
 * the lx ring has unread data (read != write index) and POLLOUT when
 * spacefree() says the rt ring can take more.
 */
static unsigned int rtlx_poll(struct file *file, poll_table * wait)
{
	unsigned int events = 0;
	int idx = MINOR(file->f_dentry->d_inode->i_rdev);
	struct rtlx_channel *ch = &rtlx->channel[idx];

	poll_wait(file, &channel_wqs[idx].rt_queue, wait);
	poll_wait(file, &channel_wqs[idx].lx_queue, wait);

	/* data available to read? */
	if (ch->lx_read != ch->lx_write)
		events |= POLLIN | POLLRDNORM;

	/* space to write */
	if (spacefree(ch->rt_read, ch->rt_write, ch->buffer_size))
		events |= POLLOUT | POLLWRNORM;

	return events;
}
/*
 * write_citty_buffer - store `count` bytes from `buf` into the next
 * slot of the multi-slot circular buffer `cittyBuf`.
 *
 * `action` selects the copy method: COPY_FROM_USER (user-space source,
 * copy_from_user) or COPY_FROM_CITTY (kernel source, memcpy).  Blocks
 * under cittyBuf->gSem until spacefree() reports a free slot, then
 * claims slot iBufIn (wrapping at NUM_CITTY_BUF), copies at most
 * CITTY_BUF_SIZE bytes, records the length, and wakes readers on gInq.
 *
 * Returns the byte count stored.
 *
 * NOTE(review): the return type is size_t (unsigned) yet the error
 * paths return -EIO / -ERESTARTSYS / -EFAULT; callers comparing < 0
 * will never see those as negative — should probably be ssize_t.
 * NOTE(review): the first spacefree() check below runs before the
 * semaphore is taken, so it is only a racy best-effort early-out.
 */
size_t write_citty_buffer(struct buf_struct *cittyBuf, const char *buf,
			  size_t count, short action)
{
	unsigned char *pbuf;
	struct semaphore *lSem;
	int curBufIndex;
	DEFINE_WAIT(wait);

	F_ENTER();

	/* make it a non-blocking write */
	if (spacefree(cittyBuf) == 0) {
		printk(KERN_ERR "\"%s\" warning: Write Buffer overflow.\n",
		       current->comm);
		return -EIO;
	}

	lSem = &(cittyBuf->gSem);
	if (down_interruptible(lSem)) {
		printk(KERN_ERR "\"%s\" Error: Unable to down SEM.\n",
		       current->comm);
		return -ERESTARTSYS;
	}

	/* Make sure there's space to write */
	while (spacefree(cittyBuf) == 0) { /* full */
		PDEBUG("\"%s\" Going to define wait:", current->comm);

		up(lSem); /* release the lock */
		/* if (filp->f_flags & O_NONBLOCK) */
		/* return -EAGAIN; */

		PDEBUG("\"%s\" writing: going to sleep", current->comm);
		/* prepare_to_wait/finish_wait protocol: the re-check after
		 * queueing prevents a lost wakeup racing with up() above */
		prepare_to_wait(&cittyBuf->gOutq, &wait, TASK_INTERRUPTIBLE);
		if (spacefree(cittyBuf) == 0) {
			/* seem like it is bad: scheduling while atomic */
			schedule();
		}
		finish_wait(&cittyBuf->gOutq, &wait);
		if (signal_pending(current)) {
			printk(KERN_ERR "\"%s\" Error: Unable to signal_pending.\n",
			       current->comm);
			/* signal: tell the fs layer to handle it */
			return -ERESTARTSYS;
		}
		if (down_interruptible(lSem)) {
			printk(KERN_ERR "\"%s\" Error: Unable to down SEM.\n",
			       current->comm);
			return -ERESTARTSYS;
		}
	}

	/* claim the next slot; NOTE(review): iBufIn is bumped before the
	 * slot pointer is validated below */
	curBufIndex = cittyBuf->iBufIn++;
	pbuf = cittyBuf->pBuf[curBufIndex];

	PDEBUG("\"%s\" Going to check flip", current->comm);

	/*
	 * Check if it is flipped
	 */
	if (cittyBuf->iBufIn >= NUM_CITTY_BUF)
		cittyBuf->iBufIn = cittyBuf->iBufIn % NUM_CITTY_BUF;

	/* Check space */
	if (pbuf == NULL) {
		printk(KERN_WARNING "warning: Buffer overflowed.\n");
		up(lSem);
		return -EIO;
	}

	/* ok, space is there, accept something */
	/* write only up to the size of the buffer */
	if (count > CITTY_BUF_SIZE) {
		count = CITTY_BUF_SIZE;
		printk(KERN_WARNING "warning: Buffer too size to write.\n");
	}

	if (action == COPY_FROM_USER) {
		PDEBUG("%s: going to copy_from_user at buf " \
		       "index %d and count %d", __func__, curBufIndex, count);
		if (copy_from_user((pbuf), buf, count)) {
			up(lSem);
			return -EFAULT;
		}
	} else if (action == COPY_FROM_CITTY) {
		/* it is from the cinet_hard_start_xmit */
		PDEBUG("%s: going to COPY_FROM_CITTY at buf " \
		       "index %d and count %d", __func__, curBufIndex, count);
		memcpy(pbuf, buf, count);
	} else {
		printk(KERN_WARNING "undefined action.\n");
	}

	/* saving datalen */
	cittyBuf->iDatalen[curBufIndex] = count;

	up(lSem);

	/* finally, awake any reader */
	/* blocked in read() and select() */
	wake_up_interruptible(&cittyBuf->gInq);

	F_LEAVE();
	return count;
}
/*
 * write_citty_buffer - store `count` bytes from `buf` into the next
 * slot of the multi-slot circular buffer `cittyBuf` (mutex variant).
 *
 * `action` selects the copy method: COPY_FROM_USER (user-space source,
 * copy_from_user) or COPY_FROM_CITTY (kernel source, memcpy).  Blocks
 * under cittyBuf->gMutex until spacefree() reports a free slot, then
 * claims slot iBufIn (wrapping at NUM_CITTY_BUF), copies at most
 * CITTY_BUF_SIZE bytes, records the length, and wakes readers on gInq.
 *
 * Returns the byte count stored.
 *
 * NOTE(review): the return type is size_t (unsigned) yet the error
 * paths return -EIO / -ERESTARTSYS / -EFAULT; callers comparing < 0
 * will never see those as negative — should probably be ssize_t.
 * NOTE(review): the first spacefree() check below runs before the
 * mutex is taken, so it is only a racy best-effort early-out.
 */
size_t write_citty_buffer(struct buf_struct *cittyBuf, const char *buf,
			  size_t count, short action)
{
	unsigned char *pbuf;
	struct mutex *ttylock;
	int curBufIndex;

	F_ENTER();

	/* make it a non-blocking write */
	if (spacefree(cittyBuf) == 0) {
		pr_err("\"%s\" warning: Write Buffer overflow.\n",
		       current->comm);
		return -EIO;
	}

	ttylock = &(cittyBuf->gMutex);
	if (mutex_lock_interruptible(ttylock))
		return -ERESTARTSYS;

	/* Make sure there's space to write */
	while (spacefree(cittyBuf) == 0) { /* full */
		PDEBUG("\"%s\" Going to define wait:", current->comm);

		mutex_unlock(ttylock); /* release the lock */
		/* if (filp->f_flags & O_NONBLOCK) */
		/* return -EAGAIN; */

		PDEBUG("\"%s\" writing: going to sleep", current->comm);
		/* wait_event_interruptible re-checks the condition, so a
		 * wakeup racing with the unlock above is not lost */
		if (wait_event_interruptible
		    (cittyBuf->gOutq, spacefree(cittyBuf))) {
			/* waken up by signal, get the lock and process it */
			return -ERESTARTSYS;
		}
		if (mutex_lock_interruptible(ttylock))
			return -ERESTARTSYS;
	}

	/* claim the next slot; NOTE(review): iBufIn is bumped before the
	 * slot pointer is validated below */
	curBufIndex = cittyBuf->iBufIn++;
	pbuf = cittyBuf->pBuf[curBufIndex];

	PDEBUG("\"%s\" Going to check flip", current->comm);

	/*
	 * Check if it is flipped
	 */
	if (cittyBuf->iBufIn >= NUM_CITTY_BUF)
		cittyBuf->iBufIn = cittyBuf->iBufIn % NUM_CITTY_BUF;

	/* Check space */
	if (pbuf == NULL) {
		pr_warn("warning: Buffer overflowed.\n");
		mutex_unlock(ttylock);
		return -EIO;
	}

	/* ok, space is there, accept something */
	/* write only up to the size of the buffer */
	if (count > CITTY_BUF_SIZE) {
		count = CITTY_BUF_SIZE;
		pr_warn("warning: Buffer too size to write.\n");
	}

	if (action == COPY_FROM_USER) {
		PDEBUG("%s: going to copy_from_user at buf "
		       "index %d and count %d", __func__, curBufIndex, count);
		if (copy_from_user((pbuf), buf, count)) {
			mutex_unlock(ttylock);
			return -EFAULT;
		}
	} else if (action == COPY_FROM_CITTY) {
		/* it is from the cinet_hard_start_xmit */
		PDEBUG("%s: going to COPY_FROM_CITTY at buf "
		       "index %d and count %d", __func__, curBufIndex, count);
		memcpy(pbuf, buf, count);
	} else {
		pr_warn("undefined action.\n");
	}

	/* saving datalen */
	cittyBuf->iDatalen[curBufIndex] = count;

	mutex_unlock(ttylock);

	/* finally, awake any reader */
	/* blocked in read() and select() */
	wake_up_interruptible(&cittyBuf->gInq);

	F_LEAVE();
	return count;
}