/*
 * tty_ldisc_hangup - hang up the line discipline of a tty
 * @tty: tty being hung up
 *
 * Flushes the ldisc and driver buffers, kicks any sleeping readers and
 * writers, then halts the current line discipline and re-opens it — or
 * falls back to N_TTY when the driver requested a termios reset
 * (TTY_DRIVER_RESET_TERMIOS) or the re-open failed.  Serialised against
 * set_ldisc/tty_ldisc_release via tty->ldisc_mutex.
 */
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	/* non-zero when the driver wants termios reset to defaults on hangup */
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	/*
	 * FIXME! What are the locking issues here? This may me overdoing
	 * things... This question is especially important now that we've
	 * removed the irqlock.
	 */
	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}
	/*
	 * FIXME: Once we trust the LDISC code better we can wait here for
	 * ldisc completion and fix the driver call race
	 */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);
	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);
	tty_ldisc_halt(tty);
	/* At this point we have a closed ldisc and we want to reopen it. We
	   could defer this to the next open but it means auditing a lot of
	   other paths so this is a FIXME */
	if (tty->ldisc) {	/* Not yet closed */
		if (reset == 0) {
			/* keep the line discipline the user had selected */
			tty_ldisc_reinit(tty, tty->termios->c_line);
			err = tty_ldisc_open(tty, tty->ldisc);
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			tty_ldisc_reinit(tty, N_TTY);
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
// will be called when modem sends us something. // we will then copy it to the tty's buffer. // this is essentially the "read" fops. static void ccci_tty_callback(CCCI_BUFF_T *buff, void *private_data) { switch(buff->channel) { case CCCI_UART1_TX_ACK: { // this should be in an interrupt, // so no locking required... ccci_tty_meta.ready = 1; wake_up_interruptible(ccci_tty_meta.write_waitq); wake_up_interruptible_poll(&ccci_tty_driver->ttys[CCCI_TTY_META]->write_wait,POLLOUT); } break; case CCCI_UART1_RX: { ccci_tty_meta_read_tasklet.data = (unsigned long) &ccci_tty_meta; tasklet_schedule(&ccci_tty_meta_read_tasklet); } break; case CCCI_UART2_TX_ACK: { // this should be in an interrupt, // so no locking required... ccci_tty_modem.ready = 1; wake_up_interruptible(ccci_tty_modem.write_waitq); wake_up_interruptible_poll(&ccci_tty_driver->ttys[CCCI_TTY_MODEM]->write_wait,POLLOUT); } break; case CCCI_UART2_RX: { ccci_tty_modem_read_tasklet.data = (unsigned long) &ccci_tty_modem; tasklet_schedule(&ccci_tty_modem_read_tasklet); } break; case CCCI_IPC_UART_TX_ACK: { ccci_tty_ipc.ready = 1; wake_up_interruptible(ccci_tty_ipc.write_waitq); wake_up_interruptible_poll(&ccci_tty_driver->ttys[CCCI_TTY_IPC]->write_wait,POLLOUT); } break; case CCCI_IPC_UART_RX: { ccci_tty_ipc_read_tasklet.data = (unsigned long) &ccci_tty_ipc; tasklet_schedule(&ccci_tty_ipc_read_tasklet); } break; default: break; } }
/** * iio_dma_buffer_block_done() - Indicate that a block has been completed * @block: The completed block * * Should be called when the DMA controller has finished handling the block to * pass back ownership of the block to the queue. */ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) { struct iio_dma_buffer_queue *queue = block->queue; unsigned long flags; spin_lock_irqsave(&queue->list_lock, flags); _iio_dma_buffer_block_done(block); spin_unlock_irqrestore(&queue->list_lock, flags); iio_buffer_block_put_atomic(block); wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); }
static void pty_stop(struct tty_struct *tty) { unsigned long flags; if (tty->link && tty->link->packet) { spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status &= ~TIOCPKT_START; tty->ctrl_status |= TIOCPKT_STOP; spin_unlock_irqrestore(&tty->ctrl_lock, flags); wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); } }
/*
 * iio_store_to_kfifo - push one datum into the kfifo-backed IIO buffer.
 * @r:    IIO buffer (wraps a struct iio_kfifo)
 * @data: one record to copy in
 *
 * Returns 0 on success, -EBUSY when the fifo had no room for the record.
 * On success, wakes poll() sleepers waiting for readable data.
 */
static int iio_store_to_kfifo(struct iio_buffer *r, const void *data)
{
	struct iio_kfifo *fifo = iio_to_kfifo(r);
	int copied = kfifo_in(&fifo->kf, data, 1);

	if (copied != 1)
		return -EBUSY;		/* fifo full: nothing was stored */

	/* A record arrived: readers polling for POLLIN can proceed. */
	wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
	return 0;
}
/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Invoked when send-buffer space is released.  Once the writeable
 * threshold is reached (sk_stream_wspace() >= sk_stream_min_wspace()),
 * clears SOCK_NOSPACE and wakes both poll()/select() sleepers and
 * fasync (SIGIO) listeners so blocked writers can make progress.
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
		/* Clear before waking so a racing writer re-tests space. */
		clear_bit(SOCK_NOSPACE, &sock->flags);

		/* Skip the wakeup when nobody is actually sleeping. */
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible_poll(sk->sk_sleep, POLLOUT |
						   POLLWRNORM | POLLWRBAND);
		/* No SIGIO to writers once the send side has been shut down. */
		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
	}
}
static void kni_sk_write_space(struct sock *sk) { wait_queue_head_t *wqueue; if (!sock_writeable(sk) || !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_poll( wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); }
static int iio_kfifo_remove_from(struct iio_buffer *r, void *data) { int ret; struct iio_kfifo *kf = iio_to_kfifo(r); if (kfifo_size(&kf->kf) < r->bytes_per_datum) return -EBUSY; ret = kfifo_out(&kf->kf, data, r->bytes_per_datum); if (ret != r->bytes_per_datum) return -EBUSY; wake_up_interruptible_poll(&r->pollq, POLLOUT | POLLWRNORM); return 0; }
/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Invoked when send-buffer space is released.  If the socket is now
 * writeable (sk_stream_is_writeable()), clears SOCK_NOSPACE and wakes
 * both poll()/select() sleepers and fasync (SIGIO) listeners.  The wait
 * queue is reached through sk->sk_wq under rcu_read_lock(), matching
 * the RCU-managed lifetime of struct socket_wq.
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct socket_wq *wq;

	if (sk_stream_is_writeable(sk) && sock) {
		/* Clear before waking so a racing writer re-tests space. */
		clear_bit(SOCK_NOSPACE, &sock->flags);

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		/* Only issue the wakeup when someone is actually waiting. */
		if (wq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
		/* No SIGIO to writers once the send side has been shut down. */
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait_memory().
 * called under sk_socket lock.
 *
 * Mirrors sk_stream_write_space(): when the SMC connection reports free
 * send-buffer space (conn.sndbuf_space), clear SOCK_NOSPACE and wake
 * both poll()/epoll sleepers and fasync (SIGIO) listeners.  The wait
 * queue is reached through sk->sk_wq under rcu_read_lock(), matching
 * the RCU-managed lifetime of struct socket_wq.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		/* Clear before waking so a racing writer re-tests space. */
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		/* Only issue the wakeup when someone is actually waiting. */
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		/* No SIGIO to writers once the send side has been shut down. */
		if (wq && wq->fasync_list &&
		    !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
/*
 * tty_ldisc_hangup - hang up the line discipline of a tty
 * @tty: tty being hung up
 *
 * Flushes the ldisc and driver buffers, kicks any sleeping readers and
 * writers, then — with both ends of a pty pair locked via
 * tty_ldisc_lock_pair() — closes the halted ldisc and re-opens it, or
 * falls back to N_TTY when the driver requested a termios reset
 * (TTY_DRIVER_RESET_TERMIOS) or the re-open failed.  Note the tty lock
 * is dropped before taking the pair lock and re-taken afterwards.
 */
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	/* non-zero when the driver wants termios reset to defaults on hangup */
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}

	/* kick anyone blocked in read()/write()/poll() on this tty */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);

	tty_unlock(tty);

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	tty_ldisc_lock_pair(tty, tty->link);
	tty_lock(tty);

	if (tty->ldisc) {
		/* At this point we have a halted ldisc; we want to close it and
		   reopen a new ldisc. We could defer the reopen to the next
		   open but it means auditing a lot of other paths so this is
		   a FIXME */
		if (reset == 0) {
			if (!tty_ldisc_reinit(tty, tty->termios.c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
	}
	tty_ldisc_enable_pair(tty, tty->link);
	if (reset)
		tty_reset_termios(tty);

	tty_ldisc_debug(tty, "re-opened ldisc: %p\n", tty->ldisc);
}
/*
 * tty_write_unlock - release the tty atomic write lock
 * @tty: tty whose write path is being unlocked
 *
 * Drops atomic_write_lock and then wakes POLLOUT sleepers on write_wait
 * so a writer blocked waiting for the lock (or for poll writability)
 * can retry.  The wakeup must follow the unlock.
 */
void tty_write_unlock(struct tty_struct *tty)
{
	mutex_unlock(&tty->atomic_write_lock);
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}
/*
 * emd_cfifo_write - write() fops for an EMD circular-FIFO instance.
 * @file:  open file; private_data is the cfifo_instance_t
 * @buf:   user buffer to copy from
 * @count: requested byte count
 * @ppos:  unused
 *
 * Copies up to @count bytes from user space into the shared-memory TX
 * ring (wrapping in at most two copy_from_user() calls), advances the
 * shared write pointer, then wakes the peer's readers/pollers.  Blocks
 * until space is available unless O_NONBLOCK is set (-EAGAIN), and
 * returns -EINTR if the wait is interrupted by a signal.
 *
 * Returns the number of bytes written, 0 for a zero-length request, or
 * a negative errno.
 *
 * NOTE(review): read/write/length are snapshotted before the blocking
 * wait; this relies on each side owning its own pointer in the shared
 * ring — confirm against the peer's update protocol.
 */
static ssize_t emd_cfifo_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	cfifo_instance_t *cfifo_instance = (cfifo_instance_t *)file->private_data;
	int ret = 0, part;
	int data_be_write, size, value;
	unsigned read, write, length;
	char *tx_buffer;

	if (count == 0) {
		EMD_MSG_INF("cfifo", "emd_cfifo_write: count=0\n");
		return 0;
	}
	mutex_lock(&cfifo_instance->emd_cfifo_mutex);
	data_be_write = (int)count;
#if 0
	EMD_MSG_INF("cfifo", "emd_cfifo%d_write: write_request=%d write=%d read=%d\n",
		    cfifo_instance->idx, data_be_write,
		    *(cfifo_instance->shared_mem.tx_control.write),
		    *(cfifo_instance->shared_mem.tx_control.read));
#endif
	size = 0;
	/* Check free space */
	read = (int)(*(cfifo_instance->shared_mem.tx_control.read));
	write = (int)(*(cfifo_instance->shared_mem.tx_control.write));
	length = (int)(*(cfifo_instance->shared_mem.tx_control.length));
	tx_buffer = cfifo_instance->shared_mem.tx_control.buffer;
	do {
		size = emd_cfifo_writeable(cfifo_instance);
		if (size == 0) {
			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				goto out;
			} else {
				/* Block write: sleep until the ring has room */
				value = wait_event_interruptible(cfifo_instance->write_waitq,
								 emd_cfifo_writeable(cfifo_instance));
				if (value == -ERESTARTSYS) {
					EMD_MSG_INF("cfifo", "(W)Interrupted syscall.signal_pend=0x%llx\n",
						    *(long long *)current->pending.signal.sig);
					ret = -EINTR;
					goto out;
				}
			}
		} else
			break;
	} while (size == 0);
	/* Calculate write size: clamp the request to the free space found */
	if (data_be_write >= size)
		data_be_write = size;
	if ((write + data_be_write) >= length) {
		/* Request wraps the ring end: write in two parts */
		/* 1st write: up to the end of the ring buffer */
		part = length - write;
		ret = copy_from_user(&tx_buffer[write], buf, part);
		if (ret) {
			EMD_MSG_INF("cfifo", "write: copy from user fail:tx_buffer=0x%08x,write=%d, buf=%08x, part=%d ret=%d line=%d\n",
				    (unsigned int)tx_buffer, write, (unsigned int)buf, part, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
		/* 2nd write: remainder from the start of the ring */
		ret = copy_from_user(tx_buffer, &buf[part], data_be_write - part);
		if (ret) {
			EMD_MSG_INF("cfifo", "write: copy from user fail:tx_buffer=0x%08x,buf=%08x,part=%d,data_be_write - part=%d ret=%d line=%d\n",
				    (unsigned int)tx_buffer, (unsigned int)buf, part, data_be_write - part, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
	} else {
		/* Write once is OK: no wrap needed */
		ret = copy_from_user(&tx_buffer[write], buf, data_be_write);
		if (ret) {
			EMD_MSG_INF("cfifo", "write: copy from user fail:tx_buffer=0x%08x,write=%d,buf=%08x,data_be_write=%d ret=%d line=%d\n",
				    (unsigned int)tx_buffer, write, (unsigned int)buf, data_be_write, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
	}
	/* Update write pointer (original comment said "read pointer" —
	   this advances the shared TX write index) */
	write += data_be_write;
	if (write >= length)
		write -= length;
	*(cfifo_instance->shared_mem.tx_control.write) = write;
	ret = data_be_write;
out:
	mutex_unlock(&cfifo_instance->emd_cfifo_mutex);
	/* Data landed in the peer's RX ring: wake its readers and pollers */
	if (emd_cfifo_readable(cfifo_instance->other_side)) {
		wake_up_interruptible(&cfifo_instance->other_side->read_waitq);
		wake_up_interruptible_poll(&cfifo_instance->other_side->poll_waitq_r, POLLIN);
		wake_lock_timeout(&cfifo_instance->other_side->wake_lock, HZ / 2);
		if (cfifo_instance->idx == 1) {
			/* muxd write, so wake up md to read */
			request_wakeup_md_timeout(1, 1);
		}
	}
	EMD_MSG_INF("cfifo", "emd_cfifo%d_write: ret=%d\n", cfifo_instance->idx, ret);
	return ret;
}
/*
 * emd_cfifo_read - read() fops for an EMD circular-FIFO instance.
 * @file:  open file; private_data is the cfifo_instance_t
 * @buf:   user buffer to copy into
 * @count: maximum byte count requested
 * @ppos:  unused
 *
 * Copies up to @count bytes from the shared-memory RX ring to user
 * space (wrapping in at most two copy_to_user() calls), advances the
 * shared read pointer, then wakes the peer's writers/pollers.  Blocks
 * until data is available unless O_NONBLOCK is set (-EAGAIN), and
 * returns -EINTR if the wait is interrupted by a signal.
 *
 * Returns the number of bytes read or a negative errno.  Note the
 * error paths jump past the peer wakeup and the final success log.
 */
static ssize_t emd_cfifo_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	cfifo_instance_t *cfifo_instance = (cfifo_instance_t *)file->private_data;
	int part, size, ret = 0;
	int value;
	unsigned read, write, length;
	char *rx_buffer;
	int data_be_read;
	char *u_buf = buf;

	/* snapshot the shared RX ring indices and buffer base */
	read = (int)(*(cfifo_instance->shared_mem.rx_control.read));
	write = (int)(*(cfifo_instance->shared_mem.rx_control.write));
	length = (int)(*(cfifo_instance->shared_mem.rx_control.length));
	rx_buffer = cfifo_instance->shared_mem.rx_control.buffer;
	/* EMD_MSG_INF("cfifo","emd_cfifo%d_read: read=%d,wirte=%d,length=%d,count=%d\n",cfifo_instance->idx,read,write,length,count); */
	do {
		size = emd_cfifo_readable(cfifo_instance);
		if (size == 0) {
			if (file->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				goto out;
			} else {
				/* block until the peer produces data */
				value = wait_event_interruptible(cfifo_instance->read_waitq,
								 emd_cfifo_readable(cfifo_instance));
				if (value == -ERESTARTSYS) {
					EMD_MSG_INF("cfifo", "Interrupted syscall.signal_pend=0x%llx\n",
						    *(long long *)current->pending.signal.sig);
					ret = -EINTR;
					goto out;
				}
			}
		} else {
			break;
		}
	} while (size == 0);
	/* clamp the request to what is actually available */
	data_be_read = (int)count;
	if (data_be_read > size)
		data_be_read = size;
	/* copy_to_user may be scheduled, so add a 1s wake lock to make
	   sure the emd user can keep running. */
	wake_lock_timeout(&cfifo_instance->wake_lock, HZ);
	if ((read + data_be_read) >= length) {
		/* Need read twice: the data wraps the end of the ring */
		/* Copy first part: up to the end of the ring buffer */
		part = length - read;
		if (copy_to_user(u_buf, &rx_buffer[read], part)) {
			EMD_MSG_INF("cfifo", "read: copy_to_user fail:u_buf=%08x,rx_buffer=0x%08x,read=%d,size=%d ret=%d line=%d\n",
				    (unsigned int)u_buf, (unsigned int)rx_buffer, read, part, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
		/* Copy second part: remainder from the start of the ring */
		if (copy_to_user(&u_buf[part], rx_buffer, data_be_read - part)) {
			EMD_MSG_INF("cfifo", "read: copy_to_user fail:u_buf=%08x,rx_buffer=0x%08x,read=%d,size=%d ret=%d line=%d\n",
				    (unsigned int)u_buf, (unsigned int)rx_buffer, read, data_be_read - part, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
	} else {
		/* Just need read once: no wrap */
		if (copy_to_user(u_buf, &rx_buffer[read], data_be_read)) {
			EMD_MSG_INF("cfifo", "read: copy_to_user fail:u_buf=%08x,rx_buffer=0x%08x,read=%d,size=%d ret=%d line=%d\n",
				    (unsigned int)u_buf, (unsigned int)rx_buffer, read, data_be_read, ret, __LINE__);
			ret = -EFAULT;
			goto out;
		}
	}
	/* Update read pointer */
	read += data_be_read;
	if (read >= length)
		read -= length;
	*(cfifo_instance->shared_mem.rx_control.read) = read;
	ret = data_be_read;
	/* Space was freed in our RX ring: wake the peer's writers */
	if (emd_cfifo_writeable(cfifo_instance->other_side)) {
		wake_up_interruptible(&cfifo_instance->other_side->write_waitq);
		wake_up_interruptible_poll(&cfifo_instance->other_side->poll_waitq_w, POLLOUT);
	}
	EMD_MSG_INF("cfifo", "emd_cfifo%d_read: ret=%d\n", cfifo_instance->idx, ret);
out:
	return ret;
}
/*
 * tty_ldisc_hangup - hang up the line discipline of a tty
 * @tty: tty being hung up
 *
 * Flushes the ldisc and driver buffers, kicks any sleeping readers and
 * writers, then shuts down the current line discipline and re-opens it
 * — or falls back to N_TTY when the driver requested a termios reset
 * (TTY_DRIVER_RESET_TERMIOS) or the re-open failed.  The halt sequence
 * drops the BTM around cancel_delayed_work_sync() to avoid deadlocking
 * against work that itself takes the BTM.
 */
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	/* non-zero when the driver wants termios reset to defaults on hangup */
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}

	/* kick anyone blocked in read()/write()/poll() on this tty */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);

	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);

	/*
	 * this is like tty_ldisc_halt, but we need to give up
	 * the BTM before calling cancel_delayed_work_sync,
	 * which may need to wait for another function taking the BTM
	 */
	clear_bit(TTY_LDISC, &tty->flags);
	tty_unlock();
	cancel_delayed_work_sync(&tty->buf.work);
	mutex_unlock(&tty->ldisc_mutex);

	tty_lock();
	mutex_lock(&tty->ldisc_mutex);
	if (tty->ldisc) {	/* Not yet closed */
		if (reset == 0) {
			if (!tty_ldisc_reinit(tty, tty->termios->c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
/*
 * tty_ldisc_hangup - hang up the line discipline of a tty
 * @tty: tty being hung up
 *
 * Flushes the ldisc and driver buffers, kicks any sleeping readers and
 * writers, then shuts down the current line discipline and re-opens it
 * — or falls back to N_TTY when the driver requested a termios reset
 * (TTY_DRIVER_RESET_TERMIOS) or the re-open failed.  If other users
 * still hold the ldisc, drops the locks and loops on
 * tty_ldisc_wait_idle() (with rate-limited warnings) before retrying
 * the reinit.
 */
void tty_ldisc_hangup(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	/* non-zero when the driver wants termios reset to defaults on hangup */
	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
	int err = 0;

	/*
	 * FIXME! What are the locking issues here? This may me overdoing
	 * things... This question is especially important now that we've
	 * removed the irqlock.
	 */
	ld = tty_ldisc_ref(tty);
	if (ld != NULL) {
		/* We may have no line discipline at this point */
		if (ld->ops->flush_buffer)
			ld->ops->flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
		    ld->ops->write_wakeup)
			ld->ops->write_wakeup(tty);
		if (ld->ops->hangup)
			ld->ops->hangup(tty);
		tty_ldisc_deref(ld);
	}
	/*
	 * FIXME: Once we trust the LDISC code better we can wait here for
	 * ldisc completion and fix the driver call race
	 */
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
	wake_up_interruptible_poll(&tty->read_wait, POLLIN);
	/*
	 * Shutdown the current line discipline, and reset it to
	 * N_TTY if need be.
	 *
	 * Avoid racing set_ldisc or tty_ldisc_release
	 */
	mutex_lock(&tty->ldisc_mutex);
	/*
	 * this is like tty_ldisc_halt, but we need to give up
	 * the BTM before calling cancel_work_sync, which may
	 * need to wait for another function taking the BTM
	 */
	clear_bit(TTY_LDISC, &tty->flags);
	tty_unlock(tty);
	cancel_work_sync(&tty->port->buf.work);
	mutex_unlock(&tty->ldisc_mutex);
retry:
	tty_lock(tty);
	mutex_lock(&tty->ldisc_mutex);
	/* At this point we have a closed ldisc and we want to reopen it. We
	   could defer this to the next open but it means auditing a lot of
	   other paths so this is a FIXME */
	if (tty->ldisc) {	/* Not yet closed */
		if (atomic_read(&tty->ldisc->users) != 1) {
			char cur_n[TASK_COMM_LEN], tty_n[64];
			long timeout = 3 * HZ;
			/* other users still hold references: drop the locks
			   and wait for the ldisc to go idle, then retry */
			tty_unlock(tty);
			while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
				timeout = MAX_SCHEDULE_TIMEOUT;
				printk_ratelimited(KERN_WARNING
					"%s: waiting (%s) for %s took too long, but we keep waiting...\n",
					__func__, get_task_comm(cur_n, current),
					tty_name(tty, tty_n));
			}
			mutex_unlock(&tty->ldisc_mutex);
			goto retry;
		}
		if (reset == 0) {
			if (!tty_ldisc_reinit(tty, tty->termios.c_line))
				err = tty_ldisc_open(tty, tty->ldisc);
			else
				err = 1;
		}
		/* If the re-open fails or we reset then go to N_TTY. The
		   N_TTY open cannot fail */
		if (reset || err) {
			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
		}
		tty_ldisc_enable(tty);
	}
	mutex_unlock(&tty->ldisc_mutex);
	if (reset)
		tty_reset_termios(tty);
}
/*
 * kni_chk_vhost_rx - move received mbufs into the vhost skb queue.
 * @kni: KNI device instance
 *
 * Pairs up to RX_BURST_SZ mbufs from kni->rx_q with pre-posted skbs
 * from the vhost queue fifo: full RX_BURST_SZ chunks go through the
 * burst enqueue path, any remainder is enqueued one by one.  Wakes
 * POLLIN sleepers when a wakeup is warranted (burst filled, skb fifo
 * drained, or a partial mbuf backlog remains).  Returns 0; a fifo
 * get shortfall is treated as impossible (BUG_ON).
 */
int kni_chk_vhost_rx(struct kni_dev *kni)
{
	struct kni_vhost_queue *q = kni->vhost_queue;
	unsigned nb_in, nb_mbuf, nb_skb;
	const unsigned BURST_MASK = RX_BURST_SZ - 1;
	unsigned nb_burst, nb_backlog, i;
	struct sk_buff *skb[RX_BURST_SZ];
	struct rte_kni_mbuf *va[RX_BURST_SZ];

	/* backend is stopping: acknowledge and bail out */
	if (unlikely(BE_STOP & kni->vq_status)) {
		kni->vq_status |= BE_FINISH;
		return 0;
	}

	if (unlikely(q == NULL))
		return 0;

	/* we can move only as many packets as we have BOTH mbufs and skbs for */
	nb_skb = kni_fifo_count(q->fifo);
	nb_mbuf = kni_fifo_count(kni->rx_q);

	nb_in = min(nb_mbuf, nb_skb);
	nb_in = min(nb_in, (unsigned)RX_BURST_SZ);
	nb_burst = (nb_in & ~BURST_MASK);	/* full-burst portion */
	nb_backlog = (nb_in & BURST_MASK);	/* leftover, handled singly */

	/* enqueue skb_queue per BURST_SIZE bulk */
	if (0 != nb_burst) {
		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     kni->rx_q, (void **)&va,
				     RX_BURST_SZ)))
			goto except;

		if (unlikely(RX_BURST_SZ != kni_fifo_get(
				     q->fifo, (void **)&skb,
				     RX_BURST_SZ)))
			goto except;

		kni_vhost_enqueue_burst(kni, q, skb, va);
	}

	/* all leftover, do one by one */
	for (i = 0; i < nb_backlog; ++i) {
		if (unlikely(1 != kni_fifo_get(
				     kni->rx_q, (void **)&va, 1)))
			goto except;

		if (unlikely(1 != kni_fifo_get(
				     q->fifo, (void **)&skb, 1)))
			goto except;

		kni_vhost_enqueue(kni, q, *skb, *va);
	}

	/* Ondemand wake up */
	if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
	    ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
		wake_up_interruptible_poll(sk_sleep(&q->sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		KNI_DBG_RX("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
			   nb_mbuf, nb_skb, nb_in);
	}

	return 0;

except:
	/* Failing should not happen */
	KNI_ERR("Fail to enqueue fifo, it shouldn't happen \n");
	BUG_ON(1);

	return 0;
}