/*
 * rpmsg_dev_poll - poll/select handler for the rpmsg character device.
 * @filp: open file; private_data carries the struct _rpmsg_device.
 * @wait: poll table supplied by the VFS.
 *
 * Always reports the device writable (POLLOUT | POLLWRNORM); adds
 * POLLIN | POLLRDNORM when the receive kfifo holds data.
 *
 * Fix: the original called mutex_unlock(&local->sync_lock) *outside* the
 * "if (local)" guard, so it dereferenced a NULL pointer when local was
 * NULL and released a mutex on paths where it was never acquired. The
 * unlock now pairs with the successful mutex_lock_interruptible().
 */
static unsigned int rpmsg_dev_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct _rpmsg_device *_prpmsg_device =
		(struct _rpmsg_device *)filp->private_data;
	struct _rpmsg_params *local =
		(struct _rpmsg_params *)&_prpmsg_device->rpmsg_params;

	if (local) {
		unsigned int data_available;

		/* Interrupted by a signal: report only the writable bits. */
		if (mutex_lock_interruptible(&local->sync_lock))
			return mask;

		poll_wait(filp, &local->usr_wait_q, wait);

		data_available = kfifo_len(&local->rpmsg_kfifo);
		if (data_available)
			mask |= POLLIN | POLLRDNORM;

		mutex_unlock(&local->sync_lock);
	}

	return mask;
}
int getc(void) { unsigned char ch; uint64_t start; if (unlikely(!console_is_input_allow())) return -EPERM; /* * For 100us we read the characters from the serial driver * into a kfifo. This helps us not to lose characters * in small hardware fifos. */ start = get_time_ns(); while (1) { if (tstc_raw()) { kfifo_putc(console_input_fifo, getc_raw()); start = get_time_ns(); } if (is_timeout(start, 100 * USECOND) && kfifo_len(console_input_fifo)) break; } kfifo_getc(console_input_fifo, &ch); return ch; }
/*
 * ir_raw_event_thread - kthread draining raw IR events from the kfifo and
 * feeding each one to every registered protocol decoder.
 * @data: opaque pointer to the per-device struct ir_raw_event_ctrl.
 *
 * Returns 0 when the thread is asked to stop.
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {
		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			/*
			 * Not a whole event buffered yet. Flip to
			 * TASK_INTERRUPTIBLE *before* re-checking
			 * kthread_should_stop() so a stop request racing
			 * with this check cannot be lost across schedule().
			 */
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		/* Serialize decoding against handler (un)registration. */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		/* Remember the event for decoders that look at history. */
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
/*
 * testfunc - module-init self test for the record-mode kfifo sample.
 *
 * Queues one fixed record plus ten variable-length records, then peeks
 * and drains them, logging each step via printk. Always returns 0.
 */
static int __init testfunc(void)
{
	char scratch[100];
	unsigned int rec;
	unsigned int n;
	struct { unsigned char buf[6]; } hello = { "hello" };

	printk(KERN_INFO "record fifo test start\n");

	/* One fixed-size record first. */
	kfifo_in(&test, &hello, sizeof(hello));

	/* show the size of the next record in the fifo */
	printk(KERN_INFO "fifo peek len: %u\n", kfifo_peek_len(&test));

	/* Records of increasing length: "a", "bb", "ccc", ... */
	for (rec = 0; rec < 10; rec++) {
		memset(scratch, 'a' + rec, rec + 1);
		kfifo_in(&test, scratch, rec + 1);
	}

	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* show the first record without removing from the fifo */
	n = kfifo_out_peek(&test, scratch, sizeof(scratch));
	if (n)
		printk(KERN_INFO "%.*s\n", n, scratch);

	/* print out all records in the fifo */
	while (!kfifo_is_empty(&test)) {
		n = kfifo_out(&test, scratch, sizeof(scratch));
		printk(KERN_INFO "%.*s\n", n, scratch);
	}

	return 0;
}
/**
 * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @task: scsi command task (the connection and scsi_cmnd are reached
 *        through @task->conn and @task->sc)
 *
 * Builds the first PDU for the task via the transport's init_pdu hook.
 * Management tasks (no scsi_cmnd) carry their whole payload immediately;
 * SCSI tasks send at most the negotiated immediate-data amount here.
 */
int iscsi_tcp_task_init(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct scsi_cmnd *sc = task->sc;
	int err;

	if (!sc) {
		/*
		 * mgmt tasks do not have a scatterlist since they come
		 * in from the iscsi interface.
		 */
		ISCSI_DBG_TCP(conn, "mtask deq [itt 0x%x]\n", task->itt);

		return conn->session->tt->init_pdu(task, 0, task->data_count);
	}

	/* A fresh SCSI task must not have stale R2Ts queued. */
	BUG_ON(kfifo_len(&tcp_task->r2tqueue));
	tcp_task->exp_datasn = 0;

	/* Prepare PDU, optionally w/ immediate data */
	ISCSI_DBG_TCP(conn, "task deq [itt 0x%x imm %d unsol %d]\n",
		      task->itt, task->imm_count, task->unsol_r2t.data_length);

	err = conn->session->tt->init_pdu(task, 0, task->imm_count);
	if (err)
		return err;
	/* Immediate data consumed by the PDU just built. */
	task->imm_count = 0;
	return 0;
}
/*----------------------------------------------------------------------------*/
/*
 * kalIndicateBOWEvent - queue a BT-over-WiFi event for the userspace reader.
 * @prGlueInfo: glue layer context holding the BoW kfifo and wait queue.
 * @prEvent: event (header + payload) to enqueue.
 *
 * Silently drops the event when the char device is not registered, the
 * driver is halting, or the kfifo lacks room; otherwise copies the event
 * into the kfifo and wakes any poller sleeping on outq.
 */
VOID kalIndicateBOWEvent(IN P_GLUE_INFO_T prGlueInfo, IN P_AMPC_EVENT prEvent)
{
	size_t room, needed;

	ASSERT(prGlueInfo);
	ASSERT(prEvent);

	/* check device */
	if ((prGlueInfo->rBowInfo.fgIsRegistered == FALSE) ||
	    (prGlueInfo->u4Flag & GLUE_FLAG_HALT))
		return;

	room = GLUE_BOW_KFIFO_DEPTH - kfifo_len(&(prGlueInfo->rBowInfo.rKfifo));
	needed = prEvent->rHeader.u2PayloadLength + sizeof(AMPC_EVENT_HEADER_T);

	/* check kfifo availability */
	if (room < needed) {
		DBGLOG(BOW, EVENT, ("[bow] no space for event: %d/%d\n", needed, room));
		return;
	}

	/* queue into kfifo and wake up any sleeping reader */
	kfifo_in(&(prGlueInfo->rBowInfo.rKfifo), (PUINT_8) prEvent, needed);
	wake_up_interruptible(&(prGlueInfo->rBowInfo.outq));
}
/*----------------------------------------------------------------------------*/
/*
 * bow_ampc_poll - poll/select handler for the BoW AMP char device.
 * @filp: open file; private_data carries the glue info.
 * @wait: poll table supplied by the VFS.
 *
 * Always reports writability; adds POLLIN | POLLRDNORM when the event
 * kfifo is non-empty.
 *
 * Fix: a poll handler must return a POLL* event mask, never a negative
 * errno — the original returned -EFAULT, which poll()/select() would
 * misinterpret as a (huge) mask. Report POLLERR instead when the device
 * is unregistered or the driver is halting.
 */
static unsigned int bow_ampc_poll(IN struct file *filp, IN poll_table * wait)
{
	unsigned int retval;
	P_GLUE_INFO_T prGlueInfo;

	prGlueInfo = (P_GLUE_INFO_T) (filp->private_data);
	ASSERT(prGlueInfo);

	if ((prGlueInfo->rBowInfo.fgIsRegistered == FALSE) ||
	    (prGlueInfo->u4Flag & GLUE_FLAG_HALT)) {
		return POLLERR;
	}

	poll_wait(filp, &prGlueInfo->rBowInfo.outq, wait);

	retval = (POLLOUT | POLLWRNORM);	/* always accepts incoming command packets */

	if (kfifo_len(&(prGlueInfo->rBowInfo.rKfifo)) > 0)
		retval |= (POLLIN | POLLRDNORM);

	return retval;
}
/*
 * tstc - non-blocking check for pending console input.
 *
 * Returns nonzero when a character is buffered in console_input_fifo or
 * waiting in the raw serial driver; 0 when input is disallowed or empty.
 */
int tstc(void)
{
	if (unlikely(!console_is_input_allow()))
		return 0;

	/* Buffered bytes win; only ask the hardware when the fifo is empty. */
	if (kfifo_len(console_input_fifo))
		return 1;

	return tstc_raw() ? 1 : 0;
}
/*
 * stp_uart_rx_handling - tasklet body that drains the UART RX kfifo.
 * @func_data: unused tasklet cookie.
 *
 * Repeatedly pulls up to RX_BUFFER_LEN bytes into the global g_rx_data
 * buffer and hands them to the STP parser until the fifo is empty.
 * Logged when the fifo was found (near-)full, to spot backpressure.
 */
static void stp_uart_rx_handling(unsigned long func_data){
	unsigned int how_much_get = 0;
	unsigned int how_much_to_get = 0;
	unsigned int flag = 0;
#if 0
	unsigned int flags;
#endif

	/* On MT6575 a reader lock excludes the writer in tty_receive. */
#if defined(CONFIG_ARCH_MT6575)
	read_lock(&g_stp_uart_rx_handling_lock);
#endif
#if 0
	spin_lock_irqsave(&g_stp_uart_rx_handling_lock,flags);
#endif
	how_much_to_get = kfifo_len(g_stp_uart_rx_fifo);

	/* Remember (and report) that the fifo hit the drain-buffer size. */
	if (how_much_to_get >= RX_BUFFER_LEN) {
		flag = 1;
		UART_INFO_FUNC ("fifolen(%d)\n", how_much_to_get);
	}

	/*
	 * Drain loop. NOTE(review): entered even when the fifo is empty,
	 * so the parser may be called with a zero-length chunk once —
	 * presumably harmless; confirm with mtk_wcn_stp_parser_data().
	 */
	do{
		/* kfifo_get() was renamed kfifo_out() in 2.6.35-era kernels. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
		how_much_get= kfifo_get(g_stp_uart_rx_fifo, g_rx_data, RX_BUFFER_LEN);
#else
		how_much_get= kfifo_out(g_stp_uart_rx_fifo, g_rx_data, RX_BUFFER_LEN);
#endif
		//UART_INFO_FUNC ("fifoget(%d)\n", how_much_get);
		mtk_wcn_stp_parser_data((UINT8 *)g_rx_data, how_much_get);
		how_much_to_get = kfifo_len(g_stp_uart_rx_fifo);
	}while(how_much_to_get > 0);

#if defined(CONFIG_ARCH_MT6575)
	read_unlock(&g_stp_uart_rx_handling_lock);
#endif
#if 0
	spin_unlock_irqrestore(&g_stp_uart_rx_handling_lock,flags);
#endif
	if (1 == flag) {
		UART_INFO_FUNC ("finish, fifolen(%d)\n", kfifo_len(g_stp_uart_rx_fifo));
	}
}
/* stp_uart_tty_receive() * * Called by tty low level driver when receive data is * available. * * Arguments: tty pointer to tty isntance data * data pointer to received data * flags pointer to flags for data * count count of received data in bytes * * Return Value: None */ static void stp_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count) { unsigned int fifo_avail_len;/* = LDISC_RX_FIFO_SIZE - kfifo_len(g_stp_uart_rx_fifo);*/ unsigned int how_much_put = 0; #if 0 { struct timeval now; do_gettimeofday(&now); printk("[+STP][ ][R] %4d --> sec = %lu, --> usec --> %lu\n", count, now.tv_sec, now.tv_usec); } #endif #if LDISC_RX_TASKLET_RWLOCK write_lock(&g_stp_uart_rx_handling_lock); #endif if (count > 2000) { /*this is abnormal*/ UART_ERR_FUNC("abnormal: buffer count = %d\n", count); } /*How much empty seat?*/ fifo_avail_len = LDISC_RX_FIFO_SIZE - kfifo_len(g_stp_uart_rx_fifo); if (fifo_avail_len > 0) { //UART_INFO_FUNC ("fifo left(%d), count(%d)\n", fifo_avail_len, count); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) how_much_put = kfifo_put(g_stp_uart_rx_fifo,(unsigned char *) data, count); #else how_much_put = kfifo_in(g_stp_uart_rx_fifo,(unsigned char *) data, count); #endif #if LDISC_RX_TASKLET_RWLOCK /* George Test */ write_unlock(&g_stp_uart_rx_handling_lock); #endif /*schedule it!*/ tasklet_schedule(&g_stp_uart_rx_fifo_tasklet); } else { UART_ERR_FUNC("stp_uart_tty_receive rxfifo is full!!\n"); } #if 0 { struct timeval now; do_gettimeofday(&now); printk("[-STP][ ][R] %4d --> sec = %lu, --> usec --> %lu\n", count, now.tv_sec, now.tv_usec); } #endif #if LDISC_RX_TASKLET_RWLOCK /* George Test */ //write_unlock(&g_stp_uart_rx_handling_lock); #endif }
/*
 * gs_close - tty close hook for the USB gadget serial port.
 * @tty: tty being closed.
 * @file: file handle being closed.
 *
 * Only the final close tears the port down: it notifies the gadget side,
 * waits (bounded) for the write fifo to drain, then frees or scrubs the
 * fifo depending on whether the USB side is still connected.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial *gser;

	spin_lock_irq(&port->port_lock);

	/* Not the last opener: just decrement the refcount and leave. */
	if (port->port.count != 1) {

		if (port->port.count == 0)
			WARN_ON(1);
		else
			--port->port.count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n",
			port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->port.count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (kfifo_len(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-read: the USB side may have gone away while we slept */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push tasklet fire again until we're re-opened.
	 */
	if (gser == NULL)
		kfifo_free(&port->port_write_buf);
	else
		kfifo_reset(&port->port_write_buf);

	port->port.tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	/* let a blocked gs_open() proceed */
	wake_up(&port->close_wait);
exit:
	spin_unlock_irq(&port->port_lock);
}
/**
 * kfifo_skip - skip output data
 * @fifo: the fifo to be used.
 * @len: number of bytes to skip
 *
 * Discards up to @len bytes from the read side of @fifo; skipping as much
 * as (or more than) is stored simply empties the output side.
 */
void kfifo_skip(struct kfifo *fifo, unsigned int len)
{
	/* Skipping everything (or more) degenerates to a reset. */
	if (len >= kfifo_len(fifo)) {
		kfifo_reset_out(fifo);
		return;
	}

	__kfifo_add_out(fifo, len);
}
/**
 * kfifo_to_user - gets data from the FIFO and write it to user space
 * @fifo: the fifo to be used.
 * @to: where the data must be copied.
 * @len: the size of the destination buffer.
 *
 * This function copies at most @len bytes from the FIFO into the
 * @to buffer and returns the number of copied bytes.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
unsigned int kfifo_to_user(struct kfifo *fifo, void __user *to, unsigned int len)
{
	unsigned int avail = kfifo_len(fifo);
	unsigned int n = (len < avail) ? len : avail;

	/* Subtract whatever the user-space copy failed to transfer. */
	n -= __kfifo_to_user_data(fifo, to, n, 0);
	__kfifo_add_out(fifo, n);
	return n;
}
/**
 * kfifo_out - gets some data from the FIFO
 * @fifo: the fifo to be used.
 * @to: where the data must be copied.
 * @len: the size of the destination buffer.
 *
 * This function copies at most @len bytes from the FIFO into the
 * @to buffer and returns the number of copied bytes.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
{
	unsigned int avail = kfifo_len(fifo);
	unsigned int n = (len < avail) ? len : avail;

	__kfifo_out_data(fifo, to, n, 0);
	__kfifo_add_out(fifo, n);
	return n;
}
/* Transmitter */
/*
 * cx25840_ir_tx_write - stub IR transmit path for the CX25840.
 * @sd: v4l2 subdevice.
 * @buf: caller's pulse data (unused by the live path).
 * @count: number of bytes offered.
 * @num: out-param set to the number of bytes "consumed".
 *
 * The real conversion/queueing logic below is compiled out (#if 0) and is
 * an admitted untested sketch; the live path merely enables the Tx FIFO
 * service interrupt and claims all @count bytes were handled.
 */
static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
			       ssize_t *num)
{
	struct cx25840_ir_state *ir_state = to_ir_state(sd);
	struct i2c_client *c;

	if (ir_state == NULL)
		return -ENODEV;

	c = ir_state->c;
#if 0
	/*
	 * FIXME - the code below is an incomplete and untested sketch of what
	 * may need to be done.  The critical part is to get 4 (or 8) pulses
	 * from the tx_kfifo, or converted from ns to the proper units from the
	 * input, and push them off to the hardware Tx FIFO right away, if the
	 * HW TX fifo needs service.  The rest can be pushed to the tx_kfifo in
	 * a less critical timeframe.  Also watch out for overruning the
	 * tx_kfifo - don't let it happen and let the caller know not all his
	 * pulses were written.
	 */
	/* NOTE(review): this dead block uses undeclared i/j and would not
	 * compile if re-enabled as-is. */
	u32 *ns_pulse = (u32 *) buf;
	unsigned int n;
	u32 fifo_pulse[FIFO_TX_DEPTH];
	u32 mark;

	/* Compute how much we can fit in the tx kfifo */
	n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo);
	n = min(n, (unsigned int) count);
	n /= sizeof(u32);

	/* FIXME - turn on Tx Fifo service interrupt
	 * check hardware fifo level, and other stuff
	 */
	for (i = 0; i < n; ) {
		for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) {
			mark = ns_pulse[i] & LEVEL_MASK;
			fifo_pulse[j] = ns_to_pulse_width_count(
					 ns_pulse[i] &
					       ~LEVEL_MASK,
					 ir_state->txclk_divider);
			if (mark)
				fifo_pulse[j] &= FIFO_RXTX_LVL;
			i++;
		}
		kfifo_put(ir_state->tx_kfifo, (u8 *) fifo_pulse,
							 j * sizeof(u32));
	}
	*num = n * sizeof(u32);
#else
	/* For now enable the Tx FIFO Service interrupt & pretend we did work */
	irqenable_tx(sd, IRQEN_TSE);
	*num = count;
#endif
	return 0;
}
/**
 * get_kfifo - pop one chunk of TCP/IP data from the kfifo into @msg.
 * @msg: caller-provided buffer, assumed to hold at least SIZE bytes —
 *       TODO confirm with the exported-symbol callers.
 *
 * Exported so other kernel modules can consume queued data.
 * Returns 1 when data was copied into @msg, 0 when nothing was available.
 *
 * Fixes vs. original: the dead "msg = 0;" store (it only nulled the local
 * pointer, never the caller's buffer) is gone, and the byte count actually
 * dequeued by kfifo_out() is checked instead of being ignored.
 * (Original header comment, translated from Korean: "Function returning
 * TCP/IP data from the KFIFO; declared as a symbol so other kernel
 * modules can call it.")
 */
int get_kfifo(char *msg)
{
	unsigned int copied;

	if (kfifo_len(&fifo) == 0)
		return 0;

	copied = kfifo_out(&fifo, msg, SIZE);
	return copied ? 1 : 0;
}
/*
 * __kfifo_out_n - internal helper: remove a fixed-size chunk plus its
 * record-size header from the fifo.
 * @fifo: the fifo to be used.
 * @to: destination buffer for @len bytes of payload.
 * @len: payload length to copy out.
 * @recsize: size of the record-length header preceding the payload.
 *
 * Unusual return convention (do not "fix"): returns 0 on success and the
 * requested @len (nonzero) when the fifo does not hold a complete
 * @len + @recsize record, in which case nothing is consumed.
 */
unsigned int __kfifo_out_n(struct kfifo *fifo, void *to, unsigned int len, unsigned int recsize)
{
	/* Refuse partial records: all-or-nothing extraction. */
	if (kfifo_len(fifo) < len + recsize)
		return len;

	__kfifo_out_data(fifo, to, len, recsize);
	__kfifo_add_out(fifo, len + recsize);
	return 0;
}
/*
 * testfunc - module-init self test for the byte-stream kfifo sample.
 *
 * Exercises kfifo_in/kfifo_put/kfifo_out/kfifo_get round trips and fills
 * the fifo to capacity, logging each step via printk. Always returns 0.
 */
static int __init testfunc(void)
{
	unsigned char out[6];
	unsigned char val;
	unsigned int n;

	printk(KERN_INFO "byte stream fifo test start\n");

	/* put string into the fifo */
	kfifo_in(&test, "hello", 5);

	/* put values into the fifo */
	for (val = 0; val != 10; val++)
		kfifo_put(&test, &val);

	/* show the number of used elements */
	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* get max of 5 bytes from the fifo */
	val = kfifo_out(&test, out, 5);
	printk(KERN_INFO "buf: %.*s\n", val, out);

	/* get max of 2 elements from the fifo */
	n = kfifo_out(&test, out, 2);
	printk(KERN_INFO "ret: %d\n", n);
	/* and put it back to the end of the fifo */
	n = kfifo_in(&test, out, n);
	printk(KERN_INFO "ret: %d\n", n);

	/* put values into the fifo until is full */
	for (val = 20; kfifo_put(&test, &val); val++)
		;

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));

	/* print out all values in the fifo */
	while (kfifo_get(&test, &val))
		printk("%d ", val);
	printk("\n");

	return 0;
}
/**
 * usb_serial_generic_write_start - kick off an URB write
 * @port: Pointer to the &struct usb_serial_port data
 *
 * Returns zero on success, or a negative errno value
 *
 * The USB_SERIAL_WRITE_BUSY bit serializes submitters: whoever wins
 * test_and_set drains the write fifo into free write urbs, looping until
 * the fifo is empty or no urb is free.
 */
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	/* Someone else is already submitting; they will pick up our data. */
	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	/* Nothing to send or no urb to send it with: drop the busy bit. */
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
					ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	/* Let the subdriver pull fifo data into the urb's buffer. */
	count = port->serial->type->prepare_write_buffer(port,
						urb->transfer_buffer,
						port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(debug, &port->dev, __func__, count,
				urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&port->dev, "%s - error submitting urb: %d\n",
						__func__, result);
		/* Undo the bookkeeping done above for the failed urb. */
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	/* Try sending off another urb, unless in irq context (in which case
	 * there will be no free urb). */
	if (!in_irq())
		goto retry;

	clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);

	return 0;
}
/*
 * add2kfifo - append @len bytes at @pdata to the global g_kfifo.
 *
 * NOTE(review): the guard compares only the *used* length against
 * MAX_BUF_SIZE, not the free space against @len, so the old kfifo_put()
 * API may enqueue fewer than @len bytes and the shortfall is silently
 * dropped (return value ignored) — confirm this loss is acceptable.
 * When the fifo is "full" by this measure, all previously queued data is
 * discarded via kfifo_reset() and the new data is also dropped —
 * presumably a deliberate drop policy; verify against the consumer.
 */
static void inline add2kfifo(const unsigned char *pdata,const unsigned int len)
{
	if(kfifo_len(g_kfifo) < MAX_BUF_SIZE)
	{
		kfifo_put(g_kfifo,pdata,len);
	}
	else
	{
		kfifo_reset(g_kfifo);
	}
}
/*
 * gs_writes_finished - drain-wait predicate for gs_close().
 * @p: gadget serial port.
 *
 * Returns true once the USB side has disconnected or the circular write
 * buffer is empty; sampled under the port lock.
 */
static int gs_writes_finished(struct gs_port *p)
{
	int done;

	spin_lock_irq(&p->port_lock);
	done = (p->port_usb == NULL) ||
	       (kfifo_len(&p->port_write_buf) == 0);
	spin_unlock_irq(&p->port_lock);

	return done;
}
/*
 * cons_read - blocking consumer read from the shared kfifo.
 * @filp: open file (O_NONBLOCK honored).
 * @buf: user-space destination buffer.
 * @count: maximum number of bytes to read.
 * @f_pos: unused file position.
 *
 * Classic producer/consumer pattern: sleep on cons_que while the fifo is
 * empty, copy out under cons_sem, then wake writers on prod_que.
 * Returns bytes copied, -EAGAIN, -ERESTARTSYS, or -EFAULT.
 */
ssize_t cons_read(struct file *filp, char __user *buf, size_t count,
		  loff_t *f_pos)
{
	int ret;
	int copied;

	pr_info("%s() : FIFO size = %d, count = %d\n", __func__,
		(int)kfifo_len(&fifo), (int)count);

	if (down_interruptible(&cons_sem))
		return -ERESTARTSYS;
	while (kfifo_len(&fifo) <= 0) { /* nothing to read */
		/*
		 * Must drop the semaphore before sleeping so the producer
		 * can get in and fill the fifo; reacquire after waking and
		 * re-test, since another reader may have raced us.
		 */
		up(&cons_sem); /* release the lock */
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		pr_info("%s () : \"%s\" going to sleep\n", __func__, current->comm);
		if (wait_event_interruptible(cons_que, kfifo_len(&fifo) > 0)) {
			pr_info("%s() wait_event_interruptible() : signal: "
				"tell the fs layer to handle it\n", __func__);
			return -ERESTARTSYS; /* signal: inform the fs layer to handle it */
		}
		if (down_interruptible(&cons_sem))
			return -ERESTARTSYS;
	}
	/* ok, data is there, return something */
	count = min((long)count, (long)kfifo_len(&fifo));
	pr_info("%s() : \"%s\" data to copy = %li bytes\n", __func__,
		current->comm, (long)count);
	ret = kfifo_to_user(&fifo, buf, count, &copied);
	up(&cons_sem);
	if (ret < 0)
		return -EFAULT;
	pr_info("%s() : \"%s\" read %li bytes. FIFO new Size = %d\n", __func__,
		current->comm, (long)count, (int)kfifo_len(&fifo));
	pr_info("%s() : \"%s\" waking up producer processes\n", __func__,
		current->comm);
	/* Space was freed: let any blocked writer continue. */
	wake_up_interruptible(&prod_que);
	return copied;
}
/*
 * prod_write - blocking producer write into the shared kfifo.
 * @filp: open file (O_NONBLOCK honored).
 * @buf: user-space source buffer.
 * @count: number of bytes offered.
 * @f_pos: unused file position.
 *
 * Mirror image of cons_read(): sleep on prod_que while the fifo is full,
 * copy in under prod_sem, then wake readers on cons_que.
 * Returns bytes copied, -EAGAIN, -ERESTARTSYS, or -EFAULT.
 */
ssize_t prod_write(struct file *filp, const char __user *buf, size_t count,
		   loff_t *f_pos)
{
	int ret;
	int copied;

	pr_info("%s() : FIFO size = %d, count = %d\n", __func__,
		(int)kfifo_len(&fifo), (int)count);

	if (down_interruptible(&prod_sem))
		return -ERESTARTSYS;
	while ((int)kfifo_avail(&fifo) <= 0) { /* full */
		/*
		 * Drop the semaphore before sleeping so a consumer can
		 * drain the fifo; reacquire and re-test after waking.
		 */
		up(&prod_sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		pr_info("%s() : \"%s\" going to sleep\n", __func__, current->comm);
		if (wait_event_interruptible(prod_que,\
			(((int)kfifo_avail(&fifo)) > 0))) {
			pr_info("%s() wait_event_interruptible() : signal: "
				"tell the fs layer to handle it\n", __func__);
			return -ERESTARTSYS; /* signal: inform the fs layer to handle it */
		}
		if (down_interruptible(&prod_sem))
			return -ERESTARTSYS;
		pr_info("%s() : \"%s\" waken from sleep\n", __func__, current->comm);
	}
	/* Clamp to the free space so kfifo_from_user cannot overfill. */
	count = min((int)count, (int)kfifo_avail(&fifo));
	pr_info("%s() : \"%s\" data to copy = %li bytes\n", __func__,
		current->comm, (long)count);
	ret = kfifo_from_user(&fifo, buf, count, &copied);
	up(&prod_sem);
	if (ret < 0)
		return -EFAULT;
	pr_info("%s() : \"%s\" copied %d bytes.FIFO new SIZE = %d\n", __func__,
		current->comm, copied, (int)kfifo_len(&fifo));
	pr_info("%s() : \"%s\" waking up consumer processes\n", __func__,
		current->comm);
	/* Data is available: let any blocked reader continue. */
	wake_up_interruptible(&cons_que);
	/* ret is 0 here (negative was returned above), so this yields copied. */
	return ret ? ret : copied;
}
/*
 * usart1_ioctl - control operations for USART1.
 * @cmd: one of the CMD_* codes handled below.
 * @arg: value operand, or a pointer (cast to u32) for the GET commands.
 *
 * Returns 0 on success, -1 for an unrecognized command.
 */
static s32 usart1_ioctl(u8 cmd, u32 arg)
{
	s32 result = 0;

	switch (cmd) {
	case CMD_SET_RX_TIMEOUT:
		rx_timeout_ticks = (arg / SYS_TICK_MS);
		break;
	case CMD_GET_RX_TIMEOUT:
		*(u32 *)(arg) = rx_timeout_ticks * SYS_TICK_MS;
		break;
	case CMD_SET_RX_MODE:
		/* Switching modes discards any pending input first. */
		if (arg != rx_mode) {
			usart1_flush(0);
			rx_mode = arg;
		}
		break;
	case CMD_GET_RX_MODE:
		*(u8 *)(arg) = rx_mode;
		break;
	case CMD_SET_KEY_CHAR:
		key_char = arg;
		break;
	case CMD_GET_KEY_CHAR:
		*(u8 *)(arg) = key_char;
		break;
	case CMD_SET_N_CHAR_TIMEOUT:
		N_char_timeout = arg;
		/* Inter-character timeout depends on the current baud rate. */
		usart1_update_timeout(baudRate, N_char_timeout);
		break;
	case CMD_GET_N_CHAR_TIMEOUT:
		*(u32 *)(arg) = N_char_timeout;
		break;
	case CMD_GET_INPUT_BUF_SIZE:
		/* Bytes currently queued in the RX fifo. */
		*(u32 *)(arg) = kfifo_len(&rx_fifo);
		break;
	case CMD_FLUSH_INPUT:
		usart1_flush(0);
		break;
	case CMD_FLUSH_OUTPUT:
		usart1_flush(1);
		break;
	default:
		result = -1;
		break;
	}

	return result;
}
/*
 * ami_poll - poll/select handler for the AMI306 sensor device.
 * @file: open file; private_data points into the owning dev_data.
 * @wait: poll table supplied by the VFS.
 *
 * Reports POLLIN | POLLRDNORM when the event kfifo holds data.
 */
static unsigned int ami_poll(struct file *file, poll_table *wait)
{
	struct ami306_dev_data *pdev =
		container_of(file->private_data, struct ami306_dev_data, dev);
	unsigned int events = 0;

	poll_wait(file, &pdev->waitq, wait);

	if (kfifo_len(&pdev->ebuff) > 0)
		events = POLLIN | POLLRDNORM;

	return events;
}
/*
 * rk3190_mbox_msg_get - pop one 32-bit message from the inbound fifo.
 * @imb: generic mailbox handle (actually a struct rk3190_mbox).
 * @msg: destination for the dequeued word.
 *
 * Returns 0 when a full word was dequeued into @msg, -1 when the fifo
 * holds less than one message.
 */
static int rk3190_mbox_msg_get(struct ipc_mbox *imb, u32 *msg)
{
	struct rk3190_mbox *pmb = (struct rk3190_mbox *)imb;
	int got;

	if (kfifo_len(&pmb->in_fifo) < sizeof(u32))
		return -1;

	got = kfifo_out((&pmb->in_fifo), (unsigned char *)msg, sizeof(u32));
	/* We checked the length above, so a short read is a logic error. */
	WARN_ON(got != sizeof(u32));
	return 0;
}
/*
 * gs_send_packet
 *
 * If there is data to send, a packet is built in the given
 * buffer and the size is returned.  If there is no data to
 * send, 0 is returned.
 *
 * Called with port_lock held.
 */
static unsigned gs_send_packet(struct gs_port *port, char *packet, unsigned size)
{
	unsigned avail = kfifo_len(&port->port_write_buf);

	/* Never build a packet larger than what is buffered. */
	if (size > avail)
		size = avail;

	if (size)
		size = kfifo_out(&port->port_write_buf, packet, size);

	return size;
}
static int tmsi_ioctl(struct inode* inode, struct file* file, unsigned int command, unsigned long argument) { struct tmsi_data* dev=(struct tmsi_data*) file->private_data; switch (command) { case IOCTL_TMSI_BUFFERSIZE_64: case IOCTL_TMSI_BUFFERSIZE: return kfifo_len(dev->packet_buffer); default: info("%s: IOCTL command 0x%X not implemented!", __FUNCTION__, command); break; } return -1; }
/*
 * gs_console_thread - kthread pumping console output to the USB gadget.
 * @data: unused; state lives in the global gscons_info.
 *
 * Drains con_buf into the single console request and queues it on the
 * gadget's IN endpoint, one maxpacket-sized chunk at a time; sleeps
 * whenever the port is absent, the request is busy, or the buffer is
 * empty. Returns 0 when asked to stop.
 */
static int gs_console_thread(void *data)
{
	struct gscons_info *info = &gscons_info;
	struct gs_port *port;
	struct usb_request *req;
	struct usb_ep *ep;
	int xfer, ret, count, size;

	do {
		port = info->port;
		/* Arm the sleep state before the checks so a concurrent
		 * wakeup between check and schedule() is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!port || !port->port_usb
		    || !port->port_usb->in || !info->console_req)
			goto sched;

		req = info->console_req;
		ep = port->port_usb->in;

		spin_lock_irq(&info->con_lock);
		count = kfifo_len(&info->con_buf);
		size = ep->maxpacket;

		if (count > 0 && !info->req_busy) {
			set_current_state(TASK_RUNNING);
			if (count < size)
				size = count;

			xfer = kfifo_out(&info->con_buf, req->buf, size);
			req->length = xfer;

			/*
			 * Drop only the lock (irqs stay disabled) around
			 * usb_ep_queue(); the final unlock below restores
			 * the irq state taken by spin_lock_irq() above.
			 */
			spin_unlock(&info->con_lock);
			ret = usb_ep_queue(ep, req, GFP_ATOMIC);
			spin_lock(&info->con_lock);
			if (ret < 0)
				info->req_busy = 0;
			else
				info->req_busy = 1;

			spin_unlock_irq(&info->con_lock);
		} else {
			spin_unlock_irq(&info->con_lock);
sched:
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			schedule();
		}
	} while (1);

	return 0;
}
/*
 * stp_uart_rx_handling - tasklet body that drains the UART RX kfifo.
 * @func_data: unused tasklet cookie.
 *
 * Repeatedly pulls up to LOCAL_BUFFER_LEN bytes into an on-stack buffer
 * and hands them to the STP parser until the fifo is empty. Logged when
 * the fifo was found (near-)full, to spot backpressure.
 *
 * NOTE(review): the 1024-byte automatic buffer is large for softirq
 * context stack usage — confirm it is acceptable for this platform.
 */
static void stp_uart_rx_handling(unsigned long func_data){
#define LOCAL_BUFFER_LEN 1024
	unsigned char data[LOCAL_BUFFER_LEN];
	unsigned int how_much_get = 0;
	unsigned int how_much_to_get = 0;
	unsigned int flag = 0;

	/* Reader side of the rwlock shared with stp_uart_tty_receive(). */
#if LDISC_RX_TASKLET_RWLOCK
	read_lock(&g_stp_uart_rx_handling_lock);
#endif
	how_much_to_get = kfifo_len(g_stp_uart_rx_fifo);

	/* Remember (and report) that the fifo hit the drain-buffer size. */
	if (how_much_to_get >= LOCAL_BUFFER_LEN) {
		flag = 1;
		UART_INFO_FUNC ("fifolen(%d)\n", how_much_to_get);
	}

	/*
	 * Drain loop. NOTE(review): entered even when the fifo is empty,
	 * so the parser may see a zero-length chunk once — presumably
	 * harmless; confirm with mtk_wcn_stp_parser_data().
	 */
	do {
		/* kfifo_get() was renamed kfifo_out() in 2.6.33-era kernels. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
		how_much_get= kfifo_get(g_stp_uart_rx_fifo, data, LOCAL_BUFFER_LEN);
#else
		how_much_get= kfifo_out(g_stp_uart_rx_fifo, data, LOCAL_BUFFER_LEN);
#endif
		UART_INFO_FUNC ("fifoget(%d)\n", how_much_get);
		mtk_wcn_stp_parser_data((UINT8 *)data, how_much_get);
		how_much_to_get = kfifo_len(g_stp_uart_rx_fifo);
	}while(how_much_to_get > 0);

#if LDISC_RX_TASKLET_RWLOCK
	read_unlock(&g_stp_uart_rx_handling_lock);
#endif
	if (1 == flag) {
		UART_INFO_FUNC ("finish, fifolen(%d)\n", kfifo_len(g_stp_uart_rx_fifo));
	}
}