/*
 * plp_kmem_write() - write() handler: queue user data for transmission.
 *
 * Blocks (unless O_NONBLOCK) while the TX kfifo is full, copies the user
 * buffer into kernel space, pushes it into the write kfifo, and enables
 * the TX/RX interrupts so the ISR drains the fifo to the hardware.
 *
 * Returns the number of bytes queued (may be less than @count), -EAGAIN
 * when non-blocking and full, -ERESTARTSYS if interrupted by a signal,
 * or -EFAULT on a bad user buffer.
 */
static ssize_t plp_kmem_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	P_SERIAL_DEV *dev = filp->private_data;
	char kbuf[HW_FIFO_SIZE];
	int bytes;

	printk(">>>>>>>>>In write call\n");

	if (kfifo_is_full(&(dev->write_kfifo))) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		/* BUG FIX: propagate a signal instead of ignoring it and
		 * falling through to write into a (possibly still full)
		 * fifo. */
		if (wait_event_interruptible(dev->write_queue,
				!kfifo_is_full(&(dev->write_kfifo))))
			return -ERESTARTSYS;
	}

	/* Bound the copy by the bounce buffer; a short write is legal. */
	if (count > sizeof(kbuf))
		count = sizeof(kbuf);

	/*
	 * BUG FIX: the original passed the __user pointer straight to
	 * kfifo_in_locked(), which does a plain memcpy from kernel space.
	 * User memory must be brought in with copy_from_user() first
	 * (done outside the spinlock because a fault may sleep); this
	 * also replaces the obsolete access_ok(VERIFY_READ, ...) check.
	 */
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	bytes = kfifo_in_locked(&(dev->write_kfifo), kbuf, count,
				&(dev->wr_spinlock));

	outb (0x03, base_addr + 1); //IER ,enabling Tx & Rx buffer interrupt ERBFI & EXBFI
	return bytes;
}
/*
 * Writes the FIFO in 4 byte chunks.
 * If length isn't 4 byte aligned, rest of the data is put to a fifo
 * to be written later
 * Use r592_flush_fifo_write to flush that fifo when writing for the
 * last time
 */
static void r592_write_fifo_pio(struct r592_device *dev,
		unsigned char *buffer, int len)
{
	/* flush spill from former write */
	if (!kfifo_is_empty(&dev->pio_fifo)) {
		u8 tmp[4] = {0};
		/* Top the (4-byte) spill fifo up; kfifo_in copies at most
		 * the fifo's free space, so copy_len <= 3 here. */
		int copy_len = kfifo_in(&dev->pio_fifo, buffer, len);

		/* Still under 4 bytes total: keep spilling, nothing can be
		 * written to the hardware yet. */
		if (!kfifo_is_full(&dev->pio_fifo))
			return;
		len -= copy_len;
		buffer += copy_len;

		/* Drain the now-complete dword and push it to the device. */
		copy_len = kfifo_out(&dev->pio_fifo, tmp, 4);
		WARN_ON(copy_len != 4);
		r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp);
	}

	/* The spill fifo is either untouched or was fully drained above. */
	WARN_ON(!kfifo_is_empty(&dev->pio_fifo));

	/* write full dwords */
	while (len >= 4) {
		r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
		buffer += 4;
		len -= 4;
	}

	/* put remaining bytes to the spill */
	if (len)
		kfifo_in(&dev->pio_fifo, buffer, len);
}
/*
 * Drain pending hardware TX-status reports.
 *
 * When called from IRQ context (@irq == true) the statuses are only
 * buffered into dev->txstatus_fifo for deferred processing, and polling
 * stops once that fifo is full.  Outside IRQ context each status is
 * reported immediately via mt76x2_send_tx_status() and the fifo bound
 * does not apply.
 */
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
	struct mt76x2_tx_status stat = {};
	unsigned long flags;
	u8 update = 1;
	bool ret;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	trace_mac_txstat_poll(dev);

	/* In IRQ context, stop before overrunning the status fifo. */
	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
		/* Serialize the hardware status read against the IRQ path. */
		spin_lock_irqsave(&dev->irq_lock, flags);
		ret = mt76x2_mac_load_tx_status(dev, &stat);
		spin_unlock_irqrestore(&dev->irq_lock, flags);

		/* No more valid status entries available. */
		if (!ret)
			break;

		trace_mac_txstat_fetch(dev, &stat);

		if (!irq) {
			/* Not in IRQ context: process the report inline. */
			mt76x2_send_tx_status(dev, &stat, &update);
			continue;
		}

		/* IRQ context: defer to the consumer via the fifo. */
		kfifo_put(&dev->txstatus_fifo, stat);
	}
}
/*
 * Push one received CCIF message onto the per-channel RX kfifo.
 *
 * Returns sizeof(ccif_msg_t) on success (or on a tolerated drop), 0 when a
 * non-droppable message hits a full fifo, or a negative -CCCI_ERR_* code on
 * validation failure.  On success the dispatcher's pending flag is set so a
 * consumer will pick the message up.  Every message (dropped or not) is
 * recorded via add_logic_layer_record() for diagnostics.
 */
static int __logic_dispatch_push(ccif_msg_t *msg, void *ctl_b)
{
	logic_channel_info_t *ch_info;
	int ret = 0;
	logic_dispatch_ctl_block_t *ctl_block = (logic_dispatch_ctl_block_t*)ctl_b;
	int md_id = ctl_block->m_md_id;
	int drop = 1;	/* assume dropped until the message is queued */

	/* Validate the logical channel id before indexing the table. */
	if (unlikely(msg->channel >= CCCI_MAX_CH_NUM)){
		CCCI_MSG_INF(md_id, "cci", "%s get invalid logic ch id:%d\n", \
			__FUNCTION__, msg->channel);
		ret = -CCCI_ERR_INVALID_LOGIC_CHANNEL_ID;
		goto _out;
	}

	ch_info = &(ctl_block->m_logic_ch_table[msg->channel]);

	/* RX data must never be pushed onto a TX-attributed channel. */
	if (unlikely(ch_info->m_attrs&L_CH_ATTR_TX)){
		CCCI_MSG_INF(md_id, "cci", "%s CH:%d %s is tx channel\n", __FUNCTION__, \
			msg->channel, ch_info->m_ch_name);
		ret = -CCCI_ERR_PUSH_RX_DATA_TO_TX_CHANNEL;
		goto _out;
	}

	// check whether fifo is ready
	if (!ch_info->m_kfifo_ready){
		CCCI_MSG_INF(md_id, "cci", "%s CH:%d %s's kfifo is not ready\n", \
			__FUNCTION__, msg->channel, ch_info->m_ch_name);
		ret = -CCCI_ERR_KFIFO_IS_NOT_READY;
		goto _out;
	}

	// Check fifo free space
	if (kfifo_is_full(&ch_info->m_kfifo)) {
		if (ch_info->m_attrs&L_CH_DROP_TOLERATED){
			/* Droppable channel: report success so the caller
			 * treats the message as consumed. */
			CCCI_CTL_MSG(md_id, "Drop (%08X %08X %02d %08X) is tolerated\n", \
				msg->data[0], msg->data[1], msg->channel, msg->reserved);
			ret = sizeof(ccif_msg_t);
		} else {
			// message should NOT be dropped
			CCCI_DBG_MSG(md_id, "cci", "kfifo full: ch:%s size:%d (%08X %08X %02d %08X)\n",
				ch_info->m_ch_name, kfifo_size(&ch_info->m_kfifo),msg->data[0],
				msg->data[1], msg->channel, msg->reserved);
			/* NOTE(review): returning 0 signals "nothing consumed";
			 * the original author left open whether the CCIF
			 * interrupt should be disabled here to apply
			 * back-pressure — confirm against the caller. */
			ret = 0; // Fix this!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
		}
		goto _out;
	}

	// Push data
	ret = kfifo_in(&ch_info->m_kfifo,msg,sizeof(ccif_msg_t));
	WARN_ON(ret!=sizeof(ccif_msg_t));
	ctl_block->m_has_pending_data = 1;
	drop = 0;

_out:
	/* Record the message and its drop status for diagnostics. */
	add_logic_layer_record(md_id, (ccci_msg_t*)msg, drop);
	return ret;
}
/* Report whether the buffer's kfifo can accept at least one more datum;
 * the fullness check is done under user_lock to serialize with consumers. */
static bool iio_kfifo_buf_space_available(struct iio_buffer *r)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);
	bool has_space;

	mutex_lock(&kf->user_lock);
	has_space = !kfifo_is_full(&kf->kf);
	mutex_unlock(&kf->user_lock);

	return has_space;
}
/*
 * Queue one sensorhub "library" event.
 *
 * Copies @dataframe (length must be 1..PAGE_SIZE-1) into the next slot of
 * the fixed hub_data->events[] ring and pushes a pointer to that slot into
 * hub_data->fifo.  When the fifo is full the oldest pointer is popped first
 * so the newest event always fits (the slot's old payload is freed when the
 * ring index wraps back onto it).
 *
 * Returns the number of queued events on success, or a negative errno.
 */
static int ssp_sensorhub_list(struct ssp_sensorhub_data *hub_data,
				char *dataframe, int length)
{
	struct sensorhub_event *event;
	int ret = 0;

	if (unlikely(length <= 0 || length >= PAGE_SIZE)) {
		sensorhub_err("library length err(%d)", length);
		return -EINVAL;
	}

	ssp_sensorhub_log(__func__, dataframe, length);

	/* overwrite new event if list is full */
	if (unlikely(kfifo_is_full(&hub_data->fifo))) {
		/* Pop and discard the oldest event pointer to make room. */
		ret = kfifo_out(&hub_data->fifo, &event, sizeof(void *));
		if (unlikely(ret != sizeof(void *))) {
			sensorhub_err("kfifo out err(%d)", ret);
			return -EIO;
		}
		sensorhub_info("overwrite event");
	}

	/* allocate memory for new event
	 * (kfree first: the slot may still hold a previous payload) */
	kfree(hub_data->events[hub_data->event_number].library_data);
	hub_data->events[hub_data->event_number].library_data =
		kzalloc(length * sizeof(char), GFP_ATOMIC);
	if (unlikely(!hub_data->events[hub_data->event_number].library_data)) {
		sensorhub_err("allocate memory for library err");
		return -ENOMEM;
	}

	/* copy new event into memory */
	memcpy(hub_data->events[hub_data->event_number].library_data,
		dataframe, length);
	hub_data->events[hub_data->event_number].library_length = length;

	/* add new event into the end of list */
	event = &hub_data->events[hub_data->event_number];
	ret = kfifo_in(&hub_data->fifo, &event, sizeof(void *));
	if (unlikely(ret != sizeof(void *))) {
		sensorhub_err("kfifo in err(%d)", ret);
		return -EIO;
	}

	/* not to overflow max list capacity: wrap the ring index */
	if (hub_data->event_number++ >= LIST_SIZE - 1)
		hub_data->event_number = 0;

	/* fifo stores pointers, so its byte length / pointer size = count */
	return kfifo_len(&hub_data->fifo) / sizeof(void *);
}
/* Queue one int on the client's kfifo.
 * Returns sizeof(int) on success or -ENOMEM when the fifo is full. */
static int push_data(emd_dev_client_t *client, int data)
{
	int stored, result;

	if (kfifo_is_full(&client->fifo)) {
		EMD_MSG_INF("chr","sub_dev%d kfifo full\n",client->sub_dev_id);
		result = -ENOMEM;
	} else {
		EMD_MSG_INF("chr","push data=0x%08x into sub_dev%d kfifo\n",data,client->sub_dev_id);
		stored = kfifo_in(&client->fifo, &data, sizeof(int));
		/* A partial insert here would indicate fifo corruption. */
		WARN_ON(stored != sizeof(int));
		result = sizeof(int);
	}

	return result;
}
/* Copy pending TX status words from TX_STA_FIFO into the kernel fifo and
 * kick the tasklet that processes them. */
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	int i;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * Furthermore we don't disable the TX_FIFO_STATUS
	 * interrupt here but leave it enabled so that the TX_STA_FIFO
	 * can also be read while the tx status tasklet gets executed.
	 *
	 * Since we have only one producer and one consumer we don't
	 * need to lock the kfifo.
	 */
	/* At most one status word per TX queue entry can be pending. */
	for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
		rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &status);

		/* An invalid word means the hardware fifo is drained. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		if (kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
			WARNING(rt2x00dev, "TX status FIFO overrun,"
				" drop tx status report.\n");
			break;
		}

		/* Defensive re-check: kfifo_in reports how much it stored. */
		if (kfifo_in(&rt2x00dev->txstatus_fifo, &status,
			     sizeof(status)) != sizeof(status)) {
			WARNING(rt2x00dev, "TX status FIFO overrun,"
				"drop tx status report.\n");
			break;
		}
	}

	/* Schedule the tasklet for processing the tx status. */
	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
/*****************************************************************************
 * FUNCTION
 *  hal_btif_is_tx_allow
 * DESCRIPTION
 *  whether tx is allowed
 * PARAMETERS
 *  p_base [IN] BTIF module's base address
 * RETURNS
 *  true if tx operation is allowed; false if tx is not allowed
 *****************************************************************************/
bool hal_btif_is_tx_allow(P_MTK_BTIF_INFO_STR p_btif)
{
/* Rough throughput bounds in MB derived from a 26 MHz clock; used only to
 * size the polling delay below.  MIN assumes /13 divisor, AVE assumes /8. */
#define MIN_TX_MB ((26 * 1000000 / 13) / 1000000 )
#define AVE_TX_MB ((26 * 1000000 / 8) / 1000000 )
/*Chaozhong: To be implement*/
	bool b_ret = false;
	unsigned int base = p_btif->base;
	unsigned int lsr = 0;
	/* Estimated time to drain the fifo from threshold to empty at the
	 * minimum throughput. */
	unsigned int wait_us = (BTIF_TX_FIFO_SIZE - BTIF_TX_FIFO_THRE) / MIN_TX_MB ; /*only ava length */

#if NEW_TX_HANDLING_SUPPORT
	unsigned long flags = 0;

	/* New path: TX is allowed iff the software TX fifo has room. */
	spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flags);
	/*clear Tx enable flag if necessary*/
	if (kfifo_is_full(p_btif->p_tx_fifo)){
		BTIF_WARN_FUNC("BTIF tx FIFO is full\n");
		b_ret = false;
	} else {
		b_ret = true;
	}
	spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flags);
#else
	/*read LSR and check THER or TEMT, either one is 1 means can accept tx data*/
	lsr = BTIF_READ32(BTIF_LSR(base));
	if(!(lsr & (BTIF_LSR_TEMT_BIT | BTIF_LSR_THRE_BIT)))
	{
		/* Hardware fifo busy: give it time to drain, then re-read. */
		BTIF_DBG_FUNC("wait for %d ~ %d us\n", wait_us, 3 * wait_us);
		//usleep_range(wait_us, 3 * 10 * wait_us);
		usleep_range(wait_us, 3 * wait_us);
	}
	lsr = BTIF_READ32(BTIF_LSR(base));
	b_ret = (lsr & (BTIF_LSR_TEMT_BIT | BTIF_LSR_THRE_BIT)) ? true : false;
	if (!b_ret)
		BTIF_DBG_FUNC(" tx is not allowed for the moment\n");
	else
		BTIF_DBG_FUNC(" tx is allowed\n");
#endif
	return b_ret;
}
// eemcs_sysmsg_rx_dispatch_cb: CCCI_SYSTEM_RX message dispatch call back function for MODEM // @skb: pointer to a CCCI buffer // @private_data: pointer to private data of CCCI_SYSTEM_RX KAL_INT32 eemcs_sysmsg_rx_dispatch_cb(struct sk_buff *skb, KAL_UINT32 private_data) { CCCI_BUFF_T *p_cccih = NULL; DBGLOG(SMSG, DBG, "====> %s", FUNC_NAME); if (skb){ p_cccih = (CCCI_BUFF_T *)skb->data; DBGLOG(SMSG, INF, "sysmsg RX callback, msg: %08X, %08X, %02d, %08X\n", p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved); if (p_cccih->channel == CH_SYS_TX){ DBGLOG(SMSG, ERR, "Wrong CH for recv"); return KAL_FAIL; } if (kfifo_is_full(&sysmsg_fifo)) { DBGLOG(SMSG, ERR, "kfifo full and packet drop, msg: %08X, %08X, %02d, %08X\n", \ p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved); dev_kfree_skb(skb); return KAL_FAIL; } spin_lock_bh(&sysmsg_fifo_lock); //DBGLOG(SMSG, TRA, "ready to put skb into FIFO"); kfifo_in(&sysmsg_fifo, &skb, sizeof(unsigned int)); //DBGLOG(SMSG, TRA, "after put skb into FIFO"); spin_unlock_bh(&sysmsg_fifo_lock); DBGLOG(SMSG, DBG, "schedule sysmsg_work"); schedule_work(&sysmsg_work); } else { DBGLOG(SMSG, ERR, "skb is NULL!"); return KAL_FAIL; } DBGLOG(SMSG, DBG, "<==== %s", FUNC_NAME); return KAL_SUCCESS; }
/***************************************************************************** Syntax: void voice_buf_set_data_in_rtp_stream1 Remarks: set data on input RTP packet to FIFO buffer *******************************************************************************/ unsigned short voice_buf_set_data_in_rtp_stream1 (const unsigned char *in_buf ,const int in_size) { unsigned short i=0; if(kfifo_is_full(&test)==1) { printk("++++FIFO is FULL bytes_clear_data+++++\n\r"); return 0; } else { //put values into the fifo for (i = 0; i != in_size; i++) { kfifo_put(&test, &in_buf[i]); //printk("%x|",ret); } return i; } }
/*
 * pseudo_serial_intr_handler() - UART interrupt handler.
 *
 * TX path (LSR bit 5, THRE): drains up to HW_FIFO_SIZE bytes from the
 * write kfifo into the transmit register and wakes blocked writers.
 * RX path (LSR bit 0, DR): reads up to 16 bytes from the receiver into
 * the read kfifo (counting drops when the kfifo is full) and wakes
 * blocked readers.
 */
irqreturn_t pseudo_serial_intr_handler(int irq_no, void *p_dev)
{
	int ret_val,i,j;
	irqreturn_t irq_r_flag=0;
	P_SERIAL_DEV *dev = (P_SERIAL_DEV *)p_dev;
	char lk_buff[HW_FIFO_SIZE];

	irq_r_flag |= IRQ_NONE;

	printk("\n\n>in irqreturn_t...!\n");

	/* LSR bit 5 set: transmitter holding register empty. */
	if(inb(base_addr+0x05) & 0x20) //comparing for Tx interrupt
	{
		printk("\n>in irqreturn_t Tx interrupt...!\n");
		outb (0x01, base_addr + 1); //IER ,disabling Tx & enabling Rx buffer interrupt ERBFI & EXBFI
		//copying the data into local buffer from the kfifo buffer
		ret_val = kfifo_out_locked(&(dev->write_kfifo),lk_buff,HW_FIFO_SIZE, &(dev->wr_spinlock));
		printk("\n>in irqreturn_t Tx interrupt ret_val = kfifo_out_locked %d...!\n", ret_val);
		if(ret_val > 0)
		{
			/* Push each dequeued byte to the transmit register. */
			for(i = 0; i < ret_val ; i++)
				outb(lk_buff[i], base_addr+0x00);
		}
		/* Space was freed in the write fifo — wake blocked writers. */
		wake_up_interruptible (&(dev->write_queue));
		irq_r_flag |= IRQ_HANDLED;
	}

	/* LSR bit 0 set: receive data ready. */
	if(inb(base_addr+0x05) & 0x01) //comparing for Rx interrupt
	{
		printk("\n>in irqreturn_t Rx interrupt...!\n");
		/* NOTE(review): reads up to 16 bytes into lk_buff, which is
		 * HW_FIFO_SIZE long — assumes HW_FIFO_SIZE >= 16; confirm. */
		for(j=0;j<16;j++)
		{
			if(inb(base_addr+0x05) & 0x01)
			{
				lk_buff[j] = inb(base_addr+0x00);
			}
			else
				break;
		}
		//copying the data from local buffer onto the kfifo buffer
		if(kfifo_is_full(&(dev->read_kfifo)))
			dropped += j;	/* account bytes discarded on overflow */
		else
		{
			kfifo_in_locked(&(dev->read_kfifo),lk_buff,j,&(dev->rd_spinlock));
			received += j;
		}
		wake_up_interruptible(&(dev->read_queue));
		irq_r_flag |= IRQ_HANDLED;
	}
	return irq_r_flag;
}
/*
 * uart_device_write() - used for writing to the device buffer
 * from the userspace buffer.
 *
 * Copies user data into the device's TX kfifo (blocking until space is
 * available unless O_NONBLOCK) and enables the THRE interrupt so the ISR
 * drains the fifo to the hardware.
 *
 * Fixes over the previous version:
 *  - get_user() was called while holding write_lock with IRQs disabled;
 *    a user page fault there can sleep/deadlock.  Data is now copied
 *    into a bounce buffer BEFORE the spinlock is taken.
 *  - the function returned @size even when only part of it was queued,
 *    silently losing the tail; it now returns the bytes actually queued.
 *
 * Returns bytes queued, -EAGAIN (non-blocking, fifo full), -ERESTARTSYS
 * (interrupted), or -EFAULT (bad user buffer with nothing yet queued).
 */
static ssize_t uart_device_write(struct file *file,
		const char __user *user_buffer, size_t size, loff_t *offset)
{
	unsigned char chunk_buf[64];	/* bounce buffer, bounded stack use */
	size_t total = 0;
	unsigned long flags;
	unsigned char ier;
	struct uart_device_data *dev_data =
		(struct uart_device_data *)file->private_data;

	if (file->f_flags & O_NONBLOCK) {
		/* non-blocking: fail immediately when there is no space */
		if (kfifo_is_full(&dev_data->write_buffer))
			return -EAGAIN;
	} else {
		/*
		 * wait until there is available space in write buffer
		 */
		if (wait_event_interruptible(dev_data->write_wq,
				!kfifo_is_full(&dev_data->write_buffer))) {
			return -ERESTARTSYS;
		}
	}

	while (total < size) {
		size_t chunk = size - total;
		unsigned int pushed;

		if (chunk > sizeof(chunk_buf))
			chunk = sizeof(chunk_buf);

		/* copy_from_user() may fault/sleep: do it OUTSIDE the lock */
		if (copy_from_user(chunk_buf, user_buffer + total, chunk))
			return total ? (ssize_t)total : -EFAULT;

		spin_lock_irqsave(&dev_data->write_lock, flags);
		/* kfifo_in() copies at most the fifo's free space */
		pushed = kfifo_in(&dev_data->write_buffer, chunk_buf, chunk);
		spin_unlock_irqrestore(&dev_data->write_lock, flags);

		total += pushed;
		if (pushed < chunk)
			break;		/* fifo is full — stop early */
	}

	/*
	 * enable Transmitter Holding Register Empty Interrupt
	 */
	ier = inb(dev_data->base_addr + IER) | (0x2);
	outb(ier, dev_data->base_addr + IER);

	return total;
}
static bool can_write(struct kfifo *queue){ return !kfifo_is_full(queue);}
irqreturn_t interrupt_handler(int irq_no, void *data) { int device_status; uint32_t device_port = 0x0; /* * TODO: Write the code that handles a hardware interrupt. * TODO: Populate device_port with the port of the correct device. */ int ret = IRQ_HANDLED; if(irq_no == COM1_IRQ) { device_port = COM1_BASEPORT; } else if(irq_no == COM2_IRQ) { device_port = COM2_BASEPORT; } if(device_port) { disable_irq(irq_no); device_status = uart16550_hw_get_device_status(device_port); struct task_struct **task_user_get_data = ftask_user_get_data(data); struct task_struct **task_user_push_data = ftask_user_push_data(data); struct kfifo * data_from_user = fdata_from_user (data); struct kfifo * data_from_device = fdata_from_device (data); while (uart16550_hw_device_can_send(device_status) && !kfifo_is_empty(data_from_user)) { uint8_t byte_value; /* * TODO: Populate byte_value with the next value * from the kernel device outgoing buffer. */ kfifo_get(data_from_user,&byte_value); if(*task_user_push_data) wake_up_process(*task_user_push_data); uart16550_hw_write_to_device(device_port, byte_value); device_status = uart16550_hw_get_device_status(device_port); } while (uart16550_hw_device_has_data(device_status) && !kfifo_is_full(data_from_device)) { uint8_t byte_value; byte_value = uart16550_hw_read_from_device(device_port); /* * TODO: Store the read byte_value in the kernel device * incoming buffer. */ kfifo_put(data_from_device,byte_value); if(*task_user_get_data) wake_up_process(*task_user_get_data); device_status = uart16550_hw_get_device_status(device_port); } enable_irq(irq_no); } else { ret = -1; } return ret; }