/**
 * Thread function that takes the TCP/IP packet data stored in the KFIFO and writes it into the FIFO file named read_sense2.
 */
static int thread_loop(void *data)
{
	int i;
	char cval[SIZE];
	
	printk(KERN_INFO "stauts = %d\n", kthread_should_stop());
	while(!kthread_should_stop()) {
		//udelay((unsigned long) 10000);
		msleep(1);
		
		if(flag == 1) {
			msleep(3500);
			flag = 0;
			continue;
		}

		memset(cval, 0, sizeof(cval));
		if(get_kfifo(cval) == 0) {
			continue;
		}
		printk(KERN_INFO "out other %s\n", cval);
		printk(KERN_INFO "queue available  : %u\n", kfifo_avail(&fifo));
		send_myfifo("read_sense", cval);
	}
	printk(KERN_INFO "queue available : %u\n", kfifo_avail(&fifo));
	printk(KERN_INFO "thread_end\n");

	return 0;
}
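The helper functions get_kfifo() and send_myfifo() are part of this module but not shown in the excerpt. As a rough, hypothetical sketch, send_myfifo() presumably just writes the buffer to the struct file opened on the named pipe in init_fifo_test() below; on the older kernels this code targets, that can be done with vfs_write() under set_fs(KERNEL_DS):

/* Hypothetical sketch only; the real send_myfifo() is not shown in this example. */
static void send_myfifo(const char *name, char *buf)
{
	mm_segment_t old_fs;
	loff_t pos = 0;

	if (IS_ERR_OR_NULL(filp))	/* filp is opened on /tmp/read_sense2 below */
		return;

	old_fs = get_fs();
	set_fs(KERNEL_DS);		/* allow vfs_write() to take a kernel buffer */
	vfs_write(filp, buf, strlen(buf), &pos);
	set_fs(old_fs);
}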
/**
 * Function that creates two kernel queues (kfifos) and runs each one in its own thread.
 */
int __init init_fifo_test(void)
{
	unsigned int i;
	
	printk("fifo start\n");
	filp1 = filp_open("/tmp/read_sense", O_WRONLY, S_IRUSR|S_IWUSR);
	if (IS_ERR(filp1)) {
		const int open_errno = -PTR_ERR(filp1);
        printk("fifo1 open error: %s\n errno=%d", __FUNCTION__, open_errno);
		return 0;
	} else {
		printk("fifo1 open success\n");
	}

	filp = filp_open("/tmp/read_sense2", O_WRONLY, S_IRUSR|S_IWUSR);
	if (IS_ERR(filp)) {
		const int open_errno = -PTR_ERR(filp);
        printk("fifo open error: %s\n errno=%d", __FUNCTION__, open_errno);
		return 0;
	} else {
		printk("fifo open success\n");
	}
	
	printk("fifo1 module insert-----------\n");
	if( kfifo_alloc(&fifo1, FIFOSIZE, GFP_KERNEL) ) {
		printk(KERN_WARNING "fifo1 error kfifo_alloc1 \n");
		return -ENOMEM;
	}

	printk("fifo module insert-----------\n");
	if( kfifo_alloc(&fifo, FIFOSIZE, GFP_KERNEL) ) {
		printk(KERN_WARNING "fifo error kfifo_alloc \n");
		return -ENOMEM;
	}

	printk(KERN_INFO "queue1 size : %u \n", kfifo_size(&fifo1));
	printk(KERN_INFO "queue size : %u \n", kfifo_size(&fifo));

	printk(KERN_INFO "queue1 available : %u\n", kfifo_avail(&fifo1));
	printk(KERN_INFO "queue available : %u\n", kfifo_avail(&fifo));

#if 1
	printk(KERN_INFO "thread_start1\n");
	t_id1 = (struct task_struct *)kthread_run(thread_loop1, NULL, "%s", "create_test1");
	
	printk(KERN_INFO "thread_start\n");
	t_id = (struct task_struct *)kthread_run(thread_loop, NULL, "%s", "create_test");
#endif
	return 0;

}
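The module's exit path is not included in this excerpt. A minimal sketch of the cleanup it would need (the function name is an assumption): stop both threads, free both kfifos, and close the two pipe files.

/* Hypothetical cleanup path matching init_fifo_test() above. */
void __exit exit_fifo_test(void)
{
	/* stop the worker threads first so nothing touches the fifos afterwards */
	if (!IS_ERR_OR_NULL(t_id1))
		kthread_stop(t_id1);
	if (!IS_ERR_OR_NULL(t_id))
		kthread_stop(t_id);

	kfifo_free(&fifo1);
	kfifo_free(&fifo);

	if (!IS_ERR_OR_NULL(filp1))
		filp_close(filp1, NULL);
	if (!IS_ERR_OR_NULL(filp))
		filp_close(filp, NULL);

	printk("fifo end\n");
}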
Example #3
/* Placing data into the read FIFO is done through sysfs */
static ssize_t sys_add_to_fifo(struct device* dev, struct device_attribute* attr, const char* buf, size_t count)
{
	unsigned int copied;

	dbg("");
	if (kfifo_avail(&parrot_msg_fifo) < count) {
		warn("not enough space left on fifo\n");
		return -ENOSPC;
	}
	if ((parrot_msg_idx_wr+1)%PARROT_MSG_FIFO_MAX == parrot_msg_idx_rd) {
		/* We've looped into our message length table */
		warn("message length table is full\n");
		return -ENOSPC;
	}

	/* The buffer is already in kernel space, so no need for ..._from_user() */
	copied = kfifo_in(&parrot_msg_fifo, buf, count);
	parrot_msg_len[parrot_msg_idx_wr] = copied;
	if (copied != count) {
		warn("short write detected\n");
	}
	parrot_msg_idx_wr = (parrot_msg_idx_wr+1)%PARROT_MSG_FIFO_MAX;

	return copied;
}
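The read side is not part of this excerpt. A sketch of how a read() handler might pop exactly one message, using the length recorded in parrot_msg_len[] (the handler name is illustrative):

static ssize_t parrot_device_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	unsigned int copied = 0;
	int ret;

	if (parrot_msg_idx_rd == parrot_msg_idx_wr)
		return 0;	/* no message queued */

	/* pop exactly one message, whose length was recorded by sys_add_to_fifo() */
	ret = kfifo_to_user(&parrot_msg_fifo, buf,
			    parrot_msg_len[parrot_msg_idx_rd], &copied);
	if (ret)
		return ret;

	parrot_msg_idx_rd = (parrot_msg_idx_rd + 1) % PARROT_MSG_FIFO_MAX;
	return copied;
}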
Example #4
int usb_serial_generic_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	unsigned long flags;
	int room = 0;

	dbg("%s - port %d", __func__, port->number);

	if (!port->bulk_out_size)
		return 0;

	spin_lock_irqsave(&port->lock, flags);
	if (serial->type->max_in_flight_urbs) {
		if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
			room = port->bulk_out_size *
				(serial->type->max_in_flight_urbs -
				 port->urbs_in_flight);
	} else {
		room = kfifo_avail(&port->write_fifo);
	}
	spin_unlock_irqrestore(&port->lock, flags);

	dbg("%s - returns %d", __func__, room);
	return room;
}
Example #5
unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
{
	if (recsize == 0)
		return kfifo_avail(fifo);

	return __kfifo_peek_n(fifo, recsize);
}
Example #6
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct omap_mbox_queue *mq = mbox->txq;
	int ret = 0, len;

	spin_lock_bh(&mq->lock);

	if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
		ret = -ENOMEM;
		goto out;
	}

	if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) {
		mbox_fifo_write(mbox, msg);
		goto out;
	}

	len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
	WARN_ON(len != sizeof(msg));

	tasklet_schedule(&mbox->txq->tasklet);

out:
	spin_unlock_bh(&mq->lock);
	return ret;
}
Example #7
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	mbox_msg_t msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));

		if (mbox->ops->type == OMAP_MBOX_TYPE1)
			break;
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}
Example #8
static long rpmsg_dev_ioctl(struct file *filp, unsigned int cmd,
                            unsigned long arg)
{
    unsigned int tmp;
    struct _rpmsg_device *_prpmsg_device = (struct _rpmsg_device *)filp->private_data;
    struct _rpmsg_params *local = ( struct _rpmsg_params *)&_prpmsg_device->rpmsg_params;

    switch (cmd)
    {
    case IOCTL_CMD_GET_KFIFO_SIZE:
        tmp = kfifo_size(&local->rpmsg_kfifo);
        if (copy_to_user((unsigned int *)arg, &tmp, sizeof(int)))
            return -EACCES;
        break;

    case IOCTL_CMD_GET_AVAIL_DATA_SIZE:
        tmp = kfifo_len(&local->rpmsg_kfifo);
        pr_info("kfifo len ioctl = %d ", kfifo_len(&local->rpmsg_kfifo));
        if (copy_to_user((unsigned int *)arg, &tmp, sizeof(int)))
            return -EACCES;
        break;
    case IOCTL_CMD_GET_FREE_BUFF_SIZE:
        tmp = kfifo_avail(&local->rpmsg_kfifo);
        if (copy_to_user((unsigned int *)arg, &tmp, sizeof(int)))
            return -EACCES;
        break;

    default:
        return -EINVAL;
    }

    return 0;
}
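From user space, these ioctls would be exercised roughly as follows; the device node path and the header providing the IOCTL_CMD_* definitions are assumptions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "rpmsg_ioctl.h"	/* hypothetical header defining the IOCTL_CMD_* numbers */

int main(void)
{
	unsigned int total, used, room;
	int fd = open("/dev/rpmsg0", O_RDWR);	/* device node name is illustrative */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_CMD_GET_KFIFO_SIZE, &total) == 0 &&
	    ioctl(fd, IOCTL_CMD_GET_AVAIL_DATA_SIZE, &used) == 0 &&
	    ioctl(fd, IOCTL_CMD_GET_FREE_BUFF_SIZE, &room) == 0)
		printf("kfifo: %u bytes total, %u used, %u free\n", total, used, room);
	close(fd);
	return 0;
}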
Example #9
/**
 * kfifo_from_user - puts some data from user space into the FIFO
 * @fifo: the fifo to be used.
 * @from: pointer to the data to be added.
 * @len: the length of the data to be added.
 *
 * This function copies at most @len bytes from @from into the FIFO,
 * depending on the free space, and returns the number of copied bytes.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
unsigned int kfifo_from_user(struct kfifo *fifo,
	const void __user *from, unsigned int len)
{
	len = min(kfifo_avail(fifo), len);
	len -= __kfifo_from_user_data(fifo, from, len, 0);
	__kfifo_add_in(fifo, len);
	return len;
}
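A typical caller of this older kfifo_from_user() is a character-device write handler; note that this variant returns the number of copied bytes directly, while the newer macro-based API returns an error code and reports the count through a pointer (as prod_write() further down this page does). A minimal, hypothetical sketch:

static ssize_t my_dev_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	/* my_fifo is a struct kfifo set up elsewhere (illustrative name) */
	unsigned int copied = kfifo_from_user(&my_fifo, buf, count);

	/* kfifo_from_user() silently limits the copy to kfifo_avail() bytes */
	return copied ? copied : -ENOSPC;
}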
Example #10
/**
 * kfifo_in - puts some data into the FIFO
 * @fifo: the fifo to be used.
 * @from: the data to be added.
 * @len: the length of the data to be added.
 *
 * This function copies at most @len bytes from the @from buffer into
 * the FIFO depending on the free space, and returns the number of
 * bytes copied.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
unsigned int kfifo_in(struct kfifo *fifo, const void *from, unsigned int len)
{
	len = min(kfifo_avail(fifo), len);

	kfifo_in_data(fifo, from, len, 0);
	kfifo_add_in(fifo, len);
	return len;
}
Example #11
unsigned int __kfifo_from_user_n(struct kfifo *fifo,
	const void __user *from, unsigned int len, unsigned int recsize)
{
	if (kfifo_avail(fifo) < len + recsize)
		return len + 1;

	return __kfifo_from_user_data(fifo, from, len, recsize);
}
Example #12
unsigned int __kfifo_in_n(struct kfifo *fifo,
	const void *from, unsigned int len, unsigned int recsize)
{
	if (kfifo_avail(fifo) < len + recsize)
		return len + 1;

	__kfifo_in_data(fifo, from, len, recsize);
	return 0;
}
Example #13
/*****************************************************************************
* FUNCTION
*  btif_tx_irq_handler
* DESCRIPTION
*  lower level tx interrupt handler 
* PARAMETERS
* p_btif    [IN]        pointer to the BTIF module's information structure
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
static int btif_tx_irq_handler (P_MTK_BTIF_INFO_STR p_btif)
{
    int i_ret = -1;
#if NEW_TX_HANDLING_SUPPORT
    int how_many = 0;
    unsigned int lsr;
	unsigned int ava_len = 0;
	unsigned int base = p_btif->base;
	char local_buf[BTIF_TX_FIFO_SIZE];
	char *p_data = local_buf;
	unsigned long flag = 0;
	
    struct kfifo *p_tx_fifo = p_btif->p_tx_fifo;
	
	/*read LSR and check THRE or TEMT; if either is 1, the hardware can accept tx data*/
	lsr = BTIF_READ32(BTIF_LSR(base));
	
	if (lsr & BTIF_LSR_TEMT_BIT)
	{
		/*transmitter is completely empty, so we can write the full Tx FIFO size to BTIF*/
		ava_len = BTIF_TX_FIFO_SIZE;
	}
	else if (lsr & BTIF_LSR_THRE_BIT)
	{
		/*Tx holding register is empty, so we can write (Tx FIFO size - Tx threshold) bytes to BTIF*/
		ava_len = BTIF_TX_FIFO_SIZE - BTIF_TX_FIFO_THRE;
	}else
	{
		/*data in the tx FIFO already exceeds the Tx threshold; do not write to THR now*/
		ava_len = 0;
		goto ret;
	}
	spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flag);
	how_many = kfifo_out(p_tx_fifo, local_buf, ava_len);
	spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flag);
	BTIF_DBG_FUNC("BTIF tx size %d done, left:%d\n", how_many, kfifo_avail(p_tx_fifo));
    while (how_many--)
    {
        btif_reg_sync_writeb(*(p_data++), BTIF_THR(base));
    }
	
	spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flag);
	/*clear Tx enable flag if necessary*/
	if (kfifo_is_empty(p_tx_fifo)){
        hal_btif_tx_ier_ctrl(p_btif, false);
		BTIF_DBG_FUNC("BTIF tx FIFO is empty\n");
	}
	spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flag);
ret:
#else
    /*clear Tx enable flag*/
    hal_btif_tx_ier_ctrl(p_btif, false);
#endif
    i_ret = 0;
    return i_ret;
}
/**
 * Function that inserts TCP/IP data into the KFIFO. (Exported as a symbol so that other kernel modules can call it.)
 */
int set_kfifo(char *msg)
{
	//printk("get msg = %s\n", msg);
	if(kfifo_avail(&fifo) < SIZE)
		return 0;

	kfifo_in(&fifo, msg, SIZE);

	return 1;
}
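The matching consumer, get_kfifo(), is called by thread_loop() above but not shown. Judging from how it is used (0 means nothing to read, otherwise it fills a SIZE-byte buffer), it presumably looks roughly like this sketch:

/* Hypothetical counterpart of set_kfifo(); the real function is not shown. */
int get_kfifo(char *msg)
{
	/* no complete record queued yet: tell the caller to retry later */
	if (kfifo_len(&fifo) < SIZE)
		return 0;

	return kfifo_out(&fifo, msg, SIZE);
}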
Example #15
/*
 * Calculate how much is left in device
 * This method is called by the upper tty layer.
 *   #according to sources N_TTY.c it expects a value >= 0 and
 *    does not check for negative values.
 *
 * If the port is unplugged report lots of room and let the bits
 * dribble away so we don't block anything.
 */
static int ntty_write_room(struct tty_struct *tty)
{
	struct port *port = tty->driver_data;
	int room = 4096;
	const struct nozomi *dc = get_dc_by_tty(tty);

	if (dc)
		room = kfifo_avail(&port->fifo_ul);

	return room;
}
Example #16
ssize_t prod_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *f_pos)
{
	int ret;
	int copied;
	pr_info("%s() : FIFO size = %d, count = %d\n", __func__,
			(int)kfifo_len(&fifo), (int)count);
	if (down_interruptible(&prod_sem))
		return -ERESTARTSYS;
	while ((int)kfifo_avail(&fifo) <= 0) { /* full */
		up(&prod_sem);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		pr_info("%s() : \"%s\" going to sleep\n", __func__,
				current->comm);
		if (wait_event_interruptible(prod_que,\
					(((int)kfifo_avail(&fifo)) > 0))) {
			pr_info("%s() wait_event_interruptible() : signal: "
				"tell the fs layer to handle it\n", __func__);
			return -ERESTARTSYS;
			/* signal: inform the fs layer to handle it */
		}
		if (down_interruptible(&prod_sem))
			return -ERESTARTSYS;
		pr_info("%s() : \"%s\" waken from sleep\n", __func__,
				current->comm);
	}
	count = min((int)count, (int)kfifo_avail(&fifo));
	pr_info("%s() : \"%s\" data to copy = %li bytes\n",
		__func__, current->comm, (long)count);
	ret = kfifo_from_user(&fifo, buf, count, &copied);
	up(&prod_sem);
	if (ret < 0)
		return -EFAULT;
	pr_info("%s() : \"%s\" copied %d bytes.FIFO new SIZE = %d\n", __func__,
				current->comm, copied, (int)kfifo_len(&fifo));
	pr_info("%s() : \"%s\" waking up consumer processes\n", __func__,
				current->comm);
	wake_up_interruptible(&cons_que);
	return ret ? ret : copied;
}
/**
 * Thread function that takes the ARP packet data stored in the KFIFO and writes it into the FIFO file named read_sense.
 */
static int thread_loop1(void *data)
{
	int i;
	char cval[SIZE];
	
	printk(KERN_INFO "stauts1 = %d\n", kthread_should_stop());
	while(!kthread_should_stop()) {
		//udelay((unsigned long) 10000);
		msleep(1);

		memset(cval, 0, sizeof(cval));
		if(get_kfifo1(cval) == 0) {
			continue;
		}
		printk(KERN_INFO "out arp: %s\n", cval);
		printk(KERN_INFO "queue available 1 : %u\n", kfifo_avail(&fifo1));
		send_myfifo1("read_sense", cval);
	}
	printk(KERN_INFO "queue available1 : %u\n", kfifo_avail(&fifo1));
	printk(KERN_INFO "thread_end1\n");

	return 0;
}
Example #18
static int nullmodem_write_room(struct tty_struct *tty)
{
	struct nullmodem_end *end = tty->driver_data;
	int room = 0;

	if (tty->stopped)
	{
		dprintf("%s - #%d --> %d (tty stopped)\n", __FUNCTION__, tty->index, room);
		return 0;
	}
	room = kfifo_avail(&end->fifo);
	//dprintf("%s - #%d --> %d\n", __FUNCTION__, tty->index, room);
	return room;
}
/* Invoked on write() to the /proc entry */
static ssize_t fifoproc_write(struct file *filp, const char __user *buff, size_t len, loff_t *off){
	char kbuffer[MAX_KBUF]="";
	int escrito;

	/*if (off>0)
		return 0;*/

	if (len> MAX_ITEMS_FIFO || len> MAX_KBUF) { return -ENOSPC;}
	if (copy_from_user(kbuffer,buff,len)) { return -EFAULT;} // USE ..._to_user ON OUTPUT
	kbuffer[len] = '\0';
	*off += len;

	if (down_interruptible(&mtx))
		return -EINTR;

	/* Wait until there is room to insert (there must be consumers) */
	while (kfifo_avail(&fifobuff)<len && cons_count>0){
		nr_prod_waiting++;
		up(&mtx); /* "release" the mutex */
		
		/* Block on the queue */
		if (down_interruptible(&sem_prod)){
			down(&mtx);
			nr_prod_waiting--;
			up(&mtx);
			
			return -EINTR;
		}
		
		/* "Adquiere" el mutex */
		if (down_interruptible(&mtx))
			return -EINTR;
	}

	/* Detect communication terminated by error (the consumer closed the FIFO first) */
	if (cons_count==0) {up(&mtx); return -EPIPE;}

	escrito = kfifo_in(&fifobuff,kbuffer,len);

	/* Wake up a possibly blocked consumer */
	if (nr_cons_waiting>0) {
		/* wake one of the blocked threads */
		up(&sem_cons);
		nr_cons_waiting--;
	}

	up(&mtx); 
	return len;
}
static int _get_btif_tx_fifo_room(P_MTK_BTIF_INFO_STR p_btif_info)
{
    int i_ret = 0;
    unsigned long flag = 0;

    spin_lock_irqsave(&(p_btif_info->tx_fifo_spinlock), flag);
    if (NULL == p_btif_info->p_tx_fifo) {
        i_ret = 0;
    } else {
        i_ret = kfifo_avail(p_btif_info->p_tx_fifo);
    }
    spin_unlock_irqrestore(&(p_btif_info->tx_fifo_spinlock), flag);
    BTIF_DBG_FUNC("tx kfifo:0x%x, available room:%d\n", i_ret);
    return i_ret;
}
Example #21
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (kfifo_avail(&dev->raw->kfifo) >= sizeof(*ev))
		kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev));
	else
		return -ENOMEM;

	return 0;
}
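A receiver driver would normally call this from its interrupt handler, once per measured edge, and then kick the decoders with ir_raw_event_handle(). A sketch assuming a simple GPIO-based receiver (the GPIO name and the duration variable are illustrative, and the unit of ir_raw_event.duration depends on the kernel version):

static irqreturn_t my_ir_rx_isr(int irq, void *dev_id)
{
	struct rc_dev *rcdev = dev_id;
	struct ir_raw_event ev = {};

	ev.pulse = gpio_get_value(MY_RX_GPIO);	/* true for a pulse, false for a space */
	ev.duration = duration_since_last_edge;	/* measured by the driver (illustrative) */

	ir_raw_event_store(rcdev, &ev);
	ir_raw_event_handle(rcdev);		/* wake the decoding state machines */

	return IRQ_HANDLED;
}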
Example #22
int usb_serial_generic_write_room(struct tty_struct *tty)
{
    struct usb_serial_port *port = tty->driver_data;
    unsigned long flags;
    int room;

    if (!port->bulk_out_size)
        return 0;

    spin_lock_irqsave(&port->lock, flags);
    room = kfifo_avail(&port->write_fifo);
    spin_unlock_irqrestore(&port->lock, flags);

    dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
    return room;
}
Example #23
static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = kfifo_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}
int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
{
	struct smux_pkt_t *send_pkt;
	unsigned long flags;
	int i;
	int ret;

	send_pkt = smux_alloc_pkt();
	send_pkt->hdr = pkt_ptr->hdr;
	if (pkt_ptr->hdr.payload_len) {
		ret = smux_alloc_pkt_payload(send_pkt);
		if (ret) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(send_pkt->payload, pkt_ptr->payload,
				pkt_ptr->hdr.payload_len);
	}

	spin_lock_irqsave(&hw_fn_lock, flags);
	i = kfifo_avail(&smux_loop_pkt_fifo);
	if (i < sizeof(struct smux_pkt_t *)) {
		pr_err("%s: no space in fifo\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}

	i = kfifo_in(&smux_loop_pkt_fifo,
			&send_pkt,
			sizeof(struct smux_pkt_t *));
	if (i < 0) {
		pr_err("%s: fifo error\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}
	queue_work(smux_loopback_wq, &smux_loopback_work);
	ret = 0;

unlock:
	spin_unlock_irqrestore(&hw_fn_lock, flags);
out:
	return ret;
}
Example #25
static int iio_store_to_kfifo(struct iio_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	int ret;
	struct iio_kfifo *kf = iio_to_kfifo(r);
	if (kfifo_avail(&kf->kf) >= r->bytes_per_datum) {
		ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
		if (ret != r->bytes_per_datum)
			return -EBUSY;
	} else {
		return -ENOMEM;
	}
	r->stufftoread = true;
	wake_up_interruptible(&r->pollq);

	return 0;
}
Example #26
int demux_from_user(demux_t *dm, const char __user *buf, size_t count, struct mutex *mutex) {
        demux_chunk_t *dchunk = kmalloc(sizeof(demux_chunk_t), GFP_KERNEL);
        chunk_t *chunk;
        int ret = 0;
        demux_ent_t *ent = NULL;

        BUG_ON(dchunk == NULL);

        chunk = &dchunk->chunk;

        while(count >= sizeof(chunk_t)) {
                // the usermode program is going to operate on chunk_t structures
                // read these in
                if(copy_from_user(chunk, buf, sizeof(chunk_t))) {
                        kfree(dchunk);
                        return -EFAULT;
                }                

                ret += sizeof(chunk_t);
                count -= sizeof(chunk_t);

                // the kernel fifo operates on demux_chunk_t structures
                // which include tickets
                BUG_ON(chunk->processor_id >= NUM_CHUNK_PROC);
                ent = dm->entries + chunk->processor_id;
                dchunk->ticket = alloc_next_ticket(dm, chunk->thread_id);

                while(kfifo_avail(&ent->fifo) < sizeof(demux_chunk_t))
                        cond_wait(&ent->fifo_full_cond, mutex);
                
                if (PRINT_DEBUG) printk(KERN_CRIT "pushing chunk tid=%u, ticket=%llu\n", dchunk->chunk.thread_id, dchunk->ticket);
                
                kfifo_in(&ent->fifo, dchunk, sizeof(demux_chunk_t));
                if(kfifo_len(&ent->fifo) == sizeof(demux_chunk_t))
                        cond_broadcast(&dm->next_chunk_cond);
        }

        kfree(dchunk);

        return ret;
}
Example #27
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct omap_mbox_queue *mq = mbox->txq;
	int ret = 0, len;

	spin_lock(&mq->lock);

	if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
		ret = -ENOMEM;
		goto out;
	}

	len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
	WARN_ON(len != sizeof(msg));

	tasklet_schedule(&mbox->txq->tasklet);

out:
	spin_unlock(&mq->lock);
	return ret;
}
Example #28
static void rpmsg_proxy_dev_ept_cb(struct rpmsg_channel *rpdev, void *data,
                                   int len, void *priv, u32 src)
{

    struct _rpmsg_params *local = ( struct _rpmsg_params *)priv;

    while(mutex_lock_interruptible(&local->sync_lock));
    if (kfifo_avail(&local->rpmsg_kfifo) < len)
    {
        mutex_unlock(&local->sync_lock);
        return;
    }

    kfifo_in(&local->rpmsg_kfifo, data, (unsigned int)len);

    mutex_unlock(&local->sync_lock);

    /* Wake up any blocking contexts waiting for data */
    local->block_flag = 1;
    wake_up_interruptible(&local->usr_wait_q);

}
static inline bool have_buffer_to_read(void)
{
        return (kfifo_avail(&s_mipc_rx_cache_kfifo) >= MAX_MIPC_RX_FRAME_SIZE);
}
Example #30
static irqreturn_t rk3190_mbox_interrupt(int irq, void *p)
{
	struct rk3190_mbox *pmb = (struct rk3190_mbox *)p;
	unsigned long flags;
	spin_lock_irqsave(&pmb->lock, flags);
	while (1) { 
			unsigned long status, clear = 0;
			u32 msg_array[MBOX_CHAN_NUM] = {0};
			int msg_num = 0;
			int ch = 0;
	
			// get IRQ status
			status = rk3190_mbox_readl(pmb, MBOX_INTSTAT) & MBOX_CHAN_MASKBITS;
	
			if (status == 0) {
				break;
			}
	
			do {
				if (status & 0x1) {
					u32 seqno;
					u32 index;
	
					seqno = mbox_chan_read_data(pmb, ch);
					index = seqno - pmb->recv_seqno;
	
					if (index < MBOX_CHAN_NUM) {
						msg_array[index] = mbox_chan_read_cmd(pmb, ch);
						msg_num++;
	
					} else {
						WARN(1, "Mailbox Recieve Message sequence number error!\n");
						//reset seqno 
						pmb->send_seqno = 0;
						pmb->recv_seqno = seqno;
						index = seqno - pmb->recv_seqno;
						msg_array[index] = mbox_chan_read_cmd(pmb, ch);
						msg_num++;
					}
	
					clear |= (1 << ch); 
				}
	
				status >>= 1;
				ch++;
	
			} while (status);
	
			// update in sequence No.
			pmb->recv_seqno += msg_num;
	
			// notice : make sure clear IRQ status after data/cmd fetched.
			rk3190_mbox_writel(pmb, MBOX_INTSTAT, clear);
	
			//IPC_DBG(DEBUG_INFO,"%s %d status=%d clear=%d\n",__func__,__LINE__,status,clear);
			if (msg_num > 0) {
				int i;
	
				for (i=0; i<msg_num; i++) {
					// check soft FIFO full
					if (kfifo_avail(&pmb->in_fifo) < sizeof(u32)) {
						WARN(1, "Mailbox Receive FIFO full!\n");
						break;
					}
	
					kfifo_in(&pmb->in_fifo, (unsigned char*)&msg_array[i], sizeof(u32));
				}
			}
		}
	queue_work(pmb->mboxd, &pmb->work);
	spin_unlock_irqrestore(&pmb->lock, flags);
	return IRQ_HANDLED;
}