static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
					   struct dj_report *dj_report)
{
	/*
	 * Atomic context: invoked from the receiver tasklet with
	 * djrcv->lock held, so kfifo_in() needs no extra locking here.
	 */
	kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));

	/* A zero return only means the work item was already pending. */
	if (!schedule_work(&djrcv_dev->work))
		dbg_hid("%s: did not schedule the work item, was already "
			"queued\n", __func__);
}
/*
 * Module-init self test for the int-typed kfifo API: fills, drains and
 * verifies the fifo contents against expected_result[].  Returns 0 on
 * success, -EIO on any mismatch.
 */
static int __init testfunc(void)
{
	int		buf[6];
	int		i, j;
	unsigned int	ret;

	printk(KERN_INFO "int fifo test start\n");

	/* put values into the fifo */
	for (i = 0; i != 10; i++)
		kfifo_put(&test, &i);

	/* show the number of used elements */
	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* get max of 2 elements from the fifo */
	ret = kfifo_out(&test, buf, 2);
	printk(KERN_INFO "ret: %d\n", ret);
	/* and put it back to the end of the fifo */
	ret = kfifo_in(&test, buf, ret);
	printk(KERN_INFO "ret: %d\n", ret);

	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);

	/* put values into the fifo until is full */
	for (i = 20; kfifo_put(&test, &i); i++)
		;

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));

	/* show the first value without removing from the fifo */
	if (kfifo_peek(&test, &i))
		printk(KERN_INFO "%d\n", i);

	/* check the correctness of all values in the fifo */
	j = 0;
	while (kfifo_get(&test, &i)) {
		printk(KERN_INFO "item = %d\n", i);
		if (i != expected_result[j++]) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	/* every expected value must have been consumed exactly once */
	if (j != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @input_dev:	the struct input_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
{
	struct ir_input_dev *ir = input_get_drvdata(input_dev);

	if (!ir->raw)
		return -EINVAL;

	if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
Example #4
0
static int iio_store_to_kfifo(struct iio_buffer *r,
			      const void *data)
{
	struct iio_kfifo *kf = iio_to_kfifo(r);

	/* Record-mode kfifo: the third argument counts records, not bytes. */
	if (kfifo_in(&kf->kf, data, 1) != 1)
		return -EBUSY;

	/* Notify poll()ers that a new sample is readable. */
	wake_up_interruptible_poll(&r->pollq, POLLIN | POLLRDNORM);
	return 0;
}
/*
 * Copy one scan (bytes_per_datum bytes) into the kfifo backing this buffer.
 * Returns 0 on success, -EBUSY when the fifo cannot take a full scan.
 * The timestamp parameter is accepted but not used here.
 */
static int iio_store_to_kfifo(struct iio_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	int ret;
	/* Recover the kfifo wrapper from the generic buffer object. */
	struct iio_kfifo *kf = iio_to_kfifo(r);
	ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
	/* A short copy means the fifo is full. */
	if (ret != r->bytes_per_datum)
		return -EBUSY;
	/* Flag pending data and wake any poll()/read() waiters. */
	r->stufftoread = true;
	wake_up_interruptible(&r->pollq);
	return 0;
}
static void schedule_delayed_hidpp_init(struct hidpp_device *hidpp_dev)
{
	enum delayed_work_type work_type = HIDPP_INIT;

	/* Queue an INIT request for the deferred-work handler. */
	kfifo_in(&hidpp_dev->delayed_work_fifo, &work_type,
				sizeof(enum delayed_work_type));

	/* Zero return means the work item was already pending — not an error. */
	if (!schedule_work(&hidpp_dev->work))
		dbg_hid("%s: did not schedule the work item,"
			" was already queued\n",
			__func__);
}
Example #7
0
File: ir-raw.c Project: 7799/linux
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	/* Raw decoding may not be enabled on this device. */
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	/* Byte fifo: a short copy means the fifo is full. */
	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
/*
 * Append one sensorhub "library" event to the event ring and the pointer
 * kfifo.  Returns the number of queued events on success, negative errno on
 * failure.  NOTE(review): assumes callers serialize access to hub_data
 * (event_number and the fifo are updated non-atomically) — confirm.
 */
static int ssp_sensorhub_list(struct ssp_sensorhub_data *hub_data,
				char *dataframe, int length)
{
	struct sensorhub_event *event;
	int ret = 0;

	/* Sanity-check the payload size before touching any state. */
	if (unlikely(length <= 0 || length >= PAGE_SIZE)) {
		sensorhub_err("library length err(%d)", length);
		return -EINVAL;
	}

	ssp_sensorhub_log(__func__, dataframe, length);

	/* overwrite new event if list is full */
	if (unlikely(kfifo_is_full(&hub_data->fifo))) {
		/* Drop the oldest queued pointer to make room. */
		ret = kfifo_out(&hub_data->fifo, &event, sizeof(void *));
		if (unlikely(ret != sizeof(void *))) {
			sensorhub_err("kfifo out err(%d)", ret);
			return -EIO;
		}
		sensorhub_info("overwrite event");
	}

	/* allocate memory for new event (GFP_ATOMIC: may run in atomic ctx) */
	kfree(hub_data->events[hub_data->event_number].library_data);
	hub_data->events[hub_data->event_number].library_data
		= kzalloc(length * sizeof(char), GFP_ATOMIC);
	if (unlikely(!hub_data->events[hub_data->event_number].library_data)) {
		sensorhub_err("allocate memory for library err");
		return -ENOMEM;
	}

	/* copy new event into memory */
	memcpy(hub_data->events[hub_data->event_number].library_data,
		dataframe, length);
	hub_data->events[hub_data->event_number].library_length = length;

	/* add new event into the end of list (fifo stores the pointer) */
	event = &hub_data->events[hub_data->event_number];
	ret = kfifo_in(&hub_data->fifo, &event, sizeof(void *));
	if (unlikely(ret != sizeof(void *))) {
		sensorhub_err("kfifo in err(%d)", ret);
		return -EIO;
	}

	/* not to overflow max list capacity */
	if (hub_data->event_number++ >= LIST_SIZE - 1)
		hub_data->event_number = 0;

	/* Pointer-sized records, so bytes / sizeof(void *) == event count. */
	return kfifo_len(&hub_data->fifo) / sizeof(void *);
}
Example #9
0
/* Push one int into the client's kfifo; -ENOMEM when it is already full. */
static int push_data(emd_dev_client_t *client, int data)
{
    int copied;

    if (kfifo_is_full(&client->fifo)) {
        EMD_MSG_INF("chr","sub_dev%d kfifo full\n",client->sub_dev_id);
        return -ENOMEM;
    }

    EMD_MSG_INF("chr","push data=0x%08x into sub_dev%d kfifo\n",data,client->sub_dev_id);
    copied = kfifo_in(&client->fifo,&data,sizeof(int));
    /* Full was excluded above, so a short copy would indicate a bug. */
    WARN_ON(copied != sizeof(int));
    return sizeof(int);
}
Example #10
0
/**
 * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
 */
irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct inv_mpu6050_state *st = iio_priv(indio_dev);
	s64 timestamp;

	timestamp = iio_get_time_ns();
	/* Record-mode kfifo: queue one s64 timestamp record under the lock. */
	spin_lock(&st->time_stamp_lock);
	kfifo_in(&st->timestamps, &timestamp, 1);
	spin_unlock(&st->time_stamp_lock);

	/* Defer the actual sample read to the threaded IRQ handler. */
	return IRQ_WAKE_THREAD;
}
Example #11
0
/**
 * iscsi_tcp_cleanup_task - free tcp_task resources
 * @task: iscsi task
 *
 * must be called with session lock
 */
void iscsi_tcp_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct iscsi_r2t_info *r2t;

	/* nothing to do for mgmt */
	if (!task->sc)
		return;

	/* flush task's r2t queues */
	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
		/* Return each pending r2t descriptor to the free pool. */
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
	}

	/* Also recycle the r2t currently in progress, if any. */
	r2t = tcp_task->r2t;
	if (r2t != NULL) {
		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		tcp_task->r2t = NULL;
	}
}
/* Invoked on write() to the /proc entry: producer side of the FIFO. */
static ssize_t fifoproc_write(struct file *filp, const char __user *buff, size_t len, loff_t *off){
	char kbuffer[MAX_KBUF]="";
	int escrito;

	/*
	 * Reject writes that cannot possibly fit.  Note >= for MAX_KBUF:
	 * one byte must remain for the '\0' terminator below — the previous
	 * '>' test allowed len == MAX_KBUF, making kbuffer[len] write one
	 * byte past the end of the array.
	 */
	if (len > MAX_ITEMS_FIFO || len >= MAX_KBUF) { return -ENOSPC;}
	if (copy_from_user(kbuffer,buff,len)) { return -EFAULT;}
	kbuffer[len] = '\0';
	*off += len;

	if (down_interruptible(&mtx))
		return -EINTR;

	/* Wait until there is room to insert (requires live consumers). */
	while (kfifo_avail(&fifobuff)<len && cons_count>0){
		nr_prod_waiting++;
		up(&mtx); /* release the mutex before blocking */

		/* Block on the producers' wait queue */
		if (down_interruptible(&sem_prod)){
			down(&mtx);
			nr_prod_waiting--;
			up(&mtx);

			return -EINTR;
		}

		/* Reacquire the mutex and re-check the condition */
		if (down_interruptible(&mtx))
			return -EINTR;
	}

	/* Consumer closed the FIFO first: report broken pipe. */
	if (cons_count==0) {up(&mtx); return -EPIPE;}

	/* Room for len bytes was verified above while holding the mutex. */
	escrito = kfifo_in(&fifobuff,kbuffer,len);

	/* Wake one possibly-blocked consumer. */
	if (nr_cons_waiting>0) {
		up(&sem_cons);
		nr_cons_waiting--;
	}

	up(&mtx);
	return len;
}
Example #13
0
/* Module init: exercise the byte-oriented kfifo API and dump the results. */
static int __init queue_init(void)
{
	int          i;
	unsigned int ret;
	unsigned int val;

	printk(KERN_INFO "FIFO start\n");

	/* Allocate fifo storage; kfifo_alloc() returns 0 on success. */
	if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
		printk(KERN_WARNING "error kfifo\n");
		return -ENOMEM;
	}

	printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));

	/* Enqueue 4 raw bytes ("test"), then four ints as raw bytes. */
	kfifo_in(&fifo, "test", 4);

	for (i = 0; i < 4; i++)
		kfifo_in(&fifo, &i, sizeof(i));

	/* Read back the 4-byte string into the global buffer. */
	ret = kfifo_out(&fifo, buffer, 4);
	if (ret != 4)
		return -EINVAL;
	printk(KERN_INFO "%s\n", buffer);

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	/* Drain the remaining ints one sizeof(val)-byte record at a time. */
	while (!kfifo_is_empty(&fifo)) {
		ret = kfifo_out(&fifo, &val, sizeof(val));
		if (ret != sizeof(val))
			return -EINVAL;
		printk(KERN_INFO "%u\n", val);
	}

	return 0;
}
Example #14
0
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @input_dev:	the struct input_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
{
	struct ir_input_dev *ir = input_get_drvdata(input_dev);

	if (!ir->raw)
		return -EINVAL;

	/* Format fixed: "%05d" (zero-padded, width 5) — the previous
	 * "05%d" printed a literal "05" before the unpadded value. */
	IR_dprintk(2, "sample: (%05dus %s)\n",
		TO_US(ev->duration), TO_STR(ev->pulse));

	/* Byte fifo: a short copy means the fifo is full. */
	if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
Example #15
0
/* tty write(): copy bytes into this end's fifo; returns bytes accepted. */
static int nullmodem_write(struct tty_struct *tty, const unsigned char *buffer, int count)
{
	struct nullmodem_end *end = tty->driver_data;

	/* A stopped tty accepts nothing. */
	if (tty->stopped)
	{
		dprintf("%s - #%d %d bytes --> 0 (tty stopped)\n", __FUNCTION__, tty->index, count);
		return 0;
	}

	/* kfifo_in() copies as much as fits and returns that amount. */
	return kfifo_in(&end->fifo, buffer, count);
}
/* Workqueue handler: read one magnetometer sample and publish it. */
static void ami_work_func(struct work_struct *work)
{
	struct ami306_dev_data *pdev = container_of(work, struct ami306_dev_data, work);
	struct ami_sensor_value axis;

	/* AMI_GetValue() returns 0 on a successful read. */
	if (AMI_GetValue(pdev->handle, &axis) != 0)
		return;

	/* Store one record in the kfifo and wake any sleeping reader. */
	kfifo_in(&pdev->ebuff, &axis, 1);
	wake_up_interruptible(&pdev->waitq);
	if (mod_debug)
		printk("ami_work_func: pass(%d,%d, %d) to HAL\n", axis.mag[0], axis.mag[1], axis.mag[2]);
}
Example #17
0
File: main.c Project: Lyude/linux
/* Queue a firmware event for the main thread; safe from IRQ context. */
void lbs_queue_event(struct lbs_private *priv, u32 event)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->driver_lock, flags);

	/* Receiving any event implies the firmware woke up. */
	if (priv->psstate == PS_STATE_SLEEP)
		priv->psstate = PS_STATE_AWAKE;

	/* Store the raw u32 event code as bytes in the event fifo. */
	kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));

	wake_up(&priv->waitq);

	spin_unlock_irqrestore(&priv->driver_lock, flags);
}
Example #18
0
/* Feed one received character into the NMI tty fifo; runs in NMI context. */
static void kgdb_tty_recv(int ch)
{
	struct kgdb_nmi_tty_priv *priv;
	char c = ch;

	/* Ignore input when no port is registered or the read failed (<0). */
	if (!kgdb_nmi_port || ch < 0)
		return;
	/*
	 * Can't use port->tty->driver_data as tty might be not there. Timer
	 * will check for tty and will get the ref, but here we don't have to
	 * do that, and actually, we can't: we're in NMI context, no locks are
	 * possible.
	 */
	priv = container_of(kgdb_nmi_port, struct kgdb_nmi_tty_priv, port);
	/* Lockless single-byte insert, per the NMI constraint above. */
	kfifo_in(&priv->fifo, &c, 1);
}
Example #19
0
//u2k_write: user->kernel command path via the u2k fifo.
ssize_t  servicer_write(struct file *filp, const char __user *buffer, size_t size, loff_t *offset)
{
    int cmd ;

    /*
     * Bail out on a failed copy: continuing here would push an
     * UNINITIALIZED 'cmd' into the fifo (the old code only logged
     * the failure and carried on).
     */
    if (copy_from_user( &cmd, buffer, 4))
    {
        lidbg("copy_from_user ERR\n");
        return -EFAULT;
    }

    spin_lock_irqsave(&fifo_k2u_lock, flags_k2u);
    kfifo_in(&u2k_fifo, &cmd, sizeof(int));
    spin_unlock_irqrestore(&fifo_k2u_lock, flags_k2u);

    /* Signal the kernel-side waiter that a command has arrived. */
    complete(&u2k_com);

    return size;
}
Example #20
0
/* Drain TX_STA_FIFO into the software status fifo, then kick the tasklet. */
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
	u32 status;	/* raw TX_STA_FIFO register snapshot */
	int i;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * Furthermore we don't disable the TX_FIFO_STATUS
	 * interrupt here but leave it enabled so that the TX_STA_FIFO
	 * can also be read while the tx status tasklet gets executed.
	 *
	 * Since we have only one producer and one consumer we don't
	 * need to lock the kfifo.
	 */
	for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
		rt2x00pci_register_read(rt2x00dev, TX_STA_FIFO, &status);

		/* An invalid entry means the hardware fifo is drained. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		if (kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
			WARNING(rt2x00dev, "TX status FIFO overrun,"
				" drop tx status report.\n");
			break;
		}

		/* Defensive re-check: kfifo_in() reports how much it copied. */
		if (kfifo_in(&rt2x00dev->txstatus_fifo, &status,
			     sizeof(status)) != sizeof(status)) {
			WARNING(rt2x00dev, "TX status FIFO overrun,"
				"drop tx status report.\n");
			break;
		}
	}

	/* Schedule the tasklet for processing the tx status. */
	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
Example #21
0
/* Kernel->user command path: queue cmd and notify readers. */
void k2u_write(int cmd)
{
    /* SERVICER_DONOTHING is a no-op sentinel; drop it early. */
    if (cmd == SERVICER_DONOTHING)
        return;

    lidbg ("k2u_write=%d\n", cmd);

    /* Publish the command under the fifo lock. */
    spin_lock_irqsave(&fifo_k2u_lock, flags_k2u);
    kfifo_in(&k2u_fifo, &cmd, sizeof(int));
    spin_unlock_irqrestore(&fifo_k2u_lock, flags_k2u);

    /* Wake sleeping readers, then notify async listeners. */
    wake_up(&k2u_wait);

    if (fasync_queue)
        kill_fasync(&fasync_queue, SIGIO, POLL_IN);
}
/*
 * Copy a packet and queue it on the loopback fifo for the loopback worker.
 * Returns 0 on success, -ENOMEM on allocation failure or a full fifo.
 */
int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
{
	struct smux_pkt_t *send_pkt;
	unsigned long flags;
	int i;
	int ret;

	/* Make a private copy of the packet (header + optional payload). */
	send_pkt = smux_alloc_pkt();
	if (!send_pkt)
		return -ENOMEM;	/* old code dereferenced without checking */
	send_pkt->hdr = pkt_ptr->hdr;
	if (pkt_ptr->hdr.payload_len) {
		ret = smux_alloc_pkt_payload(send_pkt);
		if (ret) {
			/* NOTE(review): send_pkt itself is not released on
			 * the error paths (pre-existing); confirm whether a
			 * matching free routine should be called here. */
			ret = -ENOMEM;
			goto out;
		}
		memcpy(send_pkt->payload, pkt_ptr->payload,
				pkt_ptr->hdr.payload_len);
	}

	/* Queue the pointer to the copy for the loopback worker. */
	spin_lock_irqsave(&hw_fn_lock, flags);
	i = kfifo_avail(&smux_loop_pkt_fifo);
	if (i < sizeof(struct smux_pkt_t *)) {
		pr_err("%s: no space in fifo\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}

	i = kfifo_in(&smux_loop_pkt_fifo,
			&send_pkt,
			sizeof(struct smux_pkt_t *));
	if (i < 0) {
		pr_err("%s: fifo error\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}
	queue_work(smux_loopback_wq, &smux_loopback_work);
	ret = 0;

unlock:
	spin_unlock_irqrestore(&hw_fn_lock, flags);
out:
	return ret;
}
Example #23
0
/* Drain the APBridge debug log over the control pipe into the log fifo. */
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	/* SVC messages go down our control pipe */
	do {
		retval = usb_control_msg(es2->usb_dev,
					usb_rcvctrlpipe(es2->usb_dev, 0),
					GB_APB_REQUEST_LOG,
					USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
					0x00, 0x00,
					buf,
					APB1_LOG_MSG_SIZE,
					ES2_TIMEOUT);
		/* A positive return is the number of log bytes received. */
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}
Example #24
0
/* tty write(): buffer bytes into the port fifo and kick the USB TX path. */
static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	/* kfifo_in() may accept fewer than count bytes if the fifo fills. */
	if (count)
		count = kfifo_in(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}
/*
 * tty line-discipline receive hook: copy incoming UART bytes into the RX
 * kfifo and schedule the RX tasklet.  Bytes beyond the fifo's free space
 * are silently truncated by the fifo put (return value is not checked).
 */
static void stp_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
    unsigned int fifo_avail_len = LDISC_RX_FIFO_SIZE - kfifo_len(g_stp_uart_rx_fifo);
    unsigned int how_much_put = 0;
#if 0
    {
        struct timeval now;
        do_gettimeofday(&now);
        printk("[+STP][  ][R] %4d --> sec = %lu, --> usec --> %lu\n",
            count, now.tv_sec, now.tv_usec);
    }
#endif
//    write_lock(&g_stp_uart_rx_handling_lock);
    if(count > 2000){
        /*this is abnormal*/
        UART_ERR_FUNC("abnormal: buffer count = %d\n", count);
    }
    /*How much empty seat?*/
    if(fifo_avail_len > 0){
    //UART_INFO_FUNC ("fifo left(%d), count(%d)\n", fifo_avail_len, count);
        /* Old kfifo API before 2.6.35 used kfifo_put for byte buffers. */
        #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
        how_much_put = kfifo_put(g_stp_uart_rx_fifo,(unsigned char *) data, count);
            #else
            how_much_put = kfifo_in(g_stp_uart_rx_fifo,(unsigned char *) data, count);
            #endif

        /*schedule it!*/
        tasklet_schedule(&g_stp_uart_rx_fifo_tasklet);
    }else{
        UART_ERR_FUNC("stp_uart_tty_receive rxfifo is full!!\n");
    }

#if 0
    {
        struct timeval now;
        do_gettimeofday(&now);
        printk("[-STP][  ][R] %4d --> sec = %lu, --> usec --> %lu\n",
            count, now.tv_sec, now.tv_usec);
    }
#endif

//    write_unlock(&g_stp_uart_rx_handling_lock);

}
Example #26
0
/*
 * Allocate the IU pool and seed the queue's kfifo with a pointer to every
 * entry.  Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	int i;
	struct iu_entry *iue;

	/* Pointer array backing the kfifo, plus the entries themselves. */
	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
	/* Old kfifo API: kfifo_init() allocates and returns the fifo. */
	q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
			      GFP_KERNEL, &q->lock);
	if (IS_ERR(q->queue))
		goto free_item;
#else
	/* New API: the fifo is embedded and borrows q->pool as storage. */
	kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
#endif

	/* Push a pointer to each entry and attach its receive buffer. */
	for (i = 0, iue = q->items; i < max; i++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
		__kfifo_put(q->queue, (void *) &iue, sizeof(void *));
#else
		kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
#endif
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
free_item:
#endif
	kfree(q->items);
free_pool:
	kfree(q->pool);
	return -ENOMEM;
}
Example #27
0
// eemcs_sysmsg_rx_dispatch_cb: CCCI_SYSTEM_RX message dispatch call back function for MODEM
// @skb: pointer to a CCCI buffer
// @private_data: pointer to private data of CCCI_SYSTEM_RX
KAL_INT32 eemcs_sysmsg_rx_dispatch_cb(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;

	DBGLOG(SMSG, DBG, "====> %s", FUNC_NAME);  

    if (skb){
		p_cccih = (CCCI_BUFF_T *)skb->data;
		DBGLOG(SMSG, INF, "sysmsg RX callback, msg: %08X, %08X, %02d, %08X\n", p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);

        if (p_cccih->channel == CH_SYS_TX){
		    DBGLOG(SMSG, ERR, "Wrong CH for recv");
            return KAL_FAIL;
        }

        if (kfifo_is_full(&sysmsg_fifo))
        {
		    DBGLOG(SMSG, ERR, "kfifo full and packet drop, msg: %08X, %08X, %02d, %08X\n", \
				p_cccih->data[0], p_cccih->data[1], p_cccih->channel, p_cccih->reserved);
            dev_kfree_skb(skb);
            return KAL_FAIL;
        }
        
        spin_lock_bh(&sysmsg_fifo_lock);
    	/*
    	 * The fifo stores the skb POINTER itself.  The old code copied only
    	 * sizeof(unsigned int) bytes, which truncates the pointer on 64-bit
    	 * builds; use the pointer's own size so the full value is queued.
    	 * NOTE(review): the consumer must pop with the same record size.
    	 */
    	kfifo_in(&sysmsg_fifo, &skb, sizeof(skb));
    	spin_unlock_bh(&sysmsg_fifo_lock);
    	DBGLOG(SMSG, DBG, "schedule sysmsg_work");
       	schedule_work(&sysmsg_work);
        
    }
    else
    {
        DBGLOG(SMSG, ERR, "skb is NULL!");
        return KAL_FAIL;
    }

    DBGLOG(SMSG, DBG, "<==== %s", FUNC_NAME);  
    return KAL_SUCCESS;
}
Example #28
0
/*
 * Copy a stream of chunk_t records from user space and fan them out to the
 * per-processor demux fifos.  Returns the number of bytes consumed, or
 * -EFAULT when the user buffer is unreadable.  'mutex' must be held by the
 * caller (cond_wait() releases and reacquires it internally).
 */
int demux_from_user(demux_t *dm, const char __user *buf, size_t count, struct mutex *mutex) {
        demux_chunk_t *dchunk = kmalloc(sizeof(demux_chunk_t), GFP_KERNEL);
        chunk_t *chunk;
        int ret = 0;
        demux_ent_t *ent = NULL;

        BUG_ON(dchunk == NULL);

        /* chunk aliases the embedded chunk_t inside dchunk. */
        chunk = &dchunk->chunk;

        while(count >= sizeof(chunk_t)) {
                // the usermode program is going to operate on chunk_t structures
                // read these in
                if(copy_from_user(chunk, buf, sizeof(chunk_t))) {
                        /*
                         * Free the real allocation: 'chunk' points into the
                         * middle of dchunk, so the old kfree(chunk) handed an
                         * interior pointer to the allocator (undefined
                         * behavior / heap corruption).
                         */
                        kfree(dchunk);
                        return -EFAULT;
                }

                /*
                 * Advance past the record just consumed — the old code never
                 * moved 'buf', so every iteration re-read the same bytes.
                 */
                buf += sizeof(chunk_t);
                ret += sizeof(chunk_t);
                count -= sizeof(chunk_t);

                // the kernel fifo operates on demux_chunk_t structures
                // which include tickets
                BUG_ON(chunk->processor_id >= NUM_CHUNK_PROC);
                ent = dm->entries + chunk->processor_id;
                dchunk->ticket = alloc_next_ticket(dm, chunk->thread_id);

                /* Block (dropping 'mutex') until the target fifo has room. */
                while(kfifo_avail(&ent->fifo) < sizeof(demux_chunk_t))
                        cond_wait(&ent->fifo_full_cond, mutex);

                if (PRINT_DEBUG) printk(KERN_CRIT "pushing chunk tid=%u, ticket=%llu\n", dchunk->chunk.thread_id, dchunk->ticket);

                kfifo_in(&ent->fifo, dchunk, sizeof(demux_chunk_t));
                /* First element in a previously-empty fifo: wake consumers. */
                if(kfifo_len(&ent->fifo) == sizeof(demux_chunk_t))
                        cond_broadcast(&dm->next_chunk_cond);
        }

        kfree(dchunk);

        return ret;
}
Example #29
0
/* Queue a mailbox message for TX; -ENOMEM when the fifo has no room. */
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct omap_mbox_queue *mq = mbox->txq;
	int ret = 0, len;

	spin_lock(&mq->lock);

	/* Check-then-write: space is verified under the lock first. */
	if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
		ret = -ENOMEM;
		goto out;
	}

	len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
	/* Cannot be short after the avail check above. */
	WARN_ON(len != sizeof(msg));

	/* The actual hardware write happens in the TX tasklet. */
	tasklet_schedule(&mbox->txq->tasklet);

out:
	spin_unlock(&mq->lock);
	return ret;
}
Example #30
0
/* rpmsg endpoint callback: stash incoming data and wake blocked readers. */
static void rpmsg_proxy_dev_ept_cb(struct rpmsg_channel *rpdev, void *data,
                                   int len, void *priv, u32 src)
{

    struct _rpmsg_params *local = ( struct _rpmsg_params *)priv;

    /* NOTE(review): spinning until the interruptible lock succeeds swallows
     * signals; confirm this retry loop is intentional. */
    while(mutex_lock_interruptible(&local->sync_lock));
    /* Message is silently dropped when the fifo cannot hold it. */
    if (kfifo_avail(&local->rpmsg_kfifo) < len)
    {
        mutex_unlock(&local->sync_lock);
        return;
    }

    kfifo_in(&local->rpmsg_kfifo, data, (unsigned int)len);

    mutex_unlock(&local->sync_lock);

    /* Wake up any blocking contexts waiting for data */
    local->block_flag = 1;
    wake_up_interruptible(&local->usr_wait_q);

}