/*
 * ad799x_register_ring_funcs_and_init() - set up the software ring buffer
 * @indio_dev: IIO device to attach the ring to
 *
 * Allocates a software ring buffer, registers its access functions,
 * allocates the poll function and wires up the trigger-related ring
 * callbacks, then flags the device as capable of triggered ring capture.
 *
 * Returns 0 on success or a negative errno.
 */
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad799x_state *st = indio_dev->dev_data;
	int ret = 0;

	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->ring) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Effectively select the ring buffer implementation */
	/*
	 * NOTE(review): this goes through st->indio_dev rather than the
	 * indio_dev argument — presumably they are the same device; confirm.
	 */
	iio_ring_sw_register_funcs(&st->indio_dev->ring->access);
	/* Old-style pollfunc allocation: returns an errno, not a pointer */
	ret = iio_alloc_pollfunc(indio_dev, NULL, &ad799x_poll_func_th);
	if (ret)
		goto error_deallocate_sw_rb;

	/* Ring buffer functions - here trigger setup related */
	indio_dev->ring->preenable = &ad799x_ring_preenable;
	indio_dev->ring->postenable = &iio_triggered_ring_postenable;
	indio_dev->ring->predisable = &iio_triggered_ring_predisable;
	indio_dev->ring->scan_timestamp = true;

	/* Bottom half runs from a workqueue to move data into the ring */
	INIT_WORK(&st->poll_work, &ad799x_poll_bh_to_ring);
	indio_dev->ring->scan_el_attrs = st->chip_info->scan_attrs;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->ring);
error_ret:
	return ret;
}
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev) { int ret = 0; indio_dev->buffer = iio_kfifo_allocate(indio_dev); if (!indio_dev->buffer) { ret = -ENOMEM; goto error_ret; } indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &ad799x_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_kfifo; } /* Ring buffer functions - here trigger setup related */ indio_dev->setup_ops = &ad799x_buf_setup_ops; indio_dev->buffer->scan_timestamp = true; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_deallocate_kfifo: iio_kfifo_free(indio_dev->buffer); error_ret: return ret; }
/*
 * adis16201_configure_ring() - set up the software ring buffer
 * @indio_dev: IIO device to attach the buffer to
 *
 * Allocates a software ring buffer, selects its access functions and
 * setup ops, allocates the poll function, and flags the device as
 * capable of triggered buffered capture.
 *
 * Returns 0 on success or a negative errno.
 */
int adis16201_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->buffer = ring;
	/* Effectively select the ring buffer implementation */
	ring->bpe = 2;			/* bytes per element */
	ring->scan_timestamp = true;	/* timestamp each scan */
	ring->access = &ring_sw_access_funcs;
	ring->setup_ops = &adis16201_ring_setup_ops;
	ring->owner = THIS_MODULE;

	/* Top half stores a timestamp, bottom half grabs the data */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16201_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "adis16201_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->buffer);
	return ret;
}
int adis16209_configure_ring(struct iio_dev *indio_dev) { int ret = 0; struct iio_buffer *ring; ring = iio_sw_rb_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->buffer = ring; ring->scan_timestamp = true; indio_dev->setup_ops = &adis16209_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &adis16209_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_iio_sw_rb_free: iio_sw_rb_free(indio_dev->buffer); return ret; }
static int yas_probe_trigger(struct iio_dev *indio_dev) { int ret; struct yas_state *st = iio_priv(indio_dev); indio_dev->pollfunc = iio_alloc_pollfunc(&yas_iio_pollfunc_store_boottime, &yas_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_ret; } st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name, indio_dev->id); if (!st->trig) { ret = -ENOMEM; goto error_dealloc_pollfunc; } st->trig->dev.parent = &st->client->dev; st->trig->ops = &yas_trigger_ops; iio_trigger_set_drvdata(st->trig, indio_dev); ret = iio_trigger_register(st->trig); if (ret) goto error_free_trig; return 0; error_free_trig: iio_trigger_free(st->trig); error_dealloc_pollfunc: iio_dealloc_pollfunc(indio_dev->pollfunc); error_ret: return ret; }
static int yas_probe_trigger(struct iio_dev *indio_dev) { int ret; struct yas_state *st = iio_priv(indio_dev); indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &yas_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_ret; } st->trig = iio_allocate_trigger("%s-dev%d", indio_dev->name, indio_dev->id); printk("YAMAHA allocate_trigger[%s-dev%d]\n", indio_dev->name, indio_dev->id); if (!st->trig) { ret = -ENOMEM; goto error_dealloc_pollfunc; } st->trig->dev.parent = &st->client->dev; st->trig->ops = &yas_trigger_ops; st->trig->private_data = indio_dev; ret = iio_trigger_register(st->trig); printk("YAMAHA iio_trigger_register[%s] [%d]\n", st->trig->name, ret); if (ret) goto error_free_trig; return 0; error_free_trig: iio_free_trigger(st->trig); error_dealloc_pollfunc: iio_dealloc_pollfunc(indio_dev->pollfunc); error_ret: return ret; }
/*
 * ad7887_register_ring_funcs_and_init() - set up the software ring buffer
 * @indio_dev: IIO device to attach the ring to
 *
 * Allocates a software ring buffer, selects its access functions,
 * allocates the poll function and installs the trigger-related ring
 * setup ops, then flags the device as capable of triggered ring capture.
 *
 * Returns 0 on success or a negative errno.
 */
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	int ret;

	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->ring) {
		ret = -ENOMEM;
		goto error_ret;
	}
	/* Effectively select the ring buffer implementation */
	indio_dev->ring->access = &ring_sw_access_funcs;
	/* Top half stores a timestamp, bottom half grabs the data */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &ad7887_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "ad7887_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}
	/* Ring buffer functions - here trigger setup related */
	indio_dev->ring->setup_ops = &ad7887_ring_setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->ring);
error_ret:
	return ret;
}
static int optical_allocate_ring(struct iio_dev *indio_dev) //VungGV? { /*int ret; printk (KERN_ALERT "[%s]\n", __FUNCTION__); ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, &optical_trigger_handler, &optical_buffer_setup_ops); return ret;*/ int ret = 0; struct iio_ring_buffer *ring; ring = iio_sw_rb_allocate(indio_dev); if (!ring) { ret = -ENOMEM; return ret; } indio_dev->ring = ring; /* Effectively select the ring buffer implementation */ ring->access = &ring_sw_access_funcs; ring->bpe = 2; ring->scan_timestamp = true; ring->setup_ops = &optical_buffer_setup_ops; ring->owner = THIS_MODULE; /* Set default scan mode */ iio_scan_mask_set(ring, L1PH03_SCAN_PROX_X); iio_scan_mask_set(ring, L1PH03_SCAN_LIGHT_X); //iio_scan_mask_set(ring, ADIS16260_SCAN_AUX_ADC); //iio_scan_mask_set(ring, ADIS16260_SCAN_TEMP); //iio_scan_mask_set(ring, ADIS16260_SCAN_ANGL); indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &optical_trigger_handler, IRQF_ONESHOT, indio_dev, "lt1ph03_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_sw_rb_free; } indio_dev->modes |= INDIO_RING_TRIGGERED; return 0; error_iio_sw_rb_free: iio_sw_rb_free(indio_dev->ring); return ret; }
/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:		IIO device structure
 * @pollfunc_bh:	Function which will be used as pollfunc bottom half
 * @pollfunc_th:	Function which will be used as pollfunc top half
 * @setup_ops:		Buffer setup functions to use for this device.
 *			If NULL the default setup functions for triggered
 *			buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc, as well as register the buffer with the IIO core.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means that
 * this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	int ret;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_kfifo_free;
	}

	/* Ring buffer functions - here trigger setup related */
	if (setup_ops)
		indio_dev->setup_ops = setup_ops;
	else
		indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	/* Expose the declared channels through the buffer */
	ret = iio_buffer_register(indio_dev,
				  indio_dev->channels,
				  indio_dev->num_channels);
	if (ret)
		goto error_dealloc_pollfunc;

	return 0;

error_dealloc_pollfunc:
	iio_dealloc_pollfunc(indio_dev->pollfunc);
error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}
/*
 * adis16209_configure_ring() - set up the software ring buffer
 * @indio_dev: IIO device to attach the ring to
 *
 * Allocates a software ring buffer, selects its access functions and
 * setup ops, enables every channel in the default scan mask, allocates
 * the poll function, and flags the device as capable of triggered ring
 * capture.
 *
 * Returns 0 on success or a negative errno.
 */
int adis16209_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct iio_ring_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	ring->access = &ring_sw_access_funcs;
	ring->bpe = 2;			/* bytes per element */
	ring->scan_timestamp = true;	/* timestamp each scan */
	ring->setup_ops = &adis16209_ring_setup_ops;
	ring->owner = THIS_MODULE;

	/* Set default scan mode: enable all channels */
	iio_scan_mask_set(ring, ADIS16209_SCAN_SUPPLY);
	iio_scan_mask_set(ring, ADIS16209_SCAN_ACC_X);
	iio_scan_mask_set(ring, ADIS16209_SCAN_ACC_Y);
	iio_scan_mask_set(ring, ADIS16209_SCAN_AUX_ADC);
	iio_scan_mask_set(ring, ADIS16209_SCAN_TEMP);
	iio_scan_mask_set(ring, ADIS16209_SCAN_INCLI_X);
	iio_scan_mask_set(ring, ADIS16209_SCAN_INCLI_Y);
	iio_scan_mask_set(ring, ADIS16209_SCAN_ROT);

	/* Top half stores a timestamp, bottom half grabs the data */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16209_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}
	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->ring);
	return ret;
}
/*
 * adis16209_configure_ring() - set up the software ring buffer (legacy API)
 * @indio_dev: IIO device to attach the ring to
 *
 * Initializes the trigger-to-ring work item, enables every channel in
 * the default scan mask, allocates a software ring buffer, registers
 * its access functions and trigger-related callbacks, and allocates the
 * poll function.
 *
 * Returns 0 on success or a negative errno.
 */
int adis16209_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct adis16209_state *st = indio_dev->dev_data;
	struct iio_ring_buffer *ring;

	/* Bottom half runs from a workqueue to move data into the ring */
	INIT_WORK(&st->work_trigger_to_ring, adis16209_trigger_bh_to_ring);
	/* Set default scan mode: enable all channels */
	iio_scan_mask_set(indio_dev, iio_scan_el_supply.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_rot.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_accel_x.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_accel_y.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_temp.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_aux_adc.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_incli_x.number);
	iio_scan_mask_set(indio_dev, iio_scan_el_incli_y.number);

	indio_dev->scan_timestamp = true;	/* timestamp each scan */

	indio_dev->scan_el_attrs = &adis16209_scan_el_group;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	iio_ring_sw_register_funcs(&ring->access);
	ring->bpe = 2;			/* bytes per element */
	ring->preenable = &iio_sw_ring_preenable;
	ring->postenable = &iio_triggered_ring_postenable;
	ring->predisable = &iio_triggered_ring_predisable;
	ring->owner = THIS_MODULE;

	/* Old-style pollfunc allocation: returns an errno, not a pointer */
	ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16209_poll_func_th);
	if (ret)
		goto error_iio_sw_rb_free;

	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->ring);
	return ret;
}
/*
 * ad7606_register_ring_funcs_and_init() - set up the software ring buffer
 * @indio_dev: IIO device to attach the ring to
 *
 * Allocates a software ring buffer, selects its access functions,
 * sizes the element from the channel storage width, allocates the poll
 * function (same handler used as both halves, no IRQF_ONESHOT), and
 * flags the device as capable of triggered buffered capture.
 *
 * Returns 0 on success or a negative errno.
 */
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);
	int ret;

	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* Effectively select the ring buffer implementation */
	indio_dev->buffer->access = &ring_sw_access_funcs;
	/* Bytes per element derived from the channel's storage width */
	indio_dev->buffer->bpe =
		st->chip_info->channels[0].scan_type.storagebits / 8;
	indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
						 &ad7606_trigger_handler_th_bh,
						 0,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}
	/* Ring buffer functions - here trigger setup related */
	indio_dev->buffer->setup_ops = &ad7606_ring_setup_ops;
	indio_dev->buffer->scan_timestamp = true;

	/* Bottom half runs from a workqueue to move data into the ring */
	INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->buffer);
error_ret:
	return ret;
}
/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:		IIO device structure
 * @pollfunc_bh:	Function which will be used as pollfunc bottom half
 * @pollfunc_th:	Function which will be used as pollfunc top half
 * @setup_ops:		Buffer setup functions to use for this device.
 *			If NULL the default setup functions for triggered
 *			buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means that
 * this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_kfifo_allocate();
	if (!buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_device_attach_buffer(indio_dev, buffer);

	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_kfifo_free;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}
int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev) { int ret; struct iio_buffer *buffer; buffer = iio_kfifo_allocate(indio_dev); if (buffer == NULL) { ret = -ENOMEM; goto error_ret; } indio_dev->buffer = buffer; buffer->scan_timestamp = true; indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &iio_simple_dummy_trigger_h, IRQF_ONESHOT, indio_dev, "iio_simple_dummy_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_free_buffer; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; return 0; error_free_buffer: iio_kfifo_free(indio_dev->buffer); error_ret: return ret; }
/*
 * ad7606_register_ring_funcs_and_init() - set up the triggered kfifo buffer
 * @indio_dev: IIO device to attach the buffer to
 *
 * Allocates a kfifo buffer and the poll function (same handler used as
 * both halves, no IRQF_ONESHOT), installs the ring setup ops and the
 * workqueue bottom half, then flags the device as capable of triggered
 * buffered capture.
 *
 * Returns 0 on success or a negative errno.
 */
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);
	int ret;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
						 &ad7606_trigger_handler_th_bh,
						 0,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_kfifo;
	}
	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = &ad7606_ring_setup_ops;
	indio_dev->buffer->scan_timestamp = true;

	/* Bottom half runs from a workqueue to move data into the buffer */
	INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_kfifo:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}
/*
 * lis3l02dq_configure_ring() - set up the ring buffer (legacy helper API)
 * @indio_dev: IIO device to attach the ring to
 *
 * Initializes the trigger-to-ring work item and ring element getter on
 * the software ring helper state, allocates the driver's buffer,
 * registers its access functions and trigger-related callbacks, enables
 * the acceleration channels in the default scan mask, and allocates the
 * poll function.
 *
 * Returns 0 on success or a negative errno.
 */
int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
	struct iio_ring_buffer *ring;

	/* Bottom half runs from a workqueue to move data into the ring */
	INIT_WORK(&h->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
	h->get_ring_element = &lis3l02dq_get_ring_element;

	ring = lis3l02dq_alloc_buf(indio_dev);
	if (!ring)
		return -ENOMEM;
	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	lis3l02dq_register_buf_funcs(&ring->access);
	ring->bpe = 2;			/* bytes per element */

	ring->scan_el_attrs = &lis3l02dq_scan_el_group;
	ring->scan_timestamp = true;	/* timestamp each scan */
	ring->preenable = &iio_sw_ring_preenable;
	ring->postenable = &iio_triggered_ring_postenable;
	ring->predisable = &iio_triggered_ring_predisable;
	ring->owner = THIS_MODULE;

	/* Set default scan mode: all three acceleration axes */
	iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
	iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
	iio_scan_mask_set(ring, iio_scan_el_accel_z.number);

	/* Old-style pollfunc allocation: returns an errno, not a pointer */
	ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
	if (ret)
		goto error_iio_sw_rb_free;

	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	lis3l02dq_free_buf(indio_dev->ring);
	return ret;
}
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev) { struct ad7476_state *st = iio_priv(indio_dev); int ret = 0; indio_dev->ring = iio_sw_rb_allocate(indio_dev); if (!indio_dev->ring) { ret = -ENOMEM; goto error_ret; } /* Effectively select the ring buffer implementation */ indio_dev->ring->access = &ring_sw_access_funcs; indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &ad7476_trigger_handler, IRQF_ONESHOT, indio_dev, "%s_consumer%d", spi_get_device_id(st->spi)->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_deallocate_sw_rb; } /* Ring buffer functions - here trigger setup related */ indio_dev->ring->setup_ops = &ad7476_ring_setup_ops; indio_dev->ring->scan_timestamp = true; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_RING_TRIGGERED; return 0; error_deallocate_sw_rb: iio_sw_rb_free(indio_dev->ring); error_ret: return ret; }
/*
 * max1363_register_ring_funcs_and_init() - set up the software ring buffer
 * @indio_dev: IIO device to attach the buffer to
 *
 * Allocates a software ring buffer and the poll function (named after
 * the I2C client), selects the buffer access functions and setup ops,
 * then flags the device as capable of triggered buffered capture.
 *
 * Returns 0 on success or a negative errno.
 */
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct max1363_state *st = iio_priv(indio_dev);
	int ret = 0;

	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}
	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &max1363_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 st->client->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}
	/* Effectively select the ring buffer implementation */
	indio_dev->buffer->access = &ring_sw_access_funcs;
	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = &max1363_ring_setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->buffer);
error_ret:
	return ret;
}
int ade7758_configure_ring(struct iio_dev *indio_dev) { struct ade7758_state *st = iio_priv(indio_dev); int ret = 0; indio_dev->buffer = iio_kfifo_allocate(indio_dev); if (!indio_dev->buffer) { ret = -ENOMEM; return ret; } indio_dev->setup_ops = &ade7758_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &ade7758_trigger_handler, 0, indio_dev, "ade7759_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_iio_kfifo_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS); st->tx_buf[1] = 0; st->tx_buf[2] = 0; st->tx_buf[3] = 0; st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM); st->tx_buf[5] = 0; st->tx_buf[6] = 0; st->tx_buf[7] = 0; /* build spi ring message */ st->ring_xfer[0].tx_buf = &st->tx_buf[0]; st->ring_xfer[0].len = 1; st->ring_xfer[0].bits_per_word = 8; st->ring_xfer[0].delay_usecs = 4; st->ring_xfer[1].rx_buf = &st->rx_buf[1]; st->ring_xfer[1].len = 3; st->ring_xfer[1].bits_per_word = 8; st->ring_xfer[1].cs_change = 1; st->ring_xfer[2].tx_buf = &st->tx_buf[4]; st->ring_xfer[2].len = 1; st->ring_xfer[2].bits_per_word = 8; st->ring_xfer[2].delay_usecs = 1; st->ring_xfer[3].rx_buf = &st->rx_buf[5]; st->ring_xfer[3].len = 3; st->ring_xfer[3].bits_per_word = 8; spi_message_init(&st->ring_msg); spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg); return 0; error_iio_kfifo_free: iio_kfifo_free(indio_dev->buffer); return ret; }
/*
 * iio_simple_dummy_configure_buffer() - set up a triggered kfifo buffer
 * @indio_dev: IIO device to attach the buffer to
 *
 * Returns 0 on success or a negative errno.
 */
int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	/* Allocate a buffer to use - here a kfifo */
	buffer = iio_kfifo_allocate(indio_dev);
	if (buffer == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->buffer = buffer;
	/* Enable timestamps by default */
	buffer->scan_timestamp = true;

	/*
	 * Tell the core what device type specific functions should
	 * be run on either side of buffer capture enable / disable.
	 */
	indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;

	/*
	 * Configure a polling function.
	 * When a trigger event with this polling function connected
	 * occurs, this function is run. Typically this grabs data
	 * from the device.
	 *
	 * NULL for the top half. This is normally implemented only if we
	 * either want to ping a capture now pin (no sleeping) or grab
	 * a timestamp as close as possible to a data ready trigger firing.
	 *
	 * IRQF_ONESHOT ensures irqs are masked such that only one instance
	 * of the handler can run at a time.
	 *
	 * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
	 * as seen under /proc/interrupts. Remaining parameters as per printk.
	 */
	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &iio_simple_dummy_trigger_h,
						 IRQF_ONESHOT,
						 indio_dev,
						 "iio_simple_dummy_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_free_buffer;
	}

	/*
	 * Notify the core that this device is capable of buffered capture
	 * driven by a trigger.
	 */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

error_free_buffer:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}