/*
 * dma_configure_ring_stream() - attach a DMA-engine buffer to an IIO device
 * @indio_dev: IIO device to receive the buffer
 * @dma_name:  DMA channel name; NULL selects the default "rx" channel
 *
 * Allocates a dmaengine-backed IIO buffer, marks the device as supporting
 * hardware buffering and attaches the buffer to it.
 *
 * Returns 0 on success or a negative errno from the buffer allocation.
 */
int dma_configure_ring_stream(struct iio_dev *indio_dev, const char *dma_name)
{
	struct iio_buffer *buf;

	/* Fall back to the conventional receive channel name. */
	if (!dma_name)
		dma_name = "rx";

	buf = iio_dmaengine_buffer_alloc(indio_dev->dev.parent, dma_name,
					 &dma_buffer_ops, indio_dev);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buf);

	return 0;
}
static int ssp_accel_probe(struct platform_device *pdev) { int ret; struct iio_dev *indio_dev; struct ssp_sensor_data *spd; struct iio_buffer *buffer; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*spd)); if (!indio_dev) return -ENOMEM; spd = iio_priv(indio_dev); spd->process_data = ssp_process_accel_data; spd->type = SSP_ACCELEROMETER_SENSOR; indio_dev->name = ssp_accel_device_name; indio_dev->dev.parent = &pdev->dev; indio_dev->dev.of_node = pdev->dev.of_node; indio_dev->info = &ssp_accel_iio_info; indio_dev->modes = INDIO_BUFFER_SOFTWARE; indio_dev->channels = ssp_acc_channels; indio_dev->num_channels = ARRAY_SIZE(ssp_acc_channels); indio_dev->available_scan_masks = ssp_accel_scan_mask; buffer = devm_iio_kfifo_allocate(&pdev->dev); if (!buffer) return -ENOMEM; iio_device_attach_buffer(indio_dev, buffer); indio_dev->setup_ops = &ssp_accel_buffer_ops; platform_set_drvdata(pdev, indio_dev); ret = iio_device_register(indio_dev); if (ret < 0) return ret; /* ssp registering should be done after all iio setup */ ssp_register_consumer(indio_dev, SSP_ACCELEROMETER_SENSOR); return 0; }
/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:	IIO device structure
 * @pollfunc_bh: Function which will be used as pollfunc bottom half
 * @pollfunc_th: Function which will be used as pollfunc top half
 * @setup_ops:	Buffer setup functions to use for this device.
 *		If NULL the default setup functions for triggered
 *		buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer: it allocates a kfifo buffer, attaches
 * it to the device and allocates the poll function.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means
 * that this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *kfifo;
	int err;

	kfifo = iio_kfifo_allocate();
	if (!kfifo) {
		err = -ENOMEM;
		goto err_out;
	}

	iio_device_attach_buffer(indio_dev, kfifo);

	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (!indio_dev->pollfunc) {
		err = -ENOMEM;
		goto err_free_kfifo;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

err_free_kfifo:
	iio_kfifo_free(indio_dev->buffer);
err_out:
	return err;
}
int ade7758_configure_ring(struct iio_dev *indio_dev) { struct ade7758_state *st = iio_priv(indio_dev); struct iio_buffer *buffer; int ret = 0; buffer = iio_kfifo_allocate(); if (!buffer) return -ENOMEM; iio_device_attach_buffer(indio_dev, buffer); indio_dev->setup_ops = &ade7758_ring_setup_ops; indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time, &ade7758_trigger_handler, 0, indio_dev, "ade7759_consumer%d", indio_dev->id); if (!indio_dev->pollfunc) { ret = -ENOMEM; goto error_iio_kfifo_free; } indio_dev->modes |= INDIO_BUFFER_TRIGGERED; st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS); st->tx_buf[1] = 0; st->tx_buf[2] = 0; st->tx_buf[3] = 0; st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM); st->tx_buf[5] = 0; st->tx_buf[6] = 0; st->tx_buf[7] = 0; /* build spi ring message */ st->ring_xfer[0].tx_buf = &st->tx_buf[0]; st->ring_xfer[0].len = 1; st->ring_xfer[0].bits_per_word = 8; st->ring_xfer[0].delay_usecs = 4; st->ring_xfer[1].rx_buf = &st->rx_buf[1]; st->ring_xfer[1].len = 3; st->ring_xfer[1].bits_per_word = 8; st->ring_xfer[1].cs_change = 1; st->ring_xfer[2].tx_buf = &st->tx_buf[4]; st->ring_xfer[2].len = 1; st->ring_xfer[2].bits_per_word = 8; st->ring_xfer[2].delay_usecs = 1; st->ring_xfer[3].rx_buf = &st->rx_buf[5]; st->ring_xfer[3].len = 3; st->ring_xfer[3].bits_per_word = 8; spi_message_init(&st->ring_msg); spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg); spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg); return 0; error_iio_kfifo_free: iio_kfifo_free(indio_dev->buffer); return ret; }
int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev, const struct iio_chan_spec *channels, unsigned int num_channels) { int ret; struct iio_buffer *buffer; /* Allocate a buffer to use - here a kfifo */ buffer = iio_kfifo_allocate(indio_dev); if (buffer == NULL) { ret = -ENOMEM; goto error_ret; } iio_device_attach_buffer(indio_dev, buffer); /* Enable timestamps by default */ buffer->scan_timestamp = true; /* * Tell the core what device type specific functions should * be run on either side of buffer capture enable / disable. */ indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops; /* * Configure a polling function. * When a trigger event with this polling function connected * occurs, this function is run. Typically this grabs data * from the device. * * NULL for the bottom half. This is normally implemented only if we * either want to ping a capture now pin (no sleeping) or grab * a timestamp as close as possible to a data ready trigger firing. * * IRQF_ONESHOT ensures irqs are masked such that only one instance * of the handler can run at a time. * * "iio_simple_dummy_consumer%d" formatting string for the irq 'name' * as seen under /proc/interrupts. Remaining parameters as per printk. */ indio_dev->pollfunc = iio_alloc_pollfunc(NULL, &iio_simple_dummy_trigger_h, IRQF_ONESHOT, indio_dev, "iio_simple_dummy_consumer%d", indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_free_buffer; } /* * Notify the core that this device is capable of buffered capture * driven by a trigger. */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; ret = iio_buffer_register(indio_dev, channels, num_channels); if (ret) goto error_dealloc_pollfunc; return 0; error_dealloc_pollfunc: iio_dealloc_pollfunc(indio_dev->pollfunc); error_free_buffer: iio_kfifo_free(indio_dev->buffer); error_ret: return ret; }