static int yas_probe_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate(indio_dev);
	if (!buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}
	buffer->scan_timestamp = true;
	indio_dev->buffer = buffer;
	indio_dev->setup_ops = &yas_buffer_setup_ops;
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	ret = iio_buffer_register(indio_dev, indio_dev->channels,
				  indio_dev->num_channels);
	if (ret)
		goto error_free_buf;

	iio_scan_mask_set(indio_dev, indio_dev->buffer, YAS_SCAN_ACCEL_X);
	iio_scan_mask_set(indio_dev, indio_dev->buffer, YAS_SCAN_ACCEL_Y);
	iio_scan_mask_set(indio_dev, indio_dev->buffer, YAS_SCAN_ACCEL_Z);
	return 0;

error_free_buf:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	int ret = 0;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &ad799x_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_kfifo;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = &ad799x_buf_setup_ops;
	indio_dev->buffer->scan_timestamp = true;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_kfifo:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

static void m4pas_remove_iiodev(struct iio_dev *iio)
{
	struct m4pas_driver_data *dd = iio_priv(iio);

	/* Remember, only call when dd->mutex is locked */
	iio_kfifo_free(iio->buffer);
	iio_buffer_unregister(iio);
	iio_device_unregister(iio);
	mutex_destroy(&(dd->mutex));
	iio_device_free(iio); /* dd is freed here */
	return;
}

/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:		IIO device structure
 * @pollfunc_bh:	Function which will be used as pollfunc bottom half
 * @pollfunc_th:	Function which will be used as pollfunc top half
 * @setup_ops:		Buffer setup functions to use for this device.
 *			If NULL the default setup functions for triggered
 *			buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc, as well as register the buffer with the IIO core.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means
 * that this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	int ret;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_kfifo_free;
	}

	/* Ring buffer functions - here trigger setup related */
	if (setup_ops)
		indio_dev->setup_ops = setup_ops;
	else
		indio_dev->setup_ops = &iio_triggered_buffer_setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	ret = iio_buffer_register(indio_dev,
				  indio_dev->channels,
				  indio_dev->num_channels);
	if (ret)
		goto error_dealloc_pollfunc;

	return 0;

error_dealloc_pollfunc:
	iio_dealloc_pollfunc(indio_dev->pollfunc);
error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

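/*
 * Usage sketch (illustrative, not taken from the sources above): a
 * hypothetical foo_probe() wiring iio_triggered_buffer_setup() in right
 * before iio_device_register(), as the kernel-doc prescribes, and
 * unwinding with iio_triggered_buffer_cleanup() on failure. The names
 * foo_probe, foo_trigger_handler and foo_buffer_setup_ops are made up;
 * iio_pollfunc_store_time is the stock timestamp-grabbing top half
 * provided by the IIO core.
 */
static int foo_probe(struct iio_dev *indio_dev)
{
	int ret;

	/* Buffer and pollfunc come as a pair; setup_ops may be NULL. */
	ret = iio_triggered_buffer_setup(indio_dev,
					 &iio_pollfunc_store_time,
					 &foo_trigger_handler,
					 &foo_buffer_setup_ops);
	if (ret)
		return ret;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_triggered_buffer_cleanup(indio_dev);

	return ret;
}
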
static int m4pas_create_iiodev(struct iio_dev *iio)
{
	int err = 0;
	struct m4pas_driver_data *dd = iio_priv(iio);

	iio->name = M4PAS_DRIVER_NAME;
	iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_HARDWARE;
	iio->num_channels = 1;
	iio->info = &m4pas_iio_info;
	iio->channels = m4pas_iio_channels;

	iio->buffer = iio_kfifo_allocate(iio);
	if (iio->buffer == NULL) {
		m4pas_err("%s: Failed to allocate IIO buffer.\n", __func__);
		err = -ENOMEM;
		goto m4pas_create_iiodev_kfifo_fail;
	}
	iio->buffer->scan_timestamp = true;
	iio->buffer->access->set_bytes_per_datum(iio->buffer,
						 sizeof(dd->iiodat));

	err = iio_buffer_register(iio, iio->channels, iio->num_channels);
	if (err < 0) {
		m4pas_err("%s: Failed to register IIO buffer.\n", __func__);
		goto m4pas_create_iiodev_buffer_fail;
	}

	err = iio_device_register(iio);
	if (err < 0) {
		m4pas_err("%s: Failed to register IIO device.\n", __func__);
		goto m4pas_create_iiodev_iioreg_fail;
	}

	goto m4pas_create_iiodev_exit;

m4pas_create_iiodev_iioreg_fail:
	iio_buffer_unregister(iio);
m4pas_create_iiodev_buffer_fail:
	iio_kfifo_free(iio->buffer);
m4pas_create_iiodev_kfifo_fail:
	iio_device_free(iio); /* dd is freed here */
m4pas_create_iiodev_exit:
	return err;
}

/**
 * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
 * @indio_dev:		IIO device structure
 * @pollfunc_bh:	Function which will be used as pollfunc bottom half
 * @pollfunc_th:	Function which will be used as pollfunc top half
 * @setup_ops:		Buffer setup functions to use for this device.
 *			If NULL the default setup functions for triggered
 *			buffers will be used.
 *
 * This function combines some common tasks which will normally be performed
 * when setting up a triggered buffer. It will allocate the buffer and the
 * pollfunc.
 *
 * Before calling this function the indio_dev structure should already be
 * completely initialized, but not yet registered. In practice this means
 * that this function should be called right before iio_device_register().
 *
 * To free the resources allocated by this function call
 * iio_triggered_buffer_cleanup().
 */
int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
	irqreturn_t (*pollfunc_bh)(int irq, void *p),
	irqreturn_t (*pollfunc_th)(int irq, void *p),
	const struct iio_buffer_setup_ops *setup_ops)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_kfifo_allocate();
	if (!buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_device_attach_buffer(indio_dev, buffer);

	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
						 pollfunc_th,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_kfifo_free;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = setup_ops;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

error_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	buffer = iio_kfifo_allocate(indio_dev);
	if (buffer == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	indio_dev->buffer = buffer;
	buffer->scan_timestamp = true;
	indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;

	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &iio_simple_dummy_trigger_h,
						 IRQF_ONESHOT,
						 indio_dev,
						 "iio_simple_dummy_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_free_buffer;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_free_buffer:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);
	int ret;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
						 &ad7606_trigger_handler_th_bh,
						 0,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_kfifo;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = &ad7606_ring_setup_ops;
	indio_dev->buffer->scan_timestamp = true;

	INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_kfifo:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

void inv_ami306_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_kfifo_free(indio_dev->buffer);
}

static void devm_iio_kfifo_release(struct device *dev, void *res)
{
	iio_kfifo_free(*(struct iio_buffer **)res);
}

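/*
 * For context, a sketch of the devm-managed allocator this release
 * callback pairs with, following the standard devres pattern
 * (devres_alloc / devres_add); treat it as illustrative rather than
 * the exact in-tree implementation.
 */
static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
{
	struct iio_buffer **ptr, *r;

	ptr = devres_alloc(devm_iio_kfifo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	r = iio_kfifo_allocate();
	if (r) {
		*ptr = r;	/* stashed for devm_iio_kfifo_release() */
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return r;
}
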
/**
 * iio_triggered_buffer_cleanup() - Free resources allocated by
 *				    iio_triggered_buffer_setup()
 * @indio_dev: IIO device structure
 */
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev)
{
	iio_buffer_unregister(indio_dev);
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}

void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}

int ade7758_configure_ring(struct iio_dev *indio_dev)
{
	struct ade7758_state *st = iio_priv(indio_dev);
	int ret = 0;

	indio_dev->buffer = iio_kfifo_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		return ret;
	}

	indio_dev->setup_ops = &ade7758_ring_setup_ops;

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &ade7758_trigger_handler,
						 0,
						 indio_dev,
						 "ade7758_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_kfifo_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
	st->tx_buf[1] = 0;
	st->tx_buf[2] = 0;
	st->tx_buf[3] = 0;
	st->tx_buf[4] = ADE7758_READ_REG(ADE7758_WFORM);
	st->tx_buf[5] = 0;
	st->tx_buf[6] = 0;
	st->tx_buf[7] = 0;

	/* build spi ring message */
	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
	st->ring_xfer[0].len = 1;
	st->ring_xfer[0].bits_per_word = 8;
	st->ring_xfer[0].delay_usecs = 4;
	st->ring_xfer[1].rx_buf = &st->rx_buf[1];
	st->ring_xfer[1].len = 3;
	st->ring_xfer[1].bits_per_word = 8;
	st->ring_xfer[1].cs_change = 1;

	st->ring_xfer[2].tx_buf = &st->tx_buf[4];
	st->ring_xfer[2].len = 1;
	st->ring_xfer[2].bits_per_word = 8;
	st->ring_xfer[2].delay_usecs = 1;
	st->ring_xfer[3].rx_buf = &st->rx_buf[5];
	st->ring_xfer[3].len = 3;
	st->ring_xfer[3].bits_per_word = 8;

	spi_message_init(&st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[2], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[3], &st->ring_msg);

	return 0;

error_iio_kfifo_free:
	iio_kfifo_free(indio_dev->buffer);
	return ret;
}

void ssp_iio_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_kfifo_free(indio_dev->buffer);
}

/**
 * iio_simple_dummy_unconfigure_buffer() - release buffer resources
 * @indio_dev: device instance state
 */
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}

int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	/* Allocate a buffer to use - here a kfifo */
	buffer = iio_kfifo_allocate(indio_dev);
	if (buffer == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	indio_dev->buffer = buffer;

	/* Enable timestamps by default */
	buffer->scan_timestamp = true;

	/*
	 * Tell the core what device type specific functions should
	 * be run on either side of buffer capture enable / disable.
	 */
	indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;

	/*
	 * Configure a polling function.
	 * When a trigger event with this polling function connected
	 * occurs, this function is run. Typically this grabs data
	 * from the device.
	 *
	 * NULL for the top half. This is normally implemented only if we
	 * either want to ping a capture now pin (no sleeping) or grab
	 * a timestamp as close as possible to a data ready trigger firing.
	 *
	 * IRQF_ONESHOT ensures irqs are masked such that only one instance
	 * of the handler can run at a time.
	 *
	 * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
	 * as seen under /proc/interrupts. Remaining parameters as per printk.
	 */
	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &iio_simple_dummy_trigger_h,
						 IRQF_ONESHOT,
						 indio_dev,
						 "iio_simple_dummy_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_free_buffer;
	}

	/*
	 * Notify the core that this device is capable of buffered capture
	 * driven by a trigger.
	 */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

error_free_buffer:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

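/*
 * A minimal sketch of the kind of bottom half a pollfunc like
 * iio_simple_dummy_trigger_h above stands for (a generic illustration,
 * not the dummy driver's actual handler): grab a sample, push it to the
 * buffer with a timestamp, then tell the trigger core this consumer is
 * done. foo_trigger_handler and foo_read_sample() are hypothetical, and
 * the no-argument iio_get_time_ns() matches the API era of these
 * snippets.
 */
static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	u16 data[8] __aligned(8);	/* one scan element + timestamp */

	data[0] = foo_read_sample();
	iio_push_to_buffers_with_timestamp(indio_dev, data,
					   iio_get_time_ns());

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
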
static void yas_remove_buffer(struct iio_dev *indio_dev)
{
	iio_buffer_unregister(indio_dev);
	iio_kfifo_free(indio_dev->buffer);
}

void ad7606_ring_cleanup(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}