static int inv_ami306_postenable(struct iio_dev *indio_dev) { struct inv_ami306_state_s *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; int result; /* when all the outputs are disabled, even though buffer/enable is on, do nothing */ if (!(iio_scan_mask_query(indio_dev, ring, INV_AMI306_SCAN_MAGN_X) || iio_scan_mask_query(indio_dev, ring, INV_AMI306_SCAN_MAGN_Y) || iio_scan_mask_query(indio_dev, ring, INV_AMI306_SCAN_MAGN_Z))) return 0; result = set_ami306_enable(indio_dev, true); if (result) return result; schedule_delayed_work(&st->work, msecs_to_jiffies(st->delay)); return 0; }
/** * lis3l02dq_read_accel_from_ring() individual acceleration read from ring **/ ssize_t lis3l02dq_read_accel_from_ring(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_scan_el *el = NULL; int ret, len = 0, i = 0; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); struct iio_dev *dev_info = dev_get_drvdata(dev); struct iio_ring_buffer *ring = dev_info->ring; struct attribute_group *scan_el_attrs = ring->scan_el_attrs; s16 *data; while (scan_el_attrs->attrs[i]) { el = to_iio_scan_el((struct device_attribute *) (scan_el_attrs->attrs[i])); /* label is in fact the address */ if (el->label == this_attr->address) break; i++; } if (!scan_el_attrs->attrs[i]) { ret = -EINVAL; goto error_ret; } /* If this element is in the scan mask */ ret = iio_scan_mask_query(ring, el->number); if (ret < 0) goto error_ret; if (ret) { data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL); if (data == NULL) return -ENOMEM; ret = ring->access.read_last(ring, (u8 *)data); if (ret) goto error_free_data; } else { ret = -EINVAL; goto error_ret; } len = iio_scan_mask_count_to_right(ring, el->number); if (len < 0) { ret = len; goto error_free_data; } len = sprintf(buf, "ring %d\n", data[len]); error_free_data: kfree(data); error_ret: return ret ? ret : len; }
/*
 * iio_scan_el_show() - sysfs read of a scan element's enabled state.
 *
 * Prints "1" if the channel identified by the attribute's address is part
 * of the buffer's scan mask, "0" otherwise; propagates query errors.
 */
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	int state;

	state = iio_scan_mask_query(indio_dev->buffer,
				    to_iio_dev_attr(attr)->address);
	if (state < 0)
		return state;

	return sprintf(buf, "%d\n", state);
}
/*
 * put_scan_to_buf() - pack the enabled axes of a 3-axis sample into a buffer.
 * @indio_dev:	IIO device owning the scan mask
 * @d:		destination byte buffer
 * @s:		source array of three s16 samples (x, y, z)
 * @scan_index:	scan index of the first axis; axes occupy three
 *		consecutive indices
 *
 * Copies only the axes present in the scan mask, packed contiguously.
 * Returns the number of bytes written to @d.
 */
static int put_scan_to_buf(struct iio_dev *indio_dev, unsigned char *d,
			   short *s, int scan_index)
{
	struct iio_buffer *ring = indio_dev->buffer;
	int bytes_packed = 0;
	int axis;

	for (axis = 0; axis < 3; axis++) {
		if (iio_scan_mask_query(indio_dev, ring, scan_index + axis)) {
			memcpy(d + bytes_packed, s + axis, sizeof(s[axis]));
			bytes_packed += sizeof(s[axis]);
		}
	}
	return bytes_packed;
}
/*
 * iio_scan_el_store() - sysfs write to enable/disable a scan element.
 *
 * A write of "0" clears the channel's bit in the buffer's scan mask; any
 * other first character sets it.  The mask may not be changed while the
 * device is in triggered-buffer mode (-EBUSY).
 *
 * Returns @len on success or a negative errno.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int ret = 0;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	/* The scan mask must stay fixed while a triggered capture runs. */
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}
error_ret:
	mutex_unlock(&indio_dev->mlock);
	/*
	 * Fix: iio_scan_mask_query() returns a positive value when the bit
	 * is already set; if the requested state already matched, neither
	 * branch above ran and ret stayed positive.  The old
	 * "ret ? ret : len" then returned that positive query value as a
	 * short write count, making the VFS re-issue the write.  Only
	 * negative values are errors, so return len otherwise.
	 */
	return ret < 0 ? ret : len;
}