void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
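/*
 * For reference, a minimal sketch of the backing structure and container_of
 * helper that the routines in this listing assume.  Only the fields actually
 * referenced below (buf, data, read_p, write_p, use_count, update_needed,
 * use_lock) are shown; the real structure may carry additional members, and
 * some snippets use the later struct iio_buffer name for the embedded ring.
 */
struct iio_sw_ring_buffer {
	struct iio_ring_buffer buf;	/* generic ring: bpd/length/stufftoread */
	unsigned char *data;		/* backing store, length * bpd bytes */
	unsigned char *read_p;		/* consumer position, NULL until first store */
	unsigned char *write_p;		/* producer position */
	int use_count;			/* readers currently holding the ring */
	int update_needed;		/* size or bpd changed; reallocate on update */
	spinlock_t use_lock;		/* protects use_count */
};

#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)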
/**
 * ad7476_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
 * @work_s:	the work struct through which this was scheduled
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
 * I think the one-copy-at-a-time restriction was to avoid problems if the
 * trigger was set far too high and the reads then locked up the computer.
 **/
static void ad7476_poll_bh_to_ring(struct work_struct *work_s)
{
	struct ad7476_state *st = container_of(work_s, struct ad7476_state,
					       poll_work);
	struct iio_dev *indio_dev = st->indio_dev;
	struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;

	/* Ensure only one copy of this function running at a time */
	if (atomic_inc_return(&st->protect_ring) > 1)
		return;

	rxbuf = kzalloc(st->d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		return;

	b_sent = spi_read(st->spi, rxbuf, st->chip_info->storagebits / 8);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (indio_dev->ring->scan_timestamp)
		memcpy(rxbuf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns);
done:
	kfree(rxbuf);
	atomic_dec(&st->protect_ring);
}
static int iio_store_to_sw_rb(struct iio_ring_buffer *r,
			      u8 *data,
			      s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
/**
 * ad799x_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
 * @work_s:	the work struct through which this was scheduled
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
 * I think the one-copy-at-a-time restriction was to avoid problems if the
 * trigger was set far too high and the reads then locked up the computer.
 **/
static void ad799x_poll_bh_to_ring(struct work_struct *work_s)
{
	struct ad799x_state *st = container_of(work_s, struct ad799x_state,
					       poll_work);
	struct iio_dev *indio_dev = st->indio_dev;
	struct iio_ring_buffer *ring = indio_dev->ring;
	struct iio_sw_ring_buffer *ring_sw = iio_to_sw_ring(indio_dev->ring);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	u8 cmd;

	/* Ensure only one copy of this function running at a time */
	if (atomic_inc_return(&st->protect_ring) > 1)
		return;

	rxbuf = kmalloc(st->d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		return;

	switch (st->id) {
	case ad7991:
	case ad7995:
	case ad7999:
		cmd = st->config | (ring->scan_mask << AD799X_CHANNEL_SHIFT);
		break;
	case ad7992:
	case ad7993:
	case ad7994:
		cmd = (ring->scan_mask << AD799X_CHANNEL_SHIFT) |
			AD7998_CONV_RES_REG;
		break;
	case ad7997:
	case ad7998:
		cmd = AD7997_8_READ_SEQUENCE | AD7998_CONV_RES_REG;
		break;
	default:
		cmd = 0;
	}

	b_sent = i2c_smbus_read_i2c_block_data(st->client,
					       cmd,
					       ring->scan_count * 2,
					       rxbuf);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (ring->scan_timestamp)
		memcpy(rxbuf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	ring->access.store_to(&ring_sw->buf, rxbuf, time_ns);
done:
	kfree(rxbuf);
	atomic_dec(&st->protect_ring);
}
/**
 * max1363_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
 * @work_s:	the work struct through which this was scheduled
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
 * I think the one-copy-at-a-time restriction was to avoid problems if the
 * trigger was set far too high and the reads then locked up the computer.
 **/
static void max1363_poll_bh_to_ring(struct work_struct *work_s)
{
	struct max1363_state *st = container_of(work_s, struct max1363_state,
						poll_work);
	struct iio_dev *indio_dev = st->indio_dev;
	struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	size_t d_size;
	unsigned long numvals = hweight_long(st->current_mode->modemask);

	/* Ensure the timestamp is 8 byte aligned */
	if (st->chip_info->bits != 8)
		d_size = numvals*2 + sizeof(s64);
	else
		d_size = numvals + sizeof(s64);
	if (d_size % sizeof(s64))
		d_size += sizeof(s64) - (d_size % sizeof(s64));

	/* Ensure only one copy of this function running at a time */
	if (atomic_inc_return(&st->protect_ring) > 1)
		return;

	/* Monitor mode prevents reading. Whilst monitor mode is not currently
	 * implemented, having this test here in the meantime does no harm.
	 */
	if (numvals == 0)
		return;

	rxbuf = kmalloc(d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		return;

	if (st->chip_info->bits != 8)
		b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
	else
		b_sent = i2c_master_recv(st->client, rxbuf, numvals);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();
	memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));

	indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns);
done:
	kfree(rxbuf);
	atomic_dec(&st->protect_ring);
}
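/*
 * Standalone userspace sketch of the timestamp-alignment arithmetic used in
 * max1363_poll_bh_to_ring() above, not driver code.  The channel counts in
 * main() are made-up examples, not taken from any particular scan mode.
 * Compile with: cc -o scan_size scan_size.c
 */
#include <stdio.h>

static size_t scan_size(unsigned long numvals, int bits)
{
	/* 10/12-bit parts store two bytes per value, 8-bit parts one */
	size_t d_size = (bits != 8 ? numvals * 2 : numvals) + sizeof(long long);

	/* round up so the 8-byte timestamp at the end stays aligned */
	if (d_size % sizeof(long long))
		d_size += sizeof(long long) - (d_size % sizeof(long long));
	return d_size;
}

int main(void)
{
	/* 3 channels, 12-bit: 6 data bytes + 8 timestamp -> padded to 16 */
	printf("%zu\n", scan_size(3, 12));
	/* 4 channels, 8-bit: 4 data bytes + 8 timestamp -> padded to 16 */
	printf("%zu\n", scan_size(4, 8));
	return 0;
}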
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	r->stufftoread = false;
	if (!ring->update_needed)
		goto error_ret;
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
					    ring->buf.length);
error_ret:
	return ret;
}
int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bpd,
					    ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
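/*
 * Sketch of the counterpart to iio_unmark_sw_rb_in_use(): a reader takes a
 * reference before accessing the ring so that iio_request_update_sw_rb()
 * above can refuse (-EAGAIN) to reallocate the buffer underneath an active
 * reader.  Assumed shape for illustration, not quoted from the source.
 */
void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}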
static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}
int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count,
		  u8 **data,
		  int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd * ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to cpy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* compute the end position before max_copied is
			 * updated, otherwise it collapses to ring->data */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else {
			/* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
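/*
 * Standalone userspace sketch of the dead_offset arithmetic above, not
 * driver code; all numbers are made up.  A ring of 4 datums * bpd 2 gives an
 * 8-byte buffer.  The reader snapshots read_p at offset 2 and copies 4
 * bytes, but while it copies, the writer laps it and pushes read_p to
 * offset 4; the first (4 - 2) = 2 copied bytes are stale and are skipped.
 */
#include <stdio.h>

int main(void)
{
	int buffer_size = 4 * 2;	/* length * bpd */
	int initial_read = 2;		/* offset snapshotted before copying */
	int current_read = 4;		/* offset after the writer moved it */
	int max_copied = 4;		/* bytes we managed to copy */
	int dead_offset;

	if (initial_read <= current_read)
		dead_offset = current_read - initial_read;
	else
		dead_offset = buffer_size - (initial_read - current_read);

	/* valid data is whatever survived beyond the overwritten prefix */
	printf("valid bytes: %d\n", max_copied - dead_offset); /* prints 2 */
	return 0;
}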
static int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bytes_per_datum;
}
static int iio_read_first_n_sw_rb(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;
	size_t data_available, buffer_size;

	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}

	buffer_size = ring->buf.bytes_per_datum*ring->buf.length;
	bytes_to_rip = min_t(size_t, buffer_size, n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) {
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p)
		data_available = initial_write_p - initial_read_p;
	else
		data_available = buffer_size
			- (initial_read_p - initial_write_p);

	if (data_available < bytes_to_rip)
		bytes_to_rip = data_available;

	if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
		max_copied = ring->data + buffer_size - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		memcpy(data + max_copied, ring->data,
		       bytes_to_rip - max_copied);
		end_read_p = ring->data + bytes_to_rip - max_copied;
	} else {
		memcpy(data, initial_read_p, bytes_to_rip);
		end_read_p = initial_read_p + bytes_to_rip;
	}

	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = buffer_size - (initial_read_p - current_read_p);

	if (bytes_to_rip - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = bytes_to_rip - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = 0;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}
void iio_sw_rb_free(struct iio_buffer *r)
{
	kfree(iio_to_sw_ring(r));
}
static int iio_read_first_n_sw_rb(struct iio_buffer *r,
				  size_t n, char __user *buf)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	u8 *data;
	int ret, max_copied, bytes_to_rip, dead_offset;
	size_t data_available, buffer_size;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (n % ring->buf.bytes_per_datum) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bytes per datum %d\n",
		       n, ring->buf.bytes_per_datum);
		goto error_ret;
	}

	buffer_size = ring->buf.bytes_per_datum*ring->buf.length;

	/* Limit size to whole of ring buffer */
	bytes_to_rip = min_t(size_t, buffer_size, n);

	data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p)
		data_available = initial_write_p - initial_read_p;
	else
		data_available = buffer_size
			- (initial_read_p - initial_write_p);

	if (data_available < bytes_to_rip)
		bytes_to_rip = data_available;

	if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
		max_copied = ring->data + buffer_size - initial_read_p;
		memcpy(data, initial_read_p, max_copied);
		memcpy(data + max_copied, ring->data,
		       bytes_to_rip - max_copied);
		end_read_p = ring->data + bytes_to_rip - max_copied;
	} else {
		memcpy(data, initial_read_p, bytes_to_rip);
		end_read_p = initial_read_p + bytes_to_rip;
	}

	/* Now to verify which section was cleanly copied - i.e. how far
	 * read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		dead_offset = current_read_p - initial_read_p;
	else
		dead_offset = buffer_size - (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (bytes_to_rip - dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * Possible that sufficient fill commands have run to push the read
	 * pointer past where we would be after the rip. If this occurs, leave
	 * it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	ret = bytes_to_rip - dead_offset;

	if (copy_to_user(buf, data + dead_offset, ret)) {
		ret = -EFAULT;
		goto error_free_data_cpy;
	}

	if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
		ring->buf.stufftoread = 0;

error_free_data_cpy:
	kfree(data);
error_ret:
	return ret;
}