static irqreturn_t nuc970_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct nuc970_adc_device *info = iio_priv(indio_dev); int val; int channel; unsigned long timeout; channel = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); // enable channel writel((readl(info->regs + CONF) & ~(0x7 << 3)) | (channel << 3), info->regs + CONF); // enable MST writel(readl(info->regs + CTL) | 0x100, info->regs + CTL); timeout = wait_for_completion_interruptible_timeout (&info->completion, NUC970_ADC_TIMEOUT); val = readl(info->regs + DATA); iio_push_to_buffers(indio_dev, (void *)&val); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
static irqreturn_t at91_adc_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *idev = pf->indio_dev; struct at91_adc_state *st = iio_priv(idev); int i, j = 0; for (i = 0; i < idev->masklength; i++) { if (!test_bit(i, idev->active_scan_mask)) continue; st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i)); j++; } if (idev->scan_timestamp) { s64 *timestamp = (s64 *)((u8 *)st->buffer + ALIGN(j, sizeof(s64))); *timestamp = pf->timestamp; } iio_push_to_buffers(indio_dev, (u8 *)st->buffer); iio_trigger_notify_done(idev->trig); /* Needed to ACK the DRDY interruption */ at91_adc_readl(st, AT91_ADC_LCDR); enable_irq(st->irq); return IRQ_HANDLED; }
/** * ad7298_trigger_handler() bh of trigger launched polling to ring buffer * * Currently there is no option in this driver to disable the saving of * timestamps within the ring. **/ static irqreturn_t ad7298_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ad7298_state *st = iio_priv(indio_dev); s64 time_ns = 0; int b_sent; b_sent = spi_sync(st->spi, &st->ring_msg); if (b_sent) goto done; if (indio_dev->scan_timestamp) { time_ns = iio_get_time_ns(); memcpy((u8 *)st->rx_buf + indio_dev->scan_bytes - sizeof(s64), &time_ns, sizeof(time_ns)); } iio_push_to_buffers(indio_dev, (u8 *)st->rx_buf); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
static irqreturn_t adc1x8s102_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev; struct adc1x8s102_state *st; s64 time_ns = 0; int b_sent; indio_dev = pf->indio_dev; st = iio_priv(indio_dev); b_sent = spi_sync(st->spi, &st->ring_msg); if (b_sent) goto done; if (indio_dev->scan_timestamp) { time_ns = iio_get_time_ns(); memcpy((u8 *)st->rx_buf + st->ring_xfer.len, &time_ns, sizeof(time_ns)); } /* Skip the dummy response in the first slot */ iio_push_to_buffers(indio_dev, (u8 *)&st->rx_buf[1]); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
static irqreturn_t yas_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct yas_state *st = iio_priv(indio_dev); int len = 0, i, j; int32_t *acc; acc = (int32_t *)kmalloc(indio_dev->scan_bytes, GFP_KERNEL); if (acc == NULL) { E("%s: memory alloc failed in buffer bh\n", __func__); goto done; } if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) { j = 0; for (i = 0; i < 3; i++) { if (test_bit(i, indio_dev->active_scan_mask)) { acc[j] = st->accel_data[i]; j++; } } len = j * 4; } if (indio_dev->scan_timestamp) *(s64 *)((u8 *)acc + ALIGN(len, sizeof(s64))) = pf->timestamp; iio_push_to_buffers(indio_dev, (u8 *)acc); kfree(acc); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
static irqreturn_t yas_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct yas_state *st = iio_priv(indio_dev); int len = 0, i, j; int32_t *mag; mag = (int32_t *) kmalloc(indio_dev->scan_bytes, GFP_KERNEL); if (mag == NULL) goto done; if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) { j = 0; for (i = 0; i < 3; i++) { if (test_bit(i, indio_dev->active_scan_mask)) { mag[j] = st->compass_data[i]; j++; } } len = j * 4; } /* Guaranteed to be aligned with 8 byte boundary */ if (indio_dev->scan_timestamp) *(s64 *)((u8 *)mag + ALIGN(len, sizeof(s64))) = pf->timestamp; iio_push_to_buffers(indio_dev, (u8 *)mag); kfree(mag); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
/*
 * ssp_push_1bytes_buffer() - push a one-byte sample plus timestamp.
 * Frame layout: [0] = data byte, [1..8] = 64-bit timestamp t.
 * Push is serialized against other users via indio_dev->mlock.
 * Always returns 0.
 */
static int ssp_push_1bytes_buffer(struct iio_dev *indio_dev, u64 t, u8 *d)
{
	u8 frame[IIO_BUFFER_1_BYTES];

	frame[0] = d[0];
	memcpy(&frame[1], &t, sizeof(t));

	mutex_lock(&indio_dev->mlock);
	iio_push_to_buffers(indio_dev, frame);
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
/*
 * ssp_push_24bytes_buffer() - push six 16-bit samples plus timestamp.
 * Frame layout: q[0..5] at byte offsets 0,2,...,10, timestamp t at 12.
 * Push is serialized against other users via indio_dev->mlock.
 * Always returns 0.
 */
static int ssp_push_24bytes_buffer(struct iio_dev *indio_dev, u64 t, s16 *q)
{
	u8 frame[IIO_BUFFER_24_BYTES];
	int idx;

	for (idx = 0; idx < 6; idx++)
		memcpy(&frame[idx * 2], &q[idx], sizeof(q[idx]));
	memcpy(&frame[12], &t, sizeof(t));

	mutex_lock(&indio_dev->mlock);
	iio_push_to_buffers(indio_dev, frame);
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
/*
 * ssp_push_6bytes_buffer() - push three 16-bit samples plus timestamp.
 * Frame layout: d[0..2] at byte offsets 0,2,4, timestamp t at 6.
 * Push is serialized against other users via indio_dev->mlock.
 * Always returns 0.
 */
static int ssp_push_6bytes_buffer(struct iio_dev *indio_dev, u64 t, s16 *d)
{
	u8 frame[IIO_BUFFER_6_BYTES];
	int idx;

	for (idx = 0; idx < 3; idx++)
		memcpy(&frame[idx * 2], &d[idx], sizeof(d[idx]));
	memcpy(&frame[6], &t, sizeof(t));

	mutex_lock(&indio_dev->mlock);
	iio_push_to_buffers(indio_dev, frame);
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
/* data header defines */
/*
 * ssp_push_17bytes_buffer() - push four 32-bit values, one status byte,
 * and a timestamp.  Frame layout: q[0..3] at byte offsets 0,4,8,12, the
 * low byte of q[4] at 16, timestamp t at 17.
 * Push is serialized against other users via indio_dev->mlock.
 * Always returns 0.
 */
static int ssp_push_17bytes_buffer(struct iio_dev *indio_dev, u64 t, int *q)
{
	u8 frame[IIO_BUFFER_17_BYTES];
	int idx;

	for (idx = 0; idx < 4; idx++)
		memcpy(&frame[idx * 4], &q[idx], sizeof(q[idx]));
	frame[16] = (u8)q[4];
	memcpy(&frame[17], &t, sizeof(t));

	mutex_lock(&indio_dev->mlock);
	iio_push_to_buffers(indio_dev, frame);
	mutex_unlock(&indio_dev->mlock);

	return 0;
}
static irqreturn_t adis_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adis *adis = iio_device_get_drvdata(indio_dev); int ret; if (!adis->buffer) return -ENOMEM; if (adis->data->has_paging) { mutex_lock(&adis->txrx_lock); if (adis->current_page != 0) { adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis->tx[1] = 0; spi_write(adis->spi, adis->tx, 2); } } ret = spi_sync(adis->spi, &adis->msg); if (ret) dev_err(&adis->spi->dev, "Failed to read data: %d", ret); if (adis->data->has_paging) { adis->current_page = 0; mutex_unlock(&adis->txrx_lock); } /* Guaranteed to be aligned with 8 byte boundary */ if (indio_dev->scan_timestamp) { void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64); *(s64 *)b = pf->timestamp; } iio_push_to_buffers(indio_dev, adis->buffer); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device * specific to be rolled into the core. */ static irqreturn_t ade7758_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ade7758_state *st = iio_priv(indio_dev); s64 dat64[2]; u32 *dat32 = (u32 *)dat64; if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) if (ade7758_spi_read_burst(indio_dev) >= 0) *dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF; /* Guaranteed to be aligned with 8 byte boundary */ if (indio_dev->scan_timestamp) dat64[1] = pf->timestamp; iio_push_to_buffers(indio_dev, (u8 *)dat64); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; }
static irqreturn_t yas_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct iio_buffer *buffer = indio_dev->buffer; struct yas_state *st = iio_priv(indio_dev); int len = 0, i, j; size_t datasize = buffer->access->get_bytes_per_datum(buffer); int32_t *acc; acc = (int32_t *) kmalloc(datasize, GFP_KERNEL); if (acc == NULL) { dev_err(indio_dev->dev.parent, "memory alloc failed in buffer bh"); return -ENOMEM; } if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) { j = 0; for (i = 0; i < 3; i++) { if (test_bit(i, indio_dev->active_scan_mask)) { acc[j] = st->compass_data[i]; j++; } } len = j * 4; } /* Guaranteed to be aligned with 8 byte boundary */ if (buffer->scan_timestamp) *(s64 *)(((phys_addr_t)acc + len + sizeof(s64) - 1) & ~(sizeof(s64) - 1)) = pf->timestamp; iio_push_to_buffers(indio_dev, (u8 *)acc); iio_trigger_notify_done(indio_dev->trig); kfree(acc); return IRQ_HANDLED; }
/*
 * m4pas_isr() - M4 sensorhub passive-data interrupt callback.
 *
 * Reads M4PAS_NUM_PASSIVE_BUFFERS entries of each passive metric
 * (timestamp, steps, calories, heart rate, HR confidence, healthy
 * minutes) from the sensorhub registers, validates that each read
 * returned exactly the register size, copies the values into
 * dd->iiodat[] and pushes the whole array to the IIO buffer in one
 * call.  All work is done under dd->mutex.  On any failure the
 * partially-read data is discarded and an error is logged.
 */
static void m4pas_isr(enum m4sensorhub_irqs int_event, void *handle)
{
	int err = 0;
	struct iio_dev *iio = handle;
	struct m4pas_driver_data *dd = iio_priv(iio);
	int size = 0;
	/* One staging array per metric, filled from the M4 registers. */
	uint32_t passive_timestamp[M4PAS_NUM_PASSIVE_BUFFERS];
	uint16_t steps[M4PAS_NUM_PASSIVE_BUFFERS];
	uint16_t calories[M4PAS_NUM_PASSIVE_BUFFERS];
	uint16_t heartrate[M4PAS_NUM_PASSIVE_BUFFERS];
	uint8_t hrconfidence[M4PAS_NUM_PASSIVE_BUFFERS];
	uint8_t healthy_minutes[M4PAS_NUM_PASSIVE_BUFFERS];
	int i = 0;

	mutex_lock(&(dd->mutex));

	/* Each read must return exactly the register's size; a short
	 * read is treated as a protocol error (-EBADE). */
	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_PASSIVE_TIMESTAMP);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_TIMESTAMP,
				   (char *)&(passive_timestamp));
	if (err < 0) {
		m4pas_err("%s: Failed to read passive_timestamp data.\n",
			  __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "passive_timestamp");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_PASSIVE_STEPS);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_STEPS,
				   (char *)&(steps));
	if (err < 0) {
		m4pas_err("%s: Failed to read steps data.\n", __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "steps");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_PASSIVE_CALORIES);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_CALORIES,
				   (char *)&(calories));
	if (err < 0) {
		m4pas_err("%s: Failed to read calories data.\n", __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "calories");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_PASSIVE_HEARTRATE);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_HEARTRATE,
				   (char *)&(heartrate));
	if (err < 0) {
		m4pas_err("%s: Failed to read heartrate data.\n", __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "heartrate");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_PASSIVE_HRCONFIDENCE);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_HRCONFIDENCE,
				   (char *)&(hrconfidence));
	if (err < 0) {
		m4pas_err("%s: Failed to read hrconfidence data.\n", __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "hrconfidence");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4,
				       M4SH_REG_PASSIVE_HEALTHYMINUTES);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_PASSIVE_HEALTHYMINUTES,
				   (char *)&(healthy_minutes));
	if (err < 0) {
		m4pas_err("%s: Failed to read healthy_minutes data.\n",
			  __func__);
		goto m4pas_isr_fail;
	} else if (err != size) {
		m4pas_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "healthy_minutes");
		err = -EBADE;
		goto m4pas_isr_fail;
	}

	/* All reads succeeded: commit every entry to the IIO record,
	 * stamping each with the current kernel time. */
	for (i = 0; i < M4PAS_NUM_PASSIVE_BUFFERS; i++) {
		dd->iiodat[i].passive_timestamp = passive_timestamp[i];
		dd->iiodat[i].steps = steps[i];
		dd->iiodat[i].calories = calories[i];
		dd->iiodat[i].heartrate = heartrate[i];
		dd->iiodat[i].hrconfidence = hrconfidence[i];
		dd->iiodat[i].healthy_minutes = healthy_minutes[i];
		dd->iiodat[i].timestamp = iio_get_time_ns();
	}

	/*
	 * For some reason, IIO knows we are sending an array,
	 * so all M4PAS_NUM_PASSIVE_BUFFERS indicies will be sent
	 * in this one call (it does not need to go in the for-loop).
	 */
	iio_push_to_buffers(iio, (unsigned char *)&(dd->iiodat[0]));

m4pas_isr_fail:
	if (err < 0)
		m4pas_err("%s: Failed with error code %d.\n", __func__, err);

	mutex_unlock(&(dd->mutex));

	return;
}
/*
 * m4ges_isr() - M4 sensorhub gesture interrupt callback.
 *
 * Reads gesture type, confidence and value registers into dd->iiodat,
 * validating each read length, optionally notifies the display wakeup
 * machinery for wrist-rotate / view gestures, then timestamps and
 * pushes the record to the IIO buffer.  All work is done under
 * dd->mutex; on any read failure the event is dropped and logged.
 */
static void m4ges_isr(enum m4sensorhub_irqs int_event, void *handle)
{
	int err = 0;
	struct iio_dev *iio = handle;
	struct m4ges_driver_data *dd = iio_priv(iio);
	int size = 0;

	mutex_lock(&(dd->mutex));

	/* Each read must return exactly the register's size; a short
	 * read is treated as a protocol error (-EBADE). */
	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_GESTURE_GESTURE1);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_GESTURE_GESTURE1,
				   (char *)&(dd->iiodat.gesture_type));
	if (err < 0) {
		m4ges_err("%s: Failed to read gesture_type data.\n",
			  __func__);
		goto m4ges_isr_fail;
	} else if (err != size) {
		m4ges_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "gesture_type");
		err = -EBADE;
		goto m4ges_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_GESTURE_CONFIDENCE1);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_GESTURE_CONFIDENCE1,
				   (char *)&(dd->iiodat.gesture_confidence));
	if (err < 0) {
		m4ges_err("%s: Failed to read gesture_confidence data.\n",
			  __func__);
		goto m4ges_isr_fail;
	} else if (err != size) {
		m4ges_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "gesture_confidence");
		err = -EBADE;
		goto m4ges_isr_fail;
	}

	size = m4sensorhub_reg_getsize(dd->m4, M4SH_REG_GESTURE_VALUE1);
	err = m4sensorhub_reg_read(dd->m4, M4SH_REG_GESTURE_VALUE1,
				   (char *)&(dd->iiodat.gesture_value));
	if (err < 0) {
		m4ges_err("%s: Failed to read gesture_value data.\n",
			  __func__);
		goto m4ges_isr_fail;
	} else if (err != size) {
		m4ges_err("%s: Read %d bytes instead of %d for %s.\n",
			  __func__, err, size, "gesture_value");
		err = -EBADE;
		goto m4ges_isr_fail;
	}

#ifdef CONFIG_WAKEUP_SOURCE_NOTIFY
	if (dd->iiodat.gesture_type == GESTURE_WRIST_ROTATE) {
		notify_display_wakeup(GESTURE);
	} else if (dd->iiodat.gesture_type == GESTURE_VIEW) {
		if (dd->iiodat.gesture_value == GESTURE_VIEW_ON)
			notify_display_wakeup(GESTURE_VIEWON);
		else
			notify_display_wakeup(GESTURE_VIEWOFF);
		/* the GESTURE_VIEW is only effect for kernel now
		 * do not send gesture to android */
		/* NOTE: err is 0 here, so this goto skips the push
		 * without logging a failure. */
		goto m4ges_isr_fail;
	}
#endif /* CONFIG_WAKEUP_SOURCE_NOTIFY */

	dd->iiodat.timestamp = iio_get_time_ns();
	iio_push_to_buffers(iio, (unsigned char *)&(dd->iiodat));
	dd->gesture_count++;

m4ges_isr_fail:
	if (err < 0)
		m4ges_err("%s: Failed with error code %d.\n", __func__, err);

	mutex_unlock(&(dd->mutex));

	return;
}