/*
 * Read metric data for a [from, to] timespec window.
 *
 * Convenience wrapper: converts both timespec endpoints to nanoseconds
 * (via the ts_to_ns out-param macro) and delegates to the
 * nanosecond-granularity reader. Returns whatever c3db_read_ns returns.
 */
int c3db_read_ts( C3HDL *h, struct timespec from, struct timespec to, int metric, C3RES *res )
{
	int64_t begin_ns, end_ns;

	ts_to_ns( from, begin_ns );
	ts_to_ns( to, end_ns );

	return c3db_read_ns( h, begin_ns, end_ns, metric, res );
}
irqreturn_t stml0xx_wake_isr(int irq, void *dev) { static struct timespec ts; static struct stml0xx_work_struct *stm_ws; struct stml0xx_data *ps_stml0xx = dev; getrawmonotonic(&ts); if (stml0xx_irq_disable) return IRQ_HANDLED; wake_lock_timeout(&ps_stml0xx->wake_sensor_wakelock, HZ); stm_ws = kmalloc( sizeof(struct stml0xx_work_struct), GFP_ATOMIC); if (!stm_ws) { dev_err(dev, "stml0xx_wake_isr: unable to allocate work struct"); return IRQ_HANDLED; } INIT_WORK((struct work_struct *)stm_ws, stml0xx_irq_wake_work_func); stm_ws->ts_ns = ts_to_ns(ts); queue_work(ps_stml0xx->irq_work_queue, (struct work_struct *)stm_ws); return IRQ_HANDLED; }
static int stml0xx_resume(struct device *dev) { static struct timespec ts; static struct stml0xx_delayed_work_struct *stm_ws; struct stml0xx_data *ps_stml0xx = spi_get_drvdata(to_spi_device(dev)); get_monotonic_boottime(&ts); dev_dbg(&stml0xx_misc_data->spi->dev, "%s", __func__); mutex_lock(&ps_stml0xx->lock); ps_stml0xx->is_suspended = false; enable_irq(ps_stml0xx->irq); if (ps_stml0xx->pending_wake_work) { stm_ws = kmalloc( sizeof(struct stml0xx_delayed_work_struct), GFP_ATOMIC); if (!stm_ws) { dev_err(dev, "stml0xx_resume: unable to allocate work struct"); return 0; } INIT_DELAYED_WORK((struct delayed_work *)stm_ws, stml0xx_irq_wake_work_func); stm_ws->ts_ns = ts_to_ns(ts); queue_delayed_work(ps_stml0xx->irq_work_queue, (struct delayed_work *)stm_ws, 0); ps_stml0xx->pending_wake_work = false; } if (stml0xx_irq_disable == 0) queue_work(ps_stml0xx->irq_work_queue, &ps_stml0xx->clear_interrupt_status_work); mutex_unlock(&ps_stml0xx->lock); return 0; }