/*
 * Polling-mode worker: read one sample and re-arm the delayed work.
 *
 * Fix: the original stored bma250_report_data()'s return in a local
 * `rc` that was never read, triggering -Wunused-but-set-variable.
 * The result is now deliberately ignored — in polling mode a single
 * failed read is dropped and the next poll is scheduled anyway.
 *
 * NOTE(review): unlike the locked variants below, this version reads
 * dd->power without holding bma250_power_lock — presumably an older
 * revision; confirm against the power-change path before reuse.
 */
static void bma250_work_f(struct work_struct *work)
{
	struct driver_data *dd = container_of(work, struct driver_data,
					      work_data.work);

	if (dd->power) {
		/* Best effort: keep polling even if this read fails. */
		bma250_report_data(dd);
		schedule_delayed_work(&dd->work_data, dd->delay_jiffies);
	}
}
/*
 * Polling-mode worker (locked variant): under bma250_power_lock, read
 * one sample and re-arm the delayed work only when the chip is powered
 * and the read succeeded. A failed read stops the polling loop until
 * it is restarted elsewhere.
 */
static void bma250_work_f(struct work_struct *work)
{
	struct driver_data *dd = container_of(work, struct driver_data,
					      work_data.work);
	int err;

	mutex_lock(&bma250_power_lock);
	if (dd->power) {
		err = bma250_report_data(dd);
		if (!err)
			/* Re-arm the next poll only after a clean read. */
			schedule_delayed_work(&dd->work_data,
					      dd->delay_jiffies);
	}
	mutex_unlock(&bma250_power_lock);
}
/*
 * Worker (settings-refresh variant): under bma250_power_lock, report a
 * sample and, if the read succeeded, push updated settings to the chip.
 * The result of bma250_update_settings() is intentionally discarded,
 * matching the original behavior.
 */
static void bma250_work_f(struct work_struct *work)
{
	struct driver_data *dd = container_of(work, struct driver_data,
					      work_data.work);

	mutex_lock(&bma250_power_lock);
	if (dd->power && !bma250_report_data(dd))
		/* Settings refresh is best effort; errors are dropped. */
		bma250_update_settings(dd);
	mutex_unlock(&bma250_power_lock);
}
/*
 * Threaded/hard IRQ handler. With bma250_power_lock held:
 *  - chip powered: report the sample and, on success, refresh settings
 *    (settings-update errors are dropped, as in the original);
 *  - chip off: latch the interrupt in dd->irq_pending so resume can
 *    replay it.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t bma250_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;
	struct driver_data *dd = dev_get_drvdata(dev);

	mutex_lock(&bma250_power_lock);
	if (!dd->power) {
		/* Remember the event; it is serviced on resume. */
		dd->irq_pending = true;
	} else if (!bma250_report_data(dd)) {
		bma250_update_settings(dd);
	}
	mutex_unlock(&bma250_power_lock);

	return IRQ_HANDLED;
}
/*
 * PM resume: re-run the board setup hook (if the platform provides
 * one), reprogram the chip, and replay any interrupt latched while the
 * device was suspended.
 *
 * Returns 0 on success or a negative error code from the first step
 * that fails.
 */
static int bma250_resume(struct device *dev)
{
	struct driver_data *dd = dev_get_drvdata(dev);
	int err = 0;

	if (dd->pdata && dd->pdata->setup) {
		err = dd->pdata->setup(&dd->ic_dev->dev);
		if (err)
			return err;
	}

	err = bma250_config(dd);
	if (err)
		return err;

	if (dd->irq_pending) {
		/* Service the event that arrived while suspended. */
		dd->irq_pending = false;
		err = bma250_report_data(dd);
	}

	return err;
}