static void hdmi_early_suspend(struct early_suspend *h)
{
	hdmi_dbg(hdmi->dev, "hdmi enter early suspend pwr %d state %d\n",
		 hdmi->pwr_mode, hdmi->state);

	flush_delayed_work(&hdmi->delay_work);

	mutex_lock(&hdmi->enable_mutex);
	hdmi->suspend = 1;
	if (!hdmi->enable) {
		mutex_unlock(&hdmi->enable_mutex);
		return;
	}

	if (hdmi->irq)
		disable_irq(hdmi->irq);

	mutex_unlock(&hdmi->enable_mutex);

	hdmi->command = HDMI_CONFIG_ENABLE;
	init_completion(&hdmi->complete);
	hdmi->wait = 1;
	queue_delayed_work(hdmi->workqueue, &hdmi->delay_work, 0);
	wait_for_completion_interruptible_timeout(&hdmi->complete,
						  msecs_to_jiffies(5000));
	flush_delayed_work(&hdmi->delay_work);
	return;
}
static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
{
	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
	flush_delayed_work(&toggle_charger);
	flush_delayed_work(&sharpsl_bat);

	if (sharpsl_pm.charge_mode == CHRG_ON)
		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
	else
		sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;

	return 0;
}
/* Subsystem handlers */
static int riva_shutdown(const struct subsys_data *subsys)
{
	pil_force_shutdown("wcnss");
	flush_delayed_work(&cancel_vote_work);
	return 0;
}
#ifdef CONFIG_PM
static int apds9130_suspend(struct i2c_client *client, pm_message_t mesg)
{
#if 1
#else
	struct apds9130_data *data = i2c_get_clientdata(client);

	if (data->sw_mode == PROX_STAT_SHUTDOWN)
		return 0;

	apds9130_set_enable(client, 0);
	apds9130_set_command(client, 2);

	__cancel_delayed_work(&data->dwork);
	flush_delayed_work(&data->dwork);
	flush_workqueue(apds9130_workqueue);

	data->sw_mode = PROX_STAT_SHUTDOWN;

	disable_irq(client->irq);
	/*
	err = pdata->power(0);
	if (err < 0) {
		printk(KERN_INFO "%s, Proximity Power Off Fail in suspend\n", __func__);
		return err;
	}
	*/
	irq_set_irq_wake(client->irq, 0);

	if (NULL != apds9130_workqueue) {
		destroy_workqueue(apds9130_workqueue);
		printk(KERN_INFO "%s, Destroy workqueue\n", __func__);
		apds9130_workqueue = NULL;
	}
#endif
	return 0;
}
static int gp2a_i2c_remove(struct i2c_client *client)
{
	struct gp2a_data *gp2a = i2c_get_clientdata(client);

	if (gp2a == NULL) {
		pr_err("%s, gp2a_data is NULL!!!!!\n", __func__);
		return 0;
	}

	if (gp2a->proximity_input_dev != NULL) {
		sysfs_remove_group(&gp2a->proximity_input_dev->dev.kobj,
				   &proximity_attribute_group);
		input_unregister_device(gp2a->proximity_input_dev);
		gp2a->proximity_input_dev = NULL;
	}

	cancel_delayed_work_sync(&gp2a->light_work);
	flush_delayed_work(&gp2a->light_work);
	mutex_destroy(&gp2a->light_mutex);

	if (gp2a->light_input_dev != NULL) {
		sysfs_remove_group(&gp2a->light_input_dev->dev.kobj,
				   &lightsensor_attribute_group);
		input_unregister_device(gp2a->light_input_dev);
		gp2a->light_input_dev = NULL;
	}

	mutex_destroy(&gp2a->data_mutex);
	kfree(gp2a);

	return 0;
}
static int firefly_fb_event_notify(struct notifier_block *self,
				   unsigned long action, void *data)
{
	struct fb_event *event = data;
	int blank_mode = *((int *)event->data);
	struct delayed_work *delay_work;

	if (action == FB_EARLY_EVENT_BLANK) {
		switch (blank_mode) {
		case FB_BLANK_UNBLANK:
			break;
		default:
			if (!ddev->vga->suspend) {
				delay_work = vga_submit_work(ddev->vga,
							     VGA_SUSPEND_CTL, 0, NULL);
				if (delay_work)
					flush_delayed_work(delay_work);
			}
			break;
		}
	} else if (action == FB_EVENT_BLANK) {
		switch (blank_mode) {
		case FB_BLANK_UNBLANK:
			if (ddev->vga->suspend) {
				vga_submit_work(ddev->vga, VGA_RESUME_CTL, 0, NULL);
			}
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}
static void userspace_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = log->context;

	if (lc->integrated_flush) {
		/* flush workqueue */
		if (atomic_read(&lc->sched_flush))
			flush_delayed_work(&lc->flush_log_work);

		destroy_workqueue(lc->dmlog_wq);
	}

	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
				    NULL, 0, NULL, NULL);

	if (lc->log_dev)
		dm_put_device(lc->ti, lc->log_dev);

	mempool_destroy(lc->flush_entry_pool);

	kfree(lc->usr_argv_str);
	kfree(lc);

	return;
}
static void gp2a_i2c_shutdown(struct i2c_client *client)
{
#if 0	/* temp for power off alarm not working issue. waiting vendor double check */
	struct gp2a_data *gp2a = i2c_get_clientdata(client);

	if (unlikely(gp2a == NULL)) {
		pr_err("%s, gp2a_data is NULL!!!!!\n", __func__);
		return;
	}

	if (gp2a->proximity_input_dev != NULL) {
		sysfs_remove_group(&gp2a->proximity_input_dev->dev.kobj,
				   &proximity_attribute_group);
		input_unregister_device(gp2a->proximity_input_dev);
		gp2a->proximity_input_dev = NULL;
	}

	cancel_delayed_work_sync(&gp2a->light_work);
	flush_delayed_work(&gp2a->light_work);
	mutex_destroy(&gp2a->light_mutex);

	if (gp2a->light_input_dev != NULL) {
		sysfs_remove_group(&gp2a->light_input_dev->dev.kobj,
				   &lightsensor_attribute_group);
		input_unregister_device(gp2a->light_input_dev);
		gp2a->light_input_dev = NULL;
	}

	mutex_destroy(&gp2a->data_mutex);
	kfree(gp2a);
#endif
}
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		/*
		 * Wait for wb shutdown to finish if someone else is just
		 * running wb_shutdown(). Otherwise we could proceed to wb /
		 * bdi destruction before wb_shutdown() is finished.
		 */
		wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
		return;
	}
	set_bit(WB_shutting_down, &wb->state);
	spin_unlock_bh(&wb->work_lock);

	cgwb_remove_from_bdi_list(wb);

	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));

	/*
	 * Make sure bit gets cleared after shutdown is finished. Matches with
	 * the barrier provided by test_and_clear_bit() above.
	 */
	smp_wmb();
	clear_bit(WB_shutting_down, &wb->state);
}
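/*
 * A minimal, self-contained sketch (hypothetical module and symbol names,
 * not taken from any of the drivers above) of the teardown pattern that
 * wb_shutdown() documents: clear a "registered" flag so the work function
 * stops rearming itself, pull any pending run forward with
 * mod_delayed_work(..., 0), then flush_delayed_work() to wait for the
 * final execution.
 */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work demo_dwork;
static bool demo_registered;

static void demo_workfn(struct work_struct *work)
{
	/* Rearm only while still registered, mirroring wb_workfn(). */
	if (READ_ONCE(demo_registered))
		schedule_delayed_work(&demo_dwork, HZ);
}

static int __init demo_init(void)
{
	demo_registered = true;
	INIT_DELAYED_WORK(&demo_dwork, demo_workfn);
	schedule_delayed_work(&demo_dwork, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Stop rearming, run any queued instance immediately, and wait for it. */
	WRITE_ONCE(demo_registered, false);
	mod_delayed_work(system_wq, &demo_dwork, 0);
	flush_delayed_work(&demo_dwork);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");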
static int riva_shutdown(const struct subsys_data *subsys)
{
	pil_force_shutdown("wcnss");
	flush_delayed_work(&cancel_vote_work);
	disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);

	return 0;
}
static void rtl8187_unregister_led(struct rtl8187_led *led)
{
	struct ieee80211_hw *hw = led->dev;
	struct rtl8187_priv *priv = hw->priv;

	led_classdev_unregister(&led->led_dev);
	flush_delayed_work(&priv->led_off);
	led->dev = NULL;
}
/* Subsystem handlers */
static int riva_shutdown(const struct subsys_data *subsys)
{
	pil_force_shutdown("wcnss");
	pr_info("[SSR] pil_force_shutdown is finished\n");
	flush_delayed_work(&cancel_vote_work);
	pr_info("[SSR] flush_delayed_work(vote) for shutdown is finished\n");
	return 0;
}
void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}
static int affs_remount(struct super_block *sb, int *flags, char *data)
{
	struct affs_sb_info *sbi = AFFS_SB(sb);
	int blocksize;
	kuid_t uid;
	kgid_t gid;
	int mode;
	int reserved;
	int root_block;
	unsigned long mount_flags;
	int res = 0;
	char *new_opts = kstrdup(data, GFP_KERNEL);
	char volume[32];
	char *prefix = NULL;

	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n", *flags, data);

	sync_filesystem(sb);
	*flags |= MS_NODIRATIME;

	memcpy(volume, sbi->s_volume, 32);
	if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
			   &blocksize, &prefix, volume, &mount_flags)) {
		kfree(prefix);
		kfree(new_opts);
		return -EINVAL;
	}

	flush_delayed_work(&sbi->sb_work);
	replace_mount_options(sb, new_opts);

	sbi->s_flags = mount_flags;
	sbi->s_mode  = mode;
	sbi->s_uid   = uid;
	sbi->s_gid   = gid;
	/* protect against readers */
	spin_lock(&sbi->symlink_lock);
	if (prefix) {
		kfree(sbi->s_prefix);
		sbi->s_prefix = prefix;
	}
	memcpy(sbi->s_volume, volume, 32);
	spin_unlock(&sbi->symlink_lock);

	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;

	if (*flags & MS_RDONLY)
		affs_free_bitmap(sb);
	else
		res = affs_init_bitmap(sb, flags);

	return res;
}
static int wcnss_shutdown(const struct subsys_desc *subsys)
{
	struct pronto_data *drv = subsys_to_drv(subsys);

	pil_shutdown(&drv->desc);
	flush_delayed_work(&drv->cancel_vote_work);
	wcnss_flush_delayed_boot_votes();

	return 0;
}
static void pil_shutdown(struct pil_device *pil)
{
	pil->desc->ops->shutdown(pil->desc);
	if (proxy_timeout_ms == 0 && pil->desc->ops->proxy_unvote)
		pil->desc->ops->proxy_unvote(pil->desc);
	else
		flush_delayed_work(&pil->proxy);
	pil_set_state(pil, PIL_OFFLINE);
}
void hdmi_suspend(struct hdmi *hdmi)
{
	del_timer(&hdmi->timer);
	flush_delayed_work(&hdmi->work);
	if (hdmi->mode == DISP_ON_HDMI) {
		hdmi->ops->remove(hdmi);
		hdmi->mode = DISP_ON_LCD;
	}
	return;
}
/**
 * radeon_irq_kms_fini - tear down driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
	drm_vblank_cleanup(rdev->ddev);
	if (rdev->irq.installed) {
		drm_irq_uninstall(rdev->ddev);
		rdev->irq.installed = false;
		if (rdev->msi_enabled)
			pci_disable_msi(rdev->pdev);
		flush_delayed_work(&rdev->hotplug_work);
	}
}
void flush_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/* these works are per-cpu, no need for flush_sync */
		flush_delayed_work(&b->work);
	}
}
void flush_cpu_work(void)
{
	int i;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		flush_delayed_work(&b->work);
	}
}
/* Subsystem handlers */
static int riva_shutdown(const struct subsys_desc *subsys)
{
	pr_info(MODULE_NAME "%s + \n", __FUNCTION__);
	pil_force_shutdown("wcnss");
	pr_info(MODULE_NAME "%s pil_force_shutdown done \n", __FUNCTION__);
	flush_delayed_work(&cancel_vote_work);
	wcnss_flush_delayed_boot_votes();
	disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);
	pr_info(MODULE_NAME "%s - \n", __FUNCTION__);
	return 0;
}
void snd_ak4114_reinit(struct ak4114 *chip)
{
	chip->init = 1;
	mb();
	flush_delayed_work(&chip->work);
	ak4114_init_regs(chip);
	/* bring up statistics / event queueing */
	chip->init = 0;
	if (chip->kctls[0])
		schedule_delayed_work(&chip->work, HZ / 10);
}
static int riva_shutdown(const struct subsys_desc *desc)
{
	struct riva_data *drv;

	drv = container_of(desc, struct riva_data, subsys_desc);
	pil_shutdown(&drv->pil_desc);
	flush_delayed_work(&drv->cancel_work);
	wcnss_flush_delayed_boot_votes();
	disable_irq_nosync(drv->irq);

	return 0;
}
static void xboxdrv_disconnect(struct usb_interface *intf)
{
	struct usb_xboxdrv *xboxdrv = usb_get_intfdata(intf);

	printk(KERN_INFO "xboxdrv_disconnect()\n");

	usb_set_intfdata(intf, NULL);
	cancel_delayed_work(&xboxdrv->worker);
	flush_delayed_work(&xboxdrv->worker);
	kfree(xboxdrv);
}
static s32 muic_suspend(struct i2c_client *client, pm_message_t state)
{
	client->dev.power.power_state = state;

	/*
	 * 2012.08.18, [email protected]:
	 * MUIC should not ignore disconnection of the USB cable, so flush the
	 * pending work instead of cancelling it.
	 */
#if 0
	cancel_delayed_work(&muic_wq);
#else
	flush_delayed_work(&muic_wq);
#endif

	printk(KERN_INFO "[MUIC] muic_suspend \n");

	return 0;
}
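/*
 * A hedged sketch (hypothetical driver and symbol names, not the MUIC code
 * above) of the design choice recorded in muic_suspend(): flushing the
 * pending detection work before suspending lets a queued cable-state check
 * run to completion, whereas cancel_delayed_work() would silently drop it.
 */
#include <linux/pm.h>
#include <linux/workqueue.h>

/* Assumed to be initialized in probe and queued from the cable-detect ISR. */
static struct delayed_work demo_detect_work;

static int demo_muic_suspend(struct device *dev)
{
	/* Do not cancel: a pending disconnect check must still be serviced. */
	flush_delayed_work(&demo_detect_work);
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_muic_pm_ops, demo_muic_suspend, NULL);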
void msm_pil_unregister(struct pil_device *pil)
{
	if (IS_ERR_OR_NULL(pil))
		return;

	if (get_device(&pil->dev)) {
		mutex_lock(&pil->lock);
		WARN_ON(pil->count);
		flush_delayed_work(&pil->proxy);
		msm_pil_debugfs_remove(pil);
		device_unregister(&pil->dev);
		mutex_unlock(&pil->lock);
		put_device(&pil->dev);
	}
}
/* Subsystem handlers */
static int riva_shutdown(const struct subsys_data *subsys)
{
	pr_info(MODULE_NAME ": riva_shutdown.\n");
	//ASUS_BSP+++ "for /data/log/ASUSEvtlog"
	ASUSEvtlog("[wcnss]: riva_shutdown.\n");
	//ASUS_BSP--- "for /data/log/ASUSEvtlog"
	pil_force_shutdown("wcnss");
	flush_delayed_work(&cancel_vote_work);
	wcnss_flush_delayed_boot_votes();
	disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);
	return 0;
}
static void __exit myworkqueue_exit(void)
{
	printk(KERN_INFO "workqueue: %s\n", __FUNCTION__);

	if (dynamic_work) {
		flush_work(dynamic_work);
		kfree(dynamic_work);
	}

	flush_delayed_work(&static_delay_work);

	if (wq) {
		flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}
static int userspace_postsuspend(struct dm_dirty_log *log)
{
	int r;
	struct log_c *lc = log->context;

	/*
	 * Run planned flush earlier.
	 */
	if (lc->integrated_flush && atomic_read(&lc->sched_flush))
		flush_delayed_work(&lc->flush_log_work);

	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
				 NULL, 0, NULL, NULL);

	return r;
}
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {