static void ev3_uart_send_ack(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct ev3_uart_port_data *port = container_of(dwork, struct ev3_uart_port_data, send_ack_work); int err; ev3_uart_write_byte(port->tty, EV3_UART_SYS_ACK); if (!port->sensor.context && port->type_id <= EV3_UART_TYPE_MAX) { port->sensor.context = port->tty; err = register_lego_sensor(&port->sensor, port->tty->dev); if (err < 0) { port->sensor.context = NULL; if (port->in_port) { port->in_port = NULL; put_device(&port->in_port->dev); } dev_err(port->tty->dev, "Could not register UART sensor on tty %s", port->tty->name); return; } } else { dev_err(port->tty->dev, "Reconnected due to: %s\n", port->last_err); } mdelay(4); schedule_work(&port->change_bitrate_work); }
static void psi_update_work(struct work_struct *work) { struct delayed_work *dwork; struct psi_group *group; bool nonidle; dwork = to_delayed_work(work); group = container_of(dwork, struct psi_group, clock_work); /* * If there is task activity, periodically fold the per-cpu * times and feed samples into the running averages. If things * are idle and there is no data to process, stop the clock. * Once restarted, we'll catch up the running averages in one * go - see calc_avgs() and missed_periods. */ nonidle = update_stats(group); if (nonidle) { unsigned long delay = 0; u64 now; now = sched_clock(); if (group->next_update > now) delay = nsecs_to_jiffies(group->next_update - now) + 1; schedule_delayed_work(dwork, delay); } }
void delayed_m3(struct work_struct *work) { struct delayed_work *del_work = to_delayed_work(work); mhi_device_ctxt *mhi_dev_ctxt = container_of(del_work, mhi_device_ctxt, m3_work); mhi_initiate_m3(mhi_dev_ctxt); }
/* Test-blink work: toggle the LED and re-arm itself every 1100 ms. */
static void msm_led_cci_test_blink_work(struct work_struct *work)
{
	struct delayed_work *blink = to_delayed_work(work);

	msm_led_cci_set_brightness(&fctrl, fctrl.led_info->test_status);
	fctrl.led_info->test_status = !fctrl.led_info->test_status;
	schedule_delayed_work(blink, msecs_to_jiffies(1100));
}
/* Delayed work: run the 8187SE station rate-adaptation algorithm. */
void rtl8180_rate_adapter(struct work_struct *work)
{
	struct ieee80211_device *ieee =
		container_of(to_delayed_work(work), struct ieee80211_device,
			     rate_adapter_wq);

	StaRateAdaptive87SE(ieee->dev);
}
static void fsa9485_mhl_detect(struct work_struct *work) { struct delayed_work *dw = to_delayed_work(work); struct fsa9485_usbsw *usbsw = container_of(dw, struct fsa9485_usbsw, mhl_work); struct fsa9485_platform_data *pdata = usbsw->pdata; if (local_usbsw->mhl_ready == 0) { fsa9485_set_mhl_cable(isMhlAttached); dev_info(&usbsw->client->dev, "%s: ignore mhl-detection in booting time\n", __func__); isMhlAttached = MHL_DETACHED; return; } dev_info(&usbsw->client->dev, "%s(%d)\n", __func__, isMhlAttached); if (isMhlAttached == MHL_ATTACHED) { if (pdata->mhl_cb) pdata->mhl_cb(FSA9485_ATTACHED); } else if(isMhlAttached == MHL_DETACHED) { if (pdata->mhl_cb) pdata->mhl_cb(FSA9485_DETACHED); } else { dev_err(&usbsw->client->dev, "[ERROR] %s() mhl known state\n", __func__); } }
static void gb_bootrom_timedout(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork); struct device *dev = &bootrom->connection->bundle->dev; const char *reason; switch (bootrom->next_request) { case NEXT_REQ_FIRMWARE_SIZE: reason = "Firmware Size Request"; break; case NEXT_REQ_GET_FIRMWARE: reason = "Get Firmware Request"; break; case NEXT_REQ_READY_TO_BOOT: reason = "Ready to Boot Request"; break; case NEXT_REQ_MODE_SWITCH: reason = "Interface Mode Switch"; break; default: reason = NULL; dev_err(dev, "Invalid next-request: %u", bootrom->next_request); break; } dev_err(dev, "Timed out waiting for %s from the Module\n", reason); mutex_lock(&bootrom->mutex); free_firmware(bootrom); mutex_unlock(&bootrom->mutex); /* TODO: Power-off Module ? */ }
static void pmem_header_update_worker(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct bittern_cache *bc; int ret; bc = container_of(dwork, struct bittern_cache, bc_pmem_update_work); ASSERT(bc != NULL); ASSERT_BITTERN_CACHE(bc); M_ASSERT(bc->bc_pmem_update_workqueue != NULL); if (bc->error_state == ES_NOERROR) { BT_TRACE(BT_LEVEL_TRACE2, bc, NULL, NULL, NULL, NULL, "bc=%p", bc); ret = pmem_header_update(bc, 0); /* should make this a common function */ if (ret != 0) { printk_err("%s: cannot update header: %d. will fail all future requests\n", bc->bc_name, ret); bc->error_state = ES_ERROR_FAIL_ALL; } } schedule_delayed_work(&bc->bc_pmem_update_work, msecs_to_jiffies(30000)); }
static void alias_guid_work(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); int ret = 0; struct mlx4_next_alias_guid_work *rec; struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port = container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det, alias_guid_work); struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent; struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid, struct mlx4_ib_sriov, alias_guid); struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); rec = kzalloc(sizeof *rec, GFP_KERNEL); if (!rec) { pr_err("alias_guid_work: No Memory\n"); return; } pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1); ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); if (ret) { pr_debug("No more records to update.\n"); goto out; } set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num, &rec->rec_det); out: kfree(rec); }
static void gab_work(struct work_struct *work) { struct gab *adc_bat; struct gab_platform_data *pdata; struct delayed_work *delayed_work; bool is_plugged; int status; delayed_work = to_delayed_work(work); adc_bat = container_of(delayed_work, struct gab, bat_work); pdata = adc_bat->pdata; status = adc_bat->status; is_plugged = power_supply_am_i_supplied(adc_bat->psy); adc_bat->cable_plugged = is_plugged; if (!is_plugged) adc_bat->status = POWER_SUPPLY_STATUS_DISCHARGING; else if (gab_charge_finished(adc_bat)) adc_bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING; else adc_bat->status = POWER_SUPPLY_STATUS_CHARGING; if (status != adc_bat->status) power_supply_changed(adc_bat->psy); }
/* Delayed work: enable the accessory USB gadget function. */
static void acc_work(struct work_struct *data)
{
	struct acc_dev *dev = container_of(to_delayed_work(data),
					   struct acc_dev, work);

	android_enable_function(&dev->function, 1);
}
/*
 * Periodic bus-frequency governor: sample the platform monitor, pick a
 * target OPP, sequence the voltage/frequency change, and re-arm itself.
 *
 * Ordering is behavior-critical: voltage is raised *before* switching to
 * a higher frequency, and lowered *after* switching to a lower one.
 */
static void exynos_busfreq_timer(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct busfreq_data *data = container_of(delayed_work,
						 struct busfreq_data, worker);
	struct opp *opp;
	unsigned int voltage;
	unsigned long currfreq;
	unsigned long newfreq;
	unsigned int index = 0;

	/* Ask the platform monitor for the desired operating point. */
	opp = data->monitor(data);

	/* A debug OPP lock, if set, overrides the monitor's choice. */
	if (bus_ctrl.opp_lock)
		opp = bus_ctrl.opp_lock;

	ppmu_start(data->dev);

	newfreq = opp_get_freq(opp);
	index = data->get_table_index(opp);

	mutex_lock(&busfreq_lock);

	/* Nothing to do: already at target, invalid target, or disabled. */
	if (opp == data->curr_opp || newfreq == 0 || data->use == false)
		goto out;

	currfreq = opp_get_freq(data->curr_opp);
	voltage = opp_get_voltage(opp);

	if (newfreq > currfreq) {
		/* Scaling up: raise MIF, then INT, before the switch. */
		regulator_set_voltage(data->vdd_mif, voltage,
				      voltage + 25000);
		voltage = data->get_int_volt(index);
		regulator_set_voltage(data->vdd_int, voltage,
				      voltage + 25000);
		/*if (data->busfreq_prepare)
			data->busfreq_prepare(index);*/
	}
	if (data->set_qos)
		data->set_qos(index);

	data->target(index);

	if (newfreq < currfreq) {
		/* Scaling down: lower the rails after the switch. */
		/*if (data->busfreq_post)
			data->busfreq_post(index);*/
		regulator_set_voltage(data->vdd_mif, voltage,
				      voltage + 25000);
		voltage = data->get_int_volt(index);
		regulator_set_voltage(data->vdd_int, voltage,
				      voltage + 25000);
	}
	data->curr_opp = opp;
out:
	update_busfreq_stat(data, index);
	mutex_unlock(&busfreq_lock);
	queue_delayed_work(system_freezable_wq, &data->worker,
			   data->sampling_rate);
}
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mcast_group *group; struct mcast_req *req = NULL; group = container_of(delay, typeof(*group), timeout_work); mutex_lock(&group->lock); if (group->state == MCAST_JOIN_SENT) { if (!list_empty(&group->pending_list)) { req = list_first_entry(&group->pending_list, struct mcast_req, group_list); list_del(&req->group_list); list_del(&req->func_list); --group->func[req->func].num_pend_reqs; mutex_unlock(&group->lock); kfree(req); if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { if (release_group(group, 1)) return; } else { kfree(group); return; } mutex_lock(&group->lock); } else
/* This is work queue function for channel switch handling. * This function takes care of updating new channel definitin to * bss config structure, restart AP and indicate channel switch success * to cfg80211. */ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) { struct mwifiex_uap_bss_param *bss_cfg; struct delayed_work *delayed_work = to_delayed_work(work); struct mwifiex_private *priv = container_of(delayed_work, struct mwifiex_private, dfs_chan_sw_work); bss_cfg = &priv->bss_cfg; if (!bss_cfg->beacon_period) { mwifiex_dbg(priv->adapter, ERROR, "channel switch: AP already stopped\n"); return; } mwifiex_uap_set_channel(priv, bss_cfg, priv->dfs_chandef); if (mwifiex_config_start_uap(priv, bss_cfg)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to start AP after channel switch\n"); return; } mwifiex_dbg(priv->adapter, MSG, "indicating channel switch completion to kernel\n"); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef); }
/*
 * TMU polling work: read the current temperature and drive the thermal
 * state machine (normal / throttled / tripped) plus DRAM auto-refresh
 * throttling.  Re-arms itself, EXCEPT in the NORMAL case where the TMU
 * interrupt is re-enabled and polling stops until the next IRQ.
 */
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info = container_of(delayed_work, struct tmu_info,
					     polling);
	struct tmu_data *data = info->dev->platform_data;
	int cur_temp;

	cur_temp = get_cur_temp(info);
	dev_dbg(info->dev, "Current: %dc, FLAG=%d\n",
		cur_temp, info->tmu_state);

	mutex_lock(&tmu_lock);
	switch (info->tmu_state) {
	case TMU_STATUS_NORMAL:
		/*
		 * Cooled down: stop throttling and hand control back to the
		 * interrupt.  goto out deliberately skips the requeue below.
		 */
		exynos_thermal_unthrottle();
		enable_irq(info->irq);
		goto out;
	case TMU_STATUS_THROTTLED:
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else if (cur_temp > data->ts.stop_throttle)
			exynos_thermal_throttle();
		else
			info->tmu_state = TMU_STATUS_NORMAL;
		break;
	case TMU_STATUS_TRIPPED:
		/* Past the emergency point: halt rather than risk damage. */
		if (cur_temp >= data->ts.start_emergency)
			panic("Emergency thermal shutdown: temp=%d\n",
			      cur_temp);
		if (cur_temp >= data->ts.start_tripping)
			pr_err("thermal tripped: temp=%d\n", cur_temp);
		else
			info->tmu_state = TMU_STATUS_THROTTLED;
		break;
	default:
		break;
	}

	/* Memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle &&
	    !info->mem_throttled) {
		set_refresh_period(FREQ_IN_PLL,
				   info->auto_refresh_mem_throttle);
		info->mem_throttled = true;
		dev_dbg(info->dev, "set auto refresh period %dns\n",
			info->auto_refresh_mem_throttle);
	} else if (cur_temp <= data->ts.stop_mem_throttle &&
		   info->mem_throttled) {
		set_refresh_period(FREQ_IN_PLL, info->auto_refresh_normal);
		info->mem_throttled = false;
		dev_dbg(info->dev, "set auto refresh period %dns\n",
			info->auto_refresh_normal);
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			      info->sampling_rate);
out:
	mutex_unlock(&tmu_lock);
}
/* Deferred extcon registration for the SSUSB OTG switch. */
static void extcon_register_dwork(struct work_struct *work)
{
	struct otg_switch_mtk *otg_sx =
		container_of(to_delayed_work(work), struct otg_switch_mtk,
			     extcon_reg_dwork);

	ssusb_extcon_register(otg_sx);
}
/* Delayed work: flush queued high-power transmit frames. */
void rtl8180_tx_pw_wq(struct work_struct *work)
{
	struct ieee80211_device *ieee =
		container_of(to_delayed_work(work), struct ieee80211_device,
			     tx_pw_wq);

	DoTxHighPower(ieee->dev);
}
static void acc_work(struct work_struct *data) { struct delayed_work *delayed = to_delayed_work(data); struct acc_dev *dev = container_of(delayed, struct acc_dev, work); // printk(KERN_ERR "[ACC_WORK] dev->function name : %s\n", dev->function.name); android_enable_function(&dev->function, 1); }
/* End the lockd grace period for this network namespace. */
static void grace_ender(struct work_struct *grace)
{
	struct lockd_net *ln = container_of(to_delayed_work(grace),
					    struct lockd_net,
					    grace_period_end);

	locks_end_grace(&ln->lockd_manager);
}
/* Deferred mailbox update for the OMAP DWC3 glue layer. */
static void omap_dwc3_mailbox_work(struct work_struct *mailbox_work)
{
	struct dwc3_omap *omap =
		container_of(to_delayed_work(mailbox_work), struct dwc3_omap,
			     omap_dwc3_mailbox_work);

	omap_dwc3_set_mailbox(omap);
}
static void legoev3_uart_send_ack(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct legoev3_uart_port_data *port = container_of(dwork, struct legoev3_uart_port_data, send_ack_work); struct legoev3_port_device *sensor; struct device *in_port_dev; int err; if (!port->sensor && port->ms.type_id <= LEGOEV3_UART_TYPE_MAX) { sensor = legoev3_port_device_register( "ev3-uart-sensor", -1, /* TODO: get input port ID here */ &legoev3_uart_sensor_device_type, port->ms.type_id, &port->ms, sizeof(struct msensor_device), port->tty->dev); if (IS_ERR(sensor)) { dev_err(port->tty->dev, "Could not register UART sensor on tty %s", port->tty->name); return; } port->sensor = sensor; /* * This is a special case for the input ports on the EV3 brick. * We use the name of the input port instead of the tty to make * it easier to know which sensor is which. */ in_port_dev = bus_find_device(&legoev3_bus_type, NULL, port->tty->name, legoev3_uart_match_input_port); if (in_port_dev) { port->in_port = to_legoev3_port_device(in_port_dev); strncpy(port->ms.port_name, dev_name(&port->in_port->dev), MSENSOR_PORT_NAME_SIZE); } else strncpy(port->ms.port_name, port->tty->name, MSENSOR_PORT_NAME_SIZE); port->ms.context = port->tty; err = register_msensor(&port->ms, &port->sensor->dev); if (err < 0) { dev_err(port->tty->dev, "Could not register UART sensor on tty %s", port->tty->name); legoev3_port_device_unregister(sensor); port->sensor = NULL; return; } } else dev_err(port->tty->dev, "Reconnected due to: %s\n", port->last_err); legoev3_uart_write_byte(port->tty, LEGOEV3_UART_SYS_ACK); schedule_delayed_work(&port->change_bitrate_work, msecs_to_jiffies(LEGOEV3_UART_SET_BITRATE_DELAY)); }
/* handle HDMI connect/disconnect */
static void tda998x_hpd(struct work_struct *work)
{
	struct tda998x_priv *priv = container_of(to_delayed_work(work),
						 struct tda998x_priv, dwork);

	if (priv->encoder && priv->encoder->dev)
		drm_kms_helper_hotplug_event(priv->encoder->dev);
}
/* Delayed work: switch the LED off once its timeout expires. */
static void qpnp_led_turn_off_delayed(struct work_struct *work)
{
	struct qpnp_led_data *led = container_of(to_delayed_work(work),
						 struct qpnp_led_data, dwork);

	led->cdev.brightness = LED_OFF;
	qpnp_led_set(&led->cdev, led->cdev.brightness);
}
/* One-shot work: release framebuffer 0 and free the work item itself. */
static void close_fb_work(struct work_struct *work)
{
	struct delayed_work *fb_work = to_delayed_work(work);
	struct fb_info *fb_info = registered_fb[0];

	if (fb_info && fb_info->fbops->fb_release)
		fb_info->fbops->fb_release(fb_info, 0);
	kfree(fb_work);
}
/* Flush the fbdev's accumulated dirty rectangle to the host. */
static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
	struct virtio_gpu_fbdev *vfbdev =
		container_of(to_delayed_work(work), struct virtio_gpu_fbdev,
			     work);
	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

	virtio_gpu_dirty_update(vgfb, false, vgfb->x1, vgfb->y1,
				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}
/* Delayed work: stop vibration after the requested duration elapses. */
static void gb_vibrator_worker(struct work_struct *work)
{
	struct gb_vibrator_device *vib =
		container_of(to_delayed_work(work), struct gb_vibrator_device,
			     delayed_work);

	turn_off(vib);
}
/* Delayed work: run the 8187SE station rate-adaptation algorithm. */
void rtl8180_rate_adapter(struct work_struct *work)
{
	struct ieee80211_device *ieee =
		container_of(to_delayed_work(work), struct ieee80211_device,
			     rate_adapter_wq);

	StaRateAdaptive87SE(ieee->dev);
}
/* Re-check earjack-debugger presence; retarget the console on change. */
static void set_console_work(struct work_struct *work)
{
	struct earjack_debugger_device *adev =
		container_of(to_delayed_work(work),
			     struct earjack_debugger_device, work);
	int detect = earjack_debugger_detected(adev);

	if (detect != adev->saved_detect)
		earjack_debugger_set_console(adev);
}
/* Periodic work: log the current TMU temperature and re-arm polling. */
static void cur_temp_monitor(struct work_struct *work)
{
	struct tmu_info *info = container_of(to_delayed_work(work),
					     struct tmu_info, monitor);

	pr_info("current temp = %d\n", get_cur_temp(info));
	queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
			      info->sampling_rate);
}
/* Periodic work: refresh cached QoS rate-limit stats and reschedule. */
static void update_stats_cache(struct work_struct *work)
{
	struct nfp_flower_priv *fl_priv =
		container_of(to_delayed_work(work), struct nfp_flower_priv,
			     qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}