Example #1
static void my_exit(void) {

	// release the IRQ line
	// #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30)
	//	if (!can_request_irq(thisIRQ, irqflags))
	// #endif
	free_irq(thisIRQ, &dev_id);

	while (delayed_work_pending(hello_data->wk)) {
		cancel_delayed_work_sync(hello_data->wk);
        printk(KERN_ALERT "2470:10.6a: Cancelling delayed work!\n");
	}

	destroy_workqueue(hello_data->proc_hello_wkq);

	kfree(hello_data->wk);

	kfree(hello_data->proc_hello_value);
	kfree(hello_data);

	if (proc_hello)
		remove_proc_entry (HELLO, proc_mydev);
	if (proc_mydev)
		remove_proc_entry (MYDEV, 0);

	// module exit message
	printk(KERN_ALERT "2470:10.6a: main destroyed!\n");
}
Example #2
static int max77665_charger_types(struct max77665_charger *charger)
{
#define MA_TO_UA 1000
	enum cable_status_t cable_status = charger->cable_status;
	int chgin_ilim = 0;
	int ret;

	switch (cable_status) {
	case CABLE_TYPE_USB:	//USB input current 500mA
		chgin_ilim = charger->chgin_ilim_usb * MA_TO_UA;
		break;
	case CABLE_TYPE_AC:	//AC input current 1200mA
		chgin_ilim = charger->chgin_ilim_ac * MA_TO_UA;
		break;
	default:
		chgin_ilim = 0;
		break;
	}

	if (chgin_ilim) {
		/* set ilim cur */
		ret = regulator_set_current_limit(charger->ps, chgin_ilim,
						  MAX_AC_CURRENT * MA_TO_UA);
		if (ret) {
			pr_err("failed to set current limit\n");
			return ret;
		}
	}

	if (delayed_work_pending(&charger->poll_dwork))
		cancel_delayed_work(&charger->poll_dwork);
	schedule_delayed_work_on(0, &charger->poll_dwork, 0);

	return 0;
}
Example #3
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time)) <
				rd->read_idle.freq) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling");
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}
Example #4
static void haptic_enable(struct timed_output_dev *tdev, int value)
{
	struct haptic_data *chip =
		container_of(tdev, struct haptic_data, tdev);

	mutex_lock(&chip->haptic_mutex);

	if (chip->motor_status == MOTOR_SHUTDOWN)
		goto unlock;

#ifdef __CONFIG_DEBUG_HAPTIC__
	pr_info("%s: vibration time = %d\n", __func__, g_vibrate_count++);
#endif

	//max77665_haptic_on(chip, false);
	hrtimer_cancel(&chip->timer);

	if (delayed_work_pending(&chip->disable_work))
		cancel_delayed_work(&chip->disable_work);

	if (value > 0) {
		value = min(value, chip->max_timeout);
		hrtimer_start(&chip->timer, ktime_set(value/1000, (value%1000)*1000000),
						HRTIMER_MODE_REL);
	}
#ifdef __CONFIG_DEBUG_HAPTIC__
	pr_info("%s: process: %s, time: %d ms\n", __func__, current->comm, value);
#endif
	max77665_haptic_on(chip, !!value);
unlock:
	mutex_unlock(&chip->haptic_mutex);
}
Example #5
static void stop_podgov_workers(struct podgov_info_rec *podgov)
{
	/* idle_timer can rearm itself */
	do {
		cancel_delayed_work_sync(&podgov->idle_timer);
	} while (delayed_work_pending(&podgov->idle_timer));
}
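The retry loop above defends against the idle timer re-arming itself mid-cancel. Current workqueue documentation states that cancel_delayed_work_sync() works even if the work re-queues itself, so on kernels with that guarantee the loop collapses to one call; a minimal sketch under that assumption:

static void stop_podgov_workers(struct podgov_info_rec *podgov)
{
	/* cancel_delayed_work_sync() blocks re-queueing while it runs,
	 * so no retry loop is needed */
	cancel_delayed_work_sync(&podgov->idle_timer);
}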
Example #6
static int msm_voice_rx_mute_timeout_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	static struct delayed_work *unmute_work = NULL;
	int ret = 0;
	int mute = ucontrol->value.integer.value[0];
	uint32_t session_id = ucontrol->value.integer.value[1];
	int timeout = ucontrol->value.integer.value[2];

	if (unmute_work == NULL) {
		unmute_work = kzalloc(sizeof(*unmute_work), GFP_KERNEL);
		if (unmute_work == NULL)
			return -ENOMEM;
		INIT_DELAYED_WORK(unmute_work, msm_voice_unmute_work);
	}

	if ((mute != 1) || (timeout <= 0)) {
		pr_err(" %s Invalid arguments", __func__);

		ret = -EINVAL;
		goto done;
	}

	pr_debug("%s: mute=%d session_id=%#x timeout=%d\n", __func__,
		 mute, session_id, timeout);

	voc_set_device_mute_lge(voc_get_session_id(VOICE_SESSION_NAME),
							VSS_IVOLUME_DIRECTION_RX, 1, 500);

	if (unlikely(delayed_work_pending(unmute_work)))
		cancel_delayed_work_sync(unmute_work);
	schedule_delayed_work(unmute_work, msecs_to_jiffies(timeout));
done:
	return ret;
}
Example #7
inline void hotplug_boostpulse(void)
{
        if (unlikely(flags & (EARLYSUSPEND_ACTIVE
                | HOTPLUG_DISABLED)))
                return;

        if (!(flags & BOOSTPULSE_ACTIVE)) {
                flags |= BOOSTPULSE_ACTIVE;
                /*
                 * If fewer than two CPUs are online, bring an
                 * additional CPU online; otherwise check for any
                 * pending offlines, cancel them and pause for 2 seconds.
                 * Either way, we don't allow any cpu_down()
                 * whilst the user is interacting with the device.
                 */
                if (likely(num_online_cpus() < 2)) {
                        cancel_delayed_work_sync(&hotplug_offline_work);
                        flags |= HOTPLUG_PAUSED;
                        schedule_work(&hotplug_online_single_work);
                        schedule_delayed_work(&hotplug_unpause_work, HZ);
                } else {
                        pr_info("auto_hotplug: %s: %d CPUs online\n", __func__, num_online_cpus());
                        if (delayed_work_pending(&hotplug_offline_work)) {
                                pr_info("auto_hotplug: %s: Cancelling hotplug_offline_work\n", __func__);
                                cancel_delayed_work(&hotplug_offline_work);
                                flags |= HOTPLUG_PAUSED;
                                schedule_delayed_work(&hotplug_unpause_work, HZ);
                                schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
                        }
                }
        }
}
Example #8
static void rt9532_duration_work(struct work_struct *work)
{
	if (delayed_work_pending(&charger.irq_work))
		cancel_delayed_work_sync(&charger.irq_work);

	schedule_delayed_work(&charger.irq_work, msecs_to_jiffies(1000));
}
Example #9
static void battery_chargalg_schedule_delayed_work(struct delayed_work *work,
						   unsigned long delay)
{
	if (delayed_work_pending(work))
		cancel_delayed_work(work);
	schedule_delayed_work(work, delay);
}
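The helper above is the open-coded form of mod_delayed_work(), available since v3.7, which replaces any pending timer atomically; a sketch of the same helper on a kernel that provides it:

static void battery_chargalg_schedule_delayed_work(struct delayed_work *work,
						   unsigned long delay)
{
	/* atomically re-arms the timer, closing the race window between
	 * cancel_delayed_work() and schedule_delayed_work() */
	mod_delayed_work(system_wq, work, delay);
}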
Example #10
/* HSIC AUX GPIO irq handler */
static irqreturn_t hsic_aux_gpio_irq(int irq, void *data)
{
	struct device *dev = data;

	dev_dbg(dev,
		"%s---> hsic aux gpio request irq: %d\n",
		__func__, irq);

	if (hsic.hsic_aux_irq_enable == 0) {
		dev_dbg(dev,
			"%s---->AUX IRQ is disabled\n", __func__);
		return IRQ_HANDLED;
	}

	cancel_delayed_work(&hsic.wakeup_work);
	if (delayed_work_pending(&hsic.hsic_aux)) {
		dev_dbg(dev,
			"%s---->Delayed work pending\n", __func__);
		return IRQ_HANDLED;
	}

	hsic.hsic_aux_finish = 0;
	schedule_delayed_work(&hsic.hsic_aux, 0);
	dev_dbg(dev,
		"%s<----\n", __func__);

	return IRQ_HANDLED;
}
Example #11
static void rfkill_schedule_ratelimited(void)
{
	if (delayed_work_pending(&rfkill_op_work))
		return;
	schedule_delayed_work(&rfkill_op_work,
			      rfkill_ratelimit(rfkill_last_scheduled));
	rfkill_last_scheduled = jiffies;
}
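Since schedule_delayed_work() returns false and does nothing when the work is already pending, the same rate limiting can key off its return value instead of a separate pending check; a sketch using the same globals:

static void rfkill_schedule_ratelimited(void)
{
	/* the timestamp only advances when the work was actually queued */
	if (schedule_delayed_work(&rfkill_op_work,
				  rfkill_ratelimit(rfkill_last_scheduled)))
		rfkill_last_scheduled = jiffies;
}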
Example #12
static bool batch_update(struct fb_info *info, struct omap3epfb_update_area *p)
{
	struct omap3epfb_par *par = info->par;

	// If EPD is disabled, do nothing
	if (par->disable_flags > 0)
	{
		DEBUG_REGION(DEBUG_LEVEL3, p,"update DISABLED = ");
		// Tell the caller not to update.
		return false;
	}

	// Check if the delayed full screen updates are enabled.
	if (!par->clear_delay)
	{
		DEBUG_REGION(DEBUG_LEVEL1, p,"   do = ");
		if (fast == 1) {
			omap3epfb_update_screen(par->info, OMAP3EPFB_WVFID_VU, false);
		} else {
			omap3epfb_update_area(info, p);
		}
		return false;
	}

	// If this is not a fullscreen GC update, treat it as a normal update.
	if (!(rect_fullscreen(info, p) && p->wvfid == OMAP3EPFB_WVFID_GC))
	{
		// If we have a fullscreen batched, we do not need to update.
		if (!delayed_work_pending(&par->clear_work))
		{
			DEBUG_REGION(DEBUG_LEVEL1, p,"   do = ");
			if (fast == 1) {
				omap3epfb_update_screen(par->info, OMAP3EPFB_WVFID_VU, false);
			} else {
				omap3epfb_update_area(info, p);
			}
		}
		else
		{
			DEBUG_REGION(DEBUG_LEVEL1, p,"   skip = ");
		}
		return false;
	}

	// We need to do fullscreen batching.
	if (par->user_debug & DEBUG_LEVEL1)
	{
		if (TIME_DELTA_MS(par->last_clear) < 1000)
			DEBUG_REGION(DEBUG_LEVEL1, p,"   req FULLSCREEN PREV %dms ago AUTO = ", TIME_DELTA_MS(par->last_clear));
		else
			DEBUG_REGION(DEBUG_LEVEL1, p,"   req FULLSCREEN = ");
	}
	omap3epfb_reqq_purge(par->info);
	cancel_delayed_work_sync(&par->clear_work);
	schedule_delayed_work(&par->clear_work, msecs_to_jiffies(par->clear_delay));
	par->last_clear = TIME_STAMP();
	return true;
}
Example #13
void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev)
{
#if 0 /* Not in RHEL5... */
	cancel_delayed_work_sync(&rt2x00dev->link.work);
#else
	if (delayed_work_pending(&rt2x00dev->link.work))
		cancel_rearming_delayed_work(&rt2x00dev->link.work);
#endif
}
Example #14
void mdss_mdp_clk_ctrl(int enable, int isr)
{
	static atomic_t clk_ref = ATOMIC_INIT(0);
	static DEFINE_MUTEX(clk_ctrl_lock);
	int force_off = 0;

	pr_debug("clk enable=%d isr=%d clk_ref=%d\n", enable, isr,
			atomic_read(&clk_ref));
	/*
	 * It is assumed that if isr = TRUE then start = OFF
	 * if start = ON when isr = TRUE it could happen that the usercontext
	 * could turn off the clocks while the interrupt is updating the
	 * power to ON
	 */
	WARN_ON(isr == true && enable);

	if (enable == MDP_BLOCK_POWER_ON) {
		atomic_inc(&clk_ref);
	} else if (!atomic_add_unless(&clk_ref, -1, 0)) {
		if (enable == MDP_BLOCK_MASTER_OFF) {
			pr_debug("master power-off req\n");
			force_off = 1;
		} else {
			WARN(1, "too many mdp clock off call\n");
		}
	}

	WARN_ON(enable == MDP_BLOCK_MASTER_OFF && !force_off);

	if (isr) {
		/* if it's power off send workqueue to turn off clocks */
		if (mdss_res->clk_ena && !atomic_read(&clk_ref))
			queue_delayed_work(mdss_res->clk_ctrl_wq,
					   &mdss_res->clk_ctrl_worker,
					   mdss_res->timeout);
	} else {
		mutex_lock(&clk_ctrl_lock);
		if (delayed_work_pending(&mdss_res->clk_ctrl_worker))
			cancel_delayed_work(&mdss_res->clk_ctrl_worker);

		if (atomic_read(&clk_ref)) {
			mdss_mdp_clk_ctrl_update(true);
		} else if (mdss_res->clk_ena) {
			mutex_lock(&mdp_suspend_mutex);
			if (force_off || mdss_res->suspend) {
				mdss_mdp_clk_ctrl_update(false);
			} else {
				/* send workqueue to turn off mdp power */
				queue_delayed_work(mdss_res->clk_ctrl_wq,
						   &mdss_res->clk_ctrl_worker,
						   mdss_res->timeout);
			}
			mutex_unlock(&mdp_suspend_mutex);
		}
		mutex_unlock(&clk_ctrl_lock);
	}
}
Example #15
void cleanup_module(void) {
   flush_workqueue(queue);
   if (delayed_work_pending(&dwork)) {
      cancel_delayed_work(&dwork);
      flush_workqueue(queue);
   }
   destroy_workqueue(queue);
   printk(KERN_INFO "Module removed");
}
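A note on ordering: destroy_workqueue() flushes work already on the queue, but it cannot see a delayed work whose timer has not fired yet, which is why the cancel must come before the destroy. A tighter teardown sketch with the same globals:

void cleanup_module(void) {
   /* stop the timer and wait out any in-flight callback first */
   cancel_delayed_work_sync(&dwork);
   /* destroy_workqueue() performs a final flush of queued work */
   destroy_workqueue(queue);
   printk(KERN_INFO "Module removed\n");
}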
Example #16
static int pga_event(struct snd_soc_dapm_widget *w,
		     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct wm8350_data *wm8350_data = codec->private_data;
	struct wm8350_output *out;

	switch (w->shift) {
	case 0:
	case 1:
		out = &wm8350_data->out1;
		break;
	case 2:
	case 3:
		out = &wm8350_data->out2;
		break;

	default:
		BUG();
		return -1;
	}

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		out->ramp = WM8350_RAMP_UP;
		out->active = 1;

		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
					      msecs_to_jiffies(1));
		break;

	case SND_SOC_DAPM_PRE_PMD:
		out->ramp = WM8350_RAMP_DOWN;
		out->active = 0;

		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
					      msecs_to_jiffies(1));
		break;
	}

	return 0;
}
Example #17
static void battery_work(struct work_struct *work)
{
	struct bq27541_chip *chip = container_of(work, struct bq27541_chip, battery_dwork.work);

	get_battery_info(chip);
	power_supply_changed(&chip->fuelgauge);

	if (!delayed_work_pending(&chip->battery_dwork))
		schedule_delayed_work(&chip->battery_dwork, REFRESH_POLL);
}
Example #18
static ssize_t hsic_port_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int retval;
	int org_req;

	if (size > HSIC_ENABLE_SIZE)
		return -EINVAL;

	if (sscanf(buf, "%d", &org_req) != 1) {
		dev_dbg(dev, "Invalid, value\n");
		return -EINVAL;
	}

#if 0
	/* Free the aux irq */
	hsic_aux_irq_free();
	dev_dbg(dev,
		"%s---->AUX IRQ is disabled\n", __func__);
#endif

	if (delayed_work_pending(&hsic.hsic_aux)) {
		dev_dbg(dev,
			"%s---->Wait for delayed work finish\n",
			 __func__);
		retval = wait_event_interruptible(hsic.aux_wq,
						hsic.hsic_aux_finish);
		if (retval < 0)
			return retval;

		if (org_req)
			return size;
	}

	mutex_lock(&hsic.hsic_mutex);
	if (org_req) {
		dev_dbg(dev, "enable hsic\n");

		/* needed because hcd release doesn't set hcd to NULL */
		if (hsic.hsic_stopped == 0)
			ush_hsic_port_disable(pci_dev);
		usleep_range(5000, 6000);
		ush_hsic_port_enable(pci_dev);
	} else {
		dev_dbg(dev, "disable hsic\n");
		/* needed because hcd release doesn't set hcd to NULL */
		if (hsic.hsic_stopped == 0)
			ush_hsic_port_disable(pci_dev);

	}
	mutex_unlock(&hsic.hsic_mutex);
	return size;
}
Example #19
static int pga_event(struct snd_soc_dapm_widget *w, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct wm8350_out_ramp *or = codec->private_data;
	
	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* ramp vol up */
		switch (w->shift) {
		case 0:
		case 1:
			or->out1.ramp = WM8350_RAMP_UP;
			break;
		case 2:
		case 3:
			or->out2.ramp = WM8350_RAMP_UP;
			break;
		}
		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
				msecs_to_jiffies(1));
		break;
	case SND_SOC_DAPM_PRE_PMD:
		/* ramp down */
		switch (w->shift) {
		case 0:
		case 1:
			or->out1.ramp = WM8350_RAMP_DOWN;
			break;
		case 2:
		case 3:
			or->out2.ramp = WM8350_RAMP_DOWN;
			break;
		}
		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
				msecs_to_jiffies(1));
		break;
	}
	return 0;
}
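In Examples #16 and #19 the !delayed_work_pending() guard duplicates a check that schedule_delayed_work() performs internally: queueing an already-pending work item is a no-op that returns false. The guarded arms therefore reduce to an unconditional call, sketched here for the PMU case of Example #16:

	case SND_SOC_DAPM_POST_PMU:
		out->ramp = WM8350_RAMP_UP;
		out->active = 1;
		/* scheduling while already pending simply returns false */
		schedule_delayed_work(&codec->delayed_work,
				      msecs_to_jiffies(1));
		break;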
Example #20
static irqreturn_t lowbat_irq_thread(int irq, void *dev_id)
{
	struct bq27541_chip *chip = dev_id;
	pr_info("------- %s\n", __func__);

	if (!gpio_get_value(chip->low_bat_gpio)) {
		if (delayed_work_pending(&chip->battery_dwork))
			cancel_delayed_work(&chip->battery_dwork);
		schedule_delayed_work(&chip->battery_dwork, HZ/2);
	}
	return IRQ_HANDLED;
}
Example #21
static void wireless_isr_work_function(struct work_struct *dat)
{
	if (delayed_work_pending(&charger->wireless_isr_work))
		cancel_delayed_work(&charger->wireless_isr_work);

	SMB_NOTICE("wireless state = %d\n", wireless_is_plugged());

	if (wireless_is_plugged())
		wireless_set();
	else
		wireless_reset();
}
Example #22
static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}
Example #23
/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	BUG_ON(delayed_work_pending(&rd->read_idle.idle_work));
	destroy_workqueue(rd->read_idle.idle_workqueue);
	kfree(rd);
}
Example #24
static void wireless_set(void)
{
	wireless_on = true;
	if (delayed_work_pending(&charger->wireless_set_current_work))
		cancel_delayed_work(&charger->wireless_set_current_work);
	wake_lock(&wireless_wakelock);
	charger->wpc_curr_limit = 300;
	charger->wpc_curr_limit_count = 0;
	smb345_set_WCInputCurrentlimit(charger->client, 300);
	smb345_vflt_setting();
	bq27541_wireless_callback(wireless_on);
	queue_delayed_work(smb345_wq, &charger->wireless_set_current_work, WPC_SET_CURT_INTERVAL);
}
Example #25
void cleanup_module(void) {
    if (dwork && delayed_work_pending(dwork)) {
        cancel_delayed_work(dwork);
        printk(KERN_INFO "Delayed work canceled\n");
    } else {
        printk(KERN_INFO "No delayed work to cancel\n");
    }
    flush_workqueue(queue);
    destroy_workqueue(queue);

    printk(KERN_INFO "Timer module uninstalling\n");
}
Example #26
static void wireless_set_current_function(struct work_struct *dat)
{
	if (delayed_work_pending(&charger->wireless_set_current_work))
		cancel_delayed_work(&charger->wireless_set_current_work);

	if (charger->wpc_curr_limit == 700 || charger->wpc_curr_limit_count >= WPC_SET_CURT_LIMIT_CNT)
		return;
	else if (charger->wpc_curr_limit == 300)
		smb345_set_WCInputCurrentlimit(charger->client, 500);
	else if (charger->wpc_curr_limit == 500)
		smb345_set_WCInputCurrentlimit(charger->client, 700);

	queue_delayed_work(smb345_wq, &charger->wireless_set_current_work, WPC_SET_CURT_INTERVAL);
}
Example #27
static void wireless_reset(void)
{
	wireless_on = false;
	if (delayed_work_pending(&charger->wireless_set_current_work))
		cancel_delayed_work(&charger->wireless_set_current_work);
	charger->wpc_curr_limit = 300;
	charger->wpc_curr_limit_count = 0;
	if (ac_on) {
		smb345_set_InputCurrentlimit(charger->client, 1200);
		smb345_vflt_setting();
	}
	bq27541_wireless_callback(wireless_on);
	wake_unlock(&wireless_wakelock);
}
Example #28
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required, the
 * governor may change the sampling rate too late, up to 1 second later. Thus,
 * if we are reducing the sampling rate, we need to make the new value
 * effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
	put_online_cpus();
}
Example #29
static irqreturn_t dcin_irq_handler(int irqno, void *data)
{
	struct platform_device *pdev = data;

	dev_dbg(&pdev->dev, "dcin_irq_handler\n");

	if (delayed_work_pending(&bat->work))
		cancel_delayed_work(&bat->work);

	INIT_DELAYED_WORK(&bat->work, dcin_read_dc_detect_delayed);
	schedule_delayed_work(&bat->work, msecs_to_jiffies(100));

	return IRQ_HANDLED;
}
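Re-running INIT_DELAYED_WORK() from the handler is fragile: if the work is still pending, its timer and pending state get clobbered. A conventional shape, sketched assuming the work is initialised once at probe time, debounces with mod_delayed_work() instead (safe to call from IRQ context):

static irqreturn_t dcin_irq_handler(int irqno, void *data)
{
	struct platform_device *pdev = data;

	dev_dbg(&pdev->dev, "dcin_irq_handler\n");

	/* INIT_DELAYED_WORK(&bat->work, dcin_read_dc_detect_delayed)
	 * is assumed to have been done once at probe time */
	mod_delayed_work(system_wq, &bat->work, msecs_to_jiffies(100));

	return IRQ_HANDLED;
}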
Example #30
void kbase_device_runtime_put_sync(struct device *dev)
{
    struct kbase_device *kbdev = dev_get_drvdata(dev);

    if (delayed_work_pending(&kbdev->runtime_pm_workqueue))
        cancel_delayed_work_sync(&kbdev->runtime_pm_workqueue);

    pm_runtime_put_noidle(kbdev->osdev.dev);
    schedule_delayed_work_on(0, &kbdev->runtime_pm_workqueue,
                             RUNTIME_PM_DELAY_TIME / (1000 / HZ));
#if MALI_RTPM_DEBUG
    printk("---kbase_device_runtime_put_sync, usage_count=%d\n",
           atomic_read(&kbdev->osdev.dev->power.usage_count));
#endif
}