Example #1
static void dt2w_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value)
{
	if ((!scr_suspended) || (!dt2w_switch))
		return;

	/* You can debug here with 'adb shell getevent -l' command. */
	switch (code) {
		case ABS_MT_SLOT:
			doubletap2wake_reset();
			break;

		case ABS_MT_TRACKING_ID:
			if (value == 0xffffffff)
				is_touching = false;
			break;

		case ABS_MT_POSITION_X:
			touch_x = value;
			queue_work_on(0, dt2w_input_wq, &dt2w_input_work);
			break;

		case ABS_MT_POSITION_Y:
			touch_y = value;
			queue_work_on(0, dt2w_input_wq, &dt2w_input_work);
			break;

		default:
			break;
	}
}
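The handler above only records coordinates and queues work; it assumes a workqueue and work item set up elsewhere in the driver. A minimal sketch of that setup, assuming the names used above and a hypothetical work function dt2w_input_callback() that calls the driver's detection routine:

static struct workqueue_struct *dt2w_input_wq;
static struct work_struct dt2w_input_work;

/* Hypothetical work function: runs in process context on CPU 0,
 * where queue_work_on(0, ...) pinned it. */
static void dt2w_input_callback(struct work_struct *unused)
{
	detect_doubletap2wake(touch_x, touch_y, true);	/* assumed helper */
}

static int __init dt2w_init(void)
{
	dt2w_input_wq = create_workqueue("dt2wiwq");
	if (!dt2w_input_wq) {
		pr_err("%s: failed to create dt2w workqueue\n", __func__);
		return -ENOMEM;
	}
	INIT_WORK(&dt2w_input_work, dt2w_input_callback);
	return 0;
}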
Example #2
static void sdio_mux_notify(void *_dev, unsigned event)
{
	DBG("%s: event %d notified\n", __func__, event);

	/* write avail may not be enough for a packet, but should be fine */
	if ((event == SDIO_EVENT_DATA_WRITE_AVAIL) &&
	    sdio_write_avail(sdio_mux_ch))
		queue_work_on(0, sdio_mux_workqueue, &work_sdio_mux_write);

	if ((event == SDIO_EVENT_DATA_READ_AVAIL) &&
	    sdio_read_avail(sdio_mux_ch))
		queue_work_on(0, sdio_mux_workqueue, &work_sdio_mux_read);
}
Example #3
static void wg_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value)
{

	if (scr_suspended() && code == ABS_MT_POSITION_X) {
		value -= 5000;
	}

#if WG_DEBUG
	pr_info("wg: code: %s|%u, val: %i\n",
		((code==ABS_MT_POSITION_X) ? "X" :
		(code==ABS_MT_POSITION_Y) ? "Y" :
		(code==ABS_MT_TRACKING_ID) ? "ID" :
		"undef"), code, value);
#endif
	if (code == ABS_MT_SLOT) {
		sweep2wake_reset();
		doubletap2wake_reset();
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) {
		sweep2wake_reset();
		touch_cnt = true;
		queue_work_on(0, dt2w_input_wq, &dt2w_input_work);
		return;
	}

	if (code == ABS_MT_POSITION_X) {
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) {
		touch_y = value;
		touch_y_called = true;
	}

	if (touch_x_called && touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		queue_work_on(0, s2w_input_wq, &s2w_input_work);
	} else if (!scr_suspended() && touch_x_called && !touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		queue_work_on(0, s2w_input_wq, &s2w_input_work);
	}
}
Example #4
ssize_t cpu_usage_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t n)
{
	struct workqueue_struct	*workqueue;
	struct work_struct *work;
	char cmd[20];
	int usage = 0;
	int cpu;

	sscanf(buf, "%19s %d", cmd, &usage);	/* %19s: bounded to sizeof(cmd) */

	if (!strncmp(cmd, "start", strlen("start"))) {
		PM_DBG("get cmd start\n");
		cpu_usage_run = 1;
		
		cpu_usage_percent = (ARM_MODE_TIMER_MSEC * usage) / 100;


		for_each_online_cpu(cpu){
			work = &per_cpu(work_cpu_usage, cpu);
			workqueue = per_cpu(workqueue_cpu_usage, cpu);
			if (!work || !workqueue){
				PM_ERR("work or workqueue NULL\n");
				return n;
			}	
			queue_work_on(cpu, workqueue, work);
		}
#if 0
		del_timer(&arm_mode_timer);
		arm_mode_timer.expires	= jiffies + msecs_to_jiffies(ARM_MODE_TIMER_MSEC);
		add_timer(&arm_mode_timer);
#endif
	
	} else if (!strncmp(cmd, "stop", strlen("stop"))) {
		/* Assumed completion: the listing breaks off here. Clearing
		 * the run flag lets the per-cpu workers exit on their next
		 * pass. */
		PM_DBG("get cmd stop\n");
		cpu_usage_run = 0;
	}

	return n;
}
Example #5
void
tfw_cache_req_process(TfwHttpReq *req, tfw_http_req_cache_cb_t action,
		      void *data)
{
	int node;
	unsigned long key;

	if (!tfw_cfg.cache)
		return;

	key = tfw_cache_key_calc(req);

	node = tfw_cache_key_node(key);
	if (node != numa_node_id()) {
		/* Schedule the cache entry to the right node. */
		TfwCWork *cw = kmem_cache_alloc(c_cache, GFP_ATOMIC);
		if (!cw)
			goto process_locally;
		INIT_WORK(&cw->work, tfw_cache_req_process_node);
		cw->cw_req = req;
		cw->cw_act = action;
		cw->cw_data = data;
		cw->cw_key = key;
		queue_work_on(tfw_cache_sched_work_cpu(node), cache_wq,
			      (struct work_struct *)cw);
		/* The request is handled on the remote node's CPU;
		 * don't fall through and process it locally as well. */
		return;
	}

process_locally:
	__cache_req_process_node(req, key, action, data);
}
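Note the cast of cw to struct work_struct *: it is valid only if the work member sits first in TfwCWork. A sketch of the layout the code implies (field names taken from the assignments above):

typedef struct {
	struct work_struct	work;	/* must be first for the cast above */
	TfwHttpReq		*cw_req;
	tfw_http_req_cache_cb_t	cw_act;
	void			*cw_data;
	unsigned long		cw_key;
} TfwCWork;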
Example #6
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, lru_add_drain_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}
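The work function queued on each CPU is deliberately trivial; in mainline it is essentially:

/* Runs on the CPU it was queued on, so lru_add_drain() drains that
 * CPU's pagevecs. */
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}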
Example #7
static void dt2w_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value) {
#if DT2W_DEBUG
	pr_info("doubletap2wake: code: %s|%u, val: %i\n",
		((code==ABS_MT_POSITION_X) ? "X" :
		(code==ABS_MT_POSITION_Y) ? "Y" :
		(code==ABS_MT_TRACKING_ID) ? "ID" :
		"undef"), code, value);
#endif
	if (!scr_suspended)
		return;

	if (code == ABS_MT_SLOT) {
		doubletap2wake_reset();
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) {
		touch_cnt = true;
		queue_work_on(0, dt2w_input_wq, &dt2w_input_work);
		return;
	}

	if (code == ABS_MT_POSITION_X) {
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) {
		touch_y = value;
		touch_y_called = true;
	}
}
Example #8
static void isert_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct isert_cq *cq_desc = context;

	queue_work_on(smp_processor_id(), cq_desc->cq_workqueue,
		      &cq_desc->cq_comp_work);
}
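The context pointer carries a per-CQ descriptor pairing a workqueue with a work item, and queueing on smp_processor_id() keeps completion processing on the CPU that took the interrupt (the completion handler runs in interrupt context, so the CPU id is stable). A sketch of the shape the handler assumes, not the exact isert definition:

struct isert_cq {
	struct workqueue_struct	*cq_workqueue;	/* per-CQ workqueue */
	struct work_struct	cq_comp_work;	/* completion processing */
};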
Example #9
static void s2w_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value) {
#if S2W_DEBUG
	pr_info("sweep2wake: code: %s|%u, val: %i\n",
		((code==ABS_MT_POSITION_X) ? "X" :
		(code==ABS_MT_POSITION_Y) ? "Y" :
		(code==ABS_MT_TRACKING_ID) ? "ID" :
		"undef"), code, value);
#endif
	if (code == ABS_MT_SLOT) {
		sweep2wake_reset();
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) {
		sweep2wake_reset();
		return;
	}

	if (code == ABS_MT_POSITION_X) {
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) {
		touch_y = value;
		touch_y_called = true;
	}

	if (touch_x_called && touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		queue_work_on(0, s2w_input_wq, &s2w_input_work);
	}
}
Example #10
static void s2s_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value) {

	if (code == ABS_MT_SLOT) {
		sweep2wake_reset();
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) {
		sweep2wake_reset();
		return;
	}

	if (code == ABS_MT_POSITION_X) {
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) {
		touch_y = value;
		touch_y_called = true;
	}

	if (touch_x_called && touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		queue_work_on(0, s2s_input_wq, &s2s_input_work);
	}
}
Example #11
File: swap.c Project: Lyude/linux
/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}
Example #12
int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
	struct work_struct *sm_work;
	int i, ret;

	/* Set up initial state. */
	mutex_lock(&lock);
	num_threads = num_online_cpus();
	active_cpus = cpus;
	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	set_state(STOPMACHINE_PREPARE);

	/* Schedule the stop_cpu work on all cpus: hold this CPU so one
	 * doesn't hit this CPU until we're ready. */
	get_cpu();
	for_each_online_cpu(i) {
		sm_work = percpu_ptr(stop_machine_work, i);
		INIT_WORK(sm_work, stop_cpu);
		queue_work_on(i, stop_machine_wq, sm_work);
	}
	/* This will release the thread on our CPU. */
	put_cpu();
	flush_workqueue(stop_machine_wq);
	ret = active.fnret;
	mutex_unlock(&lock);
	return ret;
}
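The idle.fn installed above for CPUs outside active_cpus is a no-op; in kernels of this vintage it is simply:

/* CPUs not in active_cpus run this instead of the caller's fn. */
static int chill(void *unused)
{
	return 0;
}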
Example #13
void mali_dvfs_handler(u32 utilization)
{
    g_mali_dfs_var.dfs_Utilization = utilization;
    queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
    g_mali_dfs_var.dfs_GpuUtilization = utilization;
    /* add error handling here */
}
Example #14
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
	unsigned long flags;
	struct exynos_context *platform;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	platform = (struct exynos_context *) kbdev->platform_context;

	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
	if (platform->time_tick < MALI_DVFS_TIME_INTERVAL) {
		platform->time_tick++;
		platform->time_busy += kbdev->pm.metrics.time_busy;
		platform->time_idle += kbdev->pm.metrics.time_idle;
	} else {
		platform->time_busy = kbdev->pm.metrics.time_busy;
		platform->time_idle = kbdev->pm.metrics.time_idle;
		platform->time_tick = 0;
	}

	if ((platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
		(platform->time_idle + platform->time_busy > 0))
			platform->utilisation = (100*platform->time_busy) / (platform->time_idle + platform->time_busy);

	mali_dvfs_status_current.utilisation = utilisation;

#ifdef MALI_DEBUG
	printk(KERN_INFO "\n[mali_devfreq]utilization: %d\n", utilisation);
#endif
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

	queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
	/* add error handling here */
	return MALI_TRUE;
}
Example #15
static int boost_input_connect(struct input_handler *handler,
                struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "touchboost";

	error = input_register_handle(handle);
	if (error)
		goto err;

	touchboost_inputopen.handle = handle;
	queue_work_on(0, input_boost_wq, &touchboost_inputopen.inputopen_work);
	return 0;

err:
	kfree(handle);
	return error;
}
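input_open_device() can sleep (it takes the device mutex), which is presumably why the connect handler defers it to a workqueue rather than opening the device inline. A sketch of the queued work, assuming touchboost_inputopen pairs the handle with its work item and touchboost_input_open() is the hypothetical work function:

struct touchboost_inputopen {
	struct input_handle *handle;
	struct work_struct inputopen_work;
};

static void touchboost_input_open(struct work_struct *w)
{
	struct touchboost_inputopen *io =
		container_of(w, struct touchboost_inputopen, inputopen_work);

	if (input_open_device(io->handle))	/* may sleep */
		pr_err("touchboost: failed to open input device\n");
}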
Example #16
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
	unsigned long flags;
	struct rk_context *platform;

	BUG_ON(!kbdev);
	platform = (struct rk_context *)kbdev->platform_context;

	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
	if (platform->time_tick < MALI_DVFS_TIME_INTERVAL) {
		platform->time_tick++;
		platform->time_busy += kbdev->pm.metrics.time_busy;
		platform->time_idle += kbdev->pm.metrics.time_idle;
	} else {
		platform->time_busy = kbdev->pm.metrics.time_busy;
		platform->time_idle = kbdev->pm.metrics.time_idle;
		platform->time_tick = 0;
	}

	if ((platform->time_tick == MALI_DVFS_TIME_INTERVAL) && (platform->time_idle + platform->time_busy > 0))
		platform->utilisation = (100 * platform->time_busy) / (platform->time_idle + platform->time_busy);

	mali_dvfs_status_current.utilisation = utilisation;
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

	queue_work_on(0, mali_dvfs_wq, &mali_dvfs_work);
	/* add error handling here */
	return MALI_TRUE;
}
Example #17
static inline void schedule_link_to_demux(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;
	struct delayed_work *dwork = &ld->rx_delayed_work;

	/*queue_delayed_work(ld->rx_wq, dwork, 0);*/
	queue_work_on(7, ld->rx_wq, &dwork->work);
}
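Queueing &dwork->work directly bypasses the delayed_work timer, so the work runs as soon as a worker on CPU 7 picks it up, consistent with the zero delay in the commented-out call. A hypothetical timer-based, CPU-pinned variant:

static inline void schedule_link_to_demux_delayed(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;

	/* Same CPU pinning, but goes through the delayed_work timer. */
	queue_delayed_work_on(7, ld->rx_wq, &ld->rx_delayed_work, 0);
}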
Example #18
int schedule_work_on(int cpu, struct work_struct *work)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
	return queue_work_on(cpu, system_wq, work);
#else
	return queue_work(system_wq, work);
#endif
}
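A hypothetical caller of the shim, pinning a work item to CPU 2 (my_work and my_work_fn are illustrative):

static struct work_struct my_work;	/* hypothetical work item */

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work_fn on CPU %d\n", smp_processor_id());
}

static void my_init(void)
{
	INIT_WORK(&my_work, my_work_fn);
	schedule_work_on(2, &my_work);	/* runs on CPU 2 (if online) */
}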
Example #19
mali_bool mali_orion_dvfs_handler(u32 utilization)
{
	mali_orion_dvfs_utilization = utilization;
	queue_work_on(0, mali_orion_dvfs_wq, &mali_orion_dvfs_work);

	/* add error handling here */
	return MALI_TRUE;
}
Example #20
void state_resume(void)
{
	dprintk("%s: resume called.\n", STATE_NOTIFIER);
	cancel_delayed_work_sync(&suspend_work);
	suspend_in_progress = false;

	if (state_suspended)
		queue_work_on(0, susp_wq, &resume_work);
}
Example #21
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
Example #22
/*
 * nss_data_plane_schedule_registration()
 *	Called from nss_init to schedule a work to do data_plane register to nss-gmac
 */
bool nss_data_plane_schedule_registration(void)
{
	if (!queue_work_on(1, nss_data_plane_workqueue, &nss_data_plane_work.work)) {
		nss_warning("Failed to register data plane workqueue on core 1\n");
		return false;
	} else {
		nss_info("Register data plane workqueue on core 1\n");
		return true;
	}
}
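One caveat with the check above: queue_work_on() returns false when the work item is already pending, not only on hard failure, so the warning can also fire on a benign double schedule. A hypothetical variant that reflects this:

/* queue_work_on() returns false only when the work was already queued,
 * which means registration will still happen exactly once. */
static bool nss_data_plane_schedule_registration_once(void)
{
	if (!queue_work_on(1, nss_data_plane_workqueue,
			   &nss_data_plane_work.work))
		nss_info("data plane registration already pending\n");
	return true;
}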
Example #23
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
{
	pr_err("%s: mdm got errfatal interrupt\n", __func__);

	if (mdm_drv->mdm_ready &&
		(gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 1)) {
		pr_debug("%s: scheduling work now\n", __func__);
		queue_work_on(0, mdm_queue, &mdm_fatal_work);	
	}
	return IRQ_HANDLED;
}
Example #24
int hwDataRxIn(void *pBuf, int bufLen, int mode)
{
    if(hwDataSrcCopyIn(pBuf, bufLen, mode) != bufLen)
    {
        DV_DBG(" hwDataSrcCopyIn failed.\n");
        return DV_ERROR;
    }

    queue_work_on(0, hwSimInfo.pHwSimWq, &hwSimInfo.dvDataWork);

    return DV_OK;
}
Example #25
static void mdm_status_check_fn(struct work_struct *work)
{
	int value = 0;

	msleep(3000); 
	pr_info("%s mdm_status_change notified? %c\n", __func__, mdm_status_change_notified ? 'Y': 'N');
	if (!mdm_status_change_notified) {
		dump_mdm_related_gpio();
		value = gpio_get_value(mdm_drv->mdm2ap_status_gpio);
		if (value == 1)
			queue_work_on(0, mdm_queue, &mdm_status_work);
	}
}
Example #26
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (platform->dvfs_wq)
		queue_work_on(0, platform->dvfs_wq, &gpu_dvfs_work);

	GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "dvfs handler is called\n");

	return 0;
}
Example #27
/* input event dispatcher */
static void s2s_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value)
{
	if (!s2s)
		return;

	if (code == ABS_MT_SLOT) 
	{
		sweep2sleep_reset();
		if (debug)
			pr_info(LOGTAG"sweep ABS_MT_SLOT\n");
		
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) 
	{
		if (debug)
			pr_info(LOGTAG"sweep ABS_MT_TRACKING_ID\n");
		
		// only reset due to finger taken off when not on soft keys
		// (on soft keys it is normal as it interrupts the touch screen area)
		if (touch_y < S2S_Y_BUTTONLIMIT)
		{
			sweep2sleep_reset();
			if (debug)
				pr_info(LOGTAG"sweep reset\n");
		}
		return;
	}

	if (code == ABS_MT_POSITION_X)
	{
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) 
	{
		touch_y = value;
		touch_y_called = true;
	}

	if (touch_x_called && touch_y_called) 
	{
		touch_x_called = false;
		touch_y_called = false;
		queue_work_on(0, s2s_input_wq, &s2s_input_work);
	}
}
Example #28
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
		atomic_set(&pd->seq_nr, -1);

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);

	target_cpu = padata_cpu_hash(padata);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
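A caller embeds struct padata_priv in its own request object and fills in the parallel/serial callbacks before submitting; a sketch modeled on how pcrypt drives this API (my_request, my_parallel, my_serial, and submit are hypothetical):

struct my_request {
	struct padata_priv padata;	/* must be embedded in the object */
	/* ... request payload ... */
};

/* Runs with BHs off on target_cpu (see the comment above). */
static void my_parallel(struct padata_priv *padata)
{
	/* ... heavy, parallelizable work ... */
	padata_do_serial(padata);	/* hand off for ordered completion */
}

/* Runs on cb_cpu, in original submission order. */
static void my_serial(struct padata_priv *padata)
{
	/* ... deliver the result ... */
}

static int submit(struct padata_instance *pinst,
		  struct my_request *req, int cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;
	return padata_do_parallel(pinst, &req->padata, cb_cpu);
}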
Example #29
static void unicam_timer_callback(unsigned long data)
{
	struct unicam_camera_dev *unicam_dev =
			(struct unicam_camera_dev *)(data);
	if (atomic_read(&unicam_dev->cam_triggered) == 0) {
		pr_info("***Retry Timer Ignore***\n");
		return;
	}
	pr_info("***Retry(%d) Timer***\n", \
		atomic_read(&unicam_dev->retry_count));
	atomic_add(1, &unicam_dev->retry_count);

	queue_work_on(0, unicam_dev->single_wq,
		&unicam_dev->retry_work);
}
Example #30
static enum hrtimer_restart timer_func(struct hrtimer *handle)
{
	struct sleep_data *sleep_info = container_of(handle, struct sleep_data,
			timer);

	if (atomic_read(&sleep_info->timer_expired))
		pr_info("msm_sleep_stats: Missed timer interrupt on cpu %d\n",
				sleep_info->cpu);

	atomic_set(&sleep_info->timer_val_ms, 0);
	atomic_set(&sleep_info->timer_expired, 1);

	queue_work_on(sleep_info->cpu, msm_stats_wq, &sleep_info->work);

	return HRTIMER_NORESTART;
}
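The timer that fires this callback would be armed with the standard hrtimer API; a minimal sketch, assuming a hypothetical sleep_stats_start() and the field names from struct sleep_data above:

static void sleep_stats_start(struct sleep_data *sleep_info, u64 period_ms)
{
	hrtimer_init(&sleep_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sleep_info->timer.function = timer_func;
	hrtimer_start(&sleep_info->timer, ms_to_ktime(period_ms),
		      HRTIMER_MODE_REL);
}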