/*
 * irled_store() - sysfs store handler parsing a separated list of unsigned
 * integers from @buf into ir_data.signal[] and kicking the IR-transmit work.
 *
 * Parsing stops at the first 0 value, the first non-numeric token, or after
 * MAX_SIZE entries.  Always consumes the whole write (returns @size).
 */
static ssize_t irled_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned int _data;
	int consumed;

	for (i = 0; i < MAX_SIZE; i++) {
		/*
		 * %n records exactly how many characters the number occupied,
		 * so the advance is correct even for values written with
		 * leading zeros (e.g. "007") or extra whitespace — the old
		 * _data /= 10 digit-count loop miscounted both cases.
		 */
		if (sscanf(buf, "%u%n", &_data, &consumed) == 1) {
			ir_data.signal[i] = _data;
			if (ir_data.signal[i] == 0)
				break;
			buf += consumed + 1;	/* number + one separator */
		} else {
			/* terminate the signal list on a parse failure */
			ir_data.signal[i] = 0;
			break;
		}
	}

	if (!work_pending(&ir_data.work))
		schedule_work(&ir_data.work);

	return size;
}
Example #2
0
/*
 * release_cp_wakeup() - delayed-work handler that lets the CP go to sleep.
 *
 * If PM references are still outstanding (ref_cnt > 0), re-arms itself to
 * check again after sleep_timeout ms.  Otherwise, when the AP-wakeup GPIO is
 * low, deasserts the cp_wakeup and ap_status GPIOs.
 */
static void release_cp_wakeup(struct work_struct *ws)
{
    struct mem_link_device *mld;
    int ref;
    unsigned long flags;

    mld = container_of(ws, struct mem_link_device, cp_sleep_dwork.work);

    /*
     * cancel_delayed_work() is a safe no-op when nothing is queued, so the
     * previous work_pending() pre-check was redundant (and racy anyway).
     */
    cancel_delayed_work(&mld->cp_sleep_dwork);

    spin_lock_irqsave(&mld->pm_lock, flags);
    ref = atomic_read(&mld->ref_cnt);
    spin_unlock_irqrestore(&mld->pm_lock, flags);
    if (ref > 0)
        goto reschedule;

    /* Only drop the wake-up GPIOs while the CP is not asserting ap_wakeup. */
    if (gpio_get_value(mld->gpio_ap_wakeup) == 0) {
        gpio_set_value(mld->gpio_cp_wakeup, 0);
        gpio_set_value(mld->gpio_ap_status, 0);
    }

    print_pm_status(mld);   /* removed the dead "#if 1" wrapper */

    return;

reschedule:
    queue_delayed_work(system_nrt_wq, &mld->cp_sleep_dwork,
                       msecs_to_jiffies(sleep_timeout));
}
Example #3
0
/*
 * remocon_store() - sysfs store handler parsing a separated list of unsigned
 * integers from @buf into data->signal[] and scheduling the transmit work.
 *
 * Parsing stops at the first 0 value, the first non-numeric token, or after
 * MAX_SIZE entries.  Always consumes the whole write (returns @size).
 */
static ssize_t remocon_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t size)
{
	struct ir_remocon_data *data = dev_get_drvdata(dev);
	int i;
	unsigned int _data;
	int consumed;

	for (i = 0; i < MAX_SIZE; i++) {
		/*
		 * %n yields the exact character count consumed, which the old
		 * _data /= 10 digit-count loop got wrong for values with
		 * leading zeros (e.g. "007") and multi-space separators.
		 */
		if (sscanf(buf, "%u%n", &_data, &consumed) == 1) {
			data->signal[i] = _data;
			if (data->signal[i] == 0)
				break;
			buf += consumed + 1;	/* number + one separator */
		} else {
			/* terminate the signal list on a parse failure */
			data->signal[i] = 0;
			break;
		}
	}

	if (!work_pending(&data->work))
		schedule_work(&data->work);

	return size;
}
Example #4
0
/*
 * Trigger the work thread — unless the work item is already queued, the task
 * is freezing for suspend, or the port itself is suspended, in which case a
 * warning is logged instead.
 */
static void max3107_dowork(struct max3107_port *s)
{
	if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
		queue_work(s->workqueue, &s->work);
	else
		/* fixed typo: "interrup" -> "interrupt" */
		dev_warn(&s->spi->dev, "interrupt isn't serviced normally!\n");
}
Example #5
0
/*
 * Flip the "active" state of a userptr object under the mmu_notifier lock.
 *
 * Returns 0 on success, or -EAGAIN when activation raced with an
 * outstanding cancel work item (the caller is expected to retry).
 * Objects without an mmu_object — or kernels built without
 * CONFIG_MMU_NOTIFIER — trivially succeed.
 */
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmaping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held.  To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;	/* cancel work still queued: caller retries */
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
Example #6
0
File: rxrpc.c Project: krzk/linux
/*
 * Dispose of a reference on a call.  When the last reference is dropped,
 * the call is torn down: the rxrpc call is ended, the type-specific
 * destructor runs, server/cb-interest references are released, and the
 * memory is freed.  Anyone waiting on net->nr_outstanding_calls is woken
 * once the last outstanding call goes away.
 */
void afs_put_call(struct afs_call *call)
{
	struct afs_net *net = call->net;
	int n = atomic_dec_return(&call->usage);
	int o = atomic_read(&net->nr_outstanding_calls);

	trace_afs_call(call, afs_call_trace_put, n + 1, o,
		       __builtin_return_address(0));

	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		/* No async work may still be queued once usage hits zero. */
		ASSERT(!work_pending(&call->async_work));
		ASSERT(call->type->name != NULL);

		if (call->rxcall) {
			rxrpc_kernel_end_call(net->socket, call->rxcall);
			call->rxcall = NULL;
		}
		if (call->type->destructor)
			call->type->destructor(call);

		afs_put_server(call->net, call->cm_server);
		afs_put_cb_interest(call->net, call->cbi);
		kfree(call->request);

		trace_afs_call(call, afs_call_trace_free, 0, o,
			       __builtin_return_address(0));
		kfree(call);

		/* Wake anyone waiting for the net's calls to drain. */
		o = atomic_dec_return(&net->nr_outstanding_calls);
		if (o == 0)
			wake_up_var(&net->nr_outstanding_calls);
	}
}
Example #7
0
/*
 * caps_lock_led() - sysfs store handler for the dock-keyboard caps-lock LED.
 *
 * A value of 1 turns the LED on, anything else turns it off; a parse
 * failure only logs an error.  The LED work item is then scheduled unless
 * it is already pending.  Always returns @size.
 */
static ssize_t caps_lock_led(struct device *dev, struct device_attribute *attr, char *buf, size_t size)
{
    int state = 0;

    if (sscanf(buf, "%d", &state) == 1)
        g_data->led_on = (state == 1);
    else
        printk(KERN_ERR "[Keyboard] Couldn't get led state.\n");

    if (!work_pending(&g_data->work_led))
        schedule_work(&g_data->work_led);

    return size;
}
Example #8
0
/*
 * send_keyevent() - enqueue a key code into the circular key buffer and
 * schedule the message work item to drain it.
 *
 * When the buffer is full the advance of buf_rear is undone and the key
 * code is dropped (optionally logged).
 */
void send_keyevent(unsigned int key_code)
{
    buf_rear = (1 + buf_rear) % MAX_BUF;
    if (buf_front == buf_rear) {
        /*
         * Buffer full: undo the advance.  If the increment wrapped to 0,
         * the previous index was MAX_BUF - 1.  The original code restored
         * MAX_BUF here, which is out of range and desynchronized the
         * full/empty check on the next enqueue — off-by-one fixed.
         */
        if (buf_rear == 0)
            buf_rear = MAX_BUF - 1;
        else
            buf_rear--;
#if defined(CONFIG_SAMSUNG_KERNEL_DEBUG_USER)
        printk(KERN_DEBUG "[Keyboard] miss the key_code : %x\n", key_code);
#endif
    } else {
        key_buf[buf_rear] = (unsigned char)key_code;
    }

    if (!work_pending(&g_data->work_msg))
        schedule_work(&g_data->work_msg);
}
/*
 * cpuboost_input_event() - boost CPU frequency on user input.
 *
 * Bails out when boosting is disabled, no boost frequency is configured,
 * IR-LED transmission is in progress, the rate limit window has not
 * elapsed, or the boost work is already queued.
 */
static void cpuboost_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
	u64 now;

	if (!cpuboost_enable)
		return;
	if (!input_boost_freq)
		return;

#ifdef CONFIG_IRLED_GPIO
	/* Skip boosting while the GPIO IR blaster is transmitting. */
	if (unlikely(gir_boost_disable)) {
		pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n",
			__func__, raw_smp_processor_id());
		return;
	}
#endif

	/* Rate-limit: at most one boost per input_boost_ms window. */
	now = ktime_to_us(ktime_get());
	if (now - last_input_time < input_boost_ms * USEC_PER_MSEC)
		return;

	if (!work_pending(&input_boost_work)) {
		queue_work(cpu_boost_wq, &input_boost_work);
		last_input_time = ktime_to_us(ktime_get());
	}
}
/*
 * fb_notifier_callback() - framebuffer blank notifier.
 *
 * On an unblank (display-on) event, queues the input boost work once,
 * provided wakeup boosting is enabled and no boost is already pending.
 * All other blank states are ignored.
 */
static int fb_notifier_callback(struct notifier_block *self,
				unsigned long event, void *data)
{
	struct fb_event *evdata = data;
	int *blank;

	if (!evdata || !evdata->data || event != FB_EVENT_BLANK)
		return 0;

	blank = evdata->data;
	if (*blank == FB_BLANK_UNBLANK &&
	    wakeup_boost && input_boost_enabled &&
	    !work_pending(&input_boost_work)) {
		pr_debug("Wakeup boost for display on event.\n");
		queue_work(cpu_boost_wq, &input_boost_work);
		last_input_time = ktime_to_us(ktime_get());
	}

	return 0;
}
/**
@brief	interrupt handler for a wakeup interrupt

1) Reads the interrupt value\n
2) Performs interrupt handling\n

@param irq	the IRQ number
@param data	the pointer to a data (a mem_link_device instance)
*/
static irqreturn_t ap_wakeup_handler(int irq, void *data)
{
	struct mem_link_device *mld = (struct mem_link_device *)data;
	struct link_device *ld = &mld->link_dev;
	int ap_wakeup = gpio_get_value(mld->gpio_ap_wakeup);
	int ap_status = gpio_get_value(mld->gpio_ap_status);

	/* Re-arm the IRQ to trigger on the opposite edge of current level. */
	s5p_change_irq_type(irq, ap_wakeup);

	if (!cp_online(ld->mc))
		goto exit;

	/* A fresh wake-up event supersedes a pending "let CP sleep" work. */
	if (work_pending(&mld->cp_sleep_dwork.work))
		__cancel_delayed_work(&mld->cp_sleep_dwork);

	print_pm_status(mld);

	if (ap_wakeup) {
		/* CP asserted ap_wakeup: keep the AP awake via a wake lock. */
		if (!wake_lock_active(&mld->ap_wlock))
			wake_lock(&mld->ap_wlock);

		/* Acknowledge with ap_status unless the C2C link is down. */
		if (!c2c_suspended() && !ap_status)
			gpio_set_value(mld->gpio_ap_status, 1);
	} else {
		/* CP released ap_wakeup: drop the lock and schedule the
		 * delayed work that will deassert our GPIOs after the
		 * hold time expires. */
		if (wake_lock_active(&mld->ap_wlock))
			wake_unlock(&mld->ap_wlock);

		queue_delayed_work(system_nrt_wq, &mld->cp_sleep_dwork,
				msecs_to_jiffies(CP_WAKEUP_HOLD_TIME));
	}

exit:
	return IRQ_HANDLED;
}
Example #12
0
/*
 * ipc_memory_callback() - memory-hotplug notifier for the IPC subsystem.
 *
 * When memory actually goes online or offline, msgmni must be recomputed
 * by firing the ipcns notifier chain with IPC_MEMCHANGED.  To avoid
 * holding the hotplug memory chain for long, that is deferred to a work
 * item; at most one such item is kept queued.  All transitional or
 * cancelled hotplug events are ignored.
 */
static int ipc_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);

	if (action == MEM_ONLINE || action == MEM_OFFLINE) {
		if (!work_pending(&ipc_memory_wq))
			schedule_work(&ipc_memory_wq);
	}

	return NOTIFY_OK;
}
/**
@brief		forbid CP from going to sleep

Wakes up a CP if it can sleep and increases the "ref_cnt" counter in the
mem_link_device instance.

@param mld	the pointer to a mem_link_device instance

@remark		CAUTION!!! permit_cp_sleep() MUST be invoked after
		forbid_cp_sleep() success to decrease the "ref_cnt" counter.
*/
static void forbid_cp_sleep(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;
	int ap_status = gpio_get_value(mld->gpio_ap_status);
	int cp_wakeup = gpio_get_value(mld->gpio_cp_wakeup);
	unsigned long flags;

	spin_lock_irqsave(&mld->pm_lock, flags);

	/* Take a PM reference; released by permit_cp_sleep(). */
	atomic_inc(&mld->ref_cnt);

	/* Assert both GPIOs so the CP stays (or comes) awake. */
	gpio_set_value(mld->gpio_ap_status, 1);
	gpio_set_value(mld->gpio_cp_wakeup, 1);

	/* Any queued "let CP sleep" work is now obsolete. */
	if (work_pending(&mld->cp_sleep_dwork.work))
		cancel_delayed_work(&mld->cp_sleep_dwork);

	spin_unlock_irqrestore(&mld->pm_lock, flags);

	/* Log only when at least one GPIO was actually low before. */
	if (!ap_status || !cp_wakeup)
		print_pm_status(mld);

	/* If the link did not come up in time, force a CP crash dump. */
	if (check_link_status(mld) < 0) {
		print_pm_status(mld);
		mif_err("%s: ERR! check_link_status fail\n", ld->name);
		mem_forced_cp_crash(mld);
	}
}
/*
 * Si47xx_isr() - FM tuner interrupt handler.
 *
 * With RDS_INTERRUPT_ON_ALWAYS, a completed seek/tune wakes the waiting
 * ioctl thread, while an RDS interrupt defers group processing to the RDS
 * workqueue.  Without it, any waited-for event simply wakes the waiter.
 */
static irqreturn_t Si47xx_isr(int irq, void *unused)
{
	debug("Si47xx_isr: FM device called IRQ: %d\n", irq);
#ifdef RDS_INTERRUPT_ON_ALWAYS
	if (Si47xx_dev_wait_flag == SEEK_WAITING ||
	    Si47xx_dev_wait_flag == TUNE_WAITING) {
		/* Seek/tune finished: release the waiting thread. */
		debug("Si47xx_isr: FM Seek/Tune Interrupt called IRQ %d\n",
			irq);
		Si47xx_dev_wait_flag = WAIT_OVER;
		wake_up_interruptible(&Si47xx_waitq);
	} else if (Si47xx_RDS_flag == RDS_WAITING) {
		/* RDS data available: hand off to the RDS workqueue. */
		debug_rds("Si47xx_isr: FM RDS Interrupt called IRQ %d", irq);
		debug_rds("RDS_Groups_Available_till_now b/w Power ON/OFF : %d",
			  RDS_Groups_Available_till_now);

		if (!work_pending(&Si47xx_work))
			queue_work(Si47xx_wq, &Si47xx_work);
	}
#else
	if (Si47xx_dev_wait_flag == SEEK_WAITING ||
	    Si47xx_dev_wait_flag == TUNE_WAITING ||
	    Si47xx_dev_wait_flag == RDS_WAITING) {
		Si47xx_dev_wait_flag = WAIT_OVER;
		wake_up_interruptible(&Si47xx_waitq);
	}
#endif
	return IRQ_HANDLED;
}
Example #15
0
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = (struct shmdrv_layer *)priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn(
			"phyif_shm_mbx_msg_cb: RX full out of sync:"
			" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
				idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
						&pshm_drv->shm_rx_work);
	}
/*
 * diag_dbgfs_read_hsic() - debugfs read handler dumping the HSIC bridge
 * state (channel/enable/busy flags and pending work items) as text.
 *
 * Returns the number of bytes copied to @ubuf, or -ENOMEM when the scratch
 * buffer cannot be allocated.
 */
static ssize_t diag_dbgfs_read_hsic(struct file *file, char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	/* sizeof(char) is 1 by definition — allocate the size directly. */
	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf) {
		pr_err("diag: %s, Error allocating memory\n", __func__);
		return -ENOMEM;
	}

	/* The "hisc_suspend" label was a typo for "hsic_suspend" (debug
	 * output only) — fixed. */
	ret = scnprintf(buf, DEBUG_BUF_SIZE,
		"hsic initialized: %d\n"
		"hsic ch: %d\n"
		"hsic enabled: %d\n"
		"hsic_opened: %d\n"
		"hsic_suspend: %d\n"
		"in_busy_hsic_read_on_mdm: %d\n"
		"in_busy_hsic_write_on_mdm: %d\n"
		"in_busy_hsic_write: %d\n"
		"in_busy_hsic_read: %d\n"
		"usb_mdm_connected: %d\n"
		"diag_read_mdm_work: %d\n"
		"diag_read_hsic_work: %d\n"
		"diag_disconnect_work: %d\n"
		"diag_usb_read_complete_work: %d\n",
		driver->hsic_initialized,
		driver->hsic_ch,
		driver->hsic_device_enabled,
		driver->hsic_device_opened,
		driver->hsic_suspend,
		driver->in_busy_hsic_read_on_device,
		driver->in_busy_hsic_write_on_device,
		driver->in_busy_hsic_write,
		driver->in_busy_hsic_read,
		driver->usb_mdm_connected,
		work_pending(&driver->diag_read_mdm_work),
		work_pending(&driver->diag_read_hsic_work),
		work_pending(&driver->diag_disconnect_work),
		work_pending(&driver->diag_usb_read_complete_work));

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);

	kfree(buf);
	return ret;
}
Example #17
0
/*
 * fjes_update_zone_irq() - queue the zone-update task on the control
 * workqueue unless it is already pending.
 */
static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (work_pending(&hw->update_zone_task))
		return;

	queue_work(adapter->control_wq, &hw->update_zone_task);
}
Example #18
0
/*
 * vibetonz_timer_func() - hrtimer callback; defers vibrator handling to
 * process context via work_timer.  The timer is one-shot.
 */
static enum hrtimer_restart vibetonz_timer_func(struct hrtimer *timer)
{
	if (!work_pending(&work_timer))
		schedule_work(&work_timer);

	return HRTIMER_NORESTART;
}
Example #19
0
/*
 * usb_send() - queue an skb for transmission over the USB link.
 *
 * Selects the raw or fmt TX queue based on the I/O device format, drops the
 * packet while the link is still booting, then enqueues the skb and kicks
 * tx_delayed_work.  Returns the queued skb length, 0 when the packet was
 * dropped, or -EBUSY when raw TX is suspended and we are in IRQ context.
 */
static int usb_send(struct link_device *ld, struct io_device *iod,
			struct sk_buff *skb)
{
	struct sk_buff_head *txq;
	size_t tx_size;
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	switch (iod->format) {
	case IPC_RAW:
		txq = &ld->sk_raw_tx_q;

		if (unlikely(ld->raw_tx_suspended)) {
			/* Unlike misc_write, vnet_xmit is in interrupt.
			 * Despite call netif_stop_queue on CMD_SUSPEND,
			 * packets can be reached here.
			 */
			if (in_irq()) {
				mif_err("raw tx is suspended, "
						"drop packet. size=%d",
						skb->len);
				return -EBUSY;
			}

			/* Process context: block until the CP resumes raw TX. */
			mif_err("wait RESUME CMD...\n");
			INIT_COMPLETION(ld->raw_tx_resumed_by_cp);
			wait_for_completion(&ld->raw_tx_resumed_by_cp);
			mif_err("resumed done.\n");
		}
		break;
	case IPC_BOOT:
	case IPC_FMT:
	case IPC_RFS:
	default:
		/* All non-raw formats share the fmt TX queue. */
		txq = &ld->sk_fmt_tx_q;
		break;
	}
	/* store the tx size before run the tx_delayed_work*/
	tx_size = skb->len;

	/* drop packet, when link is not online */
	if (ld->com_state == COM_BOOT && iod->format != IPC_BOOT) {
		mif_err("%s: drop packet, size=%d, com_state=%d\n",
				iod->name, skb->len, ld->com_state);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* en queue skb data */
	skb_queue_tail(txq, skb);
	/* Hold wake_lock for getting schedule the tx_work */
	wake_lock(&pm_data->tx_async_wake);

	if (!work_pending(&ld->tx_delayed_work.work))
		queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0);

	return tx_size;
}
/*
 * __wakeup_boost() - queue a CPU boost for a display-on event, unless
 * boosting is disabled or a boost is already pending.
 */
static void __wakeup_boost(void)
{
	if (!wakeup_boost || !input_boost_enabled)
		return;
	if (work_pending(&input_boost_work))
		return;

	pr_debug("Wakeup boost for display on event.\n");
	queue_work(cpu_boost_wq, &input_boost_work);
	last_input_time = ktime_to_us(ktime_get());
}
Example #21
0
/*
 * keyboard_timer() - timer callback deferring keyboard disable handling
 * to process context (the work runs the disable() path).
 */
static void keyboard_timer(unsigned long _data)
{
	struct sec_keyboard_drvdata *ddata =
		(struct sec_keyboard_drvdata *)_data;

	if (work_pending(&ddata->work_timer))
		return;

	schedule_work(&ddata->work_timer);
}
Example #22
0
/*
 * schedule_cp_free() - arm the delayed work that releases the CP after
 * holding gpio_ap2cp_wakeup for cp_hold_time ms; a no-op when the work
 * is already pending.
 */
static inline void schedule_cp_free(struct modem_link_pm *pm)
{
	if (!work_pending(&pm->cp_free_dwork.work))
		queue_delayed_work(pm->wq, &pm->cp_free_dwork,
				   msecs_to_jiffies(cp_hold_time));
}
/*
 * si470x_i2c_interrupt - interrupt handler
 *
 * Defers tuner event handling to process context via radio_work; the IRQ
 * is always acknowledged.
 */
static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id)
{
	struct si470x_device *radio = dev_id;

	if (work_pending(&radio->radio_work))
		return IRQ_HANDLED;

	schedule_work(&radio->radio_work);
	return IRQ_HANDLED;
}
Example #24
0
File: wq.c Project: zzdever/kmdev
/*
 * Exit() - module teardown: report whether the work item is still pending,
 * then drain and destroy the private workqueue.
 */
void __exit Exit(void)
{
	int pending;

	pending = work_pending((struct work_struct *)work);
	printk(KERN_INFO "pending ret: %d\n", pending);
	printk(KERN_INFO "work exit: %ld\n", (long)work);

	flush_workqueue(queue);
	destroy_workqueue(queue);
}
Example #25
0
/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	return work_pending(work) ? WORK_BUSY_PENDING : 0;
}
/*
 * misc_hall_irq() - hall-switch interrupt handler; defers state handling
 * to the hall workqueue unless a work item is already queued.
 */
static irqreturn_t misc_hall_irq(int irq, void *data)
{
	struct hall_switch_data *hall_data = data;

	this_data = data;

	if (!work_pending(&hall_data->hall_work))
		queue_work(hall_data->hall_workqueue, &hall_data->hall_work);

	return IRQ_HANDLED;
}
Example #27
0
/*
 * irq_left_button2() - left-button interrupt handler.
 *
 * Samples the button GPIO, updates and reports the input-device key state,
 * then schedules the matching work item (button1 work on release, button2
 * work on press) unless it is already pending.
 */
irqreturn_t irq_left_button2(int irq, void *dev_id)
{
	struct work_struct *btn_work;

	val = SOC_IO_Input(BUTTON_LEFT_1, BUTTON_LEFT_1, 0 /*GPIO_CFG_PULL_UP*/);

	/* Line is pulled up: non-zero val means the button is released. */
	knob_input_dev->key[BIT_WORD(BTN_1)] = val ? 0 : 2;
	knob_input_dev->keybit[BIT_WORD(BTN_1)] = BIT_MASK(BTN_1);
	input_report_key(knob_input_dev, BTN_1, val);
	input_sync(knob_input_dev);

	btn_work = val ? &work_left_button1 : &work_left_button2;
	if (!work_pending(btn_work))
		schedule_work(btn_work);

	return IRQ_HANDLED;
}
Example #28
0
/*
 * usbsvn_xmit() - netdev transmit hook; queues the frame and lets the TX
 * worker drain the list.  Never drops here, so always NETDEV_TX_OK.
 */
static netdev_tx_t usbsvn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct usbsvn *svn = netdev_priv(dev);

	skb_queue_tail(&svn->tx_skb_queue, skb);

	if (!work_pending(&svn->tx_work))
		queue_work(svn->tx_workqueue, &svn->tx_work);

	return NETDEV_TX_OK;
}
Example #29
0
/*
 * wl1271_ps_elp_wakeup() - wake the wl1271 chip from ELP (low-power) mode.
 *
 * Writes ELPCTRL_WAKE_UP and, unless the IRQ work is already queued (it
 * will deliver the wake-up) or @chip_awake says the chip is already up,
 * blocks up to WL1271_WAKEUP_TIMEOUT ms for the firmware's completion.
 * Returns 0 on success (or when not in ELP at all), -ETIMEDOUT on
 * wake-up timeout, or a negative error from the completion wait.
 */
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	u32 start_time = jiffies;
	bool pending = false;

	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * The spinlock is required here to synchronize both the work and
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (work_pending(&wl->irq_work) || chip_awake)
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* Tell the firmware to wake up; completion arrives via the IRQ. */
	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

	if (!pending) {
		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			/* No wake-up arrived in time: trigger recovery. */
			wl1271_error("ELP wakeup timeout!");
			ieee80211_queue_work(wl->hw, &wl->recovery_work);
			ret = -ETIMEDOUT;
			goto err;
		} else if (ret < 0) {
			wl1271_error("ELP wakeup completion error.");
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));
	goto out;

err:
	/* Detach the on-stack completion before it goes out of scope. */
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return ret;

out:
	return 0;
}
Example #30
0
/*
 * rt5611_ts_pen_interrupt() - pen-down interrupt handler.
 *
 * Masks the IRQ and hands sampling off to the touchscreen workqueue; when
 * the work is already queued, the event is ignored (IRQ stays enabled).
 */
static irqreturn_t rt5611_ts_pen_interrupt(int irq, void *dev_id)
{
	struct rt5611_ts *rt = dev_id;

	RT5611_TS_DEBUG("rt5611_ts_pen_interrupt\n");

	if (!work_pending(&rt->pen_event_work)) {
		disable_irq_nosync(irq);
		queue_work(rt->ts_workq, &rt->pen_event_work);
	}

	return IRQ_HANDLED;
}