static void hci_smd_notify_event(void *data, unsigned int event)
{
	struct hci_dev *hdev = hs.hdev;
	struct hci_smd_data *hsmd = &hs;
	struct work_struct *reset_worker;
	struct work_struct *open_worker;

	int len = 0;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}

	switch (event) {
	case SMD_EVENT_DATA:
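		/* Data is pending on the SMD event channel; check how much and hand it to the RX tasklet */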
		len = smd_read_avail(hsmd->event_channel);
		if (len > 0)
			tasklet_hi_schedule(&hs.rx_task);
		else if (len < 0)
			BT_ERR("Failed to read event from smd %d", len);

		break;
	case SMD_EVENT_OPEN:
		BT_INFO("opening HCI-SMD channel :%s", EVENT_CHANNEL);
		hci_smd_open(hdev);
		open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
		if (!open_worker) {
			BT_ERR("Out of memory");
			break;
		}
		INIT_WORK(open_worker, hci_dev_smd_open);
		schedule_work(open_worker);
		break;
	case SMD_EVENT_CLOSE:
		BT_INFO("Closing HCI-SMD channel :%s", EVENT_CHANNEL);
		hci_smd_close(hdev);
		reset_worker = kzalloc(sizeof(*reset_worker), GFP_ATOMIC);
		if (!reset_worker) {
			BT_ERR("Out of memory");
			break;
		}
		INIT_WORK(reset_worker, hci_dev_restart);
		schedule_work(reset_worker);
		break;
	default:
		break;
	}
}
static void usb_write_port_complete(struct urb *purb)
{
	int i;
	struct xmit_frame *pxmitframe = (struct xmit_frame *)purb->context;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct _adapter *padapter = pxmitframe->padapter;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

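	/* Account the completed frame against its WMM queue (BK/VI/VO, default BE) */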
	switch (pattrib->priority) {
	case 1:
	case 2:
		pxmitpriv->bkq_cnt--;
		break;
	case 4:
	case 5:
		pxmitpriv->viq_cnt--;
		break;
	case 6:
	case 7:
		pxmitpriv->voq_cnt--;
		break;
	case 0:
	case 3:
	default:
		pxmitpriv->beq_cnt--;
		break;
	}
	pxmitpriv->txirp_cnt--;
	for (i = 0; i < 8; i++) {
		if (purb == pxmitframe->pxmit_urb[i]) {
			pxmitframe->bpending[i] = false;
			break;
		}
	}
	if (padapter->bSurpriseRemoved)
		return;
	switch (purb->status) {
	case 0:
		break;
	default:
		printk(KERN_WARNING "r8712u: pipe error: (%d)\n", purb->status);
		break;
	}
	/* not to consider tx fragment */
	r8712_free_xmitframe_ex(pxmitpriv, pxmitframe);
	r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
}
MAILBOX_LOCAL int mailbox_ipc_process(
                struct mb_local_work  *local_work,
                struct mb_local_proc  *local_proc,
                unsigned int channel_id,
                unsigned int proc_id)
{
    unsigned int is_find = MAILBOX_TRUE;

    while (local_work) {
        /* Find the matching mailbox in the work list, set its data flag
         * and notify the handler that processes it. */
        if (channel_id == local_work->channel_id) {
            /* Mark this mailbox entry in the work list as having pending data */
            local_work->data_flag = MAILBOX_TRUE;

#ifdef MAILBOX_OPEN_MNTN
            mailbox_record_sche_send(local_work->mb_priv);
#endif
            wake_lock(&mb_lpwr_lock);

            if ((proc_id > MAILBOX_RECV_TASK_START)
                && (proc_id < MAILBOX_RECV_TASK_END)) {

                /* Task mode: wake the receive task waiting on the queue */
                local_proc->incoming = MAILBOX_TRUE;
                wake_up(&local_proc->wait);

            } else if (MAILBOX_RECV_TASKLET_HI == proc_id) {
                /* High-priority tasklet mode: mailbox data is handled in the tasklet */
                tasklet_hi_schedule(&local_proc->tasklet);

            } else if (MAILBOX_RECV_TASKLET == proc_id) {
                /* Tasklet mode: mailbox data is handled in the tasklet */
                tasklet_schedule(&local_proc->tasklet);

            } else if (MAILBOX_RECV_INT_IRQ == proc_id) {
                /* IRQ mode: handle the mailbox data directly in interrupt context */
                mailbox_receive_process((unsigned long)local_proc);

            } else {
                is_find = MAILBOX_FALSE;
            }
        }

        local_work = local_work->next;
    }

    return is_find;
}
Example #4
/* PS-Poll frame use BulkOutPipeId = 0 */
void RTUSBBulkOutPsPollComplete(struct urb *pUrb, struct pt_regs * pt_regs)
{
	struct rt_rtmp_adapter *pAd;
	struct rt_tx_context *pPsPollContext;
	int Status;
	struct os_cookie *pObj;

	pPsPollContext = (struct rt_tx_context *)pUrb->context;
	pAd = pPsPollContext->pAd;
	Status = pUrb->status;

	pObj = (struct os_cookie *)pAd->OS_Cookie;
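	/* Defer PS-Poll completion handling to the high-priority tasklet; the URB is passed via the tasklet data field */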
	pObj->pspoll_frame_complete_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->pspoll_frame_complete_task);
}
Example #5
static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	u32 v_int;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		return IRQ_HANDLED;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
					   0);
		tasklet_hi_schedule(&bank->resp_handler);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
	void request_done_tasklet(unsigned long data)
	{
		ifxpcd_ep_t *_ifxep = (ifxpcd_ep_t *)data;
		ifxpcd_request_t   *req;
		unsigned long flags=0;

		int k=10;
		_ifxep->cmpt_tasklet_in_process=1;

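		/* Drain up to 10 completed requests per tasklet run; if more remain, the tasklet is rescheduled below */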
		while (k)
		{
			SPIN_LOCK_IRQSAVE(&_ifxep->cmp_lock,flags);
			if(_ifxep->queue_cmpt.next != &_ifxep->queue_cmpt)
			{
				req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t,trq);
				list_del_init(&req->trq);
				SPIN_UNLOCK_IRQRESTORE(&_ifxep->cmp_lock,flags);

				if(req->sysreq.complete)
					req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
				else
				{
					#ifdef __req_num_dbg__
						IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, req->reqid);
					#else
						IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, req);
					#endif
				}
			}
			else
			{
				SPIN_UNLOCK_IRQRESTORE(&_ifxep->cmp_lock,flags);
				break;
			}
			k--;
		}

		if(!list_empty(&_ifxep->queue_cmpt))
		{
			#ifdef __GADGET_TASKLET_HIGH__
				tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
			#else
				tasklet_schedule(&_ifxep->cmpt_tasklet);
			#endif
		}
		else
			_ifxep->cmpt_tasklet_in_process=0;
	}
Example #7
// PS-Poll frame use BulkOutPipeId = 0
VOID RTUSBBulkOutPsPollComplete(purbb_t pUrb,struct pt_regs *pt_regs)
{
	PRTMP_ADAPTER		pAd;
	PTX_CONTEXT			pPsPollContext;
	NTSTATUS			Status;
	POS_COOKIE			pObj;


	pPsPollContext= (PTX_CONTEXT)pUrb->context;
	pAd = pPsPollContext->pAd;
	Status = pUrb->status;

	pObj = (POS_COOKIE) pAd->OS_Cookie;
	pObj->pspoll_frame_complete_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->pspoll_frame_complete_task);
}
void r8712_xmit_bh(void *priv)
{
	int ret = false;
	struct _adapter *padapter = (struct _adapter *)priv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	if ((padapter->bDriverStopped == true) ||
	    (padapter->bSurpriseRemoved == true)) {
		netdev_err(padapter->pnetdev, "xmit_bh => bDriverStopped or bSurpriseRemoved\n");
		return;
	}
	ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL);
	if (ret == false)
		return;
	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
}
Example #9
void rtw_os_xmit_schedule(struct adapter *padapter)
{
	struct xmit_priv *pxmitpriv;

	if (!padapter)
		return;

	pxmitpriv = &padapter->xmitpriv;

	spin_lock_bh(&pxmitpriv->lock);

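	/* Kick the xmit tasklet only if frames are still queued for transmission */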
	if (rtw_txframes_pending(padapter))
		tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	spin_unlock_bh(&pxmitpriv->lock);
}
static irqreturn_t hsi_mpu_handler(int irq, void *p)
{
	struct hsi_port *pport = (struct hsi_port *) p;

	/* Check no other interrupt handler has already scheduled the tasklet */
	if (test_and_set_bit(HSI_FLAGS_TASKLET_LOCK, &pport->flags))
		return IRQ_HANDLED;

	tasklet_hi_schedule(&pport->hsi_tasklet);

	/* Disable the interrupt until the bottom half has cleared the IRQ status register */
	disable_irq_nosync(pport->irq);

	return IRQ_HANDLED;
}
void rtl8723b_silentreset_for_specific_platform(_adapter *padapter)
{
	HAL_DATA_TYPE	*pHalData = GET_HAL_DATA(padapter);
	struct sreset_priv *psrtpriv = &pHalData->srestpriv;

	struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
	struct mlme_priv	*pmlmepriv = &(padapter->mlmepriv);
	struct xmit_priv	*pxmitpriv = &padapter->xmitpriv;
	_irqL irqL;

#ifdef DBG_CONFIG_ERROR_RESET

	DBG_871X("%s\n", __FUNCTION__);

	psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;

	if (!rtw_netif_queue_stopped(padapter->pnetdev))
		rtw_netif_stop_queue(padapter->pnetdev);

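	/* Quiesce the driver: the net queue is stopped above, now cancel timers and kill the xmit tasklet before the reset */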
	rtw_cancel_all_timer(padapter);
	tasklet_kill(&pxmitpriv->xmit_tasklet);

	_enter_critical_mutex(&psrtpriv->silentreset_mutex, &irqL);
	psrtpriv->silent_reset_inprogress = _TRUE;
	pwrpriv->change_rfpwrstate = rf_off;
#ifdef CONFIG_IPS
	ips_enter(padapter);
	ips_leave(padapter);
#endif
	if(check_fwstate(pmlmepriv, _FW_LINKED)== _TRUE)
	{
		_restore_network_status(padapter);
		_restore_security_setting(padapter);
	}

	_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);

	psrtpriv->silent_reset_inprogress = _FALSE;
	_exit_critical_mutex(&psrtpriv->silentreset_mutex, &irqL);

	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
	_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);

	if (netif_queue_stopped(padapter->pnetdev))
		netif_wake_queue(padapter->pnetdev);
#endif
}
Example #12
/*******************************************************************************
**
**  onuEponIrqRoutine
**  ____________________________________________________________________________
**
**  DESCRIPTION: EPON interrupt routine
**
**  PARAMETERS:  none
** 
**  OUTPUTS:     none
**                               
**  RETURNS:     IRQ_HANDLED
**
*******************************************************************************/
irqreturn_t onuEponIrqRoutine(int irq, void *arg)
{
#ifdef MV_EPON_HW_INTERRUPT
  onuEponIsrRoutine(0, 0);
#else /* SW_INTERRUPT - TASKLET */
  MV_U32 interruptEvent;
  MV_U32 interruptStatus;

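  /* Latch the low-level interrupt event/status, then defer the full handling to the PON tasklet */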
  S_onuPonIrq *irqId = (S_onuPonIrq*)arg;
  onuEponIsrLowRoutine(&interruptEvent, &interruptStatus);
  eponCurrentInterruptEvent  = interruptEvent;
  eponCurrentInterruptStatus = interruptStatus;
  tasklet_hi_schedule(&(irqId->onuPonTasklet));
#endif 

  return(IRQ_HANDLED);
}
Example #13
/*
	========================================================================

	Routine Description:
		This routine process Rx Irp and call rx complete function.

	Arguments:
		DeviceObject	Pointer to the device object for next lower
						device. DeviceObject passed in here belongs to
						the next lower driver in the stack because we
						were invoked via IoCallDriver in USB_RxPacket
						AND it is not OUR device object
	  Irp				Ptr to completed IRP
	  Context			Ptr to our Adapter object (context specified
						in IoSetCompletionRoutine

	Return Value:
		Always returns STATUS_MORE_PROCESSING_REQUIRED

	Note:
		Always returns STATUS_MORE_PROCESSING_REQUIRED
	========================================================================
*/
void RTUSBBulkRxComplete(struct urb *pUrb, struct pt_regs *pt_regs)
{
	/* use a receive tasklet to handle received packets; */
	/* or sometimes hardware IRQ will be disabled here, so we can not */
	/* use spin_lock_bh()/spin_unlock_bh() after IRQ is disabled. :< */
	struct rt_rx_context *pRxContext;
	struct rt_rtmp_adapter *pAd;
	struct os_cookie *pObj;

	pRxContext = (struct rt_rx_context *)pUrb->context;
	pAd = pRxContext->pAd;
	pObj = (struct os_cookie *)pAd->OS_Cookie;

	pObj->rx_done_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->rx_done_task);

}
Example #14
void rtw_os_xmit_schedule(struct adapter *padapter)
{
	unsigned long  irql;
	struct xmit_priv *pxmitpriv;

	if (!padapter)
		return;

	pxmitpriv = &padapter->xmitpriv;

	_enter_critical_bh(&pxmitpriv->lock, &irql);

	if (rtw_txframes_pending(padapter))
		tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	_exit_critical_bh(&pxmitpriv->lock, &irql);
}
void rtl8192c_silentreset_for_specific_platform(_adapter *padapter)
{
	HAL_DATA_TYPE	*pHalData = GET_HAL_DATA(padapter);	
	struct sreset_priv *psrtpriv = &pHalData->srestpriv;
	
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;		
	struct mlme_priv	*pmlmepriv = &(padapter->mlmepriv);
	struct xmit_priv	*pxmitpriv = &padapter->xmitpriv;	
	_irqL irqL;

	psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;

	if (!netif_queue_stopped(padapter->pnetdev))
		netif_stop_queue(padapter->pnetdev);
		
	rtw_cancel_all_timer(padapter);	
	tasklet_kill(&pxmitpriv->xmit_tasklet);	

	_enter_critical_mutex(&psrtpriv->silentreset_mutex, &irqL);
	psrtpriv->silent_reset_inprogress = _TRUE;
	pwrpriv->change_rfpwrstate = rf_off;		
	ips_enter(padapter);								
	ips_leave(padapter);
	if(check_fwstate(pmlmepriv, _FW_LINKED)== _TRUE)
	{
		_restore_network_status(padapter);
		_restore_security_setting(padapter);	
	}
	
	if(pmlmepriv->fw_state & _FW_UNDER_SURVEY)			
		pmlmepriv->fw_state ^= _FW_UNDER_SURVEY;
	
	if(pmlmepriv->fw_state & _FW_UNDER_LINKING) 		
		pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
	
	
	psrtpriv->silent_reset_inprogress = _FALSE;
	_exit_critical_mutex(&psrtpriv->silentreset_mutex, &irqL);
		
	tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
	_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);	
				
	if (netif_queue_stopped(padapter->pnetdev))
		netif_wake_queue(padapter->pnetdev);	
}
Example #16
static void non_command_handler(struct dpram_link_device *dpld, u16 non_cmd)
{
	struct link_device *ld = &dpld->ld;
	int i = 0;
	int ret = 0;
	u16 tx_mask = 0;

	if (!dpram_ipc_active(dpld))
		return;

	/* Read data from DPRAM */
	for (i = 0; i < dpld->max_ipc_dev; i++) {
		ret = dpram_ipc_recv_data(dpld, i);
		if (ret < 0)
			dpram_purge_rx_circ(dpld, i);

		/* Check and process REQ_ACK (at this time, in == out) */
		if (non_cmd & get_mask_req_ack(dpld, i)) {
			mif_debug("%s: send %s_RES_ACK\n",
				ld->name, get_dev_name(i));
			tx_mask |= get_mask_res_ack(dpld, i);
		}
	}

	/* Schedule soft IRQ for RX */
	tasklet_hi_schedule(&dpld->rx_tsk);

	/* Try TX via DPRAM */
	for (i = 0; i < dpld->max_ipc_dev; i++) {
		if (atomic_read(&dpld->res_required[i]) > 0) {
			ret = dpram_try_ipc_tx(dpld, i);
			if (ret > 0) {
				atomic_set(&dpld->res_required[i], 0);
				tx_mask |= get_mask_send(dpld, i);
			} else if (ret == -ENOSPC) {
				tx_mask |= get_mask_req_ack(dpld, i);
			}
		}
	}

	if (tx_mask) {
		send_intr(dpld, INT_NON_CMD(tx_mask));
		mif_debug("%s: send intr 0x%04X\n", ld->name, tx_mask);
	}
}
Example #17
VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs)
{
	PRX_CONTEXT		pRxContext;
	PRTMP_ADAPTER	pAd;
	POS_COOKIE 		pObj;


	pRxContext	= (PRX_CONTEXT)pUrb->context;
	pAd 		= pRxContext->pAd;
	pObj 		= (POS_COOKIE) pAd->OS_Cookie;

	pObj->rx_done_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->rx_done_task);

}
Example #18
/*
	========================================================================

	Routine Description:
		This routine process Rx Irp and call rx complete function.

	Arguments:
		DeviceObject	Pointer to the device object for next lower
						device. DeviceObject passed in here belongs to
						the next lower driver in the stack because we
						were invoked via IoCallDriver in USB_RxPacket
						AND it is not OUR device object
	  Irp				Ptr to completed IRP
	  Context			Ptr to our Adapter object (context specified
						in IoSetCompletionRoutine

	Return Value:
		Always returns STATUS_MORE_PROCESSING_REQUIRED

	Note:
		Always returns STATUS_MORE_PROCESSING_REQUIRED
	========================================================================
*/
VOID RTUSBBulkRxComplete(purbb_t pUrb, struct pt_regs *pt_regs)
{
	// use a receive tasklet to handle received packets;
	// or sometimes hardware IRQ will be disabled here, so we can not
	// use spin_lock_bh()/spin_unlock_bh() after IRQ is disabled. :<
	PRX_CONTEXT		pRxContext;
	PRTMP_ADAPTER	pAd;
	POS_COOKIE 		pObj;


	pRxContext	= (PRX_CONTEXT)pUrb->context;
	pAd 		= pRxContext->pAd;
	pObj 		= (POS_COOKIE) pAd->OS_Cookie;

	pObj->rx_done_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->rx_done_task);

}
Example #19
VOID RTUSBBulkOutMLMEPacketComplete(purbb_t pUrb, struct pt_regs *pt_regs)
{
	PTX_CONTEXT			pMLMEContext;
	PRTMP_ADAPTER		pAd;
	NTSTATUS			Status;
	POS_COOKIE 			pObj;
	int					index;

	//DBGPRINT_RAW(RT_DEBUG_INFO, ("--->RTUSBBulkOutMLMEPacketComplete\n"));
	pMLMEContext	= (PTX_CONTEXT)pUrb->context;
	pAd 			= pMLMEContext->pAd;
	pObj 			= (POS_COOKIE)pAd->OS_Cookie;
	Status			= pUrb->status;
	index 			= pMLMEContext->SelfIdx;

	pObj->mgmt_dma_done_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->mgmt_dma_done_task);
}
Example #20
void RTUSBBulkOutMLMEPacketComplete(struct urb *pUrb, struct pt_regs * pt_regs)
{
	struct rt_tx_context *pMLMEContext;
	struct rt_rtmp_adapter *pAd;
	int Status;
	struct os_cookie *pObj;
	int index;

	/*DBGPRINT_RAW(RT_DEBUG_INFO, ("--->RTUSBBulkOutMLMEPacketComplete\n")); */
	pMLMEContext = (struct rt_tx_context *)pUrb->context;
	pAd = pMLMEContext->pAd;
	pObj = (struct os_cookie *)pAd->OS_Cookie;
	Status = pUrb->status;
	index = pMLMEContext->SelfIdx;

	pObj->mgmt_dma_done_task.data = (unsigned long)pUrb;
	tasklet_hi_schedule(&pObj->mgmt_dma_done_task);
}
Example #21
VOID AndesBhSchedule(RTMP_ADAPTER *ad)
{
	struct MCU_CTRL *ctl = &ad->MCUCtrl;

	if (!OS_TEST_BIT(MCU_INIT, &ctl->flags))
		return;

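	/* Kick the command-message bottom half only when the RX- or TX-done queue has pending entries */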
	if (((AndesQueueLen(ctl, &ctl->rx_doneq) > 0)
							|| (AndesQueueLen(ctl, &ctl->tx_doneq) > 0))
							&& OS_TEST_BIT(MCU_INIT, &ctl->flags)) {
#ifndef WORKQUEUE_BH
		RTMP_NET_TASK_DATA_ASSIGN(&ctl->cmd_msg_task, (unsigned long)(ad));
		RTMP_OS_TASKLET_SCHE(&ctl->cmd_msg_task);
#else
		tasklet_hi_schedule(&ctl->cmd_msg_task);
#endif
	}
}
Example #22
void jit_tasklet_fn(unsigned long arg)
{
	struct jit_data *data = (struct jit_data *)arg;
	unsigned long j = jiffies;
	seq_printf(data->s, "%9li  %3li     %i    %6i   %i   %s\n",
		     j, j - data->prevjiffies, in_interrupt() ? 1 : 0,
		     current->pid, smp_processor_id(), current->comm);

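	/* Re-arm the tasklet (high or normal priority) until the loop count is exhausted, then wake the waiting reader */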
	if (--data->loops) {
		data->prevjiffies = j;
		if (data->hi)
			tasklet_hi_schedule(&data->tlet);
		else
			tasklet_schedule(&data->tlet);
	} else {
		wake_up_interruptible(&data->wait);
	}
}
Example #23
static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & ps->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS)
		ps->pcie_irq_rx_count++;

	if (status & PCIE_HDP_INT_TX_BITS)
		ps->pcie_irq_tx_count++;

	if (status & PCIE_HDP_INT_HHBM_UF)
		ps->pcie_irq_uf_count++;

	if (status & PCIE_HDP_INT_RX_BITS) {
		qtnf_dis_rxdone_irq(ps);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		qtnf_dis_txdone_irq(ps);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ps);

	return IRQ_HANDLED;
}
Example #24
/*
	========================================================================

	Routine Description:
	Arguments:
		Adapter 	Pointer to our adapter. Dequeue all power-save delayed broadcast frames after beacon.

	IRQL = DISPATCH_LEVEL
	
	========================================================================
*/
VOID	RTMPHandleTBTTInterrupt(
	IN PRTMP_ADAPTER pAd)
{
#ifdef CONFIG_AP_SUPPORT
	POS_COOKIE pObj = (POS_COOKIE) pAd->OS_Cookie;

	if (pAd->OpMode == OPMODE_AP)
	{
		ReSyncBeaconTime(pAd);

#ifdef WORKQUEUE_BH	
		schedule_work(&pObj->tbtt_work);
#else
		tasklet_hi_schedule(&pObj->tbtt_task);
#endif // WORKQUEUE_BH //

		if ((pAd->CommonCfg.Channel > 14)
			&& (pAd->CommonCfg.bIEEE80211H == 1)
			&& (pAd->CommonCfg.RadarDetect.RDMode == RD_SWITCHING_MODE))
		{
			DBGPRINT(RT_DEBUG_TRACE, ("RTMPHandleTBTTInterrupt::Channel Switching...(%d/%d)\n", pAd->CommonCfg.RadarDetect.CSCount, pAd->CommonCfg.RadarDetect.CSPeriod));
			
			pAd->CommonCfg.RadarDetect.CSCount++;
			if (pAd->CommonCfg.RadarDetect.CSCount >= pAd->CommonCfg.RadarDetect.CSPeriod)
			{
#ifdef DFS_HARDWARE_SUPPORT
				pAd->CommonCfg.DFSAPRestart=1;
				schedule_dfs_task(pAd);
#else
				APStop(pAd);
				APStartUp(pAd);
#endif // DFS_HARDWARE_SUPPORT //
			}
		}
	}
	else
#endif // CONFIG_AP_SUPPORT //
	{
		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
		{
		}
	}
}
static void smd_net_notify(void *_dev, unsigned event)
{
	struct rmnet_private *p = netdev_priv((struct net_device *)_dev);

	if (event != SMD_EVENT_DATA)
		return;

	spin_lock(&p->lock);
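	/* Resume the TX tasklet once the SMD channel has room for the deferred skb */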
	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len))
		tasklet_hi_schedule(&p->tsklt);

	spin_unlock(&p->lock);

	if (smd_read_avail(p->ch) &&
	    (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
		smd_net_data_tasklet.data = (unsigned long) _dev;
		tasklet_schedule(&smd_net_data_tasklet);
	}
}
Example #26
/* the /proc function: allocate everything to allow concurrency */
int jit_tasklet(char *buf, char **start, off_t offset,
	      int len, int *eof, void *arg)
{
	struct jit_data *data;
	char *buf2 = buf;
	unsigned long j = jiffies;
	long hi = (long)arg;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	init_waitqueue_head (&data->wait);

	/* write the first lines in the buffer */
	buf2 += sprintf(buf2, "   time   delta  inirq    pid   cpu command\n");
	buf2 += sprintf(buf2, "%9li  %3li     %i    %6i   %i   %s\n",
			j, 0L, in_interrupt() ? 1 : 0,
			current->pid, smp_processor_id(), current->comm);

	/* fill the data for our tasklet function */
	data->prevjiffies = j;
	data->buf = buf2;
	data->loops = JIT_ASYNC_LOOPS;
	
	/* register the tasklet */
	tasklet_init(&data->tlet, jit_tasklet_fn, (unsigned long)data);
	data->hi = hi;
	if (hi)
		tasklet_hi_schedule(&data->tlet);
	else
		tasklet_schedule(&data->tlet);

	/* wait for the buffer to fill */
	wait_event_interruptible(data->wait, !data->loops);

	if (signal_pending(current))
		return -ERESTARTSYS;
	buf2 = data->buf;
	kfree(data);
	*eof = 1;
	return buf2 - buf;
}
static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
	rt2x00pci_register_write(rt2x00dev, CSR7, reg);

	if (!reg)
		return IRQ_NONE;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	mask = reg;

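	/* Beacon timing (TBTT) gets a high-priority tasklet; RX-done and TX-status use normal-priority tasklets */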
	if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, CSR7_RXDONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
	    rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
	    rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
		rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
		rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
		rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
	}

	spin_lock(&rt2x00dev->irqmask_lock);

	rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
	reg |= mask;
	rt2x00pci_register_write(rt2x00dev, CSR8, reg);

	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
Example #28
s32	 rtl8812au_hal_xmitframe_enqueue(_adapter *padapter, struct xmit_frame *pxmitframe)
{
	struct xmit_priv 	*pxmitpriv = &padapter->xmitpriv;
	s32 err;
	
	if ((err=rtw_xmitframe_enqueue(padapter, pxmitframe)) != _SUCCESS) 
	{
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		pxmitpriv->tx_drop++;					
	}
	else
	{
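		/* Frame queued successfully; kick the xmit tasklet to start draining the queue */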
		tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
	}
	
	return err;
	
}
Example #29
/* Ethernet Tx DMA interrupt */
static irqreturn_t
rc32434_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct rc32434_local *lp;
	volatile u32 dmas,dmasm;
	irqreturn_t retval;
	
	ASSERT(dev != NULL);
	
	lp = (struct rc32434_local *)dev->priv;
	
	spin_lock(&lp->lock);
	
	dmas = __raw_readl(&lp->tx_dma_regs->dmas);
	
	if (dmas & (DMAS_f_m | DMAS_e_m)) {
		dmasm = __raw_readl(&lp->tx_dma_regs->dmasm);
		/* Mask F E bit in Tx DMA */
		__raw_writel(dmasm | (DMASM_f_m | DMASM_e_m), &lp->tx_dma_regs->dmasm);
		
		tasklet_hi_schedule(lp->tx_tasklet);
		
		if(lp->tx_chain_status == filled && (__raw_readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			__raw_writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), &(lp->tx_dma_regs->dmandptr));			
			lp->tx_chain_status = empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			dev->trans_start = jiffies;
		}
		
		if (dmas & DMAS_e_m)
			ERR(": DMA error\n");
		
		retval = IRQ_HANDLED;
	}
	else
		retval = IRQ_NONE;
	
	spin_unlock(&lp->lock);
	
	return retval;
}
Example #30
void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
{
	int cover_open;
	struct mmc_omap_host *host = dev_get_drvdata(dev);
	struct mmc_omap_slot *slot = host->slots[num];

	BUG_ON(num >= host->nr_slots);

	/* Other subsystems can call in here before we're initialised. */
	if (host->nr_slots == 0 || !host->slots[num])
		return;

	cover_open = mmc_omap_cover_is_open(slot);
	if (cover_open != slot->cover_open) {
		slot->cover_open = cover_open;
		sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
	}

	tasklet_hi_schedule(&slot->cover_tasklet);
}