Example #1
static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, POLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}
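The POLLHUP wake-up in Example #1 only reaches anyone because readers previously registered themselves on ctx->wqh through the file's poll method. Below is a minimal, illustrative sketch of what that paired handler looks like; it assumes an eventfd_ctx with a wqh wait queue head and a count field and is not the upstream eventfd implementation.

/* Illustrative sketch only (field names assumed): the poll handler that
 * registers callers on the same ctx->wqh which eventfd_release() later
 * wakes with POLLHUP. */
static unsigned int eventfd_poll_sketch(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	unsigned int events = 0;

	/* Hook the caller onto ctx->wqh so wake_up_poll() can reach it. */
	poll_wait(file, &ctx->wqh, wait);

	if (ctx->count > 0)
		events |= POLLIN;	/* counter is readable */
	if (ctx->count < ULLONG_MAX - 1)
		events |= POLLOUT;	/* counter can still be incremented */

	return events;
}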
Example #2
void signalfd_cleanup(struct sighand_struct *sighand)
{
    wait_queue_head_t *wqh = &sighand->signalfd_wqh;
    if (likely(!waitqueue_active(wqh)))
        return;

    wake_up_poll(wqh, POLLHUP | POLLFREE);
}
Example #3
static int
gfsk_evfd_release(struct inode *inode, struct file *file)
{
	struct gfsk_evfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, POLLHUP);
	kfree(ctx);
	return (0);
}
Example #4
int timerfd_release(struct inode* inode, struct file* file)
{
  struct timerfd_ctx *ctx = file->private_data;

  hrtimer_cancel(&ctx->timer);
  wake_up_poll(&ctx->wqh, POLLHUP);
  module_put(THIS_MODULE);
  kfree(ctx);
  return 0;
}
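Example #4 cancels the hrtimer before waking pollers with POLLHUP, which points at the other half of the pattern: during normal operation it is the timer's expiry callback that wakes the same ctx->wqh with POLLIN. A hedged sketch of such a callback follows; the ticks field and the function name are assumptions, not the real timerfd code.

/* Sketch only (ticks field is hypothetical): the expiry callback that feeds
 * the same wait queue, waking poll()ers with POLLIN on each expiration. */
static enum hrtimer_restart timerfd_tmrproc_sketch(struct hrtimer *htmr)
{
  struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, timer);

  ctx->ticks++;                        /* record the expiration */
  wake_up_poll(&ctx->wqh, POLLIN);     /* readers and pollers become runnable */

  return HRTIMER_NORESTART;
}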
Example #5
void signalfd_cleanup(struct sighand_struct *sighand)
{
	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
	/*
	 * The lockless check can race with remove_wait_queue() in progress,
	 * but in this case its caller should run under rcu_read_lock() and
	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
	 */
	if (likely(!waitqueue_active(wqh)))
		return;

	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
	wake_up_poll(wqh, POLLHUP | POLLFREE);
}
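The comment in Example #5 relies on whoever sits on the queue detaching itself when it sees POLLFREE, since the sighand structure, and with it the wait queue head, is about to be freed. A minimal sketch of such a wait_queue_t callback is shown below; the function name is hypothetical and the task_list field matches the older wait-queue layout used by these examples.

/* Illustrative only: a wait_queue_t callback that honours POLLFREE by
 * unhooking itself before the memory behind the wait queue head goes away. */
static int pollfree_aware_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	if ((unsigned long)key & POLLFREE) {
		/* The queue head is being torn down: drop off it now. */
		list_del_init(&wait->task_list);
		return 0;
	}

	/* Normal wake-up handling (POLLHUP, POLLIN, ...) would go here. */
	return 1;
}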
Example #6
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;

    DEBUG_LOG_FUNCTION_ENTRY;

    if (skb){
        p_cccih = (CCCI_BUFF_T *)skb->data;
        DBGLOG(IPCD,TRA,"%s: CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", __FUNCTION__,
            p_cccih->data[0],p_cccih->data[1],p_cccih->channel, p_cccih->reserved);
    }

#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map=unify_AP_id_2_local_id(p_cccih->reserved))==NULL)
    {
        DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
        dev_kfree_skb(skb);
        return KAL_SUCCESS;
    }
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)(p_ilm + 1);    /* local_para data follows the ilm header */
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        dev_kfree_skb(skb);
        DBGLOG(IPCD, TRA, "IPC(%d) MT_CONN kernel rx callback", node_id);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; refer to skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
        DBGLOG(IPCD,ERR,"PKT DROP while ipc dev(%d) closed", node_id);
        dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
Example #7
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;
    unsigned int i = 0;
    char * addr;

    DEBUG_LOG_FUNCTION_ENTRY;

    if (skb){
        p_cccih = (CCCI_BUFF_T *)skb->data;
        DBGLOG(IPCD,DBG,"[RX]CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", \
            p_cccih->data[0],p_cccih->data[1],p_cccih->channel, p_cccih->reserved);
    }
    
#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map=unify_AP_id_2_local_id(p_cccih->reserved))==NULL)
    {
       DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
       dev_kfree_skb(skb);
       return KAL_SUCCESS ;
    }
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)(p_ilm+1);

        if (p_ilm->local_para_ptr != NULL) {
            DBGLOG(IPCD, INF, "[RX][KERN]src=%d dest=0x%x sap=0x%x msg=0x%x local_ptr=%p msg_len=%d",
                p_ilm->src_mod_id, p_ilm->dest_mod_id, p_ilm->sap_id, p_ilm->msg_id,
                p_ilm->local_para_ptr, p_ilm->local_para_ptr->msg_len);
            for (i=0; i<32; i++) {
                addr = (char *)p_ilm;
                DBGLOG(IPCD, DBG, "%p=%x", (addr+i), *(addr+i));
            }
        }
        #ifdef ENABLE_CONN_COEX_MSG		
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        #endif
        dev_kfree_skb(skb);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; refer to skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
        DBGLOG(IPCD, ERR, "PKT DROP while ipc dev(%d) closed", node_id);
        dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
    return KAL_SUCCESS;
}
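In Examples #6 and #7 the wake_up_poll(..., POLLIN) call only matters because the character-device side of the ipc node sleeps on rx_waitq through its poll handler. Roughly, and with the node type and field names assumed from the callbacks above rather than taken from the actual driver, that handler would look like the sketch below.

/* Hedged sketch (eemcs_ipc_node_t and its fields are assumed, not the real
 * driver definitions): the device poll handler waiting on the rx_waitq that
 * eemcs_ipc_rx_callback() wakes with POLLIN. */
static unsigned int eemcs_ipc_poll_sketch(struct file *file, poll_table *wait)
{
    eemcs_ipc_node_t *node = file->private_data;
    unsigned int mask = 0;

    poll_wait(file, &node->rx_waitq, wait);     /* register on rx_waitq */

    if (!skb_queue_empty(&node->rx_skb_list))
        mask |= POLLIN | POLLRDNORM;            /* data queued by the RX callback */

    return mask;
}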