Example #1
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = false;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
		queue = true;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
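This build routes all received frames through a workqueue instead of calling netif_rx() from the completion handler. A minimal sketch of what such a worker might look like, assuming the queue and field names used above (process_rx_w itself is a hypothetical name, not necessarily the driver's):

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;

	/* drain frames queued by rx_complete(), outside hard-IRQ context */
	while ((skb = skb_dequeue(&dev->rx_frames)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;
		netif_rx_ni(skb);	/* process-context variant of netif_rx() */
	}
}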
Example #2
static void prism2_host_roaming(local_info_t *local)
{
	struct hfa384x_join_request req;
	struct net_device *dev = local->dev;
	struct hfa384x_hostscan_result *selected, *entry;
	int i;
	unsigned long flags;

	if (local->last_join_time &&
	    time_before(jiffies, local->last_join_time + 10 * HZ)) {
		PDEBUG(DEBUG_EXTRA, "%s: last join request has not yet been "
		       "completed - waiting for it before issuing new one\n",
		       dev->name);
		return;
	}

	/* ScanResults are sorted: first ESS results in decreasing signal
	 * quality then IBSS results in similar order.
	 * Trivial roaming policy: just select the first entry.
	 * This could probably be improved by adding hysteresis to limit
	 * number of handoffs, etc.
	 *
	 * Could do periodic RID_SCANREQUEST or Inquire F101 to get new
	 * ScanResults */
	spin_lock_irqsave(&local->lock, flags);
	if (local->last_scan_results == NULL ||
	    local->last_scan_results_count == 0) {
		spin_unlock_irqrestore(&local->lock, flags);
		PDEBUG(DEBUG_EXTRA, "%s: no scan results for host roaming\n",
		       dev->name);
		return;
	}

	selected = &local->last_scan_results[0];

	if (local->preferred_ap[0] || local->preferred_ap[1] ||
	    local->preferred_ap[2] || local->preferred_ap[3] ||
	    local->preferred_ap[4] || local->preferred_ap[5]) {
		/* Try to find preferred AP */
		PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID %pM\n",
		       dev->name, local->preferred_ap);
		for (i = 0; i < local->last_scan_results_count; i++) {
			entry = &local->last_scan_results[i];
			if (memcmp(local->preferred_ap, entry->bssid, 6) == 0)
			{
				PDEBUG(DEBUG_EXTRA, "%s: using preferred AP "
				       "selection\n", dev->name);
				selected = entry;
				break;
			}
		}
	}

	memcpy(req.bssid, selected->bssid, 6);
	req.channel = selected->chid;
	spin_unlock_irqrestore(&local->lock, flags);

	PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%pM"
	       " channel=%d\n",
	       dev->name, req.bssid, le16_to_cpu(req.channel));
	if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
				 sizeof(req))) {
		printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name);
	}
	local->last_join_time = jiffies;
}
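The roaming-policy comment above suggests adding hysteresis to limit the number of handoffs; a minimal sketch of such a check (ROAM_HYST and the signal arguments are assumptions for illustration, not hostap's actual API):

#define ROAM_HYST 8	/* require the candidate to win by this margin */

/* cur_sig/cand_sig would come from the scan results' signal level field */
static int roam_is_worthwhile(int cur_sig, int cand_sig)
{
	return cand_sig > cur_sig + ROAM_HYST;
}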
Example #3
static __inline __s32 Hal_Set_Frame(__u32 sel, __u32 tcon_index, __u32 id)
{
    __u32 cur_line = 0, start_delay = 0;
    unsigned long flags;

    cur_line = tcon_get_cur_line(sel, tcon_index);
    start_delay = tcon_get_start_delay(sel, tcon_index);
	if (cur_line > start_delay - 5)
	{
		/* too close to the start of the active frame: skip this update
		 * (was: DE_INF("cur_line(%d) >= start_delay(%d)-3 ...")) */
		return DIS_FAIL;
	}

#ifdef __LINUX_OSAL__
    spin_lock_irqsave(&g_video[sel][id].flag_lock, flags);
#endif
    if(g_video[sel][id].display_cnt == 0)
    {
	    g_video[sel][id].pre_frame_addr_luma = g_video[sel][id].video_cur.addr[0];
        g_video[sel][id].pre_frame_addr_chroma= g_video[sel][id].video_cur.addr[1];
        memcpy(&g_video[sel][id].video_cur, &g_video[sel][id].video_new, sizeof(__disp_video_fb_t));
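        /* swap cur_maf_flag_addr and pre_maf_flag_addr in place using the
         * classic triple-XOR trick (no temporary variable needed) */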
        g_video[sel][id].cur_maf_flag_addr ^=  g_video[sel][id].pre_maf_flag_addr;
        g_video[sel][id].pre_maf_flag_addr ^=  g_video[sel][id].cur_maf_flag_addr;
        g_video[sel][id].cur_maf_flag_addr ^=  g_video[sel][id].pre_maf_flag_addr;
        disp_video_checkin(sel, id);
    }
    g_video[sel][id].display_cnt++;
#ifdef __LINUX_OSAL__
    spin_unlock_irqrestore(&g_video[sel][id].flag_lock, flags);
#endif
    if(gdisp.screen[sel].layer_manage[id].para.mode == DISP_LAYER_WORK_MODE_SCALER)
    {
        __u32 scaler_index;
    	__scal_buf_addr_t scal_addr;
        __scal_src_size_t in_size;
        __scal_out_size_t out_size;
        __scal_src_type_t in_type;
        __scal_out_type_t out_type;
        __scal_scan_mod_t in_scan;
        __scal_scan_mod_t out_scan;
        __disp_scaler_t * scaler;
        __u32 pre_frame_addr_luma = 0, pre_frame_addr_chroma = 0;
        __u32 maf_linestride = 0;
        __u32 size;

        scaler_index = gdisp.screen[sel].layer_manage[id].scaler_index;
        
        scaler = &(gdisp.scaler[scaler_index]);

    	if(g_video[sel][id].video_cur.interlace == TRUE)
    	{
    	    if((!(gdisp.screen[sel].de_flicker_status & DE_FLICKER_USED)) && 
    	        (scaler->in_fb.format == DISP_FORMAT_YUV420 && scaler->in_fb.mode == DISP_MOD_MB_UV_COMBINED)
    	        && (dit_mode_default[scaler_index] != 0xff) && (scaler->in_fb.size.width < 1920))//todo , full size of 3d mode < 1920
    	    {
    		    g_video[sel][id].dit_enable = TRUE;
    		}else
            {
                g_video[sel][id].dit_enable = FALSE;
            }

            g_video[sel][id].fetch_field = FALSE;
        	if(g_video[sel][id].display_cnt == 0)
        	{
        	    g_video[sel][id].fetch_bot = (g_video[sel][id].video_cur.top_field_first)?0:1;
        	}
        	else
        	{
        		g_video[sel][id].fetch_bot = (g_video[sel][id].video_cur.top_field_first)?1:0;
        	}

    		if(g_video[sel][id].dit_enable == TRUE)
    		{
				g_video[sel][id].dit_mode = dit_mode_default[scaler_index];
        		maf_linestride = (((scaler->src_win.width + 31) & 0xffffffe0) * 2 / 8 + 31) & 0xffffffe0;
        		/* was: g_video[sel][id].video_cur.flag_stride; todo?
        		 * (((720 + 31) & 0xffffffe0) * 2/8 + 31) & 0xffffffe0 */
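        		/* worked example for a 720-pixel source: (720+31)&~31 = 736,
        		 * 736*2/8 = 184, (184+31)&~31 = 192 bytes per flag line */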

    			if(g_video[sel][id].video_cur.pre_frame_valid == TRUE)
    			{
                    g_video[sel][id].tempdiff_en = TRUE;
                    pre_frame_addr_luma= (__u32)OSAL_VAtoPA((void*)g_video[sel][id].pre_frame_addr_luma);
                    pre_frame_addr_chroma= (__u32)OSAL_VAtoPA((void*)g_video[sel][id].pre_frame_addr_chroma);
    			}
    			else
    			{
    				g_video[sel][id].tempdiff_en = FALSE;
    			}
    			g_video[sel][id].diagintp_en = TRUE;
    		}
    		else
    		{
        	    g_video[sel][id].dit_mode = DIT_MODE_WEAVE;
        	    g_video[sel][id].tempdiff_en = FALSE;
        	    g_video[sel][id].diagintp_en = FALSE;
    		}
    	}
    	else
    	{
    		g_video[sel][id].dit_enable = FALSE;
    	    g_video[sel][id].fetch_field = FALSE;
    	    g_video[sel][id].fetch_bot = FALSE;
    	    g_video[sel][id].dit_mode = DIT_MODE_WEAVE;
    	    g_video[sel][id].tempdiff_en = FALSE;
    	    g_video[sel][id].diagintp_en = FALSE;
    	}
        
    	in_type.fmt= Scaler_sw_para_to_reg(0,scaler->in_fb.mode, scaler->in_fb.format, scaler->in_fb.seq);
        in_type.mod= Scaler_sw_para_to_reg(1,scaler->in_fb.mode, scaler->in_fb.format, scaler->in_fb.seq);
        in_type.ps= Scaler_sw_para_to_reg(2,scaler->in_fb.mode, scaler->in_fb.format, (__u8)scaler->in_fb.seq);
    	in_type.byte_seq = 0;
    	in_type.sample_method = 0;

    	scal_addr.ch0_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[0]));
    	scal_addr.ch1_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[1]));
    	scal_addr.ch2_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[2]));

    	in_size.src_width = scaler->in_fb.size.width;
    	in_size.src_height = scaler->in_fb.size.height;
    	in_size.x_off =  scaler->src_win.x;
    	in_size.y_off =  scaler->src_win.y;
    	in_size.scal_height=  scaler->src_win.height;
    	in_size.scal_width=  scaler->src_win.width;

    	out_type.byte_seq =  scaler->out_fb.seq;
    	out_type.fmt =  scaler->out_fb.format;

    	out_size.width =  scaler->out_size.width;
    	out_size.height =  scaler->out_size.height;

    	in_scan.field = g_video[sel][id].fetch_field;
    	in_scan.bottom = g_video[sel][id].fetch_bot;

    	out_scan.field = (gdisp.screen[sel].de_flicker_status & DE_FLICKER_USED)?0: gdisp.screen[sel].b_out_interlace;
        
    	if(scaler->out_fb.cs_mode > DISP_VXYCC)
    	{
    		scaler->out_fb.cs_mode = DISP_BT601;
    	}

        if(scaler->in_fb.b_trd_src)
        {
            __scal_3d_inmode_t inmode;
            __scal_3d_outmode_t outmode = 0;
            __scal_buf_addr_t scal_addr_right;

            inmode = Scaler_3d_sw_para_to_reg(0, scaler->in_fb.trd_mode, 0);
            outmode = Scaler_3d_sw_para_to_reg(1, scaler->out_trd_mode, gdisp.screen[sel].b_out_interlace);
            
            DE_SCAL_Get_3D_In_Single_Size(inmode, &in_size, &in_size);
            if(scaler->b_trd_out)
            {
                DE_SCAL_Get_3D_Out_Single_Size(outmode, &out_size, &out_size);
            }

        	scal_addr_right.ch0_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr_right[0]));
        	scal_addr_right.ch1_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr_right[1]));
        	scal_addr_right.ch2_addr= (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr_right[2]));

            DE_SCAL_Set_3D_Ctrl(scaler_index, scaler->b_trd_out, inmode, outmode);
            DE_SCAL_Config_3D_Src(scaler_index, &scal_addr, &in_size, &in_type, inmode, &scal_addr_right);
            DE_SCAL_Agth_Config(sel, &in_type, &in_size, &out_size, 0, scaler->b_trd_out, outmode);
        }
        else
        {
    	    DE_SCAL_Config_Src(scaler_index,&scal_addr,&in_size,&in_type,FALSE,FALSE);
    	    DE_SCAL_Agth_Config(sel, &in_type, &in_size, &out_size, 0, 0, 0);
    	}

    	DE_SCAL_Set_Init_Phase(scaler_index, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, g_video[sel][id].dit_enable);
    	DE_SCAL_Set_Scaling_Factor(scaler_index, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type);
    	//DE_SCAL_Set_Scaling_Coef_for_video(scaler_index, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, 0x00000101);
    	DE_SCAL_Set_Scaling_Coef(scaler_index, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, scaler->smooth_mode);
    	DE_SCAL_Set_Out_Size(scaler_index, &out_scan,&out_type, &out_size);
    	DE_SCAL_Set_Di_Ctrl(scaler_index,g_video[sel][id].dit_enable,g_video[sel][id].dit_mode,g_video[sel][id].diagintp_en,g_video[sel][id].tempdiff_en);
    	DE_SCAL_Set_Di_PreFrame_Addr(scaler_index, pre_frame_addr_luma, pre_frame_addr_chroma);
    	DE_SCAL_Set_Di_MafFlag_Src(scaler_index, OSAL_VAtoPA((void*)g_video[sel][id].cur_maf_flag_addr),
    	    OSAL_VAtoPA((void*)g_video[sel][id].pre_maf_flag_addr), maf_linestride);

        if(g_video[sel][id].display_cnt == 0)
        {
            size = (scaler->in_fb.size.width * scaler->src_win.height * de_format_to_bpp(scaler->in_fb.format) + 7)/8;
            //OSAL_CacheRangeFlush((void *)scal_addr.ch0_addr,size ,CACHE_CLEAN_FLUSH_D_CACHE_REGION);
        }
        
        DE_SCAL_Set_Reg_Rdy(scaler_index);
    }
    else
    {
        __layer_man_t * layer_man;
        __disp_fb_t fb;
        layer_src_t layer_fb;

        memset(&layer_fb, 0, sizeof(layer_src_t));
        layer_man = &gdisp.screen[sel].layer_manage[id];

        BSP_disp_layer_get_framebuffer(sel, id, &fb);
        fb.addr[0] = (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[0]));
        fb.addr[1] = (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[1]));
        fb.addr[2] = (__u32)OSAL_VAtoPA((void*)(g_video[sel][id].video_cur.addr[2]));

    	if(get_fb_type(fb.format) == DISP_FB_TYPE_YUV)
    	{
        	Yuv_Channel_adjusting(sel , fb.mode, fb.format, &layer_man->para.src_win.x, &layer_man->para.scn_win.width);
    		Yuv_Channel_Set_framebuffer(sel, &fb, layer_man->para.src_win.x, layer_man->para.src_win.y);
    	}
    	else
    	{        	    
            layer_fb.fb_addr    = (__u32)OSAL_VAtoPA((void*)fb.addr[0]);
            layer_fb.pixseq     = img_sw_para_to_reg(3,0,fb.seq);
            layer_fb.br_swap    = fb.br_swap;
            layer_fb.fb_width   = fb.size.width;
            layer_fb.offset_x   = layer_man->para.src_win.x;
            layer_fb.offset_y   = layer_man->para.src_win.y;
            layer_fb.format = fb.format;
            DE_BE_Layer_Set_Framebuffer(sel, id,&layer_fb);
        }
    }
    gdisp.screen[sel].layer_manage[id].para.fb.addr[0] = g_video[sel][id].video_cur.addr[0];
    gdisp.screen[sel].layer_manage[id].para.fb.addr[1] = g_video[sel][id].video_cur.addr[1];
    gdisp.screen[sel].layer_manage[id].para.fb.addr[2] = g_video[sel][id].video_cur.addr[2];
    gdisp.screen[sel].layer_manage[id].para.fb.trd_right_addr[0] = g_video[sel][id].video_cur.addr_right[0];
    gdisp.screen[sel].layer_manage[id].para.fb.trd_right_addr[1] = g_video[sel][id].video_cur.addr_right[1];
    gdisp.screen[sel].layer_manage[id].para.fb.trd_right_addr[2] = g_video[sel][id].video_cur.addr_right[2];
    
	return DIS_SUCCESS;
}
/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	spin_unlock_irqrestore(&latency_lock, flags);
}
Example #5
static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		rxconf.src_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		txconf.dst_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/* rx must be started before tx: the controller clocks data in as soon
	 * as tx runs, so the rx channel has to be armed first */
	if (rs->rx) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (rs->tx) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}

	return 1;
}
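dmaengine_prep_slave_sg() returns NULL when the channel cannot build a descriptor, and the function above dereferences the result unchecked. A defensive sketch of the rx half (the -ENOMEM convention here is an assumption, not the driver's actual error handling):

	rxdesc = dmaengine_prep_slave_sg(rs->dma_rx.ch,
					 rs->rx_sg.sgl, rs->rx_sg.nents,
					 rs->dma_rx.direction,
					 DMA_PREP_INTERRUPT);
	if (!rxdesc)
		return -ENOMEM;	/* hypothetical: caller must treat <0 as failure */

	rxdesc->callback = rockchip_spi_dma_rxcb;
	rxdesc->callback_param = rs;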
/*****************************************************************************
* FUNCTION
*  hal_rx_dma_irq_handler
* DESCRIPTION
*  lower level rx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN/OUT] pointer to rx data buffer
* max_len  [IN]        max length of rx buffer
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
#endif
/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     rpt, wpt);
	}

	i_ret = 0;

	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

/* calculate length of available data in vFIFO */
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}
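		/* worked example: vff_size 4096, rpt 4000, wpt 100 with differing
		 * wrap bits -> real_len = 100 + 4096 - 4000 = 196 bytes, which the
		 * callback below consumes in two chunks across the buffer end */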

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf, real_len -
					  tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
/* the read pointer's wrap bit must be toggled */
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;
/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;

		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));

		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
Example #7
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
		     elem->hw_desc + C2_TXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(maplen),
		     elem->hw_desc + C2_TXP_LEN);
	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
		     elem->hw_desc + C2_TXP_FLAGS);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew((__force u16) cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

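	/* stop the queue unless there is room for another worst-case skb
	 * (MAX_SKB_FRAGS page fragments plus the linear header) */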
	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	mutex_lock(&q->vb_lock);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	if (b->flags & V4L2_BUF_FLAG_INPUT) {
		if (b->input >= q->inputs) {
			dprintk(1, "qbuf: wrong input.\n");
			goto done;
		}
		buf->input = b->input;
	} else {
		buf->input = UNSET;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested "
				   "but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	mutex_unlock(&q->vb_lock);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
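videobuf_qbuf() sits behind a driver's VIDIOC_QBUF handler; for context, a sketch of the userspace ioctl sequence this helper serves (error handling omitted):

struct v4l2_buffer b;

memset(&b, 0, sizeof(b));
b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
b.memory = V4L2_MEMORY_MMAP;
b.index = 0;

ioctl(fd, VIDIOC_QBUF, &b);		/* ends up in videobuf_qbuf() */
ioctl(fd, VIDIOC_STREAMON, &b.type);	/* start capture */
ioctl(fd, VIDIOC_DQBUF, &b);		/* block until the buffer is filled */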
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	mutex_lock(&q->vb_lock);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf  &&
	    count >= size        &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0  ||  retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
Example #10
/*===========================================================================*/
void wmt_dump_dma_regs(dmach_t ch)
{
	struct dma_info_s *dma = &dma_chan[ch] ;
	struct dma_regs_s *regs  = dma->regs ;
	unsigned long flags;
	unsigned int now_time = 0;
	unsigned int delay_time = 0;

	printk("0x%8.8X : [0x%8.8X] GCR \n", \
		(unsigned int)&(regs->DMA_GCR) , (unsigned int)regs->DMA_GCR) ;
	printk("0x%8.8X : [0x%8.8X] MPRP \n", \
		(unsigned int)&(regs->DMA_MRPR) , (unsigned int)regs->DMA_MRPR) ;
	printk("0x%8.8X : [0x%8.8X] IER \n", \
		(unsigned int)&(regs->DMA_IER) , (unsigned int)regs->DMA_IER) ;
	printk("0x%8.8X : [0x%8.8X] ISR \n", \
		(unsigned int)&(regs->DMA_ISR) , (unsigned int)regs->DMA_ISR) ;
	printk("0x%8.8X : [0x%8.8X] TMR \n", \
		(unsigned int)&(regs->DMA_TMR) , (unsigned int)regs->DMA_TMR) ;
	printk("0x%8.8X : [0x%8.8X] CCR \n", \
		(unsigned int)&(regs->DMA_CCR_CH[ch]) , (unsigned int)regs->DMA_CCR_CH[ch]) ;

	if (dma->regs->DMA_CCR_CH[ch] & (SYSTEM_DMA_RUN)) {
		spin_lock_irqsave(&dma_list_lock, flags);
		dma->regs->DMA_CCR_CH[ch] |= (DMA_UP_MEMREG_EN);/*update memory register before reading*/

		now_time = wmt_read_oscr();
		while (dma->regs->DMA_CCR_CH[ch] &  (DMA_UP_MEMREG_EN)) {
			delay_time = wmt_read_oscr() - now_time;
			if (delay_time > 15) {/*5us*/
				DPRINTK("[%d]Warnning:up_mem_reg did not clear[%x]\n", ch, dma->regs->DMA_CCR_CH[ch]);
				dma->regs->DMA_CCR_CH[ch] &= ~DMA_UP_MEMREG_EN;/*clear DMA_UP_MEMREG_EN*/
				break;
			}

		}

		spin_unlock_irqrestore(&dma_list_lock, flags);
	}
	printk("0x%8.8X : [0x%8.8X] Residue Bytes 0 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF0RBR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF0RBR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Data Address 0 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF0DAR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF0DAR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Branch Address 0 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF0BAR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF0BAR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Command Pointer  0 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF0CPR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF0CPR_CH) ;

	printk("0x%8.8X : [0x%8.8X] Residue Bytes 1 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF1RBR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF1RBR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Data Address 1 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF1DAR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF1DAR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Branch Address 1 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF1BAR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF1BAR_CH) ;
	printk("0x%8.8X : [0x%8.8X] Command Pointer  1 \n", \
		(unsigned int)&(dma_mem_regs->mem_reg_group[ch].DMA_IF1CPR_CH)
		, (unsigned int)dma_mem_regs->mem_reg_group[ch].DMA_IF1CPR_CH) ;

}
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	mutex_lock(&q->vb_lock);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	mutex_unlock(&q->vb_lock);
	return retval;
}
Example #12
static void media_check(unsigned long arg)
{
    struct net_device *dev = (struct net_device *)(arg);
    struct el3_private *lp = netdev_priv(dev);
    unsigned int ioaddr = dev->base_addr;
    u16 media, errs;
    unsigned long flags;

    if (!netif_device_present(dev)) goto reschedule;

    /* Check for pending interrupt with expired latency timer: with
       this, we can limp along even if the interrupt is blocked */
    if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
	(inb(ioaddr + EL3_TIMER) == 0xff)) {
	if (!lp->fast_poll)
		netdev_warn(dev, "interrupt(s) dropped!\n");

	local_irq_save(flags);
	el3_interrupt(dev->irq, dev);
	local_irq_restore(flags);

	lp->fast_poll = HZ;
    }
    if (lp->fast_poll) {
	lp->fast_poll--;
	lp->media.expires = jiffies + HZ/100;
	add_timer(&lp->media);
	return;
    }

    /* lp->lock guards the EL3 window. Window should always be 1 except
       when the lock is held */
    spin_lock_irqsave(&lp->lock, flags);
    EL3WINDOW(4);
    media = inw(ioaddr+WN4_MEDIA) & 0xc810;

    /* Ignore collisions unless we've had no irq's recently */
    if (time_before(jiffies, lp->last_irq + HZ)) {
	media &= ~0x0010;
    } else {
	/* Try harder to detect carrier errors */
	EL3WINDOW(6);
	outw(StatsDisable, ioaddr + EL3_CMD);
	errs = inb(ioaddr + 0);
	outw(StatsEnable, ioaddr + EL3_CMD);
	dev->stats.tx_carrier_errors += errs;
	if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
    }

    if (media != lp->media_status) {
	if ((media & lp->media_status & 0x8000) &&
	    ((lp->media_status ^ media) & 0x0800))
		netdev_info(dev, "%s link beat\n",
			    (lp->media_status & 0x0800 ? "lost" : "found"));
	else if ((media & lp->media_status & 0x4000) &&
		 ((lp->media_status ^ media) & 0x0010))
		netdev_info(dev, "coax cable %s\n",
			    (lp->media_status & 0x0010 ? "ok" : "problem"));
	if (dev->if_port == 0) {
	    if (media & 0x8000) {
		if (media & 0x0800)
			netdev_info(dev, "flipped to 10baseT\n");
		else
			tc589_set_xcvr(dev, 2);
	    } else if (media & 0x4000) {
		if (media & 0x0010)
		    tc589_set_xcvr(dev, 1);
		else
		    netdev_info(dev, "flipped to 10base2\n");
	    }
	}
	lp->media_status = media;
    }

    EL3WINDOW(1);
    spin_unlock_irqrestore(&lp->lock, flags);

reschedule:
    lp->media.expires = jiffies + HZ;
    add_timer(&lp->media);
}
Example #13
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct rmnet_private *p = netdev_priv(dev);
    u32 old_opmode = p->operation_mode;
    unsigned long flags;
    int prev_mtu = dev->mtu;
    int rc = 0;


    switch (cmd) {
    case RMNET_IOCTL_SET_LLP_ETHERNET:

        if (p->operation_mode & RMNET_MODE_LLP_IP) {
            ether_setup(dev);
            random_ether_addr(dev->dev_addr);
            dev->mtu = prev_mtu;

            dev->netdev_ops = &rmnet_ops_ether;
            spin_lock_irqsave(&p->lock, flags);
            p->operation_mode &= ~RMNET_MODE_LLP_IP;
            p->operation_mode |= RMNET_MODE_LLP_ETH;
            spin_unlock_irqrestore(&p->lock, flags);
            DBG0("[%s] rmnet_ioctl(): "
                 "set Ethernet protocol mode\n",
                 dev->name);
        }
        break;

    case RMNET_IOCTL_SET_LLP_IP:

        if (p->operation_mode & RMNET_MODE_LLP_ETH) {


            dev->header_ops         = 0;
            dev->type               = ARPHRD_RAWIP;
            dev->hard_header_len    = 0;
            dev->mtu                = prev_mtu;
            dev->addr_len           = 0;
            dev->flags              &= ~(IFF_BROADCAST|
                                         IFF_MULTICAST);

            dev->netdev_ops = &rmnet_ops_ip;
            spin_lock_irqsave(&p->lock, flags);
            p->operation_mode &= ~RMNET_MODE_LLP_ETH;
            p->operation_mode |= RMNET_MODE_LLP_IP;
            spin_unlock_irqrestore(&p->lock, flags);
            DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
                 dev->name);
        }
        break;

    case RMNET_IOCTL_GET_LLP:
        ifr->ifr_ifru.ifru_data =
            (void *)(p->operation_mode &
                     (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
        break;

    case RMNET_IOCTL_SET_QOS_ENABLE:
        spin_lock_irqsave(&p->lock, flags);
        p->operation_mode |= RMNET_MODE_QOS;
        spin_unlock_irqrestore(&p->lock, flags);
        DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
             dev->name);
        break;

    case RMNET_IOCTL_SET_QOS_DISABLE:
        spin_lock_irqsave(&p->lock, flags);
        p->operation_mode &= ~RMNET_MODE_QOS;
        spin_unlock_irqrestore(&p->lock, flags);
        DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
             dev->name);
        break;

    case RMNET_IOCTL_GET_QOS:
        ifr->ifr_ifru.ifru_data =
            (void *)(p->operation_mode & RMNET_MODE_QOS);
        break;

    case RMNET_IOCTL_GET_OPMODE:
        ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
        break;

    case RMNET_IOCTL_OPEN:
        rc = __rmnet_open(dev);
        DBG0("[%s] rmnet_ioctl(): open transport port\n",
             dev->name);
        break;

    case RMNET_IOCTL_CLOSE:
        rc = __rmnet_close(dev);
        DBG0("[%s] rmnet_ioctl(): close transport port\n",
             dev->name);
        break;

    default:
        pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
               dev->name, cmd);
        return -EINVAL;
    }

    DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
         dev->name, __func__, cmd, old_opmode, p->operation_mode);
    return rc;
}
Example #14
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
    unsigned long flags;

    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;

        skb = dev_alloc_skb(sz + NET_IP_ALIGN);
        if (skb == NULL) {
            pr_err("[%s] rmnet_recv() cannot allocate skb\n",
                   dev->name);

            smd_net_data_tasklet.data = (unsigned long)dev;
            tasklet_schedule(&smd_net_data_tasklet);
            break;
        } else {
            skb->dev = dev;
            skb_reserve(skb, NET_IP_ALIGN);
            ptr = skb_put(skb, sz);
            wake_lock_timeout(&p->wake_lock, HZ / 2);
            if (smd_read(p->ch, ptr, sz) != sz) {
                pr_err("[%s] rmnet_recv() smd lied about avail?!",
                       dev->name);
                ptr = 0;
                dev_kfree_skb_irq(skb);
            } else {

                spin_lock_irqsave(&p->lock, flags);
                opmode = p->operation_mode;
                spin_unlock_irqrestore(&p->lock, flags);

                if (RMNET_IS_MODE_IP(opmode)) {

                    skb->protocol =
                        rmnet_ip_type_trans(skb, dev);
                } else {

                    skb->protocol =
                        eth_type_trans(skb, dev);
                }
                if (RMNET_IS_MODE_IP(opmode) ||
                        count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                    p->wakeups_rcv +=
                        rmnet_cause_wakeup(p);
#endif
                    p->stats.rx_packets++;
                    p->stats.rx_bytes += skb->len;
                }
                DBG1("[%s] Rx packet #%lu len=%d\n",
                     dev->name, p->stats.rx_packets,
                     skb->len);


                netif_rx(skb);
            }
            continue;
        }
    }
}
/*****************************************************************************
* FUNCTION
*  hal_btif_clk_ctrl
* DESCRIPTION
*  control clock output enable/disable of DMA module
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_btif_dma_clk_ctrl(P_MTK_DMA_INFO_STR p_dma_info, ENUM_CLOCK_CTRL flag)
{
/*In MTK DMA BTIF channel, there's only one global CG on AP_DMA, no sub channel's CG bit*/
/*according to Artis's comment, the DMA and BTIF clocks default to off, so we assume they are off initially*/
	int i_ret = 0;
	unsigned long irq_flag = 0;

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	static atomic_t s_clk_ref = ATOMIC_INIT(0);
#else
	static ENUM_CLOCK_CTRL status = CLK_OUT_DISABLE;
#endif
	spin_lock_irqsave(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_CTL

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

	if (CLK_OUT_ENABLE == flag) {
		if (1 == atomic_inc_return(&s_clk_ref)) {
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		}
	} else if (CLK_OUT_DISABLE == flag) {
		if (0 == atomic_dec_return(&s_clk_ref)) {
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		}
	} else {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
	}

#else

	if (status == flag) {
		i_ret = 0;
		BTIF_DBG_FUNC("dma clock already %s\n",
			      CLK_OUT_ENABLE ==
			      status ? "enabled" : "disabled");
	} else {
		if (CLK_OUT_ENABLE == flag) {
			i_ret =
			    enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("enable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		} else if (CLK_OUT_DISABLE == flag) {
			i_ret =
			    disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
			status = (0 == i_ret) ? flag : status;
			if (i_ret) {
				BTIF_WARN_FUNC
				    ("disable_clock for MTK_BTIF_APDMA_CLK_CG failed, ret:%d",
				     i_ret);
			}
		} else {
			i_ret = ERR_INVALID_PAR;
			BTIF_ERR_FUNC("invalid  clock ctrl flag (%d)\n", flag);
		}
	}
#endif

#else

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER

#else

	status = flag;
#endif

	i_ret = 0;
#endif

	spin_unlock_irqrestore(&(g_clk_cg_spinlock), irq_flag);

#if MTK_BTIF_ENABLE_CLK_REF_COUNTER
	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#else

	if (0 == i_ret) {
		BTIF_DBG_FUNC("dma clock %s\n",
			      CLK_OUT_ENABLE == flag ? "enabled" : "disabled");
	} else {
		BTIF_ERR_FUNC("%s dma clock failed, ret(%d)\n",
			      CLK_OUT_ENABLE == flag ? "enable" : "disable",
			      i_ret);
	}
#endif
#if MTK_BTIF_ENABLE_CLK_CTL
	BTIF_DBG_FUNC("DMA's clock is %s\n",
		      (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) ? "off" : "on");
#endif

	return i_ret;
}
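Stripped of the #if variants, the reference-counting scheme above reduces to a simple first-on/last-off pattern; a minimal sketch (the helper names are illustrative only):

static atomic_t s_clk_ref = ATOMIC_INIT(0);

static int clk_ref_enable(void)
{
	/* only the first user actually gates the clock on */
	if (1 == atomic_inc_return(&s_clk_ref))
		return enable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
	return 0;
}

static int clk_ref_disable(void)
{
	/* only the last user gates it off again */
	if (0 == atomic_dec_return(&s_clk_ref))
		return disable_clock(MTK_BTIF_APDMA_CLK_CG, DMA_USER_ID);
	return 0;
}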
Example #16
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
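	/* e.g. assuming an mtu of 1500 and a 512-byte bulk-OUT maxpacket,
	 * the ethhdr + mtu + RX_EXTRA total is rounded up to the next
	 * multiple of 512 so short-packet termination still works */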

#ifdef CONFIG_USB_GADGET_S3C_OTGD_DMA_MODE
	/* for double word align */
	skb = alloc_skb(size + NET_IP_ALIGN + 6, gfp_flags);
#else
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
#endif
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
#ifdef CONFIG_USB_GADGET_S3C_OTGD_DMA_MODE
	/* for double word align */
	skb_reserve(skb, NET_IP_ALIGN + 6);
#else
	skb_reserve(skb, NET_IP_ALIGN);
#endif

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
/*****************************************************************************
* FUNCTION
*  hal_tx_dma_irq_handler
* DESCRIPTION
*  lower level tx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_tx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info)
{
#define MAX_CONTINIOUS_TIMES 512
	int i_ret = -1;
	unsigned int valid_size = 0;
	unsigned int vff_len = 0;
	unsigned int left_len = 0;
	unsigned int base = p_dma_info->base;
	static int flush_irq_counter;
	static int superious_irq_counter;
	static struct timeval start_timer;
	static struct timeval end_timer;
	unsigned long flag = 0;
	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC
		    ("%s: clock is off before irq status clear done!!!\n",
		     __FILE__);
		return i_ret;
	}
#endif
/*check if Tx VFF Left Size equal to VFIFO size or not*/
	vff_len = BTIF_READ32(TX_DMA_VFF_LEN(base));
	valid_size = BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base));
	left_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	if (0 == flush_irq_counter) {
		do_gettimeofday(&start_timer);
	}
	if ((0 < valid_size) && (8 > valid_size)) {
		i_ret = _tx_dma_flush(p_dma_info);
		flush_irq_counter++;
		if (MAX_CONTINIOUS_TIMES <= flush_irq_counter) {
			do_gettimeofday(&end_timer);
/* When the btif tx fifo cannot accept any data and the number of bytes
 * left in the tx vfifo stays below 8 for a while, we assume that btif
 * cannot send data for a long time. To avoid generating this interrupt
 * continuously, which may affect system performance, we clear the tx
 * flag and disable the btif tx interrupt.
 */
/*clear interrupt flag*/
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			BTIF_ERR_FUNC
			    ("**********************ERROR, ERROR, ERROR**************************\n");
			BTIF_ERR_FUNC
			    ("BTIF Tx IRQ happened %d times (continiously), between %d.%d and %d.%d\n",
			     MAX_CONTINIOUS_TIMES, start_timer.tv_sec,
			     start_timer.tv_usec, end_timer.tv_usec,
			     end_timer.tv_usec);
		}
	} else if (vff_len == left_len) {
		flush_irq_counter = 0;
		superious_irq_counter = 0;
/*clear interrupt flag*/
		BTIF_CLR_BIT(TX_DMA_INT_FLAG(base), TX_DMA_INT_FLAG_MASK);
/*vFIFO data has been read by DMA controller, just disable tx dma's irq*/
		i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
	} else {
		superious_irq_counter++;
		if (100 <= superious_irq_counter) {
			BTIF_ERR_FUNC
			    ("**********************WARNING**************************\n");
			BTIF_ERR_FUNC("invalid irq condition, dump register\n");
			hal_dma_dump_reg(p_dma_info, REG_TX_DMA_ALL);
/*clear interrupt flag*/
			BTIF_CLR_BIT(TX_DMA_INT_FLAG(base),
				     TX_DMA_INT_FLAG_MASK);
/*disable tx dma's irq*/
			i_ret = hal_btif_dma_ier_ctrl(p_dma_info, false);
			superious_irq_counter = 0;
		}

		BTIF_DBG_FUNC
		    ("superious IRQ occurs, vff_len(%d), valid_size(%d), left_len(%d)\n",
		     vff_len, valid_size, left_len);
	}
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
Example #18
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
Example #19
static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising of the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx(). The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}
Example #20
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}

#ifdef CONFIG_USB_GADGET_S3C_OTGD_DMA_MODE
	/* for double word align */
	req->buf = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);

	if (!req->buf) {
		req->buf = skb->data;
		printk("%s: fail to kmalloc [req->buf = skb->data]\n", __FUNCTION__);
	}
	else
		memcpy((void *)req->buf, (void *)skb->data, skb->len);
#else
	req->buf = skb->data;
#endif

	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (dev->zlp)
		req->zero = 1;
	else if (length % in->maxpacket == 0)
		length++;
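	/* e.g. a 512-byte frame on a 512-byte-maxpacket endpoint: with zlp
	 * support the request is flagged zero-terminated; without it, one
	 * pad byte keeps the transfer from ending on a full packet */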

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;

#ifdef CONFIG_USB_GADGET_S3C_OTGD_DMA_MODE
		if (req->buf != skb->data)
			kfree(req->buf);
#endif
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
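The request freelist and queue-stop protocol above only works if the
completion handler undoes both steps. As a hedged sketch (not the
driver's actual tx_complete; the field names tx_reqs, req_lock and
tx_qlen, and the handler signature, are assumed from the example
above), the counterpart looks roughly like this:

static void tx_complete_sketch(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	unsigned long	flags;

	/* return the request to the freelist taken in eth_start_xmit() */
	spin_lock_irqsave(&dev->req_lock, flags);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	atomic_dec(&dev->tx_qlen);
	/* eth_start_xmit() stopped the queue when it took the last request */
	if (netif_queue_stopped(dev->net))
		netif_start_queue(dev->net);
	dev_kfree_skb_any(skb);
}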
Example #21
0
File: netvsc.c  Project: Kirill2013/kasan
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	unsigned long flag;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	bool try_batch;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];
	if (!out_channel) {
		out_channel = device->channel;
		q_idx = 0;
		packet->q_idx = 0;
	}
	packet->channel = out_channel;
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	spin_lock_irqsave(&msdp->lock, flag);
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msd_send = msdp->pkt;
			msdp->pkt = NULL;
			msdp->count = 0;
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt)
			netvsc_xmit_completion(msdp->pkt);

		if (packet->xmit_more && !packet->cp_partial) {
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		msd_send = msdp->pkt;
		msdp->pkt = NULL;
		msdp->count = 0;
		cur_send = packet;
	}

	spin_unlock_irqrestore(&msdp->lock, flag);

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			netvsc_xmit_completion(msd_send);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
Example #22
0
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 * @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.  If the caller is holding queue_lock of a queue, it can indicate
 * that with @locked_q.  This is an optimization hint and the caller is
 * allowed to pass in %NULL even when it's holding a queue_lock.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
	struct request_queue *last_q = locked_q;
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (locked_q)
		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/*
	 * Destroy @ioc.  This is a bit messy because icq's are chained
	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
	 * The inner ioc->lock should be held to walk our icq_list and then
	 * for each icq the outer matching queue_lock should be grabbed.
	 * I.e., we need to do reverse-order double-lock dancing.
	 *
	 * Another twist is that we are often called with one of the
	 * matching queue_locks held as indicated by @locked_q, which
	 * prevents performing double-lock dance for other queues.
	 *
	 * So, we do it in two stages.  The fast path uses the queue_lock
	 * the caller is holding and, if other queues need to be accessed,
	 * uses trylock to avoid introducing a locking dependency.  This
	 * can handle most cases, especially if @ioc was performing I/O on
	 * only a single device.
	 *
	 * If trylock doesn't cut it, we defer to @ioc->release_work which
	 * can do all the double-locking dancing.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags,
				 ioc_release_depth(locked_q));

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			if (last_q && last_q != locked_q)
				spin_unlock(last_q->queue_lock);
			last_q = NULL;

			if (!spin_trylock(this_q->queue_lock))
				break;
			last_q = this_q;
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q && last_q != locked_q)
		spin_unlock(last_q->queue_lock);

	spin_unlock_irqrestore(&ioc->lock, flags);

	/* if no icq is left, we're done; otherwise, kick release_work */
	if (hlist_empty(&ioc->icq_list))
		kmem_cache_free(iocontext_cachep, ioc);
	else
		schedule_work(&ioc->release_work);
}
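Since put_io_context() tolerates a NULL @ioc and a NULL @locked_q, the
common caller pattern is simply "detach, then put". A minimal,
hypothetical sketch (mirroring the shape of the kernel's
exit_io_context(), not its exact code):

static void exit_io_context_sketch(struct task_struct *task)
{
	struct io_context *ioc;

	/* detach the io_context under task_lock so nobody re-reads it */
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	/* no queue_lock is held here, so pass NULL as the hint */
	put_io_context(ioc, NULL);
}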
Example #23
0
static void cpm_uart_set_termios(struct uart_port *port,
				 struct termios *termios, struct termios *old)
{
	int baud;
	unsigned long flags;
	u16 cval, scval, prev_mode;
	int bits, sbits;
	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
	volatile smc_t *smcp = pinfo->smcp;
	volatile scc_t *sccp = pinfo->sccp;

	pr_debug("CPM uart[%d]:set_termios\n", port->line);

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);

	/* Character length programmed into the mode register is the
	 * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
	 * 1 or 2 stop bits, minus 1.
	 * The value 'bits' counts this for us.
	 */
	cval = 0;
	scval = 0;

	/* byte size */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		bits = 5;
		break;
	case CS6:
		bits = 6;
		break;
	case CS7:
		bits = 7;
		break;
	case CS8:
		bits = 8;
		break;
		/* Never happens, but GCC is too dumb to figure it out */
	default:
		bits = 8;
		break;
	}
	sbits = bits - 5;

	if (termios->c_cflag & CSTOPB) {
		cval |= SMCMR_SL;	/* Two stops */
		scval |= SCU_PSMR_SL;
		bits++;
	}

	if (termios->c_cflag & PARENB) {
		cval |= SMCMR_PEN;
		scval |= SCU_PSMR_PEN;
		bits++;
		if (!(termios->c_cflag & PARODD)) {
			cval |= SMCMR_PM_EVEN;
			scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP);
		}
	}

	/*
	 * Set up parity check flag
	 */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))

	port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV);
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= BD_SC_FR | BD_SC_PR;
	if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK))
		port->read_status_mask |= BD_SC_BR;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= BD_SC_BR;
		/*
		 * If we're ignoring parity and break indicators, ignore
		 * overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= BD_SC_OV;
	}
	/*
	 * !!! ignore all characters if CREAD is not set
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->read_status_mask &= ~BD_SC_EMPTY;

	spin_lock_irqsave(&port->lock, flags);

	/* The start bit has not been added (so don't, because we would just
	 * subtract it later), and we need to add one for the number of
	 * stop bits (there is always at least one).
	 */
	bits++;
	if (IS_SMC(pinfo)) {
		/* Set the mode register.  We want to keep a copy of the
		 * enables, because we want to put them back if they were
		 * present.
		 */
		prev_mode = smcp->smc_smcmr;
		smcp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART;
		smcp->smc_smcmr |= (prev_mode & (SMCMR_REN | SMCMR_TEN));
	} else {
		sccp->scc_psmr = (sbits << 12) | scval;
	}

	cpm_set_brg(pinfo->brg - 1, baud);
	spin_unlock_irqrestore(&port->lock, flags);
}
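The incremental bits++ bookkeeping above computes exactly the value the
comment describes: start bit + data bits + parity bit + stop bits,
minus one. A hedged, self-contained restatement of that arithmetic (the
helper name is made up for illustration; smcr_mk_clen() is the driver's
real macro):

/* character length as programmed into the SMC mode register:
 * 1 start + data + parity + stops - 1, e.g. 8N1 -> 9, 7E2 -> 10 */
static inline unsigned int cpm_char_len(unsigned int data_bits,
					unsigned int parity_bits,
					unsigned int stop_bits)
{
	return 1 + data_bits + parity_bits + stop_bits - 1;
}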
Example #24
0
File: isdnloop.c  Project: mdamt/linux
/*
 * Start a 'card'. Simulate card's boot message and set the phone
 * number(s) of the virtual 'S0-Interface'. Install D-channel
 * poll timer.
 *
 * Parameter:
 *   card  = pointer to card struct.
 *   sdefp = pointer to struct holding ioctl parameters.
 * Return:
 *   0 on success, -E??? otherwise.
 */
static int
isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
{
	unsigned long flags;
	isdnloop_sdef sdef;
	int i;

	if (card->flags & ISDNLOOP_FLAGS_RUNNING)
		return -EBUSY;
	if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
		return -EFAULT;

	for (i = 0; i < 3; i++) {
		if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
			return -EINVAL;
	}

	spin_lock_irqsave(&card->isdnloop_lock, flags);
	switch (sdef.ptype) {
	case ISDN_PTYPE_EURO:
		if (isdnloop_fake(card, "DRV1.23EC-Q.931-CAPI-CNS-BASIS-20.02.96",
				  -1)) {
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
			return -ENOMEM;
		}
		card->sil[0] = card->sil[1] = 4;
		if (isdnloop_fake(card, "TEI OK", 0)) {
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
			return -ENOMEM;
		}
		for (i = 0; i < 3; i++) {
			strlcpy(card->s0num[i], sdef.num[i],
				sizeof(card->s0num[0]));
		}
		break;
	case ISDN_PTYPE_1TR6:
		if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
				  -1)) {
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
			return -ENOMEM;
		}
		card->sil[0] = card->sil[1] = 4;
		if (isdnloop_fake(card, "TEI OK", 0)) {
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
			return -ENOMEM;
		}
		strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
		card->s0num[1][0] = '\0';
		card->s0num[2][0] = '\0';
		break;
	default:
		spin_unlock_irqrestore(&card->isdnloop_lock, flags);
		printk(KERN_WARNING "isdnloop: Illegal D-channel protocol %d\n",
		       sdef.ptype);
		return -EINVAL;
	}
	init_timer(&card->st_timer);
	card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
	card->st_timer.function = isdnloop_polldchan;
	card->st_timer.data = (unsigned long) card;
	add_timer(&card->st_timer);
	card->flags |= ISDNLOOP_FLAGS_RUNNING;
	spin_unlock_irqrestore(&card->isdnloop_lock, flags);
	return 0;
}
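The init_timer()/expires/function/data sequence above is the pre-4.15
timer API. On later kernels the same D-channel poll timer would be set
up with timer_setup() and from_timer(); a hedged sketch (assuming
st_timer is a struct timer_list embedded in isdnloop_card):

static void isdnloop_polldchan_sketch(struct timer_list *t)
{
	isdnloop_card *card = from_timer(card, t, st_timer);

	/* ... poll the message queue as isdnloop_polldchan() does ... */
}

static void isdnloop_arm_poll_timer(isdnloop_card *card)
{
	timer_setup(&card->st_timer, isdnloop_polldchan_sketch, 0);
	mod_timer(&card->st_timer, jiffies + ISDNLOOP_TIMER_DCREAD);
}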
Example #25
0
static void smartio_interrupt_task(void *arg)
{
	unchar	code;
	unsigned long flags;
	unchar  dummy;

	spin_lock_irqsave(&smartio_busy_lock, flags);
	if (atomic_read(&smartio_busy) == 1) {
		spin_unlock_irqrestore(&smartio_busy_lock, flags);
		/* already busy: retry from the timer queue instead of
		 * running concurrently */
		queue_task(&tq_smartio, &tq_timer);
		return;
	} else {
		atomic_set(&smartio_busy, 1);
		spin_unlock_irqrestore(&smartio_busy_lock, flags);
	}

	/* Read the SMARTIO interrupt status to check which interrupt
	 * occurred, and clear the SMARTIO interrupt */
	send_SSP_msg((unchar *) &RD_INT_CMD, 2);
	code = (unchar) (read_SSP_response(1) & 0xFF);

#ifdef CONFIG_VT
	if (code  & 0x04) {					// Keyboard Interrupt
		kbd_int++;
		/* Read Scan code */
		send_SSP_msg((unchar *) &RD_KBD_CMD, 2);
		code = (unchar) (read_SSP_response(1) & 0xFF);
		dummy = code & 0x80;
		if ((code == 0xE0) || (code == 0xE1) || (code == 0xF0)) {	// combined code
			if (code == 0xF0) {
				if (!previous_code) {
					code = 0xE0;
					previous_code = 0xF0;
				} else {
					code = mf_two_kbdmap[code & 0x7F] | dummy;
					previous_code = 0;
				}
			} else if (code == 0xE0) {
				if (previous_code != 0) {
					code = mf_two_kbdmap[code & 0x7F] | dummy;
					previous_code = 0;
				} else previous_code = code;
			} else {						// 0xE1
				if (!previous_code) {
					code = mf_two_kbdmap[code & 0x7F] | dummy;
					previous_code = 0;
				} else {
					previous_code = code;
				}
			}
		} else {
			if (code == 0x03) {
				f_five_pressed = 1;
			} else if (code == 0x83) {
				if (f_five_pressed != 0) {
					f_five_pressed = 0;
					code = 0x03;
				} else if (f_seven_pressed == 0) {
					f_seven_pressed = 1;
					code = 2;
					dummy = 0;
				} else {
					f_seven_pressed = 0;
					code = 2;
				}
			}
			previous_code = 0;
			code &= 0x7F;
			code = mf_two_kbdmap[code] | dummy;
		}
		sniffed_value = (ushort)code;
		if (SNIFFER) wake_up_interruptible(&sniffer_queue);
		if (SNIFFMODE == PASSIVE) {
			handle_scancode( code, (code & 0x80) ? 0 : 1 );
			if (code & 0x80) {
				wake_up_interruptible(&keyboard_done_queue);
				mdelay(10);		// this makes the whole thing a bit more stable.
							// Keyboard handling can be corrupted when hitting
							// thousands of keys like crazy: kbd_translate might catch
							// up with the irq routine, or there may simply be a buffer
							// overflow on the serial device; somehow it loses some key
							// sequences. If a break code is lost or corrupted, the
							// keyboard starts to autorepeat like crazy and appears to
							// hang. This needs further investigation! Thomas
				kbd_press_flag = 0;
			}
			else
				kbd_press_flag = 1;
		}
		code = 0;	// prevent the following if-blocks from reacting
	}
#endif
	// ADC resolution is 10bit (0x000 ~ 0x3FF)
	if (code & 0x02) {					// ADC Complete Interrupt
		adc_int++;
		send_SSP_msg((unchar *) &RD_ADC_CMD, 2);
		adc_value = (ushort) (read_SSP_response(2) & 0x3FF);
		wake_up_interruptible(&smartio_adc_queue);
	}

	if (code & 0x08) { 					// Keypad interrupt
		kpd_int++;
		send_SSP_msg((unchar *) &RD_KPD_CMD, 2);
		kpd_value = (unchar) (read_SSP_response(1) & 0xFF);
		wake_up_interruptible(&smartio_kpd_queue);
	}

	spin_lock_irqsave(&smartio_busy_lock, flags);
	atomic_set(&smartio_busy, 0);
	spin_unlock_irqrestore(&smartio_busy_lock, flags);

	enable_irq(ADS_AVR_IRQ);

	wake_up_interruptible(&smartio_queue);
}
Example #26
0
File: isdnloop.c  Project: mdamt/linux
/*
 * Poll a virtual cards message queue.
 * If there are new status-replies from the card, copy them to
 * ringbuffer for reading on /dev/isdnctrl and call
 * isdnloop_parse_status() for processing them. Watch for special
 * Firmware bootmessage and parse it, to get the D-Channel protocol.
 * If there are B-Channels open, initiate a timer-callback to
 * isdnloop_pollbchan().
 * This routine is called periodically via timer interrupt.
 *
 * Parameter:
 *   data = pointer to card struct
 */
static void
isdnloop_polldchan(unsigned long data)
{
	isdnloop_card *card = (isdnloop_card *) data;
	struct sk_buff *skb;
	int avail;
	int left;
	u_char c;
	int ch;
	unsigned long flags;
	u_char *p;
	isdn_ctrl cmd;

	skb = skb_dequeue(&card->dqueue);
	if (skb)
		avail = skb->len;
	else
		avail = 0;
	for (left = avail; left > 0; left--) {
		c = *skb->data;
		skb_pull(skb, 1);
		isdnloop_putmsg(card, c);
		card->imsg[card->iptr] = c;
		if (card->iptr < 59)
			card->iptr++;
		if (!skb->len) {
			avail++;
			isdnloop_putmsg(card, '\n');
			card->imsg[card->iptr] = 0;
			card->iptr = 0;
			if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
			    card->imsg[1] <= '2' && card->imsg[2] == ';') {
				ch = (card->imsg[1] - '0') - 1;
				p = &card->imsg[3];
				isdnloop_parse_status(p, ch, card);
			} else {
				p = card->imsg;
				if (!strncmp(p, "DRV1.", 5)) {
					printk(KERN_INFO "isdnloop: (%s) %s\n", CID, p);
					if (!strncmp(p + 7, "TC", 2)) {
						card->ptype = ISDN_PTYPE_1TR6;
						card->interface.features |= ISDN_FEATURE_P_1TR6;
						printk(KERN_INFO
						       "isdnloop: (%s) 1TR6-Protocol loaded and running\n", CID);
					}
					if (!strncmp(p + 7, "EC", 2)) {
						card->ptype = ISDN_PTYPE_EURO;
						card->interface.features |= ISDN_FEATURE_P_EURO;
						printk(KERN_INFO
						       "isdnloop: (%s) Euro-Protocol loaded and running\n", CID);
					}
					continue;

				}
			}
		}
	}
	if (avail) {
		cmd.command = ISDN_STAT_STAVAIL;
		cmd.driver = card->myid;
		cmd.arg = avail;
		card->interface.statcallb(&cmd);
	}
	if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE))
		if (!(card->flags & ISDNLOOP_FLAGS_RBTIMER)) {
			/* schedule b-channel polling */
			card->flags |= ISDNLOOP_FLAGS_RBTIMER;
			spin_lock_irqsave(&card->isdnloop_lock, flags);
			del_timer(&card->rb_timer);
			card->rb_timer.function = isdnloop_pollbchan;
			card->rb_timer.data = (unsigned long) card;
			card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
			add_timer(&card->rb_timer);
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
		}
	/* schedule again */
	spin_lock_irqsave(&card->isdnloop_lock, flags);
	card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
	add_timer(&card->st_timer);
	spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
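For reference, the status-message framing the loop above recognizes can
be stated explicitly (the message text is illustrative, derived only
from the parsing code above): a complete line of the form "0<n>;<status>",
with <n> in '0'..'2', is routed to isdnloop_parse_status() with channel
index <n> - 1.

/* "02;..." -> isdnloop_parse_status(p = "...", ch = 1, card), i.e. a
 * status reply for the second B-channel; "00;..." yields ch = -1, a
 * reply not tied to a specific channel. Any other complete line is
 * checked for the "DRV1." firmware boot message instead. */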
Example #27
0
__s32 Video_Operation_In_Vblanking(__u32 sel, __u32 tcon_index)
{
	__u32 id = 0;

#if 0
	for(id = 0; id<4; id++)
	{
		if((g_video[sel][id].enable == TRUE) && (g_video[sel][id].have_got_frame == TRUE))
		{
			Hal_Set_Frame(sel, tcon_index, id);
		}
	}
#else  //de-interlace
	for(id = 0; id < 2; id++)
	{
		if((g_interlace[id].enable) && (gdisp.scaler[id].status & SCALER_USED))
		{
			unsigned long flags;
			dit_mode_t dit_mod;
			__u32 maf_linestride = 0;
			__u32 pre_frame_addr_luma = 0;
			__u32 pre_frame_addr_chroma = 0;
			__scal_src_type_t in_type;
			__scal_out_type_t out_type;
			__scal_src_size_t in_size;
			__scal_out_size_t out_size;
			__scal_scan_mod_t in_scan;
			__scal_scan_mod_t out_scan;
			__disp_scaler_t *scaler = &(gdisp.scaler[id]);
			__scal_buf_addr_t scal_addr;
			__bool fetch_bot;
			__bool  tempdiff_en;
			__bool  diagintp_en;

#ifdef __LINUX_OSAL__
			spin_lock_irqsave(&g_video[sel][0].flag_lock, flags);
#endif
			fetch_bot = scaler->in_fb.b_top_field_first ^ g_interlace[id].first_field;
			g_interlace[id].first_field = false;
#ifdef __LINUX_OSAL__
			spin_unlock_irqrestore(&g_video[sel][0].flag_lock, flags);
#endif

			dit_mod = dit_mode_default[id];
			maf_linestride = (((scaler->src_win.width + 31) & 0xffffffe0)*2/8 + 31) & 0xffffffe0;

			if(!g_interlace[id].first_frame)
			{
				if(dit_mod == DIT_MODE_MAF)
				{
					pre_frame_addr_luma = g_interlace[id].pre_frame_addr_luma;
					pre_frame_addr_chroma = g_interlace[id].pre_frame_addr_chroma;
					tempdiff_en = true;
					diagintp_en = true;
				}
				else
				{
					tempdiff_en = false;
					diagintp_en = false;
				}
			}
			else
			{
				tempdiff_en = false;
				g_interlace[id].first_frame = false;
			}

			in_type.fmt = Scaler_sw_para_to_reg(0,scaler->in_fb.mode,scaler->in_fb.format,scaler->in_fb.seq);
			in_type.mod = Scaler_sw_para_to_reg(1,scaler->in_fb.mode,scaler->in_fb.format,scaler->in_fb.seq);
			in_type.ps = Scaler_sw_para_to_reg(2,scaler->in_fb.mode,scaler->in_fb.format,scaler->in_fb.seq);
			in_type.byte_seq = 0;
			in_type.sample_method = 0;

			in_scan.field = false;
			in_scan.bottom = fetch_bot;
			out_scan.field = (gdisp.screen[sel].de_flicker_status & DE_FLICKER_USED) ?
				0: gdisp.screen[sel].b_out_interlace;

			in_size.src_width = scaler->in_fb.size.width;
			in_size.src_height = scaler->in_fb.size.height;
			in_size.x_off =  scaler->src_win.x;
			in_size.y_off =  scaler->src_win.y;
			in_size.scal_height=  scaler->src_win.height;
			in_size.scal_width=  scaler->src_win.width;

			out_type.byte_seq =  scaler->out_fb.seq;
			out_type.fmt =  scaler->out_fb.format;

			out_size.width =  scaler->out_size.width;
			out_size.height =  scaler->out_size.height;

			scal_addr.ch0_addr= (__u32)OSAL_VAtoPA((void*)(scaler->in_fb.addr[0]));
			scal_addr.ch1_addr= (__u32)OSAL_VAtoPA((void*)(scaler->in_fb.addr[1]));
			scal_addr.ch2_addr= (__u32)OSAL_VAtoPA((void*)(scaler->in_fb.addr[2]));

			DE_SCAL_Config_Src(id,&scal_addr,&in_size,&in_type,FALSE,FALSE);
			DE_SCAL_Agth_Config(sel, &in_type, &in_size, &out_size, 0, 0, 0);

			DE_SCAL_Set_Init_Phase(id, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, 1);
			DE_SCAL_Set_Scaling_Factor(id, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type);
			//DE_SCAL_Set_Scaling_Coef_for_video(scaler_index, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, 0x00000101);
			DE_SCAL_Set_Scaling_Coef(id, &in_scan, &in_size, &in_type, &out_scan, &out_size, &out_type, scaler->smooth_mode);
			DE_SCAL_Set_Out_Size(id, &out_scan,&out_type, &out_size);
			DE_SCAL_Set_Di_Ctrl(id,1,dit_mod,diagintp_en,tempdiff_en);
			DE_SCAL_Set_Di_PreFrame_Addr(id,pre_frame_addr_luma,pre_frame_addr_chroma);
			DE_SCAL_Set_Di_MafFlag_Src(id,g_interlace[id].cur_maf_flag_addr,
				g_interlace[id].pre_maf_flag_addr,maf_linestride);
		}
		else
		{
			DE_SCAL_Set_Di_Ctrl(id,0,DIT_MODE_WEAVE,0,0);
		}
	}

#endif
	return DIS_SUCCESS;
}
Example #28
0
File: isdnloop.c  Project: mdamt/linux
/*
 * Try finding a listener for an outgoing call.
 *
 * Parameter:
 *   card = pointer to calling card.
 *   p    = pointer to ICN-type setup-string.
 *   lch  = channel of calling card.
 *   cmd  = pointer to struct to be filled when parsing setup.
 * Return:
 *   0 = found match, alerting should happen.
 *   1 = found matching number but it is busy.
 *   2 = no matching listener.
 *   3 = found matching number but SI does not match.
 */
static int
isdnloop_try_call(isdnloop_card *card, char *p, int lch, isdn_ctrl *cmd)
{
	isdnloop_card *cc = cards;
	unsigned long flags;
	int ch;
	int num_match;
	int i;
	char *e;
	char nbuf[32];

	isdnloop_parse_setup(p, cmd);
	while (cc) {
		for (ch = 0; ch < 2; ch++) {
			/* Exclude ourself */
			if ((cc == card) && (ch == lch))
				continue;
			num_match = 0;
			switch (cc->ptype) {
			case ISDN_PTYPE_EURO:
				for (i = 0; i < 3; i++)
					if (!(strcmp(cc->s0num[i], cmd->parm.setup.phone)))
						num_match = 1;
				break;
			case ISDN_PTYPE_1TR6:
				e = cc->eazlist[ch];
				while (*e) {
					sprintf(nbuf, "%s%c", cc->s0num[0], *e);
					if (!(strcmp(nbuf, cmd->parm.setup.phone)))
						num_match = 1;
					e++;
				}
			}
			if (num_match) {
				spin_lock_irqsave(&card->isdnloop_lock, flags);
				/* channel idle? */
				if (!(cc->rcard[ch])) {
					/* Check SI */
					if (!(si2bit[cmd->parm.setup.si1] & cc->sil[ch])) {
						spin_unlock_irqrestore(&card->isdnloop_lock, flags);
						return 3;
					}
					/* ch is idle, si and number matches */
					cc->rcard[ch] = card;
					cc->rch[ch] = lch;
					card->rcard[lch] = cc;
					card->rch[lch] = ch;
					spin_unlock_irqrestore(&card->isdnloop_lock, flags);
					return 0;
				} else {
					spin_unlock_irqrestore(&card->isdnloop_lock, flags);
					/* num matches, but busy */
					if (ch == 1)
						return 1;
				}
			}
		}
		cc = cc->next;
	}
	return 2;
}
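A hedged sketch of how a caller might act on the four return codes
documented above (the function and the rejection causes are
illustrative, not the driver's actual D-channel handler):

static void isdnloop_dispatch_call_sketch(isdnloop_card *card, char *p,
					  int lch, isdn_ctrl *cmd)
{
	switch (isdnloop_try_call(card, p, lch, cmd)) {
	case 0:		/* idle listener matched: proceed with alerting */
		/* ... signal call proceeding / alerting here ... */
		break;
	case 1:		/* number matched, but the channel is busy */
	case 3:		/* number matched, but the SI did not */
		/* ... reject as busy / incompatible destination ... */
		break;
	case 2:		/* nobody listens on that number */
		/* ... reject as no user responding ... */
		break;
	}
}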
Example #29
0
void disp_set_interlace_info(__u32 sel, __u32 scaler_id, __u32 layer_id, __disp_scaler_t *scaler)
{
	unsigned long flags;

	if((false == scaler->in_fb.b_interlace)
		|| (scaler->in_fb.format != DISP_FORMAT_YUV420)
		|| ((scaler->in_fb.mode != DISP_MOD_MB_UV_COMBINED)
		    && (scaler->in_fb.mode != DISP_MOD_NON_MB_UV_COMBINED))
		|| (gdisp.screen[sel].de_flicker_status & DE_FLICKER_USED)
		|| (dit_mode_default[scaler_id] == 0xff)
		//|| ((scaler->in_fb.b_trd_src == 1) && (scaler->b_trd_out == 1) && (scaler->in_fb.size.width < 1920))
		)
	{
		scaler->in_fb.b_interlace = false;
		g_interlace[scaler_id].enable = false;
		return;
	}

	if(false != g_interlace[scaler_id].enable) //not the first interlace frame
	{
		if(g_interlace[scaler_id].cur_frame_addr_luma != scaler->in_fb.addr[0]) //different frame
		{
#ifdef __LINUX_OSAL__
			spin_lock_irqsave(&g_video[sel][0].flag_lock, flags);
#endif
			g_interlace[scaler_id].layer_id = layer_id;
			g_interlace[scaler_id].pre_frame_addr_luma = g_interlace[scaler_id].cur_frame_addr_luma;
			g_interlace[scaler_id].cur_frame_addr_luma = scaler->in_fb.addr[0];
			g_interlace[scaler_id].pre_frame_addr_chroma = g_interlace[scaler_id].cur_frame_addr_chroma;
			g_interlace[scaler_id].cur_frame_addr_chroma = scaler->in_fb.addr[1];
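			/* swap cur/pre MAF flag addresses via the XOR-swap idiom */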
			g_interlace[scaler_id].cur_maf_flag_addr ^=  g_interlace[scaler_id].pre_maf_flag_addr;
			g_interlace[scaler_id].pre_maf_flag_addr ^=  g_interlace[scaler_id].cur_maf_flag_addr;
			g_interlace[scaler_id].cur_maf_flag_addr ^=  g_interlace[scaler_id].pre_maf_flag_addr;
			g_interlace[scaler_id].first_field = true;
#ifdef __LINUX_OSAL__
			spin_unlock_irqrestore(&g_video[sel][0].flag_lock, flags);
#endif
		}
	}
	else  // the first interlace frame
	{
#ifdef __LINUX_OSAL__
		spin_lock_irqsave(&g_video[sel][0].flag_lock, flags);
#endif
		g_interlace[scaler_id].layer_id = layer_id;
		g_interlace[scaler_id].cur_frame_addr_luma =
			g_interlace[scaler_id].pre_frame_addr_luma = scaler->in_fb.addr[0];
		g_interlace[scaler_id].cur_frame_addr_chroma =
			g_interlace[scaler_id].pre_frame_addr_chroma = scaler->in_fb.addr[1];
		g_interlace[scaler_id].pre_maf_flag_addr = 
			((__u32)maf_flag_mem[scaler_id][0] > 0x40000000) ? 
			((__u32)maf_flag_mem[scaler_id][0] - 0x40000000) : 
			(__u32)maf_flag_mem[scaler_id][0];
		g_interlace[scaler_id].cur_maf_flag_addr = 
			((__u32)maf_flag_mem[scaler_id][1] > 0x40000000) ? 
			((__u32)maf_flag_mem[scaler_id][1] - 0x40000000) : 
			(__u32)maf_flag_mem[scaler_id][1];
		g_interlace[scaler_id].first_field = true;
		g_interlace[scaler_id].first_frame = true;
		g_interlace[scaler_id].enable = true;
#ifdef __LINUX_OSAL__
		spin_unlock_irqrestore(&g_video[sel][0].flag_lock, flags);
#endif
	}
}
Example #30
0
static int g19_probe(struct hid_device *hdev,
                     const struct hid_device_id *id)
{
	unsigned long irq_flags;
	int error;
	struct gcommon_data *gdata;
	struct g19_data *g19data;
	int i;
	int led_num;
	struct usb_interface *intf;
	struct usb_device *usbdev;
	struct list_head *feature_report_list =
			    &hdev->report_enum[HID_FEATURE_REPORT].report_list;
	struct hid_report *report;
	char *led_name;

	dev_dbg(&hdev->dev, "Logitech G19 HID hardware probe...\n");

	/* Get the usb device to send the start report on */
	intf = to_usb_interface(hdev->dev.parent);
	usbdev = interface_to_usbdev(intf);

	/*
	 * Let's allocate the g19 data structure, set some reasonable
	 * defaults, and associate it with the device
	 */
	gdata = kzalloc(sizeof(struct gcommon_data), GFP_KERNEL);
	if (gdata == NULL) {
		dev_err(&hdev->dev, "can't allocate space for Logitech G19 device attributes\n");
		error = -ENOMEM;
		goto err_no_cleanup;
	}

	g19data = kzalloc(sizeof(struct g19_data), GFP_KERNEL);
	if (g19data == NULL) {
		dev_err(&hdev->dev, "can't allocate space for Logitech G19 device attributes\n");
		error = -ENOMEM;
		goto err_cleanup_gdata;
	}
	gdata->data = g19data;

	spin_lock_init(&gdata->lock);

	init_completion(&g19data->ready);

	gdata->hdev = hdev;

	g19data->ep1_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (g19data->ep1_urb == NULL) {
		dev_err(&hdev->dev, G19_NAME ": ERROR: can't alloc ep1 urb stuff\n");
		error = -ENOMEM;
		goto err_cleanup_g19data;
	}

	hid_set_drvdata(hdev, gdata);

	dbg_hid("Preparing to parse " G19_NAME " hid reports\n");

	/* Parse the device reports and start it up */
	error = hid_parse(hdev);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " device report parse failed\n");
		error = -EINVAL;
		goto err_cleanup_ep1_urb;
	}

	error = hid_hw_start(hdev, HID_CONNECT_DEFAULT | HID_CONNECT_HIDINPUT_FORCE);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " hardware start failed\n");
		error = -EINVAL;
		goto err_cleanup_ep1_urb;
	}

	dbg_hid(G19_NAME " claimed: %d\n", hdev->claimed);

	error = hdev->ll_driver->open(hdev);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " failed to open input interrupt pipe for key and joystick events\n");
		error = -EINVAL;
		goto err_cleanup_hw_start;
	}

	/* Set up the input device for the key I/O */
	gdata->input_dev = input_allocate_device();
	if (gdata->input_dev == NULL) {
		dev_err(&hdev->dev, G19_NAME " error initializing the input device");
		error = -ENOMEM;
		goto err_cleanup_hw_start;
	}

	input_set_drvdata(gdata->input_dev, gdata);

	gdata->input_dev->name = G19_NAME;
	gdata->input_dev->phys = hdev->phys;
	gdata->input_dev->uniq = hdev->uniq;
	gdata->input_dev->id.bustype = hdev->bus;
	gdata->input_dev->id.vendor = hdev->vendor;
	gdata->input_dev->id.product = hdev->product;
	gdata->input_dev->id.version = hdev->version;
	gdata->input_dev->dev.parent = hdev->dev.parent;
	gdata->input_dev->keycode = gdata->input_data.keycode;
	gdata->input_dev->keycodemax = G19_KEYMAP_SIZE;
	gdata->input_dev->keycodesize = sizeof(unsigned int);
	gdata->input_dev->setkeycode = ginput_setkeycode;
	gdata->input_dev->getkeycode = ginput_getkeycode;

	input_set_capability(gdata->input_dev, EV_KEY, KEY_UNKNOWN);
	gdata->input_dev->evbit[0] |= BIT_MASK(EV_REP);

	gdata->input_data.notify_keymap_switched = g19_notify_keymap_switched;

	error = ginput_alloc(gdata, G19_KEYS);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " error allocating memory for the input device");
		goto err_cleanup_input_dev;
	}

	g19_initialize_keymap(gdata);

	error = input_register_device(gdata->input_dev);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " error registering the input device");
		error = -EINVAL;
		goto err_cleanup_input_dev_data;
	}

	if (list_empty(feature_report_list)) {
		dev_err(&hdev->dev, "no feature report found\n");
		error = -ENODEV;
		goto err_cleanup_input_dev_reg;
	}
	dbg_hid(G19_NAME " feature report found\n");

	list_for_each_entry(report, feature_report_list, list) {
		switch (report->id) {
		case 0x04:
			g19data->feature_report_4 = report;
			break;
		case 0x05:
			g19data->led_report = report;
			break;
		case 0x06:
			g19data->start_input_report = report;
			break;
		case 0x07:
			g19data->backlight_report = report;
			break;
		default:
			break;
		}
		dbg_hid(G19_NAME " Feature report: id=%u type=%u size=%u maxfield=%u report_count=%u\n",
		        report->id, report->type, report->size,
		        report->maxfield, report->field[0]->report_count);
	}

	dbg_hid("Found all reports\n");

	/* Create the LED structures */
	for (i = 0; i < LED_COUNT; i++) {
		g19data->led_cdev[i] = kzalloc(sizeof(struct led_classdev), GFP_KERNEL);
		if (g19data->led_cdev[i] == NULL) {
			dev_err(&hdev->dev, G19_NAME " error allocating memory for led %d", i);
			error = -ENOMEM;
			goto err_cleanup_led_structs;
		}
		/* Set the accessor functions by copying from template*/
		*(g19data->led_cdev[i]) = g19_led_cdevs[i];

		/*
		 * Allocate memory for the LED name
		 *
		 * Since led_classdev->name is a const char* we'll use an
		 * intermediate until the name is formatted with sprintf().
		 */
		led_name = kzalloc(sizeof(char)*20, GFP_KERNEL);
		if (led_name == NULL) {
			dev_err(&hdev->dev, G19_NAME " error allocating memory for led %d name", i);
			error = -ENOMEM;
			goto err_cleanup_led_structs;
		}
		switch (i) {
		case G19_LED_M1:
		case G19_LED_M2:
		case G19_LED_M3:
			sprintf(led_name, "g19_%d:orange:m%d", hdev->minor, i+1);
			break;
		case G19_LED_MR:
			sprintf(led_name, "g19_%d:red:mr", hdev->minor);
			break;
		case G19_LED_BL_R:
			sprintf(led_name, "g19_%d:red:bl", hdev->minor);
			break;
		case G19_LED_BL_G:
			sprintf(led_name, "g19_%d:green:bl", hdev->minor);
			break;
		case G19_LED_BL_B:
			sprintf(led_name, "g19_%d:blue:bl", hdev->minor);
			break;
		case G19_LED_BL_SCREEN:
			sprintf(led_name, "g19_%d:white:screen", hdev->minor);
			break;
		}
		g19data->led_cdev[i]->name = led_name;
	}

	for (i = 0; i < LED_COUNT; i++) {
		led_num = i;
		error = led_classdev_register(&hdev->dev, g19data->led_cdev[i]);
		if (error < 0) {
			dev_err(&hdev->dev, G19_NAME " error registering led %d", i);
			error = -EINVAL;
			goto err_cleanup_registered_leds;
		}
	}
	/* all LEDs registered; make the error paths below unregister every one */
	led_num = LED_COUNT;

	gdata->gfb_data = gfb_probe(hdev, GFB_PANEL_TYPE_320_240_16);
	if (gdata->gfb_data == NULL) {
		dev_err(&hdev->dev, G19_NAME " error registering framebuffer\n");
		error = -EINVAL;
		goto err_cleanup_registered_leds;
	}

	dbg_hid("Waiting for G19 to activate\n");

	/* Add the sysfs attributes */
	error = sysfs_create_group(&(hdev->dev.kobj), &g19_attr_group);
	if (error) {
		dev_err(&hdev->dev, G19_NAME " failed to create sysfs group attributes\n");
		goto err_cleanup_gfb;
	}

	/*
	 * Wait here for stage 1 (substages 1-3) to complete
	 */
	wait_for_completion_timeout(&g19data->ready, HZ);

	/* Protect data->ready_stages before checking whether we're ready to proceed */
	spin_lock_irqsave(&gdata->lock, irq_flags);
	if (g19data->ready_stages != G19_READY_STAGE_1) {
		dev_warn(&hdev->dev, G19_NAME " hasn't completed stage 1 yet, forging ahead with initialization\n");
		/* Force the stage */
		g19data->ready_stages = G19_READY_STAGE_1;
	}
	init_completion(&g19data->ready);
	g19data->ready_stages |= G19_READY_SUBSTAGE_4;
	spin_unlock_irqrestore(&gdata->lock, irq_flags);

	/*
	 * Send the init report, then follow with the input report to trigger
	 * report 6 and wait for us to get a response.
	 */
	g19_feature_report_4_send(hdev, G19_REPORT_4_INIT);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)

	hid_hw_request(hdev, g19data->start_input_report, HID_REQ_GET_REPORT);

#else

	usbhid_submit_report(hdev, g19data->start_input_report, USB_DIR_IN);

#endif

	wait_for_completion_timeout(&g19data->ready, HZ);

	/* Protect g19data->ready_stages before checking whether we're ready to proceed */
	spin_lock_irqsave(&gdata->lock, irq_flags);
	if (g19data->ready_stages != G19_READY_STAGE_2) {
		dev_warn(&hdev->dev, G19_NAME " hasn't completed stage 2 yet, forging ahead with initialization\n");
		/* Force the stage */
		g19data->ready_stages = G19_READY_STAGE_2;
	}
	init_completion(&g19data->ready);
	g19data->ready_stages |= G19_READY_SUBSTAGE_6;
	spin_unlock_irqrestore(&gdata->lock, irq_flags);

	/*
	 * Clear the LEDs
	 */
	g19_led_send(hdev);

	g19data->rgb[0] = G19_DEFAULT_RED;
	g19data->rgb[1] = G19_DEFAULT_GREEN;
	g19data->rgb[2] = G19_DEFAULT_BLUE;
	g19_rgb_send(hdev);

	g19data->screen_bl = G19_DEFAULT_BRIGHTNESS;
	g19_screen_bl_send(hdev);

	/*
	 * Send the finalize report, then follow with the input report to trigger
	 * report 6 and wait for us to get a response.
	 */
	g19_feature_report_4_send(hdev, G19_REPORT_4_FINALIZE);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)

	hid_hw_request(hdev, g19data->start_input_report, HID_REQ_GET_REPORT);
	hid_hw_request(hdev, g19data->start_input_report, HID_REQ_GET_REPORT);

#else

	usbhid_submit_report(hdev, g19data->start_input_report, USB_DIR_IN);
	usbhid_submit_report(hdev, g19data->start_input_report, USB_DIR_IN);

#endif

	wait_for_completion_timeout(&g19data->ready, HZ);

	/* Protect data->ready_stages before checking whether we're ready to proceed */
	spin_lock_irqsave(&gdata->lock, irq_flags);

	if (g19data->ready_stages != G19_READY_STAGE_3) {
		dev_warn(&hdev->dev, G19_NAME " hasn't completed stage 3 yet, forging ahead with initialization\n");
		/* Force the stage */
		g19data->ready_stages = G19_READY_STAGE_3;
	} else {
		dbg_hid(G19_NAME " stage 3 complete\n");
	}

	spin_unlock_irqrestore(&gdata->lock, irq_flags);

	ginput_set_keymap_switching(gdata, 1);

	g19_ep1_read(hdev);

	dbg_hid("G19 activated and initialized\n");

	/* Everything went well */
	return 0;

err_cleanup_gfb:
	gfb_remove(gdata->gfb_data);

err_cleanup_registered_leds:
	for (i = 0; i < led_num; i++)
		led_classdev_unregister(g19data->led_cdev[i]);

err_cleanup_led_structs:
	for (i = 0; i < LED_COUNT; i++) {
		if (g19data->led_cdev[i] != NULL) {
			if (g19data->led_cdev[i]->name != NULL)
				kfree(g19data->led_cdev[i]->name);
			kfree(g19data->led_cdev[i]);
		}
	}

err_cleanup_input_dev_reg:
	input_unregister_device(gdata->input_dev);

err_cleanup_input_dev_data:
	ginput_free(gdata);

err_cleanup_input_dev:
	input_free_device(gdata->input_dev);

err_cleanup_hw_start:
	hid_hw_stop(hdev);

err_cleanup_ep1_urb:
	usb_free_urb(g19data->ep1_urb);

err_cleanup_g19data:
	kfree(g19data);

err_cleanup_gdata:
	kfree(gdata);

err_no_cleanup:
	hid_set_drvdata(hdev, NULL);
	return error;
}