Code example #1
/*
 * The typical workload of the driver:
 *   Handle the network interface interrupts.
 */
static void
net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev;
	struct net_local *lp;
	int status = 0;

	BUGMSG("net_interrupt..");
	if (dev_id == NULL) {
		printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
		return;
	}

	dev = (struct net_device *) dev_id;
	lp = (struct net_local *) dev->priv;

	status = lp->uEth->INTST;

	/* RX Start: No frame available during frame RX */
	if(status & ETHINTSTRXSTART) {
		printk("PANIC: Ethernet RX Queue is empty: this should NEVER Occur!!!\n");
		printk("rxq_ptr = %d\n", lp->rxq_ptr);
	}

	/* RX End: a frame is received successfully */
	if(status & ETHINTSTRXEND) {
		BUGMSG("RX end..");
		do {
			unsigned int rx_packet_stat = lp->uEth->RXSTAT;

			if(rx_packet_stat & ETHRXSTATERROR) {
				printk("RX Packet error 0x%.8x\n", rx_packet_stat);
				lp->stats.rx_dropped++;
				/* We reuse the frame */
				lp->uEth->RXADDR = (int)lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE -1)];
			} else {
				unsigned int length = (rx_packet_stat & 0x3fff);
				struct sk_buff *skb;
				struct sk_buff *newskb;

				BUGMSG("rxq %d, skblen %d..", lp->rxq_ptr, length);
				/* The received skb */
				skb = lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE -1)];
				skb_put(skb, length);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += length;

				/* Alloc new skb for rx_frame ringbuffer */
				newskb = dev_alloc_skb( PKT_BUF_SZ );
				if (newskb == NULL) {
					/* We assume that we can consume and produce the RX ring
					 * buffer at the same time. In this case, we cannot
					 * produce so we will eventually crash ...
					 */
					printk("Cannot allocate skb. This is very bad.... We will CRASH!\n");
			} else {
				/* Prepare the replacement buffer for the ring */
				newskb->dev = dev;
				/* word align IP header */
				skb_reserve(newskb, 2);
				lp->rx_frames[lp->rxq_ptr & (ETH_RXQ_SIZE - 1)] = newskb;
				/* Put in Ethernet RX queue */
				lp->uEth->RXADDR = (int) newskb->data;
			}
			lp->rxq_ptr++;
			/* Hand the received frame to the stack; eth_type_trans()
			 * must run on the received skb, not on the new ring buffer */
			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
			netif_rx(skb);
			}
		} while(lp->uEth->INTST & ETHINTSTRXEND);
	}

	if(status & ETHINTSTTXEND) {
		BUGMSG("TX end..");
		do {
			unsigned int tx_packet_stat = lp->uEth->TXSTAT;
			struct sk_buff *skb;

			BUGMSG("*%d %d", lp->txq_p, lp->txq_c);
			if(tx_packet_stat)
				printk("TX Packet error 0x%.8x\n", tx_packet_stat);

			skb = lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE -1)];
			/* paranoia check should be removed */
			if(skb == NULL) {
				printk("ERROR: tx frame NULL\n");
				break;
			}
			lp->tx_frames[lp->txq_c & (ETH_TXQ_SIZE -1)] = NULL;
			lp->txq_c++;
			lp->stats.tx_packets++;
			dev_kfree_skb(skb);
		} while(lp->uEth->INTST & ETHINTSTTXEND);
	}
	BUGMSG("\n");
	return;
}
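Note: the RX loop above follows the classic receive-and-refill pattern: hand the filled skb to the stack with eth_type_trans() and post a freshly allocated buffer back to the hardware ring. A minimal standalone sketch of the pattern, assuming a hypothetical my_rx_ring structure and hw_post_rx_buffer() hand-off that are not part of the driver above:

#define RX_RING_SIZE	16	/* hypothetical ring depth */
#define RX_BUF_SZ	1536	/* hypothetical buffer size */

struct my_rx_ring {
	struct sk_buff *frames[RX_RING_SIZE];
	unsigned int head;
};

extern void hw_post_rx_buffer(void *data);	/* hypothetical HW hand-off */

static void rx_refill_one(struct net_device *dev, struct my_rx_ring *ring,
			  unsigned int len)
{
	struct sk_buff *skb = ring->frames[ring->head % RX_RING_SIZE];
	struct sk_buff *fresh = dev_alloc_skb(RX_BUF_SZ);

	if (!fresh) {
		/* No replacement buffer: recycle the old one, drop the frame */
		hw_post_rx_buffer(skb->data);
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(fresh, NET_IP_ALIGN);	/* word-align the IP header */
	ring->frames[ring->head % RX_RING_SIZE] = fresh;
	ring->head++;
	hw_post_rx_buffer(fresh->data);

	skb_put(skb, len);			/* length reported by the HW */
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);
}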
Code example #2
File: caif_virtio.c Project: 383530895/linux
/* Put the CAIF packet on the virtio ring and kick the receiver */
static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;

	/* garbage collect released buffers */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);

	/* The flow-off check takes the number of CPUs into account to make
	 * sure the virtqueue will not be overfilled under any possible SMP
	 * condition.
	 *
	 * Flow-on is triggered when sufficient buffers are freed
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
		flow_off = true;
		cfv->stats.tx_full_ring++;
	}

	/* If we run out of memory, we release the memory reserve and retry
	 * allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool,  cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}

	if (unlikely(flow_off)) {
		/* Turn flow on when 1/4 of the descriptors have been released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}

	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
		goto err;
	}

	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely((ret < 0))) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Code example #3
static ssize_t eemcs_ipc_write(struct file *fp, const char __user *buf, size_t in_sz, loff_t *ppos)
{
    ssize_t ret   = 0;
    eemcs_ipc_node_t *curr_node = (eemcs_ipc_node_t *)fp->private_data;
    KAL_UINT8 node_id = curr_node->ipc_node_id;/* node_id */
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, control_flag;    
    struct sk_buff *new_skb;
    CCCI_BUFF_T *ccci_header;
    ipc_ilm_t *ilm=NULL;
    IPC_MSGSVC_TASKMAP_T *id_map;
    size_t count = in_sz;
    size_t skb_alloc_size;
    KAL_UINT32 alloc_time = 0, curr_time = 0;
    
    DEBUG_LOG_FUNCTION_ENTRY;        
    DBGLOG(IPCD, DBG, "[TX]device=%s iminor=%d len=%d", curr_node->dev_name, node_id, count);

    p_type = ccci_get_port_type(port_id);
    if(p_type != EX_T_USER) 
    {
        DBGLOG(IPCD, ERR, "PORT%d refuse port(%d) access user port", port_id, p_type);
        ret=-EINVAL;
        goto _exit;                    
    }

    control_flag = ccci_get_port_cflag(port_id);	
    if (check_device_state() == EEMCS_EXCEPTION) {//modem exception		
        if ((control_flag & TX_PRVLG2) == 0) {
            DBGLOG(IPCD, TRA, "[TX]PORT%d write fail when modem exception", port_id);
            return -ETXTBSY;
        }
    } else if (check_device_state() != EEMCS_BOOTING_DONE) {//modem not ready
        if ((control_flag & TX_PRVLG1) == 0) {
            DBGLOG(IPCD, TRA, "[TX]PORT%d write fail when modem not ready", port_id);
            return -ENODEV;
        }
    }

    if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T)))
    {
        DBGLOG(IPCD, ERR, "invalid write_len(%d) of PORT%d", count, port_id);
        ret=-EINVAL;
        goto _exit;            
    }

    if(control_flag & EXPORT_CCCI_H){
        if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
            DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
        }
        skb_alloc_size = count - sizeof(CCCI_BUFF_T);
    }else{
        if(count > MAX_TX_BYTE){
            DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)!", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE;
        }
        skb_alloc_size = count;
    }

    if (ccci_ch_write_space_alloc(eemcs_ipc_inst.ccci_ch.tx)==0){
        DBGLOG(IPCD, WAR, "PORT%d write return 0", port_id);
        ret = -EAGAIN;
        goto _exit;
    }	
    
    new_skb = ccci_ipc_mem_alloc(skb_alloc_size + CCCI_IPC_HEADER_ROOM, GFP_ATOMIC);
    if(NULL == new_skb)
    {
    	DBGLOG(CHAR, INF, "[TX]PORT%d alloc skb fail with wait", port_id);
    	alloc_time = jiffies;
    	new_skb = ccci_ipc_mem_alloc(skb_alloc_size + CCCI_IPC_HEADER_ROOM, GFP_KERNEL);
    	if (NULL == new_skb) {
            ret = -ENOMEM;
            DBGLOG(IPCD, ERR, "[TX]PORT%d alloc skb fail with wait fail", port_id);
            goto _exit; 
    	}

    	curr_time = jiffies;
    	if ((curr_time - alloc_time) >= 1) {			
            DBGLOG(IPCD, ERR, "[TX]PORT%d alloc skb delay: time=%dms", port_id, \
			10*(curr_time - alloc_time));
    	}
    }
    
    /* reserve SDIO_H header room */
    #ifdef CCCI_SDIO_HEAD
    skb_reserve(new_skb, sizeof(SDIO_H));
    #endif

    ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;

    if(copy_from_user(skb_put(new_skb, count), buf, count))
    {
        DBGLOG(IPCD, ERR, "[TX]PORT%d copy_from_user(len=%d, %p->%p) fail", \
		port_id, count, buf, new_skb->data);
        dev_kfree_skb(new_skb);
        ret = -EFAULT;
        goto _exit;
    }

    ilm = (ipc_ilm_t*)((char*)ccci_header + sizeof(CCCI_BUFF_T));
    /* Check IPC extq_id */
    if ((id_map = local_MD_id_2_unify_id(ilm->dest_mod_id)) == NULL)
    {
        DBGLOG(IPCD, ERR, "Invalid dest_mod_id=%d", ilm->dest_mod_id);
        dev_kfree_skb(new_skb);
        ret = -EINVAL;
        goto _exit;
    }
    
    /* user bring down the payload only */
    ccci_header->data[1]    = count + sizeof(CCCI_BUFF_T);
    ccci_header->reserved   = id_map->extq_id;
    ccci_header->channel    = eemcs_ipc_inst.ccci_ch.tx;

    DBGLOG(IPCD, DBG, "[TX]PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)", 
                    port_id, 
                    ccci_header->data[0], ccci_header->data[1],
                    ccci_header->channel, ccci_header->reserved);

  
    ret = ccci_ch_write_desc_to_q(ccci_header->channel, new_skb);

    if (KAL_SUCCESS != ret) {
        DBGLOG(IPCD, ERR, "PORT%d PKT DROP of ch%d!", port_id, ccci_header->channel);
        dev_kfree_skb(new_skb);
        ret = -EAGAIN;
    } else {
        atomic_inc(&curr_node->tx_pkt_cnt);
        wake_up(&curr_node->tx_waitq); /* wake up tx_waitq for notify poll_wait of state change */
    }

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    if(!ret){
        return count;
    }
    return ret;
}
Code example #4
File: ni52.c Project: AppEngine/linux-2.6
static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len, i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = netdev_priv(dev);

	if (skb->len > XMIT_BUFF_SIZE) {
		printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
		return 0;
	}

	netif_stop_queue(dev);

	memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
							len - skb->len);
	}

#if (NUM_XMIT_BUFFS == 1)
#	ifdef NO_NOPCOMMANDS

#ifdef DEBUG
	if (readb(&p->scb->cus) & CU_ACTIVE) {
		printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
		printk(KERN_ERR "%s: stat: %04x %04x\n",
				dev->name, readb(&p->scb->cus),
				readw(&p->xmit_cmds[0]->cmd_status));
	}
#endif
	writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
	for (i = 0; i < 16; i++) {
		writew(0, &p->xmit_cmds[0]->cmd_status);
		wait_for_scb_cmd(dev);
		if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
			writeb(CUC_RESUME, &p->scb->cmd_cuc);
		else {
			writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
			writeb(CUC_START, &p->scb->cmd_cuc);
		}
		ni_attn586();
		dev->trans_start = jiffies;
		if (!i)
			dev_kfree_skb(skb);
		wait_for_scb_cmd(dev);
		/* test it, because CU sometimes doesn't start immediately */
		if (readb(&p->scb->cus) & CU_ACTIVE)
			break;
		if (readw(&p->xmit_cmds[0]->cmd_status))
			break;
		if (i == 15)
			printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
	}
#	else
	next_nop = (p->nop_point + 1) & 0x1;
	writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
	writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
	writew(make16(p->nop_cmds[next_nop]),
				&p->nop_cmds[next_nop]->cmd_link);
	writew(0, &p->xmit_cmds[0]->cmd_status);
	writew(0, &p->nop_cmds[next_nop]->cmd_status);

	writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
	dev->trans_start = jiffies;
	p->nop_point = next_nop;
	dev_kfree_skb(skb);
#	endif
#else
	writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
	next_nop = p->xmit_count + 1;
	if (next_nop == NUM_XMIT_BUFFS)
		next_nop = 0;
	writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
	/* linkpointer of xmit-command already points to next nop cmd */
	writew(make16(p->nop_cmds[next_nop]),
				&p->nop_cmds[next_nop]->cmd_link);
	writew(0, &p->nop_cmds[next_nop]->cmd_status);
	writew(make16(p->xmit_cmds[p->xmit_count]),
				&p->nop_cmds[p->xmit_count]->cmd_link);
	dev->trans_start = jiffies;
	p->xmit_count = next_nop;
	{
		unsigned long flags;
		spin_lock_irqsave(&p->spinlock, flags);
		if (p->xmit_count != p->xmit_last)
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&p->spinlock, flags);
	}
	dev_kfree_skb(skb);
#endif
	return 0;
}
Code example #5
File: virtio_net.c Project: PennPanda/linux-repo
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}
	len -= sizeof(struct virtio_net_hdr);
	BUG_ON(len > MAX_PACKET_LEN);

	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = hdr->csum_start;
		skb->csum_offset = hdr->csum_offset;
		if (skb->csum_start > skb->len - 2
		    || skb->csum_offset > skb->len - 2) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: csum=%u/%u len=%u\n",
				       dev->name, skb->csum_start,
				       skb->csum_offset, skb->len);
			goto frame_err;
		}
	}

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV4_ECN:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCP_ECN;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
Code example #6
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
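The free-slot arithmetic above treats ring->start as the first in-flight descriptor and ring->end as the next slot to fill, and deliberately keeps one slot unused so that a full ring never looks identical to an empty one. The same computation as a standalone helper (a sketch, not bgmac code):

static unsigned int ring_free_slots(unsigned int start, unsigned int end,
				    unsigned int num_slots)
{
	/* when start <= end the used region is end - start, else it wraps */
	if (start <= end)
		return num_slots - (end - start);
	return start - end;
}

With start == end this reports the ring empty (num_slots free), which is why the code above stops the queue as soon as only one free slot remains.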
Code example #7
/*----------------------------------------------------------------
* p80211knetdev_hard_start_xmit
*
* Linux netdevice method for transmitting a frame.
*
* Arguments:
*	skb	Linux sk_buff containing the frame.
*	netdev	Linux netdevice.
*
* Side effects:
 *	If the lower layers report that buffers are full, netdev->tbusy
*	will be set to prevent higher layers from sending more traffic.
*
*	Note: If this function returns non-zero, higher layers retain
*	      ownership of the skb.
*
* Returns:
*	zero on success, non-zero on failure.
----------------------------------------------------------------*/
static int p80211knetdev_hard_start_xmit( struct sk_buff *skb, netdevice_t *netdev)
{
	int		result = 0;
	int		txresult = -1;
	wlandevice_t	*wlandev = netdev->ml_priv;
	p80211_hdr_t    p80211_hdr;
	p80211_metawep_t p80211_wep;

	DBFENTER;

	if (skb == NULL) {
		return 0;
	}

        if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(p80211_hdr_t));
	memset(&p80211_wep, 0, sizeof(p80211_metawep_t));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,38) )
	if ( test_and_set_bit(0, (void*)&(netdev->tbusy)) != 0 ) {
		/* We've been called w/ tbusy set, has the tx */
		/* path stalled?   */
		WLAN_LOG_DEBUG(1, "called when tbusy set\n");
		result = 1;
		goto failed;
	}
#else
	if ( netif_queue_stopped(netdev) ) {
		WLAN_LOG_DEBUG(1, "called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	netif_stop_queue(netdev);

	/* No timeout handling here, 2.3.38+ kernels call the
	 * timeout function directly.
	 * TODO: Add timeout handling.
	*/
#endif

	/* Check to see that a valid mode is set */
	switch( wlandev->macmode ) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet, just drop the frame
		 * and return success.
		 * TODO: we need a saner way to handle this
		 */
		if(skb->protocol != ETH_P_80211_RAW) {
			p80211netdev_start_queue(wlandev);
			WLAN_LOG_NOTICE(
				"Tx attempt prior to association, frame dropped.\n");
			wlandev->linux_stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	if(skb->protocol == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t));
		skb_pull(skb, sizeof(p80211_hdr_t));
	} else {
		if ( skb_ether_to_p80211(wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0 ) {
			/* convert failed */
			WLAN_LOG_DEBUG(1, "ether_to_80211(%d) failed.\n",
					wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if ( wlandev->txframe == NULL ) {
		result = 1;
		goto failed;
	}

	netdev->trans_start = jiffies;

	wlandev->linux_stats.tx_packets++;
	/* count only the packet payload */
	wlandev->linux_stats.tx_bytes += skb->len;

	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if ( txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		p80211netdev_wake_queue(wlandev);
		result = 0;
	} else if ( txresult == 1 ) {
		/* success, no more avail */
		WLAN_LOG_DEBUG(3, "txframe success, no more bufs\n");
		/* netdev->tbusy = 1;  don't set here, irqhdlr */
		/*   may have already cleared it */
		result = 0;
	} else if ( txresult == 2 ) {
		/* alloc failure, drop frame */
		WLAN_LOG_DEBUG(3, "txframe returned alloc_fail\n");
		result = 1;
	} else {
		/* buffer full or queue busy, drop frame. */
		WLAN_LOG_DEBUG(3, "txframe returned full or busy\n");
		result = 1;
	}

 failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kfree(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	DBFEXIT;
	return result;
}
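The header comment above spells out the old ownership contract: a non-zero return means the upper layers keep the skb and will retry. Later kernels express the same contract with netdev_tx_t return codes; a hedged sketch, with hypothetical my_hw_has_room()/my_hw_queue() helpers that stand in for real hardware access:

static bool my_hw_has_room(struct net_device *dev);			/* hypothetical */
static int my_hw_queue(struct net_device *dev, struct sk_buff *skb);	/* hypothetical */

static netdev_tx_t my_hard_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!my_hw_has_room(dev))
		return NETDEV_TX_BUSY;	/* skb NOT consumed; the stack requeues it */

	if (my_hw_queue(dev, skb) < 0) {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);	/* consumed by dropping, so we free it */
	}
	return NETDEV_TX_OK;		/* consumed either way */
}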
Code example #8
static void
bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
	struct hfc4s8s_btype *bch = ifc->priv;
	struct hfc4s8s_l1 *l1 = bch->l1p;
	struct sk_buff *skb = (struct sk_buff *) arg;
	long mode = (long) arg;
	u_long flags;

	switch (pr) {

		case (PH_DATA | REQUEST):
			if (!l1->enabled || (bch->mode == L1_MODE_NULL)) {
				dev_kfree_skb(skb);
				break;
			}
			spin_lock_irqsave(&l1->lock, flags);
			skb_queue_tail(&bch->tx_queue, skb);
			if (!bch->tx_skb && (bch->tx_cnt <= 0)) {
				l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
				    ((bch->bchan == 1) ? 1 : 4);
				spin_unlock_irqrestore(&l1->lock, flags);
				schedule_work(&l1->hw->tqueue);
			} else
				spin_unlock_irqrestore(&l1->lock, flags);
			break;

		case (PH_ACTIVATE | REQUEST):
		case (PH_DEACTIVATE | REQUEST):
			if (!l1->enabled)
				break;
			if (pr == (PH_DEACTIVATE | REQUEST))
				mode = L1_MODE_NULL;

			switch (mode) {
				case L1_MODE_HDLC:
					spin_lock_irqsave(&l1->lock, flags);
					l1->hw->mr.timer_usg_cnt++;
					l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
					    ((bch->bchan == 1) ? 0x2 : 0x8);
					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
					Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
					Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable TX interrupts for hdlc */
					Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
					wait_busy(l1->hw);

					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
					Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
					Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable RX interrupts for hdlc */
					Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */

					Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
					l1->hw->mr.r_ctrl0 |= (bch->bchan & 3);
					Write_hfc8(l1->hw, A_ST_CTRL0,
						   l1->hw->mr.r_ctrl0);
					bch->mode = L1_MODE_HDLC;
					spin_unlock_irqrestore(&l1->lock, flags);

					bch->b_if.ifc.l1l2(&bch->b_if.ifc,
							   PH_ACTIVATE | INDICATION,
							   NULL);
					break;

				case L1_MODE_TRANS:
					spin_lock_irqsave(&l1->lock, flags);
					l1->hw->mr.fifo_rx_trans_enables[l1->st_num] |=
					    ((bch->bchan == 1) ? 0x2 : 0x8);
					l1->hw->mr.timer_usg_cnt++;
					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_CON_HDLC, 0xf);	/* Transparent mode, 1 fill, connect ST */
					Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
					Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable TX interrupts */
					Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
					wait_busy(l1->hw);

					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_CON_HDLC, 0xf);	/* Transparent mode, 1 fill, connect ST */
					Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
					Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable RX interrupts */
					Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */

					Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
					l1->hw->mr.r_ctrl0 |= (bch->bchan & 3);
					Write_hfc8(l1->hw, A_ST_CTRL0,
						   l1->hw->mr.r_ctrl0);
					bch->mode = L1_MODE_TRANS;
					spin_unlock_irqrestore(&l1->lock, flags);

					bch->b_if.ifc.l1l2(&bch->b_if.ifc,
							   PH_ACTIVATE | INDICATION,
							   NULL);
					break;

				default:
					if (bch->mode == L1_MODE_NULL)
						break;
					spin_lock_irqsave(&l1->lock, flags);
					l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
					    ~((bch->bchan == 1) ? 0x3 : 0xc);
					l1->hw->mr.fifo_rx_trans_enables[l1->st_num] &=
					    ~((bch->bchan == 1) ? 0x3 : 0xc);
					l1->hw->mr.timer_usg_cnt--;
					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable TX interrupts */
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, R_FIFO,
						   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
					wait_busy(l1->hw);
					Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable RX interrupts */
					Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
					l1->hw->mr.r_ctrl0 &= ~(bch->bchan & 3);
					Write_hfc8(l1->hw, A_ST_CTRL0,
						   l1->hw->mr.r_ctrl0);
					spin_unlock_irqrestore(&l1->lock, flags);

					bch->mode = L1_MODE_NULL;
					bch->b_if.ifc.l1l2(&bch->b_if.ifc,
							   PH_DEACTIVATE | INDICATION,
							   NULL);
					if (bch->tx_skb) {
						dev_kfree_skb(bch->tx_skb);
						bch->tx_skb = NULL;
					}
					if (bch->rx_skb) {
						dev_kfree_skb(bch->rx_skb);
						bch->rx_skb = NULL;
					}
					skb_queue_purge(&bch->tx_queue);
					bch->tx_cnt = 0;
					bch->rx_ptr = NULL;
					break;
			}

			/* timer is only used when at least one b channel */
			/* is set up to transparent mode */
			if (l1->hw->mr.timer_usg_cnt) {
				Write_hfc8(l1->hw, R_IRQMSK_MISC,
					   M_TI_IRQMSK);
			} else {
				Write_hfc8(l1->hw, R_IRQMSK_MISC, 0);
			}

			break;

		default:
			printk(KERN_INFO
			       "HFC-4S/8S: Unknown B-chan cmd 0x%x received, ignored\n",
			       pr);
			break;
	}
	if (!l1->enabled)
		bch->b_if.ifc.l1l2(&bch->b_if.ifc,
				   PH_DEACTIVATE | INDICATION, NULL);
}				/* bch_l2l1 */
Code example #9
static void
rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
{
	int z1, z2;
	u_char f1, f2, df;
	struct sk_buff *skb;
	u_char *cp;


	if (!l1p->enabled)
		return;
	do {
		/* E/D RX fifo */
		Write_hfc8(l1p->hw, R_FIFO,
			   (l1p->st_num * 8 + ((ech) ? 7 : 5)));
		wait_busy(l1p->hw);

		f1 = Read_hfc8_stable(l1p->hw, A_F1);
		f2 = Read_hfc8(l1p->hw, A_F2);
		df = f1 - f2;
		if ((f1 - f2) < 0)
			df = f1 - f2 + MAX_F_CNT + 1;


		if (!df) {
			return;	/* no complete frame in fifo */
		}

		z1 = Read_hfc16_stable(l1p->hw, A_Z1);
		z2 = Read_hfc16(l1p->hw, A_Z2);

		z1 = z1 - z2 + 1;
		if (z1 < 0)
			z1 += 384;

		if (!(skb = dev_alloc_skb(MAX_D_FRAME_SIZE))) {
			printk(KERN_INFO
			       "HFC-4S/8S: Could not allocate D/E "
			       "channel receive buffer\n");
			Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
			wait_busy(l1p->hw);
			return;
		}

		if ((z1 < 4) || (z1 > MAX_D_FRAME_SIZE)) {
			dev_kfree_skb(skb);
			/* remove erroneous D frame */
			if (df == 1) {
				/* reset fifo */
				Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2);
				wait_busy(l1p->hw);
				return;
			} else {
				/* read erroneous D frame */

#ifndef HISAX_HFC4S8S_PCIMEM
				SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif

				while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
					Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
					fRead_hfc32(l1p->hw);
#endif
					z1 -= 4;
				}

				while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
					Read_hfc8(l1p->hw, A_FIFO_DATA0);
#else
					fRead_hfc8(l1p->hw);
#endif

				Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
				wait_busy(l1p->hw);
				return;
			}
		}

		cp = skb->data;

#ifndef HISAX_HFC4S8S_PCIMEM
		SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif

		while (z1 >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
			*((unsigned long *) cp) =
			    Read_hfc32(l1p->hw, A_FIFO_DATA0);
#else
			*((unsigned long *) cp) = fRead_hfc32(l1p->hw);
#endif
			cp += 4;
			z1 -= 4;
		}

		while (z1--)
#ifdef HISAX_HFC4S8S_PCIMEM
			*cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
#else
			*cp++ = fRead_hfc8(l1p->hw);
#endif

		Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);	/* increment f counter */
		wait_busy(l1p->hw);

		if (*(--cp)) {
			dev_kfree_skb(skb);
		} else {
			skb->len = (cp - skb->data) - 2;
			if (ech)
				l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
						   PH_DATA_E | INDICATION,
						   skb);
			else
				l1p->d_if.ifc.l1l2(&l1p->d_if.ifc,
						   PH_DATA | INDICATION,
						   skb);
		}
	} while (1);
}				/* rx_d_frame */
Code example #10
File: Dta1xxNw.c Project: VahidSh69/DTAPI
//.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.- Dta1xxNwStartXmit -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-
//
// The stack will execute Dta1xxNwStartXmit only once.
// (NETIF_F_LLTX flag is not set)
//
int Dta1xxNwStartXmit(struct sk_buff* pSkb, struct net_device* pDevice)
{	
	Dta1xxNw_Private* lp = (Dta1xxNw_Private*)netdev_priv(pDevice);
	Dta1xxDmaTxHeader* pTxHeader;
	Int PktLength;
	Int TotalLength;
	UInt FreeSpaceUntilEnd;
	UInt FreeSpaceFromBegin;
	UInt ReadOffset;
	UInt WriteOffset;
	Dta1xxNwBuf* pNwTx = &lp->m_NwTxBuf;

#if LOG_LEVEL > 0
	DTA1XXNW_LOG(KERN_DEBUG, "Dta1xxNwStartXmit is called");
#endif
	
	// Check if it's our own dummy generated packet
	if ((pSkb->len >= (sizeof(EthernetIIHeader) + sizeof(IpHeaderV4)-5 + sizeof(UdpHeader) + 10)) &&
	    (pSkb->len <= ETH_ZLEN)){
		EthernetIIHeader* pEthernetIIHeader;
		IpHeaderV4* pIpHeader;
		UdpHeader* pUdpHeader;
		UInt8* pData;
	
		pEthernetIIHeader = (EthernetIIHeader*)pSkb->data;
		pIpHeader = (IpHeaderV4*)(pSkb->data + sizeof(EthernetIIHeader));
		pUdpHeader = (UdpHeader*)(pSkb->data + sizeof(EthernetIIHeader)+ sizeof(IpHeaderV4)-5);
		pData = pSkb->data + sizeof(EthernetIIHeader) + sizeof(IpHeaderV4)-5 + sizeof(UdpHeader);
		if ((pUdpHeader->m_DestinationPort ==  0x0400) && 
			(strcmp(pData, "DektecArp\0") == 0)) {
			// It's our own dummy packet. Skip it.
			dev_kfree_skb(pSkb);
			return STATUS_SUCCESS;
		} 
	}

	if (lp->m_Connected == 0) {
		lp->m_NetStats.tx_errors++;
		return STATUS_UNSUCCESSFUL;
	}
	PktLength = (pSkb->len < ETH_ZLEN ? ETH_ZLEN : pSkb->len);
    
	// Make sure there is enough free space for a complete packet including header and alignment
	TotalLength = PktLength + sizeof(Dta1xxDmaTxHeader);
	if ((TotalLength%4)!=0)
		TotalLength += (4-(TotalLength%4));
	
	ReadOffset = pNwTx->m_ReadOffset;
	WriteOffset = pNwTx->m_WriteOffset;
	if (WriteOffset < ReadOffset) {
		FreeSpaceFromBegin = 0;
		FreeSpaceUntilEnd = ReadOffset - WriteOffset - 1;
	} else {
		FreeSpaceUntilEnd = pNwTx->m_BufSize - WriteOffset - 1;
		if (ReadOffset == 0)
			FreeSpaceFromBegin = 0;
		else
			FreeSpaceFromBegin = ReadOffset - 1;
	}

	if (FreeSpaceUntilEnd >= TotalLength) {
		// Found free space at end of buffer
		pTxHeader = (Dta1xxDmaTxHeader*)(pNwTx->m_pBuffer + WriteOffset);

	} else if (FreeSpaceFromBegin >= TotalLength) {
		// Found free space at begin of buffer
		// Mark first free byte at end of buffer as not used '*',
		// so the TxThread knows to start next packet at begin of buffer.
		pNwTx->m_pBuffer[WriteOffset] = '*';
		WriteOffset = 0;
		pTxHeader = (Dta1xxDmaTxHeader*)(pNwTx->m_pBuffer + WriteOffset);
	} else {
		// Not enough free space, skip packet
		lp->m_NetStats.tx_dropped++;
		return STATUS_UNSUCCESSFUL;
	}

	memset(pTxHeader, 0, sizeof(Dta1xxDmaTxHeader));

	pTxHeader->Tag = 0x445441A0;
	pTxHeader->Version = 1;
	pTxHeader->Length = sizeof(Dta1xxDmaTxHeader);
	pTxHeader->TransmitControl.PacketLength = PktLength;
	WriteOffset+= sizeof(Dta1xxDmaTxHeader);
	
	if (pSkb->len < ETH_ZLEN) {
		// Reset stuffing bytes
		memset(pNwTx->m_pBuffer+WriteOffset, 0, ETH_ZLEN);
	}
	memcpy(pNwTx->m_pBuffer+WriteOffset, pSkb->data, pSkb->len);

	// Make sure the next packet is aligned on multiples of 4
	if ((PktLength%4)!=0)
		PktLength += (4-(PktLength%4));

	WriteOffset+= PktLength;

	if (WriteOffset >= pNwTx->m_BufSize) {
		// Cannot occur due to the free space check above.
		// If it did occur, WriteOffset==ReadOffset could not be prevented!!
		WriteOffset = 0;
	}

	pNwTx->m_WriteOffset = WriteOffset;
	pDevice->trans_start = jiffies;

	// Acknowledge new packet arrived
	set_bit(0, (void*)&pNwTx->m_PktAvail);
	wake_up_interruptible(&pNwTx->m_WaitQueue);

	dev_kfree_skb(pSkb);
	return STATUS_SUCCESS;
}
Code example #11
static void
dch_l2l1(struct hisax_d_if *iface, int pr, void *arg)
{
	struct hfc4s8s_l1 *l1 = iface->ifc.priv;
	struct sk_buff *skb = (struct sk_buff *) arg;
	u_long flags;

	switch (pr) {

		case (PH_DATA | REQUEST):
			if (!l1->enabled) {
				dev_kfree_skb(skb);
				break;
			}
			spin_lock_irqsave(&l1->lock, flags);
			skb_queue_tail(&l1->d_tx_queue, skb);
			if ((skb_queue_len(&l1->d_tx_queue) == 1) &&
			    (l1->tx_cnt <= 0)) {
				l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
				    0x10;
				spin_unlock_irqrestore(&l1->lock, flags);
				schedule_work(&l1->hw->tqueue);
			} else
				spin_unlock_irqrestore(&l1->lock, flags);
			break;

		case (PH_ACTIVATE | REQUEST):
			if (!l1->enabled)
				break;
			if (!l1->nt_mode) {
				if (l1->l1_state < 6) {
					spin_lock_irqsave(&l1->lock, flags);
					Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
					Write_hfc8(l1->hw, A_ST_WR_STA, 0x60);
					mod_timer(&l1->l1_timer,
						  jiffies + L1_TIMER_T3);
					spin_unlock_irqrestore(&l1->lock, flags);
				} else if (l1->l1_state == 7)
					l1->d_if.ifc.l1l2(&l1->d_if.ifc,
							  PH_ACTIVATE | INDICATION,
							  NULL);
			} else {
				if (l1->l1_state != 3) {
					spin_lock_irqsave(&l1->lock, flags);
					Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
					Write_hfc8(l1->hw, A_ST_WR_STA, 0x60);
					spin_unlock_irqrestore(&l1->lock, flags);
				} else if (l1->l1_state == 3)
					l1->d_if.ifc.l1l2(&l1->d_if.ifc,
							  PH_ACTIVATE | INDICATION,
							  NULL);
			}
			break;

		default:
			printk(KERN_INFO
			       "HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n",
			       pr);
			break;
	}
	if (!l1->enabled)
		l1->d_if.ifc.l1l2(&l1->d_if.ifc,
				  PH_DEACTIVATE | INDICATION, NULL);
}				/* dch_l2l1 */
Code example #12
File: ibmlana.c Project: OpenHMR/Open-HMR600
static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
{
	ibmlana_priv *priv = netdev_priv(dev);
	int retval = 0, tmplen, addr;
	unsigned long flags;
	tda_t tda;
	int baddr;

	/* find out if there are free slots for a frame to transmit. If not,
	   the upper layer is in deep desperation and we simply ignore the frame. */

	if (priv->txusedcnt >= TXBUFCNT) {
		retval = -EIO;
		priv->stat.tx_dropped++;
		goto tx_done;
	}

	/* copy the frame data into the next free transmit buffer - fillup missing */
	tmplen = skb->len;
	if (tmplen < 60)
		tmplen = 60;
	baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
	memcpy_toio(priv->base + baddr, skb->data, skb->len);

	/* copy filler into RAM - in case we're filling up... 
	   we're filling a bit more than necessary, but that doesn't harm
	   since the buffer is far larger... 
	   Sorry Linus for the filler string but I couldn't resist ;-) */

	if (tmplen > skb->len) {
		char *fill = "NetBSD is a nice OS too! ";
		unsigned int destoffs = skb->len, l = strlen(fill);

		while (destoffs < tmplen) {
			memcpy_toio(priv->base + baddr + destoffs, fill, l);
			destoffs += l;
		}
	}

	/* set up the new frame descriptor */
	addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
	memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
	tda.length = tda.fraglength = tmplen;
	memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));

	/* if there were no active descriptors, trigger the SONIC */
	spin_lock_irqsave(&priv->lock, flags);

	priv->txusedcnt++;
	priv->txused[priv->nexttxdescr] = 1;

	/* are all transmission slots used up ? */
	if (priv->txusedcnt >= TXBUFCNT)
		netif_stop_queue(dev);

	if (priv->txusedcnt == 1)
		StartTx(dev, priv->nexttxdescr);
	priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;

	spin_unlock_irqrestore(&priv->lock, flags);
tx_done:
	dev_kfree_skb(skb);
	return retval;
}
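Examples #14 and #21 pad short frames with skb_padto() rather than copying filler into card RAM as done above; its error convention is easy to get wrong, so a minimal sketch:

/* skb_padto() zero-extends the buffer to the requested length and frees the
 * skb on allocation failure, so the caller must report the packet consumed. */
static netdev_tx_t pad_and_send(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb already freed */
		len = ETH_ZLEN;
	}
	/* ... hand the len-byte frame to the hardware here ... */
	return NETDEV_TX_OK;
}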
Code example #13
File: rt2x00dev.c Project: AdrianHuang/linux-3.8.13
void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct rxdone_entry_desc rxdesc;
	struct sk_buff *skb;
	struct ieee80211_rx_status *rx_status;
	unsigned int header_length;
	int rate_idx;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
	    !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		goto submit_entry;

	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		goto submit_entry;

	/*
	 * Allocate a new sk_buff. If no new buffer is available, drop the
	 * received frame and reuse the existing buffer.
	 */
	skb = rt2x00queue_alloc_rxskb(entry, gfp);
	if (!skb)
		goto submit_entry;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Extract the RXD details.
	 */
	memset(&rxdesc, 0, sizeof(rxdesc));
	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

	/*
	 * Check for valid size in case we get corrupted descriptor from
	 * hardware.
	 */
	if (unlikely(rxdesc.size == 0 ||
		     rxdesc.size > entry->queue->data_size)) {
		ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
			rxdesc.size, entry->queue->data_size);
		dev_kfree_skb(entry->skb);
		goto renew_skb;
	}

	/*
	 * The data behind the ieee80211 header must be
	 * aligned on a 4 byte boundary.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Hardware might have stripped the IV/EIV/ICV data,
	 * in that case it is possible that the data was
	 * provided separately (through hardware descriptor)
	 * in which case we should reinsert the data into the frame.
	 */
	if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
	    (rxdesc.flags & RX_FLAG_IV_STRIPPED))
		rt2x00crypto_rx_insert_iv(entry->skb, header_length,
					  &rxdesc);
	else if (header_length &&
		 (rxdesc.size > header_length) &&
		 (rxdesc.dev_flags & RXDONE_L2PAD))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/* Trim buffer to correct size */
	skb_trim(entry->skb, rxdesc.size);

	/*
	 * Translate the signal to the correct bitrate index.
	 */
	rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
	if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
	    rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
		rxdesc.flags |= RX_FLAG_HT;

	/*
	 * Check if this is a beacon, and more frames have been
	 * buffered while we were in powersaving mode.
	 */
	rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc);

	/*
	 * Update extra components
	 */
	rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
	rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);

	/*
	 * Initialize RX status information, and send frame
	 * to mac80211.
	 */
	rx_status = IEEE80211_SKB_RXCB(entry->skb);

	/* Ensure that all fields of rx_status are initialized
	 * properly. The skb->cb array was used for driver-specific
	 * information, so rx_status might contain garbage.
	 */
	memset(rx_status, 0, sizeof(*rx_status));

	rx_status->mactime = rxdesc.timestamp;
	rx_status->band = rt2x00dev->curr_band;
	rx_status->freq = rt2x00dev->curr_freq;
	rx_status->rate_idx = rate_idx;
	rx_status->signal = rxdesc.rssi;
	rx_status->flag = rxdesc.flags;
	rx_status->antenna = rt2x00dev->link.ant.active.rx;

	ieee80211_rx_ni(rt2x00dev->hw, entry->skb);

renew_skb:
	/*
	 * Replace the skb with the freshly allocated one.
	 */
	entry->skb = skb;

submit_entry:
	entry->flags = 0;
	rt2x00queue_index_inc(entry, Q_INDEX_DONE);
	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
	    test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2x00dev->ops->lib->clear_entry(entry);
}
Code example #14
File: sonic.c Project: nos1609/Chrono_Kernel-1
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		dev_kfree_skb(skb);
		/* the skb is freed, so we must not ask the stack to requeue it */
		return NETDEV_TX_OK;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/*
	 * Must set tx_skb[entry] only after clearing status, and
	 * before clearing EOL and before stopping queue
	 */
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
				  sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else netif_start_queue(dev);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}
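A zero bus address returned by dma_map_single() is not a portable failure indicator; the preferred test is dma_mapping_error(), as the bgmac example (#6) shows. Applied to the mapping above, the check would read (a sketch):

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		dev_kfree_skb(skb);	/* drop the frame ... */
		return NETDEV_TX_OK;	/* ... and report it consumed */
	}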
Code example #15
File: ifb.c Project: chunyenho/RTS-hw2
static void ri_tasklet(unsigned long dev)
{

    struct net_device *_dev = (struct net_device *)dev;
    struct ifb_private *dp = netdev_priv(_dev);
    struct net_device_stats *stats = &_dev->stats;
    struct netdev_queue *txq;
    struct sk_buff *skb;

    txq = netdev_get_tx_queue(_dev, 0);
    dp->st_task_enter++;
    if ((skb = skb_peek(&dp->tq)) == NULL) {
        dp->st_txq_refl_try++;
        if (__netif_tx_trylock(txq)) {
            dp->st_rxq_enter++;
            while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                skb_queue_tail(&dp->tq, skb);
                dp->st_rx2tx_tran++;
            }
            __netif_tx_unlock(txq);
        } else {
            /* reschedule */
            dp->st_rxq_notenter++;
            goto resched;
        }
    }

    while ((skb = skb_dequeue(&dp->tq)) != NULL) {
        u32 from = G_TC_FROM(skb->tc_verd);

        skb->tc_verd = 0;
        skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        skb->dev = dev_get_by_index(&init_net, skb->iif);
        if (!skb->dev) {
            dev_kfree_skb(skb);
            stats->tx_dropped++;
            break;
        }
        dev_put(skb->dev);
        skb->iif = _dev->ifindex;

        if (from & AT_EGRESS) {
            dp->st_rx_frm_egr++;
            dev_queue_xmit(skb);
        } else if (from & AT_INGRESS) {
            dp->st_rx_frm_ing++;
            skb_pull(skb, skb->dev->hard_header_len);
            netif_rx(skb);
        } else
            BUG();
    }

    if (__netif_tx_trylock(txq)) {
        dp->st_rxq_check++;
        if ((skb = skb_peek(&dp->rq)) == NULL) {
            dp->tasklet_pending = 0;
            if (netif_queue_stopped(_dev))
                netif_wake_queue(_dev);
        } else {
            dp->st_rxq_rsch++;
            __netif_tx_unlock(txq);
            goto resched;
        }
        __netif_tx_unlock(txq);
    } else {
resched:
        dp->tasklet_pending = 1;
        tasklet_schedule(&dp->ifb_tasklet);
    }

}
Code example #16
static void
tx_d_frame(struct hfc4s8s_l1 *l1p)
{
	struct sk_buff *skb;
	u_char f1, f2;
	u_char *cp;
	long cnt;

	if (l1p->l1_state != 7)
		return;

	/* TX fifo */
	Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + 4));
	wait_busy(l1p->hw);

	f1 = Read_hfc8(l1p->hw, A_F1);
	f2 = Read_hfc8_stable(l1p->hw, A_F2);

	if ((f1 ^ f2) & MAX_F_CNT)
		return;		/* fifo is still filled */

	if (l1p->tx_cnt > 0) {
		cnt = l1p->tx_cnt;
		l1p->tx_cnt = 0;
		l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | CONFIRM,
				   (void *) cnt);
	}

	if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
		cp = skb->data;
		cnt = skb->len;
#ifndef HISAX_HFC4S8S_PCIMEM
		SetRegAddr(l1p->hw, A_FIFO_DATA0);
#endif

		while (cnt >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
			fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
				     *(unsigned long *) cp);
#else
			SetRegAddr(l1p->hw, A_FIFO_DATA0);
			fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
#endif
			cp += 4;
			cnt -= 4;
		}

#ifdef HISAX_HFC4S8S_PCIMEM
		while (cnt--)
			fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
#else
		while (cnt--)
			fWrite_hfc8(l1p->hw, *cp++);
#endif

		l1p->tx_cnt = skb->truesize;
		Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);	/* increment f counter */
		wait_busy(l1p->hw);

		dev_kfree_skb(skb);
	}
}				/* tx_d_frame */
Code example #17
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *stats = this_cpu_ptr(np->stats);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned long flags;

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irqsave(&np->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&np->tx_lock, flags);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	u64_stats_update_end(&stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Code example #18
static void
tx_b_frame(struct hfc4s8s_btype *bch)
{
	struct sk_buff *skb;
	struct hfc4s8s_l1 *l1 = bch->l1p;
	u_char *cp;
	int cnt, max, hdlc_num;
	long ack_len = 0;

	if (!l1->enabled || (bch->mode == L1_MODE_NULL))
		return;

	/* TX fifo */
	Write_hfc8(l1->hw, R_FIFO,
		   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
	wait_busy(l1->hw);
	do {

		if (bch->mode == L1_MODE_HDLC) {
			hdlc_num = Read_hfc8(l1->hw, A_F1) & MAX_F_CNT;
			hdlc_num -=
			    (Read_hfc8_stable(l1->hw, A_F2) & MAX_F_CNT);
			if (hdlc_num < 0)
				hdlc_num += 16;
			if (hdlc_num >= 15)
				break;	/* fifo still filled up with hdlc frames */
		} else
			hdlc_num = 0;

		if (!(skb = bch->tx_skb)) {
			if (!(skb = skb_dequeue(&bch->tx_queue))) {
				l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
				    ~((bch->bchan == 1) ? 1 : 4);
				break;	/* list empty */
			}
			bch->tx_skb = skb;
			bch->tx_cnt = 0;
		}

		if (!hdlc_num)
			l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
			    ((bch->bchan == 1) ? 1 : 4);
		else
			l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
			    ~((bch->bchan == 1) ? 1 : 4);

		max = Read_hfc16_stable(l1->hw, A_Z2);
		max -= Read_hfc16(l1->hw, A_Z1);
		if (max <= 0)
			max += 384;
		max--;

		if (max < 16)
			break;	/* don't write to small amounts of bytes */

		cnt = skb->len - bch->tx_cnt;
		if (cnt > max)
			cnt = max;
		cp = skb->data + bch->tx_cnt;
		bch->tx_cnt += cnt;

#ifndef HISAX_HFC4S8S_PCIMEM
		SetRegAddr(l1->hw, A_FIFO_DATA0);
#endif
		while (cnt >= 4) {
#ifdef HISAX_HFC4S8S_PCIMEM
			fWrite_hfc32(l1->hw, A_FIFO_DATA0,
				     *(unsigned long *) cp);
#else
			fWrite_hfc32(l1->hw, *(unsigned long *) cp);
#endif
			cp += 4;
			cnt -= 4;
		}

		while (cnt--)
#ifdef HISAX_HFC4S8S_PCIMEM
			fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
#else
			fWrite_hfc8(l1->hw, *cp++);
#endif

		if (bch->tx_cnt >= skb->len) {
			if (bch->mode == L1_MODE_HDLC) {
				/* increment f counter */
				Write_hfc8(l1->hw, A_INC_RES_FIFO, 1);
			}
			ack_len += skb->truesize;
			bch->tx_skb = NULL;
			bch->tx_cnt = 0;
			dev_kfree_skb(skb);
		} else
			/* Re-Select */
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 +
				    ((bch->bchan == 1) ? 0 : 2)));
		wait_busy(l1->hw);
	} while (1);

	if (ack_len)
		bch->b_if.ifc.l1l2((struct hisax_if *) &bch->b_if,
				   PH_DATA | CONFIRM, (void *) ack_len);
}				/* tx_b_frame */
Code example #19
/*----------------------------------------------------------------
* p80211netdev_rx_bh
*
* Deferred processing of all received frames.
*
* Arguments:
*	wlandev		WLAN network device structure
*	skb		skbuff containing a full 802.11 frame.
* Returns:
*	nothing
* Side effects:
*
----------------------------------------------------------------*/
static void p80211netdev_rx_bh(unsigned long arg)
{
	wlandevice_t *wlandev = (wlandevice_t *) arg;
	struct sk_buff *skb = NULL;
	netdevice_t     *dev = wlandev->netdev;
	p80211_hdr_a3_t *hdr;
	UINT16 fc;

        DBFENTER;

	/* Let's empty our queue */
	while ( (skb = skb_dequeue(&wlandev->nsd_rxq)) ) {
		if (wlandev->state == WLAN_DEVICE_OPEN) {

			if (dev->type != ARPHRD_ETHER) {
				/* RAW frame; we shouldn't convert it */
				// XXX Append the Prism Header here instead.

				/* set up various data fields */
				skb->dev = dev;
				skb_reset_mac_header(skb);
				skb->ip_summed = CHECKSUM_NONE;
				skb->pkt_type = PACKET_OTHERHOST;
				skb->protocol = htons(ETH_P_80211_RAW);
				dev->last_rx = jiffies;

				wlandev->linux_stats.rx_packets++;
				wlandev->linux_stats.rx_bytes += skb->len;
				netif_rx_ni(skb);
				continue;
			} else {
				hdr = (p80211_hdr_a3_t *)skb->data;
				fc = ieee2host16(hdr->fc);
				if (p80211_rx_typedrop(wlandev, fc)) {
					dev_kfree_skb(skb);
					continue;
				}

				/* perform mcast filtering */
				if (wlandev->netdev->flags & IFF_ALLMULTI) {
					/* allow my local address through */
					if (memcmp(hdr->a1, wlandev->netdev->dev_addr, WLAN_ADDR_LEN) != 0) {
						/* but reject anything else that isn't multicast */
						if (!(hdr->a1[0] & 0x01)) {
							dev_kfree_skb(skb);
							continue;
						}
					}
				}

				if ( skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0 ) {
					skb->dev->last_rx = jiffies;
					wlandev->linux_stats.rx_packets++;
					wlandev->linux_stats.rx_bytes += skb->len;
					netif_rx_ni(skb);
					continue;
				}
				WLAN_LOG_DEBUG(1, "p80211_to_ether failed.\n");
			}
		}
		dev_kfree_skb(skb);
	}

        DBFEXIT;
}
Code example #20
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;


#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else 
		{
#endif 

			if (skb->destructor)
				/* cannot kfree_skb() from hard-IRQ context when a
				 * destructor is present, so use the any-context free */
				dev_kfree_skb_any(skb);
			else
				/* no destructor: a plain free suffices here */
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;

		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i;
	struct sk_buff *skb;

	if (!bcm_static_skb || (len > (PAGE_SIZE * 2))) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}


	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;
		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}
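osl_pktfree() above chooses between dev_kfree_skb() and dev_kfree_skb_any() depending on whether a destructor is attached. For reference, the context rules as a small sketch; this is essentially what dev_kfree_skb_any() does internally:

/* dev_kfree_skb()     - process or softirq context with IRQs enabled
 * dev_kfree_skb_irq() - hardirq context or IRQs disabled (defers to softirq)
 * dev_kfree_skb_any() - picks the right variant at runtime
 */
static void free_tx_skb(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}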
Code example #21
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	nop.nop_link = nopaddr;

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	
	dev->trans_start = jiffies;

	
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
Code example #22
static ssize_t ieee80211_if_parse_tkip_mic_test(
	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
	struct ieee80211_local *local = sdata->local;
	u8 addr[ETH_ALEN];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	if (!mac_pton(buf, addr))
		return -EINVAL;

	if (!ieee80211_sdata_running(sdata))
		return -ENOTCONN;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, local->hw.extra_tx_headroom);

	/* start from a zeroed 24-byte 802.11 data header */
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr->addr1, addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
		break;
	case NL80211_IFTYPE_STATION:
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		sdata_lock(sdata);
		if (!sdata->u.mgd.associated) {
			sdata_unlock(sdata);
			dev_kfree_skb(skb);
			return -ENOTCONN;
		}
		memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, addr, ETH_ALEN);
		sdata_unlock(sdata);
		break;
	default:
		dev_kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	hdr->frame_control = fc;

	/*
	 * Add some length to the test frame to make it look a bit more valid.
	 * The exact contents do not matter, since the recipient is required
	 * to drop the frame because of the Michael MIC failure.
	 */
	memset(skb_put(skb, 50), 0, 50);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;

	ieee80211_tx_skb(sdata, skb);

	return buflen;
}
Code example #23
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret, extra;
	u16 fc;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	struct wl12xx_arp_rsp_template *tmpl;
	struct ieee80211_hdr_3addr *hdr;
	struct arphdr *arp_hdr;

	skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
			    WL1271_EXTRA_SPACE_MAX);
	if (!skb) {
		wl1271_error("failed to allocate buffer for arp rsp template");
		return -ENOMEM;
	}

	/* leave headroom for the 802.11 header and security bytes pushed below */
	skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);

	tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
	memset(tmpl, 0, sizeof(*tmpl));

	/* llc layer */
	memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
	tmpl->llc_type = cpu_to_be16(ETH_P_ARP);

	/* arp header */
	arp_hdr = &tmpl->arp_hdr;
	arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
	arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
	arp_hdr->ar_hln = ETH_ALEN;
	arp_hdr->ar_pln = 4;
	arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);

	/* arp payload */
	memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
	tmpl->sender_ip = wlvif->ip_addr;

	/* encryption space */
	switch (wlvif->encryption_type) {
	case KEY_TKIP:
		extra = WL1271_EXTRA_SPACE_TKIP;
		break;
	case KEY_AES:
		extra = WL1271_EXTRA_SPACE_AES;
		break;
	case KEY_NONE:
	case KEY_WEP:
	case KEY_GEM:
		extra = 0;
		break;
	default:
		wl1271_warning("Unknown encryption type: %d",
			       wlvif->encryption_type);
		ret = -EINVAL;
		goto out;
	}

	if (extra) {
		u8 *space = skb_push(skb, extra);
		memset(space, 0, extra);
	}

	/* QoS header - BE */
	if (wlvif->sta.qos)
		memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));

	/* mac80211 header */
	hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
	if (wlvif->sta.qos)
		fc |= IEEE80211_STYPE_QOS_DATA;
	else
		fc |= IEEE80211_STYPE_DATA;
	if (wlvif->encryption_type != KEY_NONE)
		fc |= IEEE80211_FCTL_PROTECTED;

	hdr->frame_control = cpu_to_le16(fc);
	memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
	memset(hdr->addr3, 0xff, ETH_ALEN);

	ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
				      skb->data, skb->len, 0,
				      wlvif->basic_rate);
out:
	dev_kfree_skb(skb);
	return ret;
}
Code example #24
File: svc.c  Project: 274914765/C
static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
{
    struct sock *sk = sock->sk;
    struct sk_buff *skb;
    struct atmsvc_msg *msg;
    struct atm_vcc *old_vcc = ATM_SD(sock);
    struct atm_vcc *new_vcc;
    int error;

    lock_sock(sk);

    error = svc_create(sock_net(sk), newsock,0);
    if (error)
        goto out;

    new_vcc = ATM_SD(newsock);

    pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc);
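    /* wait for the signalling daemon to queue a connect indication */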
    while (1) {
        DEFINE_WAIT(wait);

        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
        while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
               sigd) {
            if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
            if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
                error = -sk->sk_err;
                break;
            }
            if (flags & O_NONBLOCK) {
                error = -EAGAIN;
                break;
            }
            release_sock(sk);
            schedule();
            lock_sock(sk);
            if (signal_pending(current)) {
                error = -ERESTARTSYS;
                break;
            }
            prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
        }
        finish_wait(sk->sk_sleep, &wait);
        if (error)
            goto out;
        if (!skb) {
            error = -EUNATCH;
            goto out;
        }
        msg = (struct atmsvc_msg *) skb->data;
        new_vcc->qos = msg->qos;
        set_bit(ATM_VF_HASQOS,&new_vcc->flags);
        new_vcc->remote = msg->svc;
        new_vcc->local = msg->local;
        new_vcc->sap = msg->sap;
        error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
                    msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci);
        dev_kfree_skb(skb);
        sk->sk_ack_backlog--;
        if (error) {
            sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
                &old_vcc->qos,error);
            error = error == -EAGAIN ? -EBUSY : error;
            goto out;
        }
        /* wait should be short, so we ignore the non-blocking flag */
        set_bit(ATM_VF_WAITING, &new_vcc->flags);
        prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
        sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL);
        while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
            release_sock(sk);
            schedule();
            lock_sock(sk);
            prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
        }
        finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
        if (!sigd) {
            error = -EUNATCH;
            goto out;
        }
        if (!sk_atm(new_vcc)->sk_err)
            break;
        if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) {
            error = -sk_atm(new_vcc)->sk_err;
            goto out;
        }
    }
    newsock->state = SS_CONNECTED;
out:
    release_sock(sk);
    return error;
}
Code example #25
File: caif_virtio.c  Project: 383530895/linux
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

again:
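	/* consume received buffers until the quota is spent or the ring is empty */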
	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		/* the descriptor carries a physical address; map it back to virtual */
		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
		/* TODO: Add check on valid buffer address */

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push received packet up the stack. */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
			goto again;
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}
Code example #26
File: isdn_x25iface.c  Project: robacklin/ts7800
/* process a frame handed over to us from linux network layer. First byte
   semantics as defined in Documentation/networking/x25-iface.txt
   */
static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
	unsigned char firstbyte = skb->data[0];
	enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
	int ret = 0;
	IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state );
	switch ( firstbyte ){
	case 0x00: /* dl_data request */
		if( *state == WAN_CONNECTED ){
			skb_pull(skb, 1);
			cprot -> net_dev -> trans_start = jiffies;
			ret = ( cprot -> dops -> data_req(cprot, skb) );
			/* prepare for future retransmissions */
			if( ret ) skb_push(skb,1);
			return ret;
		}
		illegal_state_warn( *state, firstbyte ); 
		break;
	case 0x01: /* dl_connect request */
		if( *state == WAN_DISCONNECTED ){
			*state = WAN_CONNECTING;
		        ret = cprot -> dops -> connect_req(cprot);
			if(ret){
				/* reset state and notify upper layer about
				 * immediately failed attempts */
				isdn_x25iface_disconn_ind(cprot);
			}
		} else {
			illegal_state_warn( *state, firstbyte );
		}
		break;
	case 0x02: /* dl_disconnect request */
		switch ( *state ){
		case WAN_DISCONNECTED: 
			/* Should not happen. However, give the upper layer a
			   chance to recover from the inconsistency, but don't
			   trust the lower layer to send the disconn_confirm
			   when already disconnected */
			printk(KERN_WARNING "isdn_x25iface_xmit: disconnect"
			       " requested while disconnected\n");
			isdn_x25iface_disconn_ind(cprot);
			break; /* prevent infinite loops */
		case WAN_CONNECTING:
		case WAN_CONNECTED:
			*state = WAN_DISCONNECTED;
			cprot -> dops -> disconn_req(cprot);
			break;
		default:
			illegal_state_warn( *state, firstbyte );
		}
		break;
	case 0x03: /* changing lapb parameters requested */
		printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
		       " options not yet supported\n");
		break;
	default:
		printk(KERN_WARNING "isdn_x25iface_xmit: frame with illegal"
		       " first byte %x ignored:\n", firstbyte);
	}
	dev_kfree_skb(skb);
	return 0;
}
Code example #27
static KAL_INT32 eemcs_ipc_rx_callback(struct sk_buff *skb, KAL_UINT32 private_data)
{
    CCCI_BUFF_T *p_cccih = NULL;
    KAL_UINT32  node_id;
    IPC_MSGSVC_TASKMAP_T *id_map;
    unsigned int i = 0;
    char * addr;

    DEBUG_LOG_FUNCTION_ENTRY;

    /* a NULL skb would be dereferenced below; drop out early instead */
    if (!skb){
        DBGLOG(IPCD,ERR,"[RX]NULL skb, packet dropped");
        DEBUG_LOG_FUNCTION_LEAVE;
        return KAL_SUCCESS ;
    }
    p_cccih = (CCCI_BUFF_T *)skb->data;
    DBGLOG(IPCD,DBG,"[RX]CCCI_H(0x%08X, 0x%08X, %02d, 0x%08X)", \
        p_cccih->data[0],p_cccih->data[1],p_cccih->channel, p_cccih->reserved);
    
#ifndef _EEMCS_IPCD_LB_UT_
    /* Check IPC task id and extq_id */
    if ((id_map=unify_AP_id_2_local_id(p_cccih->reserved))==NULL)
    {
       DBGLOG(IPCD,ERR,"Wrong AP Unify id (%#x)@RX.!!! PACKET DROP !!!\n",p_cccih->reserved);
       dev_kfree_skb(skb);
       return KAL_SUCCESS ;
    }
    node_id = id_map->task_id;
#else
    node_id = 0;
#endif
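    /* node owned by a kernel client: unpack the ILM and consume the skb here */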
    if(IPCD_KERNEL == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        ipc_ilm_t* p_ilm = NULL;
        skb_pull(skb, sizeof(CCCI_BUFF_T));
        p_ilm = (ipc_ilm_t*)(skb->data);
        p_ilm->dest_mod_id = p_cccih->reserved;
        p_ilm->local_para_ptr = (local_para_struct *)(p_ilm+1);

        if (p_ilm->local_para_ptr != NULL) {
            DBGLOG(IPCD, INF, "[RX][KERN]src=%d dest=0x%x sap=0x%x msg=0x%x local_ptr=%p msg_len=%d", \
				p_ilm->src_mod_id, p_ilm->dest_mod_id, p_ilm->sap_id, p_ilm->msg_id,\
				p_ilm->local_para_ptr, p_ilm->local_para_ptr->msg_len); 
            for (i=0; i<32; i++) {
		addr = (char *)p_ilm;
		DBGLOG(IPCD, DBG, "%p=%x", (addr+i), *(addr+i));
            }
        }
        #ifdef ENABLE_CONN_COEX_MSG		
        mtk_conn_md_bridge_send_msg((ipc_ilm_t*)(skb->data));
        #endif
        dev_kfree_skb(skb);
    }
    else if(IPCD_OPEN == atomic_read(&eemcs_ipc_inst.ipc_node[node_id].dev_state)){
        skb_queue_tail(&eemcs_ipc_inst.ipc_node[node_id].rx_skb_list, skb); /* spin_lock_irqsave inside; see skbuff.c */
        atomic_inc(&eemcs_ipc_inst.ipc_node[node_id].rx_pkt_cnt);     /* increase rx_pkt_cnt */
        kill_fasync(&eemcs_ipc_inst.ipc_node[node_id].fasync, SIGIO, POLL_IN);
        wake_up_poll(&eemcs_ipc_inst.ipc_node[node_id].rx_waitq,POLLIN); /* wake up rx_waitq */
    }else{
        DBGLOG(IPCD, ERR, "PKT DROP while ipc dev(%d) closed", node_id);
        dev_kfree_skb(skb);
        eemcs_update_statistics(0, eemcs_ipc_inst.eemcs_port_id, RX, DROP);
    }
    
    DEBUG_LOG_FUNCTION_LEAVE;
	return KAL_SUCCESS ;
}
Code example #28
File: usbnet.c  Project: gismo141/picosafe
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	// reap skbs whose URBs have completed; rx_done frames go up the stack
	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
					if (rx_submit (dev, urb, GFP_ATOMIC) ==
					    -ENOLINK)
						return;
				}
			}
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
Code example #29
ssize_t eemcs_ipc_kern_write(ipc_ilm_t *in_ilm){
    ssize_t ret   = 0;
    eemcs_ipc_node_t *curr_node = NULL;
    KAL_UINT8 node_id = 0;
    KAL_UINT8 port_id = eemcs_ipc_inst.eemcs_port_id; /* port_id */
    KAL_UINT32 p_type, control_flag;    
    struct sk_buff *new_skb;
    CCCI_BUFF_T *ccci_header;
    ipc_ilm_t *ilm=NULL;
    IPC_MSGSVC_TASKMAP_T *id_map;
    size_t count = sizeof(ipc_ilm_t);
    size_t skb_alloc_size;
    char * addr;
    int i = 0;
    DEBUG_LOG_FUNCTION_ENTRY;
    
    // src module id check
    node_id =(KAL_UINT8) (in_ilm->src_mod_id & (~AP_UNIFY_ID_FLAG)); // source id is ap side txq_id
    if (node_id >= EEMCS_IPCD_MAX_NUM){
        DBGLOG(IPCD, ERR, "invalid src_mod_id=0x%x", in_ilm->src_mod_id);
        ret = -EINVAL;
        goto _exit;
    }
    curr_node = (eemcs_ipc_node_t *)&eemcs_ipc_inst.ipc_node[node_id];
    node_id = curr_node->ipc_node_id;/* node_id */
    if (atomic_read(&curr_node->dev_state) != IPCD_KERNEL){
        DBGLOG(IPCD, ERR, "invalid dev_state(not IPCD_KERNEL), src_mod_id=0x%x", in_ilm->src_mod_id);
        ret = -EINVAL;
        goto _exit;
    }

    p_type = ccci_get_port_type(port_id);
    if(p_type != EX_T_USER) 
    {
        DBGLOG(IPCD, ERR, "PORT%d type(%d) is not a user port, access refused", port_id, p_type);
        ret=-EINVAL;
        goto _exit;                    
    }
	
    control_flag = ccci_get_port_cflag(port_id);
    if (check_device_state() == EEMCS_EXCEPTION) {//modem exception		
        if ((control_flag & TX_PRVLG2) == 0) {
            DBGLOG(IPCD, TRA, "[TX]PORT%d kernel write fail when modem exception", port_id);
            return -ETXTBSY;
        }
    } else if (check_device_state() != EEMCS_BOOTING_DONE) {//modem not ready
        if ((control_flag & TX_PRVLG1) == 0) {
            DBGLOG(IPCD, TRA, "[TX]PORT%d kernel write fail when modem not ready", port_id);
            return -ENODEV;
        }
    }

    DBGLOG(IPCD, INF, "[TX][KERN]iminor=%d src=0x%x dest=0x%x sap=0x%x msg_id=0x%x local_ptr=%#X peer_ptr=%#X",\
                node_id, \
                (unsigned int)in_ilm->src_mod_id, (unsigned int)in_ilm->dest_mod_id, \
                (unsigned int)in_ilm->sap_id, (unsigned int)in_ilm->msg_id, \
                (unsigned int)in_ilm->local_para_ptr, (unsigned int)in_ilm->peer_buff_ptr);

    if(in_ilm->local_para_ptr != NULL){
        count = sizeof(ipc_ilm_t) + in_ilm->local_para_ptr->msg_len;		
        DBGLOG(IPCD, DBG, "[TX][KERN]ilm_len=%d local_len=%d msg_len=%d", \
		sizeof(ipc_ilm_t), sizeof(local_para_struct), count);
    }
	
    if((control_flag & EXPORT_CCCI_H) && (count < sizeof(CCCI_BUFF_T)))
    {
        DBGLOG(IPCD, ERR, "invalid write_len(%d) of PORT%d", count, port_id);
        ret=-EINVAL;
        goto _exit;            
    }

    if(control_flag & EXPORT_CCCI_H){
        if(count > (MAX_TX_BYTE+sizeof(CCCI_BUFF_T))){
            DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE+sizeof(CCCI_BUFF_T);
        }
        skb_alloc_size = count - sizeof(CCCI_BUFF_T);
    }else{
        if(count > MAX_TX_BYTE){
            DBGLOG(IPCD, WAR, "PORT%d write_len(%d)>MTU(%d)", port_id, count, MAX_TX_BYTE);
            count = MAX_TX_BYTE;
        }
        skb_alloc_size = count;
    }

    if (ccci_ch_write_space_alloc(eemcs_ipc_inst.ccci_ch.tx)==0){
        DBGLOG(IPCD, WAR, "PORT%d write: no tx space available", port_id);
        ret = -EAGAIN;
        goto _exit;
    }	
    
    new_skb = ccci_ipc_mem_alloc(skb_alloc_size + CCCI_IPC_HEADER_ROOM, GFP_ATOMIC);
    if(NULL == new_skb)
    {
        DBGLOG(IPCD, ERR, "[TX]PORT%d alloc skb fail", port_id);
        ret = -ENOMEM;
        goto _exit;            
    }
    
    /* reserve SDIO_H header room */
    #ifdef CCCI_SDIO_HEAD
    skb_reserve(new_skb, sizeof(SDIO_H));
    #endif

    ccci_header = (CCCI_BUFF_T *)skb_put(new_skb, sizeof(CCCI_BUFF_T)) ;
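    /* CCCI header fields are filled in below, once the payload is in place */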

    memcpy(skb_put(new_skb, sizeof(ipc_ilm_t)), in_ilm, sizeof(ipc_ilm_t));

    /* append the local parameters, guarding against a NULL pointer */
    if (in_ilm->local_para_ptr != NULL) {
        count = in_ilm->local_para_ptr->msg_len;
        memcpy(skb_put(new_skb, count), in_ilm->local_para_ptr, count);
    } else {
        count = 0;
    }

    ilm = (ipc_ilm_t*)((char*)ccci_header + sizeof(CCCI_BUFF_T));
    for (i=0; i<count+sizeof(ipc_ilm_t); i++) {
        addr = (char *)ilm;
        DBGLOG(IPCD, DBG, "%p=%x", (addr+i), *(addr+i));
    }
    /* Check IPC extq_id */
    if ((id_map=local_MD_id_2_unify_id(ilm->dest_mod_id))==NULL)
    {
        DBGLOG(IPCD,ERR,"Invalid dest_mod_id=%d",ilm->dest_mod_id);
        dev_kfree_skb(new_skb);
        ret=-EINVAL;
        goto _exit;
    }
    
    /* the caller hands down only the payload; fill in the CCCI header now */
    ccci_header->data[1]    = count + sizeof(CCCI_BUFF_T);
    ccci_header->reserved   = id_map->extq_id;
    ccci_header->channel    = eemcs_ipc_inst.ccci_ch.tx;

    DBGLOG(IPCD, DBG, "[TX][KERN]PORT%d CCCI_MSG(0x%08X, 0x%08X, %02d, 0x%08X)", 
                    port_id, 
                    ccci_header->data[0], ccci_header->data[1],
                    ccci_header->channel, ccci_header->reserved);

  
    ret = ccci_ch_write_desc_to_q(ccci_header->channel, new_skb);

    if (KAL_SUCCESS != ret) {
        DBGLOG(IPCD, ERR, "PKT DROP of ch%d!",ccci_header->channel);
        dev_kfree_skb(new_skb);
        ret = -EAGAIN;
    } else {
        atomic_inc(&curr_node->tx_pkt_cnt);
        wake_up(&curr_node->tx_waitq); /* wake up tx_waitq for notify poll_wait of state change */
    }

_exit:
    DEBUG_LOG_FUNCTION_LEAVE;
    if(!ret){
        return count;
    }
    return ret;    
}
Code example #30
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	/* unicast targets a single partner EP; multicast walks all of them */
	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			ret = NETDEV_TX_OK;
		} else {
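			/* partner EP is reachable: pad runt frames before the copy */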
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}