Example #1
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
Example #2
static int pep_reply(struct sock *sk, struct sk_buff *oskb,
			u8 code, const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	__skb_push(skb, sizeof(*ph));
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	return pn_skb_send(sk, skb, &pipe_srv);
}
Example #3
static int pn_raw_send(const void *data, int len, struct net_device *dev,
			u16 dst, u16 src, u8 res)
{
	struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0)
		skb->pkt_type = PACKET_LOOPBACK;

	skb_reserve(skb, MAX_PHONET_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);

	/*
	 * [P120208-3952]
	 * Prevent stray ISI messages from going into the FMT channel.
	 */
	if ((len == 4) && ((pn_dev(dst) != 0x60) && (pn_dev(dst) != 0x64))) {
		pr_info("dst 0x%x, res: 0x%x\n", dst, res);
		/* Drop the message to prevent VT call disconnection. */
		kfree_skb(skb);
		return 1;
	}

	return pn_send(skb, dev, dst, src, res, 1);
}
Example #4
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
Example #5
File: cxgbit_ddp.c Project: Anjali05/linux
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb  = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}
Example #6
/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);
	while (e->arpq_head) {
		skb = e->arpq_head;
		e->arpq_head = skb->next;
		skb->next = NULL;
		cxgb3_ofld_send(dev, skb);
	}
	e->arpq_tail = NULL;
	e->state = L2T_STATE_VALID;

	return 0;
}
Example #7
File: rx.c Project: 3bsa/linux
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
Example #8
static void bnep_net_set_mc_list(struct net_device *dev)
{
#ifdef CONFIG_BT_BNEP_MC_FILTER
	struct bnep_session *s = netdev_priv(dev);
	struct sock *sk = s->sock->sk;
	struct bnep_set_filter_req *r;
	struct sk_buff *skb;
	int size;

	BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));

	size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
	skb  = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Multicast list allocation failed", dev->name);
		return;
	}

	r = (void *) skb->data;
	__skb_put(skb, sizeof(*r));

	r->type = BNEP_CONTROL;
	r->ctrl = BNEP_FILTER_MULTI_ADDR_SET;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		u8 start[ETH_ALEN] = { 0x01 };

		/* Request all addresses */
		memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN);
		memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
		r->len = htons(ETH_ALEN * 2);
	} else {
		struct netdev_hw_addr *ha;
		int i, len = skb->len;

		if (dev->flags & IFF_BROADCAST) {
			memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
			memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
		}

		/* FIXME: We should group addresses here. */

		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			if (i == BNEP_MAX_MULTICAST_FILTERS)
				break;
			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);

			i++;
		}
		r->len = htons(skb->len - len);
	}

	skb_queue_tail(&sk->sk_write_queue, skb);
	wake_up_interruptible(sk_sleep(sk));
#endif
}
Example #9
static int pn_raw_send(const void *data, int len, struct net_device *dev,
                       u16 dst, u16 src, u8 res)
{
    struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
    if (skb == NULL)
        return -ENOMEM;

    skb_reserve(skb, MAX_PHONET_HEADER);
    __skb_put(skb, len);
    skb_copy_to_linear_data(skb, data, len);
    return pn_send(skb, dev, dst, src, res, 1);
}
Example #10
File: dma.c Project: Brainiarc7/mt76
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 * rxfce = (u32 *) skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}
Example #11
static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
		struct sk_buff *skb, gfp_t flags)
{
	int pack_len = skb->len;
	int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int need_tail = 0;
	__le16 *len;

	/* if packet and our header is smaler than 64 pad to 64 (+ ZLP) */
	if ((pack_with_header_len) < dev->maxpacket)
		need_tail = dev->maxpacket - pack_with_header_len + 1;
	/*
	 * usbnet would send a ZLP if packetlength mod urbsize == 0 for us,
	 * but we need to know ourself, because this would add to the length
	 * we send down to the device...
	 */
	else if (!(pack_with_header_len % dev->maxpacket))
		need_tail = 1;

	if (!skb_cloned(skb) &&
			(headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
		if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
			skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
					skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb,
				INT51X1_HEADER_SIZE,
				need_tail,
				flags);
		dev_kfree_skb_any(skb);
		if (!skb2)
			return NULL;
		skb = skb2;
	}

	pack_len += need_tail;
	pack_len &= 0x07ff;

	len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
	*len = cpu_to_le16(pack_len);

	if(need_tail)
		memset(__skb_put(skb, need_tail), 0, need_tail);

	return skb;
}
Example #12
static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
		struct sk_buff *skb, gfp_t flags)
{
	int pack_len = skb->len;
	int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int need_tail = 0;
	__le16 *len;

	/* if packet and our header is smaler than 64 pad to 64 (+ ZLP) */
	if ((pack_with_header_len) < dev->maxpacket)
		need_tail = dev->maxpacket - pack_with_header_len + 1;
	/*
	 * usbnet would send a ZLP if packetlength mod urbsize == 0 for us,
	 * but we need to know ourself, because this would add to the length
	 * we send down to the device...
	 */
	else if (!(pack_with_header_len % dev->maxpacket))
		need_tail = 1;

	if (!skb_cloned(skb) &&
			(headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) {
		if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) {
			skb->data = memmove(skb->head + INT51X1_HEADER_SIZE,
					skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb,
				INT51X1_HEADER_SIZE,
				need_tail,
				flags);
		dev_kfree_skb_any(skb);
		if (!skb2)
			return NULL;
		skb = skb2;
	}

	pack_len += need_tail;
	pack_len &= 0x07ff;

	len = (__le16 *) __skb_push(skb, INT51X1_HEADER_SIZE);
	*len = cpu_to_le16(pack_len);

	if(need_tail)
		memset(__skb_put(skb, need_tail), 0, need_tail);

	return skb;
}
Example #13
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
		if (!wr_wait.done) {
			printk(KERN_ERR MOD "Device %s not responding!\n",
			       pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			ret = -EIO;
		} else
			ret = wr_wait.ret;
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
Example #14
static int pn_raw_send(const void *data, int len, struct net_device *dev,
			u16 dst, u16 src, u8 res)
{
	struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0)
		skb->pkt_type = PACKET_LOOPBACK;

	skb_reserve(skb, MAX_PHONET_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	return pn_send(skb, dev, dst, src, res, 1);
}
Example #15
File: pep.c Project: 33d/linux-2.6.21-hh20
static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
					int len, gfp_t priority)
{
	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return NULL;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, payload, len);
	__skb_push(skb, sizeof(struct pnpipehdr));
	skb_reset_transport_header(skb);
	return skb;
}
Example #16
File: zd_mac.c Project: ivucica/linux
int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
	if (!skb) {
		dev_warn(zd_mac_dev(mac), "Could not allocate skb.\n");
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(struct zd_rt_hdr));
	memcpy(__skb_put(skb, length), buffer, length);
	skb_queue_tail(&mac->rx_queue, skb);
	tasklet_schedule(&mac->rx_tasklet);
	return 0;
}
Example #17
File: cq.c Project: ANFS/ANFS-kernel
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
Example #18
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
Example #19
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req *req = NULL;

	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
	INIT_TP_WR(t6req, 0);
	req = (struct cpl_act_open_req *)t6req;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
		f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
	req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
		f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				DELACK_V(f->fs.hitcnts) |
				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
					    0x7F) << 1) |
				TX_CHAN_V(f->fs.eport) |
				NO_CONG_V(f->fs.rpttid) |
				ULP_MODE_V(f->fs.nat_mode ?
					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
				TCAM_BYPASS_F | NON_OFFLOAD_F);

	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
								      f->dev)));
	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
			    RSS_QUEUE_V(f->fs.iq) |
			    TX_QUEUE_V(f->fs.nat_mode) |
			    T5_OPT_2_VALID_F |
			    RX_CHANNEL_F |
			    CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    PACE_V((f->fs.maskhash) |
				   ((f->fs.dirsteerhash) << 1)) |
			    CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}
Example #20
static void br_send_bpdu(struct net_bridge_port *p,
			 const unsigned char *data, int length)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(length+LLC_RESERVE);
	if (!skb)
		return;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_802_2);

	skb_reserve(skb, LLC_RESERVE);
	memcpy(__skb_put(skb, length), data, length);

	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
			    LLC_SAP_BSPAN, LLC_PDU_CMD);
	llc_pdu_init_as_ui_cmd(skb);

	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);

	skb_reset_mac_header(skb);

	/* hand the BPDU to the device; mainline wraps this in an
	 * NF_BR_LOCAL_OUT netfilter hook around dev_queue_xmit() */
	dev_queue_xmit(skb);
}
Example #21
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}
Example #22
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	struct sk_buff *skb;
	unsigned int len;

	len = sizeof(*fwr);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = __skb_put(skb, len);
	t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
			: adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
Example #23
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid,  u16 word, u64 mask, u64 val,
			 int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
				NO_REPLY_V(no_reply));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adap, skb);
	return 0;
}
Example #24
File: cq.c Project: ANFS/ANFS-kernel
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example #25
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int r_idx = skb->queue_mapping;
	int err;

	if ((skb->protocol ==  htons(ETH_P_8021Q)) &&
	    !vlan_tx_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err)
			return NETDEV_TX_OK;

		/* locate vlan header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	/* prepare packet for hardware time stamping */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		fm10k_ts_tx_enqueue(interface, skb);

	if (r_idx >= interface->num_tx_queues)
		r_idx %= interface->num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}
Example #26
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
Example #27
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
Example #28
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void kevent(void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent(struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				deverr(dev, "can't clear tx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				deverr(dev, "can't clear rx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running(dev->net))
			urb = usb_alloc_urb(0, GFP_KERNEL);
		else
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);

		if (info->link_reset) {
			retval = info->link_reset(dev);
			if (retval < 0) {
				devinfo(dev,
					"link reset failed (%d) usbnet usb-%s-%s, %s",
					retval,
					dev->udev->bus->bus_name,
					dev->udev->devpath,
					info->description);
			}
		}
	}

	if (dev->flags)
		devdbg(dev, "kevent done, flags = 0x%lx", dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void tx_complete(struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete(struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		/* async unlink */
		case -ESHUTDOWN:		/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays */
		/* and so it uses the same throttling mechanism. */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay,
					  jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev))
					devdbg(dev, "tx throttle %d",
					       urb->status);
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev))
				devdbg(dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	/* FIXME: device recovery -- reset? */
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	/* some devices want funky USB-level framing, for */
	/* win32 driver (usually) and/or hardware quirks */
	if (info->tx_fixup) {
		skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev))
				devdbg(dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data,
			  skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue(net);
		axusbnet_defer_kevent(dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err(dev))
			devdbg(dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail(&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN(dev))
			netif_stop_queue(net);
	}
	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else if (netif_msg_tx_queued(dev)) {
		devdbg(dev, "> tx, len %d, type 0x%x",
		       length, skb->protocol);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

/* tasklet (work deferred from completions, in_irq) or timer */

static void axusbnet_bh(unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			devdbg(dev, "bogus skb state %d", entry->state);
		}
	}

	/* waiting for all pending urbs to complete? */
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0)
			wake_up(dev->wait);

	/* or are we maybe short a few urbs? */
	} else if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !timer_pending(&dev->delay)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN(dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			/* don't refill the queue all at once */
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit(dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link(dev))
				devdbg(dev, "rxqlen %d --> %d",
				       temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule(&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN(dev))
			netif_wake_queue(dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static
void axusbnet_disconnect(struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev(intf);

	if (netif_msg_probe(dev))
		devinfo(dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev(net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	free_netdev(net);
	usb_put_dev(xdev);
}

/*-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static int
axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk(KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev(udev);
	interface = udev->cur_altsetting;

	usb_get_dev(xdev);

	status = -ENOMEM;

	/* set up our own records */
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		printk(KERN_ERR "can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV |
					 NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	INIT_WORK(&dev->kevent, kevent, dev);
#else
	INIT_WORK(&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer(&dev->delay);
	/* mutex_init(&dev->phy_mutex); */

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	/* dma_supported() is deeply broken on almost all architectures */
	/* possible with some EHCI controllers */
	if (dma_supported(&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	net->open		= axusbnet_open,
	net->stop		= axusbnet_stop,
	net->hard_start_xmit	= axusbnet_start_xmit,
	net->tx_timeout	= axusbnet_tx_timeout,
	net->get_stats = axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	/* allow device-specific bind/init procedures */
	/* NOTE net->name still not usable ... */
	status = info->bind(dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status(dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev(net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe(dev))
		devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	/* ok, it's ready to go. */
	usb_set_intfdata(udev, dev);

	/* start as if the link is up */
	netif_device_attach(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind(dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend(struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet *dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach(dev->net);
		(void) unlink_urbs(dev, &dev->rxq);
		(void) unlink_urbs(dev, &dev->txq);
		usb_kill_urb(dev->interrupt);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach(dev->net);
	}
	return 0;
}

static int
axusbnet_resume(struct usb_interface *intf)
{
	struct usbnet	*dev = usb_get_intfdata(intf);
	int	retval = 0;

	if (!--dev->suspend_count)
		tasklet_schedule(&dev->bh);

	retval = init_status(dev, intf);
	if (retval < 0)
		return retval;

	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0 && netif_msg_ifup(dev))
			deverr(dev, "intr submit %d", retval);
	}

	return retval;
}
Example #29
static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	int			retval = NET_XMIT_SUCCESS;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if ((length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		retval = NET_XMIT_SUCCESS;
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return retval;
}
Example #30
File: qp.c Project: richardlee8681/linux
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	if (user) {
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;

		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_sq;
	} else {
		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;
	}
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}