Example no. 1
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
static int ztdeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
#else
static int ztdeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
#endif
{
	struct dahdi_span *span;
	struct ztdeth_header *zh;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	zh = (struct ztdeth_header *)skb_network_header(skb);
#else
	zh = (struct ztdeth_header *)skb->nh.raw;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	span = ztdeth_getspan(eth_hdr(skb)->h_source, zh->subaddr);
#else
	span = ztdeth_getspan(skb->mac.ethernet->h_source, zh->subaddr);
#endif	
	if (span) {
		skb_pull(skb, sizeof(struct ztdeth_header));
#ifdef NEW_SKB_LINEARIZE
		if (skb_is_nonlinear(skb))
			skb_linearize(skb);
#else
		if (skb_is_nonlinear(skb))
			skb_linearize(skb, GFP_KERNEL);
#endif
		dahdi_dynamic_receive(span, (unsigned char *)skb->data, skb->len);
	}
	kfree_skb(skb);
	return 0;
}
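Example 1 guards every sk_buff accessor behind kernel-version checks, including the two-argument skb_linearize() that older kernels used. A minimal compatibility sketch (assuming, as the example does, that the build system defines NEW_SKB_LINEARIZE when the single-argument API is available; the helper name is hypothetical):

/* Hypothetical helper, not part of the driver above: hide the skb_linearize
 * signature change so callers linearize through one entry point. */
static inline int compat_skb_linearize(struct sk_buff *skb)
{
#ifdef NEW_SKB_LINEARIZE
	return skb_is_nonlinear(skb) ? skb_linearize(skb) : 0;
#else
	return skb_is_nonlinear(skb) ? skb_linearize(skb, GFP_KERNEL) : 0;
#endif
}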
Example no. 2
unsigned int core_4to6(struct sk_buff *skb)
{
	struct iphdr *ip4_header;
	struct in_addr daddr;
	enum verdict result;

	skb_linearize(skb);

	ip4_header = ip_hdr(skb);

	daddr.s_addr = ip4_header->daddr;
	if (!pool4_contains(&daddr))
		return NF_ACCEPT;

	log_debug("===============================================");
	log_debug("Catching IPv4 packet: %pI4->%pI4", &ip4_header->saddr, &ip4_header->daddr);

	result = validate_skb_ipv4(skb);
	if (result != VER_CONTINUE)
		return result;

	return nat64_core(skb,
			compute_out_tuple_4to6,
			translating_the_packet_4to6,
			send_packet_ipv6);
}
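Note that core_4to6() above discards skb_linearize()'s return value before dereferencing the headers, so on allocation failure ip_hdr() may still see paged data. A hedged variant of that opening (assuming NF_DROP is an acceptable verdict for this hook under memory pressure):

	/* Sketch: refuse to touch a buffer we could not flatten. */
	if (skb_linearize(skb) != 0)
		return NF_DROP;

	ip4_header = ip_hdr(skb);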
Example no. 3
int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;

	caif_assert(buf != NULL);
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	/* Make sure SKB is writable */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
		return -EPROTO;
	}

	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
		return -EPROTO;
	}

	if (unlikely(skb_tailroom(skb) < buflen)) {
		PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
		return -EPROTO;
	}

	*buf = skb_put(skb, buflen);
	return 1;
}
Example no. 4
int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
{
	struct sk_buff *skb = pkt_to_skb(pkt);

	caif_assert(buf != NULL);
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(buflen > skb->len)) {
		PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
				"- failed\n");
		return -EPROTO;
	}

	if (unlikely(buflen > skb_headlen(skb))) {
		if (unlikely(skb_linearize(skb) != 0)) {
			PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
			return -EPROTO;
		}
	}

	*buf = skb->data;
	skb_pull(skb, buflen);

	return 1;
}
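Examples 3 and 4 are the two halves of CAIF's raw buffer API: append reserves writable tailroom, extract consumes bytes from the head, and both return 1 on success. A hypothetical caller (the payload and its length are illustrative only):

	/* Write 4 raw bytes at the tail, then consume 4 bytes from the head.
	 * Both calls linearize the underlying skb internally as needed. */
	void *tail, *head;
	u8 payload[4] = { 0xca, 0x1f, 0x00, 0x01 };

	if (cfpkt_raw_append(pkt, &tail, sizeof(payload)) == 1)
		memcpy(tail, payload, sizeof(payload));

	if (cfpkt_raw_extract(pkt, &head, sizeof(payload)) == 1)
		; /* head now points at the 4 consumed bytes */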
Example no. 5
/**
 * tipc_msg_eval: determine fate of message that found no destination
 * @buf: the buffer containing the message.
 * @dnode: return value: next-hop node, if message to be forwarded
 *
 * Does not consume buffer
 * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
 * code if message to be rejected
 */
int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dport;

	if (msg_type(msg) != TIPC_NAMED_MSG)
		return -TIPC_ERR_NO_PORT;
	if (skb_linearize(buf))
		return -TIPC_ERR_NO_NAME;
	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
		return -TIPC_ERR_NO_NAME;
	if (msg_reroute_cnt(msg) > 0)
		return -TIPC_ERR_NO_NAME;

	*dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg),
				       msg_nameinst(msg),
				       dnode);
	if (!dport)
		return -TIPC_ERR_NO_NAME;
	msg_incr_reroute_cnt(msg);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	return TIPC_OK;
}
Example no. 6
static int
ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;
	unsigned int dataoff, datalen, matchoff, matchlen;
	const char *dptr;
	int retc;

	ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);

	/* Only useful with UDP */
	if (iph.protocol != IPPROTO_UDP)
		return -EINVAL;

	/* No Data ? */
	dataoff = iph.len + sizeof(struct udphdr);
	if (dataoff >= skb->len)
		return -EINVAL;

	if ((retc = skb_linearize(skb)) < 0)
		return retc;
	dptr = skb->data + dataoff;
	datalen = skb->len - dataoff;

	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
		return -EINVAL;

	p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
	if (!p->pe_data)
		return -ENOMEM;

	p->pe_data_len = matchlen;

	return 0;
}
Example no. 7
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf:  buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err:  error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
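A sketch of the rejection path this helper serves (assuming the 3.17-era tipc_link_xmit(buf, dnode, selector) signature): when delivery fails, the header is reversed in place and the same buffer is bounced back toward its origin.

	/* Hypothetical caller inside the receive path. */
	u32 dnode;

	if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
		tipc_link_xmit(buf, dnode, 0);
	/* on failure, tipc_msg_reverse() has already freed the buffer */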
Example no. 8
static int cmtp_session(void *arg)
{
	struct cmtp_session *session = arg;
	struct sock *sk = session->sock->sk;
	struct sk_buff *skb;
	wait_queue_t wait;

	BT_DBG("session %p", session);

	set_user_nice(current, -15);

	init_waitqueue_entry(&wait, current);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (atomic_read(&session->terminate))
			break;
		if (sk->sk_state != BT_CONNECTED)
			break;

		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
			skb_orphan(skb);
			if (!skb_linearize(skb))
				cmtp_recv_frame(session, skb);
			else
				kfree_skb(skb);
		}

		cmtp_process_transmit(session);

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	down_write(&cmtp_session_sem);

	if (!(session->flags & (1 << CMTP_LOOPBACK)))
		cmtp_detach_device(session);

	fput(session->sock->file);

	__cmtp_unlink_session(session);

	up_write(&cmtp_session_sem);

	kfree(session);
	return 0;
}
Example no. 9
static int prep_msg(struct vector_private *vp,
	struct sk_buff *skb,
	struct iovec *iov)
{
	int iov_index = 0;
	int nr_frags, frag;
	skb_frag_t *skb_frag;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > MAX_IOV_SIZE) {
		if (skb_linearize(skb) != 0)
			goto drop;
		/* a successful linearize folds every fragment into the head */
		nr_frags = skb_shinfo(skb)->nr_frags;
	}
	if (vp->header_size > 0) {
		iov[iov_index].iov_len = vp->header_size;
		vp->form_header(iov[iov_index].iov_base, skb, vp);
		iov_index++;
	}
	iov[iov_index].iov_base = skb->data;
	if (nr_frags > 0) {
		iov[iov_index].iov_len = skb->len - skb->data_len;
		vp->estats.sg_ok++;
	} else
		iov[iov_index].iov_len = skb->len;
	iov_index++;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(skb)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		iov[iov_index].iov_len = skb_frag_size(skb_frag);
		iov_index++;
	}
	return iov_index;
drop:
	return -1;
}
Example no. 10
void broadcast(struct sk_buff *netfilter_socket_buffer){
    int res;

    struct sk_buff *broadcastSocketBuffer;
    struct nlmsghdr *netlink_header;
    
    broadcastSocketBuffer = nlmsg_new(netfilter_socket_buffer->len,0);
    
    if(!broadcastSocketBuffer)
    {
        printk(KERN_ERR "Failed to allocate new Broadcast Socket Buffer [590]\n");
        return;
    }
    
    netlink_header = nlmsg_put(broadcastSocketBuffer, 0, 0, NLMSG_DONE, netfilter_socket_buffer->len, 0);
    if (!netlink_header) {
        printk(KERN_ERR "Failed to put netlink header into Broadcast Socket Buffer [590]\n");
        kfree_skb(broadcastSocketBuffer);
        return;
    }

    NETLINK_CB(broadcastSocketBuffer).dst_group = 1;

    if (skb_is_nonlinear(netfilter_socket_buffer)) {
        // A non-linear buffer means the data is spread over fragments; pull it
        // back into one contiguous area before the memcpy below.
        if (skb_linearize(netfilter_socket_buffer) != 0) {
            printk(KERN_ERR "Failed to linearize Socket Buffer [590]\n");
            kfree_skb(broadcastSocketBuffer);
            return;
        }
    }
    
    memcpy(nlmsg_data(netlink_header), netfilter_socket_buffer->data, netfilter_socket_buffer->len);
    
    res = netlink_broadcast(netlink_broadcast_socket, broadcastSocketBuffer, 0, 1, GFP_ATOMIC);

    if(res < 0) {
        printk(KERN_ERR "Error (%d) while sending broadcast message. [590]\n",res);
    }    
}
Example no. 11
static int ipcomp6_output(struct sk_buff **pskb)
{
	int err;
	struct dst_entry *dst = (*pskb)->dst;
	struct xfrm_state *x = dst->xfrm;
	struct ipv6hdr *top_iph;
	int hdr_len;
	struct ipv6_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;
	int plen, dlen;
	u8 *start, *scratch = ipcd->scratch;

	hdr_len = (*pskb)->h.raw - (*pskb)->data;

	/* check whether datagram len is larger than threshold */
	if (((*pskb)->len - hdr_len) < ipcd->threshold) {
		goto out_ok;
	}

	if ((skb_is_nonlinear(*pskb) || skb_cloned(*pskb)) &&
		skb_linearize(*pskb, GFP_ATOMIC) != 0) {
		err = -ENOMEM;
		goto error;
	}

	/* compression */
	plen = (*pskb)->len - hdr_len;
	dlen = IPCOMP_SCRATCH_SIZE;
	start = (*pskb)->h.raw;

	err = crypto_comp_compress(ipcd->tfm, start, plen, scratch, &dlen);
	if (err) {
		goto error;
	}
	if ((dlen + sizeof(struct ipv6_comp_hdr)) >= plen) {
		goto out_ok;
	}
	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
	pskb_trim(*pskb, hdr_len + dlen + sizeof(struct ip_comp_hdr));

	/* insert ipcomp header and replace datagram */
	top_iph = (struct ipv6hdr *)(*pskb)->data;

	top_iph->payload_len = htons((*pskb)->len - sizeof(struct ipv6hdr));

	ipch = (struct ipv6_comp_hdr *)start;
	ipch->nexthdr = *(*pskb)->nh.raw;
	ipch->flags = 0;
	ipch->cpi = htons((u16 )ntohl(x->id.spi));
	*(*pskb)->nh.raw = IPPROTO_COMP;

out_ok:
	err = 0;

error:
	return err;
}
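The CPI stored in the IPComp header is simply the low 16 bits of the xfrm SPI; the htons/ntohl pair above is equivalent to this clearer spelling (shown for illustration, same arithmetic):

	/* Equivalent: take the low 16 bits of the host-order SPI and store
	 * them back in network order. */
	u16 cpi = (u16)ntohl(x->id.spi);
	ipch->cpi = htons(cpi);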
Example no. 12
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires header must reside in 3 buffer */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			if (mss != pdata->mss) {
				pdata->mss = mss;
				pdata->mac_ops->set_mss(pdata);
			}
			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
Example no. 13
static int writebuf_from_LL(int driverID, int channel, int ack,
			    struct sk_buff *skb)
{
	struct cardstate *cs = gigaset_get_cs_by_id(driverID);
	struct bc_state *bcs;
	unsigned char *ack_header;
	unsigned len;

	if (!cs) {
		pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
		return -ENODEV;
	}
	if (channel < 0 || channel >= cs->channels) {
		dev_err(cs->dev, "%s: invalid channel ID (%d)\n",
			__func__, channel);
		return -ENODEV;
	}
	bcs = &cs->bcs[channel];

	/* linearize the skb; the hardware drivers expect contiguous data */
	if (skb_linearize(skb) < 0) {
		dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
		return -ENOMEM;
	}
	len = skb->len;

	gig_dbg(DEBUG_LLDATA,
		"Receiving data from LL (id: %d, ch: %d, ack: %d, sz: %d)",
		driverID, channel, ack, len);

	if (!len) {
		if (ack)
			dev_notice(cs->dev, "%s: not ACKing empty packet\n",
				   __func__);
		return 0;
	}
	if (len > MAX_BUF_SIZE) {
		dev_err(cs->dev, "%s: packet too large (%d bytes)\n",
			__func__, len);
		return -EINVAL;
	}

	/* set up the acknowledgement header in the headroom */
	if (skb_headroom(skb) < HW_HDR_LEN) {
		/* should never happen */
		dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
		return -ENOMEM;
	}
	skb_set_mac_header(skb, -HW_HDR_LEN);
	skb->mac_len = HW_HDR_LEN;
	ack_header = skb_mac_header(skb);
	if (ack) {
		ack_header[0] = len & 0xff;
		ack_header[1] = len >> 8;
	} else {
		ack_header[0] = ack_header[1] = 0;
	}

	/* pass to the hardware-specific module */
	return cs->ops->send_skb(bcs, skb);
}
Example no. 14
//-----------------------------------------------------------------------------
static void
_gtpusp_tg4_add(struct sk_buff *old_skb_pP, const struct xt_action_param *par_pP)
{
  //-----------------------------------------------------------------------------
  struct iphdr   *old_iph_p       = ip_hdr(old_skb_pP);
  struct gtpuhdr  gtpuh;
  uint16_t        orig_iplen = 0;
  // CONNMARK
  enum ip_conntrack_info ctinfo;
  struct nf_conn *ct = NULL;
  u_int32_t newmark;

  if (skb_linearize(old_skb_pP) < 0) {
	PR_INFO(MODULE_NAME": skb no linearize\n");
    return;
  }
  orig_iplen = ntohs(old_iph_p->tot_len);

  //----------------------------------------------------------------------------
  // CONNMARK
  //----------------------------------------------------------------------------
  ct = nf_ct_get(old_skb_pP, &ctinfo);
  if (ct == NULL) {
    PR_INFO(MODULE_NAME": _gtpusp_target_add force targinfo ltun %u to skb_pP mark %u\n",
	            ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->ltun,
	            old_skb_pP->mark);
    newmark = ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->ltun;
  } else {
    //XT_CONNMARK_RESTORE:
    newmark          = old_skb_pP->mark ^ ct->mark;

    PR_INFO(MODULE_NAME": _gtpusp_target_add restore mark %u (skb mark %u ct mark %u) len %u sgw addr %x\n",
			newmark, old_skb_pP->mark, ct->mark, orig_iplen,
			((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->raddr);
    if (newmark != ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->ltun) {
  	  pr_warn(MODULE_NAME": _gtpusp_target_add restore mark 0x%x mismatch ltun 0x%x (rtun 0x%x)",
  			newmark, ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->ltun,
  			 ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->rtun);
    }
  }


  /* Add GTPu header */
  gtpuh.flags   = 0x30; /* v1 and Protocol-type=GTP */
  gtpuh.msgtype = 0xff; /* T-PDU */
  gtpuh.length  = htons(orig_iplen);
  gtpuh.tunid   = htonl(((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->rtun);

  _gtpusp_sock.addr_send.sin_addr.s_addr = ((const struct xt_gtpusp_target_info *)(par_pP->targinfo))->raddr;
  _gtpusp_ksocket_send(_gtpusp_sock.sock, &_gtpusp_sock.addr_send, (unsigned char*)&gtpuh, sizeof(gtpuh), (unsigned char*)old_iph_p, orig_iplen);
  return ;
}
Example no. 15
/* 
 * (1) len doesn't include the header by default.  I want this. 
 */
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	u32 n;

	if (dev_net(ifp) != &init_net)
		goto exit;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (skb_linearize(skb))
		goto exit;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* (1) */

	h = (struct aoe_hdr *) skb_mac_header(skb);
	n = get_unaligned_be32(&h->tag);
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;
		if (net_ratelimit())
			printk(KERN_ERR
				"%s%d.%d@%s; ecode=%d '%s'\n",
				"aoe: error packet from ",
				get_unaligned_be16(&h->major),
				h->minor, skb->dev->name,
				h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		printk(KERN_INFO "aoe: unknown cmd %d\n", h->cmd);
	}
exit:
	dev_kfree_skb(skb);
	return 0;
}
Example no. 16
int
kni_nl_unicast(int pid, struct sk_buff *skb_in,
               struct net_device *dev)
{
  struct sk_buff *skb;
  struct nlmsghdr *nlh;
  struct net *net = dev_net(dev);
  struct kni_net_namespace *kni_net = net_generic(net, kni_net_id);
  struct rw_kni_mbuf_metadata *meta_data;
  int size, err;
  unsigned char *data;
  
  size = NLMSG_ALIGN(sizeof(*meta_data) + skb_in->len);
  
  skb = nlmsg_new(size, GFP_KERNEL);
  if (skb == NULL){
    return -ENOMEM;
  }
  nlh = nlmsg_put(skb, pid, 0, KNI_NETLINK_MSG_TX, 0, 0);
  if (nlh == NULL){
    goto nlmsg_failure;
  }
  err = skb_linearize(skb_in);
  if (unlikely(err)){
    goto nlmsg_failure;
  }
  meta_data = (struct rw_kni_mbuf_metadata *)nlmsg_data(nlh);
  memset(meta_data, 0, sizeof(*meta_data));
  data = (unsigned char *)(meta_data + 1);
  RW_KNI_VF_SET_MDATA_ENCAP_TYPE(meta_data,
                                 skb_in->protocol);
  RW_KNI_VF_SET_MDATA_LPORTID(meta_data,
                              dev->ifindex);
  RW_KNI_VF_SET_MDATA_L3_OFFSET(meta_data,
                                skb_in->len);
  memcpy(data, skb_in->data, skb_in->len); //akki
  skb_put(skb, size);
  
  nlmsg_end(skb, nlh);
  
  return nlmsg_unicast(kni_net->netlink_sock, skb, pid);
  
nlmsg_failure:

   nlmsg_cancel(skb, nlh);
   kfree_skb(skb);
   return -1;
}
Example no. 17
int cfpkt_iterate(struct cfpkt *pkt,
		  u16 (*iter_func)(u16, void *, u16),
		  u16 data)
{
	/*
	 * Don't care about the performance hit of linearizing,
	 * Checksum should not be used on high-speed interfaces anyway.
	 */
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
		PKT_ERROR(pkt, "linearize failed\n");
		return -EPROTO;
	}
	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
}
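iter_func receives the running value first, so a checksum can be threaded through cfpkt_iterate() without global state. A hypothetical iterator (a plain additive byte sum, chosen for brevity; the function name is illustrative):

/* Hypothetical: fold each byte into a 16-bit additive checksum. */
static u16 bytesum_iter(u16 chks, void *buf, u16 len)
{
	u8 *p = buf;
	u16 i;

	for (i = 0; i < len; i++)
		chks += p[i];
	return chks;
}

	/* usage: int sum = cfpkt_iterate(pkt, bytesum_iter, 0); */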
Example no. 18
static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_switch *ds;
	u8 *trailer;
	int source_port;

	if (unlikely(dst == NULL))
		goto out_drop;
	ds = dst->ds[0];

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	if (skb_linearize(skb))
		goto out_drop;

	trailer = skb_tail_pointer(skb) - 4;
	if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
	    (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
		goto out_drop;

	source_port = trailer[1] & 7;
	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
		goto out_drop;

	pskb_trim_rcsum(skb, skb->len - 4);

	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return 0;

out_drop:
	kfree_skb(skb);
out:
	return 0;
}
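The masks in trailer_rcv() pin down the 4-byte trailer format: byte 0 is 0x80, byte 1 carries the source port in its low three bits, and bytes 2-3 must be zero. A hypothetical transmit-side counterpart that emits exactly what this receiver accepts (not part of the original tagger):

/* Hypothetical helper: build the 4-byte trailer trailer_rcv() expects. */
static void trailer_fill(u8 *trailer, int source_port)
{
	trailer[0] = 0x80;
	trailer[1] = source_port & 7;
	trailer[2] = 0x00;
	trailer[3] = 0x00;
}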
Example no. 19
/* 
 * (1) len doesn't include the header by default.  I want this. 
 */
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	u32 n;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (skb_is_nonlinear(skb) &&
	    skb_linearize(skb, GFP_ATOMIC) < 0)
		goto exit;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* (1) */

	h = (struct aoe_hdr *) skb->mac.raw;
	n = be32_to_cpu(h->tag);
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;
		if (net_ratelimit())
			printk(KERN_ERR "aoe: aoenet_rcv: error packet from %d.%d; "
			       "ecode=%d '%s'\n",
			       be16_to_cpu(h->major), h->minor, 
			       h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		printk(KERN_INFO "aoe: aoenet_rcv: unknown cmd %d\n", h->cmd);
	}
exit:
	dev_kfree_skb(skb);
	return 0;
}
Example no. 20
/* netif_tx_lock held, process context with BHs disabled */
static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(netdev);

	netdev->trans_start = jiffies;

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
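The comment in enic_hard_start_xmit() encodes a descriptor budget: one descriptor for the linear head plus one per page fragment. Spelled out as a predicate (a sketch; the helper name is hypothetical, the constant is the driver's own):

/* Sketch of the linearize rule: non-TSO sends may use at most
 * ENIC_NON_TSO_MAX_DESC descriptors, one per fragment plus the head. */
static bool enic_needs_linearize(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size == 0 &&
	       skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC;
}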
Example no. 21
static int mwl_rx_refill(struct mwl_priv *priv, struct mwl_rx_desc *pdesc)
{
	WLDBG_ENTER(DBG_LEVEL_4);

	BUG_ON(!priv);
	BUG_ON(!pdesc);

	pdesc->psk_buff = dev_alloc_skb(priv->desc_data[0].rx_buf_size);

	if (pdesc->psk_buff == NULL)
		goto nomem;

	if (skb_linearize(pdesc->psk_buff)) {
		dev_kfree_skb_any(pdesc->psk_buff);
		WLDBG_ERROR(DBG_LEVEL_4, "need linearize memory");
		goto nomem;
	}

	skb_reserve(pdesc->psk_buff, SYSADPT_MIN_BYTES_HEADROOM);

	pdesc->status = EAGLE_RXD_STATUS_OK;
	pdesc->qos_ctrl = 0x0000;
	pdesc->channel = 0x00;
	pdesc->rssi = 0x00;

	pdesc->pkt_len = priv->desc_data[0].rx_buf_size;
	pdesc->pbuff_data = pdesc->psk_buff->data;
	pdesc->pphys_buff_data =
		ENDIAN_SWAP32(pci_map_single(priv->pdev,
					     pdesc->psk_buff->data,
					     priv->desc_data[0].rx_buf_size,
					     PCI_DMA_BIDIRECTIONAL));

	WLDBG_EXIT(DBG_LEVEL_4);

	return 0;

nomem:

	WLDBG_EXIT_INFO(DBG_LEVEL_4, "no memory");

	return -ENOMEM;
}
Example no. 22
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct net_device *rdev = ndev;
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe && is_vlan_dev(rdev)) {
		rdev = vlan_dev_real_dev(ndev);
		rxe = rxe_get_dev_from_net(rdev);
	}
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);

	/*
	 * FIXME: this is in the wrong place, it needs to be done when pkt is
	 * destroyed
	 */
	ib_device_put(&rxe->ib_dev);

	return 0;
drop:
	kfree_skb(skb);

	return 0;
}
Example no. 23
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
					      struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
Example no. 24
void tcp_send_check(struct sk_buff *skb) {
	struct iphdr *ip_header;
	struct tcphdr *tcp_header;
	unsigned int tcp_header_length;

	/* the checksum below walks skb->data, so the skb must be linear */
	if (skb_is_nonlinear(skb) && skb_linearize(skb) != 0)
		return;

	ip_header = ip_hdr(skb);
	tcp_header = tcp_hdr(skb);
	tcp_header_length = skb->len - (ip_header->ihl << 2);
	tcp_header->check = 0;
	tcp_header->check = tcp_v4_check(
		tcp_header_length,
		ip_header->saddr,
		ip_header->daddr,
		csum_partial(
			(char*)tcp_header,
			tcp_header_length,
			0
		)
	);
	skb->ip_summed = CHECKSUM_NONE;
}
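tcp_v4_check() is the pseudo-header fold over the payload sum that csum_partial() produces; the call above is equivalent to the spelled-out form below (same arithmetic, shown for clarity):

	/* Equivalent formulation: csum_tcpudp_magic() adds the IPv4
	 * pseudo-header (saddr, daddr, protocol, length) to the running
	 * one's-complement sum of the TCP header and payload. */
	__wsum payload_sum = csum_partial((char *)tcp_header,
					  tcp_header_length, 0);
	tcp_header->check = csum_tcpudp_magic(ip_header->saddr,
					      ip_header->daddr,
					      tcp_header_length,
					      IPPROTO_TCP, payload_sum);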
Example no. 25
int osh_pktpadtailroom(osl_t *osh, struct sk_buff* skb, int pad)
{
	int err;
	int ntail;

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto done;
	}

	err = skb_linearize(skb);
	if (unlikely(err))
		goto done;

	memset(skb->data + skb->len, 0, pad);

done:
	return err;
}
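osh_pktpadtailroom() zeroes pad bytes past skb->len but leaves the length untouched, so the caller grows the skb afterwards. A hypothetical use, padding a runt frame to the 60-byte Ethernet minimum (ETH_ZLEN):

	/* Hypothetical caller: zero-pad, then account for the added tail. */
	int pad = ETH_ZLEN - skb->len;

	if (pad > 0 && osh_pktpadtailroom(osh, skb, pad) == 0)
		__skb_put(skb, pad);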
Example no. 26
int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *data = dta;
	u8 *from;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(skb_linearize(skb) != 0)) {
		PKT_ERROR(pkt, "linearize failed\n");
		return -EPROTO;
	}
	if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
		PKT_ERROR(pkt, "read beyond end of packet\n");
		return -EPROTO;
	}
	from = skb_tail_pointer(skb) - len;
	skb_trim(skb, skb->len - len);
	memcpy(data, from, len);
	return 0;
}
Example no. 27
static int
ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;
	unsigned int dataoff, datalen, matchoff, matchlen;
	const char *dptr;
	int retc;

	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);

	/* Only useful with UDP */
	if (!retc || iph.protocol != IPPROTO_UDP)
		return -EINVAL;
	/* todo: IPv6 fragments:
	 *       I think this only should be done for the first fragment. /HS
	 */
	dataoff = iph.len + sizeof(struct udphdr);

	if (dataoff >= skb->len)
		return -EINVAL;
	retc = skb_linearize(skb);
	if (retc < 0)
		return retc;
	dptr = skb->data + dataoff;
	datalen = skb->len - dataoff;

	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
		return -EINVAL;

	/* N.B: pe_data is only set on success,
	 * this allows fallback to the default persistence logic on failure
	 */
	p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
	if (!p->pe_data)
		return -ENOMEM;

	p->pe_data_len = matchlen;

	return 0;
}
Example no. 28
static int
ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;
	unsigned int dataoff, datalen, matchoff, matchlen;
	const char *dptr;
	int retc;

	ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);

	/* Only useful with UDP */
	if (iph.protocol != IPPROTO_UDP)
		return -EINVAL;

	/* No Data ? */
	dataoff = iph.len + sizeof(struct udphdr);
	if (dataoff >= skb->len)
		return -EINVAL;

	if ((retc = skb_linearize(skb)) < 0)
		return retc;
	dptr = skb->data + dataoff;
	datalen = skb->len - dataoff;

	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
		return -EINVAL;

	/* N.B: pe_data is only set on success,
	 * this allows fallback to the default persistence logic on failure
	 */
	p->pe_data = kmemdup(dptr + matchoff, matchlen, GFP_ATOMIC);
	if (!p->pe_data)
		return -ENOMEM;

	p->pe_data_len = matchlen;

	return 0;
}
Example no. 29
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	u8 *from;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	if (unlikely(len > skb->len)) {
		PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
		return -EPROTO;
	}

	if (unlikely(len > skb_headlen(skb))) {
		if (unlikely(skb_linearize(skb) != 0)) {
			PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
			return -EPROTO;
		}
	}
	from = skb_pull(skb, len);
	from -= len;
	memcpy(data, from, len);
	return 0;
}
Example no. 30
/* XXX: Copied skb_pad() from skbuff.c and modified it accordingly.
 *      Removed kfree_skb() from skb_pad().
 */
int osh_pktpadtailroom(osl_t *osh, struct sk_buff* skb, int pad)
{
	int err;
	int ntail;

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto done;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto done;

	memset(skb->data + skb->len, 0, pad);

done:
	return err;
}