Example #1
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		spin_lock_init(&hwbm_pool->lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
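All of the examples in this collection round their data areas with SKB_DATA_ALIGN(). As a point of reference, a minimal sketch of what the macro does and of the frag_size arithmetic used in Example #1; the helper name is illustrative and not part of any driver:

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/skbuff.h>	/* SKB_DATA_ALIGN, struct skb_shared_info */

/* In recent kernels SKB_DATA_ALIGN(X) is essentially ALIGN(X, SMP_CACHE_BYTES):
 * the payload is padded so that the struct skb_shared_info placed behind it
 * starts on a cache-line boundary.  The frag_size computed above is therefore
 * the full per-buffer footprint the page-fragment allocator must provide. */
static unsigned int example_frag_size(unsigned int rx_buf_size)
{
	return SKB_DATA_ALIGN(rx_buf_size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}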
Example #2
/**
 * send_msg - send a TIPC message out over an InfiniBand interface
 */
static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
                    struct tipc_media_addr *dest)
{
    struct sk_buff *clone;
    struct net_device *dev;
    int delta;

    clone = skb_clone(buf, GFP_ATOMIC);
    if (!clone)
        return 0;

    dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;
    delta = dev->hard_header_len - skb_headroom(buf);

    if ((delta > 0) &&
            pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
        kfree_skb(clone);
        return 0;
    }

    skb_reset_network_header(clone);
    clone->dev = dev;
    clone->protocol = htons(ETH_P_TIPC);
    dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
                    dev->dev_addr, clone->len);
    dev_queue_xmit(clone);
    return 0;
}
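Examples #2, #4 and #9 (and, with the alignment applied before the sign check, #24) perform the same fix-up before dev_hard_header(): if the device needs more headroom than the buffer carries, the head is grown by a cache-aligned amount. A stripped-down sketch of that pattern; the helper name is made up for illustration:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Make sure @skb has room for @dev's link-layer header.
 * Returns 0 on success or a negative errno from pskb_expand_head(). */
static int example_fixup_headroom(struct sk_buff *skb, struct net_device *dev)
{
	int delta = dev->hard_header_len - skb_headroom(skb);

	if (delta <= 0)
		return 0;	/* already enough headroom */

	/* round the extra space up to a cache line, as the examples do */
	return pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC);
}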
Example #3
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	rt = gre_get_rt(skb, dev, &fl, key);
	if (IS_ERR(rt))
		goto err_free_skb;

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
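Examples #3, #8, #17, #20 and #23 all break off at the same point: after computing the tunnel's headroom requirement and, when it falls short, expanding the head by SKB_DATA_ALIGN(min_headroom - skb_headroom(skb) + 16). The recurring arithmetic, pulled out on its own as a sketch (the helper name is made up; the OVS variants additionally add VLAN_HLEN when a VLAN tag is present):

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/route.h>

/* Headroom a GRE-encapsulated packet needs on the route's output device:
 * link-layer reserve + any extra dst header + GRE header + outer IP header. */
static int example_gre_min_headroom(const struct rtable *rt, int tunnel_hlen)
{
	return LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
	       tunnel_hlen + sizeof(struct iphdr);
}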
Example #4
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @buf: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 */
int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
		     struct tipc_media_addr *dest)
{
	struct sk_buff *clone;
	struct net_device *dev;
	int delta;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	clone = skb_clone(buf, GFP_ATOMIC);
	if (!clone)
		return 0;

	delta = dev->hard_header_len - skb_headroom(buf);
	if ((delta > 0) &&
	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
		kfree_skb(clone);
		return 0;
	}

	skb_reset_network_header(clone);
	clone->dev = dev;
	clone->protocol = htons(ETH_P_TIPC);
	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, clone->len);
	dev_queue_xmit(clone);
	return 0;
}
Example #5
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	//	if (irqs_disabled())
	//		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
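skb_recycle_check() is intended for a driver's TX-completion path: a fully transmitted, unshared skb whose data area is still large enough can go straight back onto the RX ring instead of being freed. A hedged sketch of such a caller; the private structure and names are invented for illustration:

#include <linux/skbuff.h>

struct example_priv {
	struct sk_buff_head rx_recycle;	/* skbs waiting to be reused for RX */
	int rx_buf_size;		/* minimum usable receive buffer size */
};

/* On TX completion: recycle the skb as an RX buffer when possible. */
static void example_tx_complete(struct example_priv *priv, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, priv->rx_buf_size))
		__skb_queue_tail(&priv->rx_recycle, skb);
	else
		dev_kfree_skb_any(skb);
}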
Example #6
static int rtnet_mgr_read_proc (char *page, char **start,
                off_t off, int count, int *eof, void *data)
{
    PROC_PRINT_VARS;
    int i;
    struct rtnet_device *rtdev;
    unsigned int rtskb_len;

    PROC_PRINT("\nRTnet\n\n");
    PROC_PRINT("Devices:\n");
    for (i = 1; i <= MAX_RT_DEVICES; i++) {
        rtdev = rtdev_get_by_index(i);
        if (rtdev != NULL) {
            PROC_PRINT("  %s: %s rxq=%d\n",
                rtdev->name,
                (rtdev->flags & IFF_UP) ? "UP" : "DOWN",
                rtdev->rxqueue_len);
            rtdev_dereference(rtdev);
        }
    }

    rtskb_len = ALIGN_RTSKB_STRUCT_LEN + SKB_DATA_ALIGN(RTSKB_SIZE);
    PROC_PRINT("\nrtskb pools current/max:       %d / %d\n"
               "rtskbs current/max:            %d / %d\n"
               "rtskb memory need current/max: %d / %d\n\n",
               rtskb_pools, rtskb_pools_max,
               rtskb_amount, rtskb_amount_max,
               rtskb_amount * rtskb_len, rtskb_amount_max * rtskb_len);

    PROC_PRINT_DONE;
}
Example #7
/***
 *	new_rtskb		-	allocate a new rtskb buffer
 *	return:	buffer
 */
struct rtskb *new_rtskb(void)
{
	struct rtskb *skb;
	unsigned int len = SKB_DATA_ALIGN(rtskb_max_size);

	if ( !(skb = kmem_cache_alloc(rtskb_cache, GFP_ATOMIC)) ) {
		printk("RTnet: allocate rtskb failed.\n");
		return NULL;
	}
	memset(skb, 0, sizeof(struct rtskb));

	if ( !(skb->buf_start = kmem_cache_alloc(rtskb_data_cache, GFP_ATOMIC)) ) {
		printk("RTnet: allocate rtskb->buf_ptr failed.\n");
		kmem_cache_free(rtskb_cache, skb);
		return NULL;
	}

	memset(skb->buf_start, 0, len);
	skb->buf_len = len;
	skb->buf_end = skb->buf_start+len-1;

	rtskb_amount++;
	if (rtskb_amount_max < rtskb_amount) {
		rtskb_amount_max  = rtskb_amount;
	}
	return skb;
}
Example #8
static int __send(struct vport *vport, struct sk_buff *skb,
		  int tunnel_hlen,
		  __be32 seq, __be16 gre64_flag)
{
	struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
	struct rtable *rt;
	int min_headroom;
	__be16 df;
	__be32 saddr;
	int err;

	/* Route lookup */
	saddr = tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, tun_key->ipv4_dst,
			IPPROTO_GRE, tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
Example #9
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @skb: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct net_device *dev;
	int delta;
	void *tipc_ptr;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	/* Send RESET message even if bearer is detached from device */
	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
		goto drop;

	delta = dev->hard_header_len - skb_headroom(skb);
	if ((delta > 0) &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		goto drop;

	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_TIPC);
	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, skb->len);
	dev_queue_xmit(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
Example #10
struct sk_buff *__alloc_skb(unsigned int length, int gfp_mask,
			    int fclone)
{
	int order, i;
	kmem_cache_t *cachep;

	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);

	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
		for (i = 0; skbuff_small[i].size < length; i++)
			continue;
		cachep = skbuff_small[i].cachep;
	} else {
		order = get_order(length);
		if (order > MAX_SKBUFF_ORDER) {
			printk(KERN_ALERT "Attempt to allocate order %d "
			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
			return NULL;
		}
		cachep = skbuff_order_cachep[order];
	}

	length -= sizeof(struct skb_shared_info);

	return alloc_skb_from_cache(cachep, length, gfp_mask, fclone);
}
Example #11
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
		static int count = 0;
		if (++count < 5) {
			printk(KERN_ERR "alloc_skb called nonatomically "
			       "from interrupt %p\n", NET_CALLER(size));
 			BUG();
		}
		gfp_mask &= ~__GFP_WAIT;
	}

	/* Get the HEAD */
	skb = skb_head_from_pool();
	if (skb == NULL) {
		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
		if (skb == NULL)
			goto nohead;
	}

	/* reserve some place at the tailroom for HW switches that use it for
	 * trailers with switch related information, e.g. port_map, etc. */
	if (size < (2048 - sizeof(struct skb_shared_info) - 64))
	    size += 64;
	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask | GFP_DMA);
	if (data == NULL)
		goto nodata;

	/* XXX: does not include slab overhead */ 
	skb->truesize = size + sizeof(struct sk_buff);

	/* Load the data pointers. */
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end = data + size;

	/* Set up other state */
	skb->priority = 0;
	skb->len = 0;
	skb->cloned = 0;
	skb->data_len = 0;

	atomic_set(&skb->users, 1); 
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;
	return skb;

nodata:
	skb_head_to_pool(skb);
nohead:
	return NULL;
}
Example #12
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags  = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
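The allocators in Examples #10 through #12 return a buffer with no headroom and size bytes of tailroom. A minimal, hedged sketch of a typical caller that carves that space up; the constant and names are illustrative:

#include <linux/skbuff.h>
#include <linux/string.h>

#define EXAMPLE_HEADROOM	32	/* illustrative reserve for lower-layer headers */

static struct sk_buff *example_build_frame(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(EXAMPLE_HEADROOM + len, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reserve(skb, EXAMPLE_HEADROOM);		/* leave room to prepend headers */
	memcpy(skb_put(skb, len), payload, len);	/* copy the payload into the tailroom */
	return skb;
}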
Example #13
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		wqe->data.lkey       = c->mkey_be;
		wqe->data.byte_count =
			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->channel = c;
	rq->ix      = c->ix;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
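In Example #13 rq->wqe_sz is the cache-aligned sum of the payload size and MLX5E_NET_IP_ALIGN, while the byte count programmed into each WQE excludes that padding. A hedged sketch, not the actual mlx5 receive code, of how a buffer of that size would be allocated and aligned:

/* Illustrative only: allocate one RX skb of rq->wqe_sz bytes and reserve
 * MLX5E_NET_IP_ALIGN bytes so the IP header of a received frame ends up
 * 4-byte aligned; the device is told to write at most
 * wqe_sz - MLX5E_NET_IP_ALIGN bytes, matching the byte_count above. */
static struct sk_buff *example_alloc_rx_skb(struct mlx5e_rq *rq)
{
	struct sk_buff *skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);

	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
	return skb;
}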
Example #14
static int rtnet_rtskb_show(struct xnvfile_regular_iterator *it, void *data)
{
    unsigned int rtskb_len;

    rtskb_len = ALIGN_RTSKB_STRUCT_LEN + SKB_DATA_ALIGN(RTSKB_SIZE);

    xnvfile_printf(it, "Statistics\t\tCurrent\tMaximum\n"
                   "rtskb pools\t\t%d\t%d\n"
                   "rtskbs\t\t\t%d\t%d\n"
                   "rtskb memory need\t%d\t%d\n",
                   rtskb_pools, rtskb_pools_max,
                   rtskb_amount, rtskb_amount_max,
                   rtskb_amount * rtskb_len, rtskb_amount_max * rtskb_len);
    return 0;
}
Example #15
static char *
netlink_trans_alloc(unsigned int size)
{
    struct sk_buff *skb;
    int len;

    len = NETLINK_RESPONSE_HEADER_LEN;
    len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    skb = alloc_skb(size + len, GFP_ATOMIC);
    if (!skb)
        return NULL;

    /* Store the skb address at the beginning of the skb itself */
    *(struct sk_buff **)(skb->data) = skb;
    return skb->data + NETLINK_RESPONSE_HEADER_LEN;
}
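Example #15 stashes the skb pointer in the first bytes of the skb's own data area and hands its caller a pointer just past NETLINK_RESPONSE_HEADER_LEN. The counterpart lookup is not part of the excerpt; a hedged sketch of what it could look like, assuming the same layout:

/* Recover the skb stored by netlink_trans_alloc() from the response pointer
 * it returned (illustrative; assumes the layout shown above). */
static struct sk_buff *example_trans_to_skb(char *resp)
{
	return *(struct sk_buff **)(resp - NETLINK_RESPONSE_HEADER_LEN);
}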
Example #16
static int rtnet_read_proc_rtskb(char *buf, char **start, off_t offset, int count,
                                 int *eof, void *data)
{
    unsigned int rtskb_len;
    RTNET_PROC_PRINT_VARS(256);


    rtskb_len = ALIGN_RTSKB_STRUCT_LEN + SKB_DATA_ALIGN(RTSKB_SIZE);
    RTNET_PROC_PRINT("Statistics\t\tCurrent\tMaximum\n"
                     "rtskb pools\t\t%d\t%d\n"
                     "rtskbs\t\t\t%d\t%d\n"
                     "rtskb memory need\t%d\t%d\n",
                     rtskb_pools, rtskb_pools_max,
                     rtskb_amount, rtskb_amount_max,
                     rtskb_amount * rtskb_len, rtskb_amount_max * rtskb_len);

    RTNET_PROC_PRINT_DONE;
}
Example #17
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct ovs_key_ipv4_tunnel *tun_key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->egress_tun_key)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = OVS_CB(skb)->egress_tun_key;
	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
Example #18
/***
 *	rtskb_pool_init
 */
int rtskb_pool_init(void)
{
	unsigned int i;
	int err = 0;
	struct rtskb* skb;

	rtskb_queue_head_init(&rtskb_pool);

	rtskb_cache = kmem_cache_create 
		(RTSKB_CACHE, sizeof (struct rtskb), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if ( !rtskb_cache ) {
		rt_printk("RTnet: allocating 'rtskb_cache' failed.");
		return -ENOMEM;
	}
	rtskb_data_cache = kmem_cache_create 
		(RTSKB_DATA_CACHE, SKB_DATA_ALIGN(rtskb_max_size), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if ( !rtskb_data_cache ) {
		rt_printk("RTnet: allocating 'rtskb_data_cache' failed.");
		return -ENOMEM;
	}

	for (i=0; i<rtskb_pool_default; i++) {
		skb = new_rtskb(); /* might return NULL */
		if (skb) {
			__rtskb_queue_tail(&rtskb_pool, skb);
		} else {
			printk("%s(): new_rtskb() returned NULL, qlen=%d\n", __FUNCTION__, rtskb_pool.qlen);
			break;
		}
	}

	if ( (inc_pool_srq=rt_request_srq (0, inc_pool_handler, 0)) < 0) {
		rt_printk("RTnet: allocating 'inc_pool_srq=%d' failed.\n", inc_pool_srq);
		return inc_pool_srq;
	}

	if ( (dec_pool_srq=rt_request_srq (0, dec_pool_handler, 0)) < 0) {
		rt_printk("RTnet: allocating 'dec_pool_srq=%d' failed.\n", dec_pool_srq);
		return dec_pool_srq;
	}

	return err;
}
Example #19
struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
{
	struct sk_buff *skb;
	int order;

	length = SKB_DATA_ALIGN(length + 16);
	order = get_order(length + sizeof(struct skb_shared_info));
	if (order > MAX_SKBUFF_ORDER) {
		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
		       "Increase MAX_SKBUFF_ORDER.\n", order);
		return NULL;
	}

	skb = alloc_skb_from_cache(
		skbuff_order_cachep[order], length, gfp_mask, 0);
	if (skb != NULL)
		skb_reserve(skb, 16);

	return skb;
}
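Example #19 reserves a fixed 16 bytes of headroom after the allocation; later kernels express the same idea with NET_SKB_PAD. A hedged sketch of the generic shape of __dev_alloc_skb() built on a plain alloc_skb(), not the exact mainline implementation:

#include <linux/skbuff.h>

static struct sk_buff *example_dev_alloc_skb(unsigned int length, gfp_t gfp_mask)
{
	/* tailroom for the frame plus a small fixed headroom so drivers can
	 * cheaply prepend link-layer headers */
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);

	if (skb)
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}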
Example #20
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	struct net *net = dev_net(dev);
	const struct ip_tunnel_key *key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	int err;

	tun_info = skb_tunnel_info(skb, AF_INET);
	if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
		goto err_free_skb;

	key = &tun_info->key;
	memset(&fl, 0, sizeof(fl));
	fl.daddr = key->ipv4_dst;
	fl.saddr = key->ipv4_src;
	fl.flowi4_tos = RT_TOS(key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		goto err_free_skb;

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
Example #21
static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
{
    struct hip04_priv *priv = netdev_priv(ndev);
    int i;

    priv->tx_desc = dma_alloc_coherent(d,
                                       TX_DESC_NUM * sizeof(struct tx_desc),
                                       &priv->tx_desc_dma, GFP_KERNEL);
    if (!priv->tx_desc)
        return -ENOMEM;

    priv->rx_buf_size = RX_BUF_SIZE +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    for (i = 0; i < RX_DESC_NUM; i++) {
        priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
        if (!priv->rx_buf[i])
            return -ENOMEM;
    }

    return 0;
}
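Example #21 sizes each receive fragment as the payload area plus a cache-aligned skb_shared_info, which is exactly the layout build_skb() expects. A hedged sketch of how such a fragment is typically wrapped into an skb on the receive path; the helper name is illustrative:

#include <linux/skbuff.h>

/* Wrap a page fragment allocated as in Example #21 into an skb and mark
 * the received bytes as data. */
static struct sk_buff *example_rx_build_skb(void *buf, unsigned int rx_buf_size,
					    unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(buf, rx_buf_size);

	if (!skb)
		return NULL;

	skb_put(skb, pkt_len);
	return skb;
}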
Example #22
/**
 *	alloc_skb_from_cache	-	allocate a network buffer
 *	@cp: kmem_cache from which to allocate the data area
 *           (object size must be big enough for @size bytes + skb overheads)
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
				     unsigned int size,
				     gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. */
	size = SKB_DATA_ALIGN(size);
	data = kmem_cache_alloc(cp, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags  = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}
Example #23
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
Example #24
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @skb: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct net_device *dev;
	int delta;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
	if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
		kfree_skb(skb);
		return 0;
	}
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_TIPC);
	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, skb->len);
	dev_queue_xmit(skb);
	return 0;
}
Example #25
/**
 *	alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = ALLOCATE(struct sk_buff);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;
        skb->list = NULL;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags  = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kfree(skb);
	skb = NULL;
	goto out;
}
Example #26
void *skb_cache_start(unsigned int alloc_size)
{
    skb_cache_t *skb_cache = kmalloc(sizeof(skb_cache_t), GFP_ATOMIC);

    if (!skb_cache)
	return NULL;
    memset(skb_cache, 0, sizeof(skb_cache_t));
    skb_cache->alloc_size = SKB_DATA_ALIGN(alloc_size);

    /* add clean up timer */
    init_timer(&skb_cache->timer);
    skb_cache->timer.expires = SKB_CACHE_TIMEOUT + jiffies;
    skb_cache->timer.data = (unsigned long) skb_cache;
    skb_cache->timer.function = skb_cache_timer_cb;
    add_timer(&skb_cache->timer);

    /* add to global list of cache lists */
    local_irq_disable();
    skb_cache->next = skb_cache_list;
    skb_cache_list = skb_cache;
    local_irq_enable();

    return skb_cache;
}
Example #27
/***
 *	rtskb_data_init		-	constructor for slab 
 *
 */
static inline void rtskb_data_init(void *p, kmem_cache_t *cache, unsigned long flags)
{
	unsigned char *skb_data = p;
	memset (skb_data, 0, SKB_DATA_ALIGN(rtskb_max_size));
}
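Example #27 is a slab constructor written for the old two-callback kmem_cache_create() signature (compare the NULL, NULL pair passed in Example #18). A hedged sketch of how such a constructor would be registered when the data cache is created; the wrapper function is illustrative:

static kmem_cache_t *rtskb_data_cache;

static int example_create_rtskb_data_cache(void)
{
	/* new slab objects are zeroed by rtskb_data_init() (old
	 * kmem_cache_create() API with separate ctor/dtor arguments) */
	rtskb_data_cache = kmem_cache_create(RTSKB_DATA_CACHE,
					     SKB_DATA_ALIGN(rtskb_max_size), 0,
					     SLAB_HWCACHE_ALIGN,
					     rtskb_data_init, NULL);
	return rtskb_data_cache ? 0 : -ENOMEM;
}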
Example #28
struct sk_buff *__alloc_skb(const char *name, unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	uint32_t ret_size;
	//	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
#if USE_MEM_DEBUG
	skb = mem_calloc_ex(name, sizeof(struct sk_buff), 1);
#else
	skb = mem_calloc(sizeof(struct sk_buff), 1);
#endif
	if (!skb)
		goto out;

	g_skb_alloc_size += get_mem_size(skb);
	
	size = SKB_DATA_ALIGN(size);
	data = mem_malloc(size + sizeof(struct skb_shared_info));

	if (!data)
		goto nodata;
	//	prefetchw(data + size);

	g_skb_alloc_size += get_mem_size(data);
	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
	#endif 
	//SET_MONITOR_ITEM_VALUE(_g_skb_alloc_size, g_skb_alloc_size);
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, /*offsetof(struct skb_shared_info, dataref)*/sizeof(struct skb_shared_info));
	atomic_set(&shinfo->dataref, 1);
	//	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone)
	{
		p_err("fclone\n");
	} 
out: 
	return skb;
nodata:
	/* the head was obtained with mem_calloc() above, so kmem_cache_free()
	 * does not apply here; undo the accounting and release it with the
	 * matching allocator (mem_free() is assumed to be the counterpart
	 * of mem_calloc() in this port) */
	ret_size = get_mem_size(skb);
	g_skb_alloc_size -= ret_size;
	mem_free(skb);
	skb = NULL;
	goto out;
}
Example #29
/***
 *  alloc_rtskb - allocate an rtskb from a pool
 *  @size: required buffer size (to check against maximum boundary)
 *  @pool: pool to take the rtskb from
 */
struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_queue *pool)
{
    struct rtskb *skb;


    RTNET_ASSERT(size <= SKB_DATA_ALIGN(RTSKB_SIZE), return NULL;);
Example #30
/***
 * rt_loopback_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 */
static int rt_loopback_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	int err=0;
	struct rtskb *new_skb;

	if ( (new_skb=dev_alloc_rtskb(skb->len + 2))==NULL ) 
	{
		rt_printk("RTnet %s: couldn't allocate a rtskb of size %d.\n", rtdev->name, skb->len);
		err = -ENOMEM;
		goto rt_loopback_xmit_end;
	}
	else 
	{
		new_skb->rx = rt_get_time();
		new_skb->rtdev = rtdev;
		rtskb_reserve(new_skb,2);
		memcpy(new_skb->buf_start, skb->buf_start, SKB_DATA_ALIGN(ETH_FRAME_LEN));
		rtskb_put(new_skb, skb->len);
		new_skb->protocol = rt_eth_type_trans(new_skb, rtdev);

#ifdef DEBUG_LOOPBACK_DRIVER
		{
			int i, cuantos;
			rt_printk("\n\nPACKET:");
			rt_printk("\nskb->protocol = %d", 		skb->protocol);
			rt_printk("\nskb->pkt_type = %d", 		skb->pkt_type);
			rt_printk("\nskb->users = %d", 			skb->users);
			rt_printk("\nskb->cloned = %d", 		skb->cloned);
			rt_printk("\nskb->csum = %d",	 		skb->csum);
			rt_printk("\nskb->len = %d", 			skb->len);
			
			rt_printk("\nnew_skb->protocol = %d", 	new_skb->protocol);
			rt_printk("\nnew_skb->pkt_type = %d", 	new_skb->pkt_type);
			rt_printk("\nnew_skb->users = %d", 		new_skb->users);
			rt_printk("\nnew_skb->cloned = %d", 	new_skb->cloned);
			rt_printk("\nnew_skb->csum = %d",	 	new_skb->csum);
			rt_printk("\nnew_skb->len = %d", 		new_skb->len);
			
			rt_printk("\n\nETHERNET HEADER:");
			rt_printk("\nMAC dest: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+2]); }
			rt_printk("\nMAC orig: "); for(i=0;i<6;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+8]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+14]); }
		
			rt_printk("\n\nIP HEADER:");
			rt_printk("\nVERSIZE : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+16]); }
			rt_printk("\nPRIORITY: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+17]); }
			rt_printk("\nLENGTH  : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+18]); }
			rt_printk("\nIDENT   : "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+20]); }
			rt_printk("\nFRAGMENT: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+22]); }
			rt_printk("\nTTL     : "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+24]); }
			rt_printk("\nPROTOCOL: "); for(i=0;i<1;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+25]); }
			rt_printk("\nCHECKSUM: "); for(i=0;i<2;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+26]); }
			rt_printk("\nIP ORIGE: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+28]); }
			rt_printk("\nIP DESTI: "); for(i=0;i<4;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+32]); }
		
			cuantos = (int)(*(unsigned short *)(new_skb->buf_start+18)) - 20;
			rt_printk("\n\nDATA (%d):", cuantos);  
			rt_printk("\n:");  		   for(i=0;i<cuantos;i++){ rt_printk("0x%02X ", new_skb->buf_start[i+36]); }		
		}
#endif

		rtnetif_rx(new_skb);
		rt_mark_stack_mgr(rtdev);
	}
	
rt_loopback_xmit_end:
	kfree_rtskb(skb);
	return err;
}