Example #1
s32 mswitch_neighbour6_create(struct neighbour *neigh)
{
    u32 nd_index;
    u32 ret = 0;
    u32 board_idx = 0;
    u32 board_max = 0;
    u32 vsm_board = 0;
    struct VLAN *vlan;
    drv_arp_info_t arp_info;

    struct net_device *dev = neigh->dev;

    /* the standby board returns immediately */
    if (switchover_get_board_type() == DPX_BOARD_SPARE)
    {
        return 0;
    }

    if (!is_vlan_dev(dev))
    {
        return 0;
    }
    vlan = VLAN_INTERFACE_INFO(dev);
    memcpy(arp_info.mac_add, neigh->ha, ETH_ALEN);
    arp_info.out_port   = 0;
    arp_info.vlan_id    = vlan->vlan_id;
    arp_info.entry_type = DRV_NH_ENTRY_TYPE_L3UC;
    arp_info.valid      = IPV6_ND_INVALID;

    /* add an ND entry for every slot */
    board_max = IPV6_ND_MAX_SIZE;
    for (board_idx = 1; board_idx < board_max; board_idx++)
    {
        nd_index = 0;
        vsm_board = board_idx + g_board_offset;

        if (!if_slot_is_available(vsm_board))
            continue;

        arp_info.egr_intf_idx = vlan->intf_idx[vsm_board];
        ret = drv_arp_add(vsm_board, &arp_info, &nd_index);

        if (0 == ret)
        {
            neigh->neigh_index[board_idx] = nd_index;
        }
        else
        {
            neigh->neigh_index[board_idx] = 0;
        }
    }

    neigh->neigh_flags |= NEIGH_MSWITCH;

    return 0;
}
Example #2
static int vlan_dev_init(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	int subclass = 0;

	netif_carrier_off(dev);

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_MASTER | IFF_SLAVE);
	dev->iflink = real_dev->ifindex;
	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					  (1<<__LINK_STATE_DORMANT))) |
		      (1<<__LINK_STATE_PRESENT);

	dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
			   NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
			   NETIF_F_ALL_FCOE;

	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
	dev->gso_max_size = real_dev->gso_max_size;

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr))
		memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

#if IS_ENABLED(CONFIG_FCOE)
	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif

	dev->needed_headroom = real_dev->needed_headroom;
	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		dev->header_ops      = real_dev->header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
	}

	dev->netdev_ops = &vlan_netdev_ops;

	SET_NETDEV_DEVTYPE(dev, &vlan_type);

	if (is_vlan_dev(real_dev))
		subclass = 1;

	vlan_dev_set_lockdep_class(dev, subclass);

	vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
		return -ENOMEM;

	return 0;
}
Example #3
static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}
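
The double negation in the first test above is compact but easy to misread. A minimal sketch (hypothetical helper, not part of the source) spelling out the same predicate:

/* Hypothetical expansion of the VLAN check in find_gid_index() above:
 * the entry matches only when the request and the GID's netdev agree on
 * whether a VLAN is involved, and on the VLAN id itself.
 */
static bool vlan_matches(u16 requested_vlan_id, struct net_device *ndev)
{
	bool want_vlan = (requested_vlan_id != 0xffff);  /* 0xffff means "no VLAN" */

	if (want_vlan != (bool)is_vlan_dev(ndev))
		return false;   /* one side expects a VLAN, the other does not */
	if (want_vlan && vlan_dev_vlan_id(ndev) != requested_vlan_id)
		return false;   /* both are VLANs, but the ids differ */
	return true;
}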
Example #4
s32 mswitch_fib6_add(struct rt6_info *info, u32 vsm_board)
{
    struct net_device *dev = NULL;
    struct neighbour *n = NULL;
    lpm_rt_entry_s *rt_entry = NULL;
    s32 ret = 0;

    /* do not process cloned entries */
    if (info->rt6i_flags & RTF_CACHE || info->rt6i_idev == NULL)
    {
        return 0;
    }

    /* do not process multicast entries */
    if (ipv6_addr_type(&info->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
    {
        return 0;
    }

    /* ignore interfaces other than VLAN interfaces */
    dev = info->rt6i_idev->dev;
    if (dev == NULL || !is_vlan_dev(dev))
    {
        return 0;
    }

    LPM_LOCK(&lpm_lock);

    rt_entry = info->rt_entry;
    if (rt_entry == NULL)
    {
        LPM_UNLOCK(&lpm_lock);
        return 0;
    }

    rt_entry->slot_id = vsm_board;

    if (is_ip6_zero(&info->rt6i_gateway))    /* default route: delivered to the local host */
    {
        n = __neigh_lookup(&nd_tbl, &info->rt6i_gateway, dev, 1);
    }

    /* mark whether this route is used for forwarding or local delivery */
    if (n)
    {
        lpm_copy_nh(rt_entry, 0, n->neigh_index);
    }

    ret = lpm_add_fb(rt_entry, 1, LPM_SPECIFY_SLOT);
    if (ret != 0)
    {
        LPM_IDX_INFO(rt_entry).fib_index = 0;
    }
    
    LPM_UNLOCK(&lpm_lock);
    return 0;
}
Example #5
static int vlan_dev_init(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
	int subclass = 0;

	netif_carrier_off(dev);

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_MASTER | IFF_SLAVE);
	dev->iflink = real_dev->ifindex;
	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					  (1<<__LINK_STATE_DORMANT))) |
		      (1<<__LINK_STATE_PRESENT);

	dev->features |= real_dev->features & real_dev->vlan_features;
	dev->gso_max_size = real_dev->gso_max_size;

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr))
		memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif

	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
		dev->header_ops      = real_dev->header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
		if (real_dev->netdev_ops->ndo_select_queue)
			dev->netdev_ops = &vlan_netdev_accel_ops_sq;
		else
			dev->netdev_ops = &vlan_netdev_accel_ops;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
		if (real_dev->netdev_ops->ndo_select_queue)
			dev->netdev_ops = &vlan_netdev_ops_sq;
		else
			dev->netdev_ops = &vlan_netdev_ops;
	}

	if (is_vlan_dev(real_dev))
		subclass = 1;

	vlan_dev_set_lockdep_class(dev, subclass);

	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
	if (!vlan_dev_info(dev)->vlan_rx_stats)
		return -ENOMEM;

	return 0;
}
Example #6
static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	int ret;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret)
		return ERR_PTR(ret);
	eth_zero_addr(ah->av.eth.s_mac);
	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(gid_attr.ndev);
	}
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
	if (ret < 0)
		return ERR_PTR(ret);
	ah->av.eth.gid_index = ret;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}
Example #7
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
	struct net_device *vlandev = (struct net_device *) seq->private;
	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats;
	static const char fmt[] = "%30s %12lu\n";
	static const char fmt64[] = "%30s %12llu\n";
	int i;

	if (!is_vlan_dev(vlandev))
		return 0;

	stats = dev_get_stats(vlandev, &temp);
	seq_printf(seq,
		   "%s  VID: %d	 REORDER_HDR: %i  dev->priv_flags: %hx\n",
		   vlandev->name, dev_info->vlan_id,
		   (int)(dev_info->flags & 1), vlandev->priv_flags);

	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
	seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
	seq_puts(seq, "\n");
	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
	seq_printf(seq, fmt, "total headroom inc",
		   dev_info->cnt_inc_headroom_on_tx);
	seq_printf(seq, fmt, "total encap on xmit",
		   dev_info->cnt_encap_on_xmit);
	seq_printf(seq, "Device: %s", dev_info->real_dev->name);
	/* now show all PRIORITY mappings relating to this VLAN */
	seq_printf(seq, "\nINGRESS priority mappings: "
			"0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
		   dev_info->ingress_priority_map[0],
		   dev_info->ingress_priority_map[1],
		   dev_info->ingress_priority_map[2],
		   dev_info->ingress_priority_map[3],
		   dev_info->ingress_priority_map[4],
		   dev_info->ingress_priority_map[5],
		   dev_info->ingress_priority_map[6],
		   dev_info->ingress_priority_map[7]);

	seq_printf(seq, " EGRESS priority mappings: ");
	for (i = 0; i < 16; i++) {
		const struct vlan_priority_tci_mapping *mp
			= dev_info->egress_priority_map[i];
		while (mp) {
			seq_printf(seq, "%u:%hu ",
				   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
			mp = mp->next;
		}
	}
	seq_puts(seq, "\n");

	return 0;
}
Example #8
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}
Example #9
static int vlan_calculate_locking_subclass(struct net_device *real_dev)
{
	int subclass = 0;

	while (is_vlan_dev(real_dev)) {
		subclass++;
		real_dev = vlan_dev_priv(real_dev)->real_dev;
	}

	return subclass;
}
Example #10
struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
	const struct ib_gid_attr *gid_attr;
	struct device *dev = hr_dev->dev;
	struct hns_roce_ah *ah;
	u16 vlan_tag = 0xffff;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	bool vlan_en = false;

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	/* Get mac address */
	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);

	gid_attr = ah_attr->grh.sgid_attr;
	if (is_vlan_dev(gid_attr->ndev)) {
		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
		vlan_en = true;
	}

	if (vlan_tag < 0x1000)
		vlan_tag |= (rdma_ah_get_sl(ah_attr) &
			     HNS_ROCE_VLAN_SL_BIT_MASK) <<
			     HNS_ROCE_VLAN_SL_SHIFT;

	ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
				     (rdma_ah_get_port_num(ah_attr) <<
				     HNS_ROCE_PORT_NUM_SHIFT));
	ah->av.gid_index = grh->sgid_index;
	ah->av.vlan = cpu_to_le16(vlan_tag);
	ah->av.vlan_en = vlan_en;
	dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
		ah->av.vlan);

	if (rdma_ah_get_static_rate(ah_attr))
		ah->av.stat_rate = IB_RATE_10_GBPS;

	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
	ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) <<
						 HNS_ROCE_SL_SHIFT);

	return &ah->ibah;
}
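
Both RoCE examples above fold the service level into bits 15..13 of the 16-bit tag before programming it. For reference, a standalone sketch of the 802.1Q TCI layout they rely on (illustrative, not driver code):

#include <stdint.h>

/* 802.1Q TCI layout: PCP in bits 15..13, DEI in bit 12 (left zero here),
 * VID in bits 11..0. The drivers above place the service level in PCP
 * when a real VLAN id (< 0x1000) is present.
 */
static inline uint16_t build_vlan_tci(uint16_t vid, uint8_t pcp)
{
	return (uint16_t)(((pcp & 0x7) << 13) | (vid & 0x0fff));
}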
Example #11
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
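
The loop above compares queue timestamps with time_after() rather than a plain '>' so the result stays correct when the unsigned trans_start jiffies value wraps. A simplified form of that macro (without the kernel's typecheck) for illustration:

/* True when a is later than b, even across a wraparound of the
 * unsigned counter: the signed difference goes negative.
 */
#define my_time_after(a, b)  ((long)((b) - (a)) < 0)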
Example #12
/*
 * vlantag_match_pre()
 *	One of the iptables hooks has a packet for us to analyze, do so.
 */
static bool vlantag_match_pre(const struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_vlantag_match_info *info = par->targinfo;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_ct_vlantag_ext *ncve;
	unsigned short int prio;

	if (!is_vlan_dev(skb->dev)) {
		DEBUGP("pre dev %s is not vlan\n", skb->dev->name);
		return false;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		DEBUGP("pre xt_VLANTAG: No conntrack connection\n");
		return false;
	}

	ncve = nf_ct_vlantag_ext_find(ct);
	if (!ncve) {
		DEBUGP("pre xt_VLANTAG: No conntrack extension\n");
		return false;
	}

	if ((skb->priority < XT_VLANTAG_SKB_PRIO_MIN) || (skb->priority > XT_VLANTAG_SKB_PRIO_MAX)) {
		DEBUGP("pre dev: %s skb priority %d is out of range (%d - %d)\n",
				skb->dev->name, skb->priority,
				XT_VLANTAG_SKB_PRIO_MIN, XT_VLANTAG_SKB_PRIO_MAX);
		return false;
	}

	prio = skb->priority - XT_VLANTAG_SKB_PRIO_MIN;
	if ((prio & info->imask) == info->itag) {
		ncve->prio = prio;
	} else {
		ncve->prio = XT_VLANTAG_EXT_PRIO_NOT_VALID;
	}

	ncve->imask = info->imask;
	ncve->itag = info->itag;

	DEBUGP("xt_VLANTAG: pre match continue\n");
	return true;
}
Example #13
/*
 * vlantag_target_post()
 *	One of the iptables hooks has a packet for us to analyze, do so.
 */
static bool vlantag_target_post(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_vlantag_target_info *info = par->targinfo;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_ct_vlantag_ext *ncve;
	unsigned int vlan_prio;

	if (!is_vlan_dev(skb->dev)) {
		DEBUGP("post dev %s is not vlan\n", skb->dev->name);
		return true;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) {
		DEBUGP("post xt_VLANTAG: No conntrack connection\n");
		return true;
	}

	ncve = nf_ct_vlantag_ext_find(ct);
	if (!ncve) {
		DEBUGP("post No vlan tag extension\n");
		return true;
	}

	if (ncve->prio == XT_VLANTAG_EXT_PRIO_NOT_VALID) {
		DEBUGP("post vlan tag ext prio is not set\n");
		if ((skb->priority >= XT_VLANTAG_SKB_PRIO_MIN) && (skb->priority <= XT_VLANTAG_SKB_PRIO_MAX)) {
			DEBUGP("but skb prio %d is in the range of our iptables rule, drop the packet\n", skb->priority);
			return false;
		}
		return true;
	}

	vlan_prio = (ncve->prio & info->omask) | info->oval;
	skb->priority = vlan_prio + XT_VLANTAG_SKB_PRIO_MIN;

	ncve->magic = true;
	ncve->omask = info->omask;
	ncve->oval = info->oval;

	DEBUGP("post xt_VLANTAG: post target continue\n");
	return true;
}
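
The pre hook records the priority only when it matches the rule's imask/itag pair, and the post hook rewrites it through a second omask/oval pair. A small standalone illustration of that bit arithmetic (values invented for the example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned short prio  = 0x5;             /* priority seen by the pre hook */
	unsigned short imask = 0x7, itag = 0x5; /* match: (prio & imask) == itag */
	unsigned short omask = 0x3, oval = 0x8; /* rewrite: keep omask bits, force oval */

	assert((prio & imask) == itag);
	printf("rewritten priority: 0x%x\n", (prio & omask) | oval); /* prints 0x9 */
	return 0;
}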
Example #14
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;

	++*pos;

	dev = (struct net_device *)v;
	if (v == SEQ_START_TOKEN)
		dev = net_device_entry(&dev_base_head);

	for_each_netdev_continue(dev) {
		if (!is_vlan_dev(dev))
			continue;

		return dev;
	}

	return NULL;
}
Example #15
static int vlan_dev_init(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
	int subclass = 0;

	netif_carrier_off(dev);

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
	dev->iflink = real_dev->ifindex;
	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					  (1<<__LINK_STATE_DORMANT))) |
		      (1<<__LINK_STATE_PRESENT);

	dev->features |= real_dev->features & real_dev->vlan_features;
	dev->gso_max_size = real_dev->gso_max_size;

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr))
		memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
		dev->header_ops      = real_dev->header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
		dev->netdev_ops         = &vlan_netdev_accel_ops;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
		dev->netdev_ops         = &vlan_netdev_ops;
	}
	netdev_resync_ops(dev);

	if (is_vlan_dev(real_dev))
		subclass = 1;

	vlan_dev_set_lockdep_class(dev, subclass);
	return 0;
}
Example #16
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct net_device *rdev = ndev;
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe && is_vlan_dev(rdev)) {
		rdev = vlan_dev_real_dev(ndev);
		rxe = rxe_get_dev_from_net(rdev);
	}
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);

	/*
	 * FIXME: this is in the wrong place, it needs to be done when pkt is
	 * destroyed
	 */
	ib_device_put(&rxe->ib_dev);

	return 0;
drop:
	kfree_skb(skb);

	return 0;
}
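
For context, rxe_udp_encap_recv() is reached as a UDP tunnel encap callback. A sketch of how such a handler is typically attached to the RoCEv2 socket, following the pattern used in rxe_net.c (details assumed; 4791 is the RoCEv2 UDP port):

static struct socket *rxe_setup_udp_tunnel_sketch(struct net *net, __be16 port)
{
	struct socket *sock;
	struct udp_tunnel_sock_cfg tnl_cfg = { };
	struct udp_port_cfg udp_cfg = { };
	int err;

	udp_cfg.family = AF_INET;
	udp_cfg.local_udp_port = port;
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	tnl_cfg.encap_type = 1;                 /* classic UDP encapsulation */
	tnl_cfg.encap_rcv = rxe_udp_encap_recv; /* the handler shown above */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);
	return sock;
}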
Example #17
/* start read of /proc/net/vlan/config */
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	loff_t i = 1;

	read_lock(&dev_base_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for_each_netdev(dev) {
		if (!is_vlan_dev(dev))
			continue;

		if (i++ == *pos)
			return dev;
	}

	return  NULL;
}
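
vlan_seq_start() here and vlan_seq_next() in Example #14 are two of the four seq_file callbacks. A sketch of how they are usually wired together (assuming the conventional companions vlan_seq_stop and vlan_seq_show from the same file):

static const struct seq_operations vlan_seq_ops = {
	.start = vlan_seq_start,   /* takes dev_base_lock, positions the cursor */
	.next  = vlan_seq_next,    /* advances to the next VLAN device */
	.stop  = vlan_seq_stop,    /* assumed: releases dev_base_lock */
	.show  = vlan_seq_show,    /* prints one line of /proc/net/vlan/config */
};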
Example #18
/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver
 *   arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[23] = 0;
	args.u.device2[23] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		if ((args.u.name_type >= 0) &&
		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
		      err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}
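
A minimal userspace sketch showing how the handler above is reached through the SIOCSIFVLAN ioctl (the interface name "eth0.100" is assumed to exist):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

int main(void)
{
	struct vlan_ioctl_args args;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.cmd = GET_VLAN_VID_CMD;
	strncpy(args.device1, "eth0.100", sizeof(args.device1) - 1);

	if (ioctl(fd, SIOCSIFVLAN, &args) == 0)  /* dispatched to vlan_ioctl_handler() */
		printf("VID: %d\n", args.u.VID);

	close(fd);
	return 0;
}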
Example #19
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = ptr;
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	LIST_HEAD(list);

	if (is_vlan_dev(dev))
		__vlan_device_event(dev, event);

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, 0);
	}

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			vlan_transfer_features(dev, vlandev);
		}

		break;

	case NETDEV_DOWN:
		if (dev->features & NETIF_F_HW_VLAN_FILTER)
			vlan_vid_del(dev, 0);

		/* Put all VLANs for this dev in the down state too.  */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs & ~IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too.  */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				i = VLAN_N_VID;

			unregister_vlan_dev(vlandev, &list);
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		return NOTIFY_BAD;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
		/* Propagate to vlan devices */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			call_netdevice_notifiers(event, vlandev);
		}
		break;
	}

out:
	return NOTIFY_DONE;
}
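
For context, a sketch of how a handler like vlan_device_event() is registered on the netdev notifier chain, following the usual net/8021q/vlan.c convention (names assumed):

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

static int __init vlan_notifier_register_sketch(void)
{
	/* deliver NETDEV_* events for every network device to the handler */
	return register_netdevice_notifier(&vlan_notifier_block);
}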
Example #20
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
			       struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	struct ib_gid_attr sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	union ib_gid sgid;

	if (!(attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	status = ib_get_cached_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid,
				   &sgid_attr);
	if (status) {
		pr_err("%s(): Failed to query sgid, status = %d\n",
		      __func__, status);
		goto av_conf_err;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}
	/* Get network header type for this GID */
	ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);

	if ((pd->uctx) &&
	    (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
	    (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
		status = rdma_addr_find_l2_eth_by_grh(&sgid, &attr->grh.dgid,
						      attr->dmac, &vlan_tag,
						      &sgid_attr.ndev->ifindex,
						      NULL);
		if (status) {
			pr_err("%s(): Failed to resolve dmac from gid." 
				"status = %d\n", __func__, status);
			goto av_conf_err;
		}
	}

	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}
Example #21
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = ptr;
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	LIST_HEAD(list);

	if (is_vlan_dev(dev))
		__vlan_device_event(dev, event);

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, 0);
	}

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */
	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			vlan_transfer_features(dev, vlandev);
		}

		break;

	case NETDEV_DOWN:
		/* Put all VLANs for this dev in the down state too. */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs & ~IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			flgs = vlandev->flags;
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				i = VLAN_N_VID;

			unregister_vlan_dev(vlandev, &list);
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		return NOTIFY_BAD;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
		/* Propagate to vlan devices */
		for (i = 0; i < VLAN_N_VID; i++) {
			vlandev = vlan_group_get_device(grp, i);
			if (!vlandev)
				continue;

			call_netdevice_notifiers(event, vlandev);
		}
		break;
	}

out:
	return NOTIFY_DONE;
}
Example #22
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);

	if (is_vlan_dev(dev))
		__vlan_device_event(dev, event);

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN:
		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

		/* Put all VLANs for this dev in the down state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs & ~IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
Example #23
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status             = qp->resp.status;
		uwc->qp_num             = qp->ibqp.qp_num;
		uwc->wr_id              = wqe->wr_id;
	} else {
		wc->status              = qp->resp.status;
		wc->qp                  = &qp->ibqp;
		wc->wr_id               = wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (is_vlan_dev(skb->dev)) {
				wc->wc_flags |= IB_WC_WITH_VLAN;
				wc->vlan_id = vlan_dev_vlan_id(skb->dev);
			}

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}