/* * Add entries in sysfs onto the existing network class device * for the bridge. * Adds a attribute group "bridge" containing tuning parameters. * Sub directory to hold links to interfaces. * * Note: the ifobj exists only to be a subdirectory * to hold links. The ifobj exists in the same data structure * as its parent the bridge so reference counting works. */ int ovs_dp_sysfs_add_dp(struct datapath *dp) { struct vport *vport = ovs_vport_rtnl(dp, OVSP_LOCAL); struct kobject *kobj = vport->ops->get_kobj(vport); int err; #ifdef CONFIG_NET_NS /* Due to bug in 2.6.32 kernel, sysfs_create_group() could panic * in other namespace than init_net. Following check is to avoid it. */ if (!kobj->sd) return -ENOENT; #endif /* Create /sys/class/net/<devname>/bridge directory. */ err = sysfs_create_group(kobj, &bridge_group); if (err) { pr_info("%s: can't create group %s/%s\n", __func__, ovs_dp_name(dp), bridge_group.name); goto out1; } /* Create /sys/class/net/<devname>/brif directory. */ err = kobject_add(&dp->ifobj, kobj, SYSFS_BRIDGE_PORT_SUBDIR); if (err) { pr_info("%s: can't add kobject (directory) %s/%s\n", __func__, ovs_dp_name(dp), kobject_name(&dp->ifobj)); goto out2; } kobject_uevent(&dp->ifobj, KOBJ_ADD); return 0; out2: sysfs_remove_group(kobj, &bridge_group); out1: return err; }
static int netdev_send(struct vport *vport, struct sk_buff *skb) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); int mtu = netdev_vport->dev->mtu; int len; if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n", ovs_dp_name(vport->dp), packet_length(skb), mtu); goto error; } if (unlikely(skb_warn_if_lro(skb))) goto error; skb->dev = netdev_vport->dev; len = skb->len; dev_queue_xmit(skb); return len; error: kfree_skb(skb); ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); return 0; }
/* Called when a flow has been executed MAX_LOOPS times: clear its action
 * list so no further processing happens and report -ELOOP.  The warning
 * is rate-limited to avoid log flooding.
 */
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	/* Neuter the actions first; the log message is best-effort. */
	actions->actions_len = 0;

	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			ovs_dp_name(dp), MAX_LOOPS);

	return -ELOOP;
}
/* sysfs store handler for the bridge group address attribute.
 * Setting the group address is not supported on an OVS datapath; the
 * write is acknowledged (returns @len) but only logged.  Returns
 * -ENODEV if the net_device no longer maps to a datapath.
 */
static ssize_t store_group_addr(DEVICE_PARAMS, const char *buf, size_t len)
{
	struct datapath *dp;
	ssize_t ret = len;

	rcu_read_lock();
	dp = sysfs_get_dp(to_net_dev(d));
	if (!dp)
		ret = -ENODEV;
	else
		pr_info("%s: xxx attempt to store_group_addr()\n",
			ovs_dp_name(dp));
	rcu_read_unlock();

	return ret;
}
/*
 * Common code for storing bridge parameters.
 *
 * Parses @buf as an unsigned long and, for any non-default (non-zero)
 * value, logs that writing datapath parameters is unsupported.  The
 * @set callback is accepted for interface compatibility but never
 * invoked.  Returns @len on success, -EPERM without CAP_NET_ADMIN,
 * -EINVAL if @buf contains no digits, or -ENODEV if the device no
 * longer maps to a datapath.
 */
static ssize_t store_bridge_parm(DEVICE_PARAMS,
				 const char *buf, size_t len,
				 void (*set)(struct datapath *, unsigned long))
{
	char *endp;
	unsigned long val;
	ssize_t result = len;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* Note: trailing non-digit characters are deliberately tolerated,
	 * matching historical bridge sysfs behavior; only a completely
	 * non-numeric buffer is rejected. */
	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	/* xxx We use a default value of 0 for all fields.  If the caller
	 * xxx is attempting to set the value to our default, just silently
	 * xxx ignore the request.
	 */
	if (val != 0) {
		struct datapath *dp;

		rcu_read_lock();

		dp = sysfs_get_dp(to_net_dev(d));
		if (dp)
			/* pr_warn, not the deprecated pr_warning, for
			 * consistency with the rest of this file. */
			pr_warn("%s: xxx writing dp parms not supported yet!\n",
				ovs_dp_name(dp));
		else
			result = -ENODEV;

		rcu_read_unlock();
	}

	return result;
}
static int netdev_send(struct vport *vport, struct sk_buff *skb) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); int mtu = netdev_vport->dev->mtu; int len; if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) { if (net_ratelimit()) pr_warn("%s: dropped over-mtu packet: %d > %d\n", ovs_dp_name(vport->dp), packet_length(skb), mtu); goto error; } if (unlikely(skb_warn_if_lro(skb))) goto error; skb->dev = netdev_vport->dev; forward_ip_summed(skb, true); if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) { int features; features = netif_skb_features(skb); if (!vlan_tso) features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO | NETIF_F_FSO); if (netif_needs_gso(skb, features)) { struct sk_buff *nskb; nskb = skb_gso_segment(skb, features); if (!nskb) { if (unlikely(skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { kfree_skb(skb); return 0; } skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY; goto tag; } if (IS_ERR(nskb)) { kfree_skb(skb); return 0; } consume_skb(skb); skb = nskb; len = 0; do { nskb = skb->next; skb->next = NULL; skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (likely(skb)) { len += skb->len; vlan_set_tci(skb, 0); dev_queue_xmit(skb); } skb = nskb; } while (skb); return len; } tag: skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) return 0; vlan_set_tci(skb, 0); } len = skb->len; dev_queue_xmit(skb); return len; error: kfree_skb(skb); ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); return 0; }
/* Placeholder: bridge priority is not tunable on an OVS datapath; the
 * attempt is logged and @val is ignored. */
static void set_priority(struct datapath *dp, unsigned long val)
{
	(void)val;	/* parameter intentionally unused */

	pr_info("%s: xxx attempt to set_priority()\n", ovs_dp_name(dp));
}
/* Placeholder: ageing time is not tunable on an OVS datapath; the
 * attempt is logged and @val is ignored. */
static void set_ageing_time(struct datapath *dp, unsigned long val)
{
	(void)val;	/* parameter intentionally unused */

	pr_info("%s: xxx attempt to set_ageing_time()\n", ovs_dp_name(dp));
}