static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
		 */
		be->vif->drain_timeout = msecs_to_jiffies(30);
		be->vif->stall_timeout = 0;
	}

	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);

	vif->gso_mask = 0;

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
		vif->gso_mask |= GSO_BIT(TCPV6);

	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
					     "feature-no-csum-offload", 0);

	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
						"feature-ipv6-csum-offload", 0);

	return 0;
}
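/*
 * For reference: the xenbus_read_unsigned() helper used above wraps
 * xenbus_scanf() and falls back to a caller-supplied default when the
 * node is absent or unparsable, which is why the version above no
 * longer needs a scratch "val" variable per key. A minimal sketch of
 * that behaviour (the real helper lives in the generic xenbus code;
 * this re-implementation is illustrative, not part of this driver):
 */
static unsigned int example_read_unsigned(const char *dir, const char *node,
					  unsigned int default_val)
{
	unsigned int val;

	/* xenbus_scanf() returns the number of values parsed, or -errno. */
	if (xenbus_scanf(XBT_NIL, dir, node, "%u", &val) <= 0)
		val = default_val;

	return val;
}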
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
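/*
 * Context for the function above, stated as an assumption about the
 * surrounding driver rather than a quote from this file: GSO_BIT() is
 * defined elsewhere in the driver along the lines of
 *
 *	#define GSO_BIT(x) (1 << XEN_NETIF_GSO_TYPE_ ## x)
 *
 * and xenvif_fix_features() is wired up as the .ndo_fix_features hook
 * in the vif's net_device_ops, so the core networking code calls it
 * from netdev_update_features() to strip NETIF_F_* bits corresponding
 * to features the frontend never advertised.
 */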
static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err, val;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-rx-notify", "%d", &val) < 0 || val == 0) {
		xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
		return -EINVAL;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	vif->gso_mask = 0;
	vif->gso_prefix_mask = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV6);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV6);

	if (vif->gso_mask & vif->gso_prefix_mask) {
		/* err is non-negative here (the last xenbus_scanf()
		 * succeeded), so pass the real error code rather than err.
		 */
		xenbus_dev_fatal(dev, -EOPNOTSUPP,
				 "%s: gso and gso prefix flags are not mutually exclusive",
				 dev->otherend);
		return -EOPNOTSUPP;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ip_csum = !val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ipv6_csum = !!val;

	return 0;
}
static int connect_rings(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn, rx_copy;
	int err;
	int val;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		return err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, dev->otherend,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 dev->otherend);
			return err;
		}
		rx_evtchn = tx_evtchn;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (vif->dev->tx_queue_len != 0) {
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-rx-notify", "%d", &val) < 0)
			val = 0;
		if (val)
			vif->can_queue = 1;
		else
			/* Must be non-zero for pfifo_fast to work. */
			vif->dev->tx_queue_len = 1;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	vif->gso_mask = 0;
	vif->gso_prefix_mask = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV6);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV6);

	if (vif->gso_mask & vif->gso_prefix_mask) {
		/* err is non-negative here (the last xenbus_scanf()
		 * succeeded), so pass the real error code rather than err.
		 */
		xenbus_dev_fatal(dev, -EOPNOTSUPP,
				 "%s: gso and gso prefix flags are not mutually exclusive",
				 dev->otherend);
		return -EOPNOTSUPP;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ip_csum = !val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ipv6_csum = !!val;

	/* Map the shared frame, irq etc. */
	err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
			     tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		return err;
	}
	return 0;
}
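/*
 * Note (an assumption about the surrounding driver, not text from this
 * function): xenvif_connect() maps the two grant references into the
 * backend's address space as the shared TX/RX rings and binds the
 * interdomain event channel(s). With split channels each ring gets its
 * own interrupt; with a single channel, rx_evtchn == tx_evtchn as set
 * above and one handler services both rings.
 */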
static int read_xenbus_vif_flags(struct backend_info *be)
{
	struct xenvif *vif = be->vif;
	struct xenbus_device *dev = be->dev;
	unsigned int rx_copy;
	int err, val;

	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
			   &rx_copy);
	if (err == -ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
				 dev->otherend);
		return err;
	}
	if (!rx_copy)
		return -EOPNOTSUPP;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 "feature-rx-notify", "%d", &val) < 0)
		val = 0;
	if (!val) {
		/* - Reduce drain timeout to poll more frequently for
		 *   Rx requests.
		 * - Disable Rx stall detection.
		 */
		be->vif->drain_timeout = msecs_to_jiffies(30);
		be->vif->stall_timeout = 0;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
		val = 0;
	vif->can_sg = !!val;

	vif->gso_mask = 0;
	vif->gso_prefix_mask = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV4);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_mask |= GSO_BIT(TCPV6);

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
			 "%d", &val) < 0)
		val = 0;
	if (val)
		vif->gso_prefix_mask |= GSO_BIT(TCPV6);

	if (vif->gso_mask & vif->gso_prefix_mask) {
		/* err is non-negative here (the last xenbus_scanf()
		 * succeeded), so pass the real error code rather than err.
		 */
		xenbus_dev_fatal(dev, -EOPNOTSUPP,
				 "%s: gso and gso prefix flags are not mutually exclusive",
				 dev->otherend);
		return -EOPNOTSUPP;
	}

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ip_csum = !val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
			 "%d", &val) < 0)
		val = 0;
	vif->ipv6_csum = !!val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
			 "%d", &val) < 0)
		val = 0;
	vif->multicast_control = !!val;

	return 0;
}
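/*
 * For completeness, a hedged sketch of the other side of this
 * negotiation: a frontend advertises these keys under its own xenstore
 * node, typically inside a transaction, using the standard
 * xenbus_printf() helper. The function name and the exact set of keys
 * written below are illustrative, not taken from netfront:
 */
static int example_advertise_features(struct xenbus_device *dev,
				      struct xenbus_transaction xbt)
{
	int err;

	/* Mandatory for this backend: RX buffers are filled by grant copy. */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err)
		return err;

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err)
		return err;

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err)
		return err;

	return xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
}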