/* Announce (supervision frame) timer function */
static void hsr_announce(unsigned long data)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = (struct hsr_priv *) data;

	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);

	if (hsr->announce_count < 3 && hsr->protVersion == 0) {
		send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
					   hsr->protVersion);
		hsr->announce_count++;

		hsr->announce_timer.expires = jiffies +
				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
	} else {
		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
					   hsr->protVersion);

		hsr->announce_timer.expires = jiffies +
				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	}

	if (is_admin_up(master->dev))
		add_timer(&hsr->announce_timer);

	rcu_read_unlock();
}
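/* Illustrative only (not taken from the original file): a minimal sketch of
 * how a callback with this pre-4.15 (unsigned long data) timer signature
 * could be armed, e.g. when the master device comes up. setup_timer() and
 * mod_timer() are the standard kernel timer helpers of that era; the helper
 * name and the chosen initial interval below are assumptions.
 */
static inline void hsr_announce_timer_arm_sketch(struct hsr_priv *hsr)
{
	/* Bind hsr_announce() to the timer and pass the hsr_priv as cookie */
	setup_timer(&hsr->announce_timer, hsr_announce, (unsigned long) hsr);
	/* Schedule the first announce; hsr_announce() re-arms itself */
	mod_timer(&hsr->announce_timer,
		  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}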
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		if (master != NULL) {
			netdev_update_features(master->dev);
			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		}
		netdev_rx_handler_unregister(port->dev);
		dev_set_promiscuity(port->dev, -1);
	}

	/* FIXME?
	 * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
	 */

	synchronize_rcu();

	if (port != master)
		dev_put(port->dev);
}
/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port != NULL)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(dev, port);
		if (res)
			goto fail_dev_setup;
	}

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}
static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *master;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	skb->dev = master->dev;
	hsr_forward_skb(skb, master);

	return NETDEV_TX_OK;
}
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	unsigned char old_operstate;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);

	/* netif_stacked_transfer_operstate() cannot be used here since
	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
	 */
	old_operstate = master->dev->operstate;
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	hsr_check_announce(master->dev, old_operstate);
}
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = netdev_priv(dev);
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);

	if (new_mtu > hsr_get_max_mtu(hsr)) {
		netdev_info(master->dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
			    HSR_HLEN);
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}
/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
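/* Illustrative only (not part of the kernel sources above): a minimal
 * userspace sketch, using libnl-3 / libnl-genl-3, of how the
 * HSR_C_GET_NODE_STATUS request described in the comment on
 * hsr_get_node_status() could be sent. It assumes the generic netlink
 * family is registered under the name "HSR" and that the HSR_A_ / HSR_C_
 * constants come from the <linux/hsr_netlink.h> uapi header. Error
 * handling and parsing of the HSR_C_SET_NODE_STATUS reply are omitted;
 * a real caller would install a reply callback before receiving.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/hsr_netlink.h>
#include <linux/if_ether.h>	/* ETH_ALEN */

static int hsr_query_node_status(int hsr_ifindex,
				 const unsigned char mac[ETH_ALEN])
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk)
		return -1;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "HSR");	/* assumed family name */

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    HSR_C_GET_NODE_STATUS, 1);
	nla_put_u32(msg, HSR_A_IFINDEX, hsr_ifindex);	/* which hsr device */
	nla_put(msg, HSR_A_NODE_ADDR, ETH_ALEN, mac);	/* which ring node */

	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);	/* kernel unicasts HSR_C_SET_NODE_STATUS back */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}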