/* Bridge join: add @port to the port-based VLAN membership of every port
 * in @br_port_mask, and add those ports to @port's own membership.  The
 * software copy in port_sts[].vlan_ctl_mask is kept in sync with the
 * hardware so it can be consulted later.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	/* Start from the local port's current hardware membership */
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Only touch ports that are part of this bridge */
		if (!((1 << i) & br_port_mask))
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
/* Globally enable or disable 802.1Q VLAN operation by read-modify-writing
 * the five VLAN control registers and the switch mode register.
 */
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
	u32 mgmt, vc0, vc1, vc4, vc5;

	mgmt = core_readl(priv, CORE_SWMODE);
	vc0 = core_readl(priv, CORE_VLAN_CTRL0);
	vc1 = core_readl(priv, CORE_VLAN_CTRL1);
	vc4 = core_readl(priv, CORE_VLAN_CTRL4);
	vc5 = core_readl(priv, CORE_VLAN_CTRL5);

	/* Clear the forwarding-mode selection bit in both cases */
	mgmt &= ~SW_FWDG_MODE;

	if (enable) {
		/* IVL learning, reserved-multicast handling, drop on
		 * ingress VID check failure and on VLAN table miss
		 */
		vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
		vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc4 |= INGR_VID_CHK_DROP;
		vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
	} else {
		/* Undo all of the above; VID violations go to the IMP port */
		vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
		vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
		vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
	}

	core_writel(priv, vc0, CORE_VLAN_CTRL0);
	core_writel(priv, vc1, CORE_VLAN_CTRL1);
	/* CTRL3 is unconditionally cleared in both modes */
	core_writel(priv, 0, CORE_VLAN_CTRL3);
	core_writel(priv, vc4, CORE_VLAN_CTRL4);
	core_writel(priv, vc5, CORE_VLAN_CTRL5);
	core_writel(priv, mgmt, CORE_SWMODE);
}
/* Bridge leave: remove @port from the port-based VLAN membership of the
 * other ports in @br_port_mask, and drop those ports from @port's own
 * membership, restoring port isolation.
 */
static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Don't touch the remaining ports */
		if (!((1 << i) & br_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg &= ~(1 << port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		/* Cache the remote port's new membership under index i;
		 * indexing with 'port' here would leave port i's cached
		 * mask stale and repeatedly clobber the local port's cache
		 * (bcm_sf2_sw_br_join updates port_sts[i] the same way).
		 */
		priv->port_sts[i].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			p_ctl &= ~(1 << i);
	}

	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
/* Quiesce a port: mask its interrupts (port 7 only), power down the lone
 * GPHY when it serves port 0, set the Rx/Tx disable bits, and power down
 * the port's queue memory.  Ports armed for Wake-on-LAN are left alone.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	/* Don't touch ports that must stay up for Wake-on-LAN */
	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == 7) {
		/* Mask and acknowledge any pending port 7 interrupts */
		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
	}

	/* Power down the single internal GPHY when it backs port 0 */
	if (port == 0 && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The CPU (IMP) port uses a different control register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
/* Fast-ageing of ARL entries for a given port, equivalent to an ARL * flush for that port. */ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int timeout = 1000; u32 reg; core_writel(priv, port, CORE_FAST_AGE_PORT); reg = core_readl(priv, CORE_FAST_AGE_CTRL); reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; core_writel(priv, reg, CORE_FAST_AGE_CTRL); do { reg = core_readl(priv, CORE_FAST_AGE_CTRL); if (!(reg & FAST_AGE_STR_DONE)) break; cpu_relax(); } while (timeout--); if (!timeout) return -ETIMEDOUT; core_writel(priv, 0, CORE_FAST_AGE_CTRL); return 0; }
/* Bring up a user port: power up its queue memory, clear Rx/Tx disable,
 * unmask port 7 interrupts, place the port alone in its default
 * port-based VLAN (plus the IMP port), and restore EEE if it was on.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Enable port 7 interrupts to get notified */
	if (port == 7)
		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));

	/* Set this port, and only this one to be in the default VLAN */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	/* Pair this port with the IMP port in its port-based VLAN */
	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
/* Quiesce a port: disable its interrupts (MoCA port only), power down
 * the lone internal GPHY when this port uses it, set the Rx/Tx disable
 * bits, and power down the port's queue memory.  Ports armed for
 * Wake-on-LAN are left alone.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	/* Don't touch ports that must stay up for Wake-on-LAN */
	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	/* Power down the single internal GPHY if this port is backed by it */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The CPU (IMP) port uses a different control register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
/* Quiesce a port via the shared b53 helper.  Ports armed for Wake-on-LAN
 * only get address learning disabled and are otherwise left running so
 * wake packets can still be received.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	/* Power down the single internal GPHY if this port is backed by it */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* Common b53 path sets the Rx/Tx disable bits */
	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
/* Bring up a user port: power up its queue memory, clear Rx/Tx disable,
 * re-enable and re-initialize the internal GPHY if this port uses it,
 * enable MoCA interrupts, restore port-based VLAN membership (including
 * any bridge membership saved in vlan_ctl_mask) and EEE state.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	/* Pair this port with the IMP port in its port-based VLAN */
	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
/* Translate a bridge STP state into the hardware MISTP state for @port,
 * fast-ageing the port's ARL entries when moving from a learning or
 * forwarding state down to a non-forwarding one.
 */
static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	/* NOTE(review): cur_hw_state keeps the field shifted while hw_state
	 * constants below are compared unshifted — this only works if
	 * G_MISTP_STATE_SHIFT is 0; confirm against the register layout.
	 */
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	/* Program the new state into the port control register */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}
/* fixed-PHY status callback: report link/duplex/pause for a port and
 * force the link state at the port override level so data flows.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Mirror the decided link state into the port override register */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* Both Rx and Tx pause bits set means symmetric + asymmetric pause */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
/* ethtool statistics callback: copy every MIB counter for @port into
 * @data, holding the stats mutex so concurrent readers don't interleave.
 */
static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int idx;

	mutex_lock(&priv->stats_mutex);

	/* Fetch the per-port counters, one MIB entry at a time */
	for (idx = 0; idx < BCM_SF2_STATS_SIZE; idx++) {
		const struct bcm_sf2_hw_stats *stat = &bcm_sf2_mib[idx];
		u32 addr = stat->reg + CORE_P_MIB_OFFSET(port);

		/* 64-bit counters require a latched read */
		if (stat->sizeof_stat == 8)
			data[idx] = core_readq(priv, addr);
		else
			data[idx] = core_readl(priv, addr);
	}

	mutex_unlock(&priv->stats_mutex);
}
/* Indirect pseudo-PHY register access through the switch-internal MDIO
 * master.  @op non-zero performs a read (returned in the low 16 bits),
 * zero performs a write of @val.
 */
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr, int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	/* Hand the MDIO bus over to the switch-internal master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;	/* NOTE(review): magic page value — verify vs datasheet */
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;	/* register offsets are in 4-byte units */

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Give the MDIO bus back */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
/* switchdev VLAN dump: invoke @cb once per VLAN that @port is a member
 * of, flagging untagged membership and the port's PVID.
 */
static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct bcm_sf2_port_status *p = &priv->port_sts[port];
	struct bcm_sf2_vlan *vl;
	u16 vid, pvid;
	int err = 0;

	/* NOTE(review): this hardware read is never used — the PVID test
	 * below uses the cached p->pvid instead.  Confirm whether the
	 * register value was meant to be compared, or drop the read.
	 */
	pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

	/* Walk the software VLAN table and report matching entries */
	for (vid = 0; vid < VLAN_N_VID; vid++) {
		vl = &priv->vlans[vid];

		if (!(vl->members & BIT(port)))
			continue;

		vlan->vid_begin = vlan->vid_end = vid;
		vlan->flags = 0;

		if (vl->untag & BIT(port))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		if (p->pvid == vid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}
/* Read back the 4 ARL bins after an ARL operation.  When @is_valid, look
 * for a valid entry and return its bin index in @idx; otherwise confirm
 * that the targeted MAC is gone.  Returns -ENOENT if nothing matches.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac, u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx, bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): bitwise AND matches on any overlapping bit,
		 * not MAC equality — a different address sharing bits with
		 * @mac would also match.  Confirm an exact field compare
		 * against the MACVID layout is not required here.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Configure the IMP (CPU-facing) port: power up its memories, enable all
 * forwarding toward it, program the TC-to-queue map, enable Broadcom tags
 * and force its link up via the per-chip status override register.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	/* The status override register moved between chip generations */
	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	/* Enable Broadcom tag insertion/extraction on this port */
	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}
/* b53 accessor: 32-bit register read routed through the SF2 core window */
static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;
	u32 offset = SF2_PAGE_REG_MKADDR(page, reg);

	*val = core_readl(priv, offset);

	return 0;
}
/* Fetch ARL search result slot @idx and decode it into @ent */
static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx, struct bcm_sf2_arl_entry *ent)
{
	u64 mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
	u32 fwd = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));

	bcm_sf2_arl_to_entry(ent, mac_vid, fwd);
}
/* Bridge join (bridge_dev variant): remove @port (and the CPU port) from
 * the join-all-VLANs group, record the bridge, and merge @port into the
 * port-based VLAN membership of every port already on @bridge.
 */
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	/* Drop the CPU port from the group too, if it is still in it */
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;
	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Only touch ports that belong to the same bridge */
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
/* fixed-PHY status callback (port-7 variant): report link/duplex/pause
 * for a port; port 7 has no CORE_LNKSTS bit so its link state is taken
 * from the interrupt handler and forced at the override level.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 link, duplex, pause;
	u32 reg;

	link = core_readl(priv, CORE_LNKSTS);
	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* Port 7 is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 */
	if (port == 7) {
		status->link = priv->port_sts[port].link;
		reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
		reg |= SW_OVERRIDE;
		if (status->link)
			reg |= LINK_STS;
		else
			reg &= ~LINK_STS;
		core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
		status->duplex = 1;
	} else {
		status->link = !!(link & (1 << port));
		status->duplex = !!(duplex & (1 << port));
	}

	/* Both Rx and Tx pause bits set means symmetric + asymmetric pause */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
/* Toggle Energy-Efficient Ethernet for @port in the EEE enable register */
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 ctrl;

	ctrl = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		ctrl |= 1 << port;
	else
		ctrl &= ~(1 << port);
	core_writel(priv, ctrl, CORE_EEE_EN_CTRL);
}
/* ethtool get_eee callback: report software enable state and the
 * hardware LPI (low power idle) indication for @port.
 */
static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 lpi = core_readl(priv, CORE_EEE_LPI_INDICATE);

	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(lpi & (1 << port));

	return 0;
}
/* Software-reset the switch core and wait for the SOFTWARE_RESET bit to
 * self-clear.  Returns 0 on success, -ETIMEDOUT after ~1000 polls.
 */
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	/* Pre-decrement so loop exhaustion leaves timeout == 0.  The
	 * previous "timeout-- > 0" post-decrement wrapped the unsigned
	 * counter to UINT_MAX on exhaustion, so "timeout == 0" was never
	 * true and the timeout went unreported; it could also report a
	 * false timeout when the bit cleared on the final iteration.
	 */
	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (--timeout);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}
/* Kick off a fast-age operation (port + VLAN + dynamic entries) and poll
 * for completion.  Returns 0 on success, -ETIMEDOUT after ~1000 polls.
 */
static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	/* FAST_AGE_STR_DONE starts the operation and self-clears when done */
	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
	reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
	core_writel(priv, reg, CORE_FAST_AGE_CTRL);

	/* Pre-decrement so that exhausting the loop leaves timeout == 0.
	 * The previous post-decrement wrapped the unsigned counter to
	 * UINT_MAX on exhaustion, so -ETIMEDOUT was never reported.
	 */
	do {
		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
		if (!(reg & FAST_AGE_STR_DONE))
			break;

		cpu_relax();
	} while (--timeout);

	if (!timeout)
		return -ETIMEDOUT;

	core_writel(priv, 0, CORE_FAST_AGE_CTRL);

	return 0;
}
/* Address Resolution Logic routines */

/* Wait for a pending ARL read/write operation to complete.  Returns 0 as
 * soon as ARL_STRTDN clears, -ETIMEDOUT after ~11 polls of 1-2ms each.
 */
static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int tries;

	for (tries = 0; tries <= 10; tries++) {
		if (!(core_readl(priv, CORE_ARLA_RWCTL) & ARL_STRTDN))
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}
static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct net_device *bridge = priv->port_sts[port].bridge_dev; s8 cpu_port = ds->dst->cpu_port; unsigned int i; u32 reg, p_ctl; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); for (i = 0; i < priv->hw_params.num_ports; i++) { /* Don't touch the remaining ports */ if (priv->port_sts[i].bridge_dev != bridge) continue; reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); reg &= ~(1 << port); core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); priv->port_sts[port].vlan_ctl_mask = reg; /* Prevent self removal to preserve isolation */ if (port != i) p_ctl &= ~(1 << i); } core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; priv->port_sts[port].bridge_dev = NULL; /* Make this port join all VLANs without VLAN entries */ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); reg |= BIT(port); if (!(reg & BIT(cpu_port))) reg |= BIT(cpu_port); core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); }
/* Read the hardware VLAN table entry for @vid and decode its member and
 * untag bitmaps into @vlan.  Returns 0 on success or the table-op error.
 */
static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, struct bcm_sf2_vlan *vlan)
{
	u32 entry;
	int err;

	/* Select the table index, then issue a read command */
	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
	err = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
	if (err)
		return err;

	entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
	vlan->members = entry & FWD_MAP_MASK;
	vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;

	return 0;
}
/* Wait for the ARL search engine: returns 0 when the search has stopped
 * (STDN cleared) or a valid result is ready (VLID set), -ETIMEDOUT after
 * ~1001 polls of 1-2ms each.
 */
static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
{
	unsigned int tries = 1000;
	u32 ctl;

	do {
		ctl = core_readl(priv, CORE_ARLA_SRCH_CTL);
		if (!(ctl & ARLA_SRCH_STDN) || (ctl & ARLA_SRCH_VLID))
			return 0;

		usleep_range(1000, 2000);
	} while (tries--);

	return -ETIMEDOUT;
}
/* Start an ARL table operation (@op: ARL_RW for read, 0 for write) and
 * wait for it to finish.  Returns -EINVAL for an unknown opcode.
 */
static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	u32 ctrl;

	if (op > ARL_RW)
		return -EINVAL;

	ctrl = core_readl(priv, CORE_ARLA_RWCTL);
	/* Clear SVL selection and the direction bit, then start */
	ctrl &= ~(IVL_SVL_SELECT | ARL_RW);
	ctrl |= ARL_STRTDN;
	if (op)
		ctrl |= ARL_RW;
	core_writel(priv, ctrl, CORE_ARLA_RWCTL);

	return bcm_sf2_arl_op_wait(priv);
}
/* Add the IMP (CPU) port to the port-based VLAN of every enabled user
 * port, so each user port only shares a VLAN with the IMP port.
 */
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int p;
	u32 pvlan;

	for (p = 0; p < priv->hw_params.num_ports; p++) {
		/* Skip ports that are not enabled */
		if (!(ds->enabled_port_mask & (1 << p)))
			continue;

		pvlan = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(p));
		pvlan |= (1 << cpu_port);
		core_writel(priv, pvlan, CORE_PORT_VLAN_CTL_PORT(p));
	}
}