static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
			       int regnum, u16 val)
{
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
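/* A hedged sketch of how the indirect accessor above is typically consumed:
 * MDIO bus callbacks that intercept accesses to the switch pseudo-PHY and
 * pass everything else through to the master MDIO bus. BRCM_PSEUDO_PHY_ADDR,
 * priv->indir_phy_mask and priv->master_mii_bus are assumptions here, not
 * definitions taken from this file.
 */
static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept reads from the Broadcom pseudo-PHY address, else
	 * send them to the master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
	else
		return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
}

static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	struct bcm_sf2_priv *priv = bus->priv;

	/* Intercept writes to the Broadcom pseudo-PHY address, else
	 * send them to the master MDIO bus controller
	 */
	if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
		return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
	else
		return mdiobus_write_nested(priv->master_mii_bus, addr,
					    regnum, val);
}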
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	u32 reg;

	/* Disable learning while in WoL mode */
	if (priv->wol_ports_mask & (1 << port)) {
		reg = core_readl(priv, CORE_DIS_LEARN);
		reg |= BIT(port);
		core_writel(priv, reg, CORE_DIS_LEARN);
		return;
	}

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	b53_disable_port(ds, port, phy);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Enable port 7 interrupts to get notified */
	if (port == 7)
		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));

	/* Set this port, and only this one to be in the default VLAN */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & br_port_mask))
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	s8 cpu_port = ds->dst->cpu_port;
	struct bcm_sf2_vlan *vl;
	u16 vid;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		vl = &priv->vlans[vid];

		bcm_sf2_get_vlan_entry(priv, vid, vl);

		vl->members |= BIT(port) | BIT(cpu_port);
		if (untagged)
			vl->untag |= BIT(port) | BIT(cpu_port);
		else
			vl->untag &= ~(BIT(port) | BIT(cpu_port));

		bcm_sf2_set_vlan_entry(priv, vid, vl);
		bcm_sf2_sw_fast_age_vlan(priv, vid);
	}

	if (pvid) {
		core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
		core_writel(priv, vlan->vid_end,
			    CORE_DEFAULT_1Q_TAG_P(cpu_port));
		bcm_sf2_sw_fast_age_vlan(priv, vid);
	}
}
static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
			       u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Don't touch the remaining ports */
		if (!((1 << i) & br_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg &= ~(1 << port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[port].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			p_ctl &= ~(1 << i);
	}

	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
 * flush for that port.
 */
static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int timeout = 1000;
	u32 reg;

	core_writel(priv, port, CORE_FAST_AGE_PORT);

	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
	reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
	core_writel(priv, reg, CORE_FAST_AGE_CTRL);

	do {
		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
		if (!(reg & FAST_AGE_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	core_writel(priv, 0, CORE_FAST_AGE_CTRL);

	return 0;
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == 7) {
		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
	}

	if (port == 0 && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
				   struct bcm_sf2_vlan *vlan)
{
	int ret;

	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
	core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
		    CORE_ARLA_VTBL_ENTRY);

	ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
	if (ret)
		pr_err("failed to write VLAN entry\n");
}
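/* The VLAN table command issue/wait pair used by the get/set entry helpers
 * is not shown in this file; a minimal sketch follows, assuming an
 * ARLA_VTBL_STDN start/done bit in a CORE_ARLA_VTBL_RWCTRL register (both
 * names are assumptions, not definitions taken from this file).
 */
static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	/* Poll until the hardware clears the start/done bit */
	do {
		reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
		if (!(reg & ARLA_VTBL_STDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}

static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
{
	/* Kick the requested VLAN table command and wait for completion */
	core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);

	return bcm_sf2_vlan_op_wait(priv);
}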
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
 * flush for that port.
 */
static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	core_writel(priv, port, CORE_FAST_AGE_PORT);

	return bcm_sf2_fast_age_op(priv);
}
static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg,
				u32 value)
{
	struct bcm_sf2_priv *priv = dev->priv;

	core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}
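/* For symmetry, the matching 32-bit read accessor would plausibly look like
 * the sketch below; the b53 core read ops return the value through a
 * pointer. Treat this as illustrative, not the definitive implementation.
 */
static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg,
			       u32 *val)
{
	struct bcm_sf2_priv *priv = dev->priv;

	*val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg));

	return 0;
}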
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      struct net_device *bridge)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	/* Make this port leave the all VLANs join since we will have proper
	 * VLAN entries from now on
	 */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg &= ~BIT(port);
	if ((reg & BIT(cpu_port)) == BIT(cpu_port))
		reg &= ~BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);

	priv->port_sts[port].bridge_dev = bridge;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}
static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port,
				   bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}
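/* A hedged sketch of the ethtool-facing path that drives the helper above:
 * the requested state is cached in port_sts[] (which bcm_sf2_port_setup()
 * consults to restore EEE) and then applied to the hardware. The exact DSA
 * ops signature varies across kernel versions, so this is illustrative only.
 */
static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
			      struct phy_device *phydev,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	/* Remember the requested state so a later port setup can restore it */
	p->eee_enabled = e->eee_enabled;
	bcm_sf2_eee_enable_set(ds, port, e->eee_enabled);

	return 0;
}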
static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
{
	u32 mgmt, vc0, vc1, vc4, vc5;

	mgmt = core_readl(priv, CORE_SWMODE);
	vc0 = core_readl(priv, CORE_VLAN_CTRL0);
	vc1 = core_readl(priv, CORE_VLAN_CTRL1);
	vc4 = core_readl(priv, CORE_VLAN_CTRL4);
	vc5 = core_readl(priv, CORE_VLAN_CTRL5);

	mgmt &= ~SW_FWDG_MODE;

	if (enable) {
		vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
		vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc4 |= INGR_VID_CHK_DROP;
		vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
	} else {
		vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
		vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
		vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
		vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
		vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
	}

	core_writel(priv, vc0, CORE_VLAN_CTRL0);
	core_writel(priv, vc1, CORE_VLAN_CTRL1);
	core_writel(priv, 0, CORE_VLAN_CTRL3);
	core_writel(priv, vc4, CORE_VLAN_CTRL4);
	core_writel(priv, vc5, CORE_VLAN_CTRL5);
	core_writel(priv, mgmt, CORE_SWMODE);
}
static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	s8 cpu_port = ds->dst->cpu_port;
	struct bcm_sf2_vlan *vl;
	u16 vid, pvid;
	int ret;

	pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		vl = &priv->vlans[vid];

		ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
		if (ret)
			return ret;

		vl->members &= ~BIT(port);
		if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
			vl->members = 0;
		if (pvid == vid)
			pvid = 0;
		if (untagged) {
			vl->untag &= ~BIT(port);
			if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
				vl->untag = 0;
		}

		bcm_sf2_set_vlan_entry(priv, vid, vl);
		bcm_sf2_sw_fast_age_vlan(priv, vid);
	}

	core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
	core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
	bcm_sf2_sw_fast_age_vlan(priv, vid);

	return 0;
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port].netdev);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *bridge = priv->port_sts[port].bridge_dev;
	s8 cpu_port = ds->dst->cpu_port;
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Don't touch the remaining ports */
		if (priv->port_sts[i].bridge_dev != bridge)
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg &= ~(1 << port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[port].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			p_ctl &= ~(1 << i);
	}

	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;
	priv->port_sts[port].bridge_dev = NULL;

	/* Make this port join all VLANs without VLAN entries */
	reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
	reg |= BIT(port);
	if (!(reg & BIT(cpu_port)))
		reg |= BIT(cpu_port);
	core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
}
static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;

	/* Clear all VLANs */
	bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);

	for (port = 0; port < priv->hw_params.num_ports; port++) {
		if (!((1 << port) & ds->enabled_port_mask))
			continue;

		core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
	}
}
static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
	reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
	core_writel(priv, reg, CORE_FAST_AGE_CTRL);

	do {
		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
		if (!(reg & FAST_AGE_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	core_writel(priv, 0, CORE_FAST_AGE_CTRL);

	return 0;
}
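/* bcm_sf2_sw_fast_age_vlan(), called from the VLAN add/del paths elsewhere
 * in this file, is not defined here. A minimal sketch built on the shared
 * helper above, assuming a CORE_FAST_AGE_VID register selects the VLAN to
 * age (the register name is an assumption):
 */
static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
{
	/* Select the VLAN, then run the generic fast-age operation */
	core_writel(priv, vid, CORE_FAST_AGE_VID);

	return bcm_sf2_fast_age_op(priv);
}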
static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	u32 cmd;

	if (op > ARL_RW)
		return -EINVAL;

	cmd = core_readl(priv, CORE_ARLA_RWCTL);
	cmd &= ~IVL_SVL_SELECT;
	cmd |= ARL_STRTDN;
	if (op)
		cmd |= ARL_RW;
	else
		cmd &= ~ARL_RW;
	core_writel(priv, cmd, CORE_ARLA_RWCTL);

	return bcm_sf2_arl_op_wait(priv);
}
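/* A hedged sketch of the completion wait used by bcm_sf2_arl_rw_op() above,
 * polling for the hardware to clear the ARL_STRTDN start/done bit; the
 * timeout value is an assumption.
 */
static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 10;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_RWCTL);
		if (!(reg & ARL_STRTDN))
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}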
static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
				  struct bcm_sf2_vlan *vlan)
{
	u32 entry;
	int ret;

	core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);

	ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
	if (ret)
		return ret;

	entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);

	vlan->members = entry & FWD_MAP_MASK;
	vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;

	return 0;
}
static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->enabled_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 link, duplex, pause;
	u32 reg;

	link = core_readl(priv, CORE_LNKSTS);
	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* Port 7 is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 */
	if (port == 7) {
		status->link = priv->port_sts[port].link;
		reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
		reg |= SW_OVERRIDE;
		if (status->link)
			reg |= LINK_STS;
		else
			reg &= ~LINK_STS;
		core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
		status->duplex = 1;
	} else {
		status->link = !!(link & (1 << port));
		status->duplex = !!(duplex & (1 << port));
	}

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int i;
	u32 reg, offset;

	if (priv->type == BCM7445_DEVICE_ID)
		offset = CORE_STS_OVERRIDE_IMP;
	else
		offset = CORE_STS_OVERRIDE_IMP2;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Configure Traffic Class to QoS mapping, allow each priority to map
	 * to a different queue number
	 */
	reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);
	core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));

	b53_brcm_hdr_setup(ds, port);

	/* Force link status for IMP port */
	reg = core_readl(priv, offset);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, offset);
}
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port].netdev;
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		if (!results[0].is_valid && !results[1].is_valid)
			break;

	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
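/* The search-completion helper used by the dump loop above is not shown
 * here; a minimal sketch follows, assuming the ARLA_SRCH_STDN start/done
 * bit and a "result valid" bit (named ARLA_SRCH_VLID here, an assumption)
 * live in CORE_ARLA_SRCH_CTL.
 */
static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	do {
		reg = core_readl(priv, CORE_ARLA_SRCH_CTL);

		/* Search finished */
		if (!(reg & ARLA_SRCH_STDN))
			return 0;

		/* A valid result is pending and can be read out */
		if (reg & ARLA_SRCH_VLID)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout--);

	return -ETIMEDOUT;
}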