/* bcm_phy_set_eee - enable or disable Energy Efficient Ethernet
 * @phydev: target phy_device struct
 * @enable: true to enable EEE, false to disable it
 *
 * Toggles the Broadcom vendor EEE control bits and the standard EEE
 * advertisement (100TX/1000T) in the AN MMD.
 *
 * Returns 0 on success, a negative errno on register access failure.
 * (The original ignored the phy_write_mmd() return values, so a failed
 * write was silently reported as success.)
 */
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
	int val, ret;

	/* Enable EEE at PHY level */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
	if (val < 0)
		return val;

	if (enable)
		val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
	else
		val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);

	ret = phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL,
			    (u32)val);
	if (ret < 0)
		return ret;

	/* Advertise EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV);
	if (val < 0)
		return val;

	if (enable)
		val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
	else
		val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);

	return phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV,
			     (u32)val);
}
static int amd_xgbe_phy_soft_reset(struct phy_device *phydev) { int count, ret; ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); if (ret < 0) return ret; ret |= MDIO_CTRL1_RESET; phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); count = 50; do { msleep(20); ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); if (ret < 0) return ret; } while ((ret & MDIO_CTRL1_RESET) && --count); if (ret & MDIO_CTRL1_RESET) return -ETIMEDOUT; /* Disable auto-negotiation for now */ ret = amd_xgbe_phy_disable_an(phydev); if (ret < 0) return ret; /* Clear auto-negotiation interrupts */ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); return 0; }
static void dp83822_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { int value; u16 sopass_val; wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE); wol->wolopts = 0; value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG); if (value & DP83822_WOL_MAGIC_EN) wol->wolopts |= WAKE_MAGIC; if (value & DP83822_WOL_SECURE_ON) { sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RXSOP1); wol->sopass[0] = (sopass_val & 0xff); wol->sopass[1] = (sopass_val >> 8); sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RXSOP2); wol->sopass[2] = (sopass_val & 0xff); wol->sopass[3] = (sopass_val >> 8); sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RXSOP3); wol->sopass[4] = (sopass_val & 0xff); wol->sopass[5] = (sopass_val >> 8); wol->wolopts |= WAKE_MAGICSECURE; }
static int amd_xgbe_phy_update_link(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; int ret; /* If we're doing auto-negotiation don't report link down */ if (priv->an_state != AMD_XGBE_AN_READY) { phydev->link = 1; return 0; } /* Link status is latched low, so read once to clear * and then read again to get current state */ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); if (ret < 0) return ret; ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1); if (ret < 0) return ret; phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0; return 0; }
static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, enum amd_xgbe_phy_rx *state) { unsigned int link_support; int ret, ad_reg, lp_reg; /* Read Base Ability register 2 first */ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (ret < 0) return AMD_XGBE_AN_ERROR; /* Check for a supported mode, otherwise restart in a different one */ link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20; if (!(ret & link_support)) return AMD_XGBE_AN_INCOMPAT_LINK; /* Check Extended Next Page support */ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); if (ad_reg < 0) return AMD_XGBE_AN_ERROR; lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg < 0) return AMD_XGBE_AN_ERROR; return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ? amd_xgbe_an_tx_xnp(phydev, state) : amd_xgbe_an_tx_training(phydev, state); }
/* Configure Wake-on-LAN for the DP83822.
 * Programs the destination MAC match registers and, for WAKE_MAGICSECURE,
 * the SecureOn password registers, then enables/disables the WoL logic
 * in the WOL_CFG register. Returns 0 on success, -EINVAL if the attached
 * net_device has no valid MAC address.
 * NOTE(review): phy_read_mmd() return values are stored into a u16 and
 * not checked for errors here — a negative errno would be truncated.
 */
static int dp83822_set_wol(struct phy_device *phydev,
			   struct ethtool_wolinfo *wol)
{
	struct net_device *ndev = phydev->attached_dev;
	u16 value;
	const u8 *mac;

	if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
		mac = (const u8 *)ndev->dev_addr;

		/* WoL matching needs a valid unicast MAC to compare against */
		if (!is_valid_ether_addr(mac))
			return -EINVAL;

		/* MAC addresses start with byte 5, but stored in mac[0].
		 * 822 PHYs store bytes 4|5, 2|3, 0|1
		 */
		phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
			      (mac[1] << 8) | mac[0]);
		phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
			      (mac[3] << 8) | mac[2]);
		phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
			      (mac[5] << 8) | mac[4]);

		value = phy_read_mmd(phydev, DP83822_DEVADDR,
				     MII_DP83822_WOL_CFG);
		if (wol->wolopts & WAKE_MAGIC)
			value |= DP83822_WOL_MAGIC_EN;
		else
			value &= ~DP83822_WOL_MAGIC_EN;

		if (wol->wolopts & WAKE_MAGICSECURE) {
			/* SecureOn password, two bytes per register,
			 * little-endian within each register
			 */
			phy_write_mmd(phydev, DP83822_DEVADDR,
				      MII_DP83822_RXSOP1,
				      (wol->sopass[1] << 8) | wol->sopass[0]);
			phy_write_mmd(phydev, DP83822_DEVADDR,
				      MII_DP83822_RXSOP2,
				      (wol->sopass[3] << 8) | wol->sopass[2]);
			phy_write_mmd(phydev, DP83822_DEVADDR,
				      MII_DP83822_RXSOP3,
				      (wol->sopass[5] << 8) | wol->sopass[4]);
			value |= DP83822_WOL_SECURE_ON;
		} else {
			value &= ~DP83822_WOL_SECURE_ON;
		}

		/* Enable WoL with pulse indication and clear any pending
		 * indication before arming
		 */
		value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
			  DP83822_WOL_CLR_INDICATION);
		phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
			      value);
	} else {
		/* No WoL options requested: disable the WoL block */
		value = phy_read_mmd(phydev, DP83822_DEVADDR,
				     MII_DP83822_WOL_CFG);
		value &= ~DP83822_WOL_EN;
		phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
			      value);
	}

	return 0;
}
/* Program the clause-73 auto-negotiation advertisement registers from
 * phydev->supported. Registers are written in 3-2-1 order; the base
 * register (MDIO_AN_ADVERTISE) is deliberately written last.
 * Returns 0 on success, a negative errno on read failure.
 * NOTE(review): the mask values (0xc000, 0x80, 0x20, 0x400, 0x800) are
 * the raw advertisement bits for FEC, 10GBase-KR, 1G/2.5G-KX and
 * (a)symmetric pause respectively, inferred from the SUPPORTED_* flag
 * each one is gated on — confirm against the register spec.
 */
static int amd_xgbe_an_init(struct phy_device *phydev)
{
	int ret;

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return ret;

	/* FEC ability/request bits, gated on 10GBase-R FEC support */
	if (phydev->supported & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return ret;

	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	/* One bit covers both KX and 2.5G-X modes */
	if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
	    (phydev->supported & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return ret;

	if (phydev->supported & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->supported & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	return 0;
}
/**
 * genphy_c45_pma_setup_forced - configure PMA/PMD for a forced speed
 * @phydev: target phy_device struct
 *
 * Program the PMA/PMD CTRL1 speed-select bits and the CTRL2 type-select
 * field to match the speed currently set in @phydev. Only full duplex
 * is supported.
 *
 * Returns 0 on success, -EINVAL for half duplex or an unsupported
 * speed, or a negative errno on register access failure.
 */
int genphy_c45_pma_setup_forced(struct phy_device *phydev)
{
	int ctrl1, ctrl2, ret;

	/* Half duplex is not supported */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	ctrl1 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
	if (ctrl1 < 0)
		return ctrl1;

	ctrl2 = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2);
	if (ctrl2 < 0)
		return ctrl2;

	ctrl1 &= ~MDIO_CTRL1_SPEEDSEL;
	/*
	 * PMA/PMD type selection is 1.7.5:0 not 1.7.3:0. See 45.2.1.6.1
	 * in 802.3-2012 and 802.3-2015.
	 */
	ctrl2 &= ~(MDIO_PMA_CTRL2_TYPE | 0x30);

	switch (phydev->speed) {
	case SPEED_10:
		ctrl2 |= MDIO_PMA_CTRL2_10BT;
		break;
	case SPEED_100:
		ctrl1 |= MDIO_PMA_CTRL1_SPEED100;
		ctrl2 |= MDIO_PMA_CTRL2_100BTX;
		break;
	case SPEED_1000:
		ctrl1 |= MDIO_PMA_CTRL1_SPEED1000;
		/* Assume 1000base-T */
		ctrl2 |= MDIO_PMA_CTRL2_1000BT;
		break;
	case SPEED_10000:
		ctrl1 |= MDIO_CTRL1_SPEED10G;
		/* Assume 10Gbase-T */
		ctrl2 |= MDIO_PMA_CTRL2_10GBT;
		break;
	default:
		return -EINVAL;
	}

	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, ctrl1);
	if (ret < 0)
		return ret;

	return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2);
}
/* Switch the PHY into GMII (1G/KX) operation: disable KR training,
 * reprogram the PCS for 1G, power-cycle it, then retune the SerDes for
 * the 1G rate. The SerDes writes must stay between start/complete
 * ratechange calls. Returns 0 on success, negative errno on failure.
 */
static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* New PCS settings only take effect after a power cycle */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed: rate, word mode, PLL, CDR rate, TX
	 * amplitude, baseline wander correction and PQ skew, using the
	 * per-speed values from the private data
	 */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}
/* Finish page exchange and, in KR mode, configure FEC and kick off KR
 * link training. Marks the RX side complete via *state.
 * Returns AMD_XGBE_AN_PAGE_RECEIVED on success, AMD_XGBE_AN_ERROR on
 * any register access failure.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC: only enable when both sides advertise it
	 * (0xc000 bits in advertisement register 3 on each end)
	 */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		/* Pulse the KR RT reset around the training start bit */
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}
/* Report whether auto-negotiation has completed.
 * Returns non-zero when done, 0 when still in progress, or a negative
 * errno if the status register could not be read.
 */
static int teranetics_aneg_done(struct phy_device *phydev)
{
	int stat1;

	stat1 = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
	if (stat1 < 0)
		return stat1;

	return stat1 & BMSR_ANEGCOMPLETE;
}
/** * genphy_c45_read_pma - read link speed etc from PMA * @phydev: target phy_device struct */ int genphy_c45_read_pma(struct phy_device *phydev) { int val; val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1); if (val < 0) return val; switch (val & MDIO_CTRL1_SPEEDSEL) { case 0: phydev->speed = SPEED_10; break; case MDIO_PMA_CTRL1_SPEED100: phydev->speed = SPEED_100; break; case MDIO_PMA_CTRL1_SPEED1000: phydev->speed = SPEED_1000; break; case MDIO_CTRL1_SPEED10G: phydev->speed = SPEED_10000; break; default: phydev->speed = SPEED_UNKNOWN; break; } phydev->duplex = DUPLEX_FULL; return 0; }
/** * genphy_c45_read_mdix - read mdix status from PMA * @phydev: target phy_device struct */ int genphy_c45_read_mdix(struct phy_device *phydev) { int val; if (phydev->speed == SPEED_10000) { val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_SWAPPOL); if (val < 0) return val; switch (val) { case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: phydev->mdix = ETH_TP_MDI; break; case 0: phydev->mdix = ETH_TP_MDI_X; break; default: phydev->mdix = ETH_TP_MDI_INVALID; break; } } return 0; }
/* Acknowledge pending PHY interrupts by reading the vendor interrupt
 * status register. Returns 0 on success or a negative errno.
 */
static int aqr_ack_interrupt(struct phy_device *phydev)
{
	int status;

	status = phy_read_mmd(phydev, MDIO_MMD_AN,
			      MDIO_AN_TX_VEND_INT_STATUS2);
	if (status < 0)
		return status;

	return 0;
}
static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev, enum amd_xgbe_phy_rx *state) { int ad_reg, lp_reg; /* Check Extended Next Page support */ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP); if (ad_reg < 0) return AMD_XGBE_AN_ERROR; lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX); if (lp_reg < 0) return AMD_XGBE_AN_ERROR; return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ? amd_xgbe_an_tx_xnp(phydev, state) : amd_xgbe_an_tx_training(phydev, state); }
/* On link change, log details about an Aquantia link partner (firmware
 * version, short-reach/downshift/fast-reframe capabilities) and report
 * when the vendor 1000Base-T2 mode is active. Best-effort: any read
 * failure silently aborts the remaining reporting.
 */
static void aqr107_link_change_notify(struct phy_device *phydev)
{
	u8 fw_major, fw_minor;
	bool downshift, short_reach, afr;
	int mode, val;

	/* Only meaningful on a running, auto-negotiated link */
	if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
		return;

	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
	/* call failed or link partner is no Aquantia PHY */
	if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
		return;

	short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
	downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;

	/* Partner firmware version, exchanged over auto-negotiation */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
	if (val < 0)
		return;

	fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
	fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);

	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
	if (val < 0)
		return;

	afr = val & MDIO_AN_RX_VEND_STAT3_AFR;

	phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
		   fw_major, fw_minor,
		   short_reach ? ", short reach mode" : "",
		   downshift ? ", fast-retrain downshift advertised" : "",
		   afr ? ", fast reframe advertised" : "");

	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
	if (val < 0)
		return;

	mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
	if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
		phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
/* Check whether a downshift event was flagged in the vendor interrupt
 * status register. Returns 1 if a downshift occurred, 0 if not, or a
 * negative errno on read failure.
 */
static int aqr107_read_downshift_event(struct phy_device *phydev)
{
	int status;

	status = phy_read_mmd(phydev, MDIO_MMD_AN,
			      MDIO_AN_TX_VEND_INT_STATUS1);
	if (status < 0)
		return status;

	return !!(status & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
}
/**
 * genphy_c45_restart_aneg - Enable and restart auto-negotiation
 * @phydev: target phy_device struct
 *
 * Set the enable and restart bits in the AN MMD control register.
 * Assumes the auto-negotiation MMD is present.
 *
 * Returns 0 on success or a negative errno.
 */
int genphy_c45_restart_aneg(struct phy_device *phydev)
{
	int ctrl;

	ctrl = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ctrl < 0)
		return ctrl;

	ctrl |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ctrl);
}
/**
 * genphy_c45_an_disable_aneg - disable auto-negotiation
 * @phydev: target phy_device struct
 *
 * Disable auto-negotiation in the Clause 45 PHY by clearing the enable
 * and restart bits. The link parameters are then controlled through
 * the PMA/PMD MMD registers.
 *
 * Returns zero on success, negative errno code on failure.
 */
int genphy_c45_an_disable_aneg(struct phy_device *phydev)
{
	int ctrl;

	ctrl = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ctrl < 0)
		return ctrl;

	ctrl &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART);

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ctrl);
}
/* Clear the KR training enable bit in the 10GBase-R PMD control
 * register. Returns 0 on success or a negative errno on read failure.
 */
static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
{
	int pmd_ctrl;

	pmd_ctrl = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
				MDIO_PMA_10GBR_PMD_CTRL);
	if (pmd_ctrl < 0)
		return pmd_ctrl;

	pmd_ctrl &= ~XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
		      pmd_ctrl);

	return 0;
}
/* Report auto-negotiation completion. A base-R link that is already up
 * is treated as negotiation complete; otherwise fall back to the
 * generic Clause 45 check.
 */
static int mv3310_aneg_done(struct phy_device *phydev)
{
	int stat1;

	stat1 = phy_read_mmd(phydev, MDIO_MMD_PCS,
			     MV_PCS_BASE_R + MDIO_STAT1);
	if (stat1 < 0)
		return stat1;

	if (stat1 & MDIO_STAT1_LSTATUS)
		return 1;

	return genphy_c45_aneg_done(phydev);
}
static void aqr107_chip_info(struct phy_device *phydev) { u8 fw_major, fw_minor, build_id, prov_id; int val; val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID); if (val < 0) return; fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val); fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val); val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1); if (val < 0) return; build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val); prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val); phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n", fw_major, fw_minor, build_id, prov_id); }
/* If we configure settings whilst firmware is still initializing the chip,
 * then these settings may be overwritten. Therefore make sure chip
 * initialization has completed. Use presence of the firmware ID as
 * indicator for initialization having completed.
 * The chip also provides a "reset completed" bit, but it's cleared after
 * read. Therefore function would time out if called again.
 *
 * Returns 0 once the firmware ID is non-zero, a negative errno on read
 * failure, or -ETIMEDOUT after ~2 seconds (100 * 20ms).
 */
static int aqr107_wait_reset_complete(struct phy_device *phydev)
{
	int val, retries = 100;

	do {
		val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_GLOBAL_FW_ID);
		if (val < 0)
			return val;
		/* Check before sleeping: the original slept 20ms after
		 * every read, delaying the success path by one period.
		 */
		if (val)
			return 0;
		msleep(20);
	} while (--retries);

	return -ETIMEDOUT;
}
/** * genphy_c45_read_lpa - read the link partner advertisment and pause * @phydev: target phy_device struct * * Read the Clause 45 defined base (7.19) and 10G (7.33) status registers, * filling in the link partner advertisment, pause and asym_pause members * in @phydev. This assumes that the auto-negotiation MMD is present, and * the backplane bit (7.48.0) is clear. Clause 45 PHY drivers are expected * to fill in the remainder of the link partner advert from vendor registers. */ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; /* Read the link partner's base page advertisment */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); if (val < 0) return val; phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val); phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0; /* Read the link partner's 10G advertisment */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); if (val < 0) return val; if (val & MDIO_AN_10GBT_STAT_LP10G) phydev->lp_advertising |= ADVERTISED_10000baseT_Full; return 0; }
/* Read one hardware counter. Counters wider than 16 bits are split
 * over two consecutive registers (low word first). Returns U64_MAX on
 * any read failure so the caller can report an invalid statistic.
 */
static u64 aqr107_get_stat(struct phy_device *phydev, int index)
{
	const struct aqr107_hw_stat *stat = &aqr107_hw_stats[index];
	int lo_bits = min(stat->size, 16);
	int hi_bits = stat->size - lo_bits;
	u64 counter;
	int reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
	if (reg < 0)
		return U64_MAX;

	counter = reg & GENMASK(lo_bits - 1, 0);

	if (hi_bits) {
		reg = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
		if (reg < 0)
			return U64_MAX;
		counter += (reg & GENMASK(hi_bits - 1, 0)) << 16;
	}

	return counter;
}
/* Report the configured downshift retry count via @data, or
 * DOWNSHIFT_DEV_DISABLE when downshift is off or the count is zero.
 * Returns 0 on success or a negative errno.
 */
static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
{
	int prov, cnt;

	prov = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
	if (prov < 0)
		return prov;

	cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, prov);

	if (FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, prov) && cnt)
		*data = cnt;
	else
		*data = DOWNSHIFT_DEV_DISABLE;

	return 0;
}
static int teranetics_read_status(struct phy_device *phydev) { int reg; phydev->link = 1; phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); if (reg < 0 || !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { phydev->link = 0; return 0; } reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) phydev->link = 0; } return 0; }
/* Determine the current operating mode from the PCS type-select field:
 * 10GBase-R means KR, anything else is treated as KX.
 * Returns 0 on success or a negative errno on read failure.
 */
static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode *mode)
{
	int ctrl2;

	ctrl2 = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ctrl2 < 0)
		return ctrl2;

	*mode = ((ctrl2 & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) ?
		AMD_XGBE_MODE_KR : AMD_XGBE_MODE_KX;

	return 0;
}
/* Read-modify-write @reg, changing only the bits in @mask to @bits.
 * Returns a negative errno on failure, 0 when the register already
 * held the requested value (no write issued), or 1 when it was
 * actually changed.
 */
static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
			 u16 mask, u16 bits)
{
	int cur, desired, err;

	cur = phy_read_mmd(phydev, devad, reg);
	if (cur < 0)
		return cur;

	desired = (cur & ~mask) | (bits & mask);
	if (desired == cur)
		return 0;

	err = phy_write_mmd(phydev, devad, reg, desired);

	return err < 0 ? err : 1;
}
/* Enable or disable port mirroring in CFG4 according to the mode held
 * in the driver private data.
 *
 * Returns 0 on success, a negative errno on register access failure.
 * (The original stored the phy_read_mmd() result in a u16, truncating
 * negative error codes into register bits and never checking them.)
 */
static int dp83867_config_port_mirroring(struct phy_device *phydev)
{
	struct dp83867_private *dp83867 =
		(struct dp83867_private *)phydev->priv;
	int val;

	val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
	if (val < 0)
		return val;

	if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN)
		val |= DP83867_CFG4_PORT_MIRROR_EN;
	else
		val &= ~DP83867_CFG4_PORT_MIRROR_EN;

	return phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
}