void hns_ae_get_regs(struct hnae_handle *handle, void *data) { u32 *p = data; u32 rcb_com_idx; int i; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); hns_ppe_get_regs(ppe_cb, p); p += hns_ppe_get_regs_count(); rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index); hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p); p += hns_rcb_get_common_regs_count(); for (i = 0; i < handle->q_num; i++) { hns_rcb_get_ring_regs(handle->qs[i], p); p += hns_rcb_get_ring_regs_count(); } hns_mac_get_regs(vf_cb->mac_cb, p); p += hns_mac_get_regs_count(vf_cb->mac_cb); if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE) hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p); }
/* Fill @data with the ethtool stat-name strings for @stringset, laid
 * out in the same order hns_ae_get_stats() emits values: per-ring RCB,
 * PPE, MAC, then (service ports only) DSAF. Each name occupies
 * ETH_GSTRING_LEN bytes.
 */
void hns_ae_get_strings(struct hnae_handle *handle,
			u32 stringset, u8 *data)
{
	struct hnae_vf_cb *vf_cb;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u8 *buff = data;
	int port;
	int i;

	assert(handle);

	vf_cb = hns_ae_get_vf_cb(handle);
	port = vf_cb->port_index;
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	/* per-ring names first, one slice per queue */
	for (i = 0; i < handle->q_num; i++) {
		hns_rcb_get_strings(stringset, buff, i);
		buff += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
	}

	hns_ppe_get_strings(ppe_cb, stringset, buff);
	buff += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);

	hns_mac_get_strings(mac_cb, stringset, buff);
	buff += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);

	/* DSAF names exist only for service ports */
	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_strings(stringset, buff, port);
}
/* Tear down one queue: service ports get a hardware ring reset,
 * debug ports need nothing here.
 */
static void hns_ae_fini_queue(struct hnae_queue *q)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);

	if (vf_cb->mac_cb->mac_type != HNAE_PORT_SERVICE)
		return;

	hns_rcb_reset_ring_hw(q);
}
/* Copy all hardware statistic counters for this handle into @data,
 * matching the string layout of hns_ae_get_strings(): per-ring RCB,
 * PPE, MAC, then (service ports only) DSAF.
 */
void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
	struct hnae_vf_cb *vf_cb;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u64 *buff = data;
	int i;

	if (!handle || !data) {
		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
		return;
	}

	vf_cb = hns_ae_get_vf_cb(handle);
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	/* per-ring counters, one slice per queue */
	for (i = 0; i < handle->q_num; i++) {
		hns_rcb_get_stats(handle->qs[i], buff);
		buff += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
	}

	hns_ppe_get_stats(ppe_cb, buff);
	buff += hns_ppe_get_sset_count((int)ETH_SS_STATS);

	hns_mac_get_stats(mac_cb, buff);
	buff += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);

	/* DSAF counters exist only for service ports */
	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_stats(vf_cb->dsaf_dev, buff, vf_cb->port_index);
}
/* Reset the MAC and PPE common block for a debug port; service-port
 * handles are left untouched by this path.
 */
static void hns_ae_reset(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u8 ppe_common_index;

	if (vf_cb->mac_cb->mac_type != HNAE_PORT_DEBUG)
		return;

	/* debug ports map past the service ports; +1 skips the
	 * service PPE common block at index 0
	 */
	ppe_common_index =
		vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;

	hns_mac_reset(vf_cb->mac_cb);
	hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
}
/* Release a handle previously obtained from this AE: mark all of its
 * ring pairs as no longer used by the VF, then free the per-VF control
 * block.
 *
 * The ring-pair loop MUST run before kfree(vf_cb): vf_cb is recovered
 * from @handle (hns_ae_get_vf_cb), i.e. the handle lives inside the
 * vf_cb allocation, so touching handle->q_num / handle->qs[] after the
 * free is a use-after-free. The original code freed first and iterated
 * afterwards.
 */
static void hns_ae_put_handle(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	int i;

	for (i = 0; i < handle->q_num; i++)
		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;

	vf_cb->mac_cb = NULL;
	kfree(vf_cb);
}
int hns_ae_get_regs_len(struct hnae_handle *handle) { u32 total_num; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); total_num = hns_ppe_get_regs_count(); total_num += hns_rcb_get_common_regs_count(); total_num += hns_rcb_get_ring_regs_count() * handle->q_num; total_num += hns_mac_get_regs_count(vf_cb->mac_cb); if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE) total_num += hns_dsaf_get_regs_count(); return total_num; }
static int hns_ae_config_loopback(struct hnae_handle *handle, enum hnae_loop loop, int en) { int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); switch (loop) { case MAC_INTERNALLOOP_SERDES: ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); break; case MAC_INTERNALLOOP_MAC: ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en); break; default: ret = -EINVAL; } return ret; }
/* Resolve the PPE control block for @handle. Service ports index into
 * the shared common block 0 by port number; debug ports each own a
 * dedicated common block (entry 0) past the service range.
 */
static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct ppe_common_cb *ppe_comm;
	int ppe_common_index = 0;
	int ppe_index = 0;

	if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF)
		ppe_index = vf_cb->port_index;
	else
		ppe_common_index =
			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;

	ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
	return &ppe_comm->ppe_cb[ppe_index];
}
/**
 * hns_rcb_reset_ring_hw - reset one RCB ring in hardware
 * @q: ring struct pointer
 *
 * Retries up to RCB_RESET_TRY_TIMES. Each attempt waits for the TX
 * free-buffer-descriptor count to drain to zero, disables prefetch,
 * asserts the ring reset bit, and then polls RCB_RING_COULD_BE_RST
 * (re-pulsing the reset bit) up to RCB_RESET_WAIT_TIMES before
 * deasserting. Logs an error if every attempt fails.
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		/* don't reset while TX descriptors are still pending */
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		/* quiesce prefetch, then assert ring reset */
		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		/* hardware not ready: pulse the reset bit and re-poll */
		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		/* deassert reset; done if the ring reported ready */
		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}
/* Refresh hardware counters for this handle and fold them into
 * @net_stats: ring byte/packet totals, PPE drop/error counters, DSAF
 * per-port drops (service ports only, both uplink-rx and downlink-tx
 * directions), and MAC error counters.
 */
void hns_ae_update_stats(struct hnae_handle *handle,
			 struct net_device_stats *net_stats)
{
	int port;
	int idx;
	struct dsaf_device *dsaf_dev;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_queue *queue;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
	u64 rx_missed_errors = 0;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return;
	port = vf_cb->port_index;
	ppe_cb = hns_get_ppe_cb(handle);
	mac_cb = hns_get_mac_cb(handle);

	/* accumulate software ring stats across all queues */
	for (idx = 0; idx < handle->q_num; idx++) {
		queue = handle->qs[idx];
		hns_rcb_update_stats(queue);

		tx_bytes += queue->tx_ring.stats.tx_bytes;
		tx_packets += queue->tx_ring.stats.tx_pkts;
		rx_bytes += queue->rx_ring.stats.rx_bytes;
		rx_packets += queue->rx_ring.stats.rx_pkts;

		rx_errors += queue->rx_ring.stats.err_pkt_len
				+ queue->rx_ring.stats.l2_err
				+ queue->rx_ring.stats.l3l4_csum_err;
	}

	hns_ppe_update_stats(ppe_cb);
	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
	tx_errors += ppe_cb->hw_stats.tx_err_checksum
		+ ppe_cb->hw_stats.tx_err_fifo_empty;

	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
		hns_dsaf_update_stats(dsaf_dev, port);
		/* for port upline direction, i.e., rx. */
		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;

		/* for port downline direction, i.e., tx.
		 * PPE inode ports mirror the physical ports at an offset.
		 */
		port = port + DSAF_PPE_INODE_BASE;
		hns_dsaf_update_stats(dsaf_dev, port);
		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
	}

	/* MAC-level error counters apply to all port types */
	hns_mac_update_stats(mac_cb);
	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;

	tx_errors += mac_cb->hw_stats.tx_bad_pkts
		+ mac_cb->hw_stats.tx_fragment_err
		+ mac_cb->hw_stats.tx_jabber_err
		+ mac_cb->hw_stats.tx_underrun_err
		+ mac_cb->hw_stats.tx_crc_err;

	net_stats->tx_bytes = tx_bytes;
	net_stats->tx_packets = tx_packets;
	net_stats->rx_bytes = rx_bytes;
	net_stats->rx_dropped = 0;
	net_stats->rx_packets = rx_packets;
	net_stats->rx_errors = rx_errors;
	net_stats->tx_errors = tx_errors;
	net_stats->tx_dropped = tx_dropped;
	net_stats->rx_missed_errors = rx_missed_errors;
	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}
static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle) { struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); return vf_cb->mac_cb; }