/* Create an empty VLAN entry with the given VID in the Athena VLAN table.
 * dev_id: device to operate on; vlan_id: valid range 1..MAX_VLAN_ID.
 * Returns SW_OUT_OF_RANGE for an invalid vid; in HSL_STANDALONG builds also
 * SW_ALREADY_EXIST / SW_FULL from the software shadow table; otherwise the
 * hardware commit status. */
static sw_error_t
_athena_vlan_create(a_uint32_t dev_id, a_uint32_t vlan_id)
{
    sw_error_t rv;
    a_uint32_t vtable_entry = 0;
#ifdef HSL_STANDALONG
    /* loc: index of a free shadow-table slot; MAX_VLAN_ENTRY means "none". */
    a_int16_t i, loc = MAX_VLAN_ENTRY;
    v_array_t *p_v_array;
#endif

    HSL_DEV_ID_CHECK(dev_id);

    /* VID 0 is reserved and VIDs above MAX_VLAN_ID do not exist */
    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

#ifdef HSL_STANDALONG
    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* scan the whole shadow table: remember a free slot, reject duplicates */
    for (i = 0; i < MAX_VLAN_ENTRY; i++)
    {
        if (p_v_array[i].active == A_FALSE)
        {
            loc = i;
        }
        else if (p_v_array[i].vlan_entry.vid == vlan_id)
        {
            return SW_ALREADY_EXIST;
        }
    }

    if (loc == MAX_VLAN_ENTRY)
        return SW_FULL;
#endif

    /* set default value for VLAN_TABLE_FUNC0, all 0 except vid */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set default value for VLAN_TABLE_FUNC1: all 0 except the valid bit */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC1, VT_VALID, 1, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* load the staged FUNC0/FUNC1 values into the hardware VLAN table */
    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

#ifdef HSL_STANDALONG
    /* mirror the new (memberless) entry into the software shadow table */
    p_v_array[loc].vlan_entry.vid = vlan_id;
    p_v_array[loc].vlan_entry.mem_ports = 0;
    p_v_array[loc].vlan_entry.u_ports = 0;
    p_v_array[loc].vlan_entry.vid_pri_en = A_FALSE;
    p_v_array[loc].vlan_entry.vid_pri = 0;
    p_v_array[loc].active = A_TRUE;
#endif

    return SW_OK;
}
/* Program how port_id treats packets whose source MAC is unknown:
 * forward normally, drop, or redirect to the CPU port. */
static sw_error_t
_horus_port_unk_sa_cmd_set(a_uint32_t dev_id, fal_port_t port_id,
                           fal_fwd_cmd_t cmd)
{
    a_uint32_t ctl;
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_GET(ret, dev_id, PORT_CTL, port_id,
                      (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    switch (cmd)
    {
    case FAL_MAC_FRWRD:
        /* normal forwarding: port lock disabled */
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 0, ctl);
        break;
    case FAL_MAC_DROP:
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 1, ctl);
        SW_SET_REG_BY_FIELD(PORT_CTL, LOCK_DROP_EN, 1, ctl);
        break;
    case FAL_MAC_RDT_TO_CPU:
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 1, ctl);
        SW_SET_REG_BY_FIELD(PORT_CTL, LOCK_DROP_EN, 0, ctl);
        break;
    default:
        return SW_NOT_SUPPORTED;
    }

    HSL_REG_ENTRY_SET(ret, dev_id, PORT_CTL, port_id,
                      (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    return ret;
}
/* Replace the member-port bitmap of an existing VLAN.  Only available in
 * HSL_STANDALONG builds, which keep a software shadow table holding the
 * entry's priority settings that must be re-staged with the reload;
 * otherwise SW_NOT_SUPPORTED.  u_member must be 0: Athena has no per-VLAN
 * untagged-port map. */
static sw_error_t
_athena_vlan_member_update(a_uint32_t dev_id, a_uint32_t vlan_id,
                           fal_pbmp_t member, fal_pbmp_t u_member)
{
#ifdef HSL_STANDALONG
    sw_error_t rv;
    a_int16_t loc;
    a_uint32_t reg_tmp;
    v_array_t *p_v_array;
    fal_vlan_t *p_sw_vlan;

    HSL_DEV_ID_CHECK(dev_id);

    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

    if (A_FALSE == hsl_mports_prop_check(dev_id, member, HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

    /* untagged member map is not supported on this chip */
    if (u_member != 0)
        return SW_BAD_PARAM;

    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* find the shadow-table slot for this vid (fails if vid is unknown) */
    rv = athena_vlan_table_location(dev_id, vlan_id, &loc);
    SW_RTN_ON_ERROR(rv);
    p_sw_vlan = &p_v_array[loc].vlan_entry;

    /* set value for VLAN_TABLE_FUNC0, all 0 except vid */
    reg_tmp = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, reg_tmp);
    /* re-stage the entry's existing priority override so the reload keeps it */
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI_EN,
                        (a_int32_t)p_sw_vlan->vid_pri_en, reg_tmp);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI, p_sw_vlan->vid_pri, reg_tmp);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg_tmp), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set vlan member for VLAN_TABLE_FUNC1 */
    HSL_REG_FIELD_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0, VID_MEM,
                      (a_uint8_t *) (&member), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* reload the staged entry into the hardware VLAN table */
    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

    /* keep the software shadow in sync with hardware */
    p_v_array[loc].vlan_entry.mem_ports = member;
    return SW_OK;
#else
    return SW_NOT_SUPPORTED;
#endif
}
/* Set the duplex mode of a PHY-backed port.  The MAC is quiesced (link,
 * RX and TX disabled) around the PHY write, then the saved PORT_STATUS
 * value is restored.
 *
 * Fixes over the original:
 *  - rv from the initial PORT_STATUS read was never checked, so on a read
 *    failure a garbage value would be "restored" to the register;
 *  - rv from the MAC-disable write was never checked;
 *  - the PHY driver's error was overwritten by the restore write, so a
 *    failed phy_duplex_set could still return SW_OK.  The restore is still
 *    attempted even when the PHY call fails, but its error is preserved. */
static sw_error_t
_athena_port_duplex_set(a_uint32_t dev_id, fal_port_t port_id,
                        fal_port_duplex_t duplex)
{
    sw_error_t rv;
    sw_error_t phy_rv;
    a_uint32_t phy_id = 0;
    a_uint32_t reg_save = 0;
    a_uint32_t reg_val = 0;
    hsl_phy_ops_t *phy_drv;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY))
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL (phy_drv = hsl_phy_api_ops_get (dev_id));
    if (NULL == phy_drv->phy_duplex_set)
        return SW_NOT_SUPPORTED;

    if (FAL_DUPLEX_BUTT <= duplex)
    {
        return SW_BAD_PARAM;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_id);
    SW_RTN_ON_ERROR(rv);

    /* save reg value */
    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);
    reg_save = reg_val;

    SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 0, reg_val);
    SW_SET_REG_BY_FIELD(PORT_STATUS, RXMAC_EN, 0, reg_val);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TXMAC_EN, 0, reg_val);

    /* set mac to be configured by sw and turn off RX/TX MAC_EN */
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    phy_rv = phy_drv->phy_duplex_set(dev_id, phy_id, duplex);

    /* restore reg value even if the PHY call failed */
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_save), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* do not let a successful restore mask a PHY failure */
    return phy_rv;
}
/* Flush all FDB entries learned on port_id; FAL_FDB_DEL_STATIC in flag
 * selects whether static entries are flushed as well. */
static sw_error_t
_shiva_fdb_del_by_port(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t flag)
{
    a_uint32_t func0 = 0;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* stage the port number the flush operation applies to */
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_PORT_NUM, port_id, func0);
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&func0), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return shiva_fdb_commit(dev_id,
                            (FAL_FDB_DEL_STATIC & flag)
                                ? ARL_FLUSH_PORT_AND_STATIC
                                : ARL_FLUSH_PORT_NO_STATIC);
}
/* Chip bring-up for Horus: enable link on every non-CPU MAC port. */
static sw_error_t
horus_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    hsl_dev_t *dev = hsl_dev_ptr_get(dev_id);
    a_uint32_t port;
    a_uint32_t status;
    sw_error_t rv;

    if (NULL == dev)
    {
        return SW_NOT_INITIALIZED;
    }

    for (port = 0; port < dev->nr_ports; port++)
    {
        /* the CPU port is managed separately */
        if (port == dev->cpu_port_nr)
        {
            continue;
        }
        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&status), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, status);
        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&status), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Kick one VLAN-table operation (op = VT_FUNC code, e.g. load / purge)
 * and report how it completed.  Waits (bounded) for any in-flight
 * operation first.  Returns SW_BUSY on wait timeout; SW_FULL /
 * SW_NOT_FOUND when hardware raises the full-violation flag for a
 * load / purge; SW_OK otherwise. */
static sw_error_t
athena_vlan_commit(a_uint32_t dev_id, a_uint32_t op)
{
    a_uint32_t vt_busy = 1, i = 0x1000, vt_full, val;
    sw_error_t rv;

    /* bounded wait until the previous VLAN-table op has finished */
    while (vt_busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0, VT_BUSY,
                          (a_uint8_t *) (&vt_busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (i == 0)
        return SW_BUSY;

    /* write the function code and set VT_BUSY to start the operation */
    HSL_REG_ENTRY_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_FUNC, op, val);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_BUSY, 1, val);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* check whether the table reported a full/not-found violation */
    HSL_REG_FIELD_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0, VT_FULL_VIO,
                      (a_uint8_t *) (&vt_full), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);
    if (vt_full)
    {
        /* 0x10 -- presumably writes just the VT_FULL_VIO bit to clear the
         * violation flag; verify against the register map */
        val = 0x10;
        HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                          (a_uint8_t *) (&val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        /* the violation flag means "table full" for loads and
         * "entry not found" for purges */
        if (VLAN_LOAD_ENTRY == op)
        {
            return SW_FULL;
        }
        else if (VLAN_PURGE_ENTRY == op)
        {
            return SW_NOT_FOUND;
        }
    }

    return SW_OK;
}
/* Manually enable/disable RX, TX and half-duplex flow control on a port.
 * Only legal while the port's flow control is in force mode
 * (FLOW_LINK_EN == 0); when the link owns these bits SW_DISABLE is
 * returned. */
static sw_error_t
_horus_port_flowctrl_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable)
{
    sw_error_t rv;
    a_uint32_t val, force, reg;

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* map the boolean onto the register bit value, rejecting non-booleans */
    if (A_TRUE == enable)
    {
        val = 1;
    }
    else if (A_FALSE == enable)
    {
        val = 0;
    }
    else
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* FLOW_LINK_EN set means flow control follows the link (auto mode) */
    SW_GET_FIELD_BY_REG(PORT_STATUS, FLOW_LINK_EN, force, reg);
    if (force)
    {
        /* flow control isn't in force mode so can't set */
        return SW_DISABLE;
    }

    SW_SET_REG_BY_FIELD(PORT_STATUS, RX_FLOW_EN, val, reg);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_FLOW_EN, val, reg);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_HALF_FLOW_EN, val, reg);
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    return rv;
}
/* Issue one MIB counter operation (op = MIB_FUN code) and wait for it to
 * finish.  SW_BUSY: a previous op never finished; SW_FAIL: the op we
 * started timed out. */
static sw_error_t
_dess_mib_op_commit(a_uint32_t dev_id, a_uint32_t op)
{
    a_uint32_t mib_busy = 1, i = 0x1000, val;
    sw_error_t rv;

    /* bounded wait for any in-flight MIB operation */
    while (mib_busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, MIB_FUNC, 0, MIB_BUSY,
                          (a_uint8_t *) (&mib_busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (i == 0)
        return SW_BUSY;

    /* program the function code and set MIB_BUSY to start it */
    HSL_REG_ENTRY_GET(rv, dev_id, MIB_FUNC, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_SET_REG_BY_FIELD(MIB_FUNC, MIB_FUN, op, val);
    SW_SET_REG_BY_FIELD(MIB_FUNC, MIB_BUSY, 1, val);
    HSL_REG_ENTRY_SET(rv, dev_id, MIB_FUNC, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the operation we just started to complete */
    mib_busy = 1;
    i = 0x1000;
    while (mib_busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, MIB_FUNC, 0, MIB_BUSY,
                          (a_uint8_t *) (&mib_busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (i == 0)
        return SW_FAIL;

    return SW_OK;
}
/* Switch a port's flow control between force mode (software controlled)
 * and auto mode (follows the link).  enable == A_TRUE selects force mode. */
static sw_error_t
_horus_port_flowctrl_forcemode_set(a_uint32_t dev_id, fal_port_t port_id,
                                   a_bool_t enable)
{
    sw_error_t rv;
    a_uint32_t force, reg;

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* FLOW_LINK_EN == 1 means auto mode, i.e. force mode is OFF.  The
     * polarity is therefore inverted relative to 'enable', so
     * force != enable means the port is already in the requested mode
     * and nothing needs to change. */
    SW_GET_FIELD_BY_REG(PORT_STATUS, FLOW_LINK_EN, force, reg);
    if (force != (a_uint32_t) enable)
    {
        return SW_OK;
    }

    if (A_TRUE == enable)
    {
        /* enter force mode; also clear TX flow until explicitly re-enabled */
        SW_SET_REG_BY_FIELD(PORT_STATUS, FLOW_LINK_EN, 0, reg);
        SW_SET_REG_BY_FIELD(PORT_STATUS, TX_FLOW_EN, 0, reg);
    }
    else if (A_FALSE == enable)
    {
        /* back to auto (link-driven) mode */
        SW_SET_REG_BY_FIELD(PORT_STATUS, FLOW_LINK_EN, 1, reg);
    }
    else
    {
        return SW_BAD_PARAM;
    }

    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_HALF_FLOW_EN, 0, reg);
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    return rv;
}
/* Route IGMP/MLD packets to a specific egress queue (enable == A_TRUE)
 * or fall back to normal priority mapping (enable == A_FALSE). */
static sw_error_t
_shiva_igmp_mld_entry_queue_set(a_uint32_t dev_id, a_bool_t enable,
                                a_uint32_t queue)
{
    sw_error_t rv;
    a_uint32_t qm_ctl;
    hsl_dev_t *p_dev;

    HSL_DEV_ID_CHECK(dev_id);

    HSL_REG_ENTRY_GET(rv, dev_id, QM_CTL, 0,
                      (a_uint8_t *) (&qm_ctl), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* reject anything that is not a proper boolean */
    if ((A_TRUE != enable) && (A_FALSE != enable))
    {
        return SW_BAD_PARAM;
    }

    if (A_TRUE == enable)
    {
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI_EN, 1, qm_ctl);
        SW_RTN_ON_NULL(p_dev = hsl_dev_ptr_get(dev_id));
        /* the target queue must exist on this device */
        if (queue >= p_dev->nr_queue)
        {
            return SW_BAD_PARAM;
        }
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI, queue, qm_ctl);
    }
    else
    {
        /* disabled: clear both the enable bit and the queue field */
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI_EN, 0, qm_ctl);
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI, 0, qm_ctl);
    }

    HSL_REG_ENTRY_SET(rv, dev_id, QM_CTL, 0,
                      (a_uint8_t *) (&qm_ctl), sizeof (a_uint32_t));
    return rv;
}
/* Scatter a 6-byte MAC address into the two ARL address-table words:
 * bytes 4-5 go into FUNC0 (*reg0), bytes 0-3 into FUNC1 (*reg1). */
static void
shiva_fdb_fill_addr(fal_mac_addr_t addr, a_uint32_t * reg0, a_uint32_t * reg1)
{
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_ADDR_BYTE4, addr.uc[4], *reg0);
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_ADDR_BYTE5, addr.uc[5], *reg0);

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC1, AT_ADDR_BYTE0, addr.uc[0], *reg1);
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC1, AT_ADDR_BYTE1, addr.uc[1], *reg1);
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC1, AT_ADDR_BYTE2, addr.uc[2], *reg1);
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC1, AT_ADDR_BYTE3, addr.uc[3], *reg1);
}
/* Re-bind every hardware filter belonging to list_id to the given port
 * bitmap, updating both hardware and the cached rule image. */
static sw_error_t
_isis_acl_rule_bind(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t ports)
{
    a_uint32_t idx;
    sw_error_t rv;

    for (idx = 0; idx < ISIS_MAX_FILTER; idx++)
    {
        isis_acl_rule_t *ent = &(sw_rule_ent[dev_id][idx]);

        /* skip unused slots and filters owned by other lists */
        if (!(ENT_USED & ent->status) || (list_id != ent->list_id))
        {
            continue;
        }

        rv = _isis_filter_ports_bind(dev_id, idx, ports);
        SW_RTN_ON_ERROR(rv);

        /* mirror the new source-port bitmap into the cached filter value */
        SW_SET_REG_BY_FIELD(MAC_RUL_V4, SRC_PT, ports, (ent->filter.vlu[4]));
    }

    return SW_OK;
}
/* Chip bring-up for ISIS: select CRC10 FDB hashing, then enable link on
 * every non-CPU MAC port. */
static sw_error_t
isis_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    hsl_dev_t *dev = hsl_dev_ptr_get(dev_id);
    a_uint32_t port;
    a_uint32_t val;
    sw_error_t rv;

    if (NULL == dev)
    {
        return SW_NOT_INITIALIZED;
    }

    /* Set default FDB hash mode as CRC10 */
    val = 1;
    HSL_REG_FIELD_SET(rv, dev_id, FORWARD_CTL0, 0, HASH_MODE,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    for (port = 0; port < dev->nr_ports; port++)
    {
        /* the CPU port is handled elsewhere */
        if (port == dev->cpu_port_nr)
        {
            continue;
        }
        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, val);
        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Convert a software VLAN entry into the two Athena VLAN table words:
 * reg[0] <- VLAN_TABLE_FUNC0 (vid plus optional priority override),
 * reg[1] <- VLAN_TABLE_FUNC1 (valid bit plus member bitmap).
 * Returns SW_BAD_VALUE if the entry carries an untagged-port map, which
 * this chip cannot express.
 *
 * Fix: the u_ports check now runs before any output is written, so reg[]
 * is never left partially mutated on failure (the original validated only
 * after filling both words). */
static sw_error_t
athena_vlan_sw_to_hw(const fal_vlan_t * vlan_entry, a_uint32_t reg[])
{
    /* reject unsupported input up front */
    if (0 != vlan_entry->u_ports)
    {
        return SW_BAD_VALUE;
    }

    if (A_TRUE == vlan_entry->vid_pri_en)
    {
        SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI_EN, 1, reg[0]);
        SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI, vlan_entry->vid_pri,
                            reg[0]);
    }
    else
    {
        SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI_EN, 0, reg[0]);
    }
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_entry->vid, reg[0]);

    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC1, VT_VALID, 1, reg[1]);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC1, VID_MEM, vlan_entry->mem_ports,
                        reg[1]);

    return SW_OK;
}
/* Program one LED control pattern for the given LED group/slot.
 * Builds a 16-bit pattern word in 'data' (mode, trigger map, blink
 * frequency), merges it into the shared 32-bit pattern register at
 * 'addr' (the WAN group occupies the high half-word, the others the low
 * half-word), and for the LAN / MAC-port groups also mirrors the mode
 * into the per-port fields of the LED_PATTERN register. */
static sw_error_t
_horus_led_ctrl_pattern_set(a_uint32_t dev_id, led_pattern_group_t group,
                            led_pattern_id_t id, led_ctrl_pattern_t * pattern)
{
    a_uint32_t data = 0, reg, mode;
    a_uint32_t addr;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (group >= LED_GROUP_BUTT)
    {
        return SW_BAD_PARAM;
    }

    if (id > MAX_LED_PATTERN_ID)
    {
        return SW_BAD_PARAM;
    }

    /* the MAC-port group has only pattern slot 0 */
    if ((LED_MAC_PORT_GROUP == group) && (0 != id))
    {
        return SW_BAD_PARAM;
    }

    /* pattern registers are 4 bytes apart; the MAC-port pattern sits at a
     * fixed offset of 8 from the base */
    if (LED_MAC_PORT_GROUP == group)
    {
        addr = LED_PATTERN_ADDR + 8;
    }
    else
    {
        addr = LED_PATTERN_ADDR + (id << 2);
    }

    /* map the abstract mode onto the 2-bit hardware PATTERN_EN encoding */
    if (LED_ALWAYS_OFF == pattern->mode)
    {
        mode = 0;
    }
    else if (LED_ALWAYS_BLINK == pattern->mode)
    {
        mode = 1;
    }
    else if (LED_ALWAYS_ON == pattern->mode)
    {
        mode = 2;
    }
    else if (LED_PATTERN_MAP_EN == pattern->mode)
    {
        mode = 3;
    }
    else
    {
        return SW_BAD_PARAM;
    }

    SW_SET_REG_BY_FIELD(LED_CTRL, PATTERN_EN, mode, data);

    /* translate each trigger bit of pattern->map into its LED_CTRL bit */
    if (pattern->map & (1 << FULL_DUPLEX_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, FULL_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << HALF_DUPLEX_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, HALF_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << POWER_ON_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, POWERON_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_1000M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, GE_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_100M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, FE_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_10M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, ETH_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << COLLISION_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, COL_BLINK_EN, 1, data);
    }

    if (pattern->map & (1 << RX_TRAFFIC_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, RX_BLINK_EN, 1, data);
    }

    if (pattern->map & (1 << TX_TRAFFIC_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, TX_BLINK_EN, 1, data);
    }

    /* linkup-override is the only map bit that is explicitly cleared when
     * absent (the others start from data == 0 anyway) */
    if (pattern->map & (1 << LINKUP_OVERRIDE_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, LINKUP_OVER_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, LINKUP_OVER_EN, 0, data);
    }

    /* blink frequency: 2/4/8 Hz or tied to TX/RX activity */
    if (LED_BLINK_2HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 0, data);
    }
    else if (LED_BLINK_4HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 1, data);
    }
    else if (LED_BLINK_8HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 2, data);
    }
    else if (LED_BLINK_TXRX == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 3, data);
    }
    else
    {
        return SW_BAD_PARAM;
    }

    /* read-modify-write the shared 32-bit register: WAN group owns the
     * high 16 bits, LAN/MAC groups the low 16 bits */
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (LED_WAN_PORT_GROUP == group)
    {
        reg &= 0xffff;
        reg |= (data << 16);
    }
    else
    {
        reg &= 0xffff0000;
        reg |= data;
    }

    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only LAN / MAC-port groups have per-port mode fields to mirror */
    if (LED_WAN_PORT_GROUP == group)
    {
        return SW_OK;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, LED_PATTERN, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (LED_LAN_PORT_GROUP == group)
    {
        /* apply the mode to LED slot 0 or 1 of each LAN port (ports 1-3) */
        if (id)
        {
            SW_SET_REG_BY_FIELD(LED_PATTERN, P3L1_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P2L1_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P1L1_MODE, mode, data);
        }
        else
        {
            SW_SET_REG_BY_FIELD(LED_PATTERN, P3L0_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P2L0_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P1L0_MODE, mode, data);
        }
    }
    else
    {
        SW_SET_REG_BY_FIELD(LED_PATTERN, M5_MODE, mode, data);
    }

    HSL_REG_ENTRY_SET(rv, dev_id, LED_PATTERN, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
}
/* Execute one ARL (FDB) hardware operation and wait for completion.
 * op is a software op code; the two flush-port pseudo-ops are translated
 * to ARL_FLUSH_PORT_UNICAST plus the FLUSH_ST_EN static-entry flag.
 * Returns SW_BUSY if a previous op never finished, SW_FAIL if this op
 * timed out, and maps the hardware full/violation flag to SW_FULL /
 * SW_NOT_FOUND depending on the operation. */
static sw_error_t
shiva_fdb_commit(a_uint32_t dev_id, a_uint32_t op)
{
    sw_error_t rv;
    a_uint32_t busy = 1;
    a_uint32_t full_vio;
    a_uint32_t i = 1000;
    a_uint32_t entry;
    a_uint32_t hwop = op;

    /* bounded wait for any in-flight ARL operation */
    while (busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_BUSY,
                          (a_uint8_t *) (&busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_BUSY;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_BUSY, 1, entry);

    /* translate the software pseudo-ops into the hardware flush op plus
     * the "also flush static entries" control bit */
    if (ARL_FLUSH_PORT_AND_STATIC == hwop)
    {
        hwop = ARL_FLUSH_PORT_UNICAST;
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, FLUSH_ST_EN, 1, entry);
    }

    if (ARL_FLUSH_PORT_NO_STATIC == hwop)
    {
        hwop = ARL_FLUSH_PORT_UNICAST;
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, FLUSH_ST_EN, 0, entry);
    }

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_FUNC, hwop, entry);
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the operation we just kicked off */
    busy = 1;
    i = 1000;
    while (busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_BUSY,
                          (a_uint8_t *) (&busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_FAIL;
    }

    HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_FULL_VIO,
                      (a_uint8_t *) (&full_vio), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (full_vio)
    {
        /* must clear AT_FULL_VOI bit */
        entry = 0x1000;
        HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        /* the violation flag means different things per operation */
        if (ARL_LOAD_ENTRY == hwop)
        {
            /* no free ARL slot for the new entry */
            return SW_FULL;
        }
        else if ((ARL_PURGE_ENTRY == hwop) || (ARL_FLUSH_PORT_UNICAST == hwop))
        {
            /* the entry to purge/flush did not exist */
            return SW_NOT_FOUND;
        }
        else
        {
            return SW_FAIL;
        }
    }

    return SW_OK;
}
/* Append rule_nr rules to ACL list list_id, starting at index rule_id
 * (which must equal the list's current rule count -- only appending at
 * the end is allowed).
 * Strategy: snapshot the list's existing hardware filters into
 * hw_rule_tmp, translate the new software rules after them, then try to
 * (re)allocate a contiguous filter region; if that fails the whole table
 * is reordered to make room.  While the allocator runs, the list's old
 * filters are temporarily flagged ENT_TMP instead of ENT_USED, and the
 * flags are restored afterwards regardless of the outcome. */
static sw_error_t
_isis_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id,
                   a_uint32_t rule_nr, fal_acl_rule_t * rule)
{
    sw_error_t rv;
    isis_acl_list_t *sw_list;
    isis_acl_rule_t *sw_rule;
    a_uint32_t i, free_flt_nr, old_flt_nr, old_flt_idx, new_flt_nr, bind_pts;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_MAX_LIST_ID < list_id)
    {
        return SW_NOT_SUPPORTED;
    }

    if ((0 == rule_nr) || (NULL == rule))
    {
        return SW_BAD_PARAM;
    }

    sw_list = _isis_acl_list_loc(dev_id, list_id);
    if (NULL == sw_list)
    {
        return SW_NOT_FOUND;
    }

    /* rules can only be appended at the end of the list */
    if (rule_id != sw_list->rule_nr)
    {
        return SW_BAD_PARAM;
    }

    old_flt_idx = 0;
    old_flt_nr = 0;
    free_flt_nr = 0;
    aos_mem_zero(hw_rule_tmp[dev_id],
                 ISIS_HW_RULE_TMP_CNT * sizeof (isis_acl_rule_t));
    aos_mem_zero(sw_rule_tmp[dev_id],
                 ISIS_MAX_FILTER * sizeof (isis_acl_rule_t));

    /* copy this list's current filters into the staging buffer, remember
     * where they start (old_flt_idx) and count free hardware slots */
    for (i = 0; i < ISIS_MAX_FILTER; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][i]);
        if (ENT_USED & sw_rule->status)
        {
            if (sw_rule->list_id == sw_list->list_id)
            {
                aos_mem_copy(&(hw_rule_tmp[dev_id][old_flt_nr]), sw_rule,
                             sizeof (isis_acl_rule_t));
                if (!old_flt_nr)
                {
                    old_flt_idx = i;
                }
                old_flt_nr++;
            }
        }
        else
        {
            free_flt_nr++;
        }
    }

    if (!free_flt_nr)
    {
        return SW_NO_RESOURCE;
    }

    /* parse rule entry and alloc rule resource */
    new_flt_nr = old_flt_nr;
    for (i = 0; i < rule_nr; i++)
    {
        rv = _isis_acl_rule_sw_to_hw(dev_id, &rule[i], hw_rule_tmp[dev_id],
                                     &new_flt_nr);
        SW_RTN_ON_ERROR(rv);
    }

    /* one software rule may expand into several hardware filters */
    if (free_flt_nr < (new_flt_nr - old_flt_nr))
    {
        return SW_NO_RESOURCE;
    }

    /* stamp list identity and the list's current port binding onto the
     * freshly translated filters */
    for (i = old_flt_nr; i < new_flt_nr; i++)
    {
        hw_rule_tmp[dev_id][i].status |= ENT_USED;
        hw_rule_tmp[dev_id][i].list_id = sw_list->list_id;
        hw_rule_tmp[dev_id][i].list_pri = sw_list->list_pri;
        bind_pts = sw_list->bind_pts;
        SW_SET_REG_BY_FIELD(MAC_RUL_V4, SRC_PT, bind_pts,
                            (hw_rule_tmp[dev_id][i].filter.vlu[4]));
    }

    /* temporarily hand the list's old filters back to the allocator
     * (ENT_TMP marks them as movable but still accounted for) */
    for (i = 0; i < old_flt_nr; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][old_flt_idx + i]);
        sw_rule->status &= (~ENT_USED);
        sw_rule->status |= (ENT_TMP);
    }

    rv = _isis_acl_rule_alloc(dev_id, sw_list, new_flt_nr);
    if (SW_OK != rv)
    {
        /* no contiguous region available: reorder the whole table and retry */
        aos_mem_zero(sw_rule_tmp[dev_id],
                     ISIS_MAX_FILTER * sizeof (isis_acl_rule_t));
        rv = _isis_acl_rule_reorder(dev_id, sw_list);
    }

    /* restore the old filters' flags whatever happened above */
    for (i = 0; i < old_flt_nr; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][i + old_flt_idx]);
        sw_rule->status |= (ENT_USED);
        sw_rule->status &= (~ENT_TMP);
    }
    SW_RTN_ON_ERROR(rv);

    /* push the updated software image to hardware */
    _isis_acl_rule_sync(dev_id, 0, ISIS_MAX_FILTER);

    sw_list->rule_nr += rule_nr;
    _isis_acl_sw_rule_dump("sw rule after add", sw_rule_ent[dev_id]);
    return SW_OK;
}
/* Configure the user-defined-field window (offset/length) used by ACL
 * matching on one port.  The L2/L3/L4 windows live in WIN_RULE_CTL0,
 * the L2-SNAP and L3-PLUS windows in WIN_RULE_CTL1. */
static sw_error_t
_isis_acl_port_udf_profile_set(a_uint32_t dev_id, fal_port_t port_id,
                               fal_acl_udf_type_t udf_type, a_uint32_t offset,
                               a_uint32_t length)
{
    a_uint32_t ctl;
    sw_error_t rv;
    a_bool_t in_ctl1;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_UDF_MAX_OFFSET < offset)
    {
        return SW_BAD_PARAM;
    }

    if (ISIS_UDF_MAX_OFFSET < length)
    {
        return SW_BAD_PARAM;
    }

    /* decide once which control register this window type lives in */
    in_ctl1 = ((FAL_ACL_UDF_TYPE_L2_SNAP == udf_type)
               || (FAL_ACL_UDF_TYPE_L3_PLUS == udf_type)) ? A_TRUE : A_FALSE;

    if (A_TRUE == in_ctl1)
    {
        HSL_REG_ENTRY_GET(rv, dev_id, WIN_RULE_CTL1, port_id,
                          (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    }
    else
    {
        HSL_REG_ENTRY_GET(rv, dev_id, WIN_RULE_CTL0, port_id,
                          (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    }
    SW_RTN_ON_ERROR(rv);

    switch (udf_type)
    {
    case FAL_ACL_UDF_TYPE_L2:
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L2_OFFSET, offset, ctl);
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L2_LENGTH, length, ctl);
        break;
    case FAL_ACL_UDF_TYPE_L3:
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L3_OFFSET, offset, ctl);
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L3_LENGTH, length, ctl);
        break;
    case FAL_ACL_UDF_TYPE_L4:
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L4_OFFSET, offset, ctl);
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L4_LENGTH, length, ctl);
        break;
    case FAL_ACL_UDF_TYPE_L2_SNAP:
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L2S_OFFSET, offset, ctl);
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L2S_LENGTH, length, ctl);
        break;
    case FAL_ACL_UDF_TYPE_L3_PLUS:
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L3P_OFFSET, offset, ctl);
        SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L3P_LENGTH, length, ctl);
        break;
    default:
        return SW_BAD_PARAM;
    }

    if (A_TRUE == in_ctl1)
    {
        HSL_REG_ENTRY_SET(rv, dev_id, WIN_RULE_CTL1, port_id,
                          (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    }
    else
    {
        HSL_REG_ENTRY_SET(rv, dev_id, WIN_RULE_CTL0, port_id,
                          (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    }
    return rv;
}
/* Poll BIST_CTRL until BIST_BUSY clears (bounded at 0x1000 iterations).
 * On success *entry holds the last BIST_CTRL value read; returns
 * SW_INIT_ERROR on timeout. */
static sw_error_t
_garuda_bist_wait(a_uint32_t dev_id, a_uint32_t *entry)
{
    a_uint32_t busy = 1, i = 0x1000;
    sw_error_t rv;

    while (busy && --i)
    {
        HSL_REG_ENTRY_GET(rv, dev_id, BIST_CTRL, 0,
                          (a_uint8_t *) entry, sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_GET_FIELD_BY_REG(BIST_CTRL, BIST_BUSY, busy, *entry);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_INIT_ERROR;
    }
    return SW_OK;
}

/* Kick off a memory BIST pass with all three test patterns enabled. */
static sw_error_t
_garuda_bist_start(a_uint32_t dev_id)
{
    a_uint32_t entry = 0;
    sw_error_t rv;

    SW_SET_REG_BY_FIELD(BIST_CTRL, BIST_BUSY, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN2, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN1, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN0, 1, entry);
    HSL_REG_ENTRY_SET(rv, dev_id, BIST_CTRL, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    return rv;
}

/* Run the built-in memory self test.  If exactly one faulty address is
 * reported, map it to the recovery row (BIST_RCV) and re-run the test to
 * confirm the repair; any remaining error is fatal.
 * Returns SW_INIT_ERROR on timeout, an unrepairable error, or a failed
 * re-test; SW_OK otherwise.
 * (The original body repeated the busy-wait three times and the kick-off
 * twice; both are factored into the helpers above, behavior unchanged.) */
static sw_error_t
garuda_bist_test(a_uint32_t dev_id)
{
    a_uint32_t entry, data;
    sw_error_t rv;

    /* wait for any previously running BIST to finish */
    rv = _garuda_bist_wait(dev_id, &entry);
    SW_RTN_ON_ERROR(rv);

    /* first pass */
    rv = _garuda_bist_start(dev_id);
    SW_RTN_ON_ERROR(rv);
    rv = _garuda_bist_wait(dev_id, &entry);
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_CNT, data, entry);
    if (!data)
    {
        return SW_OK;
    }

    /* only a single faulty address can be repaired */
    SW_GET_FIELD_BY_REG(BIST_CTRL, ONE_ERR, data, entry);
    if (!data)
    {
        return SW_INIT_ERROR;
    }

    /* redirect the faulty address to the recovery row */
    SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_ADDR, data, entry);
    entry = 0;
    SW_SET_REG_BY_FIELD(BIST_RCV, RCV_EN, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_RCV, RCV_ADDR, data, entry);
    HSL_REG_ENTRY_SET(rv, dev_id, BIST_RCV, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* second pass must now run clean */
    rv = _garuda_bist_start(dev_id);
    SW_RTN_ON_ERROR(rv);
    rv = _garuda_bist_wait(dev_id, &entry);
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_CNT, data, entry);
    if (data)
    {
        return SW_INIT_ERROR;
    }
    return SW_OK;
}
/* Chip bring-up for Garuda: program the POSTRIP pad/strapping register
 * (PHY PLL, RGMII/GMII pad modes, clock delays) from the board-specific
 * init config, then enable link on every non-CPU MAC port.
 * cpu_mode selects the CPU connection topology (see HSL_CPU_*). */
static sw_error_t
garuda_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    garuda_init_spec_cfg *garuda_init_cfg = NULL;
    hsl_dev_t *pdev = NULL;
    hsl_init_mode cpu_mode;
    a_uint32_t port_id;
    a_uint32_t data;
    sw_error_t rv;

    pdev = hsl_dev_ptr_get(dev_id);
    if (NULL == pdev)
    {
        return SW_NOT_INITIALIZED;
    }

    cpu_mode = cfg->cpu_mode;

    /* read-modify-write: POSTRIP mostly holds strap defaults we must keep */
    HSL_REG_ENTRY_GET(rv, dev_id, POSTRIP, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* phy pll on */
    SW_SET_REG_BY_FIELD(POSTRIP, PHY_PLL_ON, 1, data);

    garuda_init_cfg = (garuda_init_spec_cfg* )(cfg->chip_spec_cfg);
    if (!garuda_init_cfg)
    {
        return SW_BAD_PARAM;
    }

    /* delay: board-specific RX/TX delay strap selects */
    if (A_TRUE == garuda_init_cfg->rx_delay_s1)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S1, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S1, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->rx_delay_s0)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S0, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S0, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->tx_delay_s1)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S1, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S1, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->tx_delay_s0)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 0, data);
    }

    /* tx/rx delay enable */
    if (A_TRUE == garuda_init_cfg->rgmii_txclk_delay)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_TXCLK_DELAY_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_TXCLK_DELAY_EN, 0, data);
    }

    /* tx/rx delay enable */
    if (A_TRUE == garuda_init_cfg->rgmii_rxclk_delay)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_RXCLK_DELAY_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_RXCLK_DELAY_EN, 0, data);
    }

    /* mac5 default mode */
    /*SW_SET_REG_BY_FIELD(POSTRIP, MAC5_PHY_MODE, 0, data);
      SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 0, data);*/

    /* mac0 default phy mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_MAC_MODE, 0, data);

    /* mac0 default rgmii mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_RGMII_EN, 1, data);
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_GMII_EN, 0, data);

    /* mac5 default disable mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC5_PHY_MODE, 0, data);
    SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 0, data);

    /* phy default mode */
    SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 0, data);
    SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 0, data);

    /* modify default mode: board uses GMII instead of RGMII on MAC0 */
    if (A_FALSE == garuda_init_cfg->mac0_rgmii)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, MAC0_RGMII_EN, 0, data);
        SW_SET_REG_BY_FIELD(POSTRIP, MAC0_GMII_EN, 1, data);

        /*invert clock output for port0 gmii pad.*/
        /* NOTE(review): rv from these two MASK_CTL accesses is not
         * checked before/after use -- confirm whether that is intended */
        a_uint32_t temp;
        HSL_REG_ENTRY_GET(rv, dev_id, MASK_CTL, 0,
                          (a_uint8_t *) (&temp), sizeof (a_uint32_t));
        temp |= 1<<MASK_CTL_MII_CLK0_SEL_BOFFSET;
        HSL_REG_ENTRY_SET(rv, dev_id, MASK_CTL, 0,
                          (a_uint8_t *) (&temp), sizeof (a_uint32_t));
    }

    if(HSL_CPU_2 == cpu_mode)
    {
        /* second CPU attaches through PHY4's pads */
        if (A_TRUE == garuda_init_cfg->mac5_rgmii)
        {
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 1, data);
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 0, data);

            a_uint32_t phy_id = 4;
            /* phy4 rgmii mode enable */
            phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_RGMII_MODE,
                          F1_DEBUG_PORT_RGMII_MODE_EN);

            /* Rx delay enable */
            if (A_TRUE == garuda_init_cfg->phy4_rx_delay)
            {
                phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_RX_DELAY,
                              F1_DEBUG_PORT_RX_DELAY_EN);
            }
            else
            {
                phy_dport_clear(dev_id, phy_id, F1_DEBUG_PORT_RX_DELAY,
                                F1_DEBUG_PORT_RX_DELAY_EN);
            }

            /* Tx delay enable */
            if (A_TRUE == garuda_init_cfg->phy4_tx_delay)
            {
                phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_TX_DELAY,
                              F1_DEBUG_PORT_TX_DELAY_EN);
            }
            else
            {
                phy_dport_clear(dev_id, phy_id, F1_DEBUG_PORT_TX_DELAY,
                                F1_DEBUG_PORT_TX_DELAY_EN);
            }
        }
        else
        {
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 0, data);
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 1, data);
        }
    }
    else if (HSL_CPU_1 == cpu_mode)
    {
        //SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 0, data);
    }
    else if (HSL_CPU_1_PLUS == cpu_mode)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 1, data);
    }
    else if (HSL_NO_CPU == cpu_mode)
    {
    }

    /* commit the assembled POSTRIP value */
    HSL_REG_ENTRY_SET(rv, dev_id, POSTRIP, 0, (a_uint8_t *)
                      (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* enable link on every non-CPU MAC port */
    for (port_id = 0; port_id < pdev->nr_ports; port_id++)
    {
        if (port_id == pdev->cpu_port_nr)
        {
            continue;
        }
        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, data);
        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Convert a software FDB entry into the ARL table words.  Only reg[2],
 * the control word, is assembled here; reg[0]/reg[1] receive the MAC
 * address via shiva_fdb_fill_addr.  Returns SW_NOT_SUPPORTED /
 * SW_BAD_PARAM for commands or ports the hardware cannot encode. */
static sw_error_t
shiva_atu_sw_to_hw(a_uint32_t dev_id, const fal_fdb_entry_t * entry,
                   a_uint32_t reg[])
{
    a_uint32_t port;

    /* destination: either a single port id or an explicit port bitmap */
    if (A_FALSE == entry->portmap_en)
    {
        if (A_TRUE !=
                hsl_port_prop_check(dev_id, entry->port.id, HSL_PP_INCL_CPU))
        {
            return SW_BAD_PARAM;
        }
        port = 0x1UL << entry->port.id;
    }
    else
    {
        if (A_FALSE ==
                hsl_mports_prop_check(dev_id, entry->port.map, HSL_PP_INCL_CPU))
        {
            return SW_BAD_PARAM;
        }
        port = entry->port.map;
    }

    /* destination-MAC command: forward (default), copy or redirect to CPU */
    if (FAL_MAC_CPY_TO_CPU == entry->dacmd)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, COPY_TO_CPU, 1, reg[2]);
    }
    else if (FAL_MAC_RDT_TO_CPU == entry->dacmd)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, REDRCT_TO_CPU, 1, reg[2]);
    }
    else if (FAL_MAC_FRWRD != entry->dacmd)
    {
        return SW_NOT_SUPPORTED;
    }

    /* source-MAC command: only forward or drop are representable */
    if (FAL_MAC_DROP == entry->sacmd)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, SA_DROP_EN, 1, reg[2]);
    }
    else if (FAL_MAC_FRWRD != entry->sacmd)
    {
        return SW_NOT_SUPPORTED;
    }

    if (A_TRUE == entry->leaky_en)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, LEAKY_EN, 1, reg[2]);
    }
    else
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, LEAKY_EN, 0, reg[2]);
    }

    /* AT_STATUS 15 marks a static entry, 7 a dynamic one -- presumably the
     * aging logic counts this field down; verify against the datasheet */
    if (A_TRUE == entry->static_en)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, AT_STATUS, 15, reg[2]);
    }
    else
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, AT_STATUS, 7, reg[2]);
    }

    if (A_TRUE == entry->mirror_en)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, MIRROR_EN, 1, reg[2]);
    }

    if (A_TRUE == entry->clone_en)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, CLONE_EN, 1, reg[2]);
    }

    if (A_TRUE == entry->cross_pt_state)
    {
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, CROSS_PT, 1, reg[2]);
    }

    /* optional per-entry priority override into a validated queue */
    if (A_TRUE == entry->da_pri_en)
    {
        hsl_dev_t *p_dev;

        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, AT_PRI_EN, 1, reg[2]);
        SW_RTN_ON_NULL(p_dev = hsl_dev_ptr_get(dev_id));
        if (entry->da_queue > (p_dev->nr_queue - 1))
            return SW_BAD_PARAM;
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, AT_PRI, entry->da_queue, reg[2]);
    }

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC2, DES_PORT, port, reg[2]);

    /* MAC address bytes go into the first two table words */
    shiva_fdb_fill_addr(entry->addr, &reg[0], &reg[1]);

    return SW_OK;
}