/*
 * Create a fresh VLAN entry (valid, no member ports yet) in the Athena
 * hardware VLAN table.  With HSL_STANDALONG defined, a software shadow
 * copy of the table is kept in p_vlan_table[] and updated on success.
 *
 * @param dev_id   device to program (validated by HSL_DEV_ID_CHECK)
 * @param vlan_id  VLAN id to create; must be in 1..MAX_VLAN_ID
 * @return SW_OK, SW_OUT_OF_RANGE, SW_ALREADY_EXIST, SW_FULL,
 *         SW_NOT_INITIALIZED, or an error from the register/commit path
 */
static sw_error_t
_athena_vlan_create(a_uint32_t dev_id, a_uint32_t vlan_id)
{
    sw_error_t rv;
    a_uint32_t vtable_entry = 0;
#ifdef HSL_STANDALONG
    /* loc stays MAX_VLAN_ENTRY if no free shadow slot is found */
    a_int16_t i, loc = MAX_VLAN_ENTRY;
    v_array_t *p_v_array;
#endif

    HSL_DEV_ID_CHECK(dev_id);

    /* VID 0 is reserved; anything above MAX_VLAN_ID is invalid */
    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

#ifdef HSL_STANDALONG
    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* one pass: remember a free slot, reject a duplicate VID */
    for (i = 0; i < MAX_VLAN_ENTRY; i++)
    {
        if (p_v_array[i].active == A_FALSE)
        {
            loc = i;
        }
        else if (p_v_array[i].vlan_entry.vid == vlan_id)
        {
            return SW_ALREADY_EXIST;
        }
    }

    if (loc == MAX_VLAN_ENTRY)
        return SW_FULL;
#endif

    /* set default value for VLAN_TABLE_FUNC0, all 0 except vid */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* VLAN_TABLE_FUNC1: all 0 except the valid bit */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC1, VT_VALID, 1, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* push the staged FUNC0/FUNC1 registers into the VLAN table */
    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

#ifdef HSL_STANDALONG
    /* mirror the new, empty entry into the software shadow table */
    p_v_array[loc].vlan_entry.vid = vlan_id;
    p_v_array[loc].vlan_entry.mem_ports = 0;
    p_v_array[loc].vlan_entry.u_ports = 0;
    p_v_array[loc].vlan_entry.vid_pri_en = A_FALSE;
    p_v_array[loc].vlan_entry.vid_pri = 0;
    p_v_array[loc].active = A_TRUE;
#endif

    return SW_OK;
}
/*
 * Load one FDB (address table) entry into Athena hardware.
 *
 * @param dev_id  device to program (validated by HSL_DEV_ID_CHECK)
 * @param entry   software view of the entry to add
 * @return SW_OK, SW_BAD_PARAM, or an error from the register/commit path
 */
static sw_error_t
_athena_fdb_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry)
{
    a_uint32_t hw[3] = { 0, 0, 0 };
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    /* an all-zero MAC with neither a port map nor a port id is meaningless */
    if ((A_TRUE == athena_fdb_is_zeroaddr(entry->addr))
        && (0 == entry->port.map)
        && (0 == entry->port.id))
    {
        return SW_BAD_PARAM;
    }

    ret = athena_atu_sw_to_hw(dev_id, entry, hw);
    SW_RTN_ON_ERROR(ret);

    /* stage the three function registers, FUNC2 down to FUNC0 */
    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&hw[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&hw[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&hw[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    /* trigger the load operation */
    return athena_fdb_commit(dev_id, ARL_LOAD_ENTRY);
}
/*
 * Load one FDB (address table) entry into Shiva hardware.
 *
 * @param dev_id  device to program (validated by HSL_DEV_ID_CHECK)
 * @param entry   software view of the entry to add
 * @return SW_OK or an error from the conversion/register/commit path
 */
static sw_error_t
_shiva_fdb_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry)
{
    a_uint32_t hw[3] = { 0, 0, 0 };
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    ret = shiva_atu_sw_to_hw(dev_id, entry, hw);
    SW_RTN_ON_ERROR(ret);

    /* stage the three function registers, FUNC2 down to FUNC0 */
    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&hw[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&hw[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&hw[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    /* trigger the load operation */
    return shiva_fdb_commit(dev_id, ARL_LOAD_ENTRY);
}
/*
 * Append a fully-specified VLAN entry (vid, member ports, priority) to the
 * Athena hardware VLAN table.  With HSL_STANDALONG defined, the software
 * shadow table in p_vlan_table[] is updated on success.
 *
 * @param dev_id      device to program (validated by HSL_DEV_ID_CHECK)
 * @param vlan_entry  entry to load; vid must be in 1..MAX_VLAN_ID and
 *                    mem_ports must pass the port-bitmap property check
 * @return SW_OK, SW_OUT_OF_RANGE, SW_BAD_PARAM, SW_ALREADY_EXIST, SW_FULL,
 *         SW_NOT_INITIALIZED, or an error from the register/commit path
 */
static sw_error_t
_athena_vlan_entry_append(a_uint32_t dev_id, const fal_vlan_t * vlan_entry)
{
    sw_error_t rv;
    a_uint32_t reg[2] = { 0 };
#ifdef HSL_STANDALONG
    /* loc stays MAX_VLAN_ENTRY if no free shadow slot is found */
    a_int16_t i, loc = MAX_VLAN_ENTRY;
    v_array_t *p_v_array;
#endif

    HSL_DEV_ID_CHECK(dev_id);

    /* VID 0 is reserved; anything above MAX_VLAN_ID is invalid */
    if ((vlan_entry->vid == 0) || (vlan_entry->vid > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

    /* member bitmap may include the CPU port but nothing out of range */
    if (A_FALSE == hsl_mports_prop_check(dev_id, vlan_entry->mem_ports,
                                         HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

#ifdef HSL_STANDALONG
    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* one pass: remember a free slot, reject a duplicate VID */
    for (i = 0; i < MAX_VLAN_ENTRY; i++)
    {
        if (p_v_array[i].active == A_FALSE)
        {
            loc = i;
        }
        else if (p_v_array[i].vlan_entry.vid == vlan_entry->vid)
        {
            return SW_ALREADY_EXIST;
        }
    }

    if (loc == MAX_VLAN_ENTRY)
        return SW_FULL;
#endif

    /* convert the software entry into the FUNC0/FUNC1 register pair */
    rv = athena_vlan_sw_to_hw(vlan_entry, reg);
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* push the staged registers into the VLAN table */
    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

#ifdef HSL_STANDALONG
    /* mirror the entry into the software shadow table */
    p_v_array[loc].vlan_entry = *vlan_entry;
    p_v_array[loc].active = A_TRUE;
#endif

    return SW_OK;
}
/*
 * Set the duplex mode of a PHY-backed port.  The MAC is taken out of
 * hardware link control and its RX/TX paths are disabled while the PHY is
 * reconfigured, then the original PORT_STATUS value is restored.
 *
 * Fixes vs. original:
 *  - the initial PORT_STATUS read was not checked, so an uninitialized
 *    reg_val could be modified and written back on a failed read;
 *  - the return code of phy_duplex_set() was overwritten by the restoring
 *    HSL_REG_ENTRY_SET (both used 'rv'), silently discarding PHY errors.
 *
 * @param dev_id   device to program
 * @param port_id  port whose PHY duplex is changed; must have PHY property
 * @param duplex   requested duplex, must be below FAL_DUPLEX_BUTT
 * @return SW_OK, SW_BAD_PARAM, SW_NOT_SUPPORTED, or a register/PHY error
 */
static sw_error_t
_athena_port_duplex_set(a_uint32_t dev_id, fal_port_t port_id,
                        fal_port_duplex_t duplex)
{
    sw_error_t rv;
    sw_error_t phy_rv;
    a_uint32_t phy_id = 0;
    a_uint32_t reg_save = 0;
    a_uint32_t reg_val = 0;
    hsl_phy_ops_t *phy_drv;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY))
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(phy_drv = hsl_phy_api_ops_get(dev_id));
    if (NULL == phy_drv->phy_duplex_set)
        return SW_NOT_SUPPORTED;

    if (FAL_DUPLEX_BUTT <= duplex)
    {
        return SW_BAD_PARAM;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_id);
    SW_RTN_ON_ERROR(rv);

    /* save the current PORT_STATUS value so it can be restored below */
    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);    /* was missing: don't use reg_val on a failed read */
    reg_save = reg_val;

    SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 0, reg_val);
    SW_SET_REG_BY_FIELD(PORT_STATUS, RXMAC_EN, 0, reg_val);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TXMAC_EN, 0, reg_val);

    /* put the MAC under software control and turn off RX/TX MAC_EN */
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* keep the PHY result separate so the restore write can't clobber it */
    phy_rv = phy_drv->phy_duplex_set(dev_id, phy_id, duplex);

    /* restore the saved PORT_STATUS value */
    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg_save), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return phy_rv;
}
/*
 * Horus chip bring-up: enable hardware link polling (LINK_EN) on every
 * front-panel port.  The CPU port is left untouched.
 *
 * @param dev_id  device to initialize
 * @param cfg     init configuration (unused on this chip)
 * @return SW_OK, SW_NOT_INITIALIZED, or a register-access error
 */
static sw_error_t
horus_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    sw_error_t rv;
    a_uint32_t reg;
    a_uint32_t port;
    hsl_dev_t *pdev;

    pdev = hsl_dev_ptr_get(dev_id);
    if (NULL == pdev)
        return SW_NOT_INITIALIZED;

    for (port = 0; port < pdev->nr_ports; port++)
    {
        /* the CPU port keeps its current configuration */
        if (port == pdev->cpu_port_nr)
            continue;

        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, reg);

        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/*
 * Flush all FDB entries learned on one port.  Static entries are removed
 * only when FAL_FDB_DEL_STATIC is set in 'flag'.
 *
 * @param dev_id   device to program (validated by HSL_DEV_ID_CHECK)
 * @param port_id  port whose entries are flushed (CPU port allowed)
 * @param flag     FAL_FDB_DEL_STATIC to also remove static entries
 * @return SW_OK, SW_BAD_PARAM, or an error from the register/commit path
 */
static sw_error_t
_shiva_fdb_del_by_port(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t flag)
{
    a_uint32_t func0 = 0;
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

    /* select the port whose entries the flush operation targets */
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_PORT_NUM, port_id, func0);
    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&func0), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    return shiva_fdb_commit(dev_id,
                            (FAL_FDB_DEL_STATIC & flag)
                                ? ARL_FLUSH_PORT_AND_STATIC
                                : ARL_FLUSH_PORT_NO_STATIC);
}
/*
 * Configure what the port does with frames whose source address is unknown:
 * forward them (port unlocked), drop them, or redirect them to the CPU.
 *
 * @param dev_id   device to program (validated by HSL_DEV_ID_CHECK)
 * @param port_id  port to configure (CPU port allowed)
 * @param cmd      FAL_MAC_FRWRD, FAL_MAC_DROP or FAL_MAC_RDT_TO_CPU
 * @return SW_OK, SW_BAD_PARAM, SW_NOT_SUPPORTED, or a register error
 */
static sw_error_t
_horus_port_unk_sa_cmd_set(a_uint32_t dev_id, fal_port_t port_id,
                           fal_fwd_cmd_t cmd)
{
    a_uint32_t ctl;
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

    HSL_REG_ENTRY_GET(ret, dev_id, PORT_CTL, port_id,
                      (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    switch (cmd)
    {
    case FAL_MAC_FRWRD:
        /* unlock the port: unknown-SA frames are forwarded normally */
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 0, ctl);
        break;

    case FAL_MAC_DROP:
        /* lock the port and drop frames that hit the lock */
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 1, ctl);
        SW_SET_REG_BY_FIELD(PORT_CTL, LOCK_DROP_EN, 1, ctl);
        break;

    case FAL_MAC_RDT_TO_CPU:
        /* lock the port but let locked frames go to the CPU instead */
        SW_SET_REG_BY_FIELD(PORT_CTL, PORT_LOCK_EN, 1, ctl);
        SW_SET_REG_BY_FIELD(PORT_CTL, LOCK_DROP_EN, 0, ctl);
        break;

    default:
        return SW_NOT_SUPPORTED;
    }

    HSL_REG_ENTRY_SET(ret, dev_id, PORT_CTL, port_id,
                      (a_uint8_t *) (&ctl), sizeof (a_uint32_t));
    return ret;
}
/*
 * Execute a VLAN table operation: wait for the table engine to go idle,
 * program the function code, set VT_BUSY to start the operation, then map
 * a full/miss violation to the matching sw_error_t.
 *
 * Fix vs. original: the HSL_REG_ENTRY_GET read-back of VLAN_TABLE_FUNC0
 * was not checked, so on a failed read an uninitialized 'val' could be
 * modified and written back to hardware.
 *
 * @param dev_id  device to program
 * @param op      VT_FUNC operation code (e.g. VLAN_LOAD_ENTRY,
 *                VLAN_PURGE_ENTRY)
 * @return SW_OK, SW_BUSY, SW_FULL, SW_NOT_FOUND, or a register error
 */
static sw_error_t
athena_vlan_commit(a_uint32_t dev_id, a_uint32_t op)
{
    a_uint32_t vt_busy = 1, i = 0x1000, vt_full, val;
    sw_error_t rv;

    /* wait for any in-flight VLAN table operation to finish */
    while (vt_busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0, VT_BUSY,
                          (a_uint8_t *) (&vt_busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (i == 0)
        return SW_BUSY;

    HSL_REG_ENTRY_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);    /* was missing: 'val' is garbage on a failed read */

    /* start the requested operation */
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_FUNC, op, val);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_BUSY, 1, val);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* NOTE(review): unlike shiva_fdb_commit there is no second busy-poll
     * before reading VT_FULL_VIO — presumably the operation completes
     * within the register turnaround on this chip; confirm vs. datasheet. */
    HSL_REG_FIELD_GET(rv, dev_id, VLAN_TABLE_FUNC0, 0, VT_FULL_VIO,
                      (a_uint8_t *) (&vt_full), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (vt_full)
    {
        /* clear the sticky violation bit before reporting the condition */
        val = 0x10;
        HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                          (a_uint8_t *) (&val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        if (VLAN_LOAD_ENTRY == op)
        {
            return SW_FULL;          /* no room for a new entry */
        }
        else if (VLAN_PURGE_ENTRY == op)
        {
            return SW_NOT_FOUND;     /* entry to purge does not exist */
        }
    }

    return SW_OK;
}
/*
 * Replace the member-port bitmap of an existing VLAN, keeping its priority
 * settings (taken from the software shadow table).  Only available when the
 * shadow table exists (HSL_STANDALONG); otherwise SW_NOT_SUPPORTED.
 *
 * @param dev_id    device to program (validated by HSL_DEV_ID_CHECK)
 * @param vlan_id   VLAN to update; must be in 1..MAX_VLAN_ID
 * @param member    new member-port bitmap (CPU port allowed)
 * @param u_member  untagged-member bitmap; must be 0 (not supported here)
 * @return SW_OK, SW_OUT_OF_RANGE, SW_BAD_PARAM, SW_NOT_INITIALIZED,
 *         SW_NOT_SUPPORTED, or an error from the register/commit path
 */
static sw_error_t
_athena_vlan_member_update(a_uint32_t dev_id, a_uint32_t vlan_id,
                           fal_pbmp_t member, fal_pbmp_t u_member)
{
#ifdef HSL_STANDALONG
    sw_error_t rv;
    a_int16_t loc;
    a_uint32_t reg_tmp;
    v_array_t *p_v_array;
    fal_vlan_t *p_sw_vlan;

    HSL_DEV_ID_CHECK(dev_id);

    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

    if (A_FALSE == hsl_mports_prop_check(dev_id, member, HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

    /* untagged members are not supported on this chip */
    if (u_member != 0)
        return SW_BAD_PARAM;

    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* find the shadow-table slot holding this VLAN */
    rv = athena_vlan_table_location(dev_id, vlan_id, &loc);
    SW_RTN_ON_ERROR(rv);
    p_sw_vlan = &p_v_array[loc].vlan_entry;

    /* set value for VLAN_TABLE_FUNC0, all 0 except vid */
    reg_tmp = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, reg_tmp);
    /* carry the priority settings over from the shadow copy */
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI_EN,
                        (a_int32_t)p_sw_vlan->vid_pri_en, reg_tmp);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI, p_sw_vlan->vid_pri, reg_tmp);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg_tmp), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set vlan member for VLAN_TABLE_FUNC1 */
    HSL_REG_FIELD_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0, VID_MEM,
                      (a_uint8_t *) (&member), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* push the staged registers into the VLAN table */
    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

    /* keep the shadow copy in sync */
    p_v_array[loc].vlan_entry.mem_ports = member;

    return SW_OK;
#else
    return SW_NOT_SUPPORTED;
#endif
}
/*
 * Purge the FDB entry matching the given MAC address.
 *
 * @param dev_id  device to program (validated by HSL_DEV_ID_CHECK)
 * @param entry   only entry->addr is used as the lookup key
 * @return SW_OK or an error from the register/commit path
 */
static sw_error_t
_shiva_fdb_del_by_mac(a_uint32_t dev_id, const fal_fdb_entry_t * entry)
{
    a_uint32_t func0 = 0;
    a_uint32_t func1 = 0;
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    /* place the target MAC into the FUNC0/FUNC1 register pair */
    shiva_fdb_fill_addr(entry->addr, &func0, &func1);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&func1), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    HSL_REG_ENTRY_SET(ret, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&func0), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    return shiva_fdb_commit(dev_id, ARL_PURGE_ENTRY);
}
/*
 * One-time host/NAPT helper bring-up on device 0: configure the Atheros
 * header on the CPU path, enable routing, register netfilter hooks and
 * install the ACL rules needed for IPv6/MLD snooping.
 *
 * NOTE(review): 'rv' from the ROUTER_EG write is never checked — a failed
 * register write is silently ignored here; confirm whether that is
 * intentional for init-time best-effort configuration.
 */
void host_helper_init(void)
{
    int i;
    sw_error_t rv;
    a_uint32_t entry;

    /* header len 4 with type 0xaaaa */
    isis_header_type_set(0, A_TRUE, 0xaaaa);
#ifdef ISISC
    /* For S17c (ISISC), it is not necessary to make all frame with header */
    isis_port_txhdr_mode_set(0, 0, FAL_ONLY_MANAGE_FRAME_EN);
    /* Fix tag disappear problem, set TO_CPU_VID_CHG_EN, 0xc00 bit1 */
    isis_cpu_vid_en_set(0, A_TRUE);
    /* set RM_RTD_PPPOE_EN, 0xc00 bit0 */
    isis_rtd_pppoe_en_set(0, A_TRUE);
    /* Enable ARP ack frame as management frame. */
    for (i=1; i<6; i++)
    {
        isis_port_arp_ack_status_set(0, i, A_TRUE);
    }
    isis_arp_cmd_set(0, FAL_MAC_FRWRD);
    /* set VLAN_TRANS_TEST register bit, to block packets from WAN port has
       private dip */
    isis_netisolate_set(0, A_TRUE);
#else
    isis_port_txhdr_mode_set(0, 0, FAL_ALL_TYPE_FRAME_EN);
#endif
    isis_cpu_port_status_set(0, A_TRUE);
    isis_ip_route_status_set(0, A_TRUE);

    /* CPU port with VLAN tag, others w/o VLAN */
    entry = 0x01111112;
    HSL_REG_ENTRY_SET(rv, 0, ROUTER_EG, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));

    napt_procfs_init();

    /* intercept inbound ARP traffic for the NAPT helper */
    nf_register_hook(&arpinhook);
#ifdef CONFIG_IPV6_HWACCEL
    aos_printk("Registering IPv6 hooks... \n");
    nf_register_hook(&ipv6_inhook);
#endif

    /* Enable ACLs to handle MLD packets */
    upnp_ssdp_add_acl_rules();
    ipv6_snooping_solicted_node_add_acl_rules();
    ipv6_snooping_sextuple0_group_add_acl_rules();
    ipv6_snooping_quintruple0_1_group_add_acl_rules();
}
/*
 * Enable or disable flow control (RX, TX and half-duplex TX pause) on a
 * port.  Only possible while the port's flow control is under software
 * (force) control; if FLOW_LINK_EN is set the hardware follows link
 * auto-negotiation and manual setting is refused with SW_DISABLE.
 *
 * @param dev_id   device to program
 * @param port_id  port to configure (CPU port allowed)
 * @param enable   A_TRUE to enable pause, A_FALSE to disable
 * @return SW_OK, SW_BAD_PARAM, SW_DISABLE, or a register error
 */
static sw_error_t
_horus_port_flowctrl_set(a_uint32_t dev_id, fal_port_t port_id,
                         a_bool_t enable)
{
    sw_error_t rv;
    a_uint32_t val, force, reg;

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* map the boolean onto the register bit value, rejecting junk input */
    if (A_TRUE == enable)
    {
        val = 1;
    }
    else if (A_FALSE == enable)
    {
        val = 0;
    }
    else
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(PORT_STATUS, FLOW_LINK_EN, force, reg);
    if (force)
    {
        /* flow control isn't in force mode so can't set */
        return SW_DISABLE;
    }

    /* apply the same setting to all three pause directions */
    SW_SET_REG_BY_FIELD(PORT_STATUS, RX_FLOW_EN, val, reg);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_FLOW_EN, val, reg);
    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_HALF_FLOW_EN, val, reg);

    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    return rv;
}
/* Poll MIB_FUNC.MIB_BUSY until it clears; returns 'timeout_rc' if the
 * hardware is still busy after the full poll budget (0x1000 x 5us). */
static sw_error_t
_dess_mib_busy_wait(a_uint32_t dev_id, sw_error_t timeout_rc)
{
    a_uint32_t mib_busy = 1, i = 0x1000;
    sw_error_t rv;

    while (mib_busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, MIB_FUNC, 0, MIB_BUSY,
                          (a_uint8_t *) (&mib_busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }
    if (i == 0)
        return timeout_rc;
    return SW_OK;
}

/*
 * Issue a MIB engine operation: wait for the engine to go idle, program
 * the function code, set MIB_BUSY to start it, then wait for completion.
 *
 * Fixes vs. original:
 *  - the HSL_REG_ENTRY_GET read-back of MIB_FUNC was unchecked, so a
 *    failed read could let an uninitialized 'val' be written back;
 *  - the two identical busy-poll loops are factored into
 *    _dess_mib_busy_wait() (SW_BUSY when the engine never went idle,
 *    SW_FAIL when the issued operation never completed — as before).
 *
 * @param dev_id  device to program
 * @param op      MIB_FUN operation code
 * @return SW_OK, SW_BUSY, SW_FAIL, or a register error
 */
static sw_error_t
_dess_mib_op_commit(a_uint32_t dev_id, a_uint32_t op)
{
    a_uint32_t val;
    sw_error_t rv;

    /* any previous operation must have finished first */
    rv = _dess_mib_busy_wait(dev_id, SW_BUSY);
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_GET(rv, dev_id, MIB_FUNC, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);    /* was missing: 'val' is garbage on a failed read */

    SW_SET_REG_BY_FIELD(MIB_FUNC, MIB_FUN, op, val);
    SW_SET_REG_BY_FIELD(MIB_FUNC, MIB_BUSY, 1, val);
    HSL_REG_ENTRY_SET(rv, dev_id, MIB_FUNC, 0,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the operation just issued to complete */
    return _dess_mib_busy_wait(dev_id, SW_FAIL);
}
/*
 * Switch a port's flow control between force (software-controlled) mode and
 * link auto-negotiated mode.  FLOW_LINK_EN = 1 means the hardware follows
 * the link, so "force mode enabled" corresponds to clearing that bit.
 *
 * @param dev_id   device to program
 * @param port_id  port to configure (CPU port allowed)
 * @param enable   A_TRUE to enter force mode, A_FALSE to leave it
 * @return SW_OK, SW_BAD_PARAM, or a register error
 */
static sw_error_t
_horus_port_flowctrl_forcemode_set(a_uint32_t dev_id, fal_port_t port_id,
                                   a_bool_t enable)
{
    sw_error_t rv;
    a_uint32_t force, reg;

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(PORT_STATUS, FLOW_LINK_EN, force, reg);
    /* FLOW_LINK_EN != enable means the port is already in the requested
     * mode (enable=TRUE wants FLOW_LINK_EN cleared) — nothing to do */
    if (force != (a_uint32_t) enable)
    {
        return SW_OK;
    }

    if (A_TRUE == enable)
    {
        /* enter force mode; start with TX pause off */
        SW_SET_REG_BY_FIELD(PORT_STATUS, FLOW_LINK_EN, 0, reg);
        SW_SET_REG_BY_FIELD(PORT_STATUS, TX_FLOW_EN, 0, reg);
    }
    else if (A_FALSE == enable)
    {
        /* hand flow control back to link auto-negotiation */
        SW_SET_REG_BY_FIELD(PORT_STATUS, FLOW_LINK_EN, 1, reg);
    }
    else
    {
        return SW_BAD_PARAM;
    }

    SW_SET_REG_BY_FIELD(PORT_STATUS, TX_HALF_FLOW_EN, 0, reg);

    HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    return rv;
}
/*
 * Isis chip bring-up: select the CRC10 FDB hash mode, then enable hardware
 * link polling (LINK_EN) on every front-panel port.  The CPU port is left
 * untouched.
 *
 * @param dev_id  device to initialize
 * @param cfg     init configuration (unused on this chip)
 * @return SW_OK, SW_NOT_INITIALIZED, or a register-access error
 */
static sw_error_t
isis_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    sw_error_t rv;
    a_uint32_t reg;
    a_uint32_t port;
    hsl_dev_t *pdev;

    pdev = hsl_dev_ptr_get(dev_id);
    if (NULL == pdev)
        return SW_NOT_INITIALIZED;

    /* Set default FDB hash mode as CRC10 */
    reg = 1;
    HSL_REG_FIELD_SET(rv, dev_id, FORWARD_CTL0, 0, HASH_MODE,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    for (port = 0; port < pdev->nr_ports; port++)
    {
        /* the CPU port keeps its current configuration */
        if (port == pdev->cpu_port_nr)
            continue;

        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, reg);

        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/*
 * Route IGMP/MLD frames to a specific egress queue, or restore the default
 * queue mapping.
 *
 * @param dev_id  device to program (validated by HSL_DEV_ID_CHECK)
 * @param enable  A_TRUE to steer IGMP/MLD frames into 'queue',
 *                A_FALSE to disable the override
 * @param queue   target queue; checked against the device queue count
 *                only when enabling
 * @return SW_OK, SW_BAD_PARAM, or a register error
 */
static sw_error_t
_shiva_igmp_mld_entry_queue_set(a_uint32_t dev_id, a_bool_t enable,
                                a_uint32_t queue)
{
    a_uint32_t qm_ctl;
    hsl_dev_t *p_dev;
    sw_error_t ret;

    HSL_DEV_ID_CHECK(dev_id);

    HSL_REG_ENTRY_GET(ret, dev_id, QM_CTL, 0,
                      (a_uint8_t *) (&qm_ctl), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(ret);

    if (A_TRUE == enable)
    {
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI_EN, 1, qm_ctl);

        /* the queue must exist on this device */
        SW_RTN_ON_NULL(p_dev = hsl_dev_ptr_get(dev_id));
        if (queue >= p_dev->nr_queue)
            return SW_BAD_PARAM;

        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI, queue, qm_ctl);
    }
    else if (A_FALSE == enable)
    {
        /* disable the override and clear the stale queue number */
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI_EN, 0, qm_ctl);
        SW_SET_REG_BY_FIELD(QM_CTL, IGMP_PRI, 0, qm_ctl);
    }
    else
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_ENTRY_SET(ret, dev_id, QM_CTL, 0,
                      (a_uint8_t *) (&qm_ctl), sizeof (a_uint32_t));
    return ret;
}
/*
 * Program one LED control pattern (mode, light-condition map and blink
 * frequency) for a Horus LED group.  The pattern value is written into the
 * high or low half-word of the per-id control register, and — for LAN and
 * MAC-port groups — the mode is additionally mirrored into the per-port
 * fields of the LED_PATTERN register.
 *
 * @param dev_id   device to program (validated by HSL_DEV_ID_CHECK)
 * @param group    LAN / WAN / MAC-port group, below LED_GROUP_BUTT
 * @param id       pattern id, <= MAX_LED_PATTERN_ID; must be 0 for the
 *                 MAC-port group
 * @param pattern  requested mode/map/frequency
 * @return SW_OK, SW_BAD_PARAM, or a register error
 */
static sw_error_t
_horus_led_ctrl_pattern_set(a_uint32_t dev_id, led_pattern_group_t group,
                            led_pattern_id_t id, led_ctrl_pattern_t * pattern)
{
    a_uint32_t data = 0, reg, mode;
    a_uint32_t addr;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (group >= LED_GROUP_BUTT)
    {
        return SW_BAD_PARAM;
    }

    if (id > MAX_LED_PATTERN_ID)
    {
        return SW_BAD_PARAM;
    }

    /* the MAC-port group only supports pattern id 0 */
    if ((LED_MAC_PORT_GROUP == group) && (0 != id))
    {
        return SW_BAD_PARAM;
    }

    /* locate the control word for this group/id */
    if (LED_MAC_PORT_GROUP == group)
    {
        addr = LED_PATTERN_ADDR + 8;
    }
    else
    {
        addr = LED_PATTERN_ADDR + (id << 2);
    }

    /* translate the abstract mode into the 2-bit PATTERN_EN encoding */
    if (LED_ALWAYS_OFF == pattern->mode)
    {
        mode = 0;
    }
    else if (LED_ALWAYS_BLINK == pattern->mode)
    {
        mode = 1;
    }
    else if (LED_ALWAYS_ON == pattern->mode)
    {
        mode = 2;
    }
    else if (LED_PATTERN_MAP_EN == pattern->mode)
    {
        mode = 3;
    }
    else
    {
        return SW_BAD_PARAM;
    }

    SW_SET_REG_BY_FIELD(LED_CTRL, PATTERN_EN, mode, data);

    /* translate each requested light/blink condition into its enable bit */
    if (pattern->map & (1 << FULL_DUPLEX_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, FULL_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << HALF_DUPLEX_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, HALF_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << POWER_ON_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, POWERON_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_1000M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, GE_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_100M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, FE_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << LINK_10M_LIGHT_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, ETH_LIGHT_EN, 1, data);
    }

    if (pattern->map & (1 << COLLISION_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, COL_BLINK_EN, 1, data);
    }

    if (pattern->map & (1 << RX_TRAFFIC_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, RX_BLINK_EN, 1, data);
    }

    if (pattern->map & (1 << TX_TRAFFIC_BLINK_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, TX_BLINK_EN, 1, data);
    }

    if (pattern->map & (1 << LINKUP_OVERRIDE_EN))
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, LINKUP_OVER_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, LINKUP_OVER_EN, 0, data);
    }

    /* translate the blink frequency into its 2-bit encoding */
    if (LED_BLINK_2HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 0, data);
    }
    else if (LED_BLINK_4HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 1, data);
    }
    else if (LED_BLINK_8HZ == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 2, data);
    }
    else if (LED_BLINK_TXRX == pattern->freq)
    {
        SW_SET_REG_BY_FIELD(LED_CTRL, BLINK_FREQ, 3, data);
    }
    else
    {
        return SW_BAD_PARAM;
    }

    /* read-modify-write: the WAN pattern lives in the upper half-word of
     * the control register, the other groups in the lower half-word */
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (LED_WAN_PORT_GROUP == group)
    {
        reg &= 0xffff;
        reg |= (data << 16);
    }
    else
    {
        reg &= 0xffff0000;
        reg |= data;
    }

    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only LAN and MAC-port groups have per-port mode fields to update */
    if (LED_WAN_PORT_GROUP == group)
    {
        return SW_OK;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, LED_PATTERN, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (LED_LAN_PORT_GROUP == group)
    {
        /* mirror the mode into the LED-1 or LED-0 field of ports 1..3 */
        if (id)
        {
            SW_SET_REG_BY_FIELD(LED_PATTERN, P3L1_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P2L1_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P1L1_MODE, mode, data);
        }
        else
        {
            SW_SET_REG_BY_FIELD(LED_PATTERN, P3L0_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P2L0_MODE, mode, data);
            SW_SET_REG_BY_FIELD(LED_PATTERN, P1L0_MODE, mode, data);
        }
    }
    else
    {
        /* MAC-port group drives the MAC5 mode field */
        SW_SET_REG_BY_FIELD(LED_PATTERN, M5_MODE, mode, data);
    }

    HSL_REG_ENTRY_SET(rv, dev_id, LED_PATTERN, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
}
/*
 * Execute an address-table (ARL) operation: wait for the engine to go idle,
 * program the function code (mapping the two software-only flush-port
 * variants onto the hardware ARL_FLUSH_PORT_UNICAST op plus FLUSH_ST_EN),
 * start the operation, wait for completion, and translate a full/miss
 * violation into the matching sw_error_t.
 *
 * @param dev_id  device to program
 * @param op      ARL operation code (load, purge, flush-port variants, ...)
 * @return SW_OK, SW_BUSY (engine never went idle), SW_FAIL (operation did
 *         not complete), SW_FULL, or SW_NOT_FOUND
 */
static sw_error_t
shiva_fdb_commit(a_uint32_t dev_id, a_uint32_t op)
{
    sw_error_t rv;
    a_uint32_t busy = 1;
    a_uint32_t full_vio;
    a_uint32_t i = 1000;
    a_uint32_t entry;
    a_uint32_t hwop = op;

    /* wait for any in-flight ARL operation to finish */
    while (busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_BUSY,
                          (a_uint8_t *) (&busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }

    if (0 == i)
    {
        return SW_BUSY;
    }

    HSL_REG_ENTRY_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_BUSY, 1, entry);

    /* the two flush-port variants are the same hardware op; only the
     * FLUSH_ST_EN bit decides whether static entries are removed too */
    if (ARL_FLUSH_PORT_AND_STATIC == hwop)
    {
        hwop = ARL_FLUSH_PORT_UNICAST;
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, FLUSH_ST_EN, 1, entry);
    }

    if (ARL_FLUSH_PORT_NO_STATIC == hwop)
    {
        hwop = ARL_FLUSH_PORT_UNICAST;
        SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, FLUSH_ST_EN, 0, entry);
    }

    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_FUNC, hwop, entry);

    /* start the operation */
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the operation just issued to complete */
    busy = 1;
    i = 1000;
    while (busy && --i)
    {
        HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_BUSY,
                          (a_uint8_t *) (&busy), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        aos_udelay(5);
    }

    if (0 == i)
    {
        return SW_FAIL;
    }

    HSL_REG_FIELD_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0, AT_FULL_VIO,
                      (a_uint8_t *) (&full_vio), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (full_vio)
    {
        /* must clear AT_FULL_VIO bit */
        entry = 0x1000;
        HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        if (ARL_LOAD_ENTRY == hwop)
        {
            return SW_FULL;          /* no room for a new entry */
        }
        else if ((ARL_PURGE_ENTRY == hwop)
                 || (ARL_FLUSH_PORT_UNICAST == hwop))
        {
            return SW_NOT_FOUND;     /* nothing matched the request */
        }
        else
        {
            return SW_FAIL;
        }
    }

    return SW_OK;
}
/*
 * Configure a per-port user-defined-field (UDF) window for ACL matching:
 * the byte offset and length of the window for one UDF type.  L2/L3/L4
 * windows live in WIN_RULE_CTL0, L2-SNAP and L3-plus windows in
 * WIN_RULE_CTL1.
 *
 * @param dev_id    device to program (validated by HSL_DEV_ID_CHECK)
 * @param port_id   port whose window registers are updated
 * @param udf_type  which UDF window to set
 * @param offset    window start, <= ISIS_UDF_MAX_OFFSET
 * @param length    window length, bounded by the same limit
 * @return SW_OK, SW_BAD_PARAM, or a register error
 */
static sw_error_t
_isis_acl_port_udf_profile_set(a_uint32_t dev_id, fal_port_t port_id,
                               fal_acl_udf_type_t udf_type,
                               a_uint32_t offset, a_uint32_t length)
{
    sw_error_t rv;
    a_uint32_t reg;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_UDF_MAX_OFFSET < offset)
    {
        return SW_BAD_PARAM;
    }

    if (ISIS_UDF_MAX_OFFSET < length)
    {
        return SW_BAD_PARAM;
    }

    /* read the control register that holds this UDF type's window */
    if ((FAL_ACL_UDF_TYPE_L2_SNAP == udf_type)
        || (FAL_ACL_UDF_TYPE_L3_PLUS == udf_type))
    {
        HSL_REG_ENTRY_GET(rv, dev_id, WIN_RULE_CTL1, port_id,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    }
    else
    {
        HSL_REG_ENTRY_GET(rv, dev_id, WIN_RULE_CTL0, port_id,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    }
    SW_RTN_ON_ERROR(rv);

    /* update the offset/length pair belonging to the requested type */
    switch (udf_type)
    {
        case FAL_ACL_UDF_TYPE_L2:
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L2_OFFSET, offset, reg);
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L2_LENGTH, length, reg);
            break;

        case FAL_ACL_UDF_TYPE_L3:
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L3_OFFSET, offset, reg);
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L3_LENGTH, length, reg);
            break;

        case FAL_ACL_UDF_TYPE_L4:
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L4_OFFSET, offset, reg);
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL0, L4_LENGTH, length, reg);
            break;

        case FAL_ACL_UDF_TYPE_L2_SNAP:
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L2S_OFFSET, offset, reg);
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L2S_LENGTH, length, reg);
            break;

        case FAL_ACL_UDF_TYPE_L3_PLUS:
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L3P_OFFSET, offset, reg);
            SW_SET_REG_BY_FIELD(WIN_RULE_CTL1, L3P_LENGTH, length, reg);
            break;

        default:
            return SW_BAD_PARAM;
    }

    /* write the updated window back to the register it came from */
    if ((FAL_ACL_UDF_TYPE_L2_SNAP == udf_type)
        || (FAL_ACL_UDF_TYPE_L3_PLUS == udf_type))
    {
        HSL_REG_ENTRY_SET(rv, dev_id, WIN_RULE_CTL1, port_id,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    }
    else
    {
        HSL_REG_ENTRY_SET(rv, dev_id, WIN_RULE_CTL0, port_id,
                          (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    }

    return rv;
}
/*
 * Run the Garuda built-in self test on the internal memories.  A first
 * BIST pass is started; if it reports exactly one failing address, that
 * address is mapped to the recovery row (BIST_RCV) and the test is run a
 * second time, which must then come back clean.
 *
 * @param dev_id  device to test
 * @return SW_OK on a clean (or recovered) pass, SW_INIT_ERROR on timeout,
 *         multi-bit failure, or a still-failing second pass
 */
static sw_error_t
garuda_bist_test(a_uint32_t dev_id)
{
    a_uint32_t entry, data, i;
    sw_error_t rv;

    /* wait for any previous BIST run to finish */
    data = 1;
    i = 0x1000;
    while (data && --i)
    {
        HSL_REG_ENTRY_GET(rv, dev_id, BIST_CTRL, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_GET_FIELD_BY_REG(BIST_CTRL, BIST_BUSY, data, entry);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_INIT_ERROR;
    }

    /* start the first pass with all three test patterns enabled */
    entry = 0;
    SW_SET_REG_BY_FIELD(BIST_CTRL, BIST_BUSY, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN2, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN1, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN0, 1, entry);
    HSL_REG_ENTRY_SET(rv, dev_id, BIST_CTRL, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the first pass to complete */
    data = 1;
    i = 0x1000;
    while (data && --i)
    {
        HSL_REG_ENTRY_GET(rv, dev_id, BIST_CTRL, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_GET_FIELD_BY_REG(BIST_CTRL, BIST_BUSY, data, entry);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_INIT_ERROR;
    }

    SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_CNT, data, entry);
    if (data)
    {
        /* only a single-address failure can be recovered */
        SW_GET_FIELD_BY_REG(BIST_CTRL, ONE_ERR, data, entry);
        if (!data)
        {
            return SW_INIT_ERROR;
        }

        /* map the failing address onto the spare (recovery) row */
        SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_ADDR, data, entry);
        entry = 0;
        SW_SET_REG_BY_FIELD(BIST_RCV, RCV_EN, 1, entry);
        SW_SET_REG_BY_FIELD(BIST_RCV, RCV_ADDR, data, entry);
        HSL_REG_ENTRY_SET(rv, dev_id, BIST_RCV, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }
    else
    {
        return SW_OK;
    }

    /* re-run the test with the recovery mapping in place */
    entry = 0;
    SW_SET_REG_BY_FIELD(BIST_CTRL, BIST_BUSY, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN2, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN1, 1, entry);
    SW_SET_REG_BY_FIELD(BIST_CTRL, PTN_EN0, 1, entry);
    HSL_REG_ENTRY_SET(rv, dev_id, BIST_CTRL, 0,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* wait for the second pass to complete */
    data = 1;
    i = 0x1000;
    while (data && --i)
    {
        HSL_REG_ENTRY_GET(rv, dev_id, BIST_CTRL, 0,
                          (a_uint8_t *) (&entry), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
        SW_GET_FIELD_BY_REG(BIST_CTRL, BIST_BUSY, data, entry);
        aos_udelay(5);
    }
    if (0 == i)
    {
        return SW_INIT_ERROR;
    }

    /* the recovered memory must now test clean */
    SW_GET_FIELD_BY_REG(BIST_CTRL, ERR_CNT, data, entry);
    if (data)
    {
        return SW_INIT_ERROR;
    }

    return SW_OK;
}
/*
 * Garuda chip bring-up: program the POSTRIP strap register (PHY PLL,
 * RGMII/GMII pad modes and TX/RX delays) from the board-specific init
 * configuration, apply the CPU-mode specific MAC/PHY pad setup, then
 * enable hardware link polling on every front-panel port.
 *
 * @param dev_id  device to initialize
 * @param cfg     init configuration; cfg->chip_spec_cfg must point to a
 *                garuda_init_spec_cfg
 * @return SW_OK, SW_NOT_INITIALIZED, SW_BAD_PARAM, or a register error
 */
static sw_error_t
garuda_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    garuda_init_spec_cfg *garuda_init_cfg = NULL;
    hsl_dev_t *pdev = NULL;
    hsl_init_mode cpu_mode;
    a_uint32_t port_id;
    a_uint32_t data;
    sw_error_t rv;

    pdev = hsl_dev_ptr_get(dev_id);
    if (NULL == pdev)
    {
        return SW_NOT_INITIALIZED;
    }

    cpu_mode = cfg->cpu_mode;

    HSL_REG_ENTRY_GET(rv, dev_id, POSTRIP, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* phy pll on */
    SW_SET_REG_BY_FIELD(POSTRIP, PHY_PLL_ON, 1, data);

    garuda_init_cfg = (garuda_init_spec_cfg* )(cfg->chip_spec_cfg);
    if (!garuda_init_cfg)
    {
        return SW_BAD_PARAM;
    }

    /* board-specific RX/TX delay straps */
    if (A_TRUE == garuda_init_cfg->rx_delay_s1)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S1, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S1, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->rx_delay_s0)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S0, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RXDELAY_S0, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->tx_delay_s1)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S1, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S1, 0, data);
    }

    if (A_TRUE == garuda_init_cfg->tx_delay_s0)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 0, data);
    }

    /* tx/rx delay enable */
    if (A_TRUE == garuda_init_cfg->rgmii_txclk_delay)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_TXCLK_DELAY_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_TXCLK_DELAY_EN, 0, data);
    }

    /* tx/rx delay enable */
    if (A_TRUE == garuda_init_cfg->rgmii_rxclk_delay)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_RXCLK_DELAY_EN, 1, data);
    }
    else
    {
        SW_SET_REG_BY_FIELD(POSTRIP, RGMII_RXCLK_DELAY_EN, 0, data);
    }

    /* mac5 default mode */
    /*SW_SET_REG_BY_FIELD(POSTRIP, MAC5_PHY_MODE, 0, data);
    SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 0, data);*/

    /* mac0 default phy mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_MAC_MODE, 0, data);

    /* mac0 default rgmii mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_RGMII_EN, 1, data);
    SW_SET_REG_BY_FIELD(POSTRIP, MAC0_GMII_EN, 0, data);

    /* mac5 default disable mode */
    SW_SET_REG_BY_FIELD(POSTRIP, MAC5_PHY_MODE, 0, data);
    SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 0, data);

    /* phy default mode */
    SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 0, data);
    SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 0, data);

    /* modify default mode: board uses GMII instead of RGMII on MAC0 */
    if (A_FALSE == garuda_init_cfg->mac0_rgmii)
    {
        SW_SET_REG_BY_FIELD(POSTRIP, MAC0_RGMII_EN, 0, data);
        SW_SET_REG_BY_FIELD(POSTRIP, MAC0_GMII_EN, 1, data);
        /*invert clock output for port0 gmii pad.*/
        /* NOTE(review): rv from this MASK_CTL read-modify-write pair is
         * never checked; a failed access is silently ignored. */
        a_uint32_t temp;
        HSL_REG_ENTRY_GET(rv, dev_id, MASK_CTL, 0,
                          (a_uint8_t *) (&temp), sizeof (a_uint32_t));
        temp |= 1<<MASK_CTL_MII_CLK0_SEL_BOFFSET;
        HSL_REG_ENTRY_SET(rv, dev_id, MASK_CTL, 0,
                          (a_uint8_t *) (&temp), sizeof (a_uint32_t));
    }

    if(HSL_CPU_2 == cpu_mode)
    {
        /* second CPU attaches via PHY4 pads: RGMII or GMII */
        if (A_TRUE == garuda_init_cfg->mac5_rgmii)
        {
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 1, data);
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 0, data);

            a_uint32_t phy_id = 4;
            /* phy4 rgmii mode enable */
            phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_RGMII_MODE,
                          F1_DEBUG_PORT_RGMII_MODE_EN);

            /* Rx delay enable */
            if (A_TRUE == garuda_init_cfg->phy4_rx_delay)
            {
                phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_RX_DELAY,
                              F1_DEBUG_PORT_RX_DELAY_EN);
            }
            else
            {
                phy_dport_clear(dev_id, phy_id, F1_DEBUG_PORT_RX_DELAY,
                                F1_DEBUG_PORT_RX_DELAY_EN);
            }

            /* Tx delay enable */
            if (A_TRUE == garuda_init_cfg->phy4_tx_delay)
            {
                phy_dport_set(dev_id, phy_id, F1_DEBUG_PORT_TX_DELAY,
                              F1_DEBUG_PORT_TX_DELAY_EN);
            }
            else
            {
                phy_dport_clear(dev_id, phy_id, F1_DEBUG_PORT_TX_DELAY,
                                F1_DEBUG_PORT_TX_DELAY_EN);
            }
        }
        else
        {
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_RGMII_EN, 0, data);
            SW_SET_REG_BY_FIELD(POSTRIP, PHY4_GMII_EN, 1, data);
        }
    }
    else if (HSL_CPU_1 == cpu_mode)
    {
        //SW_SET_REG_BY_FIELD(POSTRIP, TXDELAY_S0, 0, data);
    }
    else if (HSL_CPU_1_PLUS == cpu_mode)
    {
        /* MAC5 connects to an external MAC in 1+ mode */
        SW_SET_REG_BY_FIELD(POSTRIP, MAC5_MAC_MODE, 1, data);
    }
    else if (HSL_NO_CPU == cpu_mode)
    {
    }

    HSL_REG_ENTRY_SET(rv, dev_id, POSTRIP, 0,
                      (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* enable hardware link polling on every front port; skip the CPU port */
    for (port_id = 0; port_id < pdev->nr_ports; port_id++)
    {
        if (port_id == pdev->cpu_port_nr)
        {
            continue;
        }

        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, data);

        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port_id,
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/*
 * Look up an address-table entry.  Supports three operations:
 *  - ARL_FIND_ENTRY:  exact match on entry->addr;
 *  - ARL_NEXT_ENTRY:  next valid entry after entry->addr;
 *  - ARL_FIRST_ENTRY: first valid entry (issued to hardware as a
 *    NEXT from the all-zero address).
 * On success the hardware result is converted back into *entry.
 *
 * @param dev_id  device to query
 * @param entry   in: lookup key (addr); out: the entry found
 * @param op      one of the ARL_* lookup operations above
 * @return SW_OK, SW_NO_MORE, SW_NOT_FOUND, or SW_FAIL
 */
static sw_error_t
shiva_atu_get(a_uint32_t dev_id, fal_fdb_entry_t * entry, a_uint32_t op)
{
    sw_error_t rv;
    a_uint32_t reg[3] = { 0 };
    a_uint32_t status = 0;
    a_uint32_t hwop = op;

    /* NEXT/FIND start from the caller-supplied MAC address */
    if ((ARL_NEXT_ENTRY == op) || (ARL_FIND_ENTRY == op))
    {
        shiva_fdb_fill_addr(entry->addr, &reg[0], &reg[1]);
    }

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set status not zero */
    if (ARL_NEXT_ENTRY == op)
    {
        reg[2] = 0xf0000;
    }

    /* FIRST is a NEXT from the all-zero address */
    if (ARL_FIRST_ENTRY == op)
    {
        hwop = ARL_NEXT_ENTRY;
    }

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&reg[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    rv = shiva_fdb_commit(dev_id, hwop);
    SW_RTN_ON_ERROR(rv);

    /* read the hardware entry back */
    HSL_REG_ENTRY_GET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_GET(rv, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_GET(rv, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&reg[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(ADDR_TABLE_FUNC2, AT_STATUS, status, reg[2]);
    shiva_atu_hw_to_sw(reg, entry);

    /* If hardware return back with address and status all zero,
       that means no other next valid entry in fdb table */
    if ((A_TRUE == shiva_fdb_is_zeroaddr(entry->addr)) && (0 == status))
    {
        if (ARL_NEXT_ENTRY == op)
        {
            return SW_NO_MORE;
        }
        else if ((ARL_FIND_ENTRY == op) || (ARL_FIRST_ENTRY == op))
        {
            return SW_NOT_FOUND;
        }
        else
        {
            return SW_FAIL;
        }
    }
    else
    {
        return SW_OK;
    }
}