/* Load one FDB (MAC address table) entry into Athena hardware.
 * Rejects an all-zero MAC that also has no destination port map/id.
 * Returns SW_OK on success, SW_BAD_PARAM on invalid entry, or the
 * first register/commit error encountered. */
static sw_error_t
_athena_fdb_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry)
{
    sw_error_t rv;
    a_uint32_t reg[3] = { 0, 0, 0 };

    HSL_DEV_ID_CHECK(dev_id);

    /* an all-zero address with neither a port map nor a port id is invalid */
    if ((A_TRUE == athena_fdb_is_zeroaddr(entry->addr))
        && (0 == entry->port.map) && (0 == entry->port.id))
    {
        return SW_BAD_PARAM;
    }

    rv = athena_atu_sw_to_hw(dev_id, entry, reg);
    SW_RTN_ON_ERROR(rv);

    /* FIX: "®[...]" was mojibake for "&reg[...]" (HTML-entity decoding) */
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&reg[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* trigger the hardware load of the staged entry */
    rv = athena_fdb_commit(dev_id, ARL_LOAD_ENTRY);
    return rv;
}
/* Horus hardware bring-up: enable link auto-negotiation (LINK_EN) on
 * every front-panel port; the CPU port keeps its current setting. */
static sw_error_t
horus_hw_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    hsl_dev_t *dev = hsl_dev_ptr_get(dev_id);
    a_uint32_t port;
    a_uint32_t reg_val;
    sw_error_t rv;

    if (NULL == dev)
    {
        return SW_NOT_INITIALIZED;
    }

    for (port = 0; port < dev->nr_ports; port++)
    {
        /* leave the CPU port untouched */
        if (port == dev->cpu_port_nr)
        {
            continue;
        }

        HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        SW_SET_REG_BY_FIELD(PORT_STATUS, LINK_EN, 1, reg_val);

        HSL_REG_ENTRY_SET(rv, dev_id, PORT_STATUS, port,
                          (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Write one Garuda ACL rule-select entry to hardware.
 * Words 1..6 are written first; word 0 (which carries the enable state)
 * is written last so the entry only takes effect once fully programmed.
 * NOTE(review): slct[7] is never written — presumably unused by the
 * hardware; confirm against the Garuda register map. */
sw_error_t
garuda_acl_slct_write(a_uint32_t dev_id, a_uint32_t slct_idx, a_uint32_t slct[8])
{
    sw_error_t rv;
    a_uint32_t base, addr;
    a_uint32_t i;

    /* each select entry occupies 32 bytes (slct_idx << 5) */
    base = GARUDA_RULE_SLCT_ADDR + (slct_idx << 5);

    /* set rule address */
    for (i = 1; i < 7; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(slct[i])), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* set rule enable (word 0, written last on purpose) */
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, base, sizeof (a_uint32_t),
                          (a_uint8_t *) (&(slct[0])), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
}
/* Bind the destination port bitmap (low 7 bits) of an ISIS ACL filter.
 * Three compile-time strategies:
 *   ISIS_SW_ENTRY : pull the filter from hardware into a SW copy, patch
 *                   bits [6:0] of vlu[4], push it back down.
 *   ISIS_HW_ENTRY : patch the cached SW rule-table copy and push it down.
 *   otherwise     : drive the rule-function registers directly —
 *                   issue a read op, modify the word at offset +20,
 *                   then issue a write-back op.
 */
static sw_error_t
_isis_filter_ports_bind(a_uint32_t dev_id, a_uint32_t flt_idx, a_uint32_t ports)
{
#ifdef ISIS_SW_ENTRY
    hw_filter_t filter;

    _isis_filter_up_to_sw(dev_id, &filter, flt_idx);

    /* replace only the low 7 bits (port bitmap) of value word 4 */
    filter.vlu[4] &= 0xffffff80;
    filter.vlu[4] |= (ports & 0x7f);

    _isis_filter_down_to_hw(dev_id, &filter, flt_idx);
    return SW_OK;
#else
#ifdef ISIS_HW_ENTRY
    hw_filter_t filter;

    /* start from the software shadow of the rule entry */
    filter = sw_rule_ent[dev_id][flt_idx].filter;

    filter.vlu[4] &= 0xffffff80;
    filter.vlu[4] |= (ports & 0x7f);

    _isis_filter_down_to_hw(dev_id, &filter, flt_idx);
    return SW_OK;
#else
    sw_error_t rv;
    a_uint32_t addr, data;

    /* read filter value at first */
    addr = ISIS_RULE_FUNC_ADDR;
    /* bit 31 = start/busy, low 7 bits = filter index; bit 10 presumably
     * selects the read direction — confirm against the ISIS datasheet.
     * NOTE(review): (0x1 << 31) left-shifts into the sign bit of int,
     * which is undefined behavior; 0x1UL would be safer. */
    data = (flt_idx & 0x7f) | (0x1 << 10) | (0x1 << 31);
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* get filter value and modify it */
    addr = ISIS_RULE_FUNC_ADDR + 20;
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    data &= 0xffffff80;
    data |= (ports & 0x7f);
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* write back filter value */
    addr = ISIS_RULE_FUNC_ADDR;
    data = (flt_idx & 0x7f) | (0x1 << 31);
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
#endif
#endif
}
/* Release per-device SSDK state for every configured device.
 * For each device with a saved init configuration: invoke its optional
 * dev_clean hook, then free and clear the configuration slot.
 * Finally tears down the user/kernel interface when UK_IF is enabled.
 * Returns SW_OK, or the first error from a dev_clean hook / API lookup. */
sw_error_t
hsl_dev_cleanup(void)
{
    sw_error_t rv = SW_OK;
    a_uint32_t dev_id;

    for (dev_id = 0; dev_id < SW_MAX_NR_DEV; dev_id++)
    {
        if (dev_ssdk_cfg[dev_id])
        {
            hsl_api_t *p_api;

            SW_RTN_ON_NULL(p_api = hsl_api_ptr_get(dev_id));

            /* dev_clean is optional; only call it when provided */
            if (p_api->dev_clean)
            {
                rv = p_api->dev_clean(dev_id);
                SW_RTN_ON_ERROR(rv);
            }

            aos_mem_free(dev_ssdk_cfg[dev_id]);
            dev_ssdk_cfg[dev_id] = NULL;
        }
    }

#ifdef UK_IF
    SW_RTN_ON_ERROR(sw_uk_cleanup());
#endif

    return SW_OK;
}
/* Load one FDB entry into Shiva hardware: convert the software entry to
 * the three ATU register words, stage them, then commit a LOAD operation.
 * Returns SW_OK or the first register/commit error. */
static sw_error_t
_shiva_fdb_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry)
{
    sw_error_t rv;
    a_uint32_t reg[3] = { 0, 0, 0 };

    HSL_DEV_ID_CHECK(dev_id);

    rv = shiva_atu_sw_to_hw(dev_id, entry, reg);
    SW_RTN_ON_ERROR(rv);

    /* FIX: "®[...]" was mojibake for "&reg[...]" (HTML-entity decoding) */
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC2, 0,
                      (a_uint8_t *) (&reg[2]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    rv = shiva_fdb_commit(dev_id, ARL_LOAD_ENTRY);
    return rv;
}
/* Program one Garuda ACL rule: five value words followed by five mask
 * words. Each rule slot is 32 bytes (rule_idx << 5) apart in both the
 * value and mask register regions. */
sw_error_t
garuda_acl_rule_write(a_uint32_t dev_id, a_uint32_t rule_idx,
                      a_uint32_t vlu[8], a_uint32_t msk[8])
{
    sw_error_t rv;
    a_uint32_t i, base, addr;

    /* set rule value */
    base = GARUDA_RULE_VLU_ADDR + (rule_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(vlu[i])), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* set rule mask */
    base = GARUDA_RULE_MSK_ADDR + (rule_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(msk[i])), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Create an empty VLAN with the given id on an Athena device.
 * Programs VLAN_TABLE_FUNC0 (vid only) and VLAN_TABLE_FUNC1 (valid bit
 * only, no member ports) and commits a LOAD operation. When built
 * standalone (HSL_STANDALONG), also records the VLAN in the software
 * shadow table, rejecting duplicates and a full table. */
static sw_error_t
_athena_vlan_create(a_uint32_t dev_id, a_uint32_t vlan_id)
{
    sw_error_t rv;
    a_uint32_t vtable_entry = 0;
#ifdef HSL_STANDALONG
    /* loc: first free slot found; stays MAX_VLAN_ENTRY when table is full */
    a_int16_t i, loc = MAX_VLAN_ENTRY;
    v_array_t *p_v_array;
#endif

    HSL_DEV_ID_CHECK(dev_id);

    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

#ifdef HSL_STANDALONG
    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* single pass: remember a free slot, and fail on a duplicate vid */
    for (i = 0; i < MAX_VLAN_ENTRY; i++)
    {
        if (p_v_array[i].active == A_FALSE)
        {
            loc = i;
        }
        else if (p_v_array[i].vlan_entry.vid == vlan_id)
        {
            return SW_ALREADY_EXIST;
        }
    }

    if (loc == MAX_VLAN_ENTRY)
        return SW_FULL;
#endif

    /* set default value for VLAN_TABLE_FUNC0, all 0 except vid */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* VLAN_TABLE_FUNC1: all 0 except the valid bit */
    vtable_entry = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC1, VT_VALID, 1, vtable_entry);
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&vtable_entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

#ifdef HSL_STANDALONG
    /* mirror the new (empty) VLAN into the software shadow table */
    p_v_array[loc].vlan_entry.vid = vlan_id;
    p_v_array[loc].vlan_entry.mem_ports = 0;
    p_v_array[loc].vlan_entry.u_ports = 0;
    p_v_array[loc].vlan_entry.vid_pri_en = A_FALSE;
    p_v_array[loc].vlan_entry.vid_pri = 0;
    p_v_array[loc].active = A_TRUE;
#endif

    return SW_OK;
}
/* Read an ISIS ACL filter entry into reg[].
 * Issues the given read op (with filter index and start bit) through the
 * rule-function register, then fetches the result words from the data
 * registers that follow it. ISIS_FILTER_ACT_OP entries yield 3 words;
 * all other ops yield 5 words. reg[] must be large enough for that. */
static sw_error_t
_isis_filter_read(a_uint32_t dev_id, a_uint32_t reg[], a_uint32_t flt_idx,
                  a_uint32_t op)
{
    /* idx is one past the last data-word offset: 6 → 5 words, 4 → 3 words */
    a_uint32_t i, addr, data, idx = 6;
    sw_error_t rv;

    addr = ISIS_RULE_FUNC_ADDR;
    /* low 7 bits = filter index, bits [9:8] = op, bit 31 = start/busy.
     * NOTE(review): (0x1 << 31) shifts into the sign bit of int (UB);
     * 0x1UL would be safer. */
    data = (flt_idx & 0x7f) | (op << 8) | (0x1 << 10) | (0x1 << 31);
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&data), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (ISIS_FILTER_ACT_OP == op)
    {
        idx = 4;
    }

    /* data words live at successive 4-byte offsets after the op register */
    for (i = 1; i < idx; i++)
    {
        addr = ISIS_RULE_FUNC_ADDR + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(reg[i - 1])), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    return SW_OK;
}
/* Query the autonegotiation advertisement of a PHY-backed port.
 * *autoadv is zeroed first, then filled by the PHY driver. */
static sw_error_t
_athena_port_autoneg_adv_get(a_uint32_t dev_id, fal_port_t port_id,
                             a_uint32_t * autoadv)
{
    hsl_phy_ops_t *ops;
    a_uint32_t phy_addr;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    /* only ports that actually have a PHY are valid here */
    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(ops = hsl_phy_api_ops_get(dev_id));
    if (NULL == ops->phy_autoneg_adv_get)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_addr);
    SW_RTN_ON_ERROR(rv);

    *autoadv = 0;
    rv = ops->phy_autoneg_adv_get(dev_id, phy_addr, autoadv);
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
}
/* Append a complete VLAN entry (vid, members, priority) to Athena
 * hardware, committing a LOAD operation. When built standalone
 * (HSL_STANDALONG), also records it in the software shadow table,
 * rejecting duplicate vids and a full table. */
static sw_error_t
_athena_vlan_entry_append(a_uint32_t dev_id, const fal_vlan_t * vlan_entry)
{
    sw_error_t rv;
    a_uint32_t reg[2] = { 0 };
#ifdef HSL_STANDALONG
    /* loc: first free slot found; stays MAX_VLAN_ENTRY when table is full */
    a_int16_t i, loc = MAX_VLAN_ENTRY;
    v_array_t *p_v_array;
#endif

    HSL_DEV_ID_CHECK(dev_id);

    if ((vlan_entry->vid == 0) || (vlan_entry->vid > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

    if (A_FALSE == hsl_mports_prop_check(dev_id, vlan_entry->mem_ports,
                                         HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

#ifdef HSL_STANDALONG
    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    /* single pass: remember a free slot, fail on a duplicate vid */
    for (i = 0; i < MAX_VLAN_ENTRY; i++)
    {
        if (p_v_array[i].active == A_FALSE)
        {
            loc = i;
        }
        else if (p_v_array[i].vlan_entry.vid == vlan_entry->vid)
        {
            return SW_ALREADY_EXIST;
        }
    }

    if (loc == MAX_VLAN_ENTRY)
        return SW_FULL;
#endif

    rv = athena_vlan_sw_to_hw(vlan_entry, reg);
    SW_RTN_ON_ERROR(rv);

    /* FIX: "®[...]" was mojibake for "&reg[...]" (HTML-entity decoding) */
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg[0]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0,
                      (a_uint8_t *) (&reg[1]), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

#ifdef HSL_STANDALONG
    /* mirror the committed entry into the software shadow table */
    p_v_array[loc].vlan_entry = *vlan_entry;
    p_v_array[loc].active = A_TRUE;
#endif

    return SW_OK;
}
/* Iterate the Shiva FDB table starting from *iterator.
 * Copies the first valid entry at or after *iterator into 'entry' and
 * advances *iterator past it. Returns SW_NO_MORE when the table is
 * exhausted, SW_BAD_PTR / SW_BAD_PARAM on invalid arguments.
 * Each hardware entry occupies 16 bytes (index << 4) and is read as
 * three separate words. */
static sw_error_t
_shiva_fdb_iterate(a_uint32_t dev_id, a_uint32_t * iterator,
                   fal_fdb_entry_t * entry)
{
    a_uint32_t index, addr, data, tbl[3] = { 0 };
    sw_error_t rv;

    if ((NULL == iterator) || (NULL == entry))
    {
        return SW_BAD_PTR;
    }

    if (SHIVA_FDB_ENTRY_NUM == *iterator)
    {
        return SW_NO_MORE;
    }

    if (SHIVA_FDB_ENTRY_NUM < *iterator)
    {
        return SW_BAD_PARAM;
    }

    for (index = *iterator; index < SHIVA_FDB_ENTRY_NUM; index++)
    {
        /* word 2 holds the status field — read it first so empty slots
         * can be skipped with a single register access */
        addr = SHIVA_FDB_ENTRY_ADDR2 + (index << 4);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(tbl[2])), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        SW_GET_FIELD_BY_REG(FDB_TABLE_FUNC2, FDB_STATUS, data, tbl[2]);
        if (data)
        {
            /* valid entry: fetch the remaining two words and convert */
            addr = SHIVA_FDB_ENTRY_ADDR0 + (index << 4);
            HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                                  (a_uint8_t *) (&(tbl[0])), sizeof (a_uint32_t));
            SW_RTN_ON_ERROR(rv);

            addr = SHIVA_FDB_ENTRY_ADDR1 + (index << 4);
            HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                                  (a_uint8_t *) (&(tbl[1])), sizeof (a_uint32_t));
            SW_RTN_ON_ERROR(rv);

            _shiva_fdb_hw_to_sw(tbl, entry);
            break;
        }
    }

    /* loop ran off the end without finding a valid entry */
    if (SHIVA_FDB_ENTRY_NUM == index)
    {
        return SW_NO_MORE;
    }

    *iterator = index + 1;
    return SW_OK;
}
/* Update the member-port bitmap of an existing VLAN (standalone build
 * only). Untagged members (u_member) are not supported and must be 0.
 * Re-programs VLAN_TABLE_FUNC0 from the software shadow (preserving the
 * priority settings), writes the new member bitmap into
 * VLAN_TABLE_FUNC1, commits a LOAD, then updates the shadow table. */
static sw_error_t
_athena_vlan_member_update(a_uint32_t dev_id, a_uint32_t vlan_id,
                           fal_pbmp_t member, fal_pbmp_t u_member)
{
#ifdef HSL_STANDALONG
    sw_error_t rv;
    a_int16_t loc;
    a_uint32_t reg_tmp;
    v_array_t *p_v_array;
    fal_vlan_t *p_sw_vlan;

    HSL_DEV_ID_CHECK(dev_id);

    if ((vlan_id == 0) || (vlan_id > MAX_VLAN_ID))
        return SW_OUT_OF_RANGE;

    if (A_FALSE == hsl_mports_prop_check(dev_id, member, HSL_PP_INCL_CPU))
        return SW_BAD_PARAM;

    /* untagged members are not supported on this device */
    if (u_member != 0)
        return SW_BAD_PARAM;

    if ((p_v_array = p_vlan_table[dev_id]) == NULL)
        return SW_NOT_INITIALIZED;

    rv = athena_vlan_table_location(dev_id, vlan_id, &loc);
    SW_RTN_ON_ERROR(rv);

    p_sw_vlan = &p_v_array[loc].vlan_entry;

    /* set value for VLAN_TABLE_FUNC0, all 0 except vid and priority */
    reg_tmp = 0;
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VLAN_ID, vlan_id, reg_tmp);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI_EN,
                        (a_int32_t) p_sw_vlan->vid_pri_en, reg_tmp);
    SW_SET_REG_BY_FIELD(VLAN_TABLE_FUNC0, VT_PRI, p_sw_vlan->vid_pri, reg_tmp);

    /* FIX: "®_tmp" was mojibake for "&reg_tmp" (HTML-entity decoding) */
    HSL_REG_ENTRY_SET(rv, dev_id, VLAN_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg_tmp), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set vlan member for VLAN_TABLE_FUNC1 */
    HSL_REG_FIELD_SET(rv, dev_id, VLAN_TABLE_FUNC1, 0, VID_MEM,
                      (a_uint8_t *) (&member), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    rv = athena_vlan_commit(dev_id, VLAN_LOAD_ENTRY);
    SW_RTN_ON_ERROR(rv);

    /* keep the software shadow in sync with hardware */
    p_v_array[loc].vlan_entry.mem_ports = member;

    return SW_OK;
#else
    return SW_NOT_SUPPORTED;
#endif
}
/* Read the 802.1Q mode of a port. The 2-bit DOT1Q_MODE field maps
 * 0..3 → DISABLE / FALLBACK / CHECK / SECURE via a lookup table. */
static sw_error_t
_garuda_port_1qmode_get(a_uint32_t dev_id, fal_port_t port_id,
                        fal_pt_1qmode_t * pport_1qmode)
{
    sw_error_t rv;
    a_uint32_t regval = 0;
    fal_pt_1qmode_t retval[4] = { FAL_1Q_DISABLE, FAL_1Q_FALLBACK,
                                  FAL_1Q_CHECK, FAL_1Q_SECURE };

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(pport_1qmode);

    /* FIX: "®val" was mojibake for "&regval" (HTML-entity decoding) */
    HSL_REG_FIELD_GET(rv, dev_id, PORT_BASE_VLAN, port_id, DOT1Q_MODE,
                      (a_uint8_t *) (&regval), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* mask to 2 bits so the table index is always in range */
    *pport_1qmode = retval[regval & 0x3];

    return SW_OK;
}
/* Enable or disable the MIB "CPU keep" flag.
 * Only the exact values A_TRUE / A_FALSE are accepted. */
static sw_error_t
_dess_mib_cpukeep_set(a_uint32_t dev_id, a_bool_t enable)
{
    a_uint32_t field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == enable)
    {
        field = 0;
    }
    else if (A_TRUE == enable)
    {
        field = 1;
    }
    else
    {
        /* anything other than the two boolean constants is rejected */
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_SET(rv, dev_id, MIB_FUNC, 0, MIB_CPU_KEEP,
                      (a_uint8_t *) (&field), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    return rv;
}
/* Report whether flow-control force mode is active on a port.
 * FLOW_LINK_EN == 0 is reported as force mode enabled (the automatic
 * flow-control-follows-link behavior is off). */
static sw_error_t
_horus_port_flowctrl_forcemode_get(a_uint32_t dev_id, fal_port_t port_id,
                                   a_bool_t * enable)
{
    sw_error_t rv;
    a_uint32_t force, reg;

    if (A_TRUE != hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* FIX: "®" was mojibake for "&reg" (HTML-entity decoding) */
    HSL_REG_ENTRY_GET(rv, dev_id, PORT_STATUS, port_id,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(PORT_STATUS, FLOW_LINK_EN, force, reg);
    if (0 == force)
    {
        *enable = A_TRUE;
    }
    else
    {
        *enable = A_FALSE;
    }

    return SW_OK;
}
/* Look up a VLAN by id in the software shadow table (standalone build
 * only) and copy it into *p_vlan. Returns SW_NOT_SUPPORTED otherwise. */
static sw_error_t
_athena_vlan_find(a_uint32_t dev_id, a_uint32_t vlan_id, fal_vlan_t * p_vlan)
{
#ifdef HSL_STANDALONG
    v_array_t *tbl;
    a_int16_t idx;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if ((0 == vlan_id) || (vlan_id > MAX_VLAN_ID))
    {
        return SW_OUT_OF_RANGE;
    }

    tbl = p_vlan_table[dev_id];
    if (NULL == tbl)
    {
        return SW_NOT_INITIALIZED;
    }

    rv = athena_vlan_table_location(dev_id, vlan_id, &idx);
    SW_RTN_ON_ERROR(rv);

    *p_vlan = tbl[idx].vlan_entry;
    return SW_OK;
#else
    return SW_NOT_SUPPORTED;
#endif
}
/* Run cable diagnostics (CDT) on one MDI pair of a PHY-backed port,
 * delegating to the PHY driver's phy_cdt hook. */
static sw_error_t
_horus_port_cdt(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t mdi_pair,
                fal_cable_status_t *cable_status, a_uint32_t *cable_len)
{
    hsl_phy_ops_t *ops;
    a_uint32_t phy_addr = 0;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    /* CDT only makes sense on ports that have a PHY */
    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(ops = hsl_phy_api_ops_get(dev_id));
    if (NULL == ops->phy_cdt)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_addr);
    SW_RTN_ON_ERROR(rv);

    return ops->phy_cdt(dev_id, phy_addr, mdi_pair, cable_status, cable_len);
}
/* Query the current duplex mode of a PHY-backed port via the PHY driver. */
static sw_error_t
_horus_port_duplex_get(a_uint32_t dev_id, fal_port_t port_id,
                       fal_port_duplex_t * pduplex)
{
    hsl_phy_ops_t *ops;
    a_uint32_t phy_addr;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(ops = hsl_phy_api_ops_get(dev_id));
    if (NULL == ops->phy_duplex_get)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_addr);
    SW_RTN_ON_ERROR(rv);

    return ops->phy_duplex_get(dev_id, phy_addr, pduplex);
}
/* Report whether forced port-based VLAN (FORCE_PVLAN) is enabled on
 * the given port. */
static sw_error_t
_garuda_port_force_portvlan_get(a_uint32_t dev_id, fal_port_t port_id,
                                a_bool_t * enable)
{
    a_uint32_t field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_GET(rv, dev_id, PORT_BASE_VLAN, port_id, FORCE_PVLAN,
                      (a_uint8_t *) (&field), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* any non-zero field value counts as enabled */
    *enable = field ? A_TRUE : A_FALSE;

    return SW_OK;
}
/* Populate the ISIS device descriptor from the chip's DEVICE_ID field.
 * All variants share the same topology except the port count: the S17
 * reports 7 ports, everything else 6. (The original duplicated six
 * identical assignments in both branches; they are now set once.) */
static sw_error_t
isis_dev_init(a_uint32_t dev_id, hsl_init_mode cpu_mode)
{
    a_uint32_t entry;
    sw_error_t rv;
    hsl_dev_t *pdev = hsl_dev_ptr_get(dev_id);

    if (pdev == NULL)
        return SW_NOT_INITIALIZED;

    HSL_REG_FIELD_GET(rv, dev_id, MASK_CTL, 0, DEVICE_ID,
                      (a_uint8_t *) (&entry), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only the port count depends on the device id */
    pdev->nr_ports = (S17_DEVICE_ID == entry) ? 7 : 6;
    pdev->nr_phy = 5;
    pdev->cpu_port_nr = 0;
    pdev->nr_vlans = 4096;
    pdev->hw_vlan_query = A_TRUE;
    pdev->nr_queue = 6;
    pdev->cpu_mode = cpu_mode;

    return SW_OK;
}
/* Flush all FDB entries learned on one port. When FAL_FDB_DEL_STATIC
 * is set in 'flag', static entries are flushed as well. */
static sw_error_t
_shiva_fdb_del_by_port(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t flag)
{
    sw_error_t rv;
    a_uint32_t reg = 0;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* stage the target port number in the ATU function register */
    SW_SET_REG_BY_FIELD(ADDR_TABLE_FUNC0, AT_PORT_NUM, port_id, reg);

    /* FIX: "®" was mojibake for "&reg" (HTML-entity decoding) */
    HSL_REG_ENTRY_SET(rv, dev_id, ADDR_TABLE_FUNC0, 0,
                      (a_uint8_t *) (&reg), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (FAL_FDB_DEL_STATIC & flag)
    {
        rv = shiva_fdb_commit(dev_id, ARL_FLUSH_PORT_AND_STATIC);
    }
    else
    {
        rv = shiva_fdb_commit(dev_id, ARL_FLUSH_PORT_NO_STATIC);
    }

    return rv;
}
/* Force the speed of a PHY-backed port via the PHY driver.
 * Speeds above FAL_SPEED_100 are rejected. */
static sw_error_t
_athena_port_speed_set(a_uint32_t dev_id, fal_port_t port_id,
                       fal_port_speed_t speed)
{
    hsl_phy_ops_t *ops;
    a_uint32_t phy_addr = 0;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(ops = hsl_phy_api_ops_get(dev_id));
    if (NULL == ops->phy_speed_set)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_addr);
    SW_RTN_ON_ERROR(rv);

    /* this device only supports speeds up to 100M */
    if (speed > FAL_SPEED_100)
    {
        return SW_BAD_PARAM;
    }

    return ops->phy_speed_set(dev_id, phy_addr, speed);
}
/* Report whether ingress mirroring (ING_MIRROR_EN) is on for a port. */
static sw_error_t
_isisc_mirr_port_in_get(a_uint32_t dev_id, fal_port_t port_id,
                        a_bool_t * enable)
{
    a_uint32_t field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_GET(rv, dev_id, PORT_LOOKUP_CTL, port_id, ING_MIRROR_EN,
                      (a_uint8_t *) (&field), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only the exact value 1 reports as enabled */
    *enable = (1 == field) ? A_TRUE : A_FALSE;

    return SW_OK;
}
/* Query the hibernation state of a PHY-backed port via the PHY driver. */
static sw_error_t
_athena_port_hibernate_get(a_uint32_t dev_id, fal_port_t port_id,
                           a_bool_t *enable)
{
    hsl_phy_ops_t *ops;
    a_uint32_t phy_addr = 0;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_PHY) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    SW_RTN_ON_NULL(ops = hsl_phy_api_ops_get(dev_id));
    if (NULL == ops->phy_hibernation_get)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = hsl_port_prop_get_phyid(dev_id, port_id, &phy_addr);
    SW_RTN_ON_ERROR(rv);

    return ops->phy_hibernation_get(dev_id, phy_addr, enable);
}
/* Remove mem_port_id from port_id's port-based VLAN member bitmap
 * (read-modify-write of the PORT_VID_MEM field). */
static sw_error_t
_garuda_portvlan_member_del(a_uint32_t dev_id, fal_port_t port_id,
                            a_uint32_t mem_port_id)
{
    sw_error_t rv;
    a_uint32_t regval = 0;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    if (A_FALSE == hsl_port_prop_check(dev_id, mem_port_id, HSL_PP_INCL_CPU))
    {
        return SW_BAD_PARAM;
    }

    /* FIX: "®val" was mojibake for "&regval" (HTML-entity decoding) */
    HSL_REG_FIELD_GET(rv, dev_id, PORT_BASE_VLAN, port_id, PORT_VID_MEM,
                      (a_uint8_t *) (&regval), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* clear the member bit for mem_port_id */
    regval &= (~(0x1UL << mem_port_id));

    HSL_REG_FIELD_SET(rv, dev_id, PORT_BASE_VLAN, port_id, PORT_VID_MEM,
                      (a_uint8_t *) (&regval), sizeof (a_uint32_t));
    return rv;
}
/* Report whether IGMP/MLD snooping (IGMP_MLD_EN) is on for a port.
 * The CPU port is excluded from this query. */
static sw_error_t
_athena_port_igmps_status_get(a_uint32_t dev_id, fal_port_t port_id,
                              a_bool_t * enable)
{
    a_uint32_t field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_EXCL_CPU) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_GET(rv, dev_id, PORT_CTL, port_id, IGMP_MLD_EN,
                      (a_uint8_t *) (&field), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only the exact value 1 reports as enabled */
    *enable = (1 == field) ? A_TRUE : A_FALSE;

    return SW_OK;
}
/* Tear down ISISC per-device state: NAT helper (when built in and
 * active), ACL resources, port properties, and the saved configuration.
 * Safe to call when the device was never initialized (no-op). */
sw_error_t
isisc_cleanup(a_uint32_t dev_id)
{
    sw_error_t rv;

    if (isisc_cfg[dev_id])
    {
#if defined(IN_NAT_HELPER)
        /* FIX: removed the inner 'sw_error_t rv;' that shadowed the
         * outer rv declared above */
        if (isisc_nat_global_status)
        {
            ISISC_NAT_HELPER_CLEANUP(rv, dev_id);
            isisc_nat_global_status = 0;
        }
#endif
        /* NOTE(review): rv from the cleanup macros is not checked here,
         * matching the original best-effort behavior */
        ISISC_ACL_CLEANUP(rv, dev_id);
        SW_RTN_ON_ERROR(hsl_port_prop_cleanup_by_dev(dev_id));
        aos_mem_free(isisc_cfg[dev_id]);
        isisc_cfg[dev_id] = NULL;
    }

    return SW_OK;
}
/* Report whether multicast leaky mode (MUL_LEAKY_EN) is on for a port. */
static sw_error_t
_shiva_port_mc_leaky_get(a_uint32_t dev_id, fal_port_t port_id,
                         a_bool_t *enable)
{
    a_uint32_t field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (hsl_port_prop_check(dev_id, port_id, HSL_PP_INCL_CPU) != A_TRUE)
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_GET(rv, dev_id, PORT_BASE_VLAN, port_id, MUL_LEAKY_EN,
                      (a_uint8_t *) (&field), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* only the exact value 1 reports as enabled */
    *enable = (1 == field) ? A_TRUE : A_FALSE;

    return SW_OK;
}
/* Read the IGMP/MLD priority-queue assignment from QM_CTL.
 * When IGMP_PRI_EN is clear, reports disabled with queue 0. */
static sw_error_t
_shiva_igmp_mld_entry_queue_get(a_uint32_t dev_id, a_bool_t * enable,
                                a_uint32_t * queue)
{
    a_uint32_t reg_val, field;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    HSL_REG_ENTRY_GET(rv, dev_id, QM_CTL, 0,
                      (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    SW_GET_FIELD_BY_REG(QM_CTL, IGMP_PRI_EN, field, reg_val);
    if (!field)
    {
        /* priority queueing disabled: report queue 0 by convention */
        *enable = A_FALSE;
        *queue = 0;
        return SW_OK;
    }

    *enable = A_TRUE;
    SW_GET_FIELD_BY_REG(QM_CTL, IGMP_PRI, field, reg_val);
    *queue = field;

    return SW_OK;
}