/**
 * @brief Decode one software ACL rule from its hardware filter entries.
 * @details Gathers every in-use hardware filter slot bound to the pair
 *          (list_id, rule_id) into the per-device scratch array, then asks
 *          the hw-to-sw translator to rebuild the FAL rule description.
 * @param[in]  dev_id   device id
 * @param[in]  list_id  ACL list the rule belongs to
 * @param[in]  rule_id  rule index inside the list
 * @param[out] rule     decoded rule description
 * @return SW_OK, SW_NOT_SUPPORTED, SW_NOT_FOUND or translator error code
 */
static sw_error_t
_isis_acl_rule_query(a_uint32_t dev_id, a_uint32_t list_id,
                     a_uint32_t rule_id, fal_acl_rule_t * rule)
{
    isis_acl_rule_t *ent;
    a_uint32_t idx, matched = 0;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_MAX_LIST_ID < list_id)
    {
        return SW_NOT_SUPPORTED;
    }

    /* scratch buffer collects the matching filter entries contiguously */
    aos_mem_zero(hw_rule_tmp[dev_id],
                 ISIS_HW_RULE_TMP_CNT * sizeof (isis_acl_rule_t));

    for (idx = 0; idx < ISIS_MAX_FILTER; idx++)
    {
        ent = &(sw_rule_ent[dev_id][idx]);
        if (!(ENT_USED & ent->status))
        {
            continue;
        }
        if ((ent->list_id != list_id) || (ent->rule_id != rule_id))
        {
            continue;
        }
        aos_mem_copy(&(hw_rule_tmp[dev_id][matched]), ent,
                     sizeof (isis_acl_rule_t));
        matched++;
    }

    if (0 == matched)
    {
        return SW_NOT_FOUND;
    }

    aos_mem_zero(rule, sizeof (fal_acl_rule_t));
    return _isis_acl_rule_hw_to_sw(dev_id, rule, hw_rule_tmp[dev_id],
                                   0, matched);
}
/**
 * @brief Load one hardware filter entry (value, mask, action words) into a
 *        software hw_filter_t image.
 * @details Three mutually exclusive build-time strategies:
 *          - ISIS_SW_ENTRY: copy from the software mirror sw_filter_mem.
 *          - ISIS_HW_ENTRY: read the raw VLU/MSK/ACT register windows word
 *            by word (5+5+3 32-bit words per entry).
 *          - otherwise: use the indexed table-read operations.
 * @param[in]  dev_id  device id
 * @param[out] filter  destination filter image
 * @param[in]  flt_idx hardware filter slot index
 * @return SW_OK or register-access error code
 */
static sw_error_t
_isis_filter_up_to_sw(a_uint32_t dev_id, hw_filter_t * filter,
                      a_uint32_t flt_idx)
{
#ifdef ISIS_SW_ENTRY
    /* software mirror keeps entries packed back to back */
    a_uint8_t *tbl = sw_filter_mem + sizeof (hw_filter_t) * flt_idx;
    aos_mem_copy(filter, tbl, sizeof (hw_filter_t));
#else
#ifdef ISIS_HW_ENTRY
    sw_error_t rv;
    a_uint32_t i, base, addr;

    /* VLU/MSK entries are 32 bytes apart (<< 5); 5 words each */
    base = ISIS_FILTER_VLU_ADDR + (flt_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(filter->vlu[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    base = ISIS_FILTER_MSK_ADDR + (flt_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(filter->msk[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* ACT entries are 16 bytes apart (<< 4); 3 words each */
    base = ISIS_FILTER_ACT_ADDR + (flt_idx << 4);
    for (i = 0; i < 3; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(filter->act[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }
#else
    sw_error_t rv;

    /* indexed table-read path: one operation per section */
    rv = _isis_filter_read(dev_id, &(filter->vlu[0]), flt_idx,
                           ISIS_FILTER_VLU_OP);
    SW_RTN_ON_ERROR(rv);

    rv = _isis_filter_read(dev_id, &(filter->msk[0]), flt_idx,
                           ISIS_FILTER_MSK_OP);
    SW_RTN_ON_ERROR(rv);

    rv = _isis_filter_read(dev_id, &(filter->act[0]), flt_idx,
                           ISIS_FILTER_ACT_OP);
    SW_RTN_ON_ERROR(rv);
#endif
#endif
    return SW_OK;
}
/**
 * @brief Register the global PHY API operation table.
 * @details Copies the caller's ops table into slot 0 of the global table.
 *          Fixes: removed dead commented-out assignment; reject a NULL ops
 *          pointer instead of crashing inside the copy.
 * @param[in] phy_api_ops  PHY operation table to install (must not be NULL)
 * @return SW_OK or SW_BAD_PARAM
 */
sw_error_t
hsl_phy_api_ops_register(hsl_phy_ops_t * phy_api_ops)
{
    if (NULL == phy_api_ops)
    {
        return SW_BAD_PARAM;
    }

    aos_mem_copy(&global_phy_api_ops[0], phy_api_ops, sizeof(hsl_phy_ops_t));
    return SW_OK;
}
/**
 * @brief Push staged ACL rule changes down to hardware.
 * @details For every slot in [flt_idx, flt_idx + flt_nr) whose staged copy
 *          (sw_rule_tmp) differs from the committed copy (sw_rule_ent):
 *          write the staged filter to hardware when its RULE_TYP field is
 *          non-zero, otherwise invalidate the hardware slot; then commit the
 *          staged entry into sw_rule_ent.
 * @param[in] dev_id  device id
 * @param[in] flt_idx first filter slot to examine
 * @param[in] flt_nr  number of slots to examine
 */
static void
_isis_acl_rule_sync(a_uint32_t dev_id, a_uint32_t flt_idx, a_uint32_t flt_nr)
{
    a_uint32_t idx, rule_typ;
    isis_acl_rule_t *committed, *staged;

    for (idx = flt_idx; idx < (flt_idx + flt_nr); idx++)
    {
        committed = &(sw_rule_ent[dev_id][idx]);
        staged = &(sw_rule_tmp[dev_id][idx]);

        /* untouched slots need no hardware access */
        if (!aos_mem_cmp(committed, staged, sizeof (isis_acl_rule_t)))
        {
            continue;
        }

        SW_GET_FIELD_BY_REG(MAC_RUL_M4, RULE_TYP, rule_typ,
                            staged->filter.msk[4]);
        if (rule_typ)
        {
            _isis_filter_down_to_hw(dev_id, &(staged->filter), idx);
        }
        else
        {
            /* staged entry carries no rule type: disable the hw slot */
            _isis_filter_valid_set(dev_id, idx, 0);
        }

        aos_mem_copy(committed, staged, sizeof (isis_acl_rule_t));
    }
}
/**
 * @brief Initialize the HSL layer for one device.
 * @details Resets the device table entry, brings up the register access
 *          driver, optional user/kernel interface and API lock, dispatches
 *          to the chip-specific init selected at build time, and finally
 *          caches the init configuration in dev_ssdk_cfg for later queries.
 * @param[in] dev_id device id
 * @param[in] cfg    configuration for initialization
 * @return SW_OK or error code
 */
sw_error_t
hsl_dev_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    sw_error_t rv = SW_OK;

    if (SW_MAX_NR_DEV <= dev_id)
    {
        return SW_BAD_PARAM;
    }

    aos_mem_set(&dev_table[dev_id], 0, sizeof (hsl_dev_t));

    SW_RTN_ON_ERROR(sd_init(dev_id,cfg));

#ifdef UK_IF
    SW_RTN_ON_ERROR(sw_uk_init(cfg->nl_prot));
#endif

#if defined API_LOCK
    SW_RTN_ON_ERROR(hsl_api_lock_init());
#endif

    /* stays SW_INIT_ERROR when the requested chip was not compiled in */
    rv = SW_INIT_ERROR;
    switch (cfg->chip_type)
    {
    case CHIP_ATHENA:
#if defined ATHENA
        rv = athena_init(dev_id, cfg);
#endif
        break;
    case CHIP_GARUDA:
#if defined GARUDA
        rv = garuda_init(dev_id, cfg);
#endif
        break;
    case CHIP_SHIVA:
#if defined SHIVA
        rv = shiva_init(dev_id, cfg);
#endif
        break;
    case CHIP_HORUS:
#if defined HORUS
        rv = horus_init(dev_id, cfg);
#endif
        break;
    case CHIP_ISIS:
#if defined ISIS
        rv = isis_init(dev_id, cfg);
#endif
        break;
    case CHIP_ISISC:
        /* ISISC shares the ISIS init path */
#if defined ISISC
        rv = isis_init(dev_id, cfg);
#endif
        break;
    case CHIP_UNSPECIFIED:
        /* no chip given: use whichever single chip this build supports */
#if defined ATHENA
        rv = athena_init(dev_id, cfg);
#elif defined GARUDA
        rv = garuda_init(dev_id, cfg);
#elif defined SHIVA
        rv = shiva_init(dev_id, cfg);
#elif defined HORUS
        rv = horus_init(dev_id, cfg);
#elif defined ISIS
        rv = isis_init(dev_id, cfg);
#elif defined ISISC
        rv = isis_init(dev_id, cfg);
#endif
        break;
    default:
        return SW_BAD_PARAM;
    }
    SW_RTN_ON_ERROR(rv);

    /* cache the init configuration; allocated once, reused on re-init */
    if (NULL == dev_ssdk_cfg[dev_id])
    {
        dev_ssdk_cfg[dev_id] = aos_mem_alloc(sizeof (ssdk_init_cfg));
    }
    if (NULL == dev_ssdk_cfg[dev_id])
    {
        return SW_OUT_OF_MEM;
    }
    aos_mem_copy(dev_ssdk_cfg[dev_id], cfg, sizeof (ssdk_init_cfg));

#if defined UK_MINOR_DEV
    dev_ssdk_cfg[dev_id]->nl_prot = UK_MINOR_DEV;
#endif

    return rv;
}
/**
 * @brief Report SSDK build-time and runtime configuration for one device.
 * @details Copies the cached init configuration plus build/version/feature
 *          information into the caller's ssdk_cfg_t.
 *          Fix: the original dereferenced both ssdk_cfg and
 *          dev_ssdk_cfg[dev_id] unconditionally, crashing when called with a
 *          NULL output pointer or before hsl_dev_init populated the cache;
 *          both are now rejected with SW_BAD_PARAM.
 * @param[in]  dev_id   device id (assumed already validated by caller —
 *                      TODO confirm all call sites range-check dev_id)
 * @param[out] ssdk_cfg destination configuration summary
 * @return SW_OK or SW_BAD_PARAM
 */
sw_error_t
hsl_ssdk_cfg(a_uint32_t dev_id, ssdk_cfg_t *ssdk_cfg)
{
    /* FIX: guard both pointers before any dereference */
    if ((NULL == ssdk_cfg) || (NULL == dev_ssdk_cfg[dev_id]))
    {
        return SW_BAD_PARAM;
    }

    aos_mem_set(&(ssdk_cfg->init_cfg), 0, sizeof(ssdk_init_cfg));
    aos_mem_copy(&(ssdk_cfg->init_cfg), dev_ssdk_cfg[dev_id],
                 sizeof(ssdk_init_cfg));

#ifdef VERSION
    aos_mem_copy(ssdk_cfg->build_ver, VERSION, sizeof(VERSION));
#endif

#ifdef BUILD_DATE
    aos_mem_copy(ssdk_cfg->build_date, BUILD_DATE, sizeof(BUILD_DATE));
#endif

    /* translate the chip enum into a printable name */
    switch (dev_ssdk_cfg[dev_id]->chip_type)
    {
    case CHIP_ATHENA:
        aos_mem_copy(ssdk_cfg->chip_type, "athena", sizeof("athena"));
        break;
    case CHIP_GARUDA:
        aos_mem_copy(ssdk_cfg->chip_type, "garuda", sizeof("garuda"));
        break;
    case CHIP_SHIVA:
        aos_mem_copy(ssdk_cfg->chip_type, "shiva", sizeof("shiva"));
        break;
    case CHIP_HORUS:
        aos_mem_copy(ssdk_cfg->chip_type, "horus", sizeof("horus"));
        break;
    case CHIP_ISIS:
        aos_mem_copy(ssdk_cfg->chip_type, "isis", sizeof("isis"));
        break;
    case CHIP_ISISC:
        aos_mem_copy(ssdk_cfg->chip_type, "isisc", sizeof("isisc"));
        break;
    case CHIP_UNSPECIFIED:
        /* unspecified chip: report whichever chip this build supports */
#if defined ATHENA
        aos_mem_copy(ssdk_cfg->chip_type, "athena", sizeof("athena"));
#elif defined GARUDA
        aos_mem_copy(ssdk_cfg->chip_type, "garuda", sizeof("garuda"));
#elif defined SHIVA
        aos_mem_copy(ssdk_cfg->chip_type, "shiva", sizeof("shiva"));
#elif defined HORUS
        aos_mem_copy(ssdk_cfg->chip_type, "horus", sizeof("horus"));
#elif defined ISIS
        aos_mem_copy(ssdk_cfg->chip_type, "isis", sizeof("isis"));
#elif defined ISISC
        aos_mem_copy(ssdk_cfg->chip_type, "isisc", sizeof("isisc"));
#endif
        break;
    default:
        return SW_BAD_PARAM;
    }

#ifdef CPU
    aos_mem_copy(ssdk_cfg->cpu_type, CPU, sizeof(CPU));
#endif

#ifdef OS
    aos_mem_copy(ssdk_cfg->os_info, OS, sizeof(OS));
    /* append kernel version; -1 overwrites the NUL terminator of OS */
#if defined KVER26
    aos_mem_copy(ssdk_cfg->os_info+sizeof(OS)-1, " version 2.6",
                 sizeof(" version 2.6"));
#elif defined KVER24
    aos_mem_copy(ssdk_cfg->os_info+sizeof(OS)-1, " version 2.4",
                 sizeof(" version 2.4"));
#else
    aos_mem_copy(ssdk_cfg->os_info+sizeof(OS)-1, " version unknown",
                 sizeof(" version unknown"));
#endif
#endif

#ifdef HSL_STANDALONG
    ssdk_cfg->fal_mod = A_FALSE;
#else
    ssdk_cfg->fal_mod = A_TRUE;
#endif

#ifdef USER_MODE
    ssdk_cfg->kernel_mode = A_FALSE;
#else
    ssdk_cfg->kernel_mode = A_TRUE;
#endif

#ifdef UK_IF
    ssdk_cfg->uk_if = A_TRUE;
#else
    ssdk_cfg->uk_if = A_FALSE;
#endif

    /* report every feature module compiled into this build */
#ifdef IN_ACL
    ssdk_cfg->features.in_acl = A_TRUE;
#endif
#ifdef IN_FDB
    ssdk_cfg->features.in_fdb = A_TRUE;
#endif
#ifdef IN_IGMP
    ssdk_cfg->features.in_igmp = A_TRUE;
#endif
#ifdef IN_LEAKY
    ssdk_cfg->features.in_leaky = A_TRUE;
#endif
#ifdef IN_LED
    ssdk_cfg->features.in_led = A_TRUE;
#endif
#ifdef IN_MIB
    ssdk_cfg->features.in_mib = A_TRUE;
#endif
#ifdef IN_MIRROR
    ssdk_cfg->features.in_mirror = A_TRUE;
#endif
#ifdef IN_MISC
    ssdk_cfg->features.in_misc = A_TRUE;
#endif
#ifdef IN_PORTCONTROL
    ssdk_cfg->features.in_portcontrol = A_TRUE;
#endif
#ifdef IN_PORTVLAN
    ssdk_cfg->features.in_portvlan = A_TRUE;
#endif
#ifdef IN_QOS
    ssdk_cfg->features.in_qos = A_TRUE;
#endif
#ifdef IN_RATE
    ssdk_cfg->features.in_rate = A_TRUE;
#endif
#ifdef IN_STP
    ssdk_cfg->features.in_stp = A_TRUE;
#endif
#ifdef IN_VLAN
    ssdk_cfg->features.in_vlan = A_TRUE;
#endif
#ifdef IN_REDUCED_ACL
    ssdk_cfg->features.in_reduced_acl = A_TRUE;
#endif
#ifdef IN_IP
    ssdk_cfg->features.in_ip = A_TRUE;
#endif
#ifdef IN_NAT
    ssdk_cfg->features.in_nat = A_TRUE;
#endif
#ifdef IN_COSMAP
    ssdk_cfg->features.in_cosmap = A_TRUE;
#endif
#ifdef IN_SEC
    ssdk_cfg->features.in_sec = A_TRUE;
#endif
#ifdef IN_TRUNK
    ssdk_cfg->features.in_trunk = A_TRUE;
#endif
#ifdef IN_NAT_HELPER
    ssdk_cfg->features.in_nathelper= A_TRUE;
#endif
#ifdef IN_INTERFACECONTROL
    ssdk_cfg->features.in_interfacectrl= A_TRUE;
#endif

    return SW_OK;
}
/**
 * @brief Init hsl layer for the ISIS chip.
 * @details Caches the init configuration, brings up register access and the
 *          device descriptor, then (except in pure kernel-module/user-mode
 *          builds) registers port properties and every compiled-in feature
 *          module before programming hardware defaults.
 * @param[in] dev_id device id
 * @param[in] cfg configuration for initialization
 * @return SW_OK or error code
 */
sw_error_t
isis_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    HSL_DEV_ID_CHECK(dev_id);

    /* cache the configuration; allocated once, reused on re-init */
    if (NULL == isis_cfg[dev_id])
    {
        isis_cfg[dev_id] = aos_mem_alloc(sizeof (ssdk_init_cfg));
    }
    if (NULL == isis_cfg[dev_id])
    {
        return SW_OUT_OF_MEM;
    }
    aos_mem_copy(isis_cfg[dev_id], cfg, sizeof (ssdk_init_cfg));

    SW_RTN_ON_ERROR(isis_reg_access_init(dev_id, cfg->reg_mode));
    SW_RTN_ON_ERROR(isis_dev_init(dev_id, cfg->cpu_mode));

#if !(defined(KERNEL_MODULE) && defined(USER_MODE))
    {
        /* rv is written and checked inside the ISIS_*_INIT macros */
        sw_error_t rv;

        SW_RTN_ON_ERROR(hsl_port_prop_init());
        SW_RTN_ON_ERROR(hsl_port_prop_init_by_dev(dev_id));
        SW_RTN_ON_ERROR(isis_portproperty_init(dev_id, cfg->cpu_mode));

        ISIS_MIB_INIT(rv, dev_id);
        ISIS_PORT_CTRL_INIT(rv, dev_id);
        ISIS_PORTVLAN_INIT(rv, dev_id);
        ISIS_VLAN_INIT(rv, dev_id);
        ISIS_FDB_INIT(rv, dev_id);
        ISIS_QOS_INIT(rv, dev_id);
        ISIS_STP_INIT(rv, dev_id);
        ISIS_MIRR_INIT(rv, dev_id);
        ISIS_RATE_INIT(rv, dev_id);
        ISIS_MISC_INIT(rv, dev_id);
        ISIS_LEAKY_INIT(rv, dev_id);
        ISIS_IGMP_INIT(rv, dev_id);
        ISIS_ACL_INIT(rv, dev_id);
        ISIS_LED_INIT(rv, dev_id);
        ISIS_COSMAP_INIT(rv, dev_id);
        ISIS_IP_INIT(rv, dev_id);
        ISIS_NAT_INIT(rv, dev_id);
        ISIS_TRUNK_INIT(rv, dev_id);
        ISIS_SEC_INIT(rv, dev_id);
        ISIS_INTERFACE_CTRL_INIT(rv, dev_id);

        /* hook chip-specific reset/cleanup into the device API table */
        {
            hsl_api_t *p_api;
            SW_RTN_ON_NULL(p_api = hsl_api_ptr_get(dev_id));
            p_api->dev_reset = isis_reset;
            p_api->dev_clean = isis_cleanup;
        }

        SW_RTN_ON_ERROR(isis_hw_init(dev_id, cfg));

#if defined(IN_NAT_HELPER)
        ISIS_NAT_HELPER_INIT(rv, dev_id);
#endif
    }
#endif
    return SW_OK;
}
/**
 * @brief Delete a contiguous range of rules from an ACL list and compact
 *        the surviving filter entries.
 * @param[in] dev_id  device id
 * @param[in] list_id ACL list to delete from
 * @param[in] rule_id first rule id to delete
 * @param[in] rule_nr number of consecutive rules to delete
 * @return SW_OK or error code
 */
static sw_error_t
_isis_acl_rule_delete(a_uint32_t dev_id, a_uint32_t list_id,
                      a_uint32_t rule_id, a_uint32_t rule_nr)
{
    isis_acl_rule_t *sw_rule;
    isis_acl_list_t *sw_list;
    a_uint32_t i, flt_idx = 0, src_idx, dst_idx, del_nr = 0, flt_nr = 0;
    sw_error_t rv;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_MAX_LIST_ID < list_id)
    {
        return SW_NOT_SUPPORTED;
    }

    sw_list = _isis_acl_list_loc(dev_id, list_id);
    if (NULL == sw_list)
    {
        return SW_NOT_FOUND;
    }

    /* the whole range [rule_id, rule_id + rule_nr) must exist */
    if (sw_list->rule_nr < (rule_id + rule_nr))
    {
        return SW_BAD_PARAM;
    }

    aos_mem_zero(hw_rule_tmp[dev_id],
                 ISIS_HW_RULE_TMP_CNT * sizeof (isis_acl_rule_t));
    aos_mem_zero(sw_rule_tmp[dev_id],
                 ISIS_MAX_FILTER * sizeof (isis_acl_rule_t));

    /* walk all filter slots belonging to this list; keep survivors in
       hw_rule_tmp (offset by flt_idx, the list's first slot) and count
       the entries being removed */
    for (i = 0; i < ISIS_MAX_FILTER; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][i]);
        if ((ENT_USED & sw_rule->status) && (sw_rule->list_id == list_id))
        {
            if (!flt_nr)
            {
                /* first slot occupied by this list */
                flt_idx = i;
            }
            if ((sw_rule->rule_id >= rule_id)
                && (sw_rule->rule_id < (rule_id + rule_nr)))
            {
                del_nr++;
            }
            else
            {
                /* NOTE(review): survivors land at flt_idx + flt_nr while the
                   search loop below scans hw_rule_tmp from index 0 with bound
                   flt_nr — the index bases look inconsistent; verify against
                   _isis_acl_list_insert's expectations */
                aos_mem_copy(&(hw_rule_tmp[dev_id][flt_idx + flt_nr]),
                             sw_rule, sizeof (isis_acl_rule_t));
            }
            flt_nr++;
        }
    }

    if (!del_nr)
    {
        return SW_NOT_FOUND;
    }

    _isis_acl_sw_rule_dump("hw rule before del", hw_rule_tmp[dev_id]);

    /* locate the first surviving entry in the scratch buffer */
    for (i = 0; i < flt_nr; i++)
    {
        sw_rule = &(hw_rule_tmp[dev_id][i]);
        if (ENT_USED & sw_rule->status)
        {
            break;
        }
    }

    /* survivors exist: re-pack them starting at the list's first slot */
    if (i != flt_nr)
    {
        src_idx = flt_idx + i;
        dst_idx = flt_idx;
        rv = _isis_acl_list_insert(dev_id, &src_idx, &dst_idx,
                                   hw_rule_tmp[dev_id], sw_rule_tmp[dev_id]);
        SW_RTN_ON_ERROR(rv);
    }

    /* push the staged layout to hardware and commit it */
    _isis_acl_rule_sync(dev_id, flt_idx, flt_nr);

    sw_list->rule_nr -= rule_nr;
    _isis_acl_sw_rule_dump("sw rule after del", sw_rule_ent[dev_id]);
    return SW_OK;
}
/**
 * @brief Append rules to the tail of an existing ACL list.
 * @details Collects the list's current filter entries, translates the new
 *          FAL rules into additional hardware filters, then relocates the
 *          enlarged list into a free filter block (falling back to a full
 *          reorder of all lists when no single block is large enough) and
 *          syncs the result to hardware.
 * @param[in] dev_id  device id
 * @param[in] list_id ACL list to extend
 * @param[in] rule_id id of the first new rule; must equal the list's
 *                    current rule count (append-only)
 * @param[in] rule_nr number of rules to add
 * @param[in] rule    array of rule_nr rule descriptions
 * @return SW_OK or error code
 */
static sw_error_t
_isis_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id,
                   a_uint32_t rule_nr, fal_acl_rule_t * rule)
{
    sw_error_t rv;
    isis_acl_list_t *sw_list;
    isis_acl_rule_t *sw_rule;
    a_uint32_t i, free_flt_nr, old_flt_nr, old_flt_idx, new_flt_nr, bind_pts;

    HSL_DEV_ID_CHECK(dev_id);

    if (ISIS_MAX_LIST_ID < list_id)
    {
        return SW_NOT_SUPPORTED;
    }

    if ((0 == rule_nr) || (NULL == rule))
    {
        return SW_BAD_PARAM;
    }

    sw_list = _isis_acl_list_loc(dev_id, list_id);
    if (NULL == sw_list)
    {
        return SW_NOT_FOUND;
    }

    /* only appending at the current tail is supported */
    if (rule_id != sw_list->rule_nr)
    {
        return SW_BAD_PARAM;
    }

    old_flt_idx = 0;
    old_flt_nr = 0;
    free_flt_nr = 0;
    aos_mem_zero(hw_rule_tmp[dev_id],
                 ISIS_HW_RULE_TMP_CNT * sizeof (isis_acl_rule_t));
    aos_mem_zero(sw_rule_tmp[dev_id],
                 ISIS_MAX_FILTER * sizeof (isis_acl_rule_t));

    /* gather this list's current entries (packed from index 0 of
       hw_rule_tmp) and count free filter slots */
    for (i = 0; i < ISIS_MAX_FILTER; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][i]);
        if (ENT_USED & sw_rule->status)
        {
            if (sw_rule->list_id == sw_list->list_id)
            {
                aos_mem_copy(&(hw_rule_tmp[dev_id][old_flt_nr]), sw_rule,
                             sizeof (isis_acl_rule_t));
                if (!old_flt_nr)
                {
                    old_flt_idx = i;
                }
                old_flt_nr++;
            }
        }
        else
        {
            free_flt_nr++;
        }
    }

    if (!free_flt_nr)
    {
        return SW_NO_RESOURCE;
    }

    /* parse rule entry and alloc rule resource */
    new_flt_nr = old_flt_nr;
    for (i = 0; i < rule_nr; i++)
    {
        rv = _isis_acl_rule_sw_to_hw(dev_id, &rule[i], hw_rule_tmp[dev_id],
                                     &new_flt_nr);
        SW_RTN_ON_ERROR(rv);
    }

    if (free_flt_nr < (new_flt_nr - old_flt_nr))
    {
        return SW_NO_RESOURCE;
    }

    /* tag the newly generated filters with the list's identity and bind
       their source-port field to the list's bound ports */
    for (i = old_flt_nr; i < new_flt_nr; i++)
    {
        hw_rule_tmp[dev_id][i].status |= ENT_USED;
        hw_rule_tmp[dev_id][i].list_id = sw_list->list_id;
        hw_rule_tmp[dev_id][i].list_pri = sw_list->list_pri;
        bind_pts = sw_list->bind_pts;
        SW_SET_REG_BY_FIELD(MAC_RUL_V4, SRC_PT, bind_pts,
                            (hw_rule_tmp[dev_id][i].filter.vlu[4]));
    }

    /* temporarily mark the list's old slots as free (ENT_TMP) so the
       allocator may reuse them for the relocated list */
    for (i = 0; i < old_flt_nr; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][old_flt_idx + i]);
        sw_rule->status &= (~ENT_USED);
        sw_rule->status |= (ENT_TMP);
    }

    rv = _isis_acl_rule_alloc(dev_id, sw_list, new_flt_nr);
    if (SW_OK != rv)
    {
        /* no single free block big enough: reorder every list instead */
        aos_mem_zero(sw_rule_tmp[dev_id],
                     ISIS_MAX_FILTER * sizeof (isis_acl_rule_t));
        rv = _isis_acl_rule_reorder(dev_id, sw_list);
    }

    /* undo the temporary ENT_TMP marking regardless of outcome */
    for (i = 0; i < old_flt_nr; i++)
    {
        sw_rule = &(sw_rule_ent[dev_id][i + old_flt_idx]);
        sw_rule->status |= (ENT_USED);
        sw_rule->status &= (~ENT_TMP);
    }
    SW_RTN_ON_ERROR(rv);

    /* commit the staged layout across the whole filter table */
    _isis_acl_rule_sync(dev_id, 0, ISIS_MAX_FILTER);

    sw_list->rule_nr += rule_nr;
    _isis_acl_sw_rule_dump("sw rule after add", sw_rule_ent[dev_id]);
    return SW_OK;
}
/**
 * @brief Find a free filter block for a list and stage its relocation.
 * @details Determines the index window where the list may live based on
 *          list priority ordering, finds the largest run of free slots in
 *          that window, then stages (in sw_rule_tmp) the existing entries
 *          plus this list's entries inserted at the chosen spot.
 * @param[in] dev_id    device id
 * @param[in] sw_list   list being (re)placed
 * @param[in] filter_nr total number of filter entries the list needs
 * @return SW_OK or SW_NO_RESOURCE
 */
static sw_error_t
_isis_acl_rule_alloc(a_uint32_t dev_id, isis_acl_list_t * sw_list,
                     a_uint32_t filter_nr)
{
    a_uint32_t free_flt_nr, load_idx, begin_idx, start_idx, end_idx, i;
    a_uint32_t largest_nr, largest_idx;
    sw_error_t rv;

    /* calculate the proper location, [start_idx, end_idx):
       lower-priority lists must precede, higher-priority must follow */
    start_idx = 0;
    end_idx = ISIS_MAX_FILTER;
    for (i = 0; i < ISIS_MAX_FILTER; i++)
    {
        if (ENT_USED & sw_rule_ent[dev_id][i].status)
        {
            if (sw_rule_ent[dev_id][i].list_pri < sw_list->list_pri)
            {
                start_idx = i + 1;
            }
            else if (sw_rule_ent[dev_id][i].list_pri > sw_list->list_pri)
            {
                end_idx = i;
                break;
            }
        }
    }

    /* find the largest free filter block inside the window */
    largest_nr = 0;
    largest_idx = 0;
    free_flt_nr = 0;
    begin_idx = start_idx;
    for (i = start_idx; i < end_idx; i++)
    {
        if (!(ENT_USED & sw_rule_ent[dev_id][i].status))
        {
            free_flt_nr++;
        }
        else
        {
            if (free_flt_nr > largest_nr)
            {
                largest_nr = free_flt_nr;
                largest_idx = begin_idx;
            }
            free_flt_nr = 0;
            begin_idx = i + 1;
        }
    }
    /* account for a free run that extends to the end of the window */
    if (free_flt_nr > largest_nr)
    {
        largest_nr = free_flt_nr;
        largest_idx = begin_idx;
    }

    /* NOTE(review): accepts filter_nr == largest_nr + 1 — presumably the
       insert step can absorb one slot via its even-alignment handling, but
       this looks like a possible off-by-one; verify against
       _isis_acl_list_insert */
    if ((!largest_nr) || ((largest_nr + 1) < filter_nr))
    {
        return SW_NO_RESOURCE;
    }

    /* stage the current table, then insert this list's entries (packed in
       hw_rule_tmp starting at 0) at the chosen free block */
    for (i = 0; i < ISIS_MAX_FILTER; i++)
    {
        if (ENT_USED & sw_rule_ent[dev_id][i].status)
        {
            aos_mem_copy(&(sw_rule_tmp[dev_id][i]),
                         &(sw_rule_ent[dev_id][i]),
                         sizeof (isis_acl_rule_t));
        }
    }

    begin_idx = 0;
    load_idx = largest_idx;
    rv = _isis_acl_list_insert(dev_id, &begin_idx, &load_idx,
                               hw_rule_tmp[dev_id], sw_rule_tmp[dev_id]);
    return rv;
}
/**
 * @brief Copy one list's filter entries from src_rule into dst_rule starting
 *        at *dst_idx, renumbering rule ids and inserting padding entries.
 * @details Walks src_rule from *src_idx while entries stay in the same list,
 *          skipping entries whose RULE_TYP mask field is zero. Entries whose
 *          RULE_VALID field marks a multi-filter rule start (FLT_START)
 *          apparently must land on an even destination slot, and a rule end
 *          (FLT_END) on an odd one — padding entries (metadata only, no
 *          filter copy) are emitted to keep that alignment.
 *          NOTE(review): the even/odd constraint presumably reflects the
 *          hardware pairing of filter slots — confirm against the ISIS
 *          datasheet.
 * @param[in,out] src_idx in: first source entry; out: first entry NOT copied
 * @param[in,out] dst_idx in: first destination slot; out: next free slot
 * @param[in]     src_rule source entry array
 * @param[in,out] dst_rule destination entry array
 * @return SW_OK or SW_NO_RESOURCE when a needed destination slot is
 *         occupied or out of range
 */
static sw_error_t
_isis_acl_list_insert(a_uint32_t dev_id, a_uint32_t * src_idx,
                      a_uint32_t * dst_idx, isis_acl_rule_t * src_rule,
                      isis_acl_rule_t * dst_rule)
{
    a_uint32_t i, data, rule_id, list_id, list_pri;

    rule_id = 0;
    list_id = src_rule[*src_idx].list_id;
    list_pri = src_rule[*src_idx].list_pri;

    for (i = *src_idx; i < ISIS_MAX_FILTER; i++)
    {
        /* stop at the first unused entry or at the next list */
        if (!(ENT_USED & src_rule[i].status))
        {
            break;
        }
        if (src_rule[i].list_id != list_id)
        {
            break;
        }

        /* entries without a rule type carry nothing to place */
        SW_GET_FIELD_BY_REG(MAC_RUL_M4, RULE_TYP, data,
                            src_rule[i].filter.msk[4]);
        if (!data)
        {
            continue;
        }

        if (ISIS_MAX_FILTER <= *dst_idx)
        {
            return SW_NO_RESOURCE;
        }
        if (ENT_USED & dst_rule[*dst_idx].status)
        {
            return SW_NO_RESOURCE;
        }

        SW_GET_FIELD_BY_REG(MAC_RUL_M4, RULE_VALID, data,
                            src_rule[i].filter.msk[4]);
        if ((FLT_START == data) && (*dst_idx % 2))
        {
            /* rule start must sit on an even slot: pad the odd slot.
               The pad is tagged with the previous rule's id (rule_id - 1);
               on the very first copied entry i == *src_idx so no pad
               metadata is written */
            if (*src_idx != i)
            {
                dst_rule[*dst_idx].list_id = list_id;
                dst_rule[*dst_idx].list_pri = list_pri;
                dst_rule[*dst_idx].rule_id = rule_id - 1;
                dst_rule[*dst_idx].status |= ENT_USED;
            }
            (*dst_idx)++;
            if (ISIS_MAX_FILTER <= *dst_idx)
            {
                return SW_NO_RESOURCE;
            }
            if (ENT_USED & dst_rule[*dst_idx].status)
            {
                return SW_NO_RESOURCE;
            }
        }

        /* place the entry itself */
        aos_mem_copy(&(dst_rule[*dst_idx].filter), &(src_rule[i].filter),
                     sizeof (hw_filter_t));
        dst_rule[*dst_idx].list_id = list_id;
        dst_rule[*dst_idx].list_pri = list_pri;
        dst_rule[*dst_idx].rule_id = rule_id;
        dst_rule[*dst_idx].status |= ENT_USED;
        (*dst_idx)++;

        /* a rule end on an even next-slot leaves the odd partner slot
           unusable: claim it with a metadata-only pad */
        if ((FLT_END == data) && (*dst_idx % 2))
        {
            if (ISIS_MAX_FILTER > *dst_idx)
            {
                dst_rule[*dst_idx].list_id = list_id;
                dst_rule[*dst_idx].list_pri = list_pri;
                dst_rule[*dst_idx].rule_id = rule_id;
                dst_rule[*dst_idx].status |= ENT_USED;
                (*dst_idx)++;
            }
        }

        /* a complete rule has been emitted: advance the rule id */
        if ((FLT_END == data) || (FLT_STARTEND == data))
        {
            rule_id++;
        }
    }

    *src_idx = i;
    return SW_OK;
}
/**
 * @brief Init hsl layer for the Garuda chip.
 * @details Caches the init configuration (one allocation holding the
 *          ssdk_init_cfg followed by the garuda-specific config), brings up
 *          register access and the device descriptor, performs the MDIO
 *          soft-reset sequence, then registers port properties and every
 *          compiled-in feature module.
 *          Fix: the original wrote garuda_cfg[dev_id]->chip_spec_cfg BEFORE
 *          checking the allocation result — a NULL-pointer dereference when
 *          aos_mem_alloc fails. The check now precedes any dereference.
 * @param[in] dev_id device id
 * @param[in] cfg configuration for initialization
 * @return SW_OK or error code
 */
sw_error_t
garuda_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    a_uint8_t *p_mem;

    HSL_DEV_ID_CHECK(dev_id);

    p_mem = (a_uint8_t *)garuda_cfg[dev_id];
    if (NULL == p_mem)
    {
        p_mem = aos_mem_alloc(sizeof (ssdk_init_cfg)
                              + sizeof(garuda_init_spec_cfg));
        if (NULL == p_mem)
        {
            /* FIX: bail out before touching the (NULL) block */
            return SW_OUT_OF_MEM;
        }
        garuda_cfg[dev_id] = (ssdk_init_cfg *)p_mem;
        garuda_cfg[dev_id]->chip_spec_cfg =
            (garuda_init_spec_cfg *) (p_mem + sizeof (ssdk_init_cfg));
    }

    aos_mem_copy(garuda_cfg[dev_id]->chip_spec_cfg, cfg->chip_spec_cfg,
                 sizeof (garuda_init_spec_cfg));
    aos_mem_copy(garuda_cfg[dev_id], cfg, sizeof (ssdk_init_cfg));
    /* the whole-struct copy above clobbered chip_spec_cfg with the caller's
       pointer; restore it to the trailing region of our own allocation */
    garuda_cfg[dev_id]->chip_spec_cfg =
        (garuda_init_spec_cfg *) (p_mem + sizeof (ssdk_init_cfg));

    SW_RTN_ON_ERROR(garuda_reg_access_init(dev_id, cfg->reg_mode));
    SW_RTN_ON_ERROR(garuda_dev_init(dev_id, cfg->cpu_mode));

#if !(defined(KERNEL_MODULE) && defined(USER_MODE))
    {
        a_uint32_t i, entry;
        sw_error_t rv;

        if(HSL_MDIO == cfg->reg_mode)
        {
            SW_RTN_ON_ERROR(garuda_bist_test(dev_id));

            /* request a soft reset, then poll (up to 16 x 10ms) for the
               SOFT_RST bit to self-clear */
            entry = 0x1;
            HSL_REG_FIELD_SET(rv, dev_id, MASK_CTL, 0, SOFT_RST,
                              (a_uint8_t *) (&entry), sizeof (a_uint32_t));
            SW_RTN_ON_ERROR(rv);

            i = 0x10;
            do
            {
                HSL_REG_FIELD_GET(rv, dev_id, MASK_CTL, 0, SOFT_RST,
                                  (a_uint8_t *) (&entry),
                                  sizeof (a_uint32_t));
                SW_RTN_ON_ERROR(rv);
                aos_mdelay(10);
            }
            while (entry && --i);

            if (0 == i)
            {
                return SW_INIT_ERROR;
            }
        }

        SW_RTN_ON_ERROR(hsl_port_prop_init());
        SW_RTN_ON_ERROR(hsl_port_prop_init_by_dev(dev_id));
        SW_RTN_ON_ERROR(garuda_portproperty_init(dev_id, cfg->cpu_mode));

        /* rv is written and checked inside the GARUDA_*_INIT macros */
        GARUDA_MIB_INIT(rv, dev_id);
        GARUDA_PORT_CTRL_INIT(rv, dev_id);
        GARUDA_PORTVLAN_INIT(rv, dev_id);
        GARUDA_VLAN_INIT(rv, dev_id);
        GARUDA_FDB_INIT(rv, dev_id);
        GARUDA_QOS_INIT(rv, dev_id);
        GARUDA_STP_INIT(rv, dev_id);
        GARUDA_MIRR_INIT(rv, dev_id);
        GARUDA_RATE_INIT(rv, dev_id);
        GARUDA_MISC_INIT(rv, dev_id);
        GARUDA_LEAKY_INIT(rv, dev_id);
        GARUDA_IGMP_INIT(rv, dev_id);
        GARUDA_ACL_INIT(rv, dev_id);
        GARUDA_LED_INIT(rv, dev_id);

        /* hook chip-specific reset/cleanup into the device API table */
        {
            hsl_api_t *p_api;
            SW_RTN_ON_NULL(p_api = hsl_api_ptr_get(dev_id));
            p_api->dev_reset = garuda_reset;
            p_api->dev_clean = garuda_cleanup;
        }

        if(cfg->reg_mode == HSL_MDIO)
        {
            SW_RTN_ON_ERROR(garuda_hw_init(dev_id, cfg));
        }
    }
#endif
    return SW_OK;
}
/**
 * @brief Init hsl layer for the Horus chip.
 * @details Caches the init configuration, brings up register access and the
 *          device descriptor, performs BIST and the soft-reset sequence,
 *          then registers port properties and every compiled-in feature
 *          module before programming hardware defaults.
 * @param[in] dev_id device id
 * @param[in] cfg configuration for initialization
 * @return SW_OK or error code
 */
sw_error_t
horus_init(a_uint32_t dev_id, ssdk_init_cfg *cfg)
{
    HSL_DEV_ID_CHECK(dev_id);

    /* cache the configuration; allocated once, reused on re-init */
    if (NULL == horus_cfg[dev_id])
    {
        horus_cfg[dev_id] = aos_mem_alloc(sizeof (ssdk_init_cfg));
    }
    if (NULL == horus_cfg[dev_id])
    {
        return SW_OUT_OF_MEM;
    }
    aos_mem_copy(horus_cfg[dev_id], cfg, sizeof (ssdk_init_cfg));

    SW_RTN_ON_ERROR(horus_reg_access_init(dev_id, cfg->reg_mode));
    SW_RTN_ON_ERROR(horus_dev_init(dev_id, cfg->cpu_mode));

#if !(defined(KERNEL_MODULE) && defined(USER_MODE))
    {
        a_uint32_t retry, reg_val;
        sw_error_t rv;

        SW_RTN_ON_ERROR(horus_bist_test(dev_id));

        /* request a soft reset, then poll (up to 16 x 10ms) for the
           SOFT_RST bit to self-clear */
        reg_val = 0x1;
        HSL_REG_FIELD_SET(rv, dev_id, MASK_CTL, 0, SOFT_RST,
                          (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);

        retry = 0x10;
        do
        {
            HSL_REG_FIELD_GET(rv, dev_id, MASK_CTL, 0, SOFT_RST,
                              (a_uint8_t *) (&reg_val), sizeof (a_uint32_t));
            SW_RTN_ON_ERROR(rv);
            aos_mdelay(10);
        }
        while (reg_val && --retry);

        if (0 == retry)
        {
            return SW_INIT_ERROR;
        }

        SW_RTN_ON_ERROR(hsl_port_prop_init());
        SW_RTN_ON_ERROR(hsl_port_prop_init_by_dev(dev_id));
        SW_RTN_ON_ERROR(horus_portproperty_init(dev_id, cfg->cpu_mode));

        /* rv is written and checked inside the HORUS_*_INIT macros */
        HORUS_MIB_INIT(rv, dev_id);
        HORUS_PORT_CTRL_INIT(rv, dev_id);
        HORUS_PORTVLAN_INIT(rv, dev_id);
        HORUS_VLAN_INIT(rv, dev_id);
        HORUS_FDB_INIT(rv, dev_id);
        HORUS_QOS_INIT(rv, dev_id);
        HORUS_STP_INIT(rv, dev_id);
        HORUS_MIRR_INIT(rv, dev_id);
        HORUS_RATE_INIT(rv, dev_id);
        HORUS_MISC_INIT(rv, dev_id);
        HORUS_LEAKY_INIT(rv, dev_id);
        HORUS_IGMP_INIT(rv, dev_id);
        HORUS_LED_INIT(rv, dev_id);

        /* hook chip-specific reset/cleanup into the device API table */
        {
            hsl_api_t *p_api;
            SW_RTN_ON_NULL(p_api = hsl_api_ptr_get(dev_id));
            p_api->dev_reset = horus_reset;
            p_api->dev_clean = horus_cleanup;
        }

        SW_RTN_ON_ERROR(horus_hw_init(dev_id, cfg));
    }
#endif
    return SW_OK;
}
/**
 * @brief Forward one API call from user space to the kernel over netlink.
 * @details Looks up (or lazily creates) a per-process netlink socket bound
 *          to this PID, sends the marshalled argument block to the kernel
 *          (nl_pid 0) and synchronously waits for the reply into the same
 *          nl_hdr buffer. The whole exchange runs under the socket locker.
 *          Fixes: the original ignored socket() and bind() failures and
 *          cached the bad descriptor; both are now checked and reported as
 *          SW_FAIL.
 * @param[in,out] arg_val marshalled call arguments; overwritten with reply
 * @return SW_OK or error code
 */
sw_error_t
sw_uk_if(a_uint32_t arg_val[SW_MAX_API_PARAM])
{
    struct sockaddr_nl src_addr;
    struct sockaddr_nl dest_addr;
    struct msghdr msg;
    struct iovec iov;
    struct nlmsghdr *nlh;
    ssdk_sock_t * sock;
    a_int32_t sock_fd;
    a_uint32_t curr_pid;
    sw_error_t rv = SW_OK;

    curr_pid = getpid();

    SOCK_LOCKER_LOCK;

    sock = ssdk_sock_find(curr_pid);
    if (!sock)
    {
        /* first call from this process: create and bind a netlink socket */
        sock = ssdk_sock_alloc(curr_pid);
        if (!sock)
        {
            SW_OUT_ON_ERROR(SW_NO_RESOURCE);
        }

        sock_fd = socket(PF_NETLINK, SOCK_RAW, ssdk_sock_prot);
        if (sock_fd < 0)
        {
            /* FIX: originally the -1 fd was cached and used by sendmsg.
               NOTE(review): the ssdk_sock_alloc'd entry is left in the
               cache with an unset fd — confirm whether a release helper
               exists to undo the allocation */
            SW_OUT_ON_ERROR(SW_FAIL);
        }

        aos_mem_set(&src_addr, 0, sizeof(src_addr));
        src_addr.nl_family = AF_NETLINK;
        src_addr.nl_pid = curr_pid;
        src_addr.nl_groups = 0;

        if (bind(sock_fd, (struct sockaddr*)&src_addr, sizeof(src_addr)) < 0)
        {
            /* FIX: bind failure was silently ignored before */
            close(sock_fd);
            SW_OUT_ON_ERROR(SW_FAIL);
        }

        sock->ssdk_sock_fd = sock_fd;
        sock->ssdk_sock_pid = curr_pid;
    }
    else
    {
        sock_fd = sock->ssdk_sock_fd;
    }

    /* destination: the kernel (nl_pid 0, no multicast groups) */
    aos_mem_set(&dest_addr, 0, sizeof(dest_addr));
    dest_addr.nl_family = AF_NETLINK;
    dest_addr.nl_pid = 0;
    dest_addr.nl_groups = 0;

    nlh = nl_hdr;
    aos_mem_set(nlh, 0, NLMSG_SPACE(SW_MAX_PAYLOAD));
    nlh->nlmsg_len = NLMSG_SPACE(SW_MAX_PAYLOAD);
    nlh->nlmsg_pid = curr_pid;
    nlh->nlmsg_flags = 0;
    aos_mem_copy(NLMSG_DATA(nlh), arg_val, SW_MAX_PAYLOAD);

    iov.iov_base = (void *)nlh;
    iov.iov_len = nlh->nlmsg_len;
    aos_mem_set(&msg, 0, sizeof(msg));
    msg.msg_name = (void *)&dest_addr;
    msg.msg_namelen = sizeof(dest_addr);
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    if (sendmsg(sock_fd, &msg, 0) <= 0)
    {
        SW_OUT_ON_ERROR(SW_FAIL);
    }

    /* reuse the same buffer/iov for the kernel's synchronous reply */
    aos_mem_set(nlh, 0, NLMSG_SPACE(SW_MAX_PAYLOAD));
    if (recvmsg(sock_fd, &msg, 0) <= 0)
    {
        SW_OUT_ON_ERROR(SW_FAIL);
    }

out:
    SOCK_LOCKER_UNLOCK;
    return rv;
}