/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to u8* that receives the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	/* validate everything we are about to dereference */
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		/* BUGFIX: assign ret_code before logging it; previously the
		 * stale I40E_SUCCESS value was printed instead of the error.
		 */
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	/* absolute offset of the object within the function-private memory */
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		/* paged SD: find the backing page, then the offset in it */
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		/* direct SD: offset within the single large backing page */
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
/** * i40e_delete_hmc_object - remove hmc objects * @hw: pointer to the HW structure * @info: pointer to i40e_hmc_delete_obj_info struct * * This will de-populate the SDs and PDs. It frees * the memory for PDS and backing storage. After this function is returned, * caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage. **/ enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw, struct i40e_hmc_lan_delete_obj_info *info) { enum i40e_status_code ret_code = I40E_SUCCESS; struct i40e_hmc_pd_table *pd_table; u32 pd_idx, pd_lmt, rel_pd_idx; u32 sd_idx, sd_lmt; u32 i, j; if (NULL == info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n"); goto exit; } if (NULL == info->hmc_info) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n"); goto exit; } if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n"); goto exit; } if (NULL == info->hmc_info->sd_table.sd_entry) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n"); goto exit; } if (NULL == info->hmc_info->hmc_obj) { ret_code = I40E_ERR_BAD_PTR; DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); goto exit; } if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n", ret_code); goto exit; } I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &pd_idx, &pd_lmt); for (j = pd_idx; j < pd_lmt; j++) { sd_idx = j / I40E_HMC_PD_CNT_IN_SD; if (I40E_SD_TYPE_PAGED != 
info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) continue; rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; if (pd_table->pd_entry[rel_pd_idx].valid) { ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); if (I40E_SUCCESS != ret_code) goto exit; } } /* find sd index and limit */ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, info->start_idx, info->count, &sd_idx, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ret_code = I40E_ERR_INVALID_SD_INDEX; goto exit; } for (i = sd_idx; i < sd_lmt; i++) { if (!info->hmc_info->sd_table.sd_entry[i].valid) continue; switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { case I40E_SD_TYPE_DIRECT: ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); if (I40E_SUCCESS != ret_code) goto exit; break; case I40E_SD_TYPE_PAGED: ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); if (I40E_SUCCESS != ret_code) goto exit; break; default: break; } } exit: return ret_code; }
/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;	/* PD window clamped to one SD */
	u32 pd_idx = 0, pd_lmt = 0;	/* PD range for the whole request */
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	/* validate caller-supplied pointers and the HMC info signature */
	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}
	/* the requested object range must lie within the resource's count */
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &pd_idx, &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								   info->hmc_info,
								   i);
				if (I40E_SUCCESS != ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
			/* NOTE(review): on pd_error the loop falls through and
			 * still registers the SD entry below; the non-success
			 * ret_code from i40e_add_pd_table_entry is what the
			 * caller ultimately sees — confirm this partial-failure
			 * behavior is intended.
			 */
		}
		if (!sd_entry->valid) {
			/* first use of this SD entry: program it into the
			 * PF's SD table register by its type
			 */
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw,
						     sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	/* unwind in reverse: free PD backing pages and SD pages for every SD
	 * entry that was set up before the failure at index j
	 */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			/* clamp PD range to the SD being unwound */
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			}
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}