/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
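/*
 * The pa check above is safe on a partially initialized queue only if the
 * buffer-info array starts out zeroed, so never-mapped slots carry pa == 0.
 * Below is a minimal user-space model of that teardown pattern, assuming a
 * zeroing allocator (the kernel driver's i40e_allocate_virt_mem() is
 * kzalloc-backed). All names here are illustrative, not driver API.
 */
#include <stdint.h>
#include <stdlib.h>

struct demo_dma_buf {
	uint64_t pa;	/* bus address; 0 means "never mapped" */
	void *va;
};

static void demo_free_queue_bufs(struct demo_dma_buf *bufs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (bufs[i].pa)		/* skip slots that were never mapped */
			free(bufs[i].va);
	free(bufs);			/* then drop the buffer-info list itself */
}

int main(void)
{
	struct demo_dma_buf *bufs = calloc(4, sizeof(*bufs)); /* zeroed: pa == 0 */

	bufs[0].va = malloc(64);
	bufs[0].pa = 0x1000;	/* pretend bus address for the demo */
	demo_free_queue_bufs(bufs, 4);
	return 0;
}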
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: distinguishes a VF from a PF
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info,
			      u32 idx, bool is_pf)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = false;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					  hmc_info->hmc_fn_id);

	/* free memory here */
	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
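/*
 * A standalone demo of the index split at the top of i40e_remove_pd_bp().
 * Assuming I40E_HMC_PD_CNT_IN_SD is 512 (one 2 MB segment descriptor covers
 * 512 4 KB backing pages), a global page-descriptor index decomposes into a
 * segment-descriptor index and an index within that segment. The macro name
 * below is a stand-in, not the driver's header definition.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PD_CNT_IN_SD 512u	/* stands in for I40E_HMC_PD_CNT_IN_SD */

int main(void)
{
	uint32_t idx = 1300;				/* example global pd index */
	uint32_t sd_idx = idx / DEMO_PD_CNT_IN_SD;	/* -> 2 */
	uint32_t rel_pd_idx = idx % DEMO_PD_CNT_IN_SD;	/* -> 276 */

	printf("idx %u -> sd_idx %u, rel_pd_idx %u\n",
	       idx, sd_idx, rel_pd_idx);
	return 0;
}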
/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = FALSE;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}
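/*
 * Both i40e_remove_pd_bp() variants above share a decrement-and-free-at-zero
 * pattern: the backing page is invalidated and released only once the last
 * reference is dropped; earlier callers just decrement and return. A minimal
 * user-space model of that pattern, with illustrative names rather than
 * driver API:
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_backing_page {
	int ref_cnt;
	bool valid;
	void *mem;
};

static void demo_put_backing_page(struct demo_backing_page *bp)
{
	if (--bp->ref_cnt)	/* still referenced: nothing more to do */
		return;
	bp->valid = false;	/* mark invalid before releasing memory */
	free(bp->mem);
	bp->mem = NULL;
}

int main(void)
{
	struct demo_backing_page bp = {
		.ref_cnt = 2, .valid = true, .mem = malloc(4096),
	};

	demo_put_backing_page(&bp);	/* ref_cnt 2 -> 1: page survives */
	demo_put_backing_page(&bp);	/* ref_cnt 1 -> 0: page freed */
	printf("valid=%d mem=%p\n", bp.valid, bp.mem);
	return 0;
}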
/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}
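/*
 * Note the "free, then zero the bookkeeping" idiom in i40e_shutdown_lan_hmc():
 * clearing sd_cnt and sd_entry after the free leaves the table in a safe
 * empty state, so a stray lookup or a repeated shutdown sees no dangling
 * pointers. A tiny user-space model with illustrative names:
 */
#include <stdlib.h>

struct demo_sd_table {
	unsigned int sd_cnt;
	void *sd_entry;
};

static void demo_table_shutdown(struct demo_sd_table *t)
{
	free(t->sd_entry);
	t->sd_cnt = 0;		/* leave the table in a safe "empty" state */
	t->sd_entry = NULL;	/* so a second shutdown is a harmless no-op */
}

int main(void)
{
	struct demo_sd_table t = { .sd_cnt = 4, .sd_entry = calloc(4, 16) };

	demo_table_shutdown(&t);
	demo_table_shutdown(&t);	/* safe: free(NULL) is a no-op */
	return 0;
}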
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}
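/*
 * The shared-code variant above also destroys the queue spinlocks, and it
 * does so only after both queues are shut down, when no code path can still
 * take them. A user-space sketch of that ordering, with pthread mutexes
 * standing in for i40e spinlocks (illustrative only; compile with -pthread):
 */
#include <pthread.h>

static pthread_mutex_t demo_asq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_arq_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_shutdown_queues(void)
{
	/* quiesce both queues while the locks are still valid */
	pthread_mutex_lock(&demo_asq_lock);
	pthread_mutex_unlock(&demo_asq_lock);
	pthread_mutex_lock(&demo_arq_lock);
	pthread_mutex_unlock(&demo_arq_lock);
}

int main(void)
{
	demo_shutdown_queues();			/* no lock users remain */
	pthread_mutex_destroy(&demo_asq_lock);	/* only now is destroy safe */
	pthread_mutex_destroy(&demo_arq_lock);
	return 0;
}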
/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
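/*
 * The unwind_alloc_arq_bufs label above is an instance of the common
 * allocate-all-or-unwind idiom: on the first failure, free entries
 * [0, i - 1] in reverse and report the error, leaving nothing half-built.
 * A standalone model of that idiom with illustrative names:
 */
#include <stdlib.h>

#define DEMO_NUM_BUFS 8

static void *demo_bufs[DEMO_NUM_BUFS];

static int demo_alloc_all(size_t size)
{
	int i;

	for (i = 0; i < DEMO_NUM_BUFS; i++) {
		demo_bufs[i] = malloc(size);
		if (!demo_bufs[i])
			goto unwind;
	}
	return 0;

unwind:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		free(demo_bufs[i]);
	return -1;
}

int main(void)
{
	return demo_alloc_all(512);
}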