void ecore_init_free(struct ecore_hwfn *p_hwfn) { OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val); p_hwfn->rt_data.init_val = OSAL_NULL; OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid); p_hwfn->rt_data.b_valid = OSAL_NULL; }
/* Tear down the out-of-order (OOO) reassembly context of this HW function.
 *
 * Order matters: first return every buffer still held by an isle to
 * free_buffers_list, then drain that list (releasing each buffer's
 * DMA-coherent rx area and its descriptor), and only then free the
 * backing arrays and the info structure itself.
 * Safe no-op when no OOO context was ever allocated.
 */
void ecore_ooo_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
	struct ecore_ooo_buffer *p_buffer;

	if (!p_ooo_info)
		return;

	/* Moves all buffers attached to isles back onto free_buffers_list
	 * so the loop below sees (and frees) every buffer.
	 */
	ecore_ooo_release_all_isles(p_ooo_info);
	while (!OSAL_LIST_IS_EMPTY(&p_ooo_info->free_buffers_list)) {
		p_buffer = OSAL_LIST_FIRST_ENTRY(&p_ooo_info->
						 free_buffers_list,
						 struct ecore_ooo_buffer,
						 list_entry);
		/* Defensive: list reported non-empty but no entry found */
		if (p_buffer == OSAL_NULL)
			break;
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
		OSAL_LIST_REMOVE_ENTRY(&p_buffer->list_entry,
				       &p_ooo_info->free_buffers_list);
		/* Each buffer owns one DMA-coherent rx area plus its own
		 * descriptor - release both.
		 */
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_buffer->rx_buffer_virt_addr,
				       p_buffer->rx_buffer_phys_addr,
				       p_buffer->rx_buffer_size);
		OSAL_FREE(p_hwfn->p_dev, p_buffer);
	}

	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_isles_mem);
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_archipelagos_mem);
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->ooo_history.p_cqes);
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info);
	/* Clear the stale pointer so a repeated free is a no-op */
	p_hwfn->p_ooo_info = OSAL_NULL;
}
/* Destroy the PTT window pool: the pool spinlock is deallocated first
 * (guarded, since the pool may never have been created), then the pool
 * memory is released and the stale pointer cleared.  OSAL_FREE tolerates
 * a null argument, so no guard is needed around it.
 */
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->p_ptt_pool != OSAL_NULL)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = OSAL_NULL;
}
/* Free the compression-stream workspace of every HW function.
 *
 * Fix: the original `return`ed on the first hwfn whose stream was NULL,
 * which would leak the streams of any later hwfns; skip that hwfn with
 * `continue` instead.  Also clear the pointer after freeing so a repeat
 * call (or a later free path) cannot double-free it.
 */
static void qed_free_stream_mem(struct ecore_dev *edev)
{
	int i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		if (!p_hwfn->stream)
			continue;

		OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
		p_hwfn->stream = OSAL_NULL;
	}
}
/******************************************************************************
 * Name : uint8 Osal_Queue_Delete(T_QUEUE_INFO *ptQueueInfo)
 * Function : Delete a queue
 * Input : T_QUEUE_INFO *ptQueueInfo The pointer of queue information data structure
 * Output: : None.
 * Return : SW_OK Successful.
 *          SW_ERR Failed.
 * description: Frees the queue's buffer inside a critical section and clears
 *              the buffer pointer.  The T_QUEUE_INFO structure itself is NOT
 *              freed; the caller retains ownership of it.
 * Version : V1.00
 * Author : Ian
 * Date : 10th Jun 2016
 ******************************************************************************/
uint8 Osal_Queue_Delete(T_QUEUE_INFO *ptQueueInfo)
{
    uint32 u32IntSt;

    /* If the pointer of queue information is invalid */
    if (NULL == ptQueueInfo)
    {
        /* Fix: message previously complained about buffer length,
         * which belongs to a different check */
        DBG_PRINT("The pointer of queue information is invalid!!\n");
        return SW_ERR;
    }

    ENTER_CRITICAL_ZONE(u32IntSt); /* Enter the critical zone to prevent event updating unexpectedly */
    /**************************************************************************************************/
    OSAL_FREE(ptQueueInfo->pu8Addr);
    /* Fix: clear the freed pointer to prevent use-after-free / double-free
     * by any code that still holds ptQueueInfo */
    ptQueueInfo->pu8Addr = NULL;
    /**************************************************************************************************/
    EXIT_CRITICAL_ZONE(u32IntSt); /* Exit the critical zone */

    return SW_OK;
}
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn) { struct ecore_rt_data *rt_data = &p_hwfn->rt_data; if (IS_VF(p_hwfn->p_dev)) return ECORE_SUCCESS; rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(bool) * RUNTIME_ARRAY_SIZE); if (!rt_data->b_valid) return ECORE_NOMEM; rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(u32) * RUNTIME_ARRAY_SIZE); if (!rt_data->init_val) { OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid); return ECORE_NOMEM; } return ECORE_SUCCESS; }
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn) { OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info); }
/* Execute the firmware init-tool command array for one (phase, phase_id)
 * under the given mode set.
 *
 * Walks p_dev->fw_data->init_ops and dispatches each op; IF_MODE/IF_PHASE
 * ops return a skip count that is added to cmd_num, so non-matching command
 * spans are jumped over rather than executed.  b_dmae is latched from each
 * IF_PHASE op and passed to subsequent writes.  The loop aborts on the
 * first command that fails.
 *
 * Returns ECORE_SUCCESS, ECORE_NOMEM if the unzip scratch buffer cannot be
 * allocated (zipped-FW builds only), or the first failing command's status.
 */
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer for decompressing zipped init data */
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			/* Returns number of commands to skip when the mode
			 * condition does not match */
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;

		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			/* DMAE usage for the following writes is dictated
			 * per-phase by the init tool */
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;

		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		/* Stop on the first failing command */
		if (rc)
			break;
	}

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer is freed on both success and failure paths */
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif

	return rc;
}
/* Allocate and initialize the out-of-order (OOO) reassembly context.
 *
 * Sizing is personality-dependent: iSCSI uses the configured connection
 * count, iWARP/RDMA queries the context manager; other personalities are
 * rejected.  Allocates the info structure, the isle array (pushed onto the
 * free-isles list), the archipelago array, and the CQE history ring.
 *
 * Returns ECORE_SUCCESS, ECORE_INVAL for an unsupported personality or a
 * zero connection count, or ECORE_NOMEM with all partial allocations
 * unwound via the goto chain at the bottom.
 */
enum _ecore_status_t ecore_ooo_alloc(struct ecore_hwfn *p_hwfn)
{
	u16 max_num_archipelagos = 0, cid_base;
	struct ecore_ooo_info *p_ooo_info;
	u16 max_num_isles = 0;
	u32 i;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ISCSI:
		max_num_archipelagos =
			p_hwfn->pf_params.iscsi_pf_params.num_cons;
		cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
							      PROTOCOLID_ISCSI);
		break;
	case ECORE_PCI_ETH_RDMA:
	case ECORE_PCI_ETH_IWARP:
		max_num_archipelagos =
			(u16)ecore_cxt_get_proto_cid_count(p_hwfn,
							   PROTOCOLID_IWARP,
							   OSAL_NULL);
		cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
							      PROTOCOLID_IWARP);
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info: unknown personalization\n");
		return ECORE_INVAL;
	}

	/* One extra isle per connection beyond the global budget */
	max_num_isles = ECORE_MAX_NUM_ISLES + max_num_archipelagos;

	if (!max_num_archipelagos) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info: unknown amount of connections\n");
		return ECORE_INVAL;
	}

	p_ooo_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				 sizeof(*p_ooo_info));
	if (!p_ooo_info) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info\n");
		return ECORE_NOMEM;
	}
	p_ooo_info->cid_base = cid_base; /* We look only at the icid */
	p_ooo_info->max_num_archipelagos = max_num_archipelagos;

	OSAL_LIST_INIT(&p_ooo_info->free_buffers_list);
	OSAL_LIST_INIT(&p_ooo_info->ready_buffers_list);
	OSAL_LIST_INIT(&p_ooo_info->free_isles_list);

	p_ooo_info->p_isles_mem =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ecore_ooo_isle) * max_num_isles);
	if (!p_ooo_info->p_isles_mem) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info (isles)\n");
		goto no_isles_mem;
	}

	/* All isles start out free */
	for (i = 0; i < max_num_isles; i++) {
		OSAL_LIST_INIT(&p_ooo_info->p_isles_mem[i].buffers_list);
		OSAL_LIST_PUSH_TAIL(&p_ooo_info->p_isles_mem[i].list_entry,
				    &p_ooo_info->free_isles_list);
	}

	p_ooo_info->p_archipelagos_mem =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ecore_ooo_archipelago) *
			    max_num_archipelagos);
	if (!p_ooo_info->p_archipelagos_mem) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info(archpelagos)\n");
		goto no_archipelagos_mem;
	}

	for (i = 0; i < max_num_archipelagos; i++) {
		OSAL_LIST_INIT(&p_ooo_info->p_archipelagos_mem[i].isles_list);
	}

	/* Debug history ring of recent OOO CQEs */
	p_ooo_info->ooo_history.p_cqes =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ooo_opaque) *
			    ECORE_MAX_NUM_OOO_HISTORY_ENTRIES);
	if (!p_ooo_info->ooo_history.p_cqes) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info(history)\n");
		goto no_history_mem;
	}
	p_ooo_info->ooo_history.num_of_cqes =
		ECORE_MAX_NUM_OOO_HISTORY_ENTRIES;

	p_hwfn->p_ooo_info = p_ooo_info;
	return ECORE_SUCCESS;

	/* Unwind partial allocations in reverse order */
no_history_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_archipelagos_mem);
no_archipelagos_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_isles_mem);
no_isles_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info);
	return ECORE_NOMEM;
}