enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn) { struct ecore_rt_data *rt_data = &p_hwfn->rt_data; if (IS_VF(p_hwfn->p_dev)) return ECORE_SUCCESS; rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(bool) * RUNTIME_ARRAY_SIZE); if (!rt_data->b_valid) return ECORE_NOMEM; rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(u32) * RUNTIME_ARRAY_SIZE); if (!rt_data->init_val) { OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid); return ECORE_NOMEM; } return ECORE_SUCCESS; }
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) { p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_hwfn->p_dcbx_info)); if (!p_hwfn->p_dcbx_info) { DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_dcbx_info'"); return ECORE_NOMEM; } p_hwfn->p_dcbx_info->iwarp_port = p_hwfn->pf_params.rdma_pf_params.iwarp_port; return ECORE_SUCCESS; }
/**
 * qed_alloc_stream_mem() - Allocate a stream workspace per HW function.
 *
 * Walks every HW function of the device and zero-allocates its
 * p_hwfn->stream buffer.
 *
 * @edev: device whose HW functions receive a stream buffer.
 *
 * Return: 0 on success, -ENOMEM on the first failed allocation.
 *         NOTE(review): streams allocated before a failure are left in
 *         place — presumably the caller releases them via the matching
 *         free routine; confirm against the caller's error path.
 */
static int qed_alloc_stream_mem(struct ecore_dev *edev)
{
	int idx;

	for_each_hwfn(edev, idx) {
		struct ecore_hwfn *hwfn = &edev->hwfns[idx];

		hwfn->stream = OSAL_ZALLOC(hwfn->p_dev, GFP_KERNEL,
					   sizeof(*hwfn->stream));
		if (hwfn->stream == OSAL_NULL)
			return -ENOMEM;
	}

	return 0;
}
/**
 * ecore_init_run() - Execute the firmware-provided init-ops command stream.
 *
 * Iterates over p_dev->fw_data->init_ops and dispatches each command by
 * its INIT_CALLBACK_OP_OP field: register writes/reads, conditional
 * mode/phase skips (which advance cmd_num past skipped commands),
 * delays, and callbacks. Stops at the first command that returns a
 * non-zero status.
 *
 * @p_hwfn:   HW function the commands are executed against.
 * @p_ptt:    PTT window used for register access.
 * @phase:    init phase selector matched by INIT_OP_IF_PHASE commands.
 * @phase_id: sub-phase selector matched by INIT_OP_IF_PHASE commands.
 * @modes:    mode bitmap matched by INIT_OP_IF_MODE commands.
 *
 * Return: ECORE_SUCCESS, ECORE_NOMEM if the unzip buffer cannot be
 *         allocated (zipped-FW builds only), or the first failing
 *         command's status.
 */
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer for decompressing zipped init data; freed below. */
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			/* b_dmae carries over from the most recent
			 * IF_PHASE command (false until one is seen).
			 */
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			/* Returns the number of commands to skip when the
			 * mode condition is not met; 0 otherwise.
			 */
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;

		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;

		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		/* Abort the whole run on the first failing command. */
		if (rc)
			break;
	}

#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif

	return rc;
}
/**
 * ecore_ooo_alloc() - Allocate the out-of-order (OOO) Rx tracking state.
 *
 * Sizes the OOO bookkeeping from the HW-function personality (iSCSI
 * connection count, or the iWARP CID count for RDMA personalities),
 * then allocates the ecore_ooo_info container, the isle array, the
 * archipelago array, and the CQE history ring, initializing the
 * associated lists. Uses goto-based unwind so a failure at any stage
 * frees everything allocated before it.
 *
 * @p_hwfn: HW function to attach the OOO info to.
 *
 * Return: ECORE_SUCCESS; ECORE_INVAL for an unsupported personality or
 *         a zero connection count; ECORE_NOMEM on allocation failure.
 */
enum _ecore_status_t ecore_ooo_alloc(struct ecore_hwfn *p_hwfn)
{
	u16 max_num_archipelagos = 0, cid_base;
	struct ecore_ooo_info *p_ooo_info;
	u16 max_num_isles = 0;
	u32 i;

	/* Derive the archipelago count and base CID from the personality. */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ISCSI:
		/* NOTE(review): num_cons is assigned into a u16 without an
		 * explicit cast — presumably always within range; confirm.
		 */
		max_num_archipelagos =
			p_hwfn->pf_params.iscsi_pf_params.num_cons;
		cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
							      PROTOCOLID_ISCSI);
		break;
	case ECORE_PCI_ETH_RDMA:
	case ECORE_PCI_ETH_IWARP:
		max_num_archipelagos =
			(u16)ecore_cxt_get_proto_cid_count(p_hwfn,
							   PROTOCOLID_IWARP,
							   OSAL_NULL);
		cid_base = (u16)ecore_cxt_get_proto_cid_start(p_hwfn,
							      PROTOCOLID_IWARP);
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info: unknown personalization\n");
		return ECORE_INVAL;
	}

	/* Each archipelago can contribute an isle beyond the fixed pool. */
	max_num_isles = ECORE_MAX_NUM_ISLES + max_num_archipelagos;

	if (!max_num_archipelagos) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info: unknown amount of connections\n");
		return ECORE_INVAL;
	}

	p_ooo_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				 sizeof(*p_ooo_info));
	if (!p_ooo_info) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate ecore_ooo_info\n");
		return ECORE_NOMEM;
	}

	p_ooo_info->cid_base = cid_base; /* We look only at the icid */
	p_ooo_info->max_num_archipelagos = max_num_archipelagos;

	OSAL_LIST_INIT(&p_ooo_info->free_buffers_list);
	OSAL_LIST_INIT(&p_ooo_info->ready_buffers_list);
	OSAL_LIST_INIT(&p_ooo_info->free_isles_list);

	/* Isle pool: every isle starts on the free list. */
	p_ooo_info->p_isles_mem =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ecore_ooo_isle) * max_num_isles);
	if (!p_ooo_info->p_isles_mem) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info (isles)\n");
		goto no_isles_mem;
	}
	for (i = 0; i < max_num_isles; i++) {
		OSAL_LIST_INIT(&p_ooo_info->p_isles_mem[i].buffers_list);
		OSAL_LIST_PUSH_TAIL(&p_ooo_info->p_isles_mem[i].list_entry,
				    &p_ooo_info->free_isles_list);
	}

	/* One archipelago per connection; each tracks its own isle list. */
	p_ooo_info->p_archipelagos_mem =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ecore_ooo_archipelago) *
			    max_num_archipelagos);
	if (!p_ooo_info->p_archipelagos_mem) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info(archpelagos)\n");
		goto no_archipelagos_mem;
	}
	for (i = 0; i < max_num_archipelagos; i++) {
		OSAL_LIST_INIT(&p_ooo_info->p_archipelagos_mem[i].isles_list);
	}

	/* Fixed-size debug history of recently seen OOO CQEs. */
	p_ooo_info->ooo_history.p_cqes =
		OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
			    sizeof(struct ooo_opaque) *
			    ECORE_MAX_NUM_OOO_HISTORY_ENTRIES);
	if (!p_ooo_info->ooo_history.p_cqes) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate ecore_ooo_info(history)\n");
		goto no_history_mem;
	}
	p_ooo_info->ooo_history.num_of_cqes =
		ECORE_MAX_NUM_OOO_HISTORY_ENTRIES;

	p_hwfn->p_ooo_info = p_ooo_info;
	return ECORE_SUCCESS;

/* Unwind in reverse allocation order. */
no_history_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_archipelagos_mem);
no_archipelagos_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info->p_isles_mem);
no_isles_mem:
	OSAL_FREE(p_hwfn->p_dev, p_ooo_info);
	return ECORE_NOMEM;
}