Example #1
File: qed_int.c Project: raoy1990/linux
static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_sb_attn_info *p_sb;
	void *p_virt;
	dma_addr_t p_phys = 0;

	/* SB struct */
	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
	if (!p_sb) {
		DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
		return -ENOMEM;
	}

	/* SB ring  */
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
				    &p_phys, GFP_KERNEL);

	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
		kfree(p_sb);
		return -ENOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return 0;
}
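
The allocation above has no teardown shown; here is a minimal sketch of the matching free path, assuming (hypothetically) that qed_sb_attn_info keeps the ring's addresses in sb_virt/sb_phys fields:

/* Hypothetical teardown mirroring qed_int_sb_attn_alloc(); the
 * sb_virt/sb_phys field names are assumptions, not taken from qed.
 */
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
				  p_sb->sb_virt, p_sb->sb_phys);
	kfree(p_sb);
	p_hwfn->p_sb_attn = NULL;
}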
Example #2
File: qede_main.c Project: Leon555/dpdk
static int qed_load_firmware_data(struct ecore_dev *edev)
{
	int fd;
	struct stat st;
	const char *fw = RTE_LIBRTE_QEDE_FW;

	if (strcmp(fw, "") == 0)
		strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
	else
		strcpy(fw_file, fw);

	fd = open(fw_file, O_RDONLY);
	if (fd < 0) {
		DP_NOTICE(edev, false, "Can't open firmware file\n");
		return -ENOENT;
	}

	if (fstat(fd, &st) < 0) {
		DP_NOTICE(edev, false, "Can't stat firmware file\n");
		close(fd);
		return -1;
	}

	edev->firmware = rte_zmalloc("qede_fw", st.st_size,
				    RTE_CACHE_LINE_SIZE);
	if (!edev->firmware) {
		DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
		close(fd);
		return -ENOMEM;
	}

	if (read(fd, edev->firmware, st.st_size) != st.st_size) {
		DP_NOTICE(edev, false, "Can't read firmware data\n");
		rte_free(edev->firmware);
		close(fd);
		return -1;
	}

	edev->fw_len = st.st_size;
	if (edev->fw_len < 104) {
		DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
			  edev->fw_len);
		rte_free(edev->firmware);
		close(fd);
		return -EINVAL;
	}

	close(fd);
	return 0;
}
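
Apart from rte_zmalloc(), the sequence above is plain POSIX (open, fstat, read, close); a self-contained sketch of the same load pattern with malloc(), illustrative only:

#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

/* Read a whole file into a freshly allocated buffer, returning NULL on
 * any failure; *len receives the file size on success.
 */
static void *load_file(const char *path, size_t *len)
{
	struct stat st;
	void *buf = NULL;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return NULL;
	if (fstat(fd, &st) < 0)
		goto out;
	buf = malloc(st.st_size);
	if (buf && read(fd, buf, st.st_size) != st.st_size) {
		free(buf);
		buf = NULL;
	}
	if (buf)
		*len = st.st_size;
out:
	close(fd);
	return buf;
}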
Example #3
static int
qed_configure_filter_ucast(struct ecore_dev *edev,
                           struct qed_filter_ucast_params *params)
{
    struct ecore_filter_ucast ucast;

    if (!params->vlan_valid && !params->mac_valid) {
        DP_NOTICE(edev, true,
                  "Tried configuring a unicast filter, "
                  "but both MAC and VLAN are not set\n");
        return -EINVAL;
    }

    memset(&ucast, 0, sizeof(ucast));
    switch (params->type) {
    case QED_FILTER_XCAST_TYPE_ADD:
        ucast.opcode = ECORE_FILTER_ADD;
        break;
    case QED_FILTER_XCAST_TYPE_DEL:
        ucast.opcode = ECORE_FILTER_REMOVE;
        break;
    case QED_FILTER_XCAST_TYPE_REPLACE:
        ucast.opcode = ECORE_FILTER_REPLACE;
        break;
    default:
        DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
                  params->type);
        return -EINVAL;
    }

    if (params->vlan_valid && params->mac_valid) {
        ucast.type = ECORE_FILTER_MAC_VLAN;
        ether_addr_copy((struct ether_addr *)&params->mac,
                        (struct ether_addr *)&ucast.mac);
        ucast.vlan = params->vlan;
    } else if (params->mac_valid) {
        ucast.type = ECORE_FILTER_MAC;
        ether_addr_copy((struct ether_addr *)&params->mac,
                        (struct ether_addr *)&ucast.mac);
    } else {
        ucast.type = ECORE_FILTER_VLAN;
        ucast.vlan = params->vlan;
    }

    ucast.is_rx_filter = true;
    ucast.is_tx_filter = true;

    return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}
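
A hedged caller sketch for the function above; qed_filter_ucast_params is assumed to hold exactly the fields the function dereferences (type, mac_valid/mac, vlan_valid/vlan), and the helper name is illustrative:

/* Install a MAC-only unicast filter; with vlan_valid left false the
 * function above builds an ECORE_FILTER_MAC filter.
 */
static int example_add_ucast_mac(struct ecore_dev *edev,
                                 const uint8_t mac[ETHER_ADDR_LEN])
{
    struct qed_filter_ucast_params params;

    memset(&params, 0, sizeof(params));
    params.type = QED_FILTER_XCAST_TYPE_ADD;
    params.mac_valid = true;
    memcpy(&params.mac, mac, ETHER_ADDR_LEN);

    return qed_configure_filter_ucast(edev, &params);
}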
Example #4
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}
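
The function above indexes the blob through buf_hdr[...].offset and .length; the header layout this implies is roughly the following (a reconstruction from those accesses, not the verbatim qed definition):

/* Layout implied by the offset/length accesses above; the real
 * bin_buffer_hdr may carry additional fields.
 */
struct bin_buffer_hdr_sketch {
	u32 offset;	/* byte offset of the buffer within the fw blob */
	u32 length;	/* buffer length in bytes */
};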
Example #5
File: qed_mcp.c Project: panyfx/ath
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	int rc = 0;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Lock mutex to ensure that only a single thread
	 * accesses the MCP at a time
	 */
	mutex_lock(&p_hwfn->mcp_info->mutex);
	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
			    o_mcp_resp, o_mcp_param);
	/* Release Mutex */
	mutex_unlock(&p_hwfn->mcp_info->mutex);

	return rc;
}
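
A hedged round-trip sketch for qed_mcp_cmd(); EXAMPLE_CMD and EXAMPLE_PARAM are placeholders for a real DRV_MSG_CODE_* command and its parameter (Example #15 below shows a real caller):

/* Hypothetical caller; EXAMPLE_CMD / EXAMPLE_PARAM stand in for a
 * real DRV_MSG_CODE_* command and its 32-bit parameter.
 */
static int example_mcp_roundtrip(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, EXAMPLE_CMD, EXAMPLE_PARAM,
			 &resp, &param);
	if (rc)		/* e.g. -EBUSY when the MFW was never initialized */
		return rc;

	/* resp carries the FW_MSG_CODE_* status, param the payload */
	return 0;
}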
Example #6
int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *data)
{
	struct qed_fw_data *fw = cdev->fw_data;
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(cdev, "Invalid fw data\n");
		return -EINVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);

	return 0;
}
Example #7
static struct ecore_ooo_isle *ecore_ooo_seek_isle(struct ecore_hwfn *p_hwfn,
						  struct ecore_ooo_info *p_ooo_info,
						  u32 cid, u8 isle)
{
	struct ecore_ooo_archipelago *p_archipelago = OSAL_NULL;
	struct ecore_ooo_isle *p_isle = OSAL_NULL;
	u8 the_num_of_isle = 1;

	p_archipelago = ecore_ooo_seek_archipelago(p_ooo_info, cid);
	if (!p_archipelago) {
		DP_NOTICE(p_hwfn, true,
			 "Connection %d is not found in OOO list\n", cid);
		return OSAL_NULL;
	}

	OSAL_LIST_FOR_EACH_ENTRY(p_isle,
				 &p_archipelago->isles_list,
				 list_entry, struct ecore_ooo_isle) {
		if (the_num_of_isle == isle)
			return p_isle;
		the_num_of_isle++;
	}

	return OSAL_NULL;
}
Example #8
File: qed_cxt.c Project: 513855417/linux
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
		return -ENOMEM;
	}

	/* Initialize ILT client registers */
	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	/* default ILT page size for all clients is 32K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	if (p_hwfn->cdev->p_iov_info)
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}
Example #9
File: qede_filter.c Project: krzk/linux
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
				       fltr->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&fltr->node);
	hlist_add_head(&fltr->node,
		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

	edev->arfs->filter_count++;
	if (edev->arfs->filter_count == 1 &&
	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
		edev->ops->configure_arfs_searcher(edev->cdev,
						   fltr->tuple.mode);
		edev->arfs->mode = fltr->tuple.mode;
	}

	return 0;
}
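
The filter's DMA mapping above is set up with dma_map_single(); the matching teardown would mirror it, as in this sketch grounded in the fields the function already uses:

/* Hedged unmap counterpart of the mapping established above */
static void example_unmap_arfs_fltr(struct qede_dev *edev,
				    struct qede_arfs_fltr_node *fltr)
{
	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
			 fltr->buf_len, DMA_TO_DEVICE);
}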
Example #10

static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
				u8 start_vport,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT rate-limit configuration");
			return -1;
		}

		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
			     QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn,
			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
			     inc_val);
	}

	return 0;
}
Example #11
/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, true,
		  "Currently init values have no need of callbacks\n");
}
Example #12
static int
qed_configure_filter_mcast(struct ecore_dev *edev,
                           struct qed_filter_mcast_params *params)
{
    struct ecore_filter_mcast mcast;
    int i;

    memset(&mcast, 0, sizeof(mcast));
    switch (params->type) {
    case QED_FILTER_XCAST_TYPE_ADD:
        mcast.opcode = ECORE_FILTER_ADD;
        break;
    case QED_FILTER_XCAST_TYPE_DEL:
        mcast.opcode = ECORE_FILTER_REMOVE;
        break;
    default:
        DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
                  params->type);
        return -EINVAL;
    }

    mcast.num_mc_addrs = params->num;
    for (i = 0; i < mcast.num_mc_addrs; i++)
        ether_addr_copy((struct ether_addr *)&params->mac[i],
                        (struct ether_addr *)&mcast.mac[i]);

    return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
}
Example #13
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
{
	int rc = 0;

	p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
	if (!p_hwfn->p_dcbx_info) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate 'struct qed_dcbx_info'\n");
		rc = -ENOMEM;
	}

	return rc;
}
Example #14
File: qed_mcp.c Project: panyfx/ath
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	/* Both mailbox copies must have been allocated */
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;
		goto err;

	/* Initialize the MFW mutex */
	mutex_init(&p_info->mutex);

	return 0;

err:
	DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
Example #15
File: qed_mcp.c Project: panyfx/ath
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 param;
	int rc;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Save driver's version to shmem */
	qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Load Request */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
			 (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type),
			 p_load_code, &param);

	/* If the MCP fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* If the MFW refused the load request we must abort. This can
	 * happen in the following cases:
	 * - The other port is in diagnostic mode.
	 * - A previously loaded function on the engine is not compliant
	 *   with the requester.
	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}
Example #16
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					  sizeof(*p_hwfn->p_dcbx_info));
	if (!p_hwfn->p_dcbx_info) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_dcbx_info'");
		return ECORE_NOMEM;
	}

	p_hwfn->p_dcbx_info->iwarp_port =
		p_hwfn->pf_params.rdma_pf_params.iwarp_port;

	return ECORE_SUCCESS;
}
Example #17
File: qed_cxt.c Project: 513855417/linux
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
		goto tables_alloc_fail;
	}

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}
Example #18
File: qed_cxt.c Project: 513855417/linux
static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct qed_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc != 0)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc != 0)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}
Example #19
/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool b_must_dmae;
	u32 addr, data;

	data = OSAL_LE32_TO_CPU(p_cmd->data);
	b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't"
			  " allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		ecore_init_rt(p_hwfn, p_ptt, addr,
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
			      b_must_dmae);
		break;
	}

	return rc;
}
Example #20

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
			      u8 start_vport,
			      u8 num_vports,
			      struct init_qm_vport_params *vport_params)
{
	u8 tc, i, vport_id;
	u32 inc_val;

	/* go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
		u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];

		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn,
				  "Invalid VPORT WFQ weight configuration");
			return -1;
		}

		/* each VPORT can have several VPORT PQ IDs for
		 * different TCs
		 */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			u16 vport_pq_id = pq_ids[tc];

			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
					     vport_pq_id, inc_val);
				STORE_RT_REG(p_hwfn, temp + vport_pq_id,
					     QM_WFQ_UPPER_BOUND |
					     QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn,
					     QM_REG_WFQVPCRD_RT_OFFSET +
					     vport_pq_id,
					     QM_WFQ_INIT_CRD(inc_val) |
					     QM_WFQ_CRD_REG_SIGN_BIT);
			}
		}
	}

	return 0;
}
Example #21
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_write_op *cmd,
			   bool b_can_dmae)
{
	u32 data = le32_to_cpu(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	union init_write_args *arg = &cmd->args;
	int rc = 0;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return -EINVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		qed_wr(p_hwfn, p_ptt, addr,
		       le32_to_cpu(arg->inline_val));
		break;
	case INIT_SRC_ZEROS:
		if (b_must_dmae ||
		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
						le32_to_cpu(arg->zeros_count));
		else
			qed_init_fill(p_hwfn, p_ptt, addr, 0,
				      le32_to_cpu(arg->zeros_count));
		break;
	case INIT_SRC_ARRAY:
		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
					b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		qed_init_rt(p_hwfn, p_ptt, addr,
			    le16_to_cpu(arg->runtime.offset),
			    le16_to_cpu(arg->runtime.size),
			    b_must_dmae);
		break;
	}

	return rc;
}
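
Both write-command variants decode the op word with GET_FIELD; in qed-style headers this is conventionally a shift-and-mask macro along the following lines (a sketch, assuming per-field _SHIFT/_MASK constants):

/* Conventional shift-and-mask accessor; each field is assumed to come
 * with name##_SHIFT and name##_MASK constants, e.g. for
 * INIT_WRITE_OP_ADDRESS as used above.
 */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)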
Example #22
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    struct ecore_tunnel_info *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
		return rc;
	}

	if (!p_tunn)
		return ECORE_INVAL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);

	return rc;
}
Example #23

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
			     u8 pf_id,
			     u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}
	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
		     QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
	return 0;
}
Example #24
/* init_ops callbacks entry point */
static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   struct init_callback_op *p_cmd)
{
	int rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return -EINVAL;
	}

	return rc;
}
Example #25
/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}
Example #26
static int
qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
{
    switch (params->type) {
    case QED_FILTER_TYPE_UCAST:
        return qed_configure_filter_ucast(edev, &params->filter.ucast);
    case QED_FILTER_TYPE_MCAST:
        return qed_configure_filter_mcast(edev, &params->filter.mcast);
    case QED_FILTER_TYPE_RX_MODE:
        return qed_configure_filter_rx_mode(edev,
                                            params->filter.accept_flags);
    default:
        DP_NOTICE(edev, true, "Unknown filter type %d\n",
                  (int)params->type);
        return -EINVAL;
    }
}
Example #27

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u8 pf_id,
		   u32 pf_rl)
{
	u32 inc_val = QM_RL_INC_VAL(pf_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLPFCRD + pf_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
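
qed_init_pf_rl() is the immediate counterpart of the RT-staged qed_pf_rl_rt_init() shown earlier: the same credit and increment registers, but written directly through qed_wr() at runtime instead of being staged with STORE_RT_REG(). A hedged caller sketch (the helper name and the rate value are illustrative):

/* Hypothetical runtime update of a PF rate limit; the rate units
 * follow whatever QM_RL_INC_VAL() expects and 25 is illustrative.
 */
static int example_update_pf_rl(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, u8 pf_id)
{
	return qed_init_pf_rl(p_hwfn, p_ptt, pf_id, 25);
}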
Example #28
static void
qedr_iw_qp_event(void *context,
		 struct qed_iwarp_cm_event_params *params,
		 enum ib_event_type ib_event, char *str)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct ib_qp *ibqp = &ep->qp->ibqp;
	struct ib_event event;

	DP_NOTICE(dev, "QP error received: %s\n", str);

	if (ibqp->event_handler) {
		event.event = ib_event;
		event.device = ibqp->device;
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
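
The guarded call above fixes the handler's shape; grounded in that call site, the upper-layer callback is effectively:

/* Signature implied by ibqp->event_handler(&event, ibqp->qp_context):
 * the consumer receives the filled-in ib_event plus its own cookie.
 */
typedef void (*example_qp_event_handler_t)(struct ib_event *event,
					   void *qp_context);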
Example #29

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 vport_id,
		      u32 vport_rl)
{
	u32 inc_val = QM_RL_INC_VAL(vport_rl);

	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
		return -1;
	}

	qed_wr(p_hwfn, p_ptt,
	       QM_REG_RLGLBLCRD + vport_id * 4,
	       QM_RL_CRD_REG_SIGN_BIT);
	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
Example #30
File: qede_filter.c Project: krzk/linux
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}