Example No. 1
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
{
	struct tp_tcp_stats v4, v6;
	struct tp_rdma_stats rdma_stats;
	struct tp_err_stats err_stats;
	struct tp_usm_stats usm_stats;
	u64 val1, val2;

	memset(s, 0, sizeof(*s));

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, &v4, &v6);
	t4_tp_get_rdma_stats(adap, &rdma_stats);
	t4_get_usm_stats(adap, &usm_stats);
	t4_tp_get_err_stats(adap, &err_stats);
	spin_unlock(&adap->stats_lock);

	s->db_drop = adap->db_stats.db_drop;
	s->db_full = adap->db_stats.db_full;
	s->db_empty = adap->db_stats.db_empty;

	s->tcp_v4_out_rsts = v4.tcp_out_rsts;
	s->tcp_v4_in_segs = v4.tcp_in_segs;
	s->tcp_v4_out_segs = v4.tcp_out_segs;
	s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
	s->tcp_v6_out_rsts = v6.tcp_out_rsts;
	s->tcp_v6_in_segs = v6.tcp_in_segs;
	s->tcp_v6_out_segs = v6.tcp_out_segs;
	s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;

	if (is_offload(adap)) {
		s->frames = usm_stats.frames;
		s->octets = usm_stats.octets;
		s->drops = usm_stats.drops;
		s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
		s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
	}

	s->ofld_no_neigh = err_stats.ofld_no_neigh;
	s->ofld_cong_defer = err_stats.ofld_cong_defer;

	if (!is_t4(adap->params.chip)) {
		int v;

		v = t4_read_reg(adap, SGE_STAT_CFG_A);
		if (STATSOURCE_T5_G(v) == 7) {
			val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
			val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
			s->wc_success = val1 - val2;
			s->wc_fail = val2;
		}
	}
}
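
A minimal usage sketch: only collect_adapter_stats() and the field names come from the function above; the caller itself is hypothetical.

/* Hypothetical caller: take a snapshot and report a few of the fields.
 * Assumes the same cxgb4 headers as collect_adapter_stats() above.
 */
static void report_adapter_stats(struct adapter *adap)
{
	struct adapter_stats s;

	collect_adapter_stats(adap, &s);
	dev_info(adap->pdev_dev,
		 "TCPv4: out_segs=%llu retrans=%llu, doorbell drops=%llu\n",
		 s.tcp_v4_out_segs, s.tcp_v4_retrans_segs, s.db_drop);
}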
Example No. 2
/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's).  Return an error if it doesn't
 * become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}
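
A hedged sketch of the intended call site: probe-style code checks readiness before talking to the device at all. Everything except t4vf_wait_dev_ready() below is illustrative.

/* Illustrative probe fragment: abort early if the VF never becomes
 * ready (the "who am I" register keeps reading back as a bus-error
 * pattern).
 */
static int example_vf_setup(struct adapter *adapter)
{
	int err;

	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;	/* -EIO: device never came out of reset */

	/* ... safe to start the firmware mailbox handshake here ... */
	return 0;
}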
Example No. 3
static int
cxgbei_ddp_init(struct adapter *sc, struct cxgbei_data *ci)
{
	int nppods, bits, max_sz, rc;
	static const u_int pgsz_order[] = {0, 1, 2, 3};

	MPASS(sc->vres.iscsi.size > 0);

	ci->llimit = sc->vres.iscsi.start;
	ci->ulimit = sc->vres.iscsi.start + sc->vres.iscsi.size - 1;
	max_sz = G_MAXRXDATA(t4_read_reg(sc, A_TP_PARA_REG2));

	nppods = sc->vres.iscsi.size >> IPPOD_SIZE_SHIFT;
	if (nppods <= 1024)
		return (ENXIO);

	bits = fls(nppods);
	if (bits > IPPOD_IDX_MAX_SIZE)
		bits = IPPOD_IDX_MAX_SIZE;
	nppods = (1 << (bits - 1)) - 1;

	rc = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, UINT32_MAX, 8, BUS_SPACE_MAXSIZE,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &ci->ulp_ddp_tag);
	if (rc != 0) {
		device_printf(sc->dev, "%s: failed to create DMA tag: %u.\n",
		    __func__, rc);
		return (rc);
	}

	ci->colors = malloc(nppods * sizeof(char), M_CXGBE, M_NOWAIT | M_ZERO);
	ci->gl_map = malloc(nppods * sizeof(struct cxgbei_ulp2_gather_list *),
	    M_CXGBE, M_NOWAIT | M_ZERO);
	if (ci->colors == NULL || ci->gl_map == NULL) {
		bus_dma_tag_destroy(ci->ulp_ddp_tag);
		free(ci->colors, M_CXGBE);
		free(ci->gl_map, M_CXGBE);
		return (ENOMEM);
	}

	mtx_init(&ci->map_lock, "ddp lock", NULL, MTX_DEF | MTX_DUPOK);
	ci->max_txsz = ci->max_rxsz = min(max_sz, ULP2_MAX_PKT_SIZE);
	ci->nppods = nppods;
	ci->idx_last = nppods;
	ci->idx_bits = bits;
	ci->idx_mask = (1 << bits) - 1;
	ci->rsvd_tag_mask = (1 << (bits + IPPOD_IDX_SHIFT)) - 1;

	ci->tag_format.sw_bits = bits;
	ci->tag_format.rsvd_bits = bits;
	ci->tag_format.rsvd_shift = IPPOD_IDX_SHIFT;
	ci->tag_format.rsvd_mask = ci->idx_mask;

	t4_iscsi_init(sc, ci->idx_mask << IPPOD_IDX_SHIFT, pgsz_order);

	return (rc);
}
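
The pod-count arithmetic above is worth unpacking: fls(nppods) finds the highest set bit, the usable count is then clamped to 2^(bits-1) - 1, and idx_mask spans bits bits. A standalone sketch of just that math (userspace C; fls re-implemented here purely for illustration):

#include <stdio.h>

/* Userspace stand-in for fls(): position of the highest set bit,
 * 1-based; 0 when no bit is set.
 */
static int fls_demo(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int nppods = 6000;	/* e.g. vres.iscsi.size >> IPPOD_SIZE_SHIFT */
	int bits = fls_demo(nppods);	/* 13 for 6000 */

	nppods = (1u << (bits - 1)) - 1;	/* clamp to 2^(bits-1) - 1 = 4095 */
	printf("usable pods=%u idx_mask=0x%x\n", nppods, (1u << bits) - 1);
	return 0;
}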
Example No. 4
int init_hash_filter(struct adapter *adap)
{
	/* On T6, verify the necessary register configs and warn the user in
	 * case of improper config
	 */
	if (is_t6(adap->params.chip)) {
		if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
			goto err;

		if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
			goto err;
	} else {
		dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
		return -EINVAL;
	}
	adap->params.hash_filter = 1;
	return 0;
err:
	dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
	return -EINVAL;
}
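
For context, a hedged sketch of how an init path might consume the return value; use_hash_filters and the wrapper function are hypothetical, only init_hash_filter() is from above.

/* Illustrative init fragment: try to enable hash-filter mode and fall
 * back gracefully.  use_hash_filters is a hypothetical module knob.
 */
static void example_filter_init(struct adapter *adap, bool use_hash_filters)
{
	if (use_hash_filters && init_hash_filter(adap) < 0)
		dev_info(adap->pdev_dev,
			 "hash filters unavailable, using TCAM filters only\n");
}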
Example No. 5
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
    uint32_t tcb_base, off, i, j;

    /* Dump TCB for the tid */
    tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
    t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2),
                 tcb_base + tid * TCB_SIZE);
    t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));	/* read back to flush the offset write */
    off = 0;
    printf("\n");
    for (i = 0; i < 4; i++) {
        uint32_t buf[8];
        for (j = 0; j < 8; j++, off += 4)
            buf[j] = htonl(t4_read_reg(sc, MEMWIN2_BASE + off));

        printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
               buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
               buf[7]);
    }
}
Example No. 6
static void
read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
    uint32_t *max_rx_pdu_len)
{
	uint32_t tx_len, rx_len, r, v;

	rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
	tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);

	r = t4_read_reg(sc, A_TP_PARA_REG2);
	rx_len = min(rx_len, G_MAXRXDATA(r));
	tx_len = min(tx_len, G_MAXRXDATA(r));

	r = t4_read_reg(sc, A_TP_PARA_REG7);
	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
	rx_len = min(rx_len, v);
	tx_len = min(tx_len, v);

	/* Remove after FW_FLOWC_MNEM_TXDATAPLEN_MAX fix in firmware. */
	tx_len = min(tx_len, 3 * 4096);

	*max_tx_pdu_len = rounddown2(tx_len, 512);
	*max_rx_pdu_len = rounddown2(rx_len, 512);
}
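
The final step relies on rounddown2(), which truncates to a power-of-two multiple. The macro below matches the usual FreeBSD sys/param.h definition; the rest is a toy demonstration:

#include <stdint.h>
#include <stdio.h>

/* rounddown2(x, y): round x down to a multiple of y, y a power of 2. */
#define rounddown2(x, y)	((x) & ~((y) - 1))

int main(void)
{
	uint32_t tx_len = 3 * 4096;	/* 12288: the interim firmware cap above */

	printf("%u -> %u\n", tx_len, rounddown2(tx_len, 512));	/* 12288 -> 12288 */
	printf("%u -> %u\n", 12345u, rounddown2(12345u, 512));	/* 12345 -> 12288 */
	return 0;
}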
Example No. 7
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = PCIE_FW_MASTER_M + 1;
	u32 pcie_fw;
	unsigned int master;
	u8 master_vld = 0;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	master = PCIE_FW_MASTER_G(pcie_fw);
	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
		master_vld = 1;
	/* If someone else (e.g. csiostor) is the master, we can't flash. */
	if (master_vld && (master != adap->pf)) {
		dev_warn(adap->pdev_dev,
			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
		return -EOPNOTSUPP;
	}

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* If the adapter has been fully initialized then we'll go ahead and
	 * try to get the firmware's cooperation in upgrading to the new
	 * firmware image otherwise we'll try to do the entire job from the
	 * host ... and we always "force" the operation in this path.
	 */
	if (adap->flags & FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev,
			 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
	return ret;
}
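
This handler is reached through the standard ethtool flash path (ethtool -f <iface> <image> from userspace). A sketch of the wiring, with the rest of the ops table omitted and the table name illustrative:

/* Sketch: set_flash() is installed as the .flash_device ethtool hook. */
static const struct ethtool_ops example_ethtool_ops = {
	/* ... get_drvinfo, get_link, and other handlers ... */
	.flash_device = set_flash,
};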
Example No. 8
/*
 * Initialize the software state of the iSCSI ULP driver.
 *
 * ENXIO means firmware didn't set up something that it was supposed to.
 */
static int
cxgbei_init(struct adapter *sc, struct cxgbei_data *ci)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct ppod_region *pr;
	uint32_t r;
	int rc;

	MPASS(sc->vres.iscsi.size > 0);
	MPASS(ci != NULL);

	rc = alloc_ci_counters(ci);
	if (rc != 0)
		return (rc);

	read_pdu_limits(sc, &ci->max_tx_pdu_len, &ci->max_rx_pdu_len);

	pr = &ci->pr;
	r = t4_read_reg(sc, A_ULP_RX_ISCSI_PSZ);
	rc = t4_init_ppod_region(pr, &sc->vres.iscsi, r, "iSCSI page pods");
	if (rc != 0) {
		device_printf(sc->dev,
		    "%s: failed to initialize the iSCSI page pod region: %u.\n",
		    __func__, rc);
		free_ci_counters(ci);
		return (rc);
	}

	r = t4_read_reg(sc, A_ULP_RX_ISCSI_TAGMASK);
	r &= V_ISCSITAGMASK(M_ISCSITAGMASK);
	if (r != pr->pr_tag_mask) {
		/*
		 * Recent firmware is supposed to set up the iSCSI tagmask,
		 * but we'll do it ourselves if the computed value doesn't
		 * match what's in the register.
		 */
		device_printf(sc->dev,
		    "tagmask 0x%08x does not match computed mask 0x%08x.\n", r,
		    pr->pr_tag_mask);
		t4_set_reg_field(sc, A_ULP_RX_ISCSI_TAGMASK,
		    V_ISCSITAGMASK(M_ISCSITAGMASK), pr->pr_tag_mask);
	}

	sysctl_ctx_init(&ci->ctx);
	oid = device_get_sysctl_tree(sc->dev);	/* dev.t5nex.X */
	children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(&ci->ctx, children, OID_AUTO, "iscsi", CTLFLAG_RD,
	    NULL, "iSCSI ULP statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_ok",
	    CTLFLAG_RD, &ci->ddp_setup_ok,
	    "# of times DDP buffer was setup successfully.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_error",
	    CTLFLAG_RD, &ci->ddp_setup_error,
	    "# of times DDP buffer setup failed.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_bytes",
	    CTLFLAG_RD, &ci->ddp_bytes, "# of bytes placed directly");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_pdus",
	    CTLFLAG_RD, &ci->ddp_pdus, "# of PDUs with data placed directly.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_bytes",
	    CTLFLAG_RD, &ci->fl_bytes, "# of data bytes delivered in freelist");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_pdus",
	    CTLFLAG_RD, &ci->fl_pdus,
	    "# of PDUs with data delivered in freelist");

	ci->ddp_threshold = 2048;
	SYSCTL_ADD_UINT(&ci->ctx, children, OID_AUTO, "ddp_threshold",
	    CTLFLAG_RW, &ci->ddp_threshold, 0, "Rx zero copy threshold");

	return (0);
}
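
The counters registered above land under the dev.t5nex.X.iscsi sysctl tree (per the comment in the function), so they can be read from userspace with sysctlbyname(3). A minimal sketch, assuming adapter unit 0:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ddp_bytes;
	size_t len = sizeof(ddp_bytes);

	/* OID assumes adapter unit 0; adjust "t5nex.0" as needed. */
	if (sysctlbyname("dev.t5nex.0.iscsi.ddp_bytes", &ddp_bytes, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("bytes placed directly: %llu\n",
	    (unsigned long long)ddp_bytes);
	return (0);
}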
Example No. 9
/**
 *	t4vf_wr_mbox_core - send a command to FW through the mailbox
 *	@adapter: the adapter
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the mailbox and waits for the
 *	FW to execute the command.  If @rpl is not %NULL it is used to store
 *	the FW's reply to the command.  The command and its optional reply
 *	are of the same length.  FW can take up to 500 ms to respond.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff; otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	int i, ms, delay_idx;
	const __be64 *p;
	u32 mbox_data = T4VF_MBDATA_BASE_ADDR;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV)
		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);         /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */

			/* return value in low-order little-endian word */
			v = t4_read_reg(adapter, mbox_data);
			if (FW_CMD_RETVAL_GET(v))
				dump_mbox(adapter, "FW Error", mbox_data);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const u32 *)cmd)
					 & FW_CMD_REQUEST) == 0);
				get_mbox_rpl(adapter, rpl, size, mbox_data);
				WARN_ON((be32_to_cpu(*(u32 *)rpl)
					 & FW_CMD_REQUEST) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER(MBOX_OWNER_NONE));
			return -FW_CMD_RETVAL_GET(v);
		}
	}

	/*
	 * We timed out.  Return the error ...
	 */
	dump_mbox(adapter, "FW Timeout", mbox_data);
	return -ETIMEDOUT;
}
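
The wait loop is the subtle part: with sleep_ok the per-iteration delay walks up the delay[] table and then sticks at its last entry, so the timeout budget is consumed in progressively coarser steps (few wakeups, but a fast first response). A standalone sketch of just the scheduling arithmetic; the timeout constant here is illustrative, not the driver's FW_CMD_MAX_TIMEOUT:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* Same backoff table as t4vf_wr_mbox_core() above. */
	static const int delay[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100 };
	const int timeout_ms = 500;	/* illustrative timeout budget */
	int i, ms, delay_idx = 0, polls = 0;

	ms = delay[0];
	for (i = 0; i < timeout_ms; i += ms) {
		ms = delay[delay_idx];
		if (delay_idx < (int)ARRAY_SIZE(delay) - 1)
			delay_idx++;
		polls++;	/* one mailbox poll per sleep in the real code */
	}
	printf("%d polls cover %d ms\n", polls, timeout_ms);	/* 12 polls cover 500 ms */
	return 0;
}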
Example No. 10
static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
	unsigned int tcb_base, tcbaddr;
	unsigned int word_offset;
	struct filter_entry *f;
	__be64 be64_byte_count;
	int ret;

	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
	if (is_hashfilter(adapter) && hash) {
		if (fidx < adapter->tids.ntids) {
			f = adapter->tids.tid_tab[fidx];
			if (!f)
				return -EINVAL;
		} else {
			return -E2BIG;
		}
	} else {
		if ((fidx != (adapter->tids.nftids +
			      adapter->tids.nsftids - 1)) &&
		    fidx >= adapter->tids.nftids)
			return -E2BIG;

		f = &adapter->tids.ftid_tab[fidx];
		if (!f->valid)
			return -EINVAL;
	}
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	spin_lock(&adapter->win0_lock);
	if (is_t4(adapter->params.chip)) {
		__be64 be64_count;

		/* T4 doesn't maintain byte counts in hw */
		*bytes = 0;

		/* Get pkts */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_count),
				   (__be32 *)&be64_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = be64_to_cpu(be64_count);
	} else {
		__be32 be32_count;

		/* Get bytes */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_byte_count),
				   &be64_byte_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*bytes = be64_to_cpu(be64_byte_count);

		/* Get pkts */
		word_offset = 6;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be32_count),
				   &be32_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = (u64)be32_to_cpu(be32_count);
	}

out:
	spin_unlock(&adapter->win0_lock);
	return ret;
}
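
A hedged caller sketch; only get_filter_count() is from the code above. Note that on T4 hardware the byte count is always reported as 0, as the function itself documents.

/* Illustrative: read the hit counters for a regular (non-hash) filter. */
static void example_show_filter(struct adapter *adapter, unsigned int fidx)
{
	u64 pkts = 0, bytes = 0;

	if (get_filter_count(adapter, fidx, &pkts, &bytes, false) == 0)
		dev_info(adapter->pdev_dev,
			 "filter %u: %llu packets, %llu bytes\n",
			 fidx, pkts, bytes);
}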
Example No. 11
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1.  So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	default:
		break;
	}

	return len;
}
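
A typical consumer of this routine sizes a collection buffer before the actual dump. A hedged sketch; the entity list and the wrapper are illustrative, the CUDBG_* constants are from the switch above:

/* Illustrative: total buffer size needed for a small set of entities. */
static u32 example_dump_buffer_size(struct adapter *adap)
{
	static const u32 entities[] = {
		CUDBG_REG_DUMP, CUDBG_DEV_LOG, CUDBG_MBOX_LOG,
	};
	u32 i, len = 0;

	for (i = 0; i < ARRAY_SIZE(entities); i++)
		len += cxgb4_get_entity_length(adap, entities[i]);
	return len;
}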
Example No. 12
/**
 *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 *	@adapter: the adapter
 *
 *	Retrieves various core SGE parameters in the form of hardware SGE
 *	register values.  The caller is responsible for decoding these as
 *	needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
    struct sge_params *sp = &adapter->params.sge;
    u32 params[7], vals[7];
    u32 whoami;
    unsigned int pf, s_hps;
    int i, v;

    params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
    params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
    params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
    params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
    params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
    params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
    params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                 V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
    v = t4vf_query_params(adapter, 7, params, vals);
    if (v != FW_SUCCESS)
        return v;

    sp->sge_control = vals[0];
    sp->counter_val[0] = G_THRESHOLD_0(vals[6]);
    sp->counter_val[1] = G_THRESHOLD_1(vals[6]);
    sp->counter_val[2] = G_THRESHOLD_2(vals[6]);
    sp->counter_val[3] = G_THRESHOLD_3(vals[6]);
    sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(vals[2]));
    sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(vals[2]));
    sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(vals[3]));
    sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(vals[3]));
    sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(vals[4]));
    sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(vals[4]));

    sp->fl_starve_threshold = G_EGRTHRESHOLD(vals[5]) * 2 + 1;
    if (is_t4(adapter))
        sp->fl_starve_threshold2 = sp->fl_starve_threshold;
    else
        sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(vals[5]) * 2 +
                                   1;

    /*
     * We need the Queues/Page and Host Page Size for our VF.
     * This is based on the PF from which we're instantiated.
     */
    whoami = t4_read_reg(adapter, VF_PL_REG(A_PL_VF_WHOAMI));
    pf = G_SOURCEPF(whoami);

    s_hps = (S_HOSTPAGESIZEPF0 +
             (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * pf);
    sp->page_shift = ((vals[1] >> s_hps) & M_HOSTPAGESIZEPF0) + 10;

    for (i = 0; i < SGE_FLBUF_SIZES; i++) {
        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0 + (4 * i)));
        v = t4vf_query_params(adapter, 1, params, vals);
        if (v != FW_SUCCESS)
            return v;

        sp->sge_fl_buffer_size[i] = vals[0];
    }

    /*
     * T4 uses a single control field to specify both the PCIe Padding and
     * Packing Boundary.  T5 introduced the ability to specify these
     * separately, with the Padding Boundary in SGE_CONTROL and the Packing
     * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
     * SGE_CONTROL2 in order to determine how ingress packet data will be
     * laid out in Packed Buffer Mode.  Unfortunately, older versions of
     * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
     * failure grabbing it we throw an error since we can't figure out the
     * right value.
     */
    sp->spg_len = sp->sge_control & F_EGRSTATUSPAGESIZE ? 128 : 64;
    sp->fl_pktshift = G_PKTSHIFT(sp->sge_control);
    sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) + 5);
    if (is_t4(adapter))
        sp->pack_boundary = sp->pad_boundary;
    else {
        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
        v = t4vf_query_params(adapter, 1, params, vals);
        if (v != FW_SUCCESS) {
            CH_ERR(adapter, "Unable to get SGE Control2; "
                   "probably old firmware.\n");
            return v;
        }
        if (G_INGPACKBOUNDARY(vals[0]) == 0)
            sp->pack_boundary = 16;
        else
            sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(vals[0]) +
                                      5);
    }

    /*
     * For T5 and later we want to use the new BAR2 Doorbells.
     * Unfortunately, older firmware didn't allow these registers to be
     * read.
     */
    if (!is_t4(adapter)) {
        unsigned int s_qpp;

        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
        v = t4vf_query_params(adapter, 2, params, vals);
        if (v != FW_SUCCESS) {
            CH_WARN(adapter, "Unable to get VF SGE Queues/Page; "
                    "probably old firmware.\n");
            return v;
        }

        s_qpp = (S_QUEUESPERPAGEPF0 +
                 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * pf);
        sp->eq_s_qpp = ((vals[0] >> s_qpp) & M_QUEUESPERPAGEPF0);
        sp->iq_s_qpp = ((vals[1] >> s_qpp) & M_QUEUESPERPAGEPF0);
    }

    return 0;
}