Example #1
0
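/*
 * Report the first valid tracing filter at or after t->idx, translating the
 * hardware channel in the filter back to a port number when it is below 8.
 * idx is set to 0xff if no valid filter is found.
 */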
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= port < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}
Example #2
0
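/* if_init handler for the netmap ifnet; does the real work synchronized. */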
static void
cxgbe_nm_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(pi);
	end_synchronized_op(sc, 0);

	return;
}
Example #3
0
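/* Program the firmware packet scheduler's global (minmax) configuration. */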
static int
set_sched_class_config(struct adapter *sc, int minmax)
{
	int rc;

	if (minmax < 0)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
	if (rc)
		return (rc);
	rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
	end_synchronized_op(sc, 0);

	return (rc);
}
Example #4
0
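/*
 * Task handler that pushes every class-rate-limit entry flagged
 * TX_CLRL_REFRESH out to the firmware, one port and class at a time.
 */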
static void
update_tx_sched(void *context, int pending)
{
	int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
	struct port_info *pi;
	struct tx_cl_rl_params *tc;
	struct adapter *sc = context;
	const int n = sc->chip_params->nsched_cls;

	mtx_lock(&sc->tc_lock);
	for_each_port(sc, i) {
		pi = sc->port[i];
		tc = &pi->sched_params->cl_rl[0];
		for (j = 0; j < n; j++, tc++) {
			MPASS(mtx_owned(&sc->tc_lock));
			if ((tc->flags & TX_CLRL_REFRESH) == 0)
				continue;

			mode = tc->mode;
			rateunit = tc->rateunit;
			ratemode = tc->ratemode;
			maxrate = tc->maxrate;
			pktsize = tc->pktsize;
			mtx_unlock(&sc->tc_lock);

			if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4utxs") != 0) {
				mtx_lock(&sc->tc_lock);
				continue;
			}
			rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
			    FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
			    ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
			    1);
			end_synchronized_op(sc, 0);

			mtx_lock(&sc->tc_lock);
			if (rc != 0) {
				tc->flags |= TX_CLRL_ERROR;
			} else if (tc->mode == mode &&
			    tc->rateunit == rateunit &&
			    tc->maxrate == maxrate &&
			    tc->pktsize == pktsize) {
				tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
			}
		}
	}
	mtx_unlock(&sc->tc_lock);
}
Example #5
0
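/* ioctl handler for the netmap ifnet. */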
static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(pi->nm_ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->nmif_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(pi);
			pi->nmif_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
Example #6
0
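/*
 * Validate a scheduling class request, translate it to the firmware's
 * nomenclature, and program the class with t4_sched_params().
 */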
static int
set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
    int sleep_ok)
{
	int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
	struct port_info *pi;
	struct tx_cl_rl_params *tc;

	if (p->level == SCHED_CLASS_LEVEL_CL_RL)
		fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
	else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
		fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
	else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
		fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
	else
		return (EINVAL);

	if (p->mode == SCHED_CLASS_MODE_CLASS)
		fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
	else if (p->mode == SCHED_CLASS_MODE_FLOW)
		fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
	else
		return (EINVAL);

	if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
		fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
	else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
		fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
	else
		return (EINVAL);

	if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
		fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
	else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
		fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
	else
		return (EINVAL);

	/* Vet our parameters ... */
	if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
		return (ERANGE);

	pi = sc->port[sc->chan_map[p->channel]];
	if (pi == NULL)
		return (ENXIO);
	MPASS(pi->tx_chan == p->channel);
	top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */

	if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) ||
	    !in_range(p->minrate, 0, top_speed) ||
	    !in_range(p->maxrate, 0, top_speed) ||
	    !in_range(p->weight, 0, 100))
		return (ERANGE);

	/*
	 * Translate any unset parameters into the firmware's
	 * nomenclature and/or fail the call if the parameters
	 * are required ...
	 */
	if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
		return (EINVAL);

	if (p->minrate < 0)
		p->minrate = 0;
	if (p->maxrate < 0) {
		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
		    p->level == SCHED_CLASS_LEVEL_CH_RL)
			return (EINVAL);
		else
			p->maxrate = 0;
	}
	if (p->weight < 0) {
		if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
			return (EINVAL);
		else
			p->weight = 0;
	}
	if (p->pktsize < 0) {
		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
		    p->level == SCHED_CLASS_LEVEL_CH_RL)
			return (EINVAL);
		else
			p->pktsize = 0;
	}

	rc = begin_synchronized_op(sc, NULL,
	    sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
	if (rc)
		return (rc);
	if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
		tc = &pi->sched_params->cl_rl[p->cl];
		if (tc->refcount > 0) {
			rc = EBUSY;
			goto done;
		} else {
			tc->ratemode = fw_ratemode;
			tc->rateunit = fw_rateunit;
			tc->mode = fw_mode;
			tc->maxrate = p->maxrate;
			tc->pktsize = p->pktsize;
		}
	}
	rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
	    fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
	    p->weight, p->pktsize, sleep_ok);
	if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) {
		/*
		 * Unknown state at this point, see parameters in tc for what
		 * was attempted.
		 */
		tc->flags |= TX_CLRL_ERROR;
	}
done:
	end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);

	return (rc);
}
Example #7
0
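/*
 * Install, enable, or disable the tracing filter at t->idx.  Port numbers
 * below 8 are translated to hardware channels before the filter is written.
 */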
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	} else {
		/* Values 8-19 pass the range check above; use them as-is. */
		tp.port = t->tp.port;
	}
	tpp = &tp;
done:
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}
Example #8
0
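/*
 * if_clone create handler: create and attach the tracer ifnet for the
 * adapter whose name matches the requested interface name.
 */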
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}