int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
        int rc, i, enabled;
        struct trace_params tp;

        if (t->idx >= NTRACE) {
                t->idx = 0xff;
                t->enabled = 0;
                t->valid = 0;
                return (0);
        }

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4gett");
        if (rc)
                return (rc);

        /* Scan forward from the requested index for the next valid filter. */
        for (i = t->idx; i < NTRACE; i++) {
                if (isset(&sc->tracer_valid, i)) {
                        t4_get_trace_filter(sc, &tp, i, &enabled);
                        t->idx = i;
                        t->enabled = enabled;
                        t->valid = 1;
                        memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
                        memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
                        t->tp.snap_len = tp.snap_len;
                        t->tp.min_len = tp.min_len;
                        t->tp.skip_ofst = tp.skip_ofst;
                        t->tp.skip_len = tp.skip_len;
                        t->tp.invert = tp.invert;

                        /* convert channel to port iff 0 <= port < 8. */
                        if (tp.port < 4)
                                t->tp.port = sc->chan_map[tp.port];
                        else if (tp.port < 8)
                                t->tp.port = sc->chan_map[tp.port - 4] + 4;
                        else
                                t->tp.port = tp.port;

                        goto done;
                }
        }
        t->idx = 0xff;
        t->enabled = 0;
        t->valid = 0;
done:
        end_synchronized_op(sc, LOCK_HELD);

        return (rc);
}

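/*
 * Illustrative sketch, not part of the driver: walking every valid tracer
 * with t4_get_tracer().  The function searches forward from t->idx and
 * reports back the index it found; t->idx == 0xff means there is no valid
 * filter at or after the requested index.  The helper below and its names
 * are assumptions made for illustration only.
 */
static void
dump_all_tracers(struct adapter *sc)
{
        struct t4_tracer t;
        uint8_t idx;

        for (idx = 0; idx < NTRACE; idx = t.idx + 1) {
                t.idx = idx;
                if (t4_get_tracer(sc, &t) != 0 || t.idx == 0xff)
                        break;
                printf("tracer %u: enabled=%u snap_len=%u port=%u\n",
                    t.idx, t.enabled, t.tp.snap_len, t.tp.port);
        }
}
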
static void
cxgbe_nm_init(void *arg)
{
        struct port_info *pi = arg;
        struct adapter *sc = pi->adapter;

        if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
                return;
        cxgbe_nm_init_synchronized(pi);
        end_synchronized_op(sc, 0);
}

static int
set_sched_class_config(struct adapter *sc, int minmax)
{
        int rc;

        if (minmax < 0)
                return (EINVAL);

        rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
        if (rc)
                return (rc);
        rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
        end_synchronized_op(sc, 0);

        return (rc);
}

static void
update_tx_sched(void *context, int pending)
{
        int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
        struct port_info *pi;
        struct tx_cl_rl_params *tc;
        struct adapter *sc = context;
        const int n = sc->chip_params->nsched_cls;

        mtx_lock(&sc->tc_lock);
        for_each_port(sc, i) {
                pi = sc->port[i];
                tc = &pi->sched_params->cl_rl[0];
                for (j = 0; j < n; j++, tc++) {
                        MPASS(mtx_owned(&sc->tc_lock));
                        if ((tc->flags & TX_CLRL_REFRESH) == 0)
                                continue;

                        /* Snapshot the parameters, then drop the lock for the firmware call. */
                        mode = tc->mode;
                        rateunit = tc->rateunit;
                        ratemode = tc->ratemode;
                        maxrate = tc->maxrate;
                        pktsize = tc->pktsize;
                        mtx_unlock(&sc->tc_lock);

                        if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
                            "t4utxs") != 0) {
                                mtx_lock(&sc->tc_lock);
                                continue;
                        }
                        rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
                            FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
                            ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
                            1);
                        end_synchronized_op(sc, 0);

                        mtx_lock(&sc->tc_lock);
                        if (rc != 0) {
                                tc->flags |= TX_CLRL_ERROR;
                        } else if (tc->mode == mode &&
                            tc->rateunit == rateunit &&
                            tc->maxrate == maxrate &&
                            tc->pktsize == pktsize) {
                                /* Only clear REFRESH if the class wasn't modified while unlocked. */
                                tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
                        }
                }
        }
        mtx_unlock(&sc->tc_lock);
}

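/*
 * Illustrative sketch, not part of the driver: update_tx_sched() has the
 * standard taskqueue(9) handler signature, so a deferred refresh can be
 * requested by marking a class TX_CLRL_REFRESH under tc_lock and enqueueing
 * the task.  The task object, its initialization point, and this helper are
 * assumptions made for illustration only.
 */
static struct task tc_task;     /* TASK_INIT(&tc_task, 0, update_tx_sched, sc) at attach */

static void
request_cl_rl_refresh(struct adapter *sc, struct port_info *pi, int cl)
{

        mtx_lock(&sc->tc_lock);
        pi->sched_params->cl_rl[cl].flags |= TX_CLRL_REFRESH;
        mtx_unlock(&sc->tc_lock);
        taskqueue_enqueue(taskqueue_thread, &tc_task);
}
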
static void
match_name(struct adapter *sc, void *arg)
{
        struct match_rr *mrr = arg;

        if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
                return;
        KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
            __func__, mrr->sc, sc, mrr->name));

        mrr->sc = sc;
        if (mrr->lock)
                mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
        else
                mrr->rc = 0;
}

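/*
 * Illustrative sketch, not part of the driver: match_name() is written as a
 * per-adapter callback, so an adapter can be looked up by name with
 * t4_iterate().  Only the match_rr fields that match_name() itself reads and
 * writes (name, lock, sc, rc) are used; the helper below is an assumption
 * made for illustration only.
 */
static struct adapter *
example_find_adapter(const char *name)
{
        struct match_rr mrr;

        mrr.name = name;
        mrr.lock = 0;           /* don't start a synchronized op on a match */
        mrr.sc = NULL;
        mrr.rc = ENOENT;
        t4_iterate(match_name, &mrr);

        return (mrr.sc);        /* NULL if no adapter has this nameunit */
}
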
static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags;
        struct port_info *pi = ifp->if_softc;
        struct adapter *sc = pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        MPASS(pi->nm_ifp == ifp);

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MTU);
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
                if (rc)
                        return (rc);
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = pi->nmif_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        rc = update_mac_settings(ifp,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else
                                rc = cxgbe_nm_init_synchronized(pi);
                        pi->nmif_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = cxgbe_nm_uninit_synchronized(pi);
                end_synchronized_op(sc, 0);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
                break;

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}

static int
set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
    int sleep_ok)
{
        int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
        struct port_info *pi;
        struct tx_cl_rl_params *tc;

        if (p->level == SCHED_CLASS_LEVEL_CL_RL)
                fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
        else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
                fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
        else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
                fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
        else
                return (EINVAL);

        if (p->mode == SCHED_CLASS_MODE_CLASS)
                fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
        else if (p->mode == SCHED_CLASS_MODE_FLOW)
                fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
        else
                return (EINVAL);

        if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
                fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
        else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
                fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
        else
                return (EINVAL);

        if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
                fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
        else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
                fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
        else
                return (EINVAL);

        /* Vet our parameters ... */
        if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
                return (ERANGE);

        pi = sc->port[sc->chan_map[p->channel]];
        if (pi == NULL)
                return (ENXIO);
        MPASS(pi->tx_chan == p->channel);
        top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */

        if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) ||
            !in_range(p->minrate, 0, top_speed) ||
            !in_range(p->maxrate, 0, top_speed) ||
            !in_range(p->weight, 0, 100))
                return (ERANGE);

        /*
         * Translate any unset parameters into the firmware's
         * nomenclature and/or fail the call if the parameters
         * are required ...
         */
        if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
                return (EINVAL);
        if (p->minrate < 0)
                p->minrate = 0;
        if (p->maxrate < 0) {
                if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
                    p->level == SCHED_CLASS_LEVEL_CH_RL)
                        return (EINVAL);
                else
                        p->maxrate = 0;
        }
        if (p->weight < 0) {
                if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
                        return (EINVAL);
                else
                        p->weight = 0;
        }
        if (p->pktsize < 0) {
                if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
                    p->level == SCHED_CLASS_LEVEL_CH_RL)
                        return (EINVAL);
                else
                        p->pktsize = 0;
        }

        rc = begin_synchronized_op(sc, NULL,
            sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
        if (rc)
                return (rc);
        if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
                tc = &pi->sched_params->cl_rl[p->cl];
                if (tc->refcount > 0) {
                        rc = EBUSY;
                        goto done;
                } else {
                        tc->ratemode = fw_ratemode;
                        tc->rateunit = fw_rateunit;
                        tc->mode = fw_mode;
                        tc->maxrate = p->maxrate;
                        tc->pktsize = p->pktsize;
                }
        }
        rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
            fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
            p->weight, p->pktsize, sleep_ok);
        if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) {
                /*
                 * Unknown state at this point, see parameters in tc for what
                 * was attempted.
                 */
                tc->flags |= TX_CLRL_ERROR;
        }
done:
        end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);

        return (rc);
}

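/*
 * Illustrative sketch, not part of the driver: filling in a
 * t4_sched_class_params request for a class-level rate limiter (CL_RL) and
 * handing it to set_sched_class_params().  Only fields and constants that
 * set_sched_class_params() itself consumes are used; the helper and its
 * parameter choices are assumptions made for illustration only.
 */
static int
example_set_cl_rl(struct adapter *sc, int channel, int cl, int kbps)
{
        struct t4_sched_class_params p;

        memset(&p, 0, sizeof(p));
        p.level = SCHED_CLASS_LEVEL_CL_RL;
        p.mode = SCHED_CLASS_MODE_CLASS;
        p.rateunit = SCHED_CLASS_RATEUNIT_BITS;
        p.ratemode = SCHED_CLASS_RATEMODE_ABS;
        p.channel = channel;    /* validated against nchan above */
        p.cl = cl;              /* scheduling class index */
        p.minrate = 0;
        p.maxrate = kbps;       /* absolute limit in Kbps, capped by top_speed */
        p.weight = 0;           /* not used at the CL_RL level */
        p.pktsize = 1500;       /* required for CL_RL */

        return (set_sched_class_params(sc, &p, 1));
}
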
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
        int rc;
        struct trace_params tp, *tpp;

        if (t->idx >= NTRACE)
                return (EINVAL);

        rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
            "t4sett");
        if (rc)
                return (rc);

        /*
         * If no tracing filter is specified this time then check if the filter
         * at the index is valid anyway because it was set previously.  If so
         * then this is a legitimate enable/disable operation.
         */
        if (t->valid == 0) {
                if (isset(&sc->tracer_valid, t->idx))
                        tpp = NULL;     /* leave the existing filter untouched */
                else
                        rc = EINVAL;
                goto done;
        }

        if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
            t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
            t->tp.skip_ofst > M_TFOFFSET) {
                rc = EINVAL;
                goto done;
        }

        memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
        memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
        tp.snap_len = t->tp.snap_len;
        tp.min_len = t->tp.min_len;
        tp.skip_ofst = t->tp.skip_ofst;
        tp.skip_len = t->tp.skip_len;
        tp.invert = !!t->tp.invert;

        /* convert port to channel iff 0 <= port < 8. */
        if (t->tp.port < 4) {
                if (sc->port[t->tp.port] == NULL) {
                        rc = EINVAL;
                        goto done;
                }
                tp.port = sc->port[t->tp.port]->tx_chan;
        } else if (t->tp.port < 8) {
                if (sc->port[t->tp.port - 4] == NULL) {
                        rc = EINVAL;
                        goto done;
                }
                tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
        } else
                tp.port = t->tp.port;
        tpp = &tp;
done:
        if (rc == 0) {
                rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
                if (rc == 0) {
                        if (t->enabled) {
                                setbit(&sc->tracer_valid, t->idx);
                                if (sc->tracer_enabled == 0) {
                                        t4_set_reg_field(sc, A_MPS_TRC_CFG,
                                            F_TRCEN, F_TRCEN);
                                }
                                setbit(&sc->tracer_enabled, t->idx);
                        } else {
                                clrbit(&sc->tracer_enabled, t->idx);
                                if (sc->tracer_enabled == 0) {
                                        t4_set_reg_field(sc, A_MPS_TRC_CFG,
                                            F_TRCEN, 0);
                                }
                        }
                }
        }
        end_synchronized_op(sc, LOCK_HELD);

        return (rc);
}

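/*
 * Illustrative sketch, not part of the driver: programming trace filter 0 to
 * capture up to 128 bytes of every frame on port 0, then disabling the same
 * filter later without redefining it.  A zeroed data/mask matches all
 * packets.  Field names follow the t4_tracer usage above; the helper itself
 * is an assumption made for illustration only.
 */
static int
example_trace_port0(struct adapter *sc)
{
        struct t4_tracer t;
        int rc;

        memset(&t, 0, sizeof(t));
        t.idx = 0;              /* trace filter index, 0 <= idx < NTRACE */
        t.enabled = 1;
        t.valid = 1;            /* t.tp carries a new filter definition */
        t.tp.port = 0;          /* see the port/channel conversion above */
        t.tp.snap_len = 128;    /* capture at most 128 bytes per frame */
        t.tp.min_len = 0;
        t.tp.invert = 0;
        /* t.tp.data/mask left zeroed: match everything. */

        rc = t4_set_tracer(sc, &t);
        if (rc != 0)
                return (rc);

        /* Later: disable the same filter; valid == 0 keeps its definition. */
        t.valid = 0;
        t.enabled = 0;
        return (t4_set_tracer(sc, &t));
}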