Example #1
static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));

	/* Must already have received the header (but not the data). */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags == ICPF_RX_HDR);
	MPASS(icp->ip.ip_data_mbuf == NULL);

	m_adj(m, sizeof(*cpl));
	MPASS(icp->ip.ip_data_len == m->m_pkthdr.len);

	icp->icp_flags |= ICPF_RX_FLBUF;
	icp->ip.ip_data_mbuf = m;
	counter_u64_add(ci->fl_pdus, 1);
	counter_u64_add(ci->fl_bytes, m->m_pkthdr.len);

#if 0
	CTR3(KTR_CXGBE, "%s: tid %u, cpl->len %u", __func__, tid,
	    be16toh(cpl->len));
#endif

	return (0);
}
Example #2
static struct mbuf *
rtwn_report_intr(struct rtwn_usb_softc *uc, struct usb_xfer *xfer,
    struct rtwn_data *data)
{
	struct rtwn_softc *sc = &uc->uc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t *buf;
	int len;

	usbd_xfer_status(xfer, &len, NULL, NULL, NULL);

	if (__predict_false(len < sizeof(struct r92c_rx_stat))) {
		counter_u64_add(ic->ic_ierrors, 1);
		return (NULL);
	}

	buf = data->buf;
	switch (rtwn_classify_intr(sc, buf, len)) {
	case RTWN_RX_DATA:
		return (rtwn_rxeof(sc, buf, len));
	case RTWN_RX_TX_REPORT:
		if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
			/* shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s called while ratectl = %d!\n",
			    __func__, sc->sc_ratectl);
			break;
		}

		RTWN_NT_LOCK(sc);
		rtwn_handle_tx_report(sc, buf, len);
		RTWN_NT_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
		/*
		 * NB: this will be executed only when the 'report' bit is set.
		 */
		if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
			rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
		break;
	case RTWN_RX_OTHER:
		rtwn_handle_c2h_report(sc, buf, len);
		break;
	default:
		/* NOTREACHED */
		KASSERT(0, ("unknown Rx classification code"));
		break;
	}

	return (NULL);
}
Example #3
/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are within the rate limit, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *      is the number of events since the last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if (now - cr->cr_ticks >= hz) {
		/*
		 * Time to clear the structure: we are in the next second.
		 * First try an unlocked read, then proceed with the atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check whether another thread has just gone through
			 * the reset sequence before us.
			 */
			if (now - cr->cr_ticks >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock; another thread may be running
			 * counter_u64_zero(), so it is not safe to update,
			 * and we skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
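
A minimal usage sketch for the function above (the caller, the counter_rate instance, and the limit of 100 events/s are hypothetical; cr_rate must have been allocated with counter_u64_alloc() beforehand):

/* Hypothetical example: allow at most 100 events per second. */
static struct counter_rate example_cr;	/* cr_rate allocated elsewhere */

static void
example_event(void)
{
	int64_t val;

	val = counter_ratecheck(&example_cr, 100);
	if (val < 0)
		return;		/* Rate limit reached; suppress the event. */
	if (val > 0)
		printf("rate limited: %jd events in the previous second\n",
		    (intmax_t)val);
	/* ... handle the event ... */
}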
Example #4
/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	for (;;) {
		os.state = r->state;
		if (n >= space_available(r, os)) {
			counter_u64_add(r->drops, n);
			MPASS(os.flags != IDLE);
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 0);
			return (ENOBUFS);
		}
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		critical_enter();
		if (atomic_cmpset_64(&r->state, os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	do {
		os.state = ns.state = r->state;
		ns.pidx_tail = pidx_stop;
		ns.flags = BUSY;
	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
	critical_exit();
	counter_u64_add(r->enqueues, n);

	/*
	 * Turn into a consumer if some other thread isn't active as a consumer
	 * already.
	 */
	if (os.flags != BUSY)
		drain_ring(r, ns, os.flags, budget);

	return (0);
}
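
mp_ring_enqueue() relies on two index helpers not shown above. The following are sketches consistent with their use here (assumed implementations, not necessarily the originals; the ring keeps one slot empty to distinguish full from empty):

/* Free slots between the consumer index and the reserved producer head. */
static inline int
space_available(struct mp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	return (x - s.pidx_head + s.cidx);
}

/* Advance an index by n slots, wrapping around the end of the ring. */
static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}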
Example #5
/*
 * Caller passes in a state, with a guarantee that there is work to do and that
 * all items up to the pidx_tail in the state are visible.
 */
static void
drain_ring(struct mp_ring *r, union ring_state os, uint16_t prev, int budget)
{
	union ring_state ns;
	int n, pending, total;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;

	MPASS(os.flags == BUSY);
	MPASS(cidx != pidx);

	if (prev == IDLE)
		counter_u64_add(r->starts, 1);
	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx);
		if (n == 0) {
			critical_enter();
			do {
				os.state = ns.state = r->state;
				ns.cidx = cidx;
				ns.flags = STALLED;
			} while (atomic_cmpset_64(&r->state, os.state,
			    ns.state) == 0);
			critical_exit();
			if (prev != STALLED)
				counter_u64_add(r->stalls, 1);
			else if (total > 0) {
				counter_u64_add(r->restarts, 1);
				counter_u64_add(r->stalls, 1);
			}
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;

		/*
		 * We update the cidx only if we've caught up with the pidx, the
		 * real cidx is getting too far ahead of the one visible to
		 * everyone else, or we have exceeded our budget.
		 */
		if (cidx != pidx && pending < 64 && total < budget)
			continue;
		critical_enter();
		do {
			os.state = ns.state = r->state;
			ns.cidx = cidx;
			ns.flags = state_to_flags(ns, total >= budget);
		} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
		critical_exit();

		if (ns.flags == ABDICATED)
			counter_u64_add(r->abdications, 1);
		if (ns.flags != BUSY) {
			/* Wrong loop exit if we're going to stall. */
			MPASS(ns.flags != STALLED);
			if (prev == STALLED) {
				MPASS(total > 0);
				counter_u64_add(r->restarts, 1);
			}
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}
}
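
Both mp_ring routines treat the ring state as a single 64-bit word so that the producer, consumer, and flag fields can all be updated with one compare-and-swap. A layout consistent with the fields used above (a sketch, assuming four packed 16-bit fields):

union ring_state {
	struct {
		uint16_t pidx_head;	/* next slot to be reserved */
		uint16_t pidx_tail;	/* last slot published to consumers */
		uint16_t cidx;		/* next slot to be consumed */
		uint16_t flags;		/* IDLE, BUSY, STALLED, or ABDICATED */
	};
	uint64_t state;			/* all of the above, for atomic ops */
};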
Example #6
/*
 * Kernel module interface for updating udpstat.  The argument is an index
 * into udpstat treated as an array of u_long.  While this encodes the
 * general layout of udpstat into the caller, it doesn't encode its location,
 * so that future changes to add, for example, per-CPU stats support won't
 * cause binary compatibility problems for kernel modules.
 */
void
kmod_udpstat_inc(int statnum)
{

	counter_u64_add(VNET(udpstat)[statnum], 1);
}
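
Callers normally hide the index computation behind a macro keyed by field name; a sketch of that convenience macro (assumed to mirror KMOD_UDPSTAT_INC in udp_var.h):

/*
 * Map a field of struct udpstat to its index by offset, so kernel
 * modules never hard-code the field's position.
 */
#define	KMOD_UDPSTAT_INC(name)						\
	kmod_udpstat_inc(offsetof(struct udpstat, name) / sizeof(uint64_t))

/* Usage: KMOD_UDPSTAT_INC(udps_ipackets); */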
Example #7
static int
do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct socket *so;
	struct sockbuf *sb;
	struct tcpcb *tp;
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
	struct icl_pdu *ip;
	u_int pdu_len, val;

	MPASS(m == NULL);

	/* Must already be assembling a PDU. */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags & ICPF_RX_HDR);	/* Data is optional. */
	MPASS((icp->icp_flags & ICPF_RX_STATUS) == 0);

	pdu_len = be16toh(cpl->len);	/* includes everything. */
	val = be32toh(cpl->ddpvld);

#if 0
	CTR5(KTR_CXGBE,
	    "%s: tid %u, cpl->len %u, ddpvld 0x%08x, icp_flags 0x%08x",
	    __func__, tid, pdu_len, val, icp->icp_flags);
#endif

	icp->icp_flags |= ICPF_RX_STATUS;
	ip = &icp->ip;
	if (val & F_DDP_PADDING_ERR)
		icp->icp_flags |= ICPF_PAD_ERR;
	if (val & F_DDP_HDRCRC_ERR)
		icp->icp_flags |= ICPF_HCRC_ERR;
	if (val & F_DDP_DATACRC_ERR)
		icp->icp_flags |= ICPF_DCRC_ERR;
	if (val & F_DDP_PDU && ip->ip_data_mbuf == NULL) {
		MPASS((icp->icp_flags & ICPF_RX_FLBUF) == 0);
		MPASS(ip->ip_data_len > 0);
		icp->icp_flags |= ICPF_RX_DDP;
		counter_u64_add(ci->ddp_pdus, 1);
		counter_u64_add(ci->ddp_bytes, ip->ip_data_len);
	}

	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, pdu_len, inp->inp_flags);
		INP_WUNLOCK(inp);
		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}

	tp = intotcpcb(inp);
	MPASS(icp->icp_seq == tp->rcv_nxt);
	MPASS(tp->rcv_wnd >= pdu_len);
	tp->rcv_nxt += pdu_len;
	tp->rcv_wnd -= pdu_len;
	tp->t_rcvtime = ticks;

	/* update rx credits */
	toep->rx_credits += pdu_len;
	t4_rcvd(&toep->td->tod, tp);	/* XXX: sc->tom_softc.tod */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	icc = toep->ulpcb;
	if (__predict_false(icc == NULL || sb->sb_state & SBS_CANTRCVMORE)) {
		CTR5(KTR_CXGBE,
		    "%s: tid %u, excess rx (%d bytes), icc %p, sb_state 0x%x",
		    __func__, tid, pdu_len, icc, sb->sb_state);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);

		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}
	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ic = &icc->ic;
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	MPASS(m == NULL); /* was unused, we'll use it now. */
	m = sbcut_locked(sb, sbused(sb)); /* XXXNP: toep->sb_cc accounting? */
	if (__predict_false(m != NULL)) {
		int len = m_length(m, NULL);

		/*
		 * PDUs were received before the tid transitioned to ULP mode.
		 * Convert them to icl_cxgbei_pdus and send them to ICL before
		 * the PDU in icp/ip.
		 */
		CTR3(KTR_CXGBE, "%s: tid %u, %u bytes in so_rcv", __func__, tid,
		    len);

		/* XXXNP: needs to be rewritten. */
		if (len == sizeof(struct iscsi_bhs) ||
		    len == 4 + sizeof(struct iscsi_bhs)) {
			struct icl_cxgbei_pdu *icp0;
			struct icl_pdu *ip0;

			ip0 = icl_cxgbei_new_pdu(M_NOWAIT);
			if (ip0 == NULL)
				CXGBE_UNIMPLEMENTED("PDU allocation failure");
			icl_cxgbei_new_pdu_set_conn(ip0, ic);
			icp0 = ip_to_icp(ip0);
			icp0->icp_seq = 0; /* XXX */
			icp0->icp_flags = ICPF_RX_HDR | ICPF_RX_STATUS;
			m_copydata(m, 0, sizeof(struct iscsi_bhs), (void *)ip0->ip_bhs);
			STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip0, ip_next);
		}
		m_freem(m);
	}

	STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip, ip_next);
	if ((icc->rx_flags & RXF_ACTIVE) == 0) {
		struct cxgbei_worker_thread_softc *cwt = &cwt_softc[icc->cwt];

		mtx_lock(&cwt->cwt_lock);
		icc->rx_flags |= RXF_ACTIVE;
		TAILQ_INSERT_TAIL(&cwt->rx_head, icc, rx_link);
		if (cwt->cwt_state == CWT_SLEEPING) {
			cwt->cwt_state = CWT_RUNNING;
			cv_signal(&cwt->cwt_cv);
		}
		mtx_unlock(&cwt->cwt_lock);
	}
	SOCKBUF_UNLOCK(sb);
	INP_WUNLOCK(inp);

#ifdef INVARIANTS
	toep->ulpcb2 = NULL;
#endif

	return (0);
}
Example #8
static int
mpc7xxx_intr(struct trapframe *tf)
{
	int i, error, retval, cpu;
	uint32_t config;
	struct pmc *pm;
	struct powerpc_cpu *pac;

	cpu = curcpu;
	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG3(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;

	pac = powerpc_pcpu[cpu];

	config = mfspr(SPR_MMCR0) & ~SPR_MMCR0_FC;

	/*
	 * look for all PMCs that have interrupted:
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association
	 *
	 * If found, we call a helper to process the interrupt.
	 */

	for (i = 0; i < MPC7XXX_MAX_PMCS; i++) {
		if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		if (!MPC7XXX_PMC_HAS_OVERFLOWED(i))
			continue;

		retval = 1;	/* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Stop the counter if logging fails. */
		error = pmc_process_interrupt(PMC_HR, pm, tf);
		if (error != 0)
			mpc7xxx_stop_pmc(cpu, i);

		/* reload count. */
		mpc7xxx_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
	}
	if (retval)
		counter_u64_add(pmc_stats.pm_intr_processed, 1);
	else
		counter_u64_add(pmc_stats.pm_intr_ignored, 1);

	/* Re-enable PERF exceptions. */
	if (retval)
		mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE);

	return (retval);
}
Example #9
static struct mbuf *
rtwn_rx_copy_to_mbuf(struct rtwn_softc *sc, struct r92c_rx_stat *stat,
    int totlen)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct mbuf *m;
	uint32_t rxdw0;
	int pktlen;

	RTWN_ASSERT_LOCKED(sc);

	/* Dump Rx descriptor. */
	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV_DESC,
	    "%s: dw: 0 %08X, 1 %08X, 2 %08X, 3 %08X, 4 %08X, tsfl %08X\n",
	    __func__, le32toh(stat->rxdw0), le32toh(stat->rxdw1),
	    le32toh(stat->rxdw2), le32toh(stat->rxdw3), le32toh(stat->rxdw4),
	    le32toh(stat->tsf_low));

	/*
	 * Don't pass packets to the ieee80211 framework if the driver isn't
	 * RUNNING.
	 */
	if (!(sc->sc_flags & RTWN_RUNNING))
		return (NULL);

	rxdw0 = le32toh(stat->rxdw0);
	if (__predict_false(rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))) {
		/*
		 * This should not happen since we setup our Rx filter
		 * to not receive these frames.
		 */
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: RX flags error (%s)\n", __func__,
		    rxdw0 & R92C_RXDW0_CRCERR ? "CRC" : "ICV");
		goto fail;
	}

	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
	if (__predict_false(pktlen < sizeof(struct ieee80211_frame_ack))) {
		/*
		 * Should not happen (because of Rx filter setup).
		 */
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: frame is too short: %d\n", __func__, pktlen);
		goto fail;
	}

	m = m_get2(totlen, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (__predict_false(m == NULL)) {
		device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
		    __func__);
		goto fail;
	}

	/* Finalize mbuf. */
	memcpy(mtod(m, uint8_t *), (uint8_t *)stat, totlen);
	m->m_pkthdr.len = m->m_len = totlen;

	if (rtwn_check_frame(sc, m) != 0) {
		m_freem(m);
		goto fail;
	}

	return (m);
fail:
	counter_u64_add(ic->ic_ierrors, 1);
	return (NULL);
}
Example #10
void
rtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rtwn_usb_softc *uc = usbd_xfer_softc(xfer);
	struct rtwn_softc *sc = &uc->uc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct mbuf *m = NULL, *next;
	struct rtwn_data *data;
	int8_t nf, rssi;

	RTWN_ASSERT_LOCKED(sc);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data == NULL)
			goto tr_setup;
		STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
		m = rtwn_report_intr(uc, xfer, data);
		STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		data = STAILQ_FIRST(&uc->uc_rx_inactive);
		if (data == NULL) {
			KASSERT(m == NULL, ("mbuf isn't NULL"));
			goto finish;
		}
		STAILQ_REMOVE_HEAD(&uc->uc_rx_inactive, next);
		STAILQ_INSERT_TAIL(&uc->uc_rx_active, data, next);
		usbd_xfer_set_frame_data(xfer, 0, data->buf,
		    usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);

		/*
		 * To avoid a LOR we should unlock our private mutex here
		 * before calling ieee80211_input(); we are at the end of a
		 * USB callback, so it is safe to unlock.
		 */
		while (m != NULL) {
			next = m->m_next;
			m->m_next = NULL;

			ni = rtwn_rx_frame(sc, m, &rssi);

			RTWN_UNLOCK(sc);

			nf = RTWN_NOISE_FLOOR;
			if (ni != NULL) {
				if (ni->ni_flags & IEEE80211_NODE_HT)
					m->m_flags |= M_AMPDU;
				(void)ieee80211_input(ni, m, rssi - nf, nf);
				ieee80211_free_node(ni);
			} else {
				(void)ieee80211_input_all(ic, m,
				    rssi - nf, nf);
			}
			RTWN_LOCK(sc);
			m = next;
		}
		break;
	default:
		/* Return the transfer to the inactive queue due to an error. */
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data != NULL) {
			STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
			STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		}
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		break;
	}
finish:
	/* Finished receive; age anything left on the FF queue by a little bump */
	/*
	 * XXX TODO: just make this a callout timer schedule so we can
	 * flush the FF staging queue if we're approaching idle.
	 */
#ifdef	IEEE80211_SUPPORT_SUPERG
	if (!(sc->sc_flags & RTWN_FW_LOADED) ||
	    sc->sc_ratectl != RTWN_RATECTL_NET80211)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif

	/* Kick-start more transmit in case we stalled */
	rtwn_start(sc);
}