Example #1
/**
 * Periodic timer tick for slow management operations
 *
 * @param arg    Unused; the callout always passes NULL
 */
static void cvm_do_timer(void *arg)
{
	static int port;	/* Next port to poll; persists across ticks */
	static int updated;	/* Link updates seen during the current pass */
	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			cvm_oct_private_t *priv = (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;

			cvm_oct_common_poll(priv->ifp);
			if (priv->need_link_update) {
				updated++;
				taskqueue_enqueue(cvm_oct_link_taskq, &priv->link_task);
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
					IF_LOCK(&priv->tx_free_queue[qos]);
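					/*
					 * The FAU counter at priv->fau + qos * 4 tracks mbufs
					 * still owned by the PKO hardware; queue entries beyond
					 * that count have completed transmission and are safe
					 * to free.
					 */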
					while (_IF_QLEN(&priv->tx_free_queue[qos]) > cvmx_fau_fetch_and_add32(priv->fau+qos*4, 0)) {
						struct mbuf *m;

						_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
						m_freem(m);
					}
					IF_UNLOCK(&priv->tx_free_queue[qos]);

					/*
					 * XXX locking!
					 */
					priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				}
			}
		}
		port++;
		/* Poll the next port in a 50th of a second.
		   This spreads the polling of ports out a little bit */
		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
	} else {
		port = 0;
		/* If any updates were made in this run, continue iterating at
		 * 1/50th of a second, so that if a link has merely gone down
		 * temporarily (e.g. because of interface reinitialization) it
		 * will not be forced to stay down for an entire second.
		 */
		if (updated > 0) {
			updated = 0;
			callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
		} else {
			/* All ports have been polled. Start the next iteration through
			   the ports in one second */
			callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);
		}
	}
}
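
The handler re-arms itself with callout_reset(9), so the driver has to prime the callout once at attach time. A minimal sketch of that priming, assuming cvm_oct_poll_timer is the driver's global callout and that an MPSAFE callout is acceptable; cvm_oct_poll_start is a hypothetical helper name:

static struct callout cvm_oct_poll_timer;

static void
cvm_oct_poll_start(void)
{
	/* Hypothetical helper; the real initialization lives elsewhere in
	   the driver.  The '1' requests an MPSAFE callout. */
	callout_init(&cvm_oct_poll_timer, 1);

	/* Arm the first tick; cvm_do_timer re-arms itself from then on. */
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);
}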
Example #2
static int
ng_bt3c_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	bt3c_softc_p	 sc = (bt3c_softc_p) NG_NODE_PRIVATE(node);
	struct ng_mesg	*msg = NULL, *rsp = NULL;
	int		 error = 0;

	if (sc == NULL) {
		NG_FREE_ITEM(item);
		return (EHOSTDOWN);
	}

	NGI_GET_MSG(item, msg);

	switch (msg->header.typecookie) {
	case NGM_GENERIC_COOKIE:
		switch (msg->header.cmd) {
		case NGM_TEXT_STATUS:
			NG_MKRESPONSE(rsp, msg, NG_TEXTRESPONSE, M_NOWAIT);
			if (rsp == NULL)
				error = ENOMEM;
			else
				snprintf(rsp->data, NG_TEXTRESPONSE,
					"Hook: %s\n" \
					"Flags: %#x\n" \
					"Debug: %d\n"  \
					"State: %d\n"  \
					"IncmQ: [len:%d,max:%d]\n" \
					"OutgQ: [len:%d,max:%d]\n",
					(sc->hook != NULL)? NG_BT3C_HOOK : "",
					sc->flags,
					sc->debug,
					sc->state,
					_IF_QLEN(&sc->inq), /* XXX */
					sc->inq.ifq_maxlen, /* XXX */
					_IF_QLEN(&sc->outq), /* XXX */
					sc->outq.ifq_maxlen /* XXX */
					);
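			/*
			 * The XXX marks above flag unlocked, direct reads of
			 * the ifqueue internals; the reported lengths are
			 * only advisory snapshots.
			 */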
			break;

		default:
			error = EINVAL;
			break;
		}
		break;

	case NGM_BT3C_COOKIE:
		switch (msg->header.cmd) {
		case NGM_BT3C_NODE_GET_STATE:
			NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_state_ep),
				M_NOWAIT);
			if (rsp == NULL)
				error = ENOMEM;
			else
				*((ng_bt3c_node_state_ep *)(rsp->data)) = 
					sc->state;
			break;

		case NGM_BT3C_NODE_SET_DEBUG:
			if (msg->header.arglen != sizeof(ng_bt3c_node_debug_ep))
				error = EMSGSIZE;
			else
				sc->debug =
					*((ng_bt3c_node_debug_ep *)(msg->data));
			break;

		case NGM_BT3C_NODE_GET_DEBUG:
			NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_debug_ep),
				M_NOWAIT);
			if (rsp == NULL)
				error = ENOMEM;
			else
				*((ng_bt3c_node_debug_ep *)(rsp->data)) = 
					sc->debug;
			break;

		case NGM_BT3C_NODE_GET_QLEN:
			NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_qlen_ep),
				M_NOWAIT);
			if (rsp == NULL) {
				error = ENOMEM;
				break;
			}

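			/* XXX: arglen is not validated before msg->data is
			   dereferenced below. */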
			switch (((ng_bt3c_node_qlen_ep *)(msg->data))->queue) {
			case NGM_BT3C_NODE_IN_QUEUE:
				((ng_bt3c_node_qlen_ep *)(rsp->data))->queue =
					NGM_BT3C_NODE_IN_QUEUE;
				((ng_bt3c_node_qlen_ep *)(rsp->data))->qlen =
					sc->inq.ifq_maxlen;
				break;

			case NGM_BT3C_NODE_OUT_QUEUE:
				((ng_bt3c_node_qlen_ep *)(rsp->data))->queue =
					NGM_BT3C_NODE_OUT_QUEUE;
				((ng_bt3c_node_qlen_ep *)(rsp->data))->qlen =
					sc->outq.ifq_maxlen;
				break;

			default:
				NG_FREE_MSG(rsp);
				error = EINVAL;
				break;
			}
			break;

		case NGM_BT3C_NODE_SET_QLEN:
			if (msg->header.arglen != sizeof(ng_bt3c_node_qlen_ep)){
				error = EMSGSIZE;
				break;
			}

			if (((ng_bt3c_node_qlen_ep *)(msg->data))->qlen <= 0) {
				error = EINVAL;
				break;
			}

			switch (((ng_bt3c_node_qlen_ep *)(msg->data))->queue) {
			case NGM_BT3C_NODE_IN_QUEUE:
				sc->inq.ifq_maxlen = ((ng_bt3c_node_qlen_ep *)
					(msg->data))->qlen; /* XXX */
				break;

			case NGM_BT3C_NODE_OUT_QUEUE:
				sc->outq.ifq_maxlen = ((ng_bt3c_node_qlen_ep *)
					(msg->data))->qlen; /* XXX */
				break;

			default:
				error = EINVAL;
				break;
			}
			break;

		case NGM_BT3C_NODE_GET_STAT:
			NG_MKRESPONSE(rsp, msg, sizeof(ng_bt3c_node_stat_ep),
				M_NOWAIT);
			if (rsp == NULL)
				error = ENOMEM;
			else
				bcopy(&sc->stat, rsp->data,
					sizeof(ng_bt3c_node_stat_ep));
			break;

		case NGM_BT3C_NODE_RESET_STAT:
			NG_BT3C_STAT_RESET(sc->stat);
			break;

		case NGM_BT3C_NODE_DOWNLOAD_FIRMWARE:
			if (msg->header.arglen < 
					sizeof(ng_bt3c_firmware_block_ep))
				error = EMSGSIZE;
			else	
				bt3c_download_firmware(sc, msg->data,
							msg->header.arglen);
			break;

		default:
			error = EINVAL;
			break;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	NG_RESPOND_MSG(error, node, item, rsp);
	NG_FREE_MSG(msg);

	return (error);
} /* ng_bt3c_rcvmsg */
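
For context, a command such as NGM_BT3C_NODE_SET_QLEN normally arrives from userland over a netgraph control socket. A minimal sketch using libnetgraph (link with -lnetgraph); the node path "bt3c0:" and the queue length of 32 are placeholder values, and the header path follows the FreeBSD source layout:

#include <sys/types.h>
#include <err.h>
#include <netgraph.h>
#include <netgraph/bluetooth/include/ng_bt3c.h>

int
main(void)
{
	ng_bt3c_node_qlen_ep	ep;
	int			cs, ds;

	/* Create an anonymous socket node with control and data sockets. */
	if (NgMkSockNode(NULL, &cs, &ds) < 0)
		err(1, "NgMkSockNode");

	ep.queue = NGM_BT3C_NODE_IN_QUEUE;
	ep.qlen = 32;

	/* Deliver the control message; it lands in ng_bt3c_rcvmsg() above. */
	if (NgSendMsg(cs, "bt3c0:", NGM_BT3C_COOKIE,
	    NGM_BT3C_NODE_SET_QLEN, &ep, sizeof(ep)) < 0)
		err(1, "NgSendMsg");

	return (0);
}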
Example #3
/**
 * Packet transmit
 *
 * @param m    Packet to send
 * @param ifp    Interface to send it on
 * @return Zero on success, 1 if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.\n", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
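	/*
	 * reg0/size0/subone0 direct the PKO hardware to atomically decrement
	 * the 32-bit FAU counter at priv->fau + qos * 4 once the packet has
	 * been sent; the same counter gates the tx_free_queue reclaim below.
	 */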
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
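		/* ipoffp1 is the IP header offset plus one (zero disables the
		   offload); the value here assumes an untagged Ethernet
		   frame. */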
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		/* Drop this packet: too many already queued to the HW */
		dropped = 1;
	} else if (__predict_false(cvmx_pko_send_packet_finish(priv->port,
	    priv->queue + qos, pko_command, hw_buffer,
	    CVMX_PKO_LOCK_CMD_QUEUE))) {
		/* Sending the packet to the output queue failed */
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs no longer in use by the hardware */
	while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
		m_freem(m);
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
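
The reclaim loop at the tail of cvm_oct_xmit is the same idiom the timer in Example #1 uses. A sketch of that invariant factored into a helper; cvm_oct_tx_drain is a hypothetical name, both call sites inline the loop, and the caller must hold the queue lock:

static void
cvm_oct_tx_drain(cvm_oct_private_t *priv, int qos)
{
	struct mbuf *m;
	int32_t in_use;

	/* Count of mbufs still owned by the PKO hardware; the PKO command's
	   reg0/subone0 fields make the hardware decrement this counter as
	   each packet completes. */
	in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 0);

	/* Anything queued beyond in_use has finished transmitting. */
	while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
		m_freem(m);
	}
}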