Example #1
#include <stdio.h>

int main(void){
	int i,j;
	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d AND %d = %d\n",i,j,AND(i,j));
		}
	}
	printf("\n");

	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d OR %d = %d\n",i,j,OR(i,j));
		}
	}
	printf("\n");

	for(i=0;i<2;i++){
		printf("%d NOT = %d\n",i,NOT(i));
	}
	printf("\n");

	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d NAND %d = %d\n",i,j,NAND(i,j));
		}
	}
	printf("\n");

	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d NOR %d = %d\n",i,j,NOR(i,j));
		}
	}
	printf("\n");

	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d XOR %d = %d\n",i,j,XOR(i,j));
		}
	}
	printf("\n");

	for(i=0;i<2;i++){
		for(j=0;j<2;j++){
			printf("%d XNOR %d = %d\n",i,j,XNOR(i,j));
		}
	}
	printf("\n");
	return 0;
}
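The AND, OR, NOT, NAND, NOR, XOR and XNOR operators used above are not included in the excerpt. Below is a minimal sketch of how they could be defined as 1-bit logic macros; these definitions are an assumption for illustration, not the original project's.

#define AND(a, b)  ((a) && (b))
#define OR(a, b)   ((a) || (b))
#define NOT(a)     (!(a))
#define NAND(a, b) (!((a) && (b)))
#define NOR(a, b)  (!((a) || (b)))
#define XOR(a, b)  (!(a) != !(b))
#define XNOR(a, b) (!(a) == !(b))

With definitions along these lines (plus the #include <stdio.h> at the top), the program prints the full truth table of each operator for the inputs 0 and 1.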
Example #2
/* Process 3OP Integer instructions */
bool eval_3OP_Int(struct lilith* vm, struct Instruction* c)
{
	#ifdef DEBUG
	char Name[20] = "ILLEGAL_3OP";
	#endif

	switch(c->raw_XOP)
	{
		case 0x000: /* ADD */
		{
			#ifdef DEBUG
			strncpy(Name, "ADD", 19);
			#elif TRACE
			record_trace("ADD");
			#endif

			ADD(vm, c);
			break;
		}
		case 0x001: /* ADDU */
		{
			#ifdef DEBUG
			strncpy(Name, "ADDU", 19);
			#elif TRACE
			record_trace("ADDU");
			#endif

			ADDU(vm, c);
			break;
		}
		case 0x002: /* SUB */
		{
			#ifdef DEBUG
			strncpy(Name, "SUB", 19);
			#elif TRACE
			record_trace("SUB");
			#endif

			SUB(vm, c);
			break;
		}
		case 0x003: /* SUBU */
		{
			#ifdef DEBUG
			strncpy(Name, "SUBU", 19);
			#elif TRACE
			record_trace("SUBU");
			#endif

			SUBU(vm, c);
			break;
		}
		case 0x004: /* CMP */
		{
			#ifdef DEBUG
			strncpy(Name, "CMP", 19);
			#elif TRACE
			record_trace("CMP");
			#endif

			CMP(vm, c);
			break;
		}
		case 0x005: /* CMPU */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPU", 19);
			#elif TRACE
			record_trace("CMPU");
			#endif

			CMPU(vm, c);
			break;
		}
		case 0x006: /* MUL */
		{
			#ifdef DEBUG
			strncpy(Name, "MUL", 19);
			#elif TRACE
			record_trace("MUL");
			#endif

			MUL(vm, c);
			break;
		}
		case 0x007: /* MULH */
		{
			#ifdef DEBUG
			strncpy(Name, "MULH", 19);
			#elif TRACE
			record_trace("MULH");
			#endif

			MULH(vm, c);
			break;
		}
		case 0x008: /* MULU */
		{
			#ifdef DEBUG
			strncpy(Name, "MULU", 19);
			#elif TRACE
			record_trace("MULU");
			#endif

			MULU(vm, c);
			break;
		}
		case 0x009: /* MULUH */
		{
			#ifdef DEBUG
			strncpy(Name, "MULUH", 19);
			#elif TRACE
			record_trace("MULUH");
			#endif

			MULUH(vm, c);
			break;
		}
		case 0x00A: /* DIV */
		{
			#ifdef DEBUG
			strncpy(Name, "DIV", 19);
			#elif TRACE
			record_trace("DIV");
			#endif

			DIV(vm, c);
			break;
		}
		case 0x00B: /* MOD */
		{
			#ifdef DEBUG
			strncpy(Name, "MOD", 19);
			#elif TRACE
			record_trace("MOD");
			#endif

			MOD(vm, c);
			break;
		}
		case 0x00C: /* DIVU */
		{
			#ifdef DEBUG
			strncpy(Name, "DIVU", 19);
			#elif TRACE
			record_trace("DIVU");
			#endif

			DIVU(vm, c);
			break;
		}
		case 0x00D: /* MODU */
		{
			#ifdef DEBUG
			strncpy(Name, "MODU", 19);
			#elif TRACE
			record_trace("MODU");
			#endif

			MODU(vm, c);
			break;
		}
		case 0x010: /* MAX */
		{
			#ifdef DEBUG
			strncpy(Name, "MAX", 19);
			#elif TRACE
			record_trace("MAX");
			#endif

			MAX(vm, c);
			break;
		}
		case 0x011: /* MAXU */
		{
			#ifdef DEBUG
			strncpy(Name, "MAXU", 19);
			#elif TRACE
			record_trace("MAXU");
			#endif

			MAXU(vm, c);
			break;
		}
		case 0x012: /* MIN */
		{
			#ifdef DEBUG
			strncpy(Name, "MIN", 19);
			#elif TRACE
			record_trace("MIN");
			#endif

			MIN(vm, c);
			break;
		}
		case 0x013: /* MINU */
		{
			#ifdef DEBUG
			strncpy(Name, "MINU", 19);
			#elif TRACE
			record_trace("MINU");
			#endif

			MINU(vm, c);
			break;
		}
		case 0x014: /* PACK */
		case 0x015: /* UNPACK */
		case 0x016: /* PACK8.CO */
		case 0x017: /* PACK8U.CO */
		case 0x018: /* PACK16.CO */
		case 0x019: /* PACK16U.CO */
		case 0x01A: /* PACK32.CO */
		case 0x01B: /* PACK32U.CO */
		{
			illegal_instruction(vm, c);
			break;
		}
		case 0x020: /* AND */
		{
			#ifdef DEBUG
			strncpy(Name, "AND", 19);
			#elif TRACE
			record_trace("AND");
			#endif

			AND(vm, c);
			break;
		}
		case 0x021: /* OR */
		{
			#ifdef DEBUG
			strncpy(Name, "OR", 19);
			#elif TRACE
			record_trace("OR");
			#endif

			OR(vm, c);
			break;
		}
		case 0x022: /* XOR */
		{
			#ifdef DEBUG
			strncpy(Name, "XOR", 19);
			#elif TRACE
			record_trace("XOR");
			#endif

			XOR(vm, c);
			break;
		}
		case 0x023: /* NAND */
		{
			#ifdef DEBUG
			strncpy(Name, "NAND", 19);
			#elif TRACE
			record_trace("NAND");
			#endif

			NAND(vm, c);
			break;
		}
		case 0x024: /* NOR */
		{
			#ifdef DEBUG
			strncpy(Name, "NOR", 19);
			#elif TRACE
			record_trace("NOR");
			#endif

			NOR(vm, c);
			break;
		}
		case 0x025: /* XNOR */
		{
			#ifdef DEBUG
			strncpy(Name, "XNOR", 19);
			#elif TRACE
			record_trace("XNOR");
			#endif

			XNOR(vm, c);
			break;
		}
		case 0x026: /* MPQ */
		{
			#ifdef DEBUG
			strncpy(Name, "MPQ", 19);
			#elif TRACE
			record_trace("MPQ");
			#endif

			MPQ(vm, c);
			break;
		}
		case 0x027: /* LPQ */
		{
			#ifdef DEBUG
			strncpy(Name, "LPQ", 19);
			#elif TRACE
			record_trace("LPQ");
			#endif

			LPQ(vm, c);
			break;
		}
		case 0x028: /* CPQ */
		{
			#ifdef DEBUG
			strncpy(Name, "CPQ", 19);
			#elif TRACE
			record_trace("CPQ");
			#endif

			CPQ(vm, c);
			break;
		}
		case 0x029: /* BPQ */
		{
			#ifdef DEBUG
			strncpy(Name, "BPQ", 19);
			#elif TRACE
			record_trace("BPQ");
			#endif

			BPQ(vm, c);
			break;
		}
		case 0x030: /* SAL */
		{
			#ifdef DEBUG
			strncpy(Name, "SAL", 19);
			#elif TRACE
			record_trace("SAL");
			#endif

			SAL(vm, c);
			break;
		}
		case 0x031: /* SAR */
		{
			#ifdef DEBUG
			strncpy(Name, "SAR", 19);
			#elif TRACE
			record_trace("SAR");
			#endif

			SAR(vm, c);
			break;
		}
		case 0x032: /* SL0 */
		{
			#ifdef DEBUG
			strncpy(Name, "SL0", 19);
			#elif TRACE
			record_trace("SL0");
			#endif

			SL0(vm, c);
			break;
		}
		case 0x033: /* SR0 */
		{
			#ifdef DEBUG
			strncpy(Name, "SR0", 19);
			#elif TRACE
			record_trace("SR0");
			#endif

			SR0(vm, c);
			break;
		}
		case 0x034: /* SL1 */
		{
			#ifdef DEBUG
			strncpy(Name, "SL1", 19);
			#elif TRACE
			record_trace("SL1");
			#endif

			SL1(vm, c);
			break;
		}
		case 0x035: /* SR1 */
		{
			#ifdef DEBUG
			strncpy(Name, "SR1", 19);
			#elif TRACE
			record_trace("SR1");
			#endif

			SR1(vm, c);
			break;
		}
		case 0x036: /* ROL */
		{
			#ifdef DEBUG
			strncpy(Name, "ROL", 19);
			#elif TRACE
			record_trace("ROL");
			#endif

			ROL(vm, c);
			break;
		}
		case 0x037: /* ROR */
		{
			#ifdef DEBUG
			strncpy(Name, "ROR", 19);
			#elif TRACE
			record_trace("ROR");
			#endif

			ROR(vm, c);
			break;
		}
		case 0x038: /* LOADX */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADX", 19);
			#elif TRACE
			record_trace("LOADX");
			#endif

			LOADX(vm, c);
			break;
		}
		case 0x039: /* LOADX8 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADX8", 19);
			#elif TRACE
			record_trace("LOADX8");
			#endif

			LOADX8(vm, c);
			break;
		}
		case 0x03A: /* LOADXU8 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADXU8", 19);
			#elif TRACE
			record_trace("LOADXU8");
			#endif

			LOADXU8(vm, c);
			break;
		}
		case 0x03B: /* LOADX16 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADX16", 19);
			#elif TRACE
			record_trace("LOADX16");
			#endif

			LOADX16(vm, c);
			break;
		}
		case 0x03C: /* LOADXU16 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADXU16", 19);
			#elif TRACE
			record_trace("LOADXU16");
			#endif

			LOADXU16(vm, c);
			break;
		}
		case 0x03D: /* LOADX32 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADX32", 19);
			#elif TRACE
			record_trace("LOADX32");
			#endif

			LOADX32(vm, c);
			break;
		}
		case 0x03E: /* LOADXU32 */
		{
			#ifdef DEBUG
			strncpy(Name, "LOADXU32", 19);
			#elif TRACE
			record_trace("LOADXU32");
			#endif

			LOADXU32(vm, c);
			break;
		}
		case 0x048: /* STOREX */
		{
			#ifdef DEBUG
			strncpy(Name, "STOREX", 19);
			#elif TRACE
			record_trace("STOREX");
			#endif

			STOREX(vm, c);
			break;
		}
		case 0x049: /* STOREX8 */
		{
			#ifdef DEBUG
			strncpy(Name, "STOREX8", 19);
			#elif TRACE
			record_trace("STOREX8");
			#endif

			STOREX8(vm, c);
			break;
		}
		case 0x04A: /* STOREX16 */
		{
			#ifdef DEBUG
			strncpy(Name, "STOREX16", 19);
			#elif TRACE
			record_trace("STOREX16");
			#endif

			STOREX16(vm, c);
			break;
		}
		case 0x04B: /* STOREX32 */
		{
			#ifdef DEBUG
			strncpy(Name, "STOREX32", 19);
			#elif TRACE
			record_trace("STOREX32");
			#endif

			STOREX32(vm, c);
			break;
		}
		case 0x050: /* CMPJUMP.G */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.G", 19);
			#elif TRACE
			record_trace("CMPJUMP.G");
			#endif

			CMPJUMP_G(vm, c);
			break;
		}
		case 0x051: /* CMPJUMP.GE */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.GE", 19);
			#elif TRACE
			record_trace("CMPJUMP.GE");
			#endif

			CMPJUMP_GE(vm, c);
			break;
		}
		case 0x052: /* CMPJUMP.E */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.E", 19);
			#elif TRACE
			record_trace("CMPJUMP.E");
			#endif

			CMPJUMP_E(vm, c);
			break;
		}
		case 0x053: /* CMPJUMP.NE */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.NE", 19);
			#elif TRACE
			record_trace("CMPJUMP.NE");
			#endif

			CMPJUMP_NE(vm, c);
			break;
		}
		case 0x054: /* CMPJUMP.LE */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.LE", 19);
			#elif TRACE
			record_trace("CMPJUMP.LE");
			#endif

			CMPJUMP_LE(vm, c);
			break;
		}
		case 0x055: /* CMPJUMP.L */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMP.L", 19);
			#elif TRACE
			record_trace("CMPJUMP.L");
			#endif

			CMPJUMP_L(vm, c);
			break;
		}
		case 0x060: /* CMPJUMPU.G */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMPU.G", 19);
			#elif TRACE
			record_trace("CMPJUMPU.G");
			#endif

			CMPJUMPU_G(vm, c);
			break;
		}
		case 0x061: /* CMPJUMPU.GE */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMPU.GE", 19);
			#elif TRACE
			record_trace("CMPJUMPU.GE");
			#endif

			CMPJUMPU_GE(vm, c);
			break;
		}
		case 0x064: /* CMPJUMPU.LE */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMPU.LE", 19);
			#elif TRACE
			record_trace("CMPJUMPU.LE");
			#endif

			CMPJUMPU_LE(vm, c);
			break;
		}
		case 0x065: /* CMPJUMPU.L */
		{
			#ifdef DEBUG
			strncpy(Name, "CMPJUMPU.L", 19);
			#elif TRACE
			record_trace("CMPJUMPU.L");
			#endif

			CMPJUMPU_L(vm, c);
			break;
		}
		default:
		{
			illegal_instruction(vm, c);
			break;
		}
	}
	#ifdef DEBUG
	fprintf(stdout, "# %s reg%u reg%u reg%u\n", Name, c->reg0, c->reg1, c->reg2);
	#endif
	return false;
}
Example #3
static int mlx4_en_process_tx_cq(struct net_device *dev,
				 struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
#ifndef CONFIG_WQE_FORMAT_1
	u32 txbbs_stamp = 0;
#endif
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	u64 timestamp = 0;
	int done = 0;


	if (!priv->port_up)
		return 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom: 0x%x syndrom: 0x%x\n",
			       ((struct mlx4_err_cqe *)cqe)->
				       vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
					ring->size), timestamp);
#ifndef CONFIG_WQE_FORMAT_1
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring->cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
#endif
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if it was stopped and ring is not full */
	if (unlikely(ring->blocked) &&
	    (ring->prod - ring->cons) <= ring->full_size) {
		ring->blocked = 0;
#ifdef CONFIG_RATELIMIT
		if (cq->ring < priv->native_tx_ring_num) {
			if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
				atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
			priv->port_stats.wake_queue++;
		}
#else
		if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
			atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		priv->port_stats.wake_queue++;
#endif
		ring->wake_queue++;
	}
	return done;
}
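All of the mlx4 polling loops in these examples gate on the same ownership test: XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cons_index & size). A CQE belongs to software when its owner bit matches the wrap parity of the consumer index, which flips each time the consumer index wraps around the queue. Below is a minimal sketch of that check, assuming XNOR is the usual "both zero or both non-zero" macro; the macro values and the helper name are illustrative, not taken from the driver source.

/* Assumed definitions, shown for illustration only. */
#define XNOR(x, y)		(!(x) == !(y))
#define MLX4_CQE_OWNER_MASK	0x80	/* owner bit, as in the mlx4 headers */

/* Non-zero when the CQE is owned by software: the owner bit written by
 * hardware must equal the wrap parity of the consumer index. */
static inline int mlx4_cqe_owned_by_sw(unsigned char owner_sr_opcode,
					unsigned int cons_index,
					unsigned int size)
{
	return XNOR(owner_sr_opcode & MLX4_CQE_OWNER_MASK, cons_index & size);
}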
Example #4
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (dev->features & NETIF_F_GRO) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, skb_shinfo(gro_skb)->frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->truesize += length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
						vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
					else
						napi_gro_frags(&cq->napi);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			goto out;
		}
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
Example #5
File: en_tx.c Project: 8l/akaros
static bool mlx4_en_process_tx_cq(struct ether *dev,
				  struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	uint16_t index;
	uint16_t new_index, ring_index, stamp_index;
	uint32_t txbbs_skipped = 0;
	uint32_t txbbs_stamp = 0;
	uint32_t cons_index = mcq->cons_index;
	int size = cq->size;
	uint32_t size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	uint32_t packets = 0;
	uint32_t bytes = 0;
	int factor = priv->cqe_factor;
	uint64_t timestamp = 0;
	int done = 0;
	int budget = priv->tx_work_limit;
	uint32_t last_nr_txbb;
	uint32_t ring_cons;

	if (!priv->port_up)
		return true;

#if 0 // AKAROS_PORT
	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
#endif

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
	ring_cons = ACCESS_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && (done < budget)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		bus_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;
			if (ring->tx_info[ring_index].ts_requested)
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring_cons + txbbs_skipped) &
					ring->size), timestamp);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
	ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;

#if 0 // AKAROS_PORT
	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/*
	 * Wakeup Tx queue if this stopped, and at least 1 packet
	 * was completed
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}
#endif
	return done < budget;
}
Example #6
int mlx4_en_process_tx_cq(struct net_device *dev,
			     struct mlx4_en_cq *cq,
			     int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	int factor = priv->cqe_factor;
	int done = 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && done < budget) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
							ring->size));
		} while ((++done < budget) && ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;
	atomic_sub(txbbs_skipped, &ring->inflight);

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked && txbbs_skipped > 0)) {
		ring->blocked = 0;
#ifndef __VMKERNEL_MLX4_EN_TX_HASH__
		netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
#else
		netif_tx_wake_queue(netdev_get_tx_queue(dev, ring->reported_index));
#endif	/* NOT __VMKERNEL_MLX4_EN_TX_HASH__ */
		priv->port_stats.wake_queue++;
	}
	return done;
}
Example #7
Fixed DizzyTestAudioID(ULONG id, struct TagItem *tags)
{
  ULONG volume=0,stereo=0,panning=0,hifi=0,pingpong=0,record=0,realtime=0,
        fullduplex=0,bits=0,channels=0,minmix=0,maxmix=0;
  ULONG total=0,hits=0;
  struct TagItem *tstate, *tag;
  
  if(tags == NULL)
  {
    return (Fixed) 0x10000;
  }

  if(id == AHI_DEFAULT_ID)
  {
    id = AHIBase->ahib_AudioMode;
  }

  AHI_GetAudioAttrs( id, NULL,
                     AHIDB_Volume,      (ULONG) &volume,
                     AHIDB_Stereo,      (ULONG) &stereo,
                     AHIDB_Panning,     (ULONG) &panning,
                     AHIDB_HiFi,        (ULONG) &hifi,
                     AHIDB_PingPong,    (ULONG) &pingpong,
                     AHIDB_Record,      (ULONG) &record,
                     AHIDB_Bits,        (ULONG) &bits,
                     AHIDB_MaxChannels, (ULONG) &channels,
                     AHIDB_MinMixFreq,  (ULONG) &minmix,
                     AHIDB_MaxMixFreq,  (ULONG) &maxmix,
                     AHIDB_Realtime,    (ULONG) &realtime,
                     AHIDB_FullDuplex,  (ULONG) &fullduplex,
                     TAG_DONE );

  tstate = tags;

  while ((tag = NextTagItem(&tstate)))
  {
    switch (tag->ti_Tag)
    {
      // Check source mode

      case AHIDB_AudioID:    
        total++;
        if( ((tag->ti_Data)&0xffff0000) == (id & 0xffff0000) )
          hits++;
        break;

      // Boolean tags

      case AHIDB_Volume:
        total++;
        if(XNOR(tag->ti_Data, volume))
          hits++;
        break;

      case AHIDB_Stereo:
        total++;
        if(XNOR(tag->ti_Data, stereo))
          hits++;
        break;
      case AHIDB_Panning:
        total++;
        if(XNOR(tag->ti_Data, panning))
          hits++;
        break;
      case AHIDB_HiFi:
        total++;
        if(XNOR(tag->ti_Data, hifi))
          hits++;
        break;
      case AHIDB_PingPong:
        total++;
        if(XNOR(tag->ti_Data, pingpong))
          hits++;
        break;
      case AHIDB_Record:
        total++;
        if(XNOR(tag->ti_Data, record))
          hits++;
        break;
      case AHIDB_Realtime:
        total++;
        if(XNOR(tag->ti_Data, realtime))
          hits++;
        break;
      case AHIDB_FullDuplex:
        total++;
        if(XNOR(tag->ti_Data, fullduplex))
          hits++;
        break;

      // The rest

      case AHIDB_Bits:
        total++;
        if(tag->ti_Data <= bits)
          hits++;
        break;
      case AHIDB_MaxChannels:
        total++;
        if(tag->ti_Data <= channels )
          hits++;
        break;
      case AHIDB_MinMixFreq:
        total++;
        if(tag->ti_Data >= minmix)
          hits++;
        break;
      case AHIDB_MaxMixFreq:
        total++;
        if(tag->ti_Data <= maxmix)
          hits++;
        break;
    } /* switch */
  } /* while */


  if(total)
    return (Fixed) ((hits<<16)/total);
  else
    return (Fixed) 0x10000;
}
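DizzyTestAudioID returns its score as a 16.16 fixed-point fraction of the requested tags that matched, (hits << 16) / total, with 0x10000 meaning a perfect match. A small standalone sketch of that arithmetic, using fixed-width types in place of the Amiga ULONG/Fixed types and assumed example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: 3 of the 4 requested tags matched. */
	uint32_t hits = 3, total = 4;
	uint32_t score = (hits << 16) / total;	/* 16.16 fixed point */

	/* Prints: score = 0xC000 (0.75) */
	printf("score = 0x%X (%.2f)\n", score, score / 65536.0);
	return 0;
}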
Example #8
File: en_tx.c Project: avagin/linux
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
			    struct mlx4_en_tx_ring *ring,
			    int index, u64 timestamp,
			    int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_rx_alloc frame = {
		.page = tx_info->page,
		.dma = tx_info->map0_dma,
	};

	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
		dma_unmap_page(priv->ddev, tx_info->map0_dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(tx_info->page);
	}

	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
		 ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						0, 0 /* Non-NAPI caller */);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (ring->tx_queue)
		netdev_tx_reset_queue(ring->tx_queue);

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

bool mlx4_en_process_tx_cq(struct net_device *dev,
			   struct mlx4_en_cq *cq, int napi_budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
	struct mlx4_cqe *cqe;
	u16 index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	int done = 0;
	int budget = priv->tx_work_limit;
	u32 last_nr_txbb;
	u32 ring_cons;

	if (unlikely(!priv->port_up))
		return true;

	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size) && (done < budget)) {
		u16 new_index;

		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		dma_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;

			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       cqe_err->vendor_err_syndrome,
			       cqe_err->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			u64 timestamp = 0;

			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;

			if (unlikely(ring->tx_info[ring_index].ts_requested))
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = ring->free_tx_desc(
					priv, ring, ring_index,
					timestamp, napi_budget);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
						ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done < budget;

	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/* Wakeup Tx queue if this stopped, and ring is not full.
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) &&
	    !mlx4_en_is_tx_ring_full(ring)) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}

	return done < budget;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool clean_complete;

	clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
	if (!clean_complete)
		return budget;

	napi_complete(napi);
	mlx4_en_arm_cq(priv, cq);

	return 0;
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + (index << LOG_TXBB_SIZE);
}
Example #9
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct mbuf **mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
#ifdef INET
	struct lro_entry *queued;
#endif
	int index;
	unsigned int length;
	int polled = 0;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		mb_list = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe))
			goto next;

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);
		if (!mb) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		mb->m_pkthdr.flowid = cq->ring;
		mb->m_flags |= M_FLOWID;
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(priv->rx_csum) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags = 
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
#ifdef INET
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}
#endif

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
#ifdef INET
			if (priv->ip_reasm &&
			    cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
			    !mlx4_en_rx_frags(priv, ring, mb, cqe))
				goto next;
#endif
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly sessions */
out:
#ifdef INET
	mlx4_en_flush_frags(priv, ring);
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}
#endif
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
Example #10
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
							ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}
Example #11
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & MLX4_CQE_STATUS_IPOK) &&
			    (cqe->checksum == 0xffff)) {
				priv->port_stats.rx_chksum_good++;
				if (mdev->profile.num_lro &&
				    !mlx4_en_lro_rx(priv, ring, rx_desc,
						    skb_frags, length, cqe))
					goto next;

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			if (mdev->profile.num_lro)
				mlx4_en_lro_flush(priv, ring, 0);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	if (mdev->profile.num_lro)
		mlx4_en_lro_flush(priv, ring, 1);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	if (unlikely(!ring->full)) {
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
Example #12
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
							ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}