/*
 * __hal_stats_disable
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         the Xframe hardware stat counters.
 *
 * Ask the device to stop collecting stats.
 * See also: xge_hal_stats_getinfo().
 */
void
__hal_stats_disable(xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(stats->hw_info);

	hldev = (xge_hal_device_t*)stats->devh;
	xge_assert(hldev);
	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->stat_cfg);
	val64 &= ~XGE_HAL_STAT_CFG_STAT_EN;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->stat_cfg);
	/* flush the write */
	(void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->stat_cfg);

	xge_debug_stats(XGE_TRACE, "stats disabled at 0x"XGE_OS_LLXFMT,
	     (unsigned long long)stats->dma_addr);

	stats->is_enabled = 0;
}
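
The body above is the usual posted-write discipline for BAR0 registers: read-modify-write the config word, then read it back so the write is flushed before the caller proceeds. Below is a minimal sketch of the same pattern as a generic set/clear helper; the helper name is hypothetical, and the HAL's actual enable path (__hal_stats_enable) also programs the stats DMA address and refresh interval, so this is an illustration of the pattern, not a drop-in replacement.

static void
__tmp_stats_set_stat_en(xge_hal_device_t *hldev, int enable)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->stat_cfg);
	if (enable)
		val64 |= XGE_HAL_STAT_CFG_STAT_EN;
	else
		val64 &= ~((u64) XGE_HAL_STAT_CFG_STAT_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->stat_cfg);
	/* read back to flush the posted write */
	(void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->stat_cfg);
}
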
Example #3
VOID
xmpTmpRssRegDump(
  xmpNicCtx_t                *pNicCtx
  )
{
  xge_hal_device_t           *hldev;
  xge_hal_pci_bar0_t         *bar0;
  U64 val64;
  UINT i;

  hldev = XMP_NIC_GET_DEV(pNicCtx);
  bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
  val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_ctrl);
  DBGPRINT(("RTS_CTRL=0x%I64x\n", val64));
  val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_rth_cfg);
  DBGPRINT(("RTS_RTH_CFG=0x%I64x\n", val64));
  val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_default_q);
  DBGPRINT(("RTS_DEFAULT_Q=0x%I64x\n", val64));

  for (i = 0; i < 5; i++)
  {
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  &bar0->rts_rth_hash_mask[i]);
    DBGPRINT(("RTS_HASH_MASK[%d]=0x%I64x\n", i, val64));
  }

  for (i = 0; i < 4; i++)
  {
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  &bar0->prc_ctrl_n[i]);
    DBGPRINT(("PRC_CTRL[%d]=0x%I64x\n", i, val64));
  }

  for (i = 0; i < pNicCtx->RssParamsReq.TableSz; i++)
  {
    /* execute */
    val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
             XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(i));
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           &bar0->rts_rth_map_mem_ctrl);

    /* poll until done */
    if (__hal_device_register_poll(hldev,
                                   &bar0->rts_rth_map_mem_ctrl, 0,
                                   XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
                                   XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS)
           != XGE_HAL_OK)
      /* FIXME: return an error or log the timeout */;

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  &bar0->rts_rth_map_mem_data);
    DBGPRINT(("iTable[%d] = 0x%I64x\n", i, val64));
  }

}
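
The loop above is the Xframe indirect-access handshake: write the table offset with the strobe bit set, poll until hardware clears the strobe, then read the data register. The FIXME notes that a poll timeout is silently ignored; below is a hedged sketch that factors the handshake into a helper and propagates the poll status instead. The helper name is hypothetical; it reuses only calls already present in this snippet.

static xge_hal_status_e
xmpTmpRssReadITableEntry(xge_hal_device_t *hldev, UINT i, u64 *entry)
{
  xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
  xge_hal_status_e status;
  u64 val64;

  /* latch the requested indirection-table offset and strobe */
  val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
           XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(i));
  xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                         &bar0->rts_rth_map_mem_ctrl);

  /* the strobe self-clears once the entry is ready to read */
  status = __hal_device_register_poll(hldev,
                                      &bar0->rts_rth_map_mem_ctrl, 0,
                                      XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
                                      XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
  if (status != XGE_HAL_OK)
    return status;

  *entry = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                 &bar0->rts_rth_map_mem_data);
  return XGE_HAL_OK;
}
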
Example #4
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev, ring->channel.regh0,
			&bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
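
Note the (u64) cast before the complement above. If the flag were a plain 32-bit constant, the ~ would be evaluated in 32 bits and the zero-extended result would clear the entire upper half of val64. A standalone illustration of that C integer-promotion gotcha (demo code, not part of the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val64 = 0xFFFFFFFFFFFFFFFFULL;
	unsigned int flag = 0x80000000u;   /* imagine a 32-bit flag */

	/* complement computed in 32 bits, then zero-extended:
	 * wipes the entire upper half of val64 */
	printf("0x%016llx\n", (unsigned long long)(val64 & ~flag));

	/* widen first, then complement: clears only the intended bit */
	printf("0x%016llx\n", (unsigned long long)(val64 & ~(uint64_t)flag));
	return 0;
}
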
Example #5
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].priority,
							(5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x%llx",
			(unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x%llx",
			(unsigned long long)val64);

	/* Activate Rx steering */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->rts_qos_steering);
	for (j = 0; j < 8 /* QoS max */; j++) {
		for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
			if (!hldev->config.ring.queue[i].configured)
				continue;
			if (!hldev->config.ring.queue[i].rth_en)
				val64 |= (BIT(i) >> (j*8));
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rts_qos_steering);
	xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x%llx",
			(unsigned long long)val64);

	/* Note: if a queue does not exist, it should be assigned a maximum
	 *	 frame length of zero; otherwise packet loss could occur
	 *	 (User Guide, p. 4-4).
	 *
	 * All configured rings will be properly set up at device open time
	 * via the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (hldev->config.ring.queue[i].configured)
			continue;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		                     &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
		 XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* The RLDRAM initialization procedure requires 500us to complete */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                     &bar0->mc_rldram_ref_per_herc);

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs_herc);
		xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x%llx",
			       (unsigned long long)val64);

		val64 = 0x0003570003010300ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                       &bar0->mc_rldram_mrs_herc);

		xge_os_mdelay(1);
	}
	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
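
The per-ring loops above assemble 64-bit register images with the Xframe MSB-first bit macros. A standalone sketch follows, assuming the usual s2io/xgehal definitions of BIT() and vBIT(); verify against your tree's headers, since these definitions are an assumption here.

#include <stdio.h>

typedef unsigned long long u64;

/* assumed Xframe convention: bit 0 is the register MSB */
#define BIT(loc)           (0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz) (((u64)(val)) << (64 - (loc) - (sz)))

int main(void)
{
	/* ring 2, priority 5: a 3-bit field at big-endian bits 21..23,
	 * as placed by vBIT(priority, 5 + i*8, 3) above */
	printf("0x%016llx\n", vBIT(5, 5 + 2 * 8, 3)); /* 0x0000050000000000 */
	printf("0x%016llx\n", BIT(2));                /* 0x2000000000000000 */
	return 0;
}
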
Example #6
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
		    queue->buffer_mode == 3 ||
		    queue->buffer_mode == 5);

	/* The last block in fact becomes the first: that is simply how
	 * it is filled up and linked by item_alloc(). */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
					 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x%llx initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
		ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
		(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: the bridge performs no snooping if no_snoop_bits are set */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}
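
The RXD_BACKOFF_INTERVAL field above is apparently programmed in PCI bus clocks, which is why the code multiplies the bus frequency (MHz) by the configured interval (us): MHz * us = clocks, e.g. 133 MHz * 16 us = 2128 clocks. A hypothetical one-line helper that makes the unit conversion explicit, relying only on the macro used above:

/* hypothetical wrapper; the conversion itself is what matters */
static inline u64
__tmp_rxd_backoff_clocks(int pci_freq_mherz, int backoff_interval_us)
{
	/* bus clocks elapsed in backoff_interval_us microseconds */
	return XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
	        (u64)pci_freq_mherz * backoff_interval_us);
}
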
Example #7
void
__hal_fifo_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64* tx_fifo_partitions[4];
	u64* tx_fifo_wrr[5];
	u64 val64, part0;
	int priority = 0;
	int i;

	/*  Tx DMA Initialization */

	tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
	tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
	tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
	tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;

	tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
	tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
	tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
	tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
	tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;

	/* Note: the WRR calendar must be configured before the transmit
	 *       FIFOs are enabled! (User Guide, p. 6-77) */

	/* all zeroes for Round-Robin */
	for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
				tx_fifo_wrr[i]);
	}

	/* reset all partitions except '0' */
	for (i = 1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		                     tx_fifo_partitions[i]);
	}

	/* configure only configured FIFOs */
	val64 = 0; part0 = 0;
	for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
		int reg_half = i % 2;
		int reg_num = i / 2;

		priority = 0;

		if (hldev->config.fifo.queue[i].configured) {
			val64 |= vBIT((hldev->config.fifo.queue[i].max - 1),
				      ((reg_half * 32) + 19), 13) |
				 vBIT(priority, ((reg_half * 32) + 5), 3);
		}

		/* NOTE: each 64-bit partition register covers two FIFOs,
		 *       so issue the write after filling the second (odd)
		 *       half of each register */
		if (reg_half) {
			if (reg_num == 0) {
				/* skip partition '0', must write it once at
				 * the end */
				part0 = val64;
			} else {
				xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				     val64, tx_fifo_partitions[reg_num]);
				xge_debug_fifo(XGE_TRACE,
					"fifo partition_%d at: "
					"0x%llx is: 0x%llx", reg_num,
					(unsigned long long)(ulong_t)
						tx_fifo_partitions[reg_num],
					(unsigned long long)val64);
			}
			val64 = 0;
		}
	}

	part0 |= BIT(0); /* to enable the FIFO partition. */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
	                     tx_fifo_partitions[0]);
	xge_os_wmb();
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
	                     tx_fifo_partitions[0]);
	xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
			"0x%llx is: 0x%llx",
			(unsigned long long)(ulong_t)
				tx_fifo_partitions[0],
			(unsigned long long) part0);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->tx_pa_cfg);
	val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
		 XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
		 XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
		 XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->tx_pa_cfg);
	xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
}
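
Both here (tx_fifo_partition_0) and in the ring path (mc_rldram_mrs), the 64-bit write is split into two 32-bit halves separated by xge_os_wmb(), so the halves reach the device in a controlled order; for partition_0 the upper half, which carries the BIT(0) enable, goes last. A minimal sketch of the pattern for that case (helper name hypothetical, reusing only calls from the snippet):

static void
__tmp_write64_enable_last(xge_hal_device_t *hldev, u64 val64, u64 *reg)
{
	/* lower half first: no trigger bit here */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
	                     (u32)val64, reg);
	/* order the two PIO writes */
	xge_os_wmb();
	/* upper half last: with BIT(0) numbered from the MSB, this is
	 * the half containing the enable bit */
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	                     (u32)(val64 >> 32), reg);
}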