Example #1
/**
 * xge_hal_stats_hw - Get HW device statistics.
 * @devh: HAL device handle.
 * @hw_info: Xframe statistic counters. See xge_hal_stats_hw_info_t.
 *           Returned by HAL.
 *
 * Get device and HAL statistics. The latter are part of the in-host
 * statistics that the HAL maintains for _that_ device.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_STATS_IS_NOT_READY - Statistics information is not
 * currently available.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_assert(xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN);

	if (!hldev->stats.is_initialized ||
	    !hldev->stats.is_enabled) {
	    *hw_info = NULL;
	    return XGE_HAL_INF_STATS_IS_NOT_READY;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING)
	xge_os_dma_sync(hldev->pdev,
	                hldev->stats.hw_info_dmah,
	                hldev->stats.dma_addr,
	                0,
	                sizeof(xge_hal_stats_hw_info_t),
	                XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/*
	 * Update the hw counters, taking into account the
	 * "reset" or "saved" values.
	 */
	__hal_stats_update_latest(devh);

	/*
	 * Statistics HW bug fixup for Xena and Herc: the adapter counts
	 * valid broadcast frames within rmac_vld_mcst_frms, so subtract
	 * them out to report a pure multicast count.
	 */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA ||
	    xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    u64 mcst, bcst;
	    xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest;

	    mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) |
	        hwsta->rmac_vld_mcst_frms;

	    bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) |
	        hwsta->rmac_vld_bcst_frms;

	    mcst -= bcst;

	    hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32);
	    hwsta->rmac_vld_mcst_frms = (u32)mcst;
	}

	*hw_info = &hldev->stats.hw_info_latest;

	return XGE_HAL_OK;
}
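
A minimal usage sketch for the routine above; `my_devh` and the printing helper are assumptions for illustration, not part of the HAL source:

/*
 * Hypothetical caller: poll the hardware statistics and print one
 * counter. Assumes a valid device handle obtained elsewhere and that
 * xge_os_printf() is available from the OS abstraction layer.
 */
static void
print_rx_multicast(xge_hal_device_h my_devh)
{
	xge_hal_stats_hw_info_t *hw_info;

	if (xge_hal_stats_hw(my_devh, &hw_info) != XGE_HAL_OK)
		return;	/* stats not initialized or not enabled yet */

	/* Each 64-bit counter is split across an _oflow (high 32 bits)
	 * field and a base (low 32 bits) field, as in the fixup above. */
	xge_os_printf("rmac_vld_mcst_frms: %llu",
	    (unsigned long long)
	    (((u64)hw_info->rmac_vld_mcst_frms_oflow << 32) |
	     hw_info->rmac_vld_mcst_frms));
}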
Example #2
void
__hal_fifo_close(xge_hal_channel_h channelh)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;

	if (fifo->mempool) {
		__hal_mempool_destroy(fifo->mempool);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev);
#endif
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
#if defined(XGE_HAL_TX_MULTI_POST)
		xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
		xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
					     hldev->pdev);
#endif
	}
}
Example #3
/*
 * __hal_stats_terminate
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 *
 * Terminate the per-device statistics object.
 */
void
__hal_stats_terminate (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev;

	xge_assert(stats->hw_info);

	hldev = (xge_hal_device_t*)stats->devh;
	xge_assert(hldev);
	xge_assert(stats->is_initialized);
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    xge_os_dma_unmap(hldev->pdev,
	                     stats->hw_info_dmah,
	                     stats->dma_addr,
	                     sizeof(xge_hal_stats_hw_info_t),
	                     XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	                    stats->hw_info,
	                    sizeof(xge_hal_stats_hw_info_t),
	                    &stats->hw_info_dma_acch,
	                    &stats->hw_info_dmah);
	} else {
	    xge_os_dma_unmap(hldev->pdev,
	                     stats->hw_info_dmah,
	                     stats->dma_addr,
	                     sizeof(xge_hal_stats_pcim_info_t),
	                     XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	                    stats->pcim_info,
	                    sizeof(xge_hal_stats_pcim_info_t),
	                    &stats->hw_info_dma_acch,
	                    &stats->hw_info_dmah);

	    xge_os_free(hldev->pdev, stats->pcim_info_saved,
	                sizeof(xge_hal_stats_pcim_info_t));

	    xge_os_free(hldev->pdev, stats->pcim_info_latest,
	                sizeof(xge_hal_stats_pcim_info_t));

	}

	stats->is_initialized = 0;
	stats->is_enabled = 0;
}
Example #4
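/*
 * __hal_stats_save - Snapshot the current counters.
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 *
 * Save the current counter values into the *_saved copies, so that
 * subsequently computed "latest" values are deltas relative to this
 * point (see __hal_stats_update_latest()).
 */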
static void
__hal_stats_save (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)stats->devh;

	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    xge_hal_stats_hw_info_t *latest;

	    (void) xge_hal_stats_hw(stats->devh, &latest);

	    xge_os_memcpy(&stats->hw_info_saved, stats->hw_info,
	          sizeof(xge_hal_stats_hw_info_t));
	} else {
	    xge_hal_stats_pcim_info_t *latest;

	    (void) xge_hal_stats_pcim(stats->devh, &latest);

	    xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info,
	          sizeof(xge_hal_stats_pcim_info_t));
	}
}
Example #5
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i, j;

	/* Rx DMA initialization. */

	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].priority,
							(5 + (i * 8)), 3);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			&bar0->rx_queue_priority);
	xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x%llx",
			(unsigned long long)val64);

	/* Configuring ring queues according to per-ring configuration */
	val64 = 0;
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (!hldev->config.ring.queue[i].configured)
			continue;
		val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,
			      (i * 8), 8);
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rx_queue_cfg);
	xge_debug_ring(XGE_TRACE, "DRAM configured to 0x%llx",
			(unsigned long long)val64);

	/* Activate Rx steering */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->rts_qos_steering);
	for (j = 0; j < 8 /* QoS max */; j++) {
		for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
			if (!hldev->config.ring.queue[i].configured)
				continue;
			if (!hldev->config.ring.queue[i].rth_en)
				val64 |= (BIT(i) >> (j*8));
		}
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->rts_qos_steering);
	xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x%llx",
			(unsigned long long)val64);

	/* Note: If a queue does not exist, it should be assigned a maximum
	 *	 length of zero. Otherwise, packet loss could occur.
	 *	 (See p. 4-4 of the user guide.)
	 *
	 * All configured rings will be properly set up at device-open time
	 * via the device_mtu_set() API call. */
	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
		if (hldev->config.ring.queue[i].configured)
			continue;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		                     &bar0->rts_frm_len_n[i]);
	}

#ifdef XGE_HAL_HERC_EMULATION
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
	val64 |= 0x0000000000010000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e60));

	val64 |= 0x003a000000000000;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
	xge_os_mdelay(2000);
#endif

	/* Now enable MC-RLDRAM after setting the MC_QUEUE sizes. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs);
	val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
		 XGE_HAL_MC_RLDRAM_MRS_ENABLE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->mc_rldram_mrs);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->mc_rldram_mrs);

	/* The RLDRAM initialization procedure requires 500us to complete. */
	xge_os_mdelay(1);

	/* Temporary fixes for Herc RLDRAM */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                     &bar0->mc_rldram_ref_per_herc);

		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->mc_rldram_mrs_herc);
		xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x%llx",
			       (unsigned long long)val64);

		val64 = 0x0003570003010300ULL;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		                       &bar0->mc_rldram_mrs_herc);

		xge_os_mdelay(1);
	}
	xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
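
A note on the bit packing used above: in the Xframe HAL, vBIT(val, loc, sz) places val into an sz-bit field located loc bits from the top of the 64-bit register word (bit 0 is the MSB). A standalone sketch, assuming the commonly used definition rather than quoting it from this file:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

/* Assumed Xframe-style helper: place `val` in an `sz`-bit field that
 * starts `loc` bits from the top of a 64-bit word (bit 0 = MSB). */
#define vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

static void
check_ring_priority_packing(void)
{
	/* Ring 0, priority 7 -> the 3-bit field at bits 5..7. */
	assert(vBIT(7, 5, 3) == ((u64)7 << 56));
	/* Ring 1's field sits 8 bits further down (5 + 1*8 = 13). */
	assert(vBIT(7, 5 + 1 * 8, 3) == ((u64)7 << 48));
}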
Example #6
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
		    queue->buffer_mode == 3 ||
		    queue->buffer_mode == 5);

	/* The last block in fact becomes the first. This is just the way it
	 * is filled up and linked by item_alloc(). */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
					 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x%llx initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
		ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
		(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

	/* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI |
		  XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clear the STRIP_VLAN_TAG bit, then set it from the upper-layer
	 * configuration. */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}
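
The vBIT((queue->buffer_mode >> 1), 14, 2) expression above maps the legal buffer modes 1, 3, 5 to the encodings 0, 1, 2 and packs the result into bits 14..15 of the PRC control word. A quick standalone check, reusing the same assumed vBIT definition as in the sketch after Example #5:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;

/* Same assumed helper as before (bit 0 = MSB of the 64-bit word). */
#define vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

static void
check_buffer_mode_field(void)
{
	/* Buffer modes 1, 3, 5 encode to 0, 1, 2 via the right shift. */
	assert((1 >> 1) == 0 && (3 >> 1) == 1 && (5 >> 1) == 2);
	/* The 2-bit encoding lands at bits 14..15 of the control word. */
	assert(vBIT(5 >> 1, 14, 2) == ((u64)2 << 48));
}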
Example #7
/*
 * __hal_stats_update_latest - Update hw stats counters, based on the real
 * hardware-maintained counters and the stored "reset" values.
 */
static void
__hal_stats_update_latest(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

#define set_latest_stat_cnt(_dev, _p)                                        \
	hldev->stats.hw_info_latest._p =                                     \
	    ((hldev->stats.hw_info->_p >= hldev->stats.hw_info_saved._p) ?   \
	     hldev->stats.hw_info->_p - hldev->stats.hw_info_saved._p :      \
	     ((-1) - hldev->stats.hw_info_saved._p) + hldev->stats.hw_info->_p)

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) {
	    __hal_stats_pcim_update_latest(devh);
	    return;
	}

	/* Tx MAC statistics counters. */
	set_latest_stat_cnt(hldev, tmac_frms);
	set_latest_stat_cnt(hldev, tmac_data_octets);
	set_latest_stat_cnt(hldev, tmac_drop_frms);
	set_latest_stat_cnt(hldev, tmac_mcst_frms);
	set_latest_stat_cnt(hldev, tmac_bcst_frms);
	set_latest_stat_cnt(hldev, tmac_pause_ctrl_frms);
	set_latest_stat_cnt(hldev, tmac_ttl_octets);
	set_latest_stat_cnt(hldev, tmac_ucst_frms);
	set_latest_stat_cnt(hldev, tmac_nucst_frms);
	set_latest_stat_cnt(hldev, tmac_any_err_frms);
	set_latest_stat_cnt(hldev, tmac_ttl_less_fb_octets);
	set_latest_stat_cnt(hldev, tmac_vld_ip_octets);
	set_latest_stat_cnt(hldev, tmac_vld_ip);
	set_latest_stat_cnt(hldev, tmac_drop_ip);
	set_latest_stat_cnt(hldev, tmac_icmp);
	set_latest_stat_cnt(hldev, tmac_rst_tcp);
	set_latest_stat_cnt(hldev, tmac_tcp);
	set_latest_stat_cnt(hldev, tmac_udp);
	set_latest_stat_cnt(hldev, reserved_0);

	/* Rx MAC Statistics counters. */
	set_latest_stat_cnt(hldev, rmac_vld_frms);
	set_latest_stat_cnt(hldev, rmac_data_octets);
	set_latest_stat_cnt(hldev, rmac_fcs_err_frms);
	set_latest_stat_cnt(hldev, rmac_drop_frms);
	set_latest_stat_cnt(hldev, rmac_vld_mcst_frms);
	set_latest_stat_cnt(hldev, rmac_vld_bcst_frms);
	set_latest_stat_cnt(hldev, rmac_in_rng_len_err_frms);
	set_latest_stat_cnt(hldev, rmac_out_rng_len_err_frms);
	set_latest_stat_cnt(hldev, rmac_long_frms);
	set_latest_stat_cnt(hldev, rmac_pause_ctrl_frms);
	set_latest_stat_cnt(hldev, rmac_unsup_ctrl_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_octets);
	set_latest_stat_cnt(hldev, rmac_accepted_ucst_frms);
	set_latest_stat_cnt(hldev, rmac_accepted_nucst_frms);
	set_latest_stat_cnt(hldev, rmac_discarded_frms);
	set_latest_stat_cnt(hldev, rmac_drop_events);
	set_latest_stat_cnt(hldev, reserved_1);
	set_latest_stat_cnt(hldev, rmac_ttl_less_fb_octets);
	set_latest_stat_cnt(hldev, rmac_ttl_frms);
	set_latest_stat_cnt(hldev, reserved_2);
	set_latest_stat_cnt(hldev, reserved_3);
	set_latest_stat_cnt(hldev, rmac_usized_frms);
	set_latest_stat_cnt(hldev, rmac_osized_frms);
	set_latest_stat_cnt(hldev, rmac_frag_frms);
	set_latest_stat_cnt(hldev, rmac_jabber_frms);
	set_latest_stat_cnt(hldev, reserved_4);
	set_latest_stat_cnt(hldev, rmac_ttl_64_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_65_127_frms);
	set_latest_stat_cnt(hldev, reserved_5);
	set_latest_stat_cnt(hldev, rmac_ttl_128_255_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_256_511_frms);
	set_latest_stat_cnt(hldev, reserved_6);
	set_latest_stat_cnt(hldev, rmac_ttl_512_1023_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_1024_1518_frms);
	set_latest_stat_cnt(hldev, reserved_7);
	set_latest_stat_cnt(hldev, rmac_ip);
	set_latest_stat_cnt(hldev, rmac_ip_octets);
	set_latest_stat_cnt(hldev, rmac_hdr_err_ip);
	set_latest_stat_cnt(hldev, rmac_drop_ip);
	set_latest_stat_cnt(hldev, rmac_icmp);
	set_latest_stat_cnt(hldev, reserved_8);
	set_latest_stat_cnt(hldev, rmac_tcp);
	set_latest_stat_cnt(hldev, rmac_udp);
	set_latest_stat_cnt(hldev, rmac_err_drp_udp);
	set_latest_stat_cnt(hldev, rmac_xgmii_err_sym);
	set_latest_stat_cnt(hldev, rmac_frms_q0);
	set_latest_stat_cnt(hldev, rmac_frms_q1);
	set_latest_stat_cnt(hldev, rmac_frms_q2);
	set_latest_stat_cnt(hldev, rmac_frms_q3);
	set_latest_stat_cnt(hldev, rmac_frms_q4);
	set_latest_stat_cnt(hldev, rmac_frms_q5);
	set_latest_stat_cnt(hldev, rmac_frms_q6);
	set_latest_stat_cnt(hldev, rmac_frms_q7);
	set_latest_stat_cnt(hldev, rmac_full_q0);
	set_latest_stat_cnt(hldev, rmac_full_q1);
	set_latest_stat_cnt(hldev, rmac_full_q2);
	set_latest_stat_cnt(hldev, rmac_full_q3);
	set_latest_stat_cnt(hldev, rmac_full_q4);
	set_latest_stat_cnt(hldev, rmac_full_q5);
	set_latest_stat_cnt(hldev, rmac_full_q6);
	set_latest_stat_cnt(hldev, rmac_full_q7);
	set_latest_stat_cnt(hldev, rmac_pause_cnt);
	set_latest_stat_cnt(hldev, reserved_9);
	set_latest_stat_cnt(hldev, rmac_xgmii_data_err_cnt);
	set_latest_stat_cnt(hldev, rmac_xgmii_ctrl_err_cnt);
	set_latest_stat_cnt(hldev, rmac_accepted_ip);
	set_latest_stat_cnt(hldev, rmac_err_tcp);

	/* PCI/PCI-X Read transaction statistics. */
	set_latest_stat_cnt(hldev, rd_req_cnt);
	set_latest_stat_cnt(hldev, new_rd_req_cnt);
	set_latest_stat_cnt(hldev, new_rd_req_rtry_cnt);
	set_latest_stat_cnt(hldev, rd_rtry_cnt);
	set_latest_stat_cnt(hldev, wr_rtry_rd_ack_cnt);

	/* PCI/PCI-X write transaction statistics. */
	set_latest_stat_cnt(hldev, wr_req_cnt);
	set_latest_stat_cnt(hldev, new_wr_req_cnt);
	set_latest_stat_cnt(hldev, new_wr_req_rtry_cnt);
	set_latest_stat_cnt(hldev, wr_rtry_cnt);
	set_latest_stat_cnt(hldev, wr_disc_cnt);
	set_latest_stat_cnt(hldev, rd_rtry_wr_ack_cnt);

	/* DMA Transaction statistics. */
	set_latest_stat_cnt(hldev, txp_wr_cnt);
	set_latest_stat_cnt(hldev, txd_rd_cnt);
	set_latest_stat_cnt(hldev, txd_wr_cnt);
	set_latest_stat_cnt(hldev, rxd_rd_cnt);
	set_latest_stat_cnt(hldev, rxd_wr_cnt);
	set_latest_stat_cnt(hldev, txf_rd_cnt);
	set_latest_stat_cnt(hldev, rxf_wr_cnt);

	/* Enhanced Herc statistics */
	set_latest_stat_cnt(hldev, tmac_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_data_octets_oflow);
	set_latest_stat_cnt(hldev, tmac_mcst_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_bcst_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_ttl_octets_oflow);
	set_latest_stat_cnt(hldev, tmac_ucst_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_nucst_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_any_err_frms_oflow);
	set_latest_stat_cnt(hldev, tmac_vlan_frms);
	set_latest_stat_cnt(hldev, tmac_vld_ip_oflow);
	set_latest_stat_cnt(hldev, tmac_drop_ip_oflow);
	set_latest_stat_cnt(hldev, tmac_icmp_oflow);
	set_latest_stat_cnt(hldev, tmac_rst_tcp_oflow);
	set_latest_stat_cnt(hldev, tmac_udp_oflow);
	set_latest_stat_cnt(hldev, tpa_unknown_protocol);
	set_latest_stat_cnt(hldev, tpa_parse_failure);
	set_latest_stat_cnt(hldev, rmac_vld_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_data_octets_oflow);
	set_latest_stat_cnt(hldev, rmac_vld_mcst_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_vld_bcst_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_ttl_octets_oflow);
	set_latest_stat_cnt(hldev, rmac_accepted_ucst_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_accepted_nucst_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_discarded_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_drop_events_oflow);
	set_latest_stat_cnt(hldev, rmac_usized_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_osized_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_frag_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_jabber_frms_oflow);
	set_latest_stat_cnt(hldev, rmac_ip_oflow);
	set_latest_stat_cnt(hldev, rmac_drop_ip_oflow);
	set_latest_stat_cnt(hldev, rmac_icmp_oflow);
	set_latest_stat_cnt(hldev, rmac_udp_oflow);
	set_latest_stat_cnt(hldev, rmac_err_drp_udp_oflow);
	set_latest_stat_cnt(hldev, rmac_pause_cnt_oflow);
	set_latest_stat_cnt(hldev, rmac_ttl_1519_4095_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_4096_8191_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_8192_max_frms);
	set_latest_stat_cnt(hldev, rmac_ttl_gt_max_frms);
	set_latest_stat_cnt(hldev, rmac_osized_alt_frms);
	set_latest_stat_cnt(hldev, rmac_jabber_alt_frms);
	set_latest_stat_cnt(hldev, rmac_gt_max_alt_frms);
	set_latest_stat_cnt(hldev, rmac_vlan_frms);
	set_latest_stat_cnt(hldev, rmac_fcs_discard);
	set_latest_stat_cnt(hldev, rmac_len_discard);
	set_latest_stat_cnt(hldev, rmac_da_discard);
	set_latest_stat_cnt(hldev, rmac_pf_discard);
	set_latest_stat_cnt(hldev, rmac_rts_discard);
	set_latest_stat_cnt(hldev, rmac_red_discard);
	set_latest_stat_cnt(hldev, rmac_ingm_full_discard);
	set_latest_stat_cnt(hldev, rmac_accepted_ip_oflow);
	set_latest_stat_cnt(hldev, link_fault_cnt);
}
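
set_latest_stat_cnt computes a wrap-safe delta between the raw hardware counter and the value saved at the last reset; the (-1) term evaluates to all-ones in the counter's unsigned type. A standalone illustration for a 32-bit field (note that the wrap branch comes out one short of the exact modular difference, which plain unsigned subtraction would give):

#include <assert.h>
#include <stdint.h>

/* Mirror of the set_latest_stat_cnt arithmetic for a u32 field. */
static uint32_t
stat_delta32(uint32_t raw, uint32_t saved)
{
	return (raw >= saved) ? raw - saved
	                      : (UINT32_MAX - saved) + raw;
}

static void
check_stat_delta(void)
{
	/* No wrap: plain subtraction. */
	assert(stat_delta32(150, 100) == 50);
	/* Wrapped: snapshot near the top, counter restarted near zero.
	 * The exact modular difference is 100; the macro's branch gives
	 * 99 (one less), matching the HAL's formula. */
	assert(stat_delta32(49, UINT32_MAX - 50) == 99);
}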
Example #8
/*
 * __hal_stats_initialize
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * @devh: HAL device handle.
 *
 * Initialize per-device statistics object.
 * See also: xge_hal_stats_getinfo(), xge_hal_status_e{}.
 */
xge_hal_status_e
__hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
{
	int dma_flags;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_assert(!stats->is_initialized);

	dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	dma_flags |= XGE_OS_DMA_STREAMING;
#endif
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    stats->hw_info =
	        (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_hw_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->hw_info == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(stats->hw_info,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_saved,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_latest,
	        sizeof(xge_hal_stats_hw_info_t));

	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                                     stats->hw_info_dmah,
	                                     stats->hw_info,
	                                     sizeof(xge_hal_stats_hw_info_t),
	                                     XGE_OS_DMA_DIR_FROMDEVICE,
	                                     XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                                     XGE_OS_DMA_CONSISTENT
#else
	                                     XGE_OS_DMA_STREAMING
#endif
	                                     );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->hw_info);
	        xge_os_dma_free(hldev->pdev,
	                        stats->hw_info,
	                        sizeof(xge_hal_stats_hw_info_t),
	                        &stats->hw_info_dma_acch,
	                        &stats->hw_info_dmah);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	} else {
	    stats->pcim_info_saved =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_saved == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info_latest =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_latest == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info =
	        (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_pcim_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->pcim_info == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    xge_os_memzero(stats->pcim_info,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_latest,
	        sizeof(xge_hal_stats_pcim_info_t));

	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                                     stats->hw_info_dmah,
	                                     stats->pcim_info,
	                                     sizeof(xge_hal_stats_pcim_info_t),
	                                     XGE_OS_DMA_DIR_FROMDEVICE,
	                                     XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                                     XGE_OS_DMA_CONSISTENT
#else
	                                     XGE_OS_DMA_STREAMING
#endif
	                                     );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->pcim_info);

	        xge_os_dma_free(hldev->pdev,
	                        stats->pcim_info,
	                        sizeof(xge_hal_stats_pcim_info_t),
	                        &stats->hw_info_dma_acch,
	                        &stats->hw_info_dmah);

	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	                    sizeof(xge_hal_stats_pcim_info_t));

	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	                    sizeof(xge_hal_stats_pcim_info_t));

	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	stats->devh = devh;
	xge_os_memzero(&stats->sw_dev_info_stats,
	         sizeof(xge_hal_stats_device_info_t));

	stats->is_initialized = 1;

	return XGE_HAL_OK;
}
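
Each failure path in __hal_stats_initialize repeats the cleanup of everything allocated so far, which is easy to get out of sync as allocations are added. A common alternative idiom (a generic sketch with plain malloc/free, not the HAL's actual style) is a single goto-based unwind ladder:

#include <stdlib.h>

/* Generic sketch of the goto-unwind idiom: three allocations, one
 * cleanup ladder, each label undoing exactly one successful step. */
static int
alloc_three(void **a, void **b, void **c)
{
	*a = malloc(64);
	if (*a == NULL)
		goto fail_a;
	*b = malloc(64);
	if (*b == NULL)
		goto fail_b;
	*c = malloc(64);
	if (*c == NULL)
		goto fail_c;
	return 0;

fail_c:
	free(*b);
fail_b:
	free(*a);
fail_a:
	return -1;
}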
Example #9
xge_hal_status_e
__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_device_t *hldev;
	xge_hal_status_e status;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_queue_t *queue;
	int i, txdl_size, max_arr_index, mid_point;
	xge_hal_dtr_h  dtrh;

	hldev = (xge_hal_device_t *)fifo->channel.devh;
	fifo->config = &hldev->config.fifo;
	queue = &fifo->config->queue[attr->post_qid];

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
		xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
		fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
		xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
					  hldev->irqh);
		fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#endif

	/* Initializing the BAR1 address as the start of
	 * the FIFO queue pointer and as a location of FIFO control
	 * word. */
	fifo->hw_pair =
	        (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
		        (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
	if (queue->intr) {
		fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
	}
	fifo->no_snoop_bits =
		(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));

	/*
	 * FIFO memory management strategy:
	 *
	 * The TxDL is split into three independent parts:
	 *	- set of TxDs
	 *	- TxD HAL private part
	 *	- upper-layer private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand with a size that fits into one memory block. One memory
	 * block may contain more than one TxDL. In the simple case the
	 * memory block size can be equal to the CPU page size; on more
	 * sophisticated OSes a memory block can be contiguous across
	 * several pages.
	 *
	 * During "reserve" operations more memory can be allocated on
	 * demand, for example due to a FIFO-full condition.
	 *
	 * The pool of memory blocks never shrinks, except in the
	 * __hal_fifo_close() routine, which essentially stops the channel
	 * and frees the resources.
	 */

	/* TxDL common private size == TxDL private + ULD private */
	fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
			  attr->per_dtr_space;
	fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size - 1) /
			   __xge_os_cacheline_size) * __xge_os_cacheline_size;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
	txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
			__xge_os_cacheline_size) * __xge_os_cacheline_size;

	if (fifo->txdl_size != txdl_size)
		xge_debug_fifo(XGE_ERR, "cacheline > 128 (??): %d, %d, %d, %d",
			fifo->config->max_frags, fifo->txdl_size, txdl_size,
			__xge_os_cacheline_size);

	fifo->txdl_size = txdl_size;

	/* dtr_init() will be called back from item_alloc(), and the
	 * channel's userdata may likewise be used prior to
	 * channel_initialize(), so set both fields now. */
	fifo->channel.dtr_init = attr->dtr_init;
	fifo->channel.userdata = attr->userdata;
	fifo->txdl_per_memblock = fifo->config->memblock_size /
		fifo->txdl_size;

	fifo->mempool = __hal_mempool_create(hldev->pdev,
					     fifo->config->memblock_size,
					     fifo->txdl_size,
					     fifo->priv_size,
					     queue->initial,
					     queue->max,
					     __hal_fifo_mempool_item_alloc,
					     __hal_fifo_mempool_item_free,
					     fifo);
	if (fifo->mempool == NULL) {
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh, attr,
					__hal_mempool_items_arr(fifo->mempool),
					queue->initial, queue->max,
					fifo->config->reserve_threshold);
	if (status != XGE_HAL_OK) {
		__hal_fifo_close(channelh);
		return status;
	}
	xge_debug_fifo(XGE_TRACE,
		"DTR  reserve_length:%d reserve_top:%d\n"
		"max_frags:%d reserve_threshold:%d\n"
		"memblock_size:%d alignment_size:%d max_aligned_frags:%d\n",
		fifo->channel.reserve_length, fifo->channel.reserve_top,
		fifo->config->max_frags, fifo->config->reserve_threshold,
		fifo->config->memblock_size, fifo->config->alignment_size,
		fifo->config->max_aligned_frags);

#ifdef XGE_DEBUG_ASSERT
	for (i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
			" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	xge_assert(fifo->channel.reserve_length);
	/* reverse the FIFO dtr array */
	max_arr_index = fifo->channel.reserve_length - 1;
	max_arr_index -= fifo->channel.reserve_top;
	xge_assert(max_arr_index);
	mid_point = (fifo->channel.reserve_length -
		     fifo->channel.reserve_top) / 2;
	for (i = 0; i < mid_point; i++) {
		dtrh = fifo->channel.reserve_arr[i];
		fifo->channel.reserve_arr[i] =
			fifo->channel.reserve_arr[max_arr_index - i];
		fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
	}

#ifdef XGE_DEBUG_ASSERT
	for (i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
			" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	return XGE_HAL_OK;
}