void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    int i, j;

    /* Rx DMA initialization. */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].priority,
                (5 + (i * 8)), 3);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
            &bar0->rx_queue_priority);
    xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x%llx",
            (unsigned long long)val64);

    /* Configure ring queues according to the per-ring configuration. */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,
                (i * 8), 8);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
            &bar0->rx_queue_cfg);
    xge_debug_ring(XGE_TRACE, "DRAM configured to 0x%llx",
            (unsigned long long)val64);

    if (!hldev->config.rts_qos_en &&
        !hldev->config.rts_port_en &&
        !hldev->config.rts_mac_en) {

        /* Activate default (QoS-based) Rx steering. */
        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &bar0->rts_qos_steering);
        for (j = 0; j < 8 /* QoS max */; j++) {
            for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
                if (!hldev->config.ring.queue[i].configured)
                    continue;
                if (!hldev->config.ring.queue[i].rth_en)
                    val64 |= (BIT(i) >> (j * 8));
            }
        }
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                &bar0->rts_qos_steering);
        xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x%llx",
                (unsigned long long)val64);
    }

    /* Note: if a queue does not exist, it must be assigned a maximum
     * frame length of zero; otherwise packet loss could occur
     * (p. 4-4, user guide).
     *
     * All configured rings are properly set up at device open time
     * via the device_mtu_set() API call. */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (hldev->config.ring.queue[i].configured)
            continue;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                &bar0->rts_frm_len_n[i]);
    }

#ifdef XGE_HAL_HERC_EMULATION
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
            ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
    val64 |= 0x0000000000010000ULL;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
            ((u8 *)bar0 + 0x2e60));

    val64 |= 0x003a000000000000ULL;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
            ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
    xge_os_mdelay(2000);
#endif

    /* Now enable MC-RLDRAM, after the MC_QUEUE sizes have been set. */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
            &bar0->mc_rldram_mrs);
    val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
        XGE_HAL_MC_RLDRAM_MRS_ENABLE;
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
            (u32)(val64 >> 32), &bar0->mc_rldram_mrs);
    xge_os_wmb();
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
            (u32)val64, &bar0->mc_rldram_mrs);

    /* The RLDRAM initialization procedure requires 500us to complete. */
    xge_os_mdelay(1);

    /* Temporary fixes for Herc RLDRAM. */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                &bar0->mc_rldram_ref_per_herc);

        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &bar0->mc_rldram_mrs_herc);
        xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x%llx",
                (unsigned long long)val64);

        val64 = 0x0003570003010300ULL;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                &bar0->mc_rldram_mrs_herc);
        xge_os_mdelay(1);
    }

    xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
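The register images above are assembled with the HAL's MSB-first bit macros: in the Xframe documentation, bit 0 is the most significant bit of a 64-bit word. Below is a minimal standalone sketch of how the rx_queue_priority image gets packed, assuming the usual xge-defs.h definitions of BIT()/vBIT(); the ring count and priorities are hypothetical. With the same convention, BIT(i) >> (j*8) in the QoS steering loop drops ring i's bit into the j-th 8-bit QoS strip of rts_qos_steering.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Assumed to match xge-defs.h: documented bit 0 is the MSB. */
#define BIT(loc)           (0x8000000000000000ULL >> (loc))
#define vBIT(val, loc, sz) (((u64)(val)) << (64 - (loc) - (sz)))

int main(void)
{
    u64 val64 = 0;
    int i;

    /* Hypothetical setup: four configured rings, ring i at priority i,
     * packed the same way __hal_ring_hw_initialize does it: one 3-bit
     * priority field per ring at documented offset 5 + i*8. */
    for (i = 0; i < 4; i++)
        val64 |= vBIT(i, 5 + (i * 8), 3);

    /* Prints rx_queue_priority = 0x0001020300000000 */
    printf("rx_queue_priority = 0x%016llx\n", (unsigned long long)val64);
    return 0;
}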
void
__hal_fifo_hw_initialize(xge_hal_device_h devh)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 *tx_fifo_partitions[4];
    u64 *tx_fifo_wrr[5];
    u64 val64, part0;
    int priority = 0;
    int i;

    /* Tx DMA initialization. */
    tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
    tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
    tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
    tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;

    tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
    tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
    tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
    tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
    tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;

    /* Note: the WRR calendar must be configured before the transmit
     * FIFOs are enabled (p. 6-77, user guide). */

    /* All zeroes means plain round-robin. */
    for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                tx_fifo_wrr[i]);
    }

    /* Reset all partitions but '0'. */
    for (i = 1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                tx_fifo_partitions[i]);
    }

    /* Configure only the configured FIFOs. */
    val64 = 0;
    part0 = 0;
    for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
        int reg_half = i % 2;
        int reg_num = i / 2;

        priority = 0;
        if (hldev->config.fifo.queue[i].configured) {
            val64 |= vBIT((hldev->config.fifo.queue[i].max - 1),
                    ((reg_half * 32) + 19), 13) |
                vBIT(priority, ((reg_half * 32) + 5), 3);
        }

        /* NOTE: each partition register packs two FIFO entries, one per
         * 32-bit half, so issue the write once per register, on the
         * second (odd) half. */
        if (reg_half) {
            if (reg_num == 0) {
                /* Skip partition '0'; it must be written once,
                 * at the end. */
                part0 = val64;
            } else {
                xge_os_pio_mem_write64(hldev->pdev,
                        hldev->regh0, val64,
                        tx_fifo_partitions[reg_num]);
                xge_debug_fifo(XGE_TRACE, "fifo partition_%d at: "
                        "0x%llx is: 0x%llx", reg_num,
                        (unsigned long long)(ulong_t)
                        tx_fifo_partitions[reg_num],
                        (unsigned long long)val64);
            }
            val64 = 0;
        }
    }

    part0 |= BIT(0); /* enables the FIFO partitions */
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
            tx_fifo_partitions[0]);
    xge_os_wmb();
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
            (u32)(part0 >> 32), tx_fifo_partitions[0]);
    xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: 0x%llx is: 0x%llx",
            (unsigned long long)(ulong_t)tx_fifo_partitions[0],
            (unsigned long long)part0);

    /*
     * Initialize the TX_PA_CONFIG register to ignore packet
     * integrity checking.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
            &bar0->tx_pa_cfg);
    val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
        XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
        XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
        XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
            &bar0->tx_pa_cfg);

    xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
}
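Both functions commit their final "arming" register as two 32-bit PIO writes rather than one 64-bit write, ordered so the half carrying the enable bit lands last: tx_fifo_partition_0's enable bit BIT(0) lives in the upper 32 bits, so the lower half is written first, while mc_rldram_mrs is written upper half first, its enable flags presumably sitting in the lower half. Below is a minimal sketch of that pattern, with writel()/wmb() as hypothetical stand-ins for the primitives behind __hal_pio_mem_write32_lower/upper and xge_os_wmb(), and assuming a little-endian host (the real helpers hide the endianness):

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Hypothetical stand-ins for the platform PIO store and write barrier. */
static inline void writel(u32 v, volatile u32 *addr) { *addr = v; }
static inline void wmb(void) { __sync_synchronize(); }

/*
 * Commit a 64-bit register image as two 32-bit halves, writing the
 * enable-carrying upper half last (the tx_fifo_partition_0 case).
 * Assumes little-endian: half[0] aliases the low 32 bits of *reg.
 */
static void write64_upper_last(u64 val64, volatile u64 *reg)
{
    volatile u32 *half = (volatile u32 *)reg;

    writel((u32)val64, &half[0]);           /* lower half first */
    wmb();                                  /* keep the two stores ordered */
    writel((u32)(val64 >> 32), &half[1]);   /* enable bit lands last */
}

Splitting the write this way keeps the partition disabled until every other field of the register already holds its final value.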