static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry entries[MLX4_NUM_EQ];
	int err;
	int i;

	if (msi_x) {
		for (i = 0; i < MLX4_NUM_EQ; ++i)
			entries[i].entry = i;

		err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
		if (err) {
			if (err > 0)
				mlx4_info(dev, "Only %d MSI-X vectors available, "
					  "not using MSI-X\n", err);
			goto no_msi;
		}

		for (i = 0; i < MLX4_NUM_EQ; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;
		return;
	}

no_msi:
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Stop the PTP support.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
}
static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
{
	int i;
	struct mlx4_en_dev *mdev = ctx;

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}
}
/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Stop the PTP support.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
#endif
}
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	uint64_t ns, zero = 0;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	/* Using shift to make calculation more accurate. Since current HW
	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
	 * register, the biggest shift when calculating using u64, is 14
	 * (max_cycles * multiplier < 2^64)
	 */
	mdev->cycles.shift = 14;
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock,
				     mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, epoch_nsec());
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}
#endif
}
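The "biggest shift is 14" reasoning in the comment above can be verified with a few lines of arithmetic. The following standalone snippet (illustrative only, not driver code) mirrors the clocksource_khz2mult() computation mult = (10^6 << shift) / khz and confirms that, for a 427 MHz clock and a 48-bit cycle counter, 14 is indeed the largest shift for which max_cycles * mult still fits in a u64:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t khz = 427000;                  /* 427 MHz core clock */
	const uint64_t max_cycles = (1ULL << 48) - 1; /* 48-bit cycle counter */
	unsigned int shift;

	for (shift = 13; shift <= 15; shift++) {
		/* clocksource_khz2mult(): mult = (10^6 << shift) / khz */
		uint64_t mult = (1000000ULL << shift) / khz;
		/* max_cycles * mult < 2^64  <=>  mult <= UINT64_MAX / max_cycles,
		 * tested this way to avoid overflowing the check itself */
		int fits = mult <= UINT64_MAX / max_cycles;

		printf("shift %u: mult %llu -> %s\n", shift,
		       (unsigned long long)mult,
		       fits ? "fits in u64" : "overflows u64");
	}
	return 0;
}

Running this prints that shift 14 (mult ~38368) fits while shift 15 (mult ~76737) would overflow, matching the driver comment.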
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
	int err = 0;

	mlx4_enter_error_state(persist);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
	    !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
		err = mlx4_restart_one(persist->pdev, false, NULL);
		mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
			  err);
	}
	mutex_unlock(&persist->interface_state_mutex);
}
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq;
	int err;
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     num_possible_cpus() + 1);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* A positive return is the number of vectors that
			 * could be allocated; retry with that many if at
			 * least two are available. */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		dev->caps.num_comp_vectors = nreq - 1;
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;
	u64 ns;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports; skip initialization if
	 * it was already done for another port.
	 */
	if (mdev->ptp_clock)
		return;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock,
				     mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_lock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_unlock_irqrestore(&mdev->clock_lock, flags);

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least once every wrap around.
	 */
	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask);
	do_div(ns, NSEC_PER_SEC / 2 / HZ);
	mdev->overflow_period = ns;

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else {
		mlx4_info(mdev, "registered PHC clock\n");
	}
}
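Unlike the older variant above, this version derives the clocksource shift from the reported core-clock frequency via freq_to_shift() instead of hard-coding 14. A sketch of how such a helper can be written, modeled on the upstream mlx4 en_clock.c (the MLX4_EN_WRAP_AROUND_SEC constant and the exact rounding are assumptions if ported elsewhere):

#define MLX4_EN_WRAP_AROUND_SEC	10UL

/* Derive the clocksource shift from the core clock frequency (in MHz):
 * find the largest multiplier that keeps cycles-to-ns math within 64 bits
 * for at least MLX4_EN_WRAP_AROUND_SEC worth of cycles (the timecounter's
 * overflow watchdog guarantees a read within that window), then recover
 * the shift by reversing clocksource_khz2mult(). */
static u32 freq_to_shift(u16 freq)
{
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	/* Round the cycle budget up to a power of two */
	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* Largest multiplier keeping max_val_cycles * mult within 64 bits */
	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

	/* Reverse of clocksource_khz2mult(): mult = (10^6 << shift) / khz */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
}

For the 427 MHz clock discussed earlier this yields a shift around 29 rather than 14, because bounding the cycle budget by the wrap-around window (instead of the full 48-bit mask) leaves room for a larger, more precise multiplier.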
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		if (err == -EACCES)
			mlx4_info(dev, "non-primary physical function, skipping.\n");
		else
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
	if (err)
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	static int mlx4_en_version_printed;
	struct mlx4_en_dev *mdev;
	int i;
	int err;

	if (!mlx4_en_version_printed) {
		printk(KERN_INFO "%s", mlx4_en_version);
		mlx4_en_version_printed++;
	}

	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
	if (!mdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &(dev->pdev->dev);
	mdev->pdev = dev->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
			  "FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
			  0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_uar;
	}

	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	err = mlx4_en_get_profile(mdev);
	if (err) {
		mlx4_err(mdev, "Bad module parameters, aborting.\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* If we did not receive an explicit number of Rx rings, default to
	 * the number of completion vectors populated by the mlx4_core */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
			  mdev->profile.prof[i].tx_ring_num, i);
		if (!mdev->profile.prof[i].rx_ring_num) {
			mdev->profile.prof[i].rx_ring_num =
				dev->caps.num_comp_vectors;
			mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
				  dev->caps.num_comp_vectors, i);
		} else
			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
				  mdev->profile.prof[i].rx_ring_num, i);
	}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		goto err_free_res;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &dev->persist->pdev->dev;
	mdev->pdev = dev->persist->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
			  0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}

	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	if (mlx4_en_get_profile(mdev)) {
		mlx4_err(mdev, "Bad module parameters, aborting\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Set default number of RX rings */
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 * by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue)
		goto err_mr;

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	mutex_init(&mdev->state_lock);
	mdev->device_up = true;

	/* Setup ports */

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}

	return mdev;

err_mr:
	(void) mlx4_mr_free(dev, &mdev->mr);
err_map:
	if (mdev->uar_map)
		iounmap(mdev->uar_map);
err_uar:
	mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
	kfree(mdev);
err_free_res:
	return NULL;
}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	static int mlx4_en_version_printed;
	struct mlx4_en_dev *mdev;
	int i;
	int err;

	if (!mlx4_en_version_printed) {
		printk(KERN_INFO "%s", mlx4_en_version);
		mlx4_en_version_printed++;
	}

	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
	if (!mdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &(dev->pdev->dev);
	mdev->pdev = dev->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
			  "FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			  MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
			  0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_uar;
	}

	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	err = mlx4_en_get_profile(mdev);
	if (err) {
		mlx4_err(mdev, "Bad module parameters, aborting.\n");
		goto err_mr;
	}

	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
			  mdev->profile.prof[i].tx_ring_num, i);
		mdev->profile.prof[i].rx_ring_num = min_t(int,
			roundup_pow_of_two(dev->caps.num_comp_vectors),
			MAX_RX_RINGS);
		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
			  mdev->profile.prof[i].rx_ring_num, i);
	}