Example No. 1
/*
 #ifdef __linux__
 static ssize_t shared_mr_proc_read(struct file *file,
				    char __user *buffer,
				    size_t len,
				    loff_t *offset)
 {
	return -ENOSYS;
 }

 static ssize_t shared_mr_proc_write(struct file *file,
				     const char __user *buffer,
				     size_t len,
				     loff_t *offset)
 {
	return -ENOSYS;
 }

 static int shared_mr_mmap(struct file *filep, struct vm_area_struct *vma)
 {
	struct proc_dir_entry *pde = PDE(filep->f_path.dentry->d_inode);
	struct mlx4_shared_mr_info *smr_info =
		(struct mlx4_shared_mr_info *)pde->data;

	Prevent any mapping not on start of area
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	return ib_umem_map_to_vma(smr_info->umem, vma);
 }

 static const struct file_operations shared_mr_proc_ops = {
	.owner	= THIS_MODULE,
	.read	= shared_mr_proc_read,
	.write	= shared_mr_proc_write,
	.mmap	= shared_mr_mmap
 };

 static mode_t convert_shared_access(int acc)
 {
	return (acc & IB_ACCESS_SHARED_MR_USER_READ   ? S_IRUSR : 0) |
	       (acc & IB_ACCESS_SHARED_MR_USER_WRITE  ? S_IWUSR : 0) |
	       (acc & IB_ACCESS_SHARED_MR_GROUP_READ  ? S_IRGRP : 0) |
	       (acc & IB_ACCESS_SHARED_MR_GROUP_WRITE ? S_IWGRP : 0) |
	       (acc & IB_ACCESS_SHARED_MR_OTHER_READ  ? S_IROTH : 0) |
	       (acc & IB_ACCESS_SHARED_MR_OTHER_WRITE ? S_IWOTH : 0);
 }
 #endif
 */
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = calloc(1, sizeof *mr);
	if (!mr)
		return ERR_PTR(-ENOMEM);

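	/* Register an MR covering the full address range (start 0, length ~0ull) in this PD. */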
	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, ~0ull,
			convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

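	/* A DMA MR is not backed by a umem; report the HW key as both lkey and rkey. */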
	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	/* TODO: mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); */

err_free:
	free(mr);

	return ERR_PTR(err);
}
Example No. 2
static void *mlx4_en_add(struct mlx4_dev *dev)
{
    struct mlx4_en_dev *mdev;
    int i;
    int err;

    printk_once(KERN_INFO "%s", mlx4_en_version);

    mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
    if (!mdev) {
        dev_err(&dev->pdev->dev, "Device struct alloc failed, "
                "aborting.\n");
        err = -ENOMEM;
        goto err_free_res;
    }

    if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
        goto err_free_dev;

    if (mlx4_uar_alloc(dev, &mdev->priv_uar))
        goto err_pd;

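    /* Map the UAR page so the driver can ring doorbells through it. */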
    mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
                            PAGE_SIZE);
    if (!mdev->uar_map)
        goto err_uar;
    spin_lock_init(&mdev->uar_lock);

    mdev->dev = dev;
    mdev->dma_device = &(dev->pdev->dev);
    mdev->pdev = dev->pdev;
    mdev->device_up = false;

    mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
    if (!mdev->LSO_support)
        mlx4_warn(mdev, "LSO not supported, please upgrade to later "
                  "FW version to enable LSO\n");

    if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
                      MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
                      0, 0, &mdev->mr)) {
        mlx4_err(mdev, "Failed allocating memory region\n");
        goto err_map;
    }
    if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
        mlx4_err(mdev, "Failed enabling memory region\n");
        goto err_mr;
    }

    /* Build device profile according to supplied module parameters */
    err = mlx4_en_get_profile(mdev);
    if (err) {
        mlx4_err(mdev, "Bad module parameters, aborting.\n");
        goto err_mr;
    }

    /* Configure which ports to start according to module parameters */
    mdev->port_cnt = 0;
    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
        mdev->port_cnt++;

    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
        if (!dev->caps.comp_pool) {
            mdev->profile.prof[i].rx_ring_num =
                rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
                                           min_t(int,
                                                 dev->caps.num_comp_vectors,
                                                 DEF_RX_RINGS)));
        } else {
Example No. 3
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	static int mlx4_en_version_printed;
	struct mlx4_en_dev *mdev;
	int i;
	int err;

	if (!mlx4_en_version_printed) {
		printk(KERN_INFO "%s", mlx4_en_version);
		mlx4_en_version_printed++;
	}

	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
	if (!mdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &(dev->pdev->dev);
	mdev->pdev = dev->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
				"FW version to enable LSO\n");

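	/* Register an MR spanning all of memory for local read/write DMA. */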
	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_uar;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	err = mlx4_en_get_profile(mdev);
	if (err) {
		mlx4_err(mdev, "Bad module parameters, aborting.\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* If we did not receive an explicit number of Rx rings, default to
	 * the number of completion vectors populated by the mlx4_core */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
			  mdev->profile.prof[i].tx_ring_num, i);
		if (!mdev->profile.prof[i].rx_ring_num) {
			mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
			mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
				  dev->caps.num_comp_vectors, i);
		} else
			mlx4_info(mdev, "Using %d rx rings for port:%d\n",
				  mdev->profile.prof[i].rx_ring_num, i);
	}
Example No. 4
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;
	int i;

	printk_once(KERN_INFO "%s", mlx4_en_version);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		goto err_free_res;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &dev->persist->pdev->dev;
	mdev->pdev = dev->persist->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_map;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	if (mlx4_en_get_profile(mdev)) {
		mlx4_err(mdev, "Bad module parameters, aborting\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Set default number of RX rings*/
	mlx4_en_set_num_rx_rings(mdev);

	/* Create our own workqueue for reset/multicast tasks
	 * Note: we cannot use the shared workqueue because of deadlocks caused
	 *       by the rtnl lock */
	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
	if (!mdev->workqueue)
		goto err_mr;

	/* At this stage all non-port specific tasks are complete:
	 * mark the card state as up */
	mutex_init(&mdev->state_lock);
	mdev->device_up = true;

	/* Setup ports */

	/* Create a netdev for each port */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Activating port:%d\n", i);
		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
			mdev->pndev[i] = NULL;
	}
Example No. 5
static void *mlx4_en_add(struct mlx4_dev *dev)
{
	static int mlx4_en_version_printed;
	struct mlx4_en_dev *mdev;
	int i;
	int err;

	if (!mlx4_en_version_printed) {
		printk(KERN_INFO "%s", mlx4_en_version);
		mlx4_en_version_printed++;
	}

	mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
	if (!mdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;

	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;
	spin_lock_init(&mdev->uar_lock);

	mdev->dev = dev;
	mdev->dma_device = &(dev->pdev->dev);
	mdev->pdev = dev->pdev;
	mdev->device_up = false;

	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
	if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to later "
				"FW version to enable LSO\n");

	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
			 MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
			 0, 0, &mdev->mr)) {
		mlx4_err(mdev, "Failed allocating memory region\n");
		goto err_uar;
	}
	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
		mlx4_err(mdev, "Failed enabling memory region\n");
		goto err_mr;
	}

	/* Build device profile according to supplied module parameters */
	err = mlx4_en_get_profile(mdev);
	if (err) {
		mlx4_err(mdev, "Bad module parameters, aborting.\n");
		goto err_mr;
	}

	/* Configure which ports to start according to module parameters */
	mdev->port_cnt = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Default each port's Rx ring count to the number of completion vectors (rounded up to a power of two, capped at MAX_RX_RINGS) */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
			  mdev->profile.prof[i].tx_ring_num, i);
		mdev->profile.prof[i].rx_ring_num = min_t(int,
			roundup_pow_of_two(dev->caps.num_comp_vectors),
			MAX_RX_RINGS);
		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
			  mdev->profile.prof[i].rx_ring_num, i);
	}