Example #1
File: cq.c  Project: Cai900205/test
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
Example #2
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	phys_addr_t addr;

	INIT_LIST_HEAD(&priv->catas_err.list);
	timer_setup(&priv->catas_err.timer, poll_catas, 0);
	priv->catas_err.map = NULL;

	if (!mlx4_is_slave(dev)) {
		addr = pci_resource_start(dev->persist->pdev,
					  priv->fw.catas_bar) +
					  priv->fw.catas_offset;

		priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
		if (!priv->catas_err.map) {
			mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
				  (unsigned long long)addr);
			return;
		}
	}

	priv->catas_err.timer.expires  =
		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
	add_timer(&priv->catas_err.timer);
}
Example #3
static void poll_catas(struct timer_list *t)
{
	struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
	struct mlx4_dev *dev = &priv->dev;
	u32 slave_read;

	if (mlx4_is_slave(dev)) {
		slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_warn(dev, "Internal error detected on the communication channel\n");
			goto internal_err;
		}
	} else if (readl(priv->catas_err.map)) {
		dump_err_buf(dev);
		goto internal_err;
	}

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_warn(dev, "Internal error mark was detected on device\n");
		goto internal_err;
	}

	mod_timer(&priv->catas_err.timer,
		  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
	return;

internal_err:
	if (mlx4_internal_err_reset)
		queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
}
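Examples 2 and 3 show how the catastrophic-error poller is armed and re-armed, but the matching teardown that Example 5 calls, mlx4_stop_catas_poll(), is not listed on this page. Below is a minimal sketch of what such a stop routine has to undo, based on the timer and ioremap set up in Example 2; it is an illustration, not this project's verbatim code (recent upstream versions also flush dev->persist->catas_wq during device deletion).
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* Stop the poll timer and wait for a running callback to finish */
	del_timer_sync(&priv->catas_err.timer);

	/* Unmap the internal error buffer; slaves never mapped it in
	 * mlx4_start_catas_poll, so the map pointer is NULL for them.
	 */
	if (priv->catas_err.map) {
		iounmap(priv->catas_err.map);
		priv->catas_err.map = NULL;
	}
}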
Example #4
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
	int offset;

	uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
	if (uar->index == -1)
		return -ENOMEM;

	if (mlx4_is_slave(dev))
		offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
				       dev->caps.uar_page_size);
	else
		offset = uar->index;
	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
	return 0;
}
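The pfn computed by mlx4_uar_alloc() is what callers actually map to reach the UAR doorbell page. A hedged sketch of the typical calling pattern follows; the helper name map_driver_uar() and the assumption that an mlx4_uar_free() counterpart exists are illustrative, not taken from this page.
static int map_driver_uar(struct mlx4_dev *dev, struct mlx4_uar *uar,
			  void __iomem **kar)
{
	int err;

	err = mlx4_uar_alloc(dev, uar);
	if (err)
		return err;

	/* Turn the page frame number back into a bus address and map one page */
	*kar = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!*kar) {
		mlx4_uar_free(dev, uar);	/* assumed inverse of the allocator above */
		return -ENOMEM;
	}
	return 0;
}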
Example #5
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	if (!mlx4_is_slave(dev))
		mlx4_stop_catas_poll(dev);
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);

	mutex_unlock(&intf_mutex);
}
Example #6
File: cq.c  Project: Cai900205/test
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}
Example #7
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	if (!mlx4_is_slave(dev))
		mlx4_start_catas_poll(dev);

	return 0;
}
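mlx4_add_device() and mlx4_remove_device() in Examples 5 and 7 walk intf_list and call back into each registered struct mlx4_interface. As context, here is a hedged sketch of what a client registration looks like in the upstream driver (field names are recalled from include/linux/mlx4/driver.h and may differ in this tree); an optional .event callback would also receive events such as the MLX4_DEV_EVENT_CATASTROPHIC_ERROR dispatched in Example 9.
static void *my_add(struct mlx4_dev *dev)
{
	/* Per-device client state; the returned pointer is passed back
	 * to .remove (and .event) as 'context'.
	 */
	return kzalloc(sizeof(int), GFP_KERNEL);
}

static void my_remove(struct mlx4_dev *dev, void *context)
{
	kfree(context);
}

static struct mlx4_interface my_intf = {
	.add	= my_add,
	.remove	= my_remove,
};

/* Module init/exit would pair:
 *	mlx4_register_interface(&my_intf);
 *	mlx4_unregister_interface(&my_intf);
 */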
Example #8
void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	u64 in_param;
	int err;

	if (mlx4_is_slave(dev)) {
		*((u32 *) &in_param) = srqn;
		*(((u32 *) &in_param) + 1) = 0;
		err = mlx4_cmd(dev, in_param, RES_SRQ, ICM_RESERVE_AND_ALLOC,
						       MLX4_CMD_FREE_RES,
						       MLX4_CMD_TIME_CLASS_A);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
	} else {
		mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
		mlx4_table_put(dev, &srq_table->table, srqn);
		mlx4_bitmap_free(&srq_table->bitmap, srqn);
	}
}
Example #9
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
{
	int err;
	struct mlx4_dev *dev;

	if (!mlx4_internal_err_reset)
		return;

	mutex_lock(&persist->device_state_mutex);
	if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	dev = persist->dev;
	mlx4_err(dev, "device is going to be reset\n");
	if (mlx4_is_slave(dev)) {
		err = mlx4_reset_slave(dev);
	} else {
		mlx4_crdump_collect(dev);
		err = mlx4_reset_master(dev);
	}

	if (!err) {
		mlx4_err(dev, "device was reset successfully\n");
	} else {
		/* EEH could have disabled the PCI channel during reset. That's
		 * recoverable and the PCI error flow will handle it.
		 */
		if (!pci_channel_offline(dev->persist->pdev))
			BUG_ON(1);
	}
	dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&persist->device_state_mutex);

	/* At that step HW was already reset, now notify clients */
	mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
	mlx4_cmd_wake_completions(dev);
	return;

out:
	mutex_unlock(&persist->device_state_mutex);
}
Example #10
int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	u64 out_param;
	int err;

	if (mlx4_is_slave(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
						       ICM_RESERVE_AND_ALLOC,
						       MLX4_CMD_ALLOC_RES,
						       MLX4_CMD_TIME_CLASS_A);
		if (err) {
			*cqn = -1;
			return err;
		} else {
			*cqn = out_param;
			return 0;
		}
	}

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
	return err;
}
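The free side of this CQ ICM path is not shown on this page, but it mirrors the SRQ case in Example 8: a slave issues MLX4_CMD_FREE_RES for RES_CQ through the command interface, while the master puts the ICM entries back and releases the bitmap slot itself. A hedged sketch along those lines, with the argument packing copied from Example 8 (not verbatim project code):
void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	u64 in_param;
	int err;

	if (mlx4_is_slave(dev)) {
		/* Ask the master to release the CQ resources */
		*((u32 *) &in_param) = cqn;
		*(((u32 *) &in_param) + 1) = 0;
		err = mlx4_cmd(dev, in_param, RES_CQ, ICM_RESERVE_AND_ALLOC,
			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
		mlx4_table_put(dev, &cq_table->table, cqn);
		mlx4_bitmap_free(&cq_table->bitmap, cqn);
	}
}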
Example #11
/*
 void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
 {
 mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
 }
 */
#define PAGE_SHIFT	12		/* LOG2(PAGE_SIZE) */
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) {
	struct mlx4_priv *priv = mlx4_priv(dev);
	int offset;

	uar->index = mlx4_bitmap_alloc(&priv->uar_table.bitmap);
	if (uar->index == -1)
		return -ENOMEM;

	if (mlx4_is_slave(&priv->dev))
		offset = uar->index
				% ((int) priv->dev.bar_info[1].bytes
						/ priv->dev.caps.uar_page_size);
	else
		offset = uar->index;

	/*MLX4_DEBUG("%p, %d\n", priv->dev.bar_info[1].vaddr, offset);*/

	uar->pfn = ((volatile uint64_t) priv->dev.bar_info[1].vaddr >> PAGE_SHIFT)
			+ offset;
	uar->map = NULL;
	return 0;
}
Example #12
static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0;

	err = mlx4_test_async(mdev->dev);
	/* When not in MSI_X or slave, test only async */
	if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
		return err;

	/* A loop over all completion vectors of current port,
	 * for each vector check whether it works by mapping command
	 * completions to that vector and performing a NOP command
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
		if (err)
			break;
	}

	return err;
}
Example #13
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
Example #14
/*
 void mlx4_free_eq_table(struct mlx4_priv *priv)
 {
 kfree(mlx4_priv(&priv->dev)->eq_table.eq);
 }
 */
int mlx4_init_eq_table(struct mlx4_priv *priv) {
	int err;
	int i;

	priv->eq_table.uar_map = calloc(mlx4_num_eq_uar(&priv->dev),
			sizeof *priv->eq_table.uar_map);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, priv->dev.caps.num_eqs,
			priv->dev.caps.num_eqs - 1, priv->dev.caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(&priv->dev)) {
		err = mlx4_map_clr_int(priv);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask = swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int = priv->clr_base
				+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names = malloc(
			MLX4_IRQNAME_SIZE
					* (priv->dev.caps.num_comp_vectors + 1
							+ priv->dev.caps.comp_pool));
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < priv->dev.caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(priv,
				priv->dev.caps.num_cqs - priv->dev.caps.reserved_cqs
						+ MLX4_NUM_SPARE_EQE,
				(priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				&priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(priv, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			(priv->dev.flags & MLX4_FLAG_MSI_X) ?
					priv->dev.caps.num_comp_vectors : 0,
			&priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/*if additional completion vectors poolsize is 0 this loop will not run*/
	for (i = priv->dev.caps.num_comp_vectors + 1;
			i < priv->dev.caps.num_comp_vectors + priv->dev.caps.comp_pool + 1;
			++i) {

		err = mlx4_create_eq(priv,
				priv->dev.caps.num_cqs - priv->dev.caps.reserved_cqs
						+ MLX4_NUM_SPARE_EQE,
				(priv->dev.flags & MLX4_FLAG_MSI_X) ? i : 0,
				&priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (priv->dev.flags & MLX4_FLAG_MSI_X) {
		assert(!"not implemented!");
		/*const char *eq_name;

		 for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i) {
		 if (i < priv->dev.caps.num_comp_vectors) {
		 snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE,
		 MLX4_IRQNAME_SIZE, "mlx4-comp-%d@pci:", i
		 pci_name(priv->dev.pdev));
		 } else {
		 snprintf(priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE,
		 MLX4_IRQNAME_SIZE, "mlx4-async@pci:"
		 pci_name(priv->dev.pdev));
		 }

		 eq_name = priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE;
		 err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0,
		 eq_name, priv->eq_table.eq + i);
		 if (err)
		 goto err_out_async;

		 priv->eq_table.eq[i].have_irq = 1;
		 }*/
	} else {
		snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE,
		DRV_NAME "@pci:"/*, pci_name(priv->dev.pdev)*/);
		/*err = request_irq(priv->dev.pdev->irq, mlx4_interrupt, IRQF_SHARED,
		 priv->eq_table.irq_names, dev);
		 if (err)
		 goto err_out_async;*/

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(priv, get_async_ev_mask(priv), 0,
			priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn);
	if (err)
		MLX4_DEBUG("MAP_EQ for async EQ %d failed (%d)\n",
				priv->eq_table.eq[priv->dev.caps.num_comp_vectors].eqn, err);

	for (i = 0; i < priv->dev.caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

	/* TODO */
	/* err_out_async:
	 *	mlx4_free_eq(&priv->dev,
	 *		     &priv->eq_table.eq[priv->dev.caps.num_comp_vectors]);
	 */

err_out_comp:
	i = priv->dev.caps.num_comp_vectors - 1;

err_out_unmap:
	/* while (i >= 0) {
	 *	mlx4_free_eq(&priv->dev, &priv->eq_table.eq[i]);
	 *	--i;
	 * }
	 * mlx4_free_irqs(&priv->dev);
	 */

err_out_clr_int:
	/* if (!mlx4_is_slave(&priv->dev))
	 *	mlx4_unmap_clr_int(&priv->dev);
	 */

err_out_bitmap:
	/* mlx4_unmap_uar(&priv->dev); */
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	free(priv->eq_table.uar_map);

	return err;
}