static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
			mthca_err(mdev, "Couldn't set PCI-X max read count, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}
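
The early-out at the top keys off a tune_pci flag that this example never defines; in a driver like this it is plausibly a module parameter, along these lines (declaration assumed, not shown in the source):

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

With tune_pci left at 0, mthca_tune_pci() is a no-op and the burst sizes programmed by the BIOS are left alone.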
static void handle_catas(struct mthca_dev *dev)
{
	struct ib_event event;
	const char *type;
	int i;

	event.device = &dev->ib_dev;
	event.event  = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;

	ib_dispatch_event(&event);

	switch (swab32(readl(dev->catas_err.map)) >> 24) {
	case MTHCA_CATAS_TYPE_INTERNAL:
		type = "internal error";
		break;
	case MTHCA_CATAS_TYPE_UPLINK:
		type = "uplink bus error";
		break;
	case MTHCA_CATAS_TYPE_DDR:
		type = "DDR data error";
		break;
	case MTHCA_CATAS_TYPE_PARITY:
		type = "internal parity error";
		break;
	default:
		type = "unknown error";
		break;
	}

	mthca_err(dev, "Catastrophic error detected: %s\n", type);
	for (i = 0; i < dev->catas_err.size; ++i)
		mthca_err(dev, "  buf[%02x]: %08x\n",
			  i, swab32(readl(dev->catas_err.map + i)));
}
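
The switch above works because the catastrophic error buffer is big-endian: readl() performs a little-endian read, swab32() flips the value back to the device's byte order, and the error type code lands in the top byte. A hypothetical helper making the decode explicit:

static u32 catas_type(void __iomem *map)
{
	u32 raw = readl(map);	/* little-endian read of a big-endian register */
	u32 be  = swab32(raw);	/* recover the device's byte order */

	return be >> 24;	/* bits 31:24 hold the MTHCA_CATAS_TYPE_* code */
}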
static int mthca_load_fw(struct mthca_dev *mdev)
{
	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}
	if (status) {
		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free;
	}
	err = mthca_RUN_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}
	if (status) {
		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}
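
Every firmware command in this driver is followed by the same two checks: a nonzero return value means the command transport failed, and a nonzero status means the HCA rejected the command. A hypothetical macro (not part of the driver as shown) that collapses the pattern:

#define MTHCA_CMD_CHECK(mdev, err, status, name)			\
({									\
	int __ret = (err);						\
	if (__ret)							\
		mthca_err(mdev, name " command failed, aborting.\n");	\
	else if (status) {						\
		mthca_err(mdev, name " returned status 0x%02x, aborting.\n", \
			  (status));					\
		__ret = -EINVAL;					\
	}								\
	__ret;								\
})

With such a helper, each pair in mthca_load_fw() would reduce to err = MTHCA_CMD_CHECK(mdev, err, status, "MAP_FA"); if (err) goto err_free;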
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;
	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	/* ... remainder of the HCA setup sequence elided in this example ... */
	return 0;

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}
Example #5
static int mthca_init_hca(struct mthca_dev *mdev)
{
	u8 status;
	int err;
	struct mthca_adapter adapter;

	if (mthca_is_memfree(mdev))
		err = mthca_init_arbel(mdev);
	else
		err = mthca_init_tavor(mdev);

	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}
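
mthca_init_hca() forks on mthca_is_memfree(): Arbel-mode (MemFree) HCAs keep their context in host memory via ICM, while Tavor-mode HCAs use on-board DDR. The predicate is presumably just a flag test along these lines (flag name assumed from the MTHCA_FLAG_* convention used elsewhere in these examples):

static inline int mthca_is_memfree(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_MEMFREE;
}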
Example #6
int mthca_create_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;

	spin_lock_init(&dev->sm_lock);

	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q) {
			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
						      q ? IB_QPT_GSI : IB_QPT_SMI,
						      NULL, 0, send_handler,
						      NULL, NULL);
			if (IS_ERR(agent)) {
				ret = PTR_ERR(agent);
				goto err;
			}
			dev->send_agent[p][q] = agent;
		}

	for (p = 1; p <= dev->limits.num_ports; ++p) {
		ret = mthca_update_rate(dev, p);
		if (ret) {
			mthca_err(dev, "Failed to obtain port %d rate."
				  " aborting.\n", p);
			goto err;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}
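
ib_register_mad_agent() above is passed a send_handler callback that this example doesn't show. Since the agents are used only for sending, a minimal sketch of such a handler (assumed implementation) just releases the send buffer once the MAD completes:

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	/* The send is done, successfully or not; drop the buffer. */
	ib_free_send_mad(mad_send_wc->send_buf);
}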
Example #8
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 *
	 * To make matters worse, for Tavor (PCI-X HCA) we have to
	 * find the associated bridge device and save off its PCI
	 * header as well.
	 */

	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		/* Look for the bridge -- its device ID will be 2 more
		   than HCA's device ID. */
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type    == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			/*
			 * Didn't find a bridge for a Tavor device --
			 * assume we're in no-bridge mode and hope for
			 * the best.
			 */
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);

	if (bridge) {
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't allocate memory to save HCA "
				  "bridge PCI header, aborting.\n");
			goto out;
		}

		for (i = 0; i < 64; ++i) {
			if (i == 22 || i == 23)
				continue;
			if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't save HCA bridge "
					  "PCI header, aborting.\n");
				goto out;
			}
		}
		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
		if (!bridge_pcix_cap) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't locate HCA bridge "
					  "PCI-X capability, aborting.\n");
				goto out;
		}
	}

	/* actually hit reset */
	{
		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
					      MTHCA_RESET_OFFSET, 4);

		if (!reset) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't map HCA reset register, "
				  "aborting.\n");
			goto out;
		}

		writel(MTHCA_RESET_VALUE, reset);
		iounmap(reset);
	}

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Now wait for PCI device to start responding again */
	{
		u32 v;
		int c = 0;

		for (c = 0; c < 100; ++c) {
			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't access HCA after reset, "
					  "aborting.\n");
				goto out;
			}

			if (v != 0xffffffff)
				goto good;

			msleep(100);
		}

		err = -ENODEV;
		mthca_err(mdev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto out;
	}

good:
	/* Now restore the PCI headers */
	if (bridge) {
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		/*
		 * Bridge control register is at 0x3e, so we'll
		 * naturally restore it last in this loop.
		 */
		for (i = 0; i < 16; ++i) {
			if (i * 4 == PCI_COMMAND)
				continue;

			if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
					  "aborting.\n", i);
				goto out;
			}
		}

		if (pci_write_config_dword(bridge, PCI_COMMAND,
					   bridge_header[PCI_COMMAND / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
				  "aborting.\n");
			goto out;
		}
	}

	if (hca_pcix_cap) {
		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
				 hca_header[hca_pcix_cap / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI-X "
				  "command register, aborting.\n");
			goto out;
		}
	}

	if (hca_pcie_cap) {
		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
					   devctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Device Control register, aborting.\n");
			goto out;
		}
		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
					   linkctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Link control register, aborting.\n");
			goto out;
		}
	}

	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto out;
		}
	}

	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
		goto out;
	}

out:
	if (bridge)
		pci_dev_put(bridge);
	kfree(bridge_header);
	kfree(hca_header);

	return err;
}
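
For context, a reset routine like this is typically invoked at the very start of the probe path, before any firmware commands are issued. A hedged sketch of such a call site (the err_disable unwind label is a hypothetical placeholder):

	/* Early in device probe (sketch): */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_disable;	/* hypothetical unwind label */
	}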
Example #9
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u8 status;
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad,
			    &status);
	if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
		return IB_MAD_RESULT_FAILURE;
	}
	if (status == MTHCA_CMD_STAT_BAD_PKT)
		return IB_MAD_RESULT_SUCCESS;
	if (status) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
			  status);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
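
The return value is a bitmask, not a plain error code. A sketch of how a consumer (in reality the ib_mad core) might interpret it; dispatch_reply() here is a hypothetical stand-in:

static void dispatch_reply(struct ib_mad *mad);	/* hypothetical */

static void handle_mad_result(int ret, struct ib_mad *out_mad)
{
	if (!(ret & IB_MAD_RESULT_SUCCESS))
		return;				/* IB_MAD_RESULT_FAILURE: drop the MAD */

	if (ret & IB_MAD_RESULT_CONSUMED)
		return;				/* e.g. trap repress: no response expected */

	if (ret & IB_MAD_RESULT_REPLY)
		dispatch_reply(out_mad);	/* out_mad holds the response */
}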
Example #10
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	u8 status;
	int err;

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_ENABLE_LAM(mdev, &status);
	if (err) {
		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
		return err;
	}
	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (status) {
		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Failed to start FW, aborting.\n");
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);

	return err;
}
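
The error labels above run the Arbel bring-up in reverse, and the mthca_close_hca() called from mthca_init_hca()'s error path plausibly mirrors the same teardown. A sketch, assuming the usual init/close symmetry:

void mthca_close_hca(struct mthca_dev *mdev)
{
	u8 status;

	mthca_CLOSE_HCA(mdev, 0, &status);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);
		mthca_UNMAP_FA(mdev, &status);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev, &status);
	} else
		mthca_SYS_DIS(mdev, &status);
}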
Example #11
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	u8 status;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}
	if (status) {
		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						    dev_lim->cqc_entry_sz,
						    mdev->limits.num_cqs,
						    mdev->limits.reserved_cqs,
						    0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
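
Each mthca_alloc_icm_table() call above passes two trailing flags: the MTT and MPT tables set the first, and only the MPT table sets the second. Judging from the call sites, the signature is presumably along these lines:

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt,		/* ICM virtual address */
					      int obj_size,	/* bytes per entry */
					      int nobj,		/* number of entries */
					      int reserved,	/* entries pinned at init */
					      int use_lowmem,	/* CPU-writable pages */
					      int use_coherent);/* coherent DMA mapping */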
Example #12
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	u8 status;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev, &status);
	if (err) {
		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_disable;
	}

	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev, &status);

	return err;
}
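
Both mthca_init_tavor() and mthca_init_arbel() start from a shared hca_profile and then patch chip-specific fields: Tavor zeroes uarc_size (no UAR context in DDR mode) while Arbel zeroes num_udav (UD address vectors live in ICM). The default is plausibly a static struct filled from module parameters; a sketch with assumed field and constant names:

static struct mthca_profile hca_profile = {
	.num_qp    = MTHCA_DEFAULT_NUM_QP,
	.num_cq    = MTHCA_DEFAULT_NUM_CQ,
	.num_mpt   = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt   = MTHCA_DEFAULT_NUM_MTT,
	.num_udav  = MTHCA_DEFAULT_NUM_UDAV,	/* Tavor only */
	.uarc_size = MTHCA_DEFAULT_NUM_UAR *
		     MTHCA_DEFAULT_UARC_SIZE,	/* Arbel only */
};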
Example #13
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;
	u8 status;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports      	= dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len  	= dev_lim->max_gids;
	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
					      (dev_lim->max_desc_sz -
					       sizeof (struct mthca_next_seg) -
					       (mthca_is_memfree(mdev) ?
						sizeof (struct mthca_arbel_ud_seg) :
						sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}
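
To make the max_sg clamp concrete, here is a worked instance with illustrative numbers (segment sizes assumed: 16-byte next segment, 32-byte Tavor UD segment, 16-byte data segment):

/*
 * Suppose the HCA reports max_sg = 64 and max_desc_sz = 1024 on a
 * Tavor-mode device.  The largest UD send WQE leaves room for
 *
 *     (1024 - 16 - 32) / 16 = 61
 *
 * data segments, so mdev->limits.max_sg = min(64, 61) = 61: here the
 * descriptor size, not max_sg, is the binding limit.
 */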
Example #14
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.  For Tavor
	 * (PCI-X HCA) we also have to save/restore the bridge header.
	 */
	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type    == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	hca_pcie_cap = pci_pcie_cap(mdev->pdev);

	if (bridge) {
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't allocate memory to save HCA "
				  "bridge PCI header, aborting.\n");
			goto out;
		}

		for (i = 0; i < 64; ++i) {
			if (i == 22 || i == 23)
				continue;
			if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't save HCA bridge "
					  "PCI header, aborting.\n");
				goto out;
			}
		}
		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
		if (!bridge_pcix_cap) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't locate HCA bridge "
					  "PCI-X capability, aborting.\n");
				goto out;
		}
	}

	/* actually hit reset */
	{
		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
					      MTHCA_RESET_OFFSET, 4);

		if (!reset) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't map HCA reset register, "
				  "aborting.\n");
			goto out;
		}

		writel(MTHCA_RESET_VALUE, reset);
		iounmap(reset);
	}

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Now wait for PCI device to start responding again */
	{
		u32 v;
		int c = 0;

		for (c = 0; c < 100; ++c) {
			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't access HCA after reset, "
					  "aborting.\n");
				goto out;
			}

			if (v != 0xffffffff)
				goto good;

			msleep(100);
		}

		err = -ENODEV;
		mthca_err(mdev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto out;
	}

good:
	/* Now restore the PCI headers */
	if (bridge) {
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		for (i = 0; i < 16; ++i) {
			if (i * 4 == PCI_COMMAND)
				continue;

			if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
					  "aborting.\n", i);
				goto out;
			}
		}

		if (pci_write_config_dword(bridge, PCI_COMMAND,
					   bridge_header[PCI_COMMAND / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
				  "aborting.\n");
			goto out;
		}
	}

	if (hca_pcix_cap) {
		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
				 hca_header[hca_pcix_cap / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI-X "
				  "command register, aborting.\n");
			goto out;
		}
	}

	if (hca_pcie_cap) {
		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
					   devctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Device Control register, aborting.\n");
			goto out;
		}
		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
					   linkctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Link control register, aborting.\n");
			goto out;
		}
	}

	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto out;
		}
	}

	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
		goto out;
	}

out:
	if (bridge)
		pci_dev_put(bridge);
	kfree(bridge_header);
	kfree(hca_header);

	return err;
}
Example #15
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      const struct ib_wc *in_wc,
		      const struct ib_grh *in_grh,
		      const struct ib_mad_hdr *in, size_t in_mad_size,
		      struct ib_mad_hdr *out, size_t *out_mad_size,
		      u16 *out_mad_pkey_index)
{
	int err;
	u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
	u16 prev_lid = 0;
	struct ib_port_attr pattr;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = ib_lid_cpu16(pattr.lid);

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad);
	if (err == -EBADMSG)
		return IB_MAD_RESULT_SUCCESS;
	else if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
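
This newer mthca_process_mad() signature (const MAD headers, explicit in/out sizes, an out_mad_pkey_index) tracks the later verbs API. A sketch of how such a handler is plausibly registered through an ib_device_ops table (table contents assumed, not shown in the source):

static const struct ib_device_ops mthca_dev_ops = {
	/* ... other verbs elided ... */
	.process_mad = mthca_process_mad,
};

/* in the registration path, before ib_register_device(): */
	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);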