Example #1
/**
 * pvrdma_modify_port - modify device port attributes
 * @ibdev: the device to modify
 * @port: the port number
 * @mask: attributes to modify
 * @props: the port attributes to modify (capability bits to set/clear)
 *
 * @return: 0 on success, otherwise negative errno
 */
int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	struct pvrdma_dev *vdev = to_vdev(ibdev);
	int ret;

	if (mask & ~IB_PORT_SHUTDOWN) {
		dev_warn(&vdev->pdev->dev,
			 "unsupported port modify mask %#x\n", mask);
		return -EOPNOTSUPP;
	}

	mutex_lock(&vdev->port_mutex);
	ret = ib_query_port(ibdev, port, &attr);
	if (ret)
		goto out;

	vdev->port_cap_mask |= props->set_port_cap_mask;
	vdev->port_cap_mask &= ~props->clr_port_cap_mask;

	if (mask & IB_PORT_SHUTDOWN)
		vdev->ib_active = false;

out:
	mutex_unlock(&vdev->port_mutex);
	return ret;
}
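Note: this callback is reached through the core ib_modify_port() wrapper. A minimal sketch of a hypothetical caller that shuts a port down (the wrapper, IB_PORT_SHUTDOWN, and struct ib_port_modify come from include/rdma/ib_verbs.h; the surrounding function is illustrative only):

/* Hypothetical consumer: bring a port down through the verbs layer.
 * ib_modify_port() dispatches to the driver's modify_port callback,
 * e.g. pvrdma_modify_port() above for a vmw_pvrdma device.
 */
static int example_shutdown_port(struct ib_device *ibdev, u8 port)
{
	struct ib_port_modify props = {
		.set_port_cap_mask = 0,
		.clr_port_cap_mask = 0,
	};

	return ib_modify_port(ibdev, port, IB_PORT_SHUTDOWN, &props);
}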
Example #2
static int populate_port_data(rdma_ctx_t ctx)
{
    int retval;
    
    //LOG_KERN(LOG_INFO, ("Get port data\n"));
    retval = ib_query_port(rdma_ib_device.dev, 1, &rdma_ib_device.attr);
    CHECK_MSG_RET(retval == 0, "Error querying port", -1);
    /* 5 == IB_MTU_4096; used here to recognize the expected device. */
    CHECK_MSG_RET(rdma_ib_device.attr.active_mtu == 5, "Error: Wrong device", -1);

    rdma_ib_device.lid = rdma_ib_device.attr.lid;
    retval = ib_query_gid(rdma_ib_device.dev, 1, 0, &rdma_ib_device.gid,
                          &rdma_ib_device.gid_attr);
    CHECK_MSG_RET(retval == 0, "Error querying gid", -1);

    return 0;
} 
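The magic number 5 compared against active_mtu corresponds to IB_MTU_4096 in enum ib_mtu (IB_MTU_256 = 1 through IB_MTU_4096 = 5 in include/rdma/ib_verbs.h). A minimal sketch of the same check written against the named enumerator (the helper function is illustrative only):

/* Sketch: recognize a port whose active MTU is 4096 bytes without
 * relying on the raw enum value.
 */
static bool example_port_has_4k_mtu(struct ib_device *dev, u8 port)
{
	struct ib_port_attr attr;

	if (ib_query_port(dev, port, &attr))
		return false;

	return attr.active_mtu == IB_MTU_4096;
}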
Example #3
static int update_port_info(struct sa_db_port *port)
{
	struct ib_port_attr port_attr;
	int ret;

	ret = ib_query_port(port->dev->device, port->port_num, &port_attr);
	if (ret)
		return ret;

	if (port_attr.state != IB_PORT_ACTIVE)
		return -ENODATA;

	port->sm_lid = port_attr.sm_lid;
	port->sm_sl = port_attr.sm_sl;
	return 0;
}
Example #4
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state 	     = attr.state;
	resp.max_mtu 	     = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid 	     = attr.lid;
	resp.sm_lid 	     = attr.sm_lid;
	resp.lmc 	     = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl 	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
Example #5
File: rxe_verbs.c Project: avagin/linux
static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
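The get_port_immutable callback is invoked once per port by the core while the device is being registered, and the result is cached. In recent kernels the hookup goes through struct ib_device_ops; a minimal sketch (the ops table shown is illustrative, and older kernels assigned the function pointer on struct ib_device directly):

/* Sketch: wiring rxe_port_immutable() into the driver's ops table so
 * the core can call it during ib_register_device().
 */
static const struct ib_device_ops example_dev_ops = {
	.get_port_immutable = rxe_port_immutable,
	/* ... remaining verbs callbacks ... */
};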
Example #6
static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
			         struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct ocrdma_dev *dev;
	int err;

	dev = get_ocrdma_dev(ibdev);
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (ocrdma_is_udp_encap_supported(dev))
		immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
Example #7
static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
{
	struct ib_port_attr *tprops = NULL;
	int                  ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return -ENOMEM;

	ret = ib_query_port(&dev->ib_dev, port_num, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
		       ret, dev->ib_dev.name, port_num);
		goto out;
	}

	dev->rate[port_num - 1] = tprops->active_speed *
				  ib_width_enum_to_int(tprops->active_width);

out:
	kfree(tprops);
	return ret;
}
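Here active_speed encodes the per-lane data rate and ib_width_enum_to_int() converts the width enum into a lane count, so dev->rate holds speed times lanes. The helper is a static inline in include/rdma/ib_verbs.h; approximately (newer kernels also handle IB_WIDTH_2X):

/* Approximate shape of the width conversion: the IB_WIDTH_* enum
 * becomes a number of lanes, or -1 for an unknown encoding.
 */
static inline int example_width_to_lanes(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:           return -1;
	}
}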
Example #8
static void add_device(struct ib_device* dev)
{
    // First check that this is the Mellanox device we expect by
    // querying port 1 (IB port numbers are 1-based).
    int retval = ib_query_port(dev, 1, &rdma_ib_device.attr);
    CHECK_MSG_NO_RET(retval == 0, "Error querying port");

    if (rdma_ib_device.attr.active_mtu == 5) { // 5 == IB_MTU_4096
        rdma_ib_device.dev = dev;
    } else {
        return;
    }

    // get device attributes
    ib_query_device(dev, &rdma_ib_device.mlnx_device_attr); 

    // register handler
    INIT_IB_EVENT_HANDLER(&rdma_ib_device.ieh, dev, async_event_handler);
    ib_register_event_handler(&rdma_ib_device.ieh);

    // We got the right device
    // library can be used now
    rdma_ib_device.ready_to_use = true;
}
Example #9
static int ipoib_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ipoib_dev_priv *priv = ipoib_priv(netdev);
	struct ib_port_attr attr;
	int ret, speed, width;

	if (!netif_carrier_ok(netdev)) {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	ret = ib_query_port(priv->ca, priv->port, &attr);
	if (ret < 0)
		return -EINVAL;

	speed = ib_speed_enum_to_int(attr.active_speed);
	width = ib_width_enum_to_int(attr.active_width);

	if (speed < 0 || width < 0)
		return -EINVAL;

	/* Except the following are set, the other members of
	 * the struct ethtool_link_settings are initialized to
	 * zero in the function __ethtool_get_link_ksettings.
	 */
	cmd->base.speed		 = speed * width;
	cmd->base.duplex	 = DUPLEX_FULL;

	cmd->base.phy_address	 = 0xFF;

	cmd->base.autoneg	 = AUTONEG_ENABLE;
	cmd->base.port		 = PORT_OTHER;

	return 0;
}
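ib_speed_enum_to_int() is a file-local helper in ipoib_ethtool.c that maps the IB speed enum to a per-lane Mb/sec figure, which is why cmd->base.speed is computed as speed times width above. Roughly (the exact case list varies by kernel version):

/* Approximate shape of the speed conversion used above; the result
 * is per-lane Mb/sec in ethtool's SPEED_* units.
 */
static inline int example_speed_to_mbps(int speed)
{
	switch (speed) {
	case IB_SPEED_SDR:   return SPEED_2500;   /* 2.5 Gb/s per lane */
	case IB_SPEED_DDR:   return SPEED_5000;
	case IB_SPEED_QDR:
	case IB_SPEED_FDR10: return SPEED_10000;
	case IB_SPEED_FDR:   return SPEED_14000;
	case IB_SPEED_EDR:   return SPEED_25000;
	default:             return SPEED_UNKNOWN;
	}
}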
Example #10
static void ib_cache_update(struct ib_device *device,
			    u8                port,
			    bool	      enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	int                        i;
	int                        ret;

	if (!rdma_is_port_valid(device, port))
		return;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	if (!rdma_protocol_roce(device, port)) {
		ret = config_non_roce_gid_cache(device, port,
						tprops->gid_tbl_len);
		if (ret)
			goto err;
	}

	pkey_cache = kmalloc(struct_size(pkey_cache, table,
					 tprops->pkey_tbl_len),
			     GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			dev_warn(&device->dev,
				 "ib_query_pkey failed (%d) for index %d\n",
				 ret, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
							tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(tprops);
}
Example #11
File: mad.c Project: CSCLOG/beaglebone
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

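	/*
	 * A PortInfo SET may change the port's LID.  Capture the current
	 * LID first so smp_snoop() further down can detect the change.
	 */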
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
Example #12
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
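The write-side update above has a matching read side: consumers fetch cached values under the same cache.lock instead of re-querying hardware. A simplified sketch modeled on ib_get_cached_pkey() in drivers/infiniband/core/cache.c, using the field names from this example:

/* Sketch of the reader path for the pkey cache filled in above. */
static int example_get_cached_pkey(struct ib_device *device, u8 port,
				   int index, u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port - start_port(device)];
	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);
	return ret;
}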
Example #13
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = malloc(sizeof *tprops, M_DEVBUF, M_NOWAIT);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		log(LOG_WARNING, "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = malloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, M_DEVBUF, M_NOWAIT);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = malloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, M_DEVBUF, M_NOWAIT);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			log(LOG_WARNING, "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			log(LOG_WARNING, "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	mtx_lock(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	mtx_unlock(&device->cache.lock);

	free(old_pkey_cache, M_DEVBUF);
	free(old_gid_cache, M_DEVBUF);
	free(tprops, M_DEVBUF);
	return;

err:
	free(pkey_cache, M_DEVBUF);
	free(gid_cache, M_DEVBUF);
	free(tprops, M_DEVBUF);
}
Example #14
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0;  i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
Example #15
static void verbs_add_device (struct ib_device *dev)
{
	int ret;
	struct ib_qp_init_attr attrs;

	if (ib_dev)
		return;

	/* dirty hack so that ib_dma_map_single does not segfault */
	dev->dma_ops = NULL;
	ib_dev = dev;

	printk (KERN_INFO "IB add device called. Name = %s\n", dev->name);

	ret = ib_query_device (dev, &dev_attr);
	if (ret) {
		printk (KERN_INFO "ib_query_device failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "IB device caps: max_qp %d, max_mcast_grp: %d, max_pkeys: %d\n",
		dev_attr.max_qp, dev_attr.max_mcast_grp, (int)dev_attr.max_pkeys);

	/* We'll work with the first port; it's a sample module, anyway.
	 * (IB port numbers are counted from one, not zero.) */
	ret = ib_query_port (dev, 1, &port_attr);
	if (ret) {
		printk (KERN_INFO "ib_query_port failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "Port info: lid: %u, sm_lid: %u, max_msg_size: %u\n",
		(unsigned)port_attr.lid, (unsigned)port_attr.sm_lid, port_attr.max_msg_sz);

	pd = ib_alloc_pd (dev);
	if (IS_ERR (pd)) {
		ret = PTR_ERR (pd);
		printk (KERN_INFO "pd allocation failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "PD allocated\n");

	mr = ib_get_dma_mr (pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR (mr)) {
		ret = PTR_ERR (mr);
		printk (KERN_INFO "get_dma_mr failed: %d\n", ret);
		return;
	}

	send_cq = ib_create_cq (dev, NULL, NULL, NULL, 1, 1);
	if (IS_ERR (send_cq)) {
		ret = PTR_ERR (send_cq);
		printk (KERN_INFO "ib_create_cq failed: %d\n", ret);
		return;
	}

	recv_cq = ib_create_cq (dev, verbs_comp_handler_recv, NULL, NULL, 1, 1);
	if (IS_ERR (recv_cq)) {
		ret = PTR_ERR (recv_cq);
		printk (KERN_INFO "ib_create_cq failed: %d\n", ret);
		return;
	}

	ib_req_notify_cq (recv_cq, IB_CQ_NEXT_COMP);
	printk (KERN_INFO "CQs allocated\n");

	ib_query_pkey (dev, 1, 0, &pkey);

	/* allocate memory */
	send_buf = kmalloc (buf_size + 40, GFP_KERNEL);
	recv_buf = kmalloc (buf_size + 40, GFP_KERNEL);

	if (!send_buf || !recv_buf) {
		printk (KERN_INFO "Memory allocation error\n");
		return;
	}

	printk (KERN_INFO "Trying to register regions\n");
	if (ib_dev->dma_ops)
		printk (KERN_INFO "DMA ops are defined\n");

	memset (send_buf, 0, buf_size+40);
	memset (recv_buf, 0, buf_size+40);

	/* The device reads from the send buffer and writes into the
	 * receive buffer, so map them DMA_TO_DEVICE and DMA_FROM_DEVICE
	 * respectively. */
	send_key = ib_dma_map_single (ib_dev, send_buf, buf_size, DMA_TO_DEVICE);
	printk (KERN_INFO "send_key obtained %llx\n", send_key);
	recv_key = ib_dma_map_single (ib_dev, recv_buf, buf_size, DMA_FROM_DEVICE);
	printk (KERN_INFO "recv_key obtained %llx\n", recv_key);

	if (ib_dma_mapping_error (ib_dev, send_key)) {
		printk (KERN_INFO "Error mapping send buffer\n");
		return;
	}

	if (ib_dma_mapping_error (ib_dev, recv_key)) {
		printk (KERN_INFO "Error mapping recv buffer\n");
		return;
	}

	memset (&attrs, 0, sizeof (attrs));
	attrs.qp_type = IB_QPT_UD;
	attrs.sq_sig_type = IB_SIGNAL_ALL_WR;
	attrs.event_handler = verbs_qp_event;
	attrs.cap.max_send_wr = CQ_SIZE;
	attrs.cap.max_recv_wr = CQ_SIZE;
	attrs.cap.max_send_sge = 1;
	attrs.cap.max_recv_sge = 1;
	attrs.send_cq = send_cq;
	attrs.recv_cq = recv_cq;

	qp = ib_create_qp (pd, &attrs);
	if (IS_ERR (qp)) {
		ret = PTR_ERR (qp);
		printk (KERN_INFO "qp allocation failed: %d\n", ret);
		return;
	}

	printk (KERN_INFO "Create QP with num %x\n", qp->qp_num);

	if (init_qp (qp)) {
		printk (KERN_INFO "Failed to initialize QP\n");
		return;
	}

	ret = ib_query_gid (ib_dev, 1, 0, &local_info.gid);
	if (ret) {
		printk (KERN_INFO "query_gid failed %d\n", ret);
		return;
	}

	local_info.qp_num = qp->qp_num;
	local_info.lid = port_attr.lid;

	/* now we are ready to send our QP number and other stuff to other party */
	if (!server_addr) {
		schedule_work (&sock_accept);
		flush_scheduled_work ();
	}
	else
		exchange_info (server_addr);

	if (!have_remote_info) {
		printk (KERN_INFO "Have no remote info, give up\n");
		return;
	}

	ret = path_rec_lookup_start ();
	if (ret) {
		printk (KERN_INFO "path_rec lookup start failed: %d\n", ret);
		return;
	}

	/* post receive request */
	verbs_post_recv_req ();

	mod_timer (&verbs_timer, NEXTJIFF(1));
}
Example #16
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u8 status;
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
	u16 prev_lid = 0;
	struct ib_port_attr pattr;

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad,
			    &status);
	if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
		return IB_MAD_RESULT_FAILURE;
	}
	if (status == MTHCA_CMD_STAT_BAD_PKT)
		return IB_MAD_RESULT_SUCCESS;
	if (status) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
			  status);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
Example #17
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			in_wc->slid, in_wc->src_qp,
			in_wc->dlid_path_bits,
			in_wc->qp->qp_num,
			in_wc->wc_flags,
			in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
Example #18
File: cache.c Project: 3null/fastsocket
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i, j;
	int                        ret;
	union ib_gid		   gid, empty_gid;
	u16			   pkey;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->entry, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = 0;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->entry, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = 0;

	for (i = 0, j = 0; i < tprops->pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port, i, &pkey);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
		/* Per the IBTA spec, 0xffff is the default pkey and
		 * 0x0000 is the invalid pkey. */
		if (pkey) {
			pkey_cache->entry[j].index = i;
			pkey_cache->entry[j++].pkey = pkey;
		}
	}
	pkey_cache->table_len = j;

	memset(&empty_gid, 0, sizeof empty_gid);
	for (i = 0, j = 0; i < tprops->gid_tbl_len; ++i) {
		ret = ib_query_gid(device, port, i, &gid);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
		/* If the lower 8 bytes of the device GID entry are all 0,
		 * the entry is a blank, invalid entry... depending on the
		 * device, the upper 8 bytes might or might not be prefilled
		 * with a valid subnet prefix, so don't rely on them for
		 * determining a valid GID entry.
		 */
		if (memcmp((u8 *)&gid + 8, (u8 *)&empty_gid + 8,
			   sizeof gid - 8)) {
			gid_cache->entry[j].index = i;
			gid_cache->entry[j++].gid = gid;
		}
	}
	gid_cache->table_len = j;

	old_pkey_cache = pkey_cache;
	pkey_cache = kmalloc(sizeof *pkey_cache + old_pkey_cache->table_len *
			     sizeof *pkey_cache->entry, GFP_KERNEL);
	if (!pkey_cache)
		pkey_cache = old_pkey_cache;
	else {
		pkey_cache->table_len = old_pkey_cache->table_len;
		memcpy(&pkey_cache->entry[0], &old_pkey_cache->entry[0],
		       pkey_cache->table_len * sizeof *pkey_cache->entry);
		kfree(old_pkey_cache);
	}

	old_gid_cache = gid_cache;
	gid_cache = kmalloc(sizeof *gid_cache + old_gid_cache->table_len *
			    sizeof *gid_cache->entry, GFP_KERNEL);
	if (!gid_cache)
		gid_cache = old_gid_cache;
	else {
		gid_cache->table_len = old_gid_cache->table_len;
		memcpy(&gid_cache->entry[0], &old_gid_cache->entry[0],
		       gid_cache->table_len * sizeof *gid_cache->entry);
		kfree(old_gid_cache);
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}