Example no. 1
0
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

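	/* One cache entry per physical port, numbered from rdma_start_port(). */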
	device->cache.ports =
		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
			sizeof(*device->cache.ports),
			GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

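	/* Fill the cache for every port before the event handler is registered. */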
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

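	/* Keep the cache in sync with future port events (LID/PKey/GID changes). */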
	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}
Example no. 2
0
/**
 * iser_create_device_ib_res - creates the Protection Domain (PD), Completion
 * Queues (CQ), and DMA Memory Region (DMA MR) for the device associated with
 * the adapter.
 *
 * Returns 0 on success, -1 on failure.
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

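	/* RX CQ: iser_cq_callback() is invoked on receive completions. */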
	device->rx_cq = ib_create_cq(device->ib_device,
				  iser_cq_callback,
				  iser_cq_event_callback,
				  (void *)device,
				  ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	device->tx_cq = ib_create_cq(device->ib_device,
				  NULL, iser_cq_event_callback,
				  (void *)device,
				  ISER_MAX_TX_CQ_LEN, 0);

	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

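	/* Arm the RX CQ so the next completion triggers the callback. */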
	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

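	/* Completion handling is deferred to tasklet (softirq) context. */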
	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

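	/* DMA MR granting local write and remote read/write access. */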
	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
Example no. 3
0
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	mtx_init(&device->cache.lock, "ib device cache", NULL,
		MTX_DUPOK|MTX_DEF);

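	/* One pkey/gid/lmc cache slot per port; the allocations are checked together below. */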
	device->cache.pkey_cache =
		malloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), M_DEVBUF,
			M_NOWAIT);
	device->cache.gid_cache =
		malloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), M_DEVBUF,
			M_NOWAIT);

	device->cache.lmc_cache = malloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					  start_port(device) + 1),
					  M_DEVBUF, M_NOWAIT);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		log(LOG_WARNING, "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache[p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		free(device->cache.pkey_cache[p], M_DEVBUF);
		free(device->cache.gid_cache[p], M_DEVBUF);
	}

err:
	free(device->cache.pkey_cache, M_DEVBUF);
	free(device->cache.gid_cache, M_DEVBUF);
	free(device->cache.lmc_cache, M_DEVBUF);
}
Example no. 4
0
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

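	/* Allocate one pkey/gid/lmc cache slot per port. */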
	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache[p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
	device->cache.pkey_cache = NULL;
	device->cache.gid_cache = NULL;
	device->cache.lmc_cache = NULL;
}
Example no. 5
0
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

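	/* Populate the cache for every port of the device. */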
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}
Example no. 6
0
static void add_device(struct ib_device* dev)
{
    // First check that this is the Mellanox device.
    int retval = ib_query_port(dev, 1, &rdma_ib_device.attr);
    CHECK_MSG_NO_RET(retval == 0, "Error querying port");

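    // An active_mtu of 5 corresponds to IB_MTU_4096 in enum ib_mtu.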
    if(rdma_ib_device.attr.active_mtu == 5) {
        rdma_ib_device.dev = dev;
    } else {
        return;
    }

    // get device attributes
    ib_query_device(dev, &rdma_ib_device.mlnx_device_attr); 

    // register handler
    INIT_IB_EVENT_HANDLER(&rdma_ib_device.ieh, dev, async_event_handler);
    ib_register_event_handler(&rdma_ib_device.ieh);

    // We got the right device; the library can be used now.
    rdma_ib_device.ready_to_use = true;
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext		 *ucontext;
	struct file			 *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

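	/* udata describes the driver-private parts of the command and response buffers. */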
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);

	resp.num_comp_vectors = file->device->num_comp_vectors;

	filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_free;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

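	/* Publish the async event fd to userspace only after setup has succeeded. */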
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	put_unused_fd(resp.async_fd);
	fput(filp);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}