Example #1
/* Try opening fi->domain_attr->cq_cnt number of completion queues
 * simultaneously using a size hint of 0 (indicating the provider should choose
 * the size)
 */
static int cq_open_close_simultaneous(void)
{
	int ret;
	int opened;
	size_t count;
	int testret = FAIL;
	struct fid_cq **cq_array;

	count = fi->domain_attr->cq_cnt;
	FT_DEBUG("testing creation of up to %zu simultaneous CQs\n", count);

	cq_array = calloc(count, sizeof(*cq_array));
	if (!cq_array)
		return -FI_ENOMEM;

	ret = 0;
	for (opened = 0; opened < count && !ret; opened++) {
		ret = create_cq(&cq_array[opened], 0, 0, FI_CQ_FORMAT_UNSPEC,
				FI_WAIT_UNSPEC);
	}
	if (ret) {
		FT_WARN("fi_cq_open failed after %d (cq_cnt: %zu): %s",
			opened, count, fi_strerror(-ret));
	}

	testret = PASS;

	FT_CLOSEV_FID(cq_array, opened);
	free(cq_array);

	return TEST_RET_VAL(ret, testret);
}
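Example #1 (and the other fabtests snippets below) calls a local create_cq() helper whose body is not shown. The following is a hypothetical sketch of what such a wrapper could look like, assuming an already-opened fabtests domain and using only the standard libfabric fi_cq_open() call; the helper name and parameter order simply mirror the call sites above.

#include <string.h>
#include <rdma/fi_domain.h>

static struct fid_domain *domain;	/* assumed to be opened elsewhere via fi_domain() */

/* Forward the arguments into a struct fi_cq_attr and open the CQ. */
static int create_cq(struct fid_cq **cq, size_t size, uint64_t flags,
		     enum fi_cq_format format, enum fi_wait_obj wait_obj)
{
	struct fi_cq_attr cq_attr;

	memset(&cq_attr, 0, sizeof(cq_attr));
	cq_attr.size = size;		/* 0 lets the provider pick a default size */
	cq_attr.flags = flags;
	cq_attr.format = format;
	cq_attr.wait_obj = wait_obj;

	return fi_cq_open(domain, &cq_attr, cq, NULL);
}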
Example #2
int openib_initialize()
{

    // Use the previously cached info
    me = l_state.rank;
    nprocs = l_state.size;
    assert(l_state.world_comm);

    // Initialize settings from the environment
    openib_init_envs();

    // Initialize the registration cache
    reg_cache_init(nprocs, 0);

    init_params();

    if(open_hca()) {
        release_resources();
        exit(1);
    }

    if(create_cq()) {
        release_resources();
        exit(1);
    }

    if(get_lid()) {
        release_resources();
        exit(1);
    }

    if(create_qp()) {
        release_resources();
        exit(1);
    }

    if(exch_addr()) {
        release_resources();
        exit(1);
    }

    if(connect_qp()) {
        release_resources();
        exit(1);
    }

    // Create network locks
    openib_create_locks();

    // Allocate buffers for one sided operations
    openib_alloc_buf();

    MPI_Barrier(l_state.world_comm);

    return 0;
}
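The initialization sequence above repeats the same "call, release resources, exit" pattern six times. A purely illustrative way to compact it is a small helper macro; INIT_STEP is a hypothetical name and the sketch uses nothing beyond the functions already called above.

/* Hypothetical helper macro (not part of the original source): abort
 * initialization if any setup step fails. */
#define INIT_STEP(call)             \
    do {                            \
        if (call) {                 \
            release_resources();    \
            exit(1);                \
        }                           \
    } while (0)

/* The middle of openib_initialize() could then read: */
INIT_STEP(open_hca());
INIT_STEP(create_cq());
INIT_STEP(get_lid());
INIT_STEP(create_qp());
INIT_STEP(exch_addr());
INIT_STEP(connect_qp());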
Example #3
static int
cq_signal()
{
	struct fid_cq *cq;
	struct fi_cq_tagged_entry entry;
	int64_t elapsed;
	int testret;
	int ret;

	testret = FAIL;

	ret = create_cq(&cq, 1, 0, FI_CQ_FORMAT_UNSPEC, FI_WAIT_UNSPEC);
	if (ret) {
		sprintf(err_buf, "fi_cq_open(1, 0, FI_CQ_FORMAT_UNSPEC, "
				"FI_WAIT_UNSPEC) = %d, %s",
				ret, fi_strerror(-ret));
		goto fail1;
	}

	ret = fi_cq_signal(cq);
	if (ret) {
		sprintf(err_buf, "fi_cq_signal = %d %s", ret, fi_strerror(-ret));
		goto fail2;
	}

	ft_start();
	ret = fi_cq_sread(cq, &entry, 1, NULL, 2000);
	ft_stop();
	elapsed = get_elapsed(&start, &end, MILLI);
	if (ret != -FI_EAGAIN && ret != -FI_ECANCELED) {
		sprintf(err_buf, "fi_cq_sread = %d %s", ret, fi_strerror(-ret));
		goto fail2;
	}

	if (elapsed > 1000) {
		sprintf(err_buf, "fi_cq_sread - signal ignored");
		goto fail2;
	}

	ret = fi_close(&cq->fid);
	if (ret) {
		sprintf(err_buf, "close(cq) = %d, %s", ret, fi_strerror(-ret));
		goto fail1;
	}
	cq = NULL;

	testret = PASS;
fail2:
	FT_CLOSE_FID(cq);
fail1:
	cq = NULL;
	return TEST_RET_VAL(ret, testret);
}
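For context, fi_cq_signal() exists to wake a thread blocked in fi_cq_sread() without a completion being written; the test above only checks that a pre-posted signal makes the blocking read return promptly. Below is a minimal usage sketch, not taken from the test suite: progress_loop is a hypothetical name, and whether the wakeup surfaces as -FI_EAGAIN or -FI_ECANCELED is provider-dependent, which is why the test accepts either.

#include <pthread.h>
#include <rdma/fi_domain.h>
#include <rdma/fi_errno.h>

/* Block on the CQ until a completion arrives, the 1000 ms timeout expires,
 * or another thread calls fi_cq_signal(cq) to force the read to return. */
static void *progress_loop(void *arg)
{
	struct fid_cq *cq = arg;
	struct fi_cq_tagged_entry entry;
	ssize_t ret;

	for (;;) {
		ret = fi_cq_sread(cq, &entry, 1, NULL, 1000);
		if (ret > 0)
			continue;	/* got a completion in 'entry' */
		if (ret == -FI_EAGAIN || ret == -FI_ECANCELED)
			break;		/* timed out or woken by fi_cq_signal() */
		break;			/* unexpected error */
	}
	return NULL;
}

/* Elsewhere, e.g. at shutdown: fi_cq_signal(cq); */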
Example #4
/*
 * Tests:
 * - test open and close of CQ over a range of sizes
 */
static int
cq_open_close_sizes()
{
	int i;
	int ret;
	int size;
	int testret;
	struct fid_cq *cq;

	testret = FAIL;

	for (i = -1; i < 17; ++i) {
		size = (i < 0) ? 0 : 1 << i;

		ret = create_cq(&cq, size, 0, FI_CQ_FORMAT_UNSPEC, FI_WAIT_UNSPEC);
		if (ret == -FI_EINVAL) {
			FT_WARN("\nSuccessfully completed %d iterations up to "
				"size %d before the provider returned "
				"EINVAL...",
				i + 1, size >> 1);
			ret = 0;
			goto pass;
		}
		if (ret != 0) {
			sprintf(err_buf, "fi_cq_open(%d, 0, FI_CQ_FORMAT_UNSPEC, "
					"FI_WAIT_UNSPEC) = %d, %s",
					size, ret, fi_strerror(-ret));
			goto fail;
		}

		ret = fi_close(&cq->fid);
		if (ret != 0) {
			sprintf(err_buf, "close(cq) = %d, %s", ret, fi_strerror(-ret));
			goto fail;
		}
		cq = NULL;
	}

pass:
	testret = PASS;
fail:
	return TEST_RET_VAL(ret, testret);
}
Example #5
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
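To make the sizing arithmetic in the kernel examples concrete, here is a worked pass through the calculation for a hypothetical request of 100 CQEs, assuming a 64-byte hardware CQE (sizeof(*chp->cq.queue)) and a 4 KiB PAGE_SIZE; neither value appears in the snippet itself.

/*
 *   ib_create_cq(..., entries = 100, ...)         requested by the caller
 *   entries   = 100 + 1 + 1                       status page + full/empty slot = 102
 *   entries   = roundup(102, 16)                  = 112
 *   hwentries = 112 * 2                           = 224   (already >= 64)
 *   memsize   = 224 * 64                          = 14336 bytes
 *   user CQ:    memsize   = roundup(14336, 4096)  = 16384
 *               hwentries = 16384 / 64            = 256
 *   chp->cq.size  = 256 - 1 (status page)         = 255 usable hardware slots
 *   chp->ibcq.cqe = 112 - 2                       = 110 reported back to the caller
 */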
Example #6
File: cq.c Project: avagin/linux
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq ucmd;
	struct c4iw_create_cq_resp uresp;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			ucontext->is_32b_cqe = 1;
	}

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (udata)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		/* communicate to the userspace that
		 * kernel driver supports 64B CQE
		 */
		uresp.flags |= C4IW_64B_CQE;

		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       ucontext->is_32b_cqe ?
				       sizeof(uresp) - sizeof(uresp.flags) :
				       sizeof(uresp));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, chp->cq.size,
		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	kfree(chp);
	return ERR_PTR(ret);
}
Example #7
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof *chp->cq.queue;
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}