/*****************************************
* Function: poll_completion
*****************************************/
static int poll_completion(
	struct resources *res)
{
	struct ibv_wc wc;
	void *ev_ctx;
	struct ibv_cq *ev_cq;
	int rc;


	fprintf(stdout, "waiting for completion event\n");

	/* Wait for the completion event */
	if (ibv_get_cq_event(res->comp_channel, &ev_cq, &ev_ctx)) {
		fprintf(stderr, "failed to get cq_event\n");
		return 1;
	}

	fprintf(stdout, "got completion event\n");

	/* Ack the event */
	ibv_ack_cq_events(ev_cq, 1);

	/* Request notification upon the next completion event */
	rc = ibv_req_notify_cq(ev_cq, 0);
	if (rc) {
		fprintf(stderr, "Couldn't request CQ notification\n");
		return 1;
	}

	/* In a real program, the CQ should be drained before waiting for the next
	   completion event; a drain-loop sketch follows this function. */

	/* Poll the completion that caused the event (if one exists) */
	rc = ibv_poll_cq(res->cq, 1, &wc);
	if (rc < 0) {
		fprintf(stderr, "poll CQ failed\n");
		return 1;
	}

	/* Check whether the CQ is empty. An event can be delivered even though the
	   CQ is empty; this happens when several completions are covered by a single
	   event. Here only one completion is expected, so an empty CQ is an error. */
	if (rc == 0) {
		fprintf(stderr, "completion wasn't found in the CQ after timeout\n");
		return 1;
	}

	fprintf(stdout, "completion was found in CQ with status 0x%x\n", wc.status);

	/* Check the completion status (here we don't care about the completion opcode) */
	if (wc.status != IBV_WC_SUCCESS) {
		fprintf(stderr, "got bad completion with status: 0x%x, vendor syndrome: 0x%x\n", 
			wc.status, wc.vendor_err);
		return 1;
	}

	return 0;
}
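
A drain-loop sketch, as referenced in the comment inside poll_completion(): a
single completion event can cover several completions, so a real application
re-arms the CQ and then empties it rather than polling exactly once. This is
only a sketch and assumes the same struct resources layout as above; error
handling is kept minimal.

/* Sketch: drain every outstanding completion after the CQ has been re-armed. */
static int drain_cq(struct resources *res)
{
	struct ibv_wc wc;
	int rc;

	/* ibv_poll_cq returns the number of completions written to wc:
	   0 when the CQ is empty, negative on error. */
	while ((rc = ibv_poll_cq(res->cq, 1, &wc)) > 0) {
		if (wc.status != IBV_WC_SUCCESS) {
			fprintf(stderr, "bad completion, status 0x%x\n", wc.status);
			return 1;
		}
		/* handle the completion (wc.opcode, wc.wr_id, ...) here */
	}

	return (rc < 0) ? 1 : 0;
}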
Example #2
static void build_verbs(IbvConnection *conn, struct ibv_context *verbs)
{
    conn->ibvctx = verbs;
    TEST_Z(conn->pd = ibv_alloc_pd(conn->ibvctx));
    TEST_Z(conn->comp_channel = ibv_create_comp_channel(conn->ibvctx));
    TEST_Z(conn->cq = ibv_create_cq(conn->ibvctx, 10, NULL, conn->comp_channel, 0)); /* cqe=10 is arbitrary */
    TEST_NZ(ibv_req_notify_cq(conn->cq, 0));

    TEST_NZ(pthread_create(&conn->cq_poller_thread, NULL, poll_cq, conn));
}
Example #3
void send_ack() {
    /* Send ack */
    ack_buffer = client_pdata.index;
    sge_send.addr = (uintptr_t)&ack_buffer;
    sge_send.length = sizeof(ack_buffer);
    sge_send.lkey = mr_ack_buffer->lkey;

    send_wr.wr_id = 1;
    send_wr.opcode = IBV_WR_SEND;
    send_wr.send_flags = IBV_SEND_SIGNALED;
    send_wr.sg_list = &sge_send;
    send_wr.num_sge = 1;

    err = ibv_post_send(cm_id->qp, &send_wr, &bad_send_wr);
    assert(err == 0);

    /* Wait send completion */
    err = ibv_get_cq_event(comp_chan, &evt_cq, &cq_context);
    assert(err == 0);

    ibv_ack_cq_events(evt_cq, 1);

    err = ibv_req_notify_cq(cq, 0);
    assert(err == 0);
    
    n = ibv_poll_cq(cq, 1, &wc);
    assert(n >= 1); 
    if (wc.status != IBV_WC_SUCCESS) 
        printf("Warning: Client %d send ack failed\n", client_pdata.index);
}
Example #4
void * poll_cq(void *ctx)
{
    struct ibv_cq *cq;
    struct ibv_wc wc;

    while (1) {
        if (!paused) {
            // rdma_debug("get cq event ...");
            TEST_NZ(ibv_get_cq_event(s_ctx->comp_channel, &cq, &ctx));
            ibv_ack_cq_events(cq, 1);
            TEST_NZ(ibv_req_notify_cq(cq, 0));

            while (ibv_poll_cq(cq, 1, &wc)) {
                // rdma_debug("handle cq ...");
                on_completion(&wc);
            }
        } else {
            // rdma_debug("wait signal ...");
            pthread_mutex_lock(&mutex);
            pthread_cond_wait(&resume_cond, &mutex);
            pthread_mutex_unlock(&mutex);
        }
    }

    return NULL;
}
Example #5
File: rping.c Project: hkimura/pib
static void *cq_thread(void *arg)
{
	struct rping_cb *cb = arg;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;
	
	DEBUG_LOG("cq_thread started.\n");

	while (1) {	
		pthread_testcancel();

		ret = ibv_get_cq_event(cb->channel, &ev_cq, &ev_ctx);
		if (ret) {
			fprintf(stderr, "Failed to get cq event!\n");
			pthread_exit(NULL);
		}
		if (ev_cq != cb->cq) {
			fprintf(stderr, "Unknown CQ!\n");
			pthread_exit(NULL);
		}
		ret = ibv_req_notify_cq(cb->cq, 0);
		if (ret) {
			fprintf(stderr, "Failed to set notify!\n");
			pthread_exit(NULL);
		}
		ret = rping_cq_event_handler(cb);
		ibv_ack_cq_events(cb->cq, 1);
		if (ret)
			pthread_exit(NULL);
	}
}
Example #6
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}
Example #7
void ParallelRenderingClientIBVerbs::connectToServer()
{

    sleep(2);

    int tx_depth = 1;
    int ib_port = 1;
    int port = 18515 + number;

    int sockfd = ib->client_connect(const_cast<char *>(compositor.c_str()), port);

    char buf[16];
    int bytesWritten = 0;
    snprintf(buf, 16, "%d %d", width, height);
    while (bytesWritten < 16)
    {
        int num = write(sockfd, buf + bytesWritten, 16 - bytesWritten);
        if (num > 0)
            bytesWritten += num;
        else
        {
            fprintf(stderr, "ParallelRenderingClientIBVerbs::connect: socket error\n");
            return;
        }
    }

    int size = width * height * 4;

    ctx = ib->init_ctx(size, tx_depth, ib_port, NULL, image);
    fprintf(stderr, "image: [%p]\n", image);

    if (!ctx)
    {
        fprintf(stderr, "failed to initialize context\n");
        return;
    }
    else
    {
        dest = ib->init_dest(ctx, ib_port);

        remoteDest = ib->client_exch_dest(sockfd, dest);
        ib->connect_ctx(ctx, ib_port, dest->psn, remoteDest);
        /* An additional handshake is required *after* moving qp to RTR
         Arbitrarily reuse exch_dest for this purpose. */
        Destination *d = ib->client_exch_dest(sockfd, dest);
        delete d;
        close(sockfd);
    }

    if (ibv_req_notify_cq(ctx->cq, 0))
    {
        fprintf(stderr, "ibv_req_notify failed!\n");
        return;
    }
    connected = true;
}
Example #8
RDMAAdapter::RDMAAdapter()
    : context_(open_default_device()),
      pd_(alloc_protection_domain(context_)) {
  channel_ = ibv_create_comp_channel(context_);
  CHECK(channel_) << "Failed to create completion channel";
  cq_ = ibv_create_cq(context_, MAX_CONCURRENT_WRITES * 2, NULL, channel_, 0);
  CHECK(cq_) << "Failed to create completion queue";
  CHECK(!ibv_req_notify_cq(cq_, 0)) << "Failed to request CQ notification";

  StartInternalThread();
}
Example #9
void * poll_cq2(void *ctx)
{
  struct ibv_cq *cq;
  struct ibv_wc wc;
  while (1) {
    TEST_NZ(ibv_get_cq_event(s_ctx->comp_channel, &cq, &ctx));
    ibv_ack_cq_events(cq, 1);
    TEST_NZ(ibv_req_notify_cq(cq, 0));
    while (ibv_poll_cq(cq, 1, &wc))
      on_completion(&wc);
  }
  return NULL;
}
Example #10
static ucs_status_t uct_ib_iface_arm_cq(uct_ib_iface_t *iface, struct ibv_cq *cq,
                                        int solicited)
{
    int ret;

    ret = ibv_req_notify_cq(cq, solicited);
    if (ret != 0) {
        ucs_error("ibv_req_notify_cq("UCT_IB_IFACE_FMT", cq) failed: %m",
                  UCT_IB_IFACE_ARG(iface));
        return UCS_ERR_IO_ERROR;
    }
    return UCS_OK;
}
Example #11
File: rping.c Project: hkimura/pib
static int rping_setup_qp(struct rping_cb *cb, struct rdma_cm_id *cm_id)
{
	int ret;

	cb->pd = ibv_alloc_pd(cm_id->verbs);
	if (!cb->pd) {
		fprintf(stderr, "ibv_alloc_pd failed\n");
		return errno;
	}
	DEBUG_LOG("created pd %p\n", cb->pd);

	cb->channel = ibv_create_comp_channel(cm_id->verbs);
	if (!cb->channel) {
		fprintf(stderr, "ibv_create_comp_channel failed\n");
		ret = errno;
		goto err1;
	}
	DEBUG_LOG("created channel %p\n", cb->channel);

	cb->cq = ibv_create_cq(cm_id->verbs, RPING_SQ_DEPTH * 2, cb,
				cb->channel, 0);
	if (!cb->cq) {
		fprintf(stderr, "ibv_create_cq failed\n");
		ret = errno;
		goto err2;
	}
	DEBUG_LOG("created cq %p\n", cb->cq);

	ret = ibv_req_notify_cq(cb->cq, 0);
	if (ret) {
		fprintf(stderr, "ibv_create_cq failed\n");
		ret = errno;
		goto err3;
	}

	ret = rping_create_qp(cb);
	if (ret) {
		perror("rdma_create_qp");
		goto err3;
	}
	DEBUG_LOG("created qp %p\n", cb->qp);
	return 0;

err3:
	ibv_destroy_cq(cb->cq);
err2:
	ibv_destroy_comp_channel(cb->channel);
err1:
	ibv_dealloc_pd(cb->pd);
	return ret;
}
Example #12
static ucs_status_t uct_ib_iface_arm_cq(uct_ib_iface_t *iface, struct ibv_cq *cq,
                                        int solicited)
{
    int ret;

    ret = ibv_req_notify_cq(cq, solicited);
    if (ret != 0) {
        uct_ib_device_t *dev = uct_ib_iface_device(iface);
        ucs_error("ibv_req_notify_cq(%s:%d, cq) failed: %m",
                  uct_ib_device_name(dev), iface->port_num);
        return UCS_ERR_IO_ERROR;
    }
    return UCS_OK;
}
Example #13
void ParallelRenderingClientIBVerbs::run()
{

    struct ibv_wc wc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;

    while (keepRunning)
    {

        lock.lock();
        int ne;

        do
        {
            ne = ibv_poll_cq(ctx->cq, 1, &wc);
            if (ne > 0)
            {
                if (ibv_get_cq_event(ctx->ch, &ev_cq, &ev_ctx))
                {
                    fprintf(stderr, "Failed to get cq event!\n");
                    return;
                }
                if (ev_cq != ctx->cq)
                {
                    fprintf(stderr, "Unkown CQ!\n");
                    return;
                }
                ibv_ack_cq_events(ctx->cq, 1);
                ibv_req_notify_cq(ctx->cq, 0);
            }
            microSleep(100);
        } while (ne == 0);

        if (ne < 0)
        {
            fprintf(stderr, "poll CQ failed %d\n", ne);
            return;
        }
        if (wc.status != IBV_WC_SUCCESS)
        {
            fprintf(stderr, "Completion with error at client\n");
            fprintf(stderr, "Failed status %d: wr_id %d\n",
                    wc.status, (int)wc.wr_id);
            return;
        }
    }
}
Example #14
int wait_receive_data() {
    /* Wait for receive completion */
    err = ibv_get_cq_event(comp_chan, &evt_cq, &cq_context);
    if (err) return 1;

    ibv_ack_cq_events(evt_cq, 1);

    err = ibv_req_notify_cq(cq, 0);
    if (err) return 1;

    n = ibv_poll_cq(cq, 1, &wc);
    if (n <= 0) return 1;
    if (wc.status != IBV_WC_SUCCESS) return 1;
    
    return 0;
}
Example #15
static int get_thread_wc(struct thread_context_t *t_ctx, struct ibv_wc *wc, int is_send)
{
	struct ibv_cq           *cq;
	struct ibv_comp_channel *comp_channel;
	struct rdma_resource_t *rdma_resource;
	struct user_param_t *user_param;
	void *ectx;
	int rc = 0;

	rdma_resource = t_ctx->rdma_resource;
	user_param    = &(rdma_resource->user_param);

	if (is_send) {
		cq = t_ctx->send_cq;
		comp_channel = t_ctx->send_comp_channel;
	} else {
		cq = t_ctx->recv_cq;
		comp_channel = t_ctx->recv_comp_channel;
	}

	if (user_param->use_event) {
		rc = ibv_get_cq_event(comp_channel, &cq, &ectx);
		if (rc != 0) {
			ERROR("Failed to do ibv_get_cq_event.\n");
			return 1;
		}

		ibv_ack_cq_events(cq, 1);

		rc = ibv_req_notify_cq(cq, 0);
		if (rc != 0) {
			ERROR("Failed to do ibv_get_cq_event");
			return 1;
		}
	}

	do {
		rc = ibv_poll_cq(cq, 1, wc);
		if (rc < 0) {
			ERROR("Failed to poll CQ.\n");
			return 1;
		}
	} while (!user_param->use_event && (rc == 0)); /// need timeout

	return 0;
}
Example #16
    /// Initialize the InfiniBand verbs context.
    void init_context(struct ibv_context* context)
    {
        context_ = context;

        L_(debug) << "create verbs objects";

        pd_ = ibv_alloc_pd(context);
        if (!pd_)
            throw InfinibandException("ibv_alloc_pd failed");

        cq_ = ibv_create_cq(context, num_cqe_, nullptr, nullptr, 0);
        if (!cq_)
            throw InfinibandException("ibv_create_cq failed");

        if (ibv_req_notify_cq(cq_, 0))
            throw InfinibandException("ibv_req_notify_cq failed");
    }
Example #17
static void async_completion_thread()
{
    int ret;
    struct ibv_comp_channel *ev_ch;
    struct ibv_cq *ev_cq;
    void *ev_ctx;

    /* This thread should be in a cancel enabled state */
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);

    ev_ch = viadev.comp_channel;

    while(1) {
        pthread_testcancel();
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
        do {
            ret = ibv_get_cq_event(ev_ch, &ev_cq, &ev_ctx);

            if (ret && errno != EINTR) {
                error_abort_all(IBV_RETURN_ERR,
                        "Failed to get cq event: %d\n", ret);
            }

        } while (ret && errno == EINTR);

        pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);

        if (ev_cq != viadev.cq_hndl) {
            error_abort_all(GEN_ASSERT_ERR, "Event in unknown CQ\n");
        }

        pthread_kill(parent_threadId, SIGUSR1);

        ibv_ack_cq_events(viadev.cq_hndl, 1);

        pthread_testcancel();

        if (ibv_req_notify_cq(viadev.cq_hndl, 1)) {
            error_abort_all(IBV_RETURN_ERR,
                    "Couldn't request for CQ notification\n");
        }
    }
}
Example #18
/**
 * Polling for events on a inner thread allows processing of management messages
 * like buffer connection immediately, even if the user is not polling.
 * Otherwise buffer constructors would block indefinitely.
 *
 * Deep learning workloads are about sending small numbers of large messages,
 * in which case this model works great. If the library were to be used to
 * exchange large numbers of short messages, it would be useful to split
 * management and data messages over two different queue pairs. User threads
 * could then wait or poll on the data queue pair directly (a sketch of such a
 * split follows this function).
 */
void RDMAAdapter::InternalThreadEntry() {
  while (!must_stop()) {
    ibv_cq* cq;
    void* cq_context;
    CHECK(!ibv_get_cq_event(channel_, &cq, &cq_context));
    CHECK(cq == cq_);
    ibv_ack_cq_events(cq, 1);
    CHECK(!ibv_req_notify_cq(cq_, 0));

    int ne = ibv_poll_cq(cq_, MAX_CONCURRENT_WRITES * 2,
      static_cast<ibv_wc*>(wc_));
    CHECK_GE(ne, 0);

    for (int i = 0; i < ne; ++i) {
      CHECK(wc_[i].status == IBV_WC_SUCCESS) << "Failed status \n"
                                             << ibv_wc_status_str(wc_[i].status)
                                             << " " << wc_[i].status << " "
                                             << static_cast<int>(wc_[i].wr_id)
                                             << " "<< wc_[i].vendor_err;

      if (wc_[i].opcode == IBV_WC_RECV_RDMA_WITH_IMM) {
        // Data message, add it to user received queue
        RDMAChannel* channel = reinterpret_cast<RDMAChannel*>(wc_[i].wr_id);
        channel->recv();
        int id = wc_[i].imm_data;
        if (id >= CTRL_ID_OFFSET) {
        // ctrl signal
          ctrl_received_.push(channel->buffers_[id - CTRL_ID_OFFSET]);
        } else {
        // data
          received_.push(channel->buffers_[id]);
        }
      } else {
        if (wc_[i].opcode & IBV_WC_RECV) {
          // Buffer connection message
          RDMAChannel* channel = reinterpret_cast<RDMAChannel*>(wc_[i].wr_id);
          int id = wc_[i].imm_data;
          channel->memory_regions_queue_.push(channel->memory_regions_[id]);
          CHECK(id == channel->memory_regions_received_++);
          CHECK(!ibv_dereg_mr(channel->region_regions_[id]));
        }
      }
    }
  }
}
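
A rough sketch of the split suggested in the comment above: a small,
event-driven CQ for management traffic and a separate CQ with no completion
channel that user threads poll directly for data. Each queue pair would then be
created with the matching CQ as its send/receive CQ. The names (split_cqs,
mgmt_cq, data_cq) and the CQ depths are illustrative assumptions, not part of
RDMAAdapter.

/* Illustrative sketch only; not RDMAAdapter's actual API. */
struct split_cqs {
    struct ibv_comp_channel *mgmt_channel;
    struct ibv_cq *mgmt_cq;   /* armed; serviced by an internal thread */
    struct ibv_cq *data_cq;   /* polled directly by user threads */
};

static int build_split_cqs(struct ibv_context *verbs, struct split_cqs *s)
{
    s->mgmt_channel = ibv_create_comp_channel(verbs);
    if (!s->mgmt_channel)
        return -1;

    /* Management completions are rare: wait for events on this CQ. */
    s->mgmt_cq = ibv_create_cq(verbs, 16, NULL, s->mgmt_channel, 0);
    if (!s->mgmt_cq || ibv_req_notify_cq(s->mgmt_cq, 0))
        return -1;

    /* Data completions are latency-critical: no completion channel and no
       ibv_req_notify_cq, so they are consumed purely by polling. */
    s->data_cq = ibv_create_cq(verbs, 1024, NULL, NULL, 0);
    return s->data_cq ? 0 : -1;
}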
Example #19
inline void cfio_rdma_client_wait(void *ctx)
{
    struct ibv_cq *cq;
    struct ibv_wc wc;

    while (request_stack_size) {
        // rdma_debug("get cq event ...");
        TEST_NZ(ibv_get_cq_event(s_ctx->comp_channel, &cq, &ctx));
        // rdma_debug("ibv_ack_cq_events...");
        ibv_ack_cq_events(cq, 1);
        TEST_NZ(ibv_req_notify_cq(cq, 0));

        while (ibv_poll_cq(cq, 1, &wc)) {
            // rdma_debug("handle cq ...");
            on_completion(&wc);
        }
    }
}
Example #20
void build_context(struct ibv_context *verbs)
{
    if (s_ctx) {
        if (s_ctx->ctx != verbs) {
            die("cannot handle events in more than one context.");
        }
        return;
    }

    s_ctx = (rdma_ctx_t *)malloc(sizeof(rdma_ctx_t));

    s_ctx->ctx = verbs;
    TEST_Z(s_ctx->pd = ibv_alloc_pd(s_ctx->ctx));
    TEST_Z(s_ctx->comp_channel = ibv_create_comp_channel(s_ctx->ctx));
    TEST_Z(s_ctx->cq = ibv_create_cq(s_ctx->ctx, 10, NULL, s_ctx->comp_channel, 0)); /* cqe=10 is arbitrary */

    TEST_NZ(ibv_req_notify_cq(s_ctx->cq, 0));
}
Example #21
File: ibv.c Project: carriercomm/ix
void event_handler(struct ibv_cq *cq)
{
  int ret;
 
  while(1) {
     /* int ibv_poll_cq(a, b, c):
      *     a: completion queue to poll
      *     b: max number of completions to return
      *     c: array of at least (b) ibv_wc entries into which the
      *        completions are returned.
      * (A batched variant of this handler is sketched after this example.)
      */
    ret = ibv_poll_cq(cq, 1, &wc);

     if(ret == 0) {
        LOGPRINTF(("Empty completion queue, requesting next notification"));
        ibv_req_notify_cq(r_cq_hndl, 0);  /* re-arm the CQ before returning */
        return;
     } else if(ret < 0) {
        fprintf(stderr, "Error in event_handler (polling cq)\n");
        exit(-1);
     } else if(wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "Error in event_handler, on returned work completion "
		"status: %d\n", wc.status);
        exit(-1);
     }
     
     LOGPRINTF(("Retrieved work completion"));

     /* For ping-pong mode at least, this check shouldn't be needed for
      * normal operation, but it will help catch any bugs with multiple
      * sends coming through when we're only expecting one.
      */
     if(receive_complete == 1) {

        while(receive_complete != 0) sched_yield();

     }

     receive_complete = 1;

  }
  
}
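
A batched variant of the handler above, as mentioned in its parameter comment:
ibv_poll_cq can return several completions per call when given an array of work
completions. This is a sketch only; the batch size and the on_wc() handler name
are assumptions.

/* Sketch: poll up to 16 completions per call, then re-arm the CQ when empty. */
void event_handler_batched(struct ibv_cq *cq)
{
    struct ibv_wc wc_batch[16];
    int n, i;

    for (;;) {
        n = ibv_poll_cq(cq, 16, wc_batch);
        if (n == 0) {
            /* CQ drained: re-arm it and return until the next event */
            ibv_req_notify_cq(cq, 0);
            return;
        }
        if (n < 0) {
            fprintf(stderr, "Error polling CQ\n");
            exit(-1);
        }
        for (i = 0; i < n; ++i) {
            if (wc_batch[i].status != IBV_WC_SUCCESS) {
                fprintf(stderr, "Bad completion status: %d\n", wc_batch[i].status);
                exit(-1);
            }
            /* on_wc(&wc_batch[i]);  -- placeholder for per-completion handling */
        }
    }
}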
Example #22
void *
poll_cq(void *ctx)
{
    struct ibv_cq *cq;
    struct ibv_wc wc;
    IbvConnection *conn = (IbvConnection *)ctx;

    while (1) {
        TEST_NZ(ibv_get_cq_event(conn->comp_channel, &cq, &ctx));
        ibv_ack_cq_events(cq, 1);
        TEST_NZ(ibv_req_notify_cq(cq, 0));

        while (ibv_poll_cq(cq, 1, &wc)) {
            (OnCompletionHandler)(&wc);
        }
    }

    return NULL;
}
Example #23
static void build_context(struct ibv_context *verbs)
{
  if (s_ctx) {
    if (s_ctx->ctx != verbs)
      die("cannot handle events in more than one context.");

    return;
  }

  s_ctx = (struct context *)malloc(sizeof(struct context));

  s_ctx->ctx = verbs;

  TEST_Z(s_ctx->pd = ibv_alloc_pd(s_ctx->ctx));
  TEST_Z(s_ctx->comp_channel = ibv_create_comp_channel(s_ctx->ctx));
  TEST_Z(s_ctx->cq = ibv_create_cq(s_ctx->ctx, 10, NULL, s_ctx->comp_channel, 0)); /* cqe=10 is arbitrary */
  TEST_NZ(ibv_req_notify_cq(s_ctx->cq, 0));

  //  TEST_NZ(pthread_create(&s_ctx->cq_poller_thread, NULL, poll_cq, NULL));
}
Example #24
int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}
Example #25
void Connector::build_context(struct ibv_context* verb_)
{
  if (s_ctx_ && s_ctx_->ctx_ != verb_) {
    log_(ERROR, "cannot handle events in more than one context.")
    exit(EXIT_FAILURE);
  }
  
  s_ctx_ = (struct context*)malloc(sizeof(struct context) );
  
  s_ctx_->ctx_ = verb_;
  
  TEST_Z(s_ctx_->pd_ = ibv_alloc_pd(s_ctx_->ctx_) );
  TEST_Z(s_ctx_->comp_channel_ = ibv_create_comp_channel(s_ctx_->ctx_) );
  TEST_Z(s_ctx_->cq_ = ibv_create_cq(s_ctx_->ctx_, MAX_QP__CQ_LENGTH, NULL, s_ctx_->comp_channel_, 0) );
  TEST_NZ(ibv_req_notify_cq(s_ctx_->cq_, 0) )
  // TODO
  // TEST_NZ(pthread_create(pthread_v.back(), NULL, &Connector::bst_poll_cq, (void*)(this) ) )
  pthread_v.push_back(new pthread_t() );
  wrap_Connector* wrap_ = new wrap_Connector(this, s_ctx_);
  TEST_NZ(pthread_create(pthread_v.back(), NULL, call_poll_cq_w_wrap, wrap_) )
}
Example #26
static int get_cq_event(void)
{
	struct ibv_cq *ev_cq;
	void          *ev_ctx;

	if (ibv_get_cq_event(ctx.channel, &ev_cq, &ev_ctx)) {
		fprintf(stderr, "Failed to get cq_event\n");
		return 1;
	}

	if (ev_cq != ctx.recv_cq) {
		fprintf(stderr, "CQ event for unknown CQ %p\n", ev_cq);
		return 1;
	}

	if (ibv_req_notify_cq(ctx.recv_cq, 0)) {
		fprintf(stderr, "Couldn't request CQ notification\n");
		return 1;
	}

	return 0;
}
Example #27
/*
 * Open a RDMA device.
 */
int
rd_open(DEVICE *dev, int trans, int max_send_wr, int max_recv_wr)
{
#if 0
    /* Send request to client */
    if (is_client())
        client_send_request();
#endif

    /* Clear structure */
    memset(dev, 0, sizeof(*dev));

    /* Set transport type and maximum work request parameters */
#if 0
    dev->trans = trans;
#endif
    dev->max_send_wr = max_send_wr;
    dev->max_recv_wr = max_recv_wr;

    /* Open device */
#if 0
    if (Req.use_cm)
        cm_open(dev);
    else
#endif
        int r = ib_open(dev);
    if (r != 0) {
        return r;
    }

    /* Request CQ notification if not polling */
    if (!Req.poll_mode) {
        if (ibv_req_notify_cq(dev->cq, 0) != 0)
            return error(SYS, "failed to request CQ notification");
    }

    return r;
}
Example #28
static void *mca_oob_ud_event_dispatch(int fd, int flags, void *context)
{
    int rc;
    mca_oob_ud_device_t *device = (mca_oob_ud_device_t *) context;
    mca_oob_ud_port_t *port = NULL;
    struct ibv_cq *event_cq = NULL;
    void *event_context     = NULL;

    do {
        rc = ibv_get_cq_event (device->ib_channel, &event_cq, &event_context);
    } while (rc && errno == EINTR);

    if (NULL == event_cq) {
        /* re-arm the event; port is still unknown at this point, so use the
         * device pointer directly rather than the NULL port */
        opal_event_add (&device->event, NULL);

        return NULL;
    }

    port = (mca_oob_ud_port_t *) event_context;

    rc = mca_oob_ud_process_messages (event_cq, port);
    if (rc < 0) {
        opal_output (0, "%s oob:ud:event_dispatch error processing messages",
                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
        return NULL;
    }

    if (ibv_req_notify_cq(event_cq, 0)) {
        opal_output (0, "%s oob:ud:event_dispatch error asking for cq notifications",
                     ORTE_NAME_PRINT(ORTE_PROC_MY_NAME));
    }

    /* re-arm the event */
    opal_event_add (&port->device->event, NULL);
 
    return NULL;
}
Example #29
void  poll_cq(void *ctx)
{
  struct ibv_cq *cq;
  struct ibv_wc wc;
  int ne;

    TEST_NZ(ibv_get_cq_event(s_ctx->comp_channel, &cq, &ctx));//block by default
    ibv_ack_cq_events(cq, 1);
    TEST_NZ(ibv_req_notify_cq(cq, 0));

    do {
        ne = ibv_poll_cq(cq, 1, &wc);
        if(ne < 0){
            printf("fail to poll completion from the CQ. ret = %d\n", ne);
            return;
        }
        else if(ne == 0)
            continue;
        else
            on_completion(&wc);
    } while (ne == 0);

  return;
}
Example #30
/*
 * Return -1 for error and 'nr events' for a positive number
 * of events
 */
static int rdma_poll_wait(struct thread_data *td, enum ibv_wc_opcode opcode)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (rd->cq_event_num > 0) {	/* previous left */
		rd->cq_event_num--;
		return 0;
	}

again:
	if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
		log_err("fio: Failed to get cq event!\n");
		return -1;
	}
	if (ev_cq != rd->cq) {
		log_err("fio: Unknown CQ!\n");
		return -1;
	}
	if (ibv_req_notify_cq(rd->cq, 0) != 0) {
		log_err("fio: Failed to set notify!\n");
		return -1;
	}

	ret = cq_event_handler(td, opcode);
	if (ret == 0)
		goto again;

	ibv_ack_cq_events(rd->cq, ret);

	rd->cq_event_num--;

	return ret;
}