Example #1
0
static int set_up_connection(struct pingpong_context *ctx,
							 struct perftest_parameters *user_parm,
							 struct pingpong_dest *my_dest) {

	int i;
	union ibv_gid temp_gid;

	if (user_parm->gid_index != -1) {
		if (ibv_query_gid(ctx->context,user_parm->ib_port,user_parm->gid_index,&temp_gid)) {
			return -1;
		}
	}
	
	for (i=0; i < user_parm->num_of_qps; i++) {
		my_dest[i].lid   = ctx_get_local_lid(ctx->context,user_parm->ib_port);
		my_dest[i].qpn   = ctx->qp[i]->qp_num;
		my_dest[i].psn   = lrand48() & 0xffffff;
		my_dest[i].rkey  = ctx->mr->rkey;
		// Each QP publishes its receive buffer address.
		my_dest[i].vaddr = (uintptr_t)ctx->buf + (user_parm->num_of_qps + i)*BUFF_SIZE(ctx->size);
		memcpy(my_dest[i].gid.raw,temp_gid.raw ,16);

		// Do not fail the test on a zero LID when running over RoCE; only verify the LID when no GID index is in use.
		if (user_parm->gid_index < 0) {
			if (!my_dest[i].lid) {
				fprintf(stderr," Local lid 0x0 detected. Is an SM running? \n");
				return -1;
			}
		}
	}
	return 0;
}
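
The vaddr computation above assumes the work buffer is split into 2 * num_of_qps slots of BUFF_SIZE(size) bytes each: the first num_of_qps slots hold the local send data and the last num_of_qps slots are advertised as receive regions. A minimal sketch of what such a macro could look like, assuming a 4 KB cycle buffer; the real definition lives in perftest_resources.h and may differ, and the two-argument form used further down (BUFF_SIZE(ctx->size, ctx->cycle_buffer)) presumably parameterizes the same constant.

/* Hypothetical sketch of BUFF_SIZE: never hand out a slot smaller than one
 * cycle buffer, so small messages can still be strided across cache lines. */
#define CYCLE_BUFFER 4096
#define BUFF_SIZE(size) (((size) < CYCLE_BUFFER) ? CYCLE_BUFFER : (size))

/* Under this assumption, QP i sends from ctx->buf + i * BUFF_SIZE(size) and
 * advertises ctx->buf + (num_of_qps + i) * BUFF_SIZE(size) to its peer, so
 * the send and receive halves of the buffer never overlap. */
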
int set_up_connection(struct pingpong_context *ctx,
		struct perftest_parameters *user_param,
		struct pingpong_dest *my_dest)
{
	int num_of_qps = user_param->num_of_qps;
	int num_of_qps_per_port = user_param->num_of_qps / 2;

	int i;
	int is_ipv4;

	union ibv_gid temp_gid;
	union ibv_gid temp_gid2;
	struct ibv_port_attr attr;

	srand48(getpid() * time(NULL));

	/* In XRC (or DC) with bidirectional traffic or a latency test there are
	   separate send and receive QPs, so the actual number of send/recv QPs
	   is num_of_qps / 2. */
	if ( (user_param->connection_type == DC || user_param->use_xrc) && (user_param->duplex || user_param->tst == LAT)) {
		num_of_qps /= 2;
		num_of_qps_per_port = num_of_qps / 2;
	}

	if (user_param->gid_index != -1) {
		if (ibv_query_port(ctx->context,user_param->ib_port,&attr))
			return -1;

		if (user_param->use_gid_user) {
			if (ibv_query_gid(ctx->context,user_param->ib_port,user_param->gid_index,&temp_gid)) {
				return -1;
			}
		} else {
			for (i=0 ; i < attr.gid_tbl_len; i++) {
				if (ibv_query_gid(ctx->context,user_param->ib_port,i,&temp_gid)) {	
					return -1;
				}
				is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)temp_gid.raw);
				if ((user_param->ipv6 && !is_ipv4) || (!user_param->ipv6 && is_ipv4)) {
					user_param->gid_index = i;
					break;
				}
			}
		}
	}

	if (user_param->dualport==ON) {
		if (user_param->gid_index2 != -1) {
			if (ibv_query_port(ctx->context,user_param->ib_port2,&attr))
				return -1;

			if (user_param->use_gid_user) {
				if (ibv_query_gid(ctx->context,user_param->ib_port2,user_param->gid_index2,&temp_gid2))
					return -1;
			} else {
				for (i=0 ; i < attr.gid_tbl_len; i++) {
					if (ibv_query_gid(ctx->context,user_param->ib_port2,i,&temp_gid2)) {
						return -1;
					}
					is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)temp_gid2.raw);
					if ((user_param->ipv6 && !is_ipv4) || (!user_param->ipv6 && is_ipv4)) {
						user_param->gid_index2 = i;
						break;
					}
				}
			}
		}
	}

	for (i = 0; i < user_param->num_of_qps; i++) {

		if (user_param->dualport == ON) {
			/* The first half of the QPs are for ib_port and the second half for ib_port2.
			   In XRC with bidirectional traffic, the first half of the QPs are xrc_send
			   QPs and the second half are xrc_recv QPs; the first half of those send/recv
			   QPs are for ib_port and the second half for ib_port2.
			*/
			if (i % num_of_qps < num_of_qps_per_port) {
				my_dest[i].lid   = ctx_get_local_lid(ctx->context,user_param->ib_port);
				my_dest[i].gid_index = user_param->gid_index;
			} else {
				my_dest[i].lid   = ctx_get_local_lid(ctx->context,user_param->ib_port2);
				my_dest[i].gid_index = user_param->gid_index2;
			}
			/*single-port case*/
		} else {
			my_dest[i].lid   = ctx_get_local_lid(ctx->context,user_param->ib_port);
			my_dest[i].gid_index = user_param->gid_index;
		}

		my_dest[i].qpn   = ctx->qp[i]->qp_num;
		my_dest[i].psn   = lrand48() & 0xffffff;
		my_dest[i].rkey  = ctx->mr->rkey;

		/* Each QP publishes its receive buffer address. */
		my_dest[i].out_reads = user_param->out_reads;
		my_dest[i].vaddr = (uintptr_t)ctx->buf + (user_param->num_of_qps + i)*BUFF_SIZE(ctx->size,ctx->cycle_buffer);

		if (user_param->dualport==ON) {

			if (i % num_of_qps < num_of_qps_per_port)
				memcpy(my_dest[i].gid.raw,temp_gid.raw ,16);

			else
				memcpy(my_dest[i].gid.raw,temp_gid2.raw ,16);
		} else {
			memcpy(my_dest[i].gid.raw,temp_gid.raw ,16);
		}

		/*
		We do not fail test upon lid above RoCE.
		if ( (user_param->gid_index < 0) ||  ((user_param->gid_index2 < 0) && (user_param->dualport == ON))  ){
			if (!my_dest[i].lid) {
				fprintf(stderr," Local lid 0x0 detected. Is an SM running? \n");
				return -1;
			}
		}
		*/
	}

	#ifdef HAVE_XRCD
	if (user_param->use_xrc) {
		for (i=0; i < user_param->num_of_qps; i++) {
			if (ibv_get_srq_num(ctx->srq,&(my_dest[i].srqn))) {
				fprintf(stderr, "Couldn't get SRQ number\n");
				return 1;
			}
		}
	}
	#endif

	#ifdef HAVE_DC
	if(user_param->machine == SERVER || user_param->duplex || user_param->tst == LAT) {
		if (user_param->connection_type == DC) {
			for (i=0; i < user_param->num_of_qps; i++) {
				if (ibv_get_srq_num(ctx->srq, &(my_dest[i].srqn))) {
					fprintf(stderr, "Couldn't get SRQ number\n");
					return 1;
				}
			}
		}
	}
	#endif
	return 0;
}
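
The GID-table scan above relies on an ipv6_addr_v4mapped() helper to distinguish IPv4-mapped GIDs (RoCE with IPv4 addresses) from native IPv6 ones. A plausible sketch, assuming the usual ::ffff:a.b.c.d mapping; the project's real helper may be implemented differently:

#include <netinet/in.h>
#include <string.h>

/* Sketch of an assumed helper: returns non-zero when the 128-bit GID is an
 * IPv4-mapped IPv6 address (::ffff:a.b.c.d), which is how IPv4 addresses
 * appear in the GID table. */
static int ipv6_addr_v4mapped(const struct in6_addr *a)
{
	static const unsigned char v4_prefix[12] =
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };
	return memcmp(a->s6_addr, v4_prefix, sizeof(v4_prefix)) == 0;
}
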
Example #3
0
// A buffer of size ConsoleNative::MaxConsoleTitleLength is quite big.
// First, we try allocating a smaller buffer because most often, the console title is short.
// If it turns out that the short buffer size is insufficient, we try again using a larger buffer.
INT32 QCALLTYPE ConsoleNative::GetTitle(QCall::StringHandleOnStack outTitle, INT32& outTitleLen) {

    QCALL_CONTRACT;
    
    INT32 result = 0;
    
    BEGIN_QCALL;

    // Reserve buffer:   
    InlineSBuffer< ADJUST_NUM_CHARS(BUFF_SIZE(ShortConsoleTitleLength)) > titleBuff;
        
    // Hold last error:
    DWORD lastError;

    // Read console title, get length of the title:    
    
    BYTE *buffPtr = titleBuff.OpenRawBuffer( ADJUST_NUM_CHARS(BUFF_SIZE(ShortConsoleTitleLength)) );
    
    SetLastError(0);
    DWORD len = GetConsoleTitle((TCHAR *) buffPtr, ADJUST_NUM_CHARS(ShortConsoleTitleLength + 1));
    lastError = GetLastError();

    titleBuff.CloseRawBuffer();
    
    // If the title length is larger than supported maximum, do not bother reading it, just return the length:
    if (len > MaxConsoleTitleLength) {
    
        outTitleLen = len;
        outTitle.Set(W(""));    
        result = 0;  
    
    // If title length is within valid range:
    } else {
    
        // If the title is longer than the short buffer, but can fit in the max supported length,
        // read it again with the long buffer:
        if (len > ShortConsoleTitleLength) {
        
            COUNT_T buffSize = ADJUST_NUM_CHARS(BUFF_SIZE(len));
            titleBuff.SetSize(buffSize);
            
            BYTE *buffPtr = titleBuff.OpenRawBuffer(buffSize);
            
            SetLastError(0);
            len = GetConsoleTitle((TCHAR *) buffPtr, ADJUST_NUM_CHARS(len + 1));
            lastError = GetLastError();

            titleBuff.CloseRawBuffer();
        }
        
        // Zero may indicate error or empty title. Check for error:
        result = (INT32) (0 == len ? lastError : 0);
        
        // If no error, set title and length:
        if (0 == result) {
            const BYTE *cBuffPtr = (const BYTE *) titleBuff;
            outTitle.Set((TCHAR *) cBuffPtr);
            outTitleLen = (INT32) len;
            
        // If error, set to empty:
        } else {            
            outTitleLen = (INT32) -1;
            // No need to set the title string if we have an error anyway.
        }
    }  // if title length is within valid range.
        
    END_QCALL;

    return result;
}
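
The same grow-on-demand pattern can be written against the raw Win32 API. A minimal C sketch, assuming (as the code above does) that GetConsoleTitleW() reports the full title length even when the supplied buffer is too short; the constant and helper name below are illustrative, not part of the CoreCLR sources:

#include <windows.h>
#include <stdlib.h>

#define SHORT_TITLE_LEN 200    /* assumed short-buffer size, for illustration */

static wchar_t *read_console_title(DWORD *out_len)
{
    wchar_t small[SHORT_TITLE_LEN + 1];
    DWORD len = GetConsoleTitleW(small, SHORT_TITLE_LEN + 1);

    if (len == 0) {            /* error or empty title */
        *out_len = 0;
        return NULL;
    }
    if (len <= SHORT_TITLE_LEN) {
        *out_len = len;
        return _wcsdup(small); /* fits in the short buffer */
    }

    /* Title is longer than the short buffer: retry with an exact-size buffer. */
    wchar_t *big = (wchar_t *) malloc((len + 1) * sizeof(wchar_t));
    if (big != NULL)
        *out_len = GetConsoleTitleW(big, len + 1);
    return big;
}
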
Example #4
0
int run_iter(struct pingpong_context *ctx, struct perftest_parameters *user_param,
			struct pingpong_dest *rem_dest, int size,int maxpostsofqpiniteration)
{

	int                totscnt  = 0;
	int                totccnt  = 0;
	int                i        = 0;
	int                index, ne;
	int                warmindex;
	struct ibv_send_wr *bad_wr;
	struct ibv_wc      *wc       = NULL;
	struct ibv_sge     *sge_list = NULL;
	struct ibv_send_wr *wr       = NULL;
	uint64_t           *my_addr  = NULL;
	uint64_t           *rem_addr = NULL;

	ALLOCATE(wr ,struct ibv_send_wr , user_param->num_of_qps);
	ALLOCATE(sge_list ,struct ibv_sge , user_param->num_of_qps);
	ALLOCATE(my_addr ,uint64_t ,user_param->num_of_qps);
	ALLOCATE(rem_addr ,uint64_t ,user_param->num_of_qps);
	ALLOCATE(wc ,struct ibv_wc , DEF_WC_SIZE);


	// Each QP has its own WR and SGE that hold that QP's addresses and attributes.
	// We write in cycles over the buffer to get maximum performance on "Nehalem" systems.
	for (index = 0 ; index < user_param->num_of_qps ; index++) {

		sge_list[index].addr   = (uintptr_t)ctx->buf + (index*BUFF_SIZE(ctx->size));
		sge_list[index].length = size;
		sge_list[index].lkey   = ctx->mr->lkey;

		wr[index].sg_list             = &sge_list[index]; 
		wr[index].num_sge             = MAX_SEND_SGE;
		wr[index].opcode	          = IBV_WR_RDMA_WRITE;
		wr[index].next                = NULL;
		wr[index].wr.rdma.remote_addr = rem_dest[index].vaddr;
		wr[index].wr.rdma.rkey        = rem_dest[index].rkey;
		wr[index].wr_id               = index;
		wr[index].send_flags          = IBV_SEND_SIGNALED;

		if (size <= user_param->inline_size) 
			wr[index].send_flags |= IBV_SEND_INLINE;

		ctx->scnt[index] = 0;
		ctx->ccnt[index] = 0;
		my_addr[index]	 = sge_list[index].addr;
		rem_addr[index]  = wr[index].wr.rdma.remote_addr;

	}
	
	// Done with setup. Start the test with a warm-up phase: post WRs round-robin,
	// one per QP per pass, until each QP has maxpostsofqpiniteration posts outstanding.
	for (warmindex = 0 ;warmindex < maxpostsofqpiniteration ;warmindex ++ ) {
	  for (index =0 ; index < user_param->num_of_qps ; index++) {

			if (totscnt % CQ_MODERATION == 0)
				wr[index].send_flags &= ~IBV_SEND_SIGNALED;

			tposted[totscnt] = get_cycles();
            if (ibv_post_send(ctx->qp[index],&wr[index],&bad_wr)) {
                fprintf(stderr,"Couldn't post send: qp %d scnt=%d \n",index,ctx->scnt[index]);
                return 1;
            }
			// When possible, advance the remote (and local) address so the next
			// write targets a different location in the cycle buffer.
			if (size <= (CYCLE_BUFFER / 2)) { 
				increase_rem_addr(&wr[index],size,ctx->scnt[index],rem_addr[index]);
				increase_loc_addr(wr[index].sg_list,size,ctx->scnt[index],my_addr[index],0);
			}


			ctx->scnt[index]++;
            totscnt++;

			if (totscnt%CQ_MODERATION == CQ_MODERATION - 1 || totscnt == user_param->iters - 1)
				wr[index].send_flags |= IBV_SEND_SIGNALED;

      }
	}  

	// Main loop: post (and poll) until all iterations are both posted and completed.
	while (totscnt < (user_param->iters * user_param->num_of_qps)  || totccnt < (user_param->iters * user_param->num_of_qps) ) {

		// Run over all the QPs and keep posting on each until it reaches its outstanding-WR limit.
		for (index =0 ; index < user_param->num_of_qps ; index++) {
          
			while (ctx->scnt[index] < user_param->iters && (ctx->scnt[index] - ctx->ccnt[index]) < maxpostsofqpiniteration) {

				if (totscnt % CQ_MODERATION == 0)
					wr[index].send_flags &= ~IBV_SEND_SIGNALED;

				tposted[totscnt] = get_cycles();
				if (ibv_post_send(ctx->qp[index],&wr[index],&bad_wr)) {
					fprintf(stderr,"Couldn't post send: qp %d scnt=%d \n",index,ctx->scnt[index]);
					return 1;
				}     
				
				if (size <= (CYCLE_BUFFER / 2)) { 
					increase_rem_addr(&wr[index],size,ctx->scnt[index],rem_addr[index]);
					increase_loc_addr(wr[index].sg_list,size,ctx->scnt[index],my_addr[index],0);
				}

				ctx->scnt[index] = ctx->scnt[index]+1;
				totscnt++;

				if (totscnt%CQ_MODERATION == CQ_MODERATION - 1 || totscnt == user_param->iters - 1)
					wr[index].send_flags |= IBV_SEND_SIGNALED;
			}
		}

		// Finished posting for now; poll for completions.
		if (totccnt < (user_param->iters * user_param->num_of_qps) ) {
	    
			do {
				ne = ibv_poll_cq(ctx->cq, DEF_WC_SIZE, wc);
				if (ne > 0) {
					for (i = 0; i < ne; i++) {

						if (wc[i].status != IBV_WC_SUCCESS) 
							NOTIFY_COMP_ERROR_SEND(wc[i],totscnt,totccnt);

						ctx->ccnt[(int)wc[i].wr_id] += CQ_MODERATION;
						totccnt += CQ_MODERATION;

						if (totccnt >= user_param->iters - 1)
							tcompleted[user_param->iters - 1] = get_cycles();

						else 
							tcompleted[totccnt-1] = get_cycles();
					}
				}
			} while (ne > 0);

			if (ne < 0) {
				fprintf(stderr, "poll CQ failed %d\n", ne);
				return 1;
			}
		}
	}

	free(wr);
	free(sge_list);
	free(my_addr);
	free(rem_addr);
	free(wc);
	return 0;
}
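
The posting loops above advance the local and remote addresses between writes so that consecutive messages land in different parts of the cycle buffer, wrapping back to the base address once the buffer is exhausted. Sketches of what the two helpers might look like, assuming a 4 KB cycle buffer and a stride equal to the message size; the real helpers in perftest_resources.h round the stride to cache-line multiples and may differ:

#include <stdint.h>
#include <infiniband/verbs.h>

#define CYCLE_BUFFER 4096

/* Assumed sketch: advance the local SGE address by one message, wrapping to
 * the base address once the cycle buffer has been fully used. The last
 * argument mirrors the UD flag the callers pass but is unused here. */
static void increase_loc_addr(struct ibv_sge *sg, int size, int scnt,
			      uint64_t base_addr, int is_ud)
{
	(void) is_ud;
	sg->addr += size;
	if ((scnt + 1) % (CYCLE_BUFFER / size) == 0)
		sg->addr = base_addr;
}

/* Assumed sketch: same idea for the remote side of an RDMA write. */
static void increase_rem_addr(struct ibv_send_wr *wr, int size, int scnt,
			      uint64_t base_addr)
{
	wr->wr.rdma.remote_addr += size;
	if ((scnt + 1) % (CYCLE_BUFFER / size) == 0)
		wr->wr.rdma.remote_addr = base_addr;
}
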
Example #5
0
static struct pingpong_context *pp_init_ctx(struct ibv_device *ib_dev,unsigned size,
											struct perftest_parameters *user_parm)
{
	struct pingpong_context *ctx;
	int counter;

	ALLOCATE(ctx,struct pingpong_context,1);
	ALLOCATE(ctx->qp,struct ibv_qp*,user_parm->num_of_qps);
	ALLOCATE(ctx->scnt,int,user_parm->num_of_qps);
	ALLOCATE(ctx->ccnt,int,user_parm->num_of_qps);

	memset(ctx->scnt, 0, user_parm->num_of_qps * sizeof (int));
	memset(ctx->ccnt, 0, user_parm->num_of_qps * sizeof (int));
	
	ctx->size     = size;
	ctx->tx_depth = user_parm->tx_depth;

	// We allocate the buffer at BUFF_SIZE granularity to get maximum performance on
	// "Nehalem" systems, as described by the BUFF_SIZE macro in perftest_resources.h.
	ctx->buf = memalign(page_size, BUFF_SIZE(size) * 2 * user_parm->num_of_qps);
	if (!ctx->buf) {
		fprintf(stderr, "Couldn't allocate work buf.\n");
		return NULL;
	}

	memset(ctx->buf, 0, BUFF_SIZE(size) * 2 * user_parm->num_of_qps);

	ctx->context = ibv_open_device(ib_dev);
	if (!ctx->context) {
		fprintf(stderr, "Couldn't get context for %s\n",
			ibv_get_device_name(ib_dev));
		return NULL;
	}

	// Find the link type and configure the HCA accordingly.
	if (ctx_set_link_layer(ctx->context,user_parm)) {
		fprintf(stderr, " Couldn't set the link layer\n");
		return NULL;
	}

	// Configure the link MTU according to the user's request or the active MTU.
	if (ctx_set_mtu(ctx->context,user_parm)) {
		fprintf(stderr, "Couldn't set the link layer\n");
		return NULL;
	}

	ctx->pd = ibv_alloc_pd(ctx->context);
	if (!ctx->pd) {
		fprintf(stderr, "Couldn't allocate PD\n");
		return NULL;
	}

	// We don't really want IBV_ACCESS_LOCAL_WRITE, but the IB spec says:
	// The Consumer is not allowed to assign Remote Write or Remote Atomic to
	// a Memory Region that has not been assigned Local Write.
	ctx->mr = ibv_reg_mr(ctx->pd, ctx->buf, BUFF_SIZE(size) * 2 * user_parm->num_of_qps,IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE);
	if (!ctx->mr) {
		fprintf(stderr, "Couldn't allocate MR\n");
		return NULL;
	}

	// Create the CQ using ctx_cq_create from perftest_resources.
	ctx->cq = ctx_cq_create(ctx->context,NULL,user_parm);
	if (!ctx->cq) {
		fprintf(stderr, "Couldn't create CQ\n");
		return NULL;
	}


	for (counter = 0 ; counter < user_parm->num_of_qps ; counter++) {

		ctx->qp[counter] = ctx_qp_create(ctx->pd,ctx->cq,ctx->cq,user_parm);
		if (!ctx->qp[counter])  {
			fprintf(stderr, "Couldn't create QP\n");
			return NULL;
		}

		if (ctx_modify_qp_to_init(ctx->qp[counter],user_parm)) {
			fprintf(stderr, "Failed to modify QP to INIT\n");
			return NULL;
		}
	}
	return ctx;
}
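
ALLOCATE() is used throughout these examples to heap-allocate the per-QP arrays. A minimal sketch of such a macro, assuming the exit-on-failure behavior the callers rely on (none of them check the pointers afterwards); the real macro in perftest_resources.h may differ:

#include <stdio.h>
#include <stdlib.h>

/* Assumed sketch: allocate `size` elements of `type` into `var`, aborting the
 * test on allocation failure so callers never have to check the pointer. */
#define ALLOCATE(var, type, size)                                            \
	do {                                                                 \
		if (((var) = (type *) malloc(sizeof(type) * (size))) == NULL) { \
			fprintf(stderr, "Cannot allocate %s\n", #var);       \
			exit(1);                                             \
		}                                                            \
	} while (0)
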
Example #6
0
static struct pingpong_context *pp_init_ctx(struct ibv_device *ib_dev,int size,
										    struct perftest_parameters *user_parm) {

	struct pingpong_context *ctx;

	ALLOCATE(ctx,struct pingpong_context,1);
	
	ctx->size     = size;
	ctx->tx_depth = user_parm->tx_depth;

	ctx->buf = memalign(page_size, BUFF_SIZE(size));
	if (!ctx->buf) {
		fprintf(stderr, "Couldn't allocate work buf.\n");
		return NULL;
	}

	memset(ctx->buf, 0, BUFF_SIZE(size));

	ctx->context = ibv_open_device(ib_dev);
	if (!ctx->context) {
		fprintf(stderr, "Couldn't get context for %s\n",ibv_get_device_name(ib_dev));
		return NULL;
	}

	// Find the link type and configure the HCA accordingly.
	if (ctx_set_link_layer(ctx->context,user_parm)) {
		fprintf(stderr, " Couldn't set the link layer\n");
		return NULL;
	}

	// Configure the link MTU according to the user's request or the active MTU.
	if (ctx_set_mtu(ctx->context,user_parm)) {
		fprintf(stderr, "Couldn't set the link layer\n");
		return NULL;
	}

	if (user_parm->use_event) {
		ctx->channel = ibv_create_comp_channel(ctx->context);
		if (!ctx->channel) {
			fprintf(stderr, "Couldn't create completion channel\n");
			return NULL;
		}
	} else
		ctx->channel = NULL;

	ctx->pd = ibv_alloc_pd(ctx->context);
	if (!ctx->pd) {
		fprintf(stderr, "Couldn't allocate PD\n");
		return NULL;
	}

	ctx->mr = ibv_reg_mr(ctx->pd, ctx->buf,BUFF_SIZE(size),IBV_ACCESS_REMOTE_WRITE | 
														   IBV_ACCESS_LOCAL_WRITE  | 
														   IBV_ACCESS_REMOTE_READ);
	if (!ctx->mr) {
		fprintf(stderr, "Couldn't allocate MR\n");
		return NULL;
	}

	// Create the CQ using ctx_cq_create from perftest_resources.
	ctx->cq = ctx_cq_create(ctx->context,ctx->channel,user_parm);
	if (!ctx->cq) {
		fprintf(stderr, "Couldn't create CQ\n");
		return NULL;
	}

	ctx->qp = ctx_qp_create(ctx->pd,ctx->cq,ctx->cq,user_parm);
	if (!ctx->qp)  {
		fprintf(stderr, "Couldn't create QP\n");
		return NULL;
	}

	if (ctx_modify_qp_to_init(ctx->qp,user_parm)) {
		fprintf(stderr, "Failed to modify QP to INIT\n");
		return NULL;
	}
	return ctx;
}
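
When user_parm->use_event is set, the completion channel created above is meant to replace busy polling: the CQ is armed, the thread blocks on the channel, and only then polls. A hedged sketch of that flow using the standard libibverbs calls, with error handling trimmed; how the surrounding test wires this up is an assumption here, and the helper name is illustrative:

#include <infiniband/verbs.h>

/* Sketch: block until the CQ raises a completion event, then poll it once.
 * Pass the cq and channel created in pp_init_ctx() above. */
static int wait_for_completion(struct ibv_comp_channel *channel,
			       struct ibv_cq *cq, struct ibv_wc *wc)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;

	if (ibv_req_notify_cq(cq, 0))                          /* arm the CQ */
		return -1;
	if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))        /* block for event */
		return -1;
	ibv_ack_cq_events(ev_cq, 1);                           /* acknowledge it */
	if (ibv_req_notify_cq(ev_cq, 0))                       /* re-arm before polling */
		return -1;
	return ibv_poll_cq(ev_cq, 1, wc);                      /* #completions or <0 */
}
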