Example #1
int ompi_mtl_mx_module_init(void) {
    mx_param_t mx_param;
    mx_return_t mx_return;
    int32_t nic, ep;
    
    /* setup params */
    mx_param.key = MX_PARAM_UNEXP_QUEUE_MAX;
    mx_param.val.unexp_queue_max = ompi_mtl_mx.mx_unexp_queue_max;
    
    /* get a local endpoint */
    nic = ompi_mtl_mx.mx_board_num;
    if (nic < 0) {
      nic = MX_ANY_NIC;
    }
    ep = ompi_mtl_mx.mx_endpoint_num;
    if (ep < 0) {
      ep = MX_ANY_ENDPOINT;
    }
    mx_return = mx_open_endpoint(nic,
                                 ep,
                                 ompi_mtl_mx.mx_filter,
                                 &mx_param,   /* apply the queue-limit param set above */
                                 1,
                                 &ompi_mtl_mx.mx_endpoint);
    
    if(mx_return != MX_SUCCESS) { 
        opal_output(ompi_mtl_base_framework.framework_output, "Error in mx_open_endpoint (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }
    
    /* get the endpoint address */
    mx_return = mx_get_endpoint_addr( ompi_mtl_mx.mx_endpoint, 
                                      &ompi_mtl_mx.mx_endpoint_addr); 
    
    if(mx_return != MX_SUCCESS) { 
        opal_output(ompi_mtl_base_framework.framework_output, "Error in mx_get_endpoint_addr (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }
    
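    /* split the opaque endpoint address into the (nic_id, endpoint_id)
       pair that will be published to other processes */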
    mx_return = mx_decompose_endpoint_addr( ompi_mtl_mx.mx_endpoint_addr, &(ompi_mtl_mx.mx_addr.nic_id),
                                            &(ompi_mtl_mx.mx_addr.endpoint_id) );
    
    if(mx_return != MX_SUCCESS) { 
        opal_output(ompi_mtl_base_framework.framework_output, "Error in mx_decompose_endpoint_addr (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }
    opal_output_verbose(10, ompi_mtl_base_framework.framework_output,
                        "mtl:mx: local nic %d, endpoint %d, got nic %d, ep %d\n",
                        nic, ep,
                        (int)ompi_mtl_mx.mx_addr.nic_id,
                        (int)ompi_mtl_mx.mx_addr.endpoint_id);

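    /* publish our address info via the modex so peers can reach this endpoint */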
    ompi_modex_send(&mca_mtl_mx_component.super.mtl_version,
                    &ompi_mtl_mx.mx_addr,
                    sizeof(mca_mtl_mx_addr_t));
    
    /* register the mtl mx progress function */
    opal_progress_register(ompi_mtl_mx_progress);
    
    return OMPI_SUCCESS; 
}
Example #2
int ompi_mtl_mx_module_init(void) {
    mx_param_t mx_param;
    mx_return_t mx_return;

    /* setup params */
    mx_param.key = MX_PARAM_UNEXP_QUEUE_MAX;
    mx_param.val.unexp_queue_max = ompi_mtl_mx.mx_unexp_queue_max;

    /* get a local endpoint */
    mx_return = mx_open_endpoint(MX_ANY_NIC,
                                 MX_ANY_ENDPOINT,
                                 ompi_mtl_mx.mx_filter,
                                 &mx_param,   /* apply the queue-limit param set above */
                                 1,
                                 &ompi_mtl_mx.mx_endpoint);

    if(mx_return != MX_SUCCESS) {
        opal_output(ompi_mtl_base_output, "Error in mx_open_endpoint (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }
    
    /* get the endpoint address */
    mx_return = mx_get_endpoint_addr( ompi_mtl_mx.mx_endpoint, 
                                      &ompi_mtl_mx.mx_endpoint_addr); 
    
    if(mx_return != MX_SUCCESS) { 
        opal_output(ompi_mtl_base_output, "Error in mx_get_endpoint_addr (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }
    
    mx_return = mx_decompose_endpoint_addr( ompi_mtl_mx.mx_endpoint_addr, &(ompi_mtl_mx.mx_addr.nic_id),
                                            &(ompi_mtl_mx.mx_addr.endpoint_id) );
    
    if(mx_return != MX_SUCCESS) { 
        opal_output(ompi_mtl_base_output, "Error in mx_decompose_endpoint_addr (error %s)\n", mx_strerror(mx_return));
        return OMPI_ERROR;
    }

    ompi_modex_send(&mca_mtl_mx_component.super.mtl_version,
                    &ompi_mtl_mx.mx_addr,
                    sizeof(mca_mtl_mx_addr_t));
    
    /* register the mtl mx progress function */
    opal_progress_register(ompi_mtl_mx_progress);

    return OMPI_SUCCESS;
}
Example #3
static int init_mx( MPIDI_PG_t *pg_p )
{
   mx_endpoint_addr_t local_endpoint_addr;
   mx_return_t        ret;
   mx_param_t         param;
   int                mpi_errno = MPI_SUCCESS;
   int                r;

   r = MPL_putenv("MX_DISABLE_SHARED=1");
   MPIU_ERR_CHKANDJUMP(r, mpi_errno, MPI_ERR_OTHER, "**putenv");
   r = MPL_putenv("MX_DISABLE_SELF=1");
   MPIU_ERR_CHKANDJUMP(r, mpi_errno, MPI_ERR_OTHER, "**putenv");

   ret = mx_init();
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_init", "**mx_init %s", mx_strerror (ret));
   
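   /* make MX return error codes instead of invoking a fatal error handler */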
   mx_set_error_handler(MX_ERRORS_RETURN);

   /*
   ret = mx_get_info(NULL, MX_NIC_COUNT, NULL, 0, &nic_count, sizeof(int));
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_get_info", "**mx_get_info %s", mx_strerror (ret));
   
   count = ++nic_count;
   mx_nics = (uint64_t *)MPIU_Malloc(count*sizeof(uint64_t));
   ret = mx_get_info(NULL, MX_NIC_IDS, NULL, 0, mx_nics, count*sizeof(uint64_t));
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_get_info", "**mx_get_info %s", mx_strerror (ret));
    
    do{	     
      ret = mx_nic_id_to_board_number(mx_nics[index],&mx_board_num);
      index++;
   }while(ret != MX_SUCCESS);
   */
#ifndef USE_CTXT_AS_MARK
   param.key = MX_PARAM_CONTEXT_ID;
   param.val.context_id.bits  = NEM_MX_MATCHING_BITS - SHIFT_TYPE;
   param.val.context_id.shift = SHIFT_TYPE;
   ret = mx_open_endpoint(MX_ANY_NIC,MX_ANY_ENDPOINT,MPID_NEM_MX_FILTER,&param,1,&MPID_nem_mx_local_endpoint);
#else
   ret = mx_open_endpoint(MX_ANY_NIC,MX_ANY_ENDPOINT,MPID_NEM_MX_FILTER,NULL,0,&MPID_nem_mx_local_endpoint);
#endif
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_open_endpoint", "**mx_open_endpoint %s", mx_strerror (ret));
      
   ret = mx_get_endpoint_addr(MPID_nem_mx_local_endpoint,&local_endpoint_addr);
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_get_endpoint_addr", "**mx_get_endpoint_addr %s", mx_strerror (ret));   
   
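   /* split the opaque endpoint address into the local nic and endpoint ids */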
   ret = mx_decompose_endpoint_addr(local_endpoint_addr,&MPID_nem_mx_local_nic_id,&MPID_nem_mx_local_endpoint_id);
   MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_decompose_endpoint_addr", "**mx_decompose_endpoint_addr %s", mx_strerror (ret));
   
 fn_exit:
   return mpi_errno;
 fn_fail:
   goto fn_exit;
}
Example #4
File: mx_stream.c Project: ananos/xen2mx
static inline void
receiver(mx_endpoint_t ep, int blocking, uint32_t match_val, uint32_t filter)
{
	int count, len, iter, cur_req, num_req;
	mx_status_t stat;	
	mx_request_t req[NUM_RREQ];
	mx_request_t sreq;
	mx_segment_t seg;
	uint32_t result, usec;
	struct timeval start_time, end_time;
	double bw, pkts_per_sec;
	char *buffer;
	struct metadata info;
	int bothways;
#if MX_THREAD_SAFE
	struct mx_thread_arg args;
	MX_THREAD_T thread;
#endif
	uint64_t nic;
	uint32_t eid;

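	/* the first message from the sender carries the test metadata
	   (len, iter, verify, bothways) */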
	seg.segment_ptr = &info;
	seg.segment_length = sizeof(info);
	mx_irecv(ep, &seg, 1, match_val, MX_MATCH_MASK_NONE, 0, &req[0]);
	/* wait for the receive to complete */
	mx_test_or_wait(blocking, ep, &req[0], MX_INFINITE, &stat, &result);
	if (!result) {
		fprintf(stderr, "mx_wait failed\n");
		exit(1);
	}
	if (stat.code != MX_STATUS_SUCCESS) {
		fprintf(stderr, "irecv failed with status %s\n", mx_strstatus(stat.code));
		exit(1);
	}
	if (filter != ~0) {
		/* filter == ~0 means recv threads on master */
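		/* recover the sender's nic and endpoint ids, then connect back to it */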
		mx_decompose_endpoint_addr(stat.source, &nic, &eid);
		mx_connect(ep, nic, eid, filter, MX_INFINITE, &stat.source);
	}
	len = ntohl(info.len);
	iter = ntohl(info.iter);
	Verify = ntohl(info.verify);
	bothways = ntohl(info.bothways);
	if (do_verbose)
		printf("Starting test: len = %d, iter = %d\n", len, iter);
	if (do_verbose && Verify) {
		printf("Verifying results\n");
	}
	buffer = malloc(len * NUM_RREQ);
	if (buffer == NULL) {
		fprintf(stderr, "Can't allocate buffers\n");
		exit(1);
	}

	if (bothways) {
#if MX_THREAD_SAFE
		args.ep = ep;
		args.dest = stat.source;
		args.iter = iter;
		args.len = len;
		args.blocking = blocking;
		num_threads++;
		MX_THREAD_CREATE(&thread, &start_send_thread, &args);
#else
		fprintf(stderr,"bothways not supported\n");
		exit(1);
#endif
	}


	/* pre-post our receives */
	num_req = NUM_RREQ;
	if (num_req > iter)
		num_req = iter;
	for (cur_req = 0; cur_req < num_req; cur_req++) {
		seg.segment_ptr = &buffer[cur_req * len];
		seg.segment_length = len;
		mx_irecv(ep, &seg, 1, match_val, MX_MATCH_MASK_NONE, 0, 
			 &req[cur_req]);
	}

	MX_MUTEX_LOCK(&stream_mutex);
	++threads_running;
	MX_MUTEX_UNLOCK(&stream_mutex);
	while(threads_running != num_threads)
		/* spin */;

#if DO_HANDSHAKE
	/* post a send to let the sender know we are ready */
	seg.segment_ptr = &info;
	seg.segment_length = sizeof(info);
	sreq = 0;
	mx_isend(ep, &seg, 1, stat.source, match_val, NULL, &sreq);
	mx_test_or_wait(blocking, ep, &sreq, MX_INFINITE, &stat, &result);
	if (!result) {
		fprintf(stderr, "mx_wait failed\n");
		exit(1);
	}
	if (stat.code != MX_STATUS_SUCCESS) {
		fprintf(stderr, "isend failed with status %s\n", mx_strstatus(stat.code));
		exit(1);
	}
#endif
	/* start the test */
	gettimeofday(&start_time, NULL);
	for (count = 0; count < iter; count++) {
		/* wait for the receive to complete */
		cur_req = count & (NUM_RREQ - 1);
		
		mx_test_or_wait(blocking, ep, &req[cur_req], 
				MX_INFINITE, &stat, &result);
		if (!result) {
			fprintf(stderr, "mx_wait failed\n");
			exit(1);
		}
		if (stat.code != MX_STATUS_SUCCESS) {
			fprintf(stderr, "irecv failed with status %s\n", mx_strstatus(stat.code));
			exit(1);
		}
		if (stat.xfer_length != len) {
			fprintf(stderr, "bad len %d != %d\n", stat.xfer_length, len);
			exit(1);
		}
		/* hack since mx_cancel does not work */
		if ((count + NUM_RREQ) > iter)
			continue;
		
		seg.segment_ptr = &buffer[cur_req * len];
		seg.segment_length = len;
		if (Verify)
			mx_check_buffer(seg.segment_ptr, len);
		mx_irecv(ep, &seg, 1, match_val, MX_MATCH_MASK_NONE, 0, 
			      &req[cur_req]);
	}
	gettimeofday(&end_time, NULL);
	usec = end_time.tv_usec - start_time.tv_usec;
	usec += (end_time.tv_sec - start_time.tv_sec) * 1000000;
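	/* bytes per microsecond == megabytes per second */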
	bw =  ((double)iter * (double)len) / (double) usec;
	pkts_per_sec = iter / ((double) usec / 1000000.0);
	global_bwinfo.bandwidth = bw;
	global_bwinfo.pkts_per_sec = pkts_per_sec;
	/* printf("%8d    %5.3f    %5.3f\n", len, bw, pkts_per_sec);*/
#if 0 /* mx_cancel assert(0)'s */
	for (cur_req = 0; cur_req < num_req; cur_req++) {
		mx_cancel(ep, &req[cur_req]);
	}
#endif

	info.usec = htonl(usec);
	seg.segment_ptr = &info;
	seg.segment_length = sizeof(info);
	sreq = 0;
	mx_isend(ep, &seg, 1, stat.source, match_val, NULL, &sreq);
	mx_test_or_wait(blocking, ep, &sreq, MX_INFINITE, &stat, &result);
	free(buffer);
#if MX_THREAD_SAFE
	if(bothways)
		MX_THREAD_JOIN(thread);
#endif
}