/**
 * Netmod initialization entry point for the nemesis IB (gen2) module.
 *
 * Runs the full InfiniBand bring-up sequence in order: process info,
 * connection bookkeeping, control parameters, HCA init, post-HCA control
 * parameters, default then user-supplied tuning parameters, init-info
 * exchange buffers, startup ring, vbuf lock (SRQ mode only), port opening,
 * QP setup, connection info exchange/establishment, registration cache
 * (unless PTMALLOC is disabled), RDMA buffers, optional SRQ, the channel
 * manager, and finally releases the temporary connection-setup info.
 * Every failure is fatal and jumps to fn_fail.
 *
 * \param pg_p          process group pointer
 * \param pg_rank       this process's rank within the process group
 * \param bc_val_p      business-card buffer (unused by this init path)
 * \param val_max_sz_p  remaining business-card space (unused)
 *
 * \return MPI_SUCCESS on success, a fatal MPI error code otherwise
 */
int MPID_nem_ib_init (MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p,
                      int *val_max_sz_p)
{
    int mpi_errno = MPI_SUCCESS;
    MPIDI_STATE_DECL(MPID_STATE_MPID_IB_INIT);
    MPIDI_FUNC_ENTER(MPID_STATE_MPID_IB_INIT);

    /* Make sure that our private fields in vc fit into the area provided. */
    MPIU_Assert(sizeof(MPID_nem_ib_vc_area) <= MPID_NEM_VC_NETMOD_AREA_LEN);

    /* Allocate and initialize conn mgmt related info */
    mpi_errno = MPID_nem_ib_init_process_info(pg_rank, pg_p);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to init process info");
    }

    mpi_errno = MPID_nem_ib_init_connection(pg_rank, pg_p->size);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to init conn info");
    }

    mpi_errno = MPID_nem_ib_get_control_params();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to get control params");
    }

    /* Open and init all HCA's for communication */
    mpi_errno = MPID_nem_ib_init_hca();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to init HCA");
    }

    mpi_errno = MPID_nem_ib_get_control_params_after_hcainit();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to get control params after hca_init");
    }

    /* Set default parameters. */
    mpi_errno = MPID_nem_ib_set_default_params();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to set def params");
    }

    /* Get user defined parameters.  User parameters override defaults. */
    mpi_errno = MPID_nem_ib_get_user_params();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to get user params");
    }

    /* init process_init_info for communication info exchange */
    mpi_errno = MPID_nem_ib_alloc_process_init_info();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to init process_init_info");
    }

    mpi_errno = MPID_nem_ib_setup_startup_ring(pg_p, pg_rank);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to setup startup ring");
    }

    /* The vbuf lock is only needed when a shared receive queue is in use. */
    if (process_info.has_srq) {
        mpi_errno = init_vbuf_lock();
        if (mpi_errno) {
            MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                    "**fail %s", "Failed to init vbuf lock");
        }
    }

    mpi_errno = MPID_nem_ib_open_ports();
    if (mpi_errno != MPI_SUCCESS) {
        /* NOTE: message previously said "Failed to init HCA" (copy-paste
         * from the init_hca check above); corrected to name this step. */
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to open HCA ports");
    }

    /* Setup QP's and other things for communication */
    mpi_errno = MPID_nem_ib_setup_conn(pg_p);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to setup conn");
    }

    /* Exchange conn info between all processes */
    mpi_errno = MPID_nem_ib_exchange_conn(pg_p, pg_rank);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to exchange conn info");
    }

#if !defined(DISABLE_PTMALLOC)
    /* With PTMALLOC interception available we can keep registrations cached
     * lazily; otherwise fall back to disabling malloc trimming/mmap so that
     * registered memory is never returned to the OS behind our back. */
    if (!mvapich2_minit()) {
        process_info.has_lazy_mem_unregister = 1;

        /* Initialize the registration cache */
        mpi_errno = dreg_init();
        if (mpi_errno != MPI_SUCCESS) {
            MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                    "**fail %s", "Failed to initialize registration cache");
        }
    }
#else /* !defined(DISABLE_PTMALLOC) */
    mallopt(M_TRIM_THRESHOLD, -1);
    mallopt(M_MMAP_MAX, 0);
    process_info.has_lazy_mem_unregister = 0;
#endif /* !defined(DISABLE_PTMALLOC) */

    /* Allocate RDMA Buffers */
    mpi_errno = MPID_nem_ib_allocate_memory(pg_rank, pg_p->size);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to allocate memory");
    }

    if (process_info.has_srq) {
        mpi_errno = MPID_nem_ib_allocate_srq();
        if (mpi_errno != MPI_SUCCESS) {
            MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                    "**fail %s", "Failed to allocate memory for srq");
        }
    }

    /* establish conn info between all processes */
    mpi_errno = MPID_nem_ib_establish_conn();
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to establish conn");
    }

    /* init channel manager */
    /* Defined in ib_channel_manager.c, need to declare in
     * ib_channel_manager.h */
    mpi_errno = MPIDI_nem_ib_init_cmanager(pg_rank, pg_p->size);
    if (mpi_errno != MPI_SUCCESS) {
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to init cmanager");
    }

    /* Free conn mgmt related info */
    mpi_errno = MPID_nem_ib_free_conn_info(pg_p->size);
    if (mpi_errno != MPI_SUCCESS) {
        /* NOTE: message previously said "Failed to init conn info"
         * (copy-paste from the init_connection check); corrected. */
        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_INTERN, "**fail",
                "**fail %s", "Failed to free conn info");
    }

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPID_IB_INIT);
    return mpi_errno;

fn_fail:
    goto fn_exit;
}
/*
 * CH3 channel initialization for the MRAIL/gen2 device.
 *
 * Selects the connection-management strategy (on-demand, all-to-all, or
 * RDMA-CM) from the process-group size, environment variables and compile
 * time features, then initializes the progress engine, the RDMA path (unless
 * the job is SMP-only), optional checkpoint/restart support, the shared
 * memory channel, and finally the per-VC eager message-size limits.
 *
 * has_parent - nonzero when spawned via dynamic process management (unused
 *              directly here)
 * pg         - process group
 * pg_rank    - this process's rank in pg
 *
 * Returns MPI_SUCCESS or an MPI error code.
 */
int MPIDI_CH3_Init(int has_parent, MPIDI_PG_t * pg, int pg_rank)
{
    int mpi_errno = MPI_SUCCESS;
    int pg_size, threshold, dpm = 0, p;
    char *dpm_str, *value, *conn_info = NULL;
    MPIDI_VC_t *vc;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3_INIT);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3_INIT);

    /* The close-packet entry of the packet-size table must agree with the
     * actual struct size, or packet parsing would be broken. */
    if (MPIDI_CH3_Pkt_size_index[MPIDI_CH3_PKT_CLOSE] !=
        sizeof (MPIDI_CH3_Pkt_close_t)) {
        MPIU_ERR_SETFATALANDJUMP1( mpi_errno, MPI_ERR_OTHER, "**fail",
                "**fail %s", "Failed sanity check! Packet size table mismatch");
    }

    pg_size = MPIDI_PG_Get_size(pg);

    /*Determine to use which connection management*/
    threshold = MPIDI_CH3I_CM_DEFAULT_ON_DEMAND_THRESHOLD;

    /*check ON_DEMAND_THRESHOLD*/
    value = getenv("MV2_ON_DEMAND_THRESHOLD");
    if (value) {
        threshold = atoi(value);
    }

    /* Dynamic process management support; forces on-demand connections and
     * disables CPU affinity (spawned jobs may share cores). */
    dpm_str = getenv("MV2_SUPPORT_DPM");
    if (dpm_str) {
        dpm = !!atoi(dpm_str);
    }
    MPIDI_CH3I_Process.has_dpm = dpm;
    if(MPIDI_CH3I_Process.has_dpm) {
        setenv("MV2_ENABLE_AFFINITY", "0", 1);
    }

#ifdef _ENABLE_XRC_
    value = getenv ("MV2_USE_XRC");
    if (value) {
        USE_XRC = atoi(value);
        if (USE_XRC) {
            /* Enable on-demand */
            threshold = 1;
        }
    }
#endif /* _ENABLE_XRC_ */

#ifdef _ENABLE_UD_
    /* Hybrid UD/RC transport: only worthwhile above a job-size threshold. */
    if ((value = getenv("MV2_HYBRID_ENABLE_THRESHOLD")) != NULL) {
        rdma_hybrid_enable_threshold = atoi(value);
    }
    if ((value = getenv("MV2_USE_UD_HYBRID")) != NULL) {
        rdma_enable_hybrid = atoi(value);
    }
    if (pg_size < rdma_hybrid_enable_threshold) {
        rdma_enable_hybrid = 0;
    }
#endif

    /* Large jobs, DPM, XRC and UD-hybrid all require on-demand connection
     * establishment; small static jobs use the all-to-all setup. */
    if (pg_size > threshold || dpm
#ifdef _ENABLE_XRC_
            || USE_XRC
#endif /* _ENABLE_XRC_ */
#ifdef _ENABLE_UD_
            || rdma_enable_hybrid
#endif
            ) {
        MPIDI_CH3I_Process.cm_type = MPIDI_CH3I_CM_ON_DEMAND;
        MPIDI_CH3I_Process.num_conn = 0;
    } else {
        MPIDI_CH3I_Process.cm_type = MPIDI_CH3I_CM_BASIC_ALL2ALL;
    }

#if defined(RDMA_CM)
    /* RDMA CM (or iWARP mode) overrides the choice above, but is
     * incompatible with dynamic process management. */
    if (((value = getenv("MV2_USE_RDMA_CM")) != NULL
            || (value = getenv("MV2_USE_IWARP_MODE")) != NULL)
            && atoi(value) && ! dpm) {
        MPIDI_CH3I_Process.cm_type = MPIDI_CH3I_CM_RDMA_CM;
    } else {
        rdma_cm_get_hca_type(&MPIDI_CH3I_RDMA_Process);
    }
#endif /* defined(RDMA_CM) */

    MPIDI_PG_GetConnKVSname(&pg->ch.kvs_name);

#if defined(CKPT)
#if defined(RDMA_CM)
    if (MPIDI_CH3I_Process.cm_type == MPIDI_CH3I_CM_RDMA_CM) {
        MPIU_Error_printf("Error: Checkpointing does not work with RDMA CM.\n"
                "Please configure and compile MVAPICH2 with checkpointing disabled "
                "or without support for RDMA CM.\n");
        MPIU_ERR_SETFATALANDJUMP(mpi_errno, MPI_ERR_OTHER, "**fail");
    }
#endif /* defined(RDMA_CM) */

    // Always use CM_ON_DEMAND for Checkpoint/Restart and Migration
    MPIDI_CH3I_Process.cm_type = MPIDI_CH3I_CM_ON_DEMAND;
#endif /* defined(CKPT) */

#ifdef _ENABLE_UD_
    if (rdma_enable_hybrid) {
        MPIU_Assert(MPIDI_CH3I_Process.cm_type == MPIDI_CH3I_CM_ON_DEMAND);
    }
#endif

    /* save my vc_ptr for easy access */
    MPIDI_PG_Get_vc(pg, pg_rank, &MPIDI_CH3I_Process.vc);

    /* Initialize Progress Engine */
    if ((mpi_errno = MPIDI_CH3I_Progress_init())) {
        MPIU_ERR_POP(mpi_errno);
    }

    /* Check for SMP only */
    MPIDI_CH3I_set_smp_only();

    if (!SMP_ONLY) {
        switch (MPIDI_CH3I_Process.cm_type) {
            /* allocate rmda memory and set up the queues */
            case MPIDI_CH3I_CM_ON_DEMAND:
#if defined(RDMA_CM)
            case MPIDI_CH3I_CM_RDMA_CM:
#endif /* defined(RDMA_CM) */
                mpi_errno = MPIDI_CH3I_CM_Init(pg, pg_rank, &conn_info);
                if (mpi_errno != MPI_SUCCESS) {
                    MPIU_ERR_POP(mpi_errno);
                }
                break;
            default:
                /*call old init to setup all connections*/
                if ((mpi_errno = MPIDI_CH3I_RDMA_init(pg, pg_rank)) != MPI_SUCCESS) {
                    MPIU_ERR_POP(mpi_errno);
                }

                /* All vc should be connected */
                for (p = 0; p < pg_size; ++p) {
                    MPIDI_PG_Get_vc(pg, p, &vc);
                    vc->ch.state = MPIDI_CH3I_VC_STATE_IDLE;
                }
                break;
        }
    }

#if defined(CKPT)
#if defined(DISABLE_PTMALLOC)
    /* Checkpointing needs the registration cache, which needs PTMALLOC. */
    MPIU_Error_printf("Error: Checkpointing does not work without registration "
            "caching enabled.\nPlease configure and compile MVAPICH2 without checkpointing "
            " or enable registration caching.\n");
    MPIU_ERR_SETFATALANDJUMP(mpi_errno, MPI_ERR_OTHER, "**fail");
#endif /* defined(DISABLE_PTMALLOC) */

    if ((mpi_errno = MPIDI_CH3I_CR_Init(pg, pg_rank, pg_size))) {
        MPIU_ERR_POP(mpi_errno);
    }
#endif /* defined(CKPT) */

    /* set connection info for dynamic process management */
    if (conn_info && dpm) {
        mpi_errno = MPIDI_PG_SetConnInfo(pg_rank, (const char *)conn_info);
        if (mpi_errno != MPI_SUCCESS) {
            MPIU_ERR_POP(mpi_errno);
        }
    }
    MPIU_Free(conn_info);

    /* Initialize the smp channel */
    if ((mpi_errno = MPIDI_CH3I_SMP_init(pg))) {
        MPIU_ERR_POP(mpi_errno);
    }

    if (SMP_INIT) {
        for (p = 0; p < pg_size; ++p) {
            MPIDI_PG_Get_vc(pg, p, &vc);
            /* Mark the SMP VC as Idle */
            if (vc->smp.local_nodes >= 0) {
                vc->ch.state = MPIDI_CH3I_VC_STATE_IDLE;
                if (SMP_ONLY) {
                    MPIDI_CH3I_SMP_Init_VC(vc);
                }
#ifdef _ENABLE_XRC_
                VC_XST_SET (vc, XF_SMP_VC);
#endif
            }
        }
    } else {
        extern int enable_shmem_collectives;
        enable_shmem_collectives = SMP_INIT;
    }

    /* Set the eager max msg size now that we know SMP and RDMA are initialized.
     * The max message size is also set during VC initialization, but the state
     * of SMP is unknown at that time. */
    for (p = 0; p < pg_size; ++p) {
        MPIDI_PG_Get_vc(pg, p, &vc);
        vc->eager_max_msg_sz = MPIDI_CH3_EAGER_MAX_MSG_SIZE(vc);
    }

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3_INIT);
    return mpi_errno;

fn_fail:
    goto fn_exit;
}
/** * Initialize the HCAs * Look at rdma_open_hca() & rdma_iba_hca_init_noqp() in * mvapich2/trunk/src/mpid/ch3/channels/mrail/src/gen2/rdma_iba_priv.c * * Store all the HCA info in mv2_nem_dev_info_t->hca[hca_num] * * Output: * hca_list: fill it with the HCAs information * * \see hca_list */ int MPID_nem_ib_init_hca() { int mpi_errno = MPI_SUCCESS; MPIDI_STATE_DECL(MPID_STATE_MPIDI_INIT_HCA); MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_INIT_HCA); struct ibv_device *ib_dev = NULL; struct ibv_device **dev_list = NULL; int nHca; int num_devices = 0; #ifdef CRC_CHECK gen_crc_table(); #endif memset( hca_list, 0, sizeof(hca_list) ); /* Get the list of devices */ dev_list = ibv_get_device_list(&num_devices); if (dev_list==NULL) { MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %s", "No IB device found"); } if (umad_init() < 0) MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %s", "Can't init UMAD library"); /* Runtime checks */ MPIU_Assert( num_devices<=MAX_NUM_HCAS ); if ( num_devices> MAX_NUM_HCAS) { MPIU_Error_printf( "WARNING: found %d IB devices, the maximum is %d (MAX_NUM_HCAS). ", num_devices, MAX_NUM_HCAS); num_devices = MAX_NUM_HCAS; } if ( ib_hca_num_hcas > num_devices) { MPIU_Error_printf( "WARNING: user requested %d IB devices, the available number is %d. 
", ib_hca_num_hcas, num_devices); ib_hca_num_hcas = num_devices; } MPIU_DBG_MSG_P( CH3_CHANNEL, VERBOSE, "[HCA] Found %d HCAs\n", num_devices); MPIU_DBG_MSG_P( CH3_CHANNEL, VERBOSE, "[HCA] User requested %d\n", ib_hca_num_hcas); /* Retrieve information for each found device */ for (nHca = 0; nHca < ib_hca_num_hcas; nHca++) { /* Check for user choice */ if( (rdma_iba_hca[0]==0) || (strncmp(rdma_iba_hca, RDMA_IBA_NULL_HCA, 32)!=0) || (ib_hca_num_hcas > 1)) { /* User hasn't specified any HCA name, or the number of HCAs is greater then 1 */ ib_dev = dev_list[nHca]; } else { /* User specified a HCA, try to look for it */ int dev_count; dev_count = 0; while(dev_list[dev_count]) { if(!strncmp(ibv_get_device_name(dev_list[dev_count]), rdma_iba_hca, 32)) { ib_dev = dev_list[dev_count]; break; } dev_count++; } } /* Check if device has been identified */ hca_list[nHca].ib_dev = ib_dev; if (!ib_dev) { MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER, "**fail", "**fail %s", "No IB device found"); } MPIU_DBG_MSG_P( CH3_CHANNEL, VERBOSE, "[HCA] HCA device %d found\n", nHca); hca_list[nHca].nic_context = ibv_open_device(ib_dev); if (hca_list[nHca].nic_context==NULL) { MPIU_ERR_SETFATALANDJUMP2(mpi_errno, MPI_ERR_OTHER, "**fail", "%s %d", "Failed to open HCA number", nHca); } hca_list[nHca].ptag = ibv_alloc_pd(hca_list[nHca].nic_context); if (!hca_list[nHca].ptag) { MPIU_ERR_SETFATALANDJUMP2(mpi_errno, MPI_ERR_OTHER, "**fail", "%s%d", "Failed to alloc pd number ", nHca); } /* Set the hca type */ #if defined(RDMA_CM) if (process_info.use_iwarp_mode) { if ((mpi_errno = rdma_cm_get_hca_type(process_info.use_iwarp_mode, &process_info.hca_type)) != MPI_SUCCESS) { MPIU_ERR_POP(mpi_errno); } if (process_info.hca_type == CHELSIO_T3) { process_info.use_iwarp_mode = 1; } } else #endif /* defined(RDMA_CM) */ mpi_errno = get_hca_type(hca_list[nHca].ib_dev, hca_list[nHca].nic_context, &hca_list[nHca].hca_type); if (mpi_errno != MPI_SUCCESS) { fprintf(stderr, "[%s, %d] Error in get_hca_type", 
__FILE__, __LINE__ ); MPIU_ERR_POP(mpi_errno); } } if (!strncmp(rdma_iba_hca, RDMA_IBA_NULL_HCA, 32) && (ib_hca_num_hcas==1) && (num_devices > nHca) && (rdma_find_active_port(hca_list[0].nic_context, hca_list[nHca].ib_dev)==-1)) { /* Trac #376 - There are multiple rdma capable devices (num_devices) in * the system. The user has asked us to use ANY (!strncmp) ONE device * (rdma_num_hcas), and the first device does not have an active port. So * try to find some other device with an active port. */ int j; for (j = 0; dev_list[j]; j++) { ib_dev = dev_list[j]; if (ib_dev) { hca_list[0].nic_context = ibv_open_device(ib_dev); if (!hca_list[0].nic_context) { /* Go to next device */ continue; } if (rdma_find_active_port(hca_list[0].nic_context, ib_dev)!=-1) { hca_list[0].ib_dev = ib_dev; hca_list[0].ptag = ibv_alloc_pd(hca_list[0].nic_context); if (!hca_list[0].ptag) { MPIU_ERR_SETFATALANDJUMP2(mpi_errno, MPI_ERR_OTHER, "**fail", "%s%d", "Failed to alloc pd number ", nHca); } } } } } fn_exit: /* Clean up before exit */ if (dev_list!=NULL) ibv_free_device_list(dev_list); MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_INIT_HCA); return mpi_errno; fn_fail: goto fn_exit; }
/**
 * the first step in original MPID_nem_ib_setup_conn() function
 * open hca, create ptags and create cqs
 *
 * For every HCA selected by MPID_nem_ib_init_hca(): query device attributes,
 * detect active ports (either scanning all ports or validating the user's
 * rdma_default_port), record their LIDs (IB) or GIDs (IBoE/RoCE), and create
 * the completion queue(s) -- with a completion channel when blocking
 * progress (rdma_use_blocking) is enabled.
 *
 * NOTE(review): this definition is truncated in the visible chunk; the
 * non-blocking CQ path, the end of the per-HCA loop and the fn_exit/fn_fail
 * labels lie outside this view.
 */
int MPID_nem_ib_open_ports()
{
    int mpi_errno = MPI_SUCCESS;

    /* Infiniband Verb Structures */
    struct ibv_port_attr port_attr;
    struct ibv_device_attr dev_attr;

    int nHca; /* , curRank, rail_index ; */

    MPIDI_STATE_DECL(MPID_STATE_MPIDI_OPEN_HCA);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_OPEN_HCA);

    for (nHca = 0; nHca < ib_hca_num_hcas; nHca++) {
        if (ibv_query_device(hca_list[nHca].nic_context, &dev_attr)) {
            MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                    "**fail", "**fail %s", "Error getting HCA attributes");
        }

        /* detecting active ports */
        if (rdma_default_port < 0 || ib_hca_num_ports > 1) {
            /* No specific port requested (or several needed): scan every
             * port and keep the active ones, up to ib_hca_num_ports. */
            int nPort;
            int k = 0;
            for (nPort = 1; nPort <= RDMA_DEFAULT_MAX_PORTS; nPort ++) {
                /* A port qualifies when the query succeeds, the port is
                 * ACTIVE, and it has a LID -- or has none but we are running
                 * over Ethernet (IBoE), where LIDs do not exist. */
                if ((! ibv_query_port(hca_list[nHca].nic_context, nPort, &port_attr)) &&
                        port_attr.state == IBV_PORT_ACTIVE &&
                        (port_attr.lid || (!port_attr.lid && use_iboeth))) {
                    if (use_iboeth) {
                        /* IBoE addresses ports by GID rather than LID. */
                        if (ibv_query_gid(hca_list[nHca].nic_context,
                                    nPort, 0, &hca_list[nHca].gids[k])) {
                            /* new error information function needed */
                            MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                                    "**fail", "Failed to retrieve gid on rank %d",
                                    process_info.rank);
                        }
                        DEBUG_PRINT("[%d] %s(%d): Getting gid[%d][%d] for"
                                " port %d subnet_prefix = %llx,"
                                " intf_id = %llx\r\n",
                                process_info.rank, __FUNCTION__, __LINE__,
                                nHca, k, k,
                                hca_list[nHca].gids[k].global.subnet_prefix,
                                hca_list[nHca].gids[k].global.interface_id);
                    } else {
                        hca_list[nHca].lids[k] = port_attr.lid;
                    }
                    hca_list[nHca].ports[k++] = nPort;

                    if (check_attrs(&port_attr, &dev_attr)) {
                        MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                                "**fail", "**fail %s",
                                "Attributes failed sanity check");
                    }
                }
            }
            /* Fewer active ports than rails requested is fatal. */
            if (k < ib_hca_num_ports) {
                MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                        "**activeports", "**activeports %d", ib_hca_num_ports);
            }
        } else {
            /* The user pinned a specific port: it must exist, be ACTIVE and
             * (for plain IB) have a LID. */
            if(ibv_query_port(hca_list[nHca].nic_context,
                        rdma_default_port, &port_attr)
                    || (!port_attr.lid && !use_iboeth)
                    || (port_attr.state != IBV_PORT_ACTIVE)) {
                MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                        "**portquery", "**portquery %d", rdma_default_port);
            }

            hca_list[nHca].ports[0] = rdma_default_port;

            if (use_iboeth) {
                if (ibv_query_gid(hca_list[nHca].nic_context,
                            0, 0, &hca_list[nHca].gids[0])) {
                    /* new error function needed */
                    MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                            "**fail", "Failed to retrieve gid on rank %d",
                            process_info.rank);
                }

                if (check_attrs(&port_attr, &dev_attr)) {
                    MPIU_ERR_SETFATALANDJUMP1(mpi_errno, MPI_ERR_OTHER,
                            "**fail", "**fail %s",
                            "Attributes failed sanity check");
                }
            } else {
                hca_list[nHca].lids[0] = port_attr.lid;
            }
        }

        if (rdma_use_blocking) {
            /* Blocking progress: CQ events are delivered through a
             * completion channel; a single combined CQ is used and armed
             * for notification. */
            hca_list[nHca].comp_channel =
                ibv_create_comp_channel(hca_list[nHca].nic_context);

            if (!hca_list[nHca].comp_channel) {
                MPIU_ERR_SETFATALANDSTMT1(mpi_errno, MPI_ERR_OTHER,
                        goto fn_fail, "**fail", "**fail %s",
                        "cannot create completion channel");
            }

            hca_list[nHca].send_cq_hndl = NULL;
            hca_list[nHca].recv_cq_hndl = NULL;
            hca_list[nHca].cq_hndl = ibv_create_cq(hca_list[nHca].nic_context,
                    rdma_default_max_cq_size, NULL,
                    hca_list[nHca].comp_channel, 0);
            if (!hca_list[nHca].cq_hndl) {
                MPIU_ERR_SETFATALANDSTMT1(mpi_errno, MPI_ERR_OTHER,
                        goto fn_fail, "**fail", "**fail %s",
                        "cannot create cq");
            }

            if (ibv_req_notify_cq(hca_list[nHca].cq_hndl, 0)) {
                MPIU_ERR_SETFATALANDSTMT1(mpi_errno, MPI_ERR_OTHER,
                        goto fn_fail, "**fail", "**fail %s",
                        "cannot request cq notification");
            }
/*
 * Assign printable names to all predefined MPI datatypes.
 *
 * Thread-safe one-shot initializer: ensures the builtin datatype objects are
 * filled in, then copies the canonical name of every predefined type (and the
 * minloc/maxloc pair types) into its datatype object.  Types disabled at
 * configure time (MPI_DATATYPE_NULL entries) are skipped.
 *
 * Returns MPI_SUCCESS, or a fatal MPI error code on an inconsistent builtin
 * datatype table.
 *
 * NOTE: the fn_fail label targeted by the error macros intentionally lives
 * *inside* the MPIU_THREADSAFE_INIT block so that the block is exited (and
 * its lock released) on the error path as well.
 */
int MPIR_Datatype_init_names(void)
{
#ifdef HAVE_ERROR_CHECKING
    static const char FCNAME[] = "MPIR_Datatype_init_names";
#endif
    int mpi_errno = MPI_SUCCESS;
    int i;
    MPID_Datatype *datatype_ptr = NULL;
    MPIU_THREADSAFE_INIT_DECL(needsInit);

    if (needsInit) {
        MPIU_THREADSAFE_INIT_BLOCK_BEGIN(needsInit);

        /* Make sure that the basics have datatype structures allocated
         * and filled in for them.  They are just integers prior to this
         * call.
         */
        mpi_errno = MPIR_Datatype_builtin_fillin();
        if (mpi_errno != MPI_SUCCESS) {
            MPIU_ERR_POPFATAL(mpi_errno);
        }

        /* For each predefined type, ensure that there is a corresponding
           object and that the object's name is set */
        for (i=0; mpi_names[i].name != 0; i++) {
            /* The size-specific types may be DATATYPE_NULL, as might be those
               based on 'long long' and 'long double' if those types were
               disabled at configure time. */
            if (mpi_names[i].dtype == MPI_DATATYPE_NULL) continue;

            MPID_Datatype_get_ptr(mpi_names[i].dtype, datatype_ptr);

            /* Sanity check: the pointer must fall within the builtin
             * datatype array.  NOTE(review): the upper bound uses '>' with
             * builtin + N, which admits the one-past-the-end address; '>='
             * looks intended -- confirm against MPID_DATATYPE_N_BUILTIN's
             * definition before changing. */
            if (datatype_ptr < MPID_Datatype_builtin ||
                    datatype_ptr > MPID_Datatype_builtin + MPID_DATATYPE_N_BUILTIN)
                {
                    MPIU_ERR_SETFATALANDJUMP1(mpi_errno,MPI_ERR_INTERN,
                            "**typeinitbadmem","**typeinitbadmem %d", i);
                }
            if (!datatype_ptr) {
                MPIU_ERR_SETFATALANDJUMP1(mpi_errno,MPI_ERR_INTERN,
                        "**typeinitfail", "**typeinitfail %d", i - 1);
            }

            MPIU_DBG_MSG_FMT(DATATYPE,VERBOSE,(MPIU_DBG_FDEST,
                    "mpi_names[%d].name = %p", i, mpi_names[i].name));

            MPIU_Strncpy(datatype_ptr->name, mpi_names[i].name,
                    MPI_MAX_OBJECT_NAME);
        }
        /* Handle the minloc/maxloc types */
        for (i=0; mpi_maxloc_names[i].name != 0; i++) {
            /* types based on 'long long' and 'long double' may be disabled at
               configure time, and their values set to MPI_DATATYPE_NULL.  skip
               those types. */
            if (mpi_maxloc_names[i].dtype == MPI_DATATYPE_NULL) continue;

            MPID_Datatype_get_ptr(mpi_maxloc_names[i].dtype,
                    datatype_ptr);
            if (!datatype_ptr) {
                MPIU_ERR_SETFATALANDJUMP(mpi_errno,MPI_ERR_INTERN,
                        "**typeinitminmaxloc");
            }
            MPIU_Strncpy(datatype_ptr->name, mpi_maxloc_names[i].name,
                    MPI_MAX_OBJECT_NAME);
        }
        MPIU_THREADSAFE_INIT_CLEAR(needsInit);
fn_fail:;
        MPIU_THREADSAFE_INIT_BLOCK_END(needsInit);
    }

    return mpi_errno;
}