static void __domain_destruct(void *obj) { int ret = FI_SUCCESS; struct gnix_fid_domain *domain = (struct gnix_fid_domain *) obj; GNIX_TRACE(FI_LOG_DOMAIN, "\n"); ret = _gnix_close_cache(domain); if (ret != FI_SUCCESS) GNIX_FATAL(FI_LOG_MR, "failed to close memory registration cache\n"); ret = _gnix_notifier_close(domain->mr_cache_attr.notifier); if (ret != FI_SUCCESS) GNIX_FATAL(FI_LOG_MR, "failed to close MR notifier\n"); /* * remove from the list of cdms attached to fabric */ dlist_remove_init(&domain->list); _gnix_ref_put(domain->fabric); memset(domain, 0, sizeof *domain); free(domain); }
/* Process an incoming connection request at a listening PEP. */ static int __gnix_pep_connreq(struct gnix_fid_pep *pep, int fd) { int ret; struct gnix_pep_sock_conn *conn; struct fi_eq_cm_entry *eq_entry; int eqe_size; /* Create and initialize a new connection request. */ conn = calloc(1, sizeof(*conn)); if (!conn) { GNIX_WARN(FI_LOG_EP_CTRL, "Failed to alloc accepted socket conn\n"); return -FI_ENOMEM; } conn->fid.fclass = FI_CLASS_CONNREQ; conn->fid.context = pep; conn->sock_fd = fd; /* Pull request data from the listening socket. */ conn->bytes_read += read(fd, &conn->req, sizeof(conn->req)); if (conn->bytes_read != sizeof(conn->req)) { /* TODO Wait for more bytes. */ GNIX_FATAL(FI_LOG_EP_CTRL, "Unexpected read size\n"); } conn->req.info.src_addr = &conn->req.src_addr; conn->req.info.dest_addr = &conn->req.dest_addr; conn->req.info.tx_attr = &conn->req.tx_attr; conn->req.info.rx_attr = &conn->req.rx_attr; conn->req.info.ep_attr = &conn->req.ep_attr; conn->req.info.domain_attr = &conn->req.domain_attr; conn->req.info.fabric_attr = &conn->req.fabric_attr; conn->req.info.domain_attr->name = NULL; conn->req.info.fabric_attr->name = NULL; conn->req.info.fabric_attr->prov_name = NULL; conn->info = &conn->req.info; conn->info->handle = &conn->fid; /* Tell user of a new conn req via the EQ. */ eq_entry = (struct fi_eq_cm_entry *)conn->req.eqe_buf; eq_entry->fid = &pep->pep_fid.fid; eq_entry->info = fi_dupinfo(conn->info); eqe_size = sizeof(*eq_entry) + conn->req.cm_data_len; ret = fi_eq_write(&pep->eq->eq_fid, FI_CONNREQ, eq_entry, eqe_size, 0); if (ret != eqe_size) { GNIX_WARN(FI_LOG_EP_CTRL, "fi_eq_write failed, err: %d\n", ret); fi_freeinfo(conn->info); free(conn); return ret; } GNIX_DEBUG(FI_LOG_EP_CTRL, "Added FI_CONNREQ EQE: %p, %p\n", pep->eq, pep); return FI_SUCCESS; }
static int __udreg_init(struct gnix_fid_domain *domain) { udreg_return_t urc; udreg_cache_attr_t udreg_cache_attr = { .cache_name = {"gnix_app_cache"}, .max_entries = domain->udreg_reg_limit, .modes = UDREG_CC_MODE_USE_LARGE_PAGES, .debug_mode = 0, .debug_rank = 0, .reg_context = (void *) domain, .dreg_context = (void *) domain, .destructor_context = (void *) domain, .device_reg_func = __udreg_register, .device_dereg_func = __udreg_deregister, .destructor_callback = __udreg_cache_destructor, }; if (domain->mr_cache_attr.lazy_deregistration) udreg_cache_attr.modes |= UDREG_CC_MODE_USE_LAZY_DEREG; /* * Create a udreg cache for application memory registrations. */ urc = UDREG_CacheCreate(&udreg_cache_attr); if (urc != UDREG_RC_SUCCESS) { GNIX_FATAL(FI_LOG_MR, "Could not initialize udreg application cache, urc=%d\n", urc); } urc = UDREG_CacheAccess(udreg_cache_attr.cache_name, &domain->udreg_cache); if (urc != UDREG_RC_SUCCESS) { GNIX_FATAL(FI_LOG_MR, "Could not access udreg application cache, urc=%d", urc); } domain->mr_is_init = 1; return FI_SUCCESS; }
static int __udreg_close(struct gnix_fid_domain *domain) { udreg_return_t ret; if (domain->udreg_cache) { ret = UDREG_CacheRelease(domain->udreg_cache); if (unlikely(ret != UDREG_RC_SUCCESS)) GNIX_FATAL(FI_LOG_DOMAIN, "failed to release from " "mr cache during domain destruct, dom=%p rc=%d\n", domain, ret); ret = UDREG_CacheDestroy(domain->udreg_cache); if (unlikely(ret != UDREG_RC_SUCCESS)) GNIX_FATAL(FI_LOG_DOMAIN, "failed to destroy mr " "cache during domain destruct, dom=%p rc=%d\n", domain, ret); } return FI_SUCCESS; }
static int __cache_close(struct gnix_fid_domain *domain) { int ret; if (domain->mr_cache) { ret = _gnix_mr_cache_destroy(domain->mr_cache); if (ret != FI_SUCCESS) GNIX_FATAL(FI_LOG_DOMAIN, "failed to destroy mr cache " "during domain destruct, dom=%p ret=%d\n", domain, ret); } return FI_SUCCESS; }
/* Check for a connection response on an FI_EP_MSG. */ int _gnix_ep_progress(struct gnix_fid_ep *ep) { int ret, bytes_read; struct gnix_pep_sock_connresp resp; /* No lock, fast exit. */ if (ep->conn_state != GNIX_EP_CONNECTING) { return FI_SUCCESS; } COND_ACQUIRE(ep->requires_lock, &ep->vc_lock); if (ep->conn_state != GNIX_EP_CONNECTING) { COND_RELEASE(ep->requires_lock, &ep->vc_lock); return FI_SUCCESS; } /* Check for a connection response. */ bytes_read = read(ep->conn_fd, &resp, sizeof(resp)); if (bytes_read >= 0) { if (bytes_read == sizeof(resp)) { /* Received response. */ ret = __gnix_ep_connresp(ep, &resp); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "__gnix_pep_connreq failed, %d\n", ret); } } else { GNIX_FATAL(FI_LOG_EP_CTRL, "Unexpected read size: %d\n", bytes_read); } } else if (errno != EAGAIN) { GNIX_WARN(FI_LOG_EP_CTRL, "Read error: %s\n", strerror(errno)); } COND_RELEASE(ep->requires_lock, &ep->vc_lock); return FI_SUCCESS; }
/**
 * Create a slab from a handle and append to the slab list.
 *
 * @param[in] handle Handle to the allocator being used.
 *
 * @return FI_SUCCESS On successful slab creation.
 *
 * @return -FI_ENOMEM if failure to allocate memory for slab or bitmap.
 * @return [Unspec] if failure in alloc_bitmap. Will return error code from
 * alloc_bitmap.
 * @return [Unspec] if failure in GNI_MemRegister. Converts gni_return_t
 * status code to FI_ERRNO value.
 */
static int __create_slab(struct gnix_mbox_alloc_handle *handle)
{
	struct gnix_slab *slab;
	gni_return_t status;
	char error_buf[256];
	char *error;
	size_t total_size;
	int ret;
	int vmdh_index = -1;
	int flags = GNI_MEM_READWRITE;
	struct gnix_auth_key *info;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	slab = calloc(1, sizeof(*slab));
	if (!slab) {
		/* NOTE(review): assumes the GNU strerror_r that returns
		 * char * — TODO confirm build defines match. */
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL, "Error allocating slab: %s\n",
			  error);
		ret = -FI_ENOMEM;
		goto err_slab_calloc;
	}

	/* Whole-slab size in bytes; also advances last_offset on success. */
	total_size = handle->page_size * __page_count(handle);
	GNIX_DEBUG(FI_LOG_EP_CTRL, "total_size requested for mmap: %zu.\n",
		   total_size);

	/* Per-mbox occupancy bitmap for this slab. */
	slab->used = calloc(1, sizeof(*(slab->used)));
	if (!slab->used) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL, "Error allocating bitmap: %s\n",
			  error);
		ret = -FI_ENOMEM;
		goto err_bitmap_calloc;
	}

	/* Map the slab's backing store from the handle's fd at the next
	 * unused file offset. */
	slab->base = mmap(0, total_size, (PROT_READ | PROT_WRITE), MAP_SHARED,
			  handle->fd, handle->last_offset);
	if (slab->base == MAP_FAILED) {
		error = strerror_r(errno, error_buf, sizeof(error_buf));
		GNIX_WARN(FI_LOG_EP_CTRL, "%s\n", error);
		ret = -FI_ENOMEM;
		goto err_mmap;
	}

	ret = _gnix_alloc_bitmap(slab->used, __mbox_count(handle), NULL);
	if (ret) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Error allocating bitmap.\n");
		goto err_alloc_bitmap;
	}

	/* Registration (and the VMDH bookkeeping below) must be serialized
	 * on the NIC lock. */
	COND_ACQUIRE(handle->nic_handle->requires_lock,
		     &handle->nic_handle->lock);
	if (handle->nic_handle->using_vmdh) {
		info = _gnix_auth_key_lookup(GNIX_PROV_DEFAULT_AUTH_KEY,
					     GNIX_PROV_DEFAULT_AUTH_KEYLEN);
		assert(info);

		if (!handle->nic_handle->mdd_resources_set) {
			/* check to see if the ptag registration limit was set
			 * yet or not -- becomes read-only after success */
			_gnix_auth_key_enable(info);

			status = GNI_SetMddResources(
				handle->nic_handle->gni_nic_hndl,
				(info->attr.prov_key_limit +
				 info->attr.user_key_limit));
			assert(status == GNI_RC_SUCCESS);

			handle->nic_handle->mdd_resources_set = 1;
		}

		/* Reserve a VMDH slot for this registration. */
		vmdh_index = _gnix_get_next_reserved_key(info);
		if (vmdh_index <= 0) {
			GNIX_FATAL(FI_LOG_DOMAIN,
				   "failed to get reserved key for mbox "
				   "registration, rc=%d\n", vmdh_index);
		}
		flags |= GNI_MEM_USE_VMDH;
	}

	status = GNI_MemRegister(handle->nic_handle->gni_nic_hndl,
				 (uint64_t) slab->base, total_size,
				 handle->cq_handle, flags, vmdh_index,
				 &slab->memory_handle);
	COND_RELEASE(handle->nic_handle->requires_lock,
		     &handle->nic_handle->lock);
	if (status != GNI_RC_SUCCESS) {
		GNIX_WARN(FI_LOG_EP_CTRL, "GNI_MemRegister failed: %s\n",
			  gni_err_str[status]);
		ret = gnixu_to_fi_errno(status);
		goto err_memregister;
	}

	slab->allocator = handle;

	/* Publish the slab and advance the mapping offset for the next one. */
	gnix_slist_insert_tail(&slab->list_entry, &handle->slab_list);
	handle->last_offset += total_size;

	/* ret is 0 (FI_SUCCESS) from _gnix_alloc_bitmap here. */
	return ret;

	/* Unwind in reverse order of acquisition. */
err_memregister:
	_gnix_free_bitmap(slab->used);
err_alloc_bitmap:
	munmap(slab->base, total_size);
err_mmap:
	free(slab->used);
err_bitmap_calloc:
	free(slab);
err_slab_calloc:
	return ret;
}
int _gnix_cm_nic_enable(struct gnix_cm_nic *cm_nic) { int i, ret = FI_SUCCESS; struct gnix_fid_fabric *fabric; struct gnix_datagram *dg_ptr; uint8_t tag = GNIX_CM_NIC_WC_TAG; GNIX_TRACE(FI_LOG_EP_CTRL, "\n"); if (cm_nic == NULL) return -FI_EINVAL; if (cm_nic->domain == NULL) { GNIX_FATAL(FI_LOG_EP_CTRL, "domain is NULL\n"); } if (cm_nic->domain->fabric == NULL) { GNIX_FATAL(FI_LOG_EP_CTRL, "fabric is NULL\n"); } fabric = cm_nic->domain->fabric; assert(cm_nic->dgram_hndl != NULL); for (i = 0; i < fabric->n_wc_dgrams; i++) { ret = _gnix_dgram_alloc(cm_nic->dgram_hndl, GNIX_DGRAM_WC, &dg_ptr); /* * wildcards may already be posted to the cm_nic, * so just break if -FI_EAGAIN is returned by * _gnix_dgram_alloc */ if (ret == -FI_EAGAIN) { ret = FI_SUCCESS; break; } if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "_gnix_dgram_alloc call returned %d\n", ret); goto err; } dg_ptr->callback_fn = __process_datagram; dg_ptr->cache = cm_nic; __dgram_set_tag(dg_ptr, tag); ret = _gnix_dgram_wc_post(dg_ptr); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "_gnix_dgram_wc_post returned %d\n", ret); _gnix_dgram_free(dg_ptr); goto err; } } /* * TODO: better cleanup in error case */ err: return ret; }