/*
 * Write the given string to /proc/job (corespec job settings).
 *
 * @param[in] val_str  NUL-terminated string to write.
 *
 * @return 0 on success, -errno if the open or write fails.
 */
static int gnix_write_proc_job(char *val_str)
{
	ssize_t count;		/* write() returns ssize_t, not size_t */
	int fd;
	int rc = 0;
	int err;
	char *filename = "/proc/job";
	size_t val_str_len = strlen(val_str);

	fd = open(filename, O_WRONLY);
	if (fd < 0) {
		err = errno;	/* save before logging can clobber errno */
		GNIX_WARN(FI_LOG_FABRIC, "open(%s) failed, errno=%s\n",
			  filename, strerror(err));
		return -err;
	}

	count = write(fd, val_str, val_str_len);
	if (count < 0 || (size_t) count != val_str_len) {
		err = errno;
		GNIX_WARN(FI_LOG_FABRIC, "write(%s) failed, errno=%s\n",
			  val_str, strerror(err));
		rc = -err;
	}

	close(fd);

	return rc;
}
/*
 * Indicate that the next task spawned will be restricted to cores assigned
 * to corespec, by writing "0" to /proc/self/task/<tid>/task_is_app.
 *
 * @return 0 on success, -errno if the open or write fails.
 */
int _gnix_task_is_not_app(void)
{
	ssize_t count;		/* write() returns ssize_t, not size_t */
	int fd;
	int rc = 0;
	int err;
	char filename[PATH_MAX];
	char val_str[] = "0";
	size_t val_str_len = strlen(val_str);

	snprintf(filename, PATH_MAX, "/proc/self/task/%ld/task_is_app",
		 syscall(SYS_gettid));

	fd = open(filename, O_WRONLY);
	if (fd < 0) {
		err = errno;	/* save before logging can clobber errno */
		GNIX_WARN(FI_LOG_FABRIC, "open(%s) failed, errno=%s\n",
			  filename, strerror(err));
		return -err;
	}

	count = write(fd, val_str, val_str_len);
	if (count < 0 || (size_t) count != val_str_len) {
		err = errno;
		GNIX_WARN(FI_LOG_FABRIC, "write(%s, %s) failed, errno=%s\n",
			  filename, val_str, strerror(err));
		rc = -err;
	}

	close(fd);

	return rc;
}
int _gnix_notifier_get_event(struct gnix_mr_notifier *mrn, void* buf, size_t len) { int ret, ret_errno; if ((mrn == NULL) || (buf == NULL) || (len <= 0)) { GNIX_WARN(FI_LOG_MR, "Invalid argument to _gnix_notifier_get_event\n"); return -FI_EINVAL; } fastlock_acquire(&mrn->lock); if (*(mrn->cntr) > 0) { GNIX_DEBUG(FI_LOG_MR, "reading kdreg event\n"); ret = read(mrn->fd, buf, len); if (ret < 0) { ret_errno = errno; if (ret_errno != EAGAIN) { GNIX_WARN(FI_LOG_MR, "kdreg event read failed: %s\n", strerror(ret_errno)); } /* Not all of these map to fi_errno values */ ret = -ret_errno; } } else { GNIX_DEBUG(FI_LOG_MR, "nothing to read from kdreg :(\n"); ret = -FI_EAGAIN; } fastlock_release(&mrn->lock); return ret; }
/*
 * Allocate an mbox from the allocator handle, creating a new slab when no
 * free slot is available.
 *
 * NOTE(review): this function is truncated in the visible chunk -- the
 * remainder (filling the mbox, releasing the lock, and the err label) lies
 * outside this view.  Code left byte-identical; comments only.
 */
int _gnix_mbox_alloc(struct gnix_mbox_alloc_handle *alloc_handle,
		     struct gnix_mbox **ptr)
{
	struct gnix_slab *slab;
	int position;
	int ret;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (!alloc_handle || !ptr) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Invalid alloc_handle or ptr.\n");
		ret = -FI_EINVAL;
		goto err;
	}

	fastlock_acquire(&alloc_handle->lock);

	position = __find_free(alloc_handle, &slab);
	if (position < 0) {
		/* No free slot in any existing slab: grow the pool. */
		GNIX_DEBUG(FI_LOG_EP_CTRL, "Creating new slab.\n");
		ret = __create_slab(alloc_handle);
		if (ret) {
			GNIX_WARN(FI_LOG_EP_CTRL, "Slab creation failed.\n");
			/* NOTE(review): jumps to err while still holding
			 * alloc_handle->lock -- confirm the err path (not
			 * visible here) releases it. */
			goto err;
		}
		/* The freshly created slab is at the list tail. */
		slab = container_of(alloc_handle->slab_list.tail,
				    struct gnix_slab, list_entry);
		/* __create_slab returned 0 here, so this selects slot 0 of
		 * the new slab. */
		position = ret;
	}
/* Process incoming connection requests on a listening PEP. */ int _gnix_pep_progress(struct gnix_fid_pep *pep) { int accept_fd, ret; fastlock_acquire(&pep->lock); accept_fd = accept(pep->listen_fd, NULL, NULL); if (accept_fd >= 0) { /* New Connection. */ ret = __gnix_pep_connreq(pep, accept_fd); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "__gnix_pep_connreq failed, err: %d\n", ret); } } else if (errno != EAGAIN) { GNIX_WARN(FI_LOG_EP_CTRL, "(accept) Unexpected errno on listen socket: %d\n", errno); } fastlock_release(&pep->lock); return FI_SUCCESS; }
static int __basic_mr_reg_mr( struct gnix_fid_domain *domain, uint64_t address, uint64_t length, struct _gnix_fi_reg_context *fi_reg_context, void **handle) { struct gnix_fid_mem_desc *md, *ret; md = calloc(1, sizeof(*md)); if (!md) { GNIX_WARN(FI_LOG_MR, "failed to allocate memory"); return -FI_ENOMEM; } ret = __gnix_register_region((void *) md, (void *) address, length, fi_reg_context, (void *) domain); if (!ret) { GNIX_WARN(FI_LOG_MR, "failed to register memory"); free(md); return -FI_ENOSPC; } *handle = (void *) md; return FI_SUCCESS; }
static int __gnix_amo_send_err(struct gnix_fid_ep *ep, struct gnix_fab_req *req) { struct gnix_fid_cntr *cntr = NULL; int rc = FI_SUCCESS; uint64_t flags = req->flags & GNIX_AMO_COMPLETION_FLAGS; if (ep->send_cq) { rc = _gnix_cq_add_error(ep->send_cq, req->user_context, flags, 0, 0, 0, 0, 0, FI_ECANCELED, GNI_RC_TRANSACTION_ERROR, NULL); if (rc) { GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cq_add_error() failed: %d\n", rc); } } if ((req->type == GNIX_FAB_RQ_AMO) && ep->write_cntr) { cntr = ep->write_cntr; } else if ((req->type == GNIX_FAB_RQ_FAMO || req->type == GNIX_FAB_RQ_CAMO) && ep->read_cntr) { cntr = ep->read_cntr; } if (cntr) { rc = _gnix_cntr_inc_err(cntr); if (rc) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc_err() failed: %d\n", rc); } return rc; }
/*
 * Tear down a buddy allocator handle: release its free lists and bitmap,
 * then destroy the lock and free the handle itself.
 *
 * @return FI_SUCCESS, or -FI_EINVAL when alloc_handle is NULL.
 */
int _gnix_buddy_allocator_destroy(gnix_buddy_alloc_handle_t *alloc_handle)
{
	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (unlikely(!alloc_handle)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to _gnix_buddy_allocator_destroy."
			  "\n");
		return -FI_EINVAL;
	}

	fastlock_acquire(&alloc_handle->lock);

	free(alloc_handle->lists);

	/* Keep retrying until the bitmap is successfully released. */
	for (;;) {
		if (!_gnix_free_bitmap(&alloc_handle->bitmap))
			break;
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Trying to free buddy allocator handle bitmap.\n");
		sleep(1);
	}

	fastlock_release(&alloc_handle->lock);
	fastlock_destroy(&alloc_handle->lock);

	free(alloc_handle);

	return FI_SUCCESS;
}
int _gnix_cm_nic_free(struct gnix_cm_nic *cm_nic) { int ret = FI_SUCCESS; gni_return_t status; GNIX_TRACE(FI_LOG_EP_CTRL, "\n"); if (cm_nic == NULL) return -FI_EINVAL; if (cm_nic->dgram_hndl != NULL) { ret = _gnix_dgram_hndl_free(cm_nic->dgram_hndl); if (ret != FI_SUCCESS) GNIX_WARN(FI_LOG_EP_CTRL, "gnix_dgram_hndl_free returned %d\n", ret); } if (cm_nic->gni_cdm_hndl != NULL) { status = GNI_CdmDestroy(cm_nic->gni_cdm_hndl); if (status != GNI_RC_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "cdm destroy failed - %s\n", gni_err_str[status]); ret = gnixu_to_fi_errno(status); } } free(cm_nic); return ret; }
static int __gnix_msg_send_err(struct gnix_fid_ep *ep, struct gnix_fab_req *req) { uint64_t flags = FI_SEND | FI_MSG; int rc; flags |= req->msg.send_flags & FI_TAGGED; if (ep->send_cq) { rc = _gnix_cq_add_error(ep->send_cq, req->user_context, flags, 0, 0, 0, 0, 0, FI_ECANCELED, GNI_RC_TRANSACTION_ERROR, NULL); if (rc != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cq_add_error() returned %d\n", rc); } } if (ep->send_cntr) { rc = _gnix_cntr_inc_err(ep->send_cntr); if (rc != FI_SUCCESS) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc() failed: %d\n", rc); } return FI_SUCCESS; }
static int __gnix_msg_send_completion(struct gnix_fid_ep *ep, struct gnix_fab_req *req) { uint64_t flags = FI_SEND | FI_MSG; int rc; flags |= req->msg.send_flags & FI_TAGGED; if ((req->msg.send_flags & FI_COMPLETION) && ep->send_cq) { rc = _gnix_cq_add_event(ep->send_cq, req->user_context, flags, 0, 0, 0, 0, FI_ADDR_NOTAVAIL); if (rc != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cq_add_event returned %d\n", rc); } } if (ep->send_cntr) { rc = _gnix_cntr_inc(ep->send_cntr); if (rc != FI_SUCCESS) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc() failed: %d\n", rc); } return FI_SUCCESS; }
/*
 * Generate an error completion (CQ error event plus counter error bump)
 * for a failed receive.
 *
 * Always returns FI_SUCCESS; delivery failures are only logged.
 */
static int __recv_err(struct gnix_fid_ep *ep, void *context, uint64_t flags,
		      size_t len, void *addr, uint64_t data, uint64_t tag,
		      size_t olen, int err, int prov_errno, void *err_data)
{
	int rc;

	if (ep->recv_cq) {
		rc = _gnix_cq_add_error(ep->recv_cq, context, flags, len,
					addr, data, tag, olen, err,
					prov_errno, err_data);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "_gnix_cq_add_error returned %d\n", rc);
	}

	if (ep->recv_cntr) {
		rc = _gnix_cntr_inc_err(ep->recv_cntr);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "_gnix_cntr_inc_err() failed: %d\n", rc);
	}

	return FI_SUCCESS;
}
/*
 * Generate a receive completion event (when FI_COMPLETION was requested on
 * the receive) and bump the receive counter.
 *
 * Always returns FI_SUCCESS; delivery failures are only logged.
 */
static int __recv_completion(
		struct gnix_fid_ep *ep,
		struct gnix_fab_req *req,
		void *context,
		uint64_t flags,
		size_t len,
		void *addr,
		uint64_t data,
		uint64_t tag,
		fi_addr_t src_addr)
{
	int rc;

	if (ep->recv_cq && (req->msg.recv_flags & FI_COMPLETION)) {
		rc = _gnix_cq_add_event(ep->recv_cq, context, flags, len,
					addr, data, tag, src_addr);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "_gnix_cq_add_event returned %d\n", rc);
	}

	if (ep->recv_cntr) {
		rc = _gnix_cntr_inc(ep->recv_cntr);
		if (rc != FI_SUCCESS)
			GNIX_WARN(FI_LOG_EP_DATA,
				  "_gnix_cntr_inc() failed: %d\n", rc);
	}

	return FI_SUCCESS;
}
static int __gnix_amo_send_completion(struct gnix_fid_ep *ep, struct gnix_fab_req *req) { struct gnix_fid_cntr *cntr = NULL; int rc = FI_SUCCESS; uint64_t flags = req->flags & GNIX_AMO_COMPLETION_FLAGS; if ((req->flags & FI_COMPLETION) && ep->send_cq) { rc = _gnix_cq_add_event(ep->send_cq, req->user_context, flags, 0, 0, 0, 0, FI_ADDR_NOTAVAIL); if (rc) { GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cq_add_event() failed: %d\n", rc); } } if ((req->type == GNIX_FAB_RQ_AMO) && ep->write_cntr) { cntr = ep->write_cntr; } else if ((req->type == GNIX_FAB_RQ_FAMO || req->type == GNIX_FAB_RQ_CAMO) && ep->read_cntr) { cntr = ep->read_cntr; } if (cntr) { rc = _gnix_cntr_inc(cntr); if (rc) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc() failed: %d\n", rc); } return FI_SUCCESS; }
/*
 * Retrieve a fabric-level tunable value.
 *
 * @param[in]  fid  fabric fid (must have fclass FI_CLASS_FABRIC)
 * @param[in]  t    which tunable to read
 * @param[out] val  receives the value (written as uint32_t)
 *
 * @return FI_SUCCESS, or -FI_EINVAL for a bad fid or an unknown tunable.
 */
static int __gnix_fab_ops_get_val(struct fid *fid, fab_ops_val_t t, void *val)
{
	GNIX_TRACE(FI_LOG_FABRIC, "\n");

	assert(val);

	if (fid->fclass != FI_CLASS_FABRIC) {
		GNIX_WARN(FI_LOG_FABRIC, "Invalid fabric\n");
		return -FI_EINVAL;
	}

	switch (t) {
	case GNI_WAIT_THREAD_SLEEP:
		*(uint32_t *)val = gnix_wait_thread_sleep_time;
		break;
	case GNI_DEFAULT_USER_REGISTRATION_LIMIT:
		*(uint32_t *)val = gnix_default_user_registration_limit;
		break;
	case GNI_DEFAULT_PROV_REGISTRATION_LIMIT:
		*(uint32_t *)val = gnix_default_prov_registration_limit;
		break;
	default:
		/* Fix: previously fell through and returned FI_SUCCESS for
		 * an unknown key, leaving *val untouched. */
		GNIX_WARN(FI_LOG_FABRIC, "Invalid fab_ops_val\n");
		return -FI_EINVAL;
	}

	return FI_SUCCESS;
}
/*
 * Create an mbox allocator backed by huge pages and seed it with its first
 * slab.
 *
 * @param[in]  nic           NIC the mboxes will be registered with
 * @param[in]  cq_handle     CQ handle associated with the mboxes
 * @param[in]  page_size     huge page size (in MB units)
 * @param[in]  mbox_size     size of each mailbox in bytes
 * @param[in]  mpmmap        mailboxes per mmap
 * @param[out] alloc_handle  receives the new allocator handle
 *
 * @return FI_SUCCESS, -FI_EINVAL on bad parameters, -FI_ENOMEM on
 *         allocation failure, or the error from huge-page/slab setup.
 */
int _gnix_mbox_allocator_create(struct gnix_nic *nic,
				gni_cq_handle_t cq_handle,
				enum gnix_page_size page_size,
				size_t mbox_size,
				size_t mpmmap,
				struct gnix_mbox_alloc_handle **alloc_handle)
{
	struct gnix_mbox_alloc_handle *h;
	char err_buf[256];
	char *err_str;
	int rc;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	if (!nic || !mbox_size || !mpmmap || !alloc_handle) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to allocator_create.\n");
		return -FI_EINVAL;
	}

	*alloc_handle = NULL;

	h = calloc(1, sizeof(*h));
	if (!h) {
		err_str = strerror_r(errno, err_buf, sizeof(err_buf));
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Error allocating alloc handle: %s\n", err_str);
		return -FI_ENOMEM;
	}

	/* page_size is expressed in MB. */
	h->page_size = page_size * 1024 * 1024;
	h->mbox_size = mbox_size;
	h->mpmmap = mpmmap;
	h->nic_handle = nic;
	h->cq_handle = cq_handle;
	fastlock_init(&h->lock);

	rc = __open_huge_page(h);
	if (rc) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Error opening huge page.\n");
		goto err_huge_page;
	}

	rc = __create_slab(h);
	if (rc) {
		GNIX_WARN(FI_LOG_EP_CTRL, "Slab creation failed.\n");
		goto err_slab_creation;
	}

	*alloc_handle = h;
	return rc;

err_slab_creation:
	free(h->filename);
err_huge_page:
	free(h);
	return rc;
}
/* SMSG callback for AMO remote counter control message. */ int __smsg_amo_cntr(void *data, void *msg) { int ret = FI_SUCCESS; struct gnix_vc *vc = (struct gnix_vc *)data; struct gnix_smsg_amo_cntr_hdr *hdr = (struct gnix_smsg_amo_cntr_hdr *)msg; struct gnix_fid_ep *ep = vc->ep; gni_return_t status; if (hdr->flags & FI_REMOTE_WRITE && ep->rwrite_cntr) { ret = _gnix_cntr_inc(ep->rwrite_cntr); if (ret != FI_SUCCESS) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc() failed: %d\n", ret); } if (hdr->flags & FI_REMOTE_READ && ep->rread_cntr) { ret = _gnix_cntr_inc(ep->rread_cntr); if (ret != FI_SUCCESS) GNIX_WARN(FI_LOG_EP_DATA, "_gnix_cntr_inc() failed: %d\n", ret); } status = GNI_SmsgRelease(vc->gni_ep); if (OFI_UNLIKELY(status != GNI_RC_SUCCESS)) { GNIX_WARN(FI_LOG_EP_DATA, "GNI_SmsgRelease returned %s\n", gni_err_str[status]); ret = gnixu_to_fi_errno(status); } return ret; }
/* Process an incoming connection request at a listening PEP. */
static int __gnix_pep_connreq(struct gnix_fid_pep *pep, int fd)
{
	int ret;
	struct gnix_pep_sock_conn *conn;
	struct fi_eq_cm_entry *eq_entry;
	int eqe_size;

	/* Create and initialize a new connection request. */
	conn = calloc(1, sizeof(*conn));
	if (!conn) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Failed to alloc accepted socket conn\n");
		return -FI_ENOMEM;
	}

	conn->fid.fclass = FI_CLASS_CONNREQ;
	conn->fid.context = pep;
	conn->sock_fd = fd;

	/* Pull request data from the listening socket. */
	conn->bytes_read += read(fd, &conn->req, sizeof(conn->req));
	if (conn->bytes_read != sizeof(conn->req)) {
		/* TODO Wait for more bytes. */
		GNIX_FATAL(FI_LOG_EP_CTRL, "Unexpected read size\n");
	}

	/* Point the embedded fi_info at the request's own storage so the
	 * info stays valid for the lifetime of the conn object. */
	conn->req.info.src_addr = &conn->req.src_addr;
	conn->req.info.dest_addr = &conn->req.dest_addr;
	conn->req.info.tx_attr = &conn->req.tx_attr;
	conn->req.info.rx_attr = &conn->req.rx_attr;
	conn->req.info.ep_attr = &conn->req.ep_attr;
	conn->req.info.domain_attr = &conn->req.domain_attr;
	conn->req.info.fabric_attr = &conn->req.fabric_attr;
	/* NOTE(review): name pointers are cleared before fi_dupinfo() --
	 * presumably they are not valid after the raw struct copy over the
	 * socket; confirm against the sender side. */
	conn->req.info.domain_attr->name = NULL;
	conn->req.info.fabric_attr->name = NULL;
	conn->req.info.fabric_attr->prov_name = NULL;

	conn->info = &conn->req.info;
	conn->info->handle = &conn->fid;

	/* Tell user of a new conn req via the EQ. */
	eq_entry = (struct fi_eq_cm_entry *)conn->req.eqe_buf;
	eq_entry->fid = &pep->pep_fid.fid;
	eq_entry->info = fi_dupinfo(conn->info);

	/* EQE carries the CM data appended after the fixed entry. */
	eqe_size = sizeof(*eq_entry) + conn->req.cm_data_len;
	ret = fi_eq_write(&pep->eq->eq_fid, FI_CONNREQ, eq_entry, eqe_size,
			  0);
	if (ret != eqe_size) {
		GNIX_WARN(FI_LOG_EP_CTRL, "fi_eq_write failed, err: %d\n",
			  ret);
		fi_freeinfo(conn->info);
		free(conn);
		return ret;
	}

	GNIX_DEBUG(FI_LOG_EP_CTRL, "Added FI_CONNREQ EQE: %p, %p\n",
		   pep->eq, pep);

	return FI_SUCCESS;
}
/*
 * Pull the next completed TX descriptor from a hardware CQ.
 *
 * Previously-deferred error TXDs are drained first; otherwise one event is
 * taken from the CQ and classified (POST vs SMSG).
 *
 * NOTE(review): this function is truncated in the visible chunk -- the
 * SMSG branch and the rest of the body are outside this view.  Code left
 * byte-identical; comments only.
 */
static void __nic_get_completed_txd(struct gnix_nic *nic,
				    gni_cq_handle_t hw_cq,
				    struct gnix_tx_descriptor **txd,
				    gni_return_t *tx_status)
{
	gni_post_descriptor_t *gni_desc;
	struct gnix_tx_descriptor *txd_p = NULL;
	struct gnix_fab_req *req;
	gni_return_t status;
	int msg_id;
	gni_cq_entry_t cqe;
	uint32_t recov = 1;

	/* Report queued TXD errors before polling the hardware CQ. */
	if (__gnix_nic_txd_err_get(nic, &txd_p)) {
		*txd = txd_p;
		*tx_status = GNI_RC_TRANSACTION_ERROR;
		return;
	}

	status = GNI_CqGetEvent(hw_cq, &cqe);
	if (status == GNI_RC_NOT_DONE) {
		/* Nothing completed. */
		*txd = NULL;
		*tx_status = GNI_RC_NOT_DONE;
		return;
	}

	assert(status == GNI_RC_SUCCESS ||
	       status == GNI_RC_TRANSACTION_ERROR);

	if (unlikely(status == GNI_RC_TRANSACTION_ERROR)) {
		/* Only log unrecoverable errors; recoverable ones are
		 * handled transparently. */
		status = GNI_CqErrorRecoverable(cqe, &recov);
		if (status == GNI_RC_SUCCESS) {
			if (!recov) {
				char ebuf[512];
				GNI_CqErrorStr(cqe, ebuf, sizeof(ebuf));
				GNIX_WARN(FI_LOG_EP_DATA,
					  "CQ error status: %s\n", ebuf);
			}
		} else {
			GNIX_WARN(FI_LOG_EP_DATA,
				  "GNI_CqErrorRecover returned: %s\n",
				  gni_err_str[status]);
			recov = 0; /* assume something bad has happened */
		}
	}

	if (GNI_CQ_GET_TYPE(cqe) == GNI_CQ_EVENT_TYPE_POST) {
		/* Post event: recover the TXD from the GNI descriptor. */
		status = GNI_GetCompleted(hw_cq, cqe, &gni_desc);
		assert(status == GNI_RC_SUCCESS ||
		       status == GNI_RC_TRANSACTION_ERROR);
		txd_p = container_of(gni_desc,
				     struct gnix_tx_descriptor, gni_desc);
	} else if (GNI_CQ_GET_TYPE(cqe) == GNI_CQ_EVENT_TYPE_SMSG) {
/** * Will attempt to find a directory in hugetlbfs using the given page size and * create a filename to use for backing an mmap. * * @param[in] page_size Page size to look for in the hugetlbfs * @param[out] filename Pointer containing filename after generation. * * @return FI_SUCCESS On successfully finding a huge page and generating a * file name. * * @return -FI_EINVAL if an invalid parameter was given * @return -FI_EIO if an error occurred while opening the /proc/mounts * file. This is propagated from __find_huge_page. * @return -FI_ENOMEM if an error occurred while allocating space for the * filename. */ static int __generate_file_name(size_t page_size, char **filename) { static const char basename[] = "gnix_map"; char *full_filename = NULL; char *huge_page = NULL; char *error; char error_buf[256]; int my_file_id; int size; int ret; if (!filename) { GNIX_WARN(FI_LOG_EP_CTRL, "filename pointer is NULL.\n"); ret = -FI_EINVAL; goto err_invalid; } ret = __find_huge_page(page_size, &huge_page); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_EP_CTRL, "Find huge page returned error %s\n", fi_strerror(-ret)); goto err_invalid; } my_file_id = ofi_atomic_inc32(&file_id_counter); size = snprintf(NULL, 0, "%s/%s.%d.%d", huge_page, basename, getpid(), my_file_id); if (size < 0) { error = strerror_r(errno, error_buf, sizeof(error_buf)); GNIX_WARN(FI_LOG_EP_CTRL, "Error while gathering size for snprintf: %s\n", error); goto err_snprintf; } full_filename = malloc(size + 1); if (!full_filename) { error = strerror_r(errno, error_buf, sizeof(error_buf)); GNIX_WARN(FI_LOG_EP_CTRL, "Error allocating full_filename: %s\n", error); ret = -FI_ENOMEM; goto err_snprintf; } sprintf(full_filename, "%s/%s.%d.%d", huge_page, basename, getpid(), my_file_id); GNIX_DEBUG(FI_LOG_EP_CTRL, "Generated filename: %s\n", full_filename); *filename = full_filename; err_snprintf: free(huge_page); err_invalid: return ret; }
static int gnix_domain_close(fid_t fid) { int ret = FI_SUCCESS, references_held; struct gnix_fid_domain *domain; GNIX_TRACE(FI_LOG_DOMAIN, "\n"); domain = container_of(fid, struct gnix_fid_domain, domain_fid.fid); if (domain->domain_fid.fid.fclass != FI_CLASS_DOMAIN) { ret = -FI_EINVAL; goto err; } /* before checking the refcnt, flush the memory registration cache */ if (domain->mr_cache_ro) { fastlock_acquire(&domain->mr_cache_lock); ret = _gnix_mr_cache_flush(domain->mr_cache_ro); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_DOMAIN, "failed to flush memory cache on domain close\n"); fastlock_release(&domain->mr_cache_lock); goto err; } fastlock_release(&domain->mr_cache_lock); } if (domain->mr_cache_rw) { fastlock_acquire(&domain->mr_cache_lock); ret = _gnix_mr_cache_flush(domain->mr_cache_rw); if (ret != FI_SUCCESS) { GNIX_WARN(FI_LOG_DOMAIN, "failed to flush memory cache on domain close\n"); fastlock_release(&domain->mr_cache_lock); goto err; } fastlock_release(&domain->mr_cache_lock); } /* * if non-zero refcnt, there are eps, mrs, and/or an eq associated * with this domain which have not been closed. */ references_held = _gnix_ref_put(domain); if (references_held) { GNIX_INFO(FI_LOG_DOMAIN, "failed to fully close domain due to " "lingering references. references=%i dom=%p\n", references_held, domain); } GNIX_INFO(FI_LOG_DOMAIN, "gnix_domain_close invoked returning %d\n", ret); err: return ret; }
/** * Fill all of the fields of an mbox to be returned to the requester. * * @param[in] handle Handle to the allocator being used. * @param[in] slab Slab which the mbox is allocated from. * @param[in] position Position of the mbox in the slab. * @param[out] ptr Contains the allocated mbox upon success. * * @return FI_SUCCESS Upon successfully filling an mbox with relevant data. * @return -FI_EINVAL Upon receiving invalid input, or finding the bitmap in * a corrupted state. * @return -FI_ENOMEM Upon failure to create the mbox structure using calloc. */ static int __fill_mbox(struct gnix_mbox_alloc_handle *handle, struct gnix_slab *slab, size_t position, struct gnix_mbox **ptr) { struct gnix_mbox *out; int ret = FI_SUCCESS; char error_buf[256]; size_t mapped_size; char *error; out = calloc(1, sizeof(*out)); if (!out) { error = strerror_r(errno, error_buf, sizeof(error_buf)); GNIX_WARN(FI_LOG_EP_CTRL, "Error allocating mbox: %s\n", error); ret = -FI_ENOMEM; goto err_mbox_calloc; } mapped_size = handle->page_size * __page_count(handle); out->slab = slab; out->base = slab->base; out->offset = (position * handle->mbox_size); out->memory_handle = &slab->memory_handle; if (out->offset > mapped_size) { GNIX_WARN(FI_LOG_EP_CTRL, "Mbox out of bounds.\n"); ret = -FI_EINVAL; goto err_invalid; } /* On some systems, the page may not be zero'd from first use. Memset it here */ memset((void *) ((uint64_t) out->base + out->offset), 0x0, handle->mbox_size); ret = _gnix_test_and_set_bit(slab->used, position); if (ret != 0) { GNIX_WARN(FI_LOG_EP_CTRL, "Bit already set when creating mbox.\n"); ret = -FI_EINVAL; goto err_invalid; } *ptr = out; return ret; err_invalid: free(out); err_mbox_calloc: return ret; }
int _gnix_mbox_allocator_destroy(struct gnix_mbox_alloc_handle *alloc_handle) { struct slist_entry *entry; struct gnix_slab *temp; char error_buf[256]; int position; char *error; int ret = FI_SUCCESS; GNIX_TRACE(FI_LOG_EP_CTRL, "\n"); if (!alloc_handle) { GNIX_WARN(FI_LOG_EP_CTRL, "Invalid alloc handle.\n"); return -FI_EINVAL; } position = __find_used(alloc_handle, &temp); if (position >= 0) { GNIX_WARN(FI_LOG_EP_CTRL, "Can't destroy, not all mailboxes have been returned (pos = %d).\n", position); return -FI_EBUSY; } while (!slist_empty(&alloc_handle->slab_list)) { entry = slist_remove_head(&alloc_handle->slab_list); temp = container_of(entry, struct gnix_slab, list_entry); ret = __destroy_slab(alloc_handle, temp); if (ret) GNIX_WARN(FI_LOG_EP_CTRL, "Error destroying slab.\n"); } if (alloc_handle->filename != NULL) free(alloc_handle->filename); if (alloc_handle->fd != -1) ret = close(alloc_handle->fd); if (ret) { error = strerror_r(errno, error_buf, sizeof(error_buf)); GNIX_WARN(FI_LOG_EP_CTRL, "Error closing map file: %s\n", error); } fastlock_destroy(&alloc_handle->lock); free(alloc_handle); return FI_SUCCESS; }
/*
 * Create a buddy allocator over [base, base + len) with maximum block size
 * max (a power of two that evenly divides len).
 *
 * @param[in]  base          start of the managed region
 * @param[in]  len           length of the region in bytes
 * @param[in]  max           maximum block size
 * @param[out] alloc_handle  receives the new allocator handle
 *
 * @return FI_SUCCESS, -FI_EINVAL on bad parameters, or -FI_ENOMEM when an
 *         internal allocation fails.
 */
int _gnix_buddy_allocator_create(void *base, uint32_t len, uint32_t max,
				 gnix_buddy_alloc_handle_t **alloc_handle)
{
	char err_buf[256] = {0}, *error = NULL;
	int fi_errno;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	/* Ensure parameters are valid */
	if (unlikely(!base || !len || !max || max > len || !alloc_handle ||
		     IS_NOT_POW_TWO(max) || (len % max) ||
		     !(len / MIN_BLOCK_SIZE * 2))) {

		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Invalid parameter to _gnix_buddy_allocator_create."
			  "\n");
		return -FI_EINVAL;
	}

	*alloc_handle = calloc(1, sizeof(gnix_buddy_alloc_handle_t));

	/* Fix: check the allocated object, not the caller's (always
	 * non-NULL here) pointer-to-pointer. */
	if (unlikely(!*alloc_handle)) {
		error = strerror_r(errno, err_buf, sizeof(err_buf));
		/* Fix: format string previously had no conversion for the
		 * strerror text argument. */
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Could not create buddy allocator handle: %s\n",
			  error);
		return -FI_ENOMEM;
	}

	fastlock_init(&alloc_handle[0]->lock);
	alloc_handle[0]->base = base;
	alloc_handle[0]->len = len;
	alloc_handle[0]->max = max;

	if (__gnix_buddy_create_lists(alloc_handle[0])) {
		free(*alloc_handle);
		*alloc_handle = NULL;
		return -FI_ENOMEM;
	}

	/* The bitmap needs len / MIN_BLOCK_SIZE * 2 bits to flag every
	 * possible block of size: min, min * 2, min * 4, ... , max that
	 * fits in the base.  The maximum number of bits used would be if
	 * max = len. */
	if ((fi_errno = _gnix_alloc_bitmap(&alloc_handle[0]->bitmap,
					   len / MIN_BLOCK_SIZE * 2))) {
		/* Fix: free the lists pointer itself; previously this freed
		 * the address of the member field (heap corruption). */
		free(alloc_handle[0]->lists);
		free(*alloc_handle);
		*alloc_handle = NULL;
	}

	return fi_errno;
}
/*
 * fi_control handler for GNI endpoints.  For FI_ENABLE on an FI_EP_RDM
 * endpoint, pre-post the fabric's configured number of wildcard datagrams
 * so incoming connections can be matched.
 */
static int gnix_ep_control(fid_t fid, int command, void *arg)
{
	struct gnix_fid_ep *ep;
	struct gnix_fid_domain *dom;
	struct gnix_vc *vc;
	int i, ret = FI_SUCCESS;

	ep = container_of(fid, struct gnix_fid_ep, ep_fid);

	switch (command) {
	case FI_ENABLE:
		/*
		 * for FI_EP_RDM, post wc datagrams now
		 */
		if (ep->type != FI_EP_RDM)
			break;

		dom = ep->domain;
		for (i = 0; i < dom->fabric->n_wc_dgrams; i++) {
			assert(ep->recv_cq != NULL);

			ret = _gnix_vc_alloc(ep, FI_ADDR_UNSPEC, &vc);
			if (ret != FI_SUCCESS) {
				GNIX_WARN(FI_LOG_EP_CTRL,
					  "_gnix_vc_alloc call returned %d\n",
					  ret);
				goto err;
			}

			ret = _gnix_vc_accept(vc);
			if (ret != FI_SUCCESS) {
				GNIX_WARN(FI_LOG_EP_CTRL,
					  "_gnix_vc_accept returned %d\n",
					  ret);
				_gnix_vc_destroy(vc);
				goto err;
			}

			fastlock_acquire(&ep->vc_list_lock);
			dlist_insert_tail(&vc->entry, &ep->wc_vc_list);
			fastlock_release(&ep->vc_list_lock);
		}
		break;

	case FI_GETFIDFLAG:
	case FI_SETFIDFLAG:
	case FI_ALIAS:
	default:
		return -FI_ENOSYS;
	}

err:
	return ret;
}
static int __nic_setup_irq_cq(struct gnix_nic *nic) { int ret = FI_SUCCESS; size_t len; gni_return_t status; int fd = -1; void *mmap_addr; len = (size_t)sysconf(_SC_PAGESIZE); mmap_addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, fd, 0); if (mmap_addr == MAP_FAILED) { GNIX_WARN(FI_LOG_EP_CTRL, "mmap failed - %s\n", strerror(errno)); ret = -errno; goto err; } nic->irq_mmap_addr = mmap_addr; nic->irq_mmap_len = len; status = GNI_MemRegister(nic->gni_nic_hndl, (uint64_t) nic->irq_mmap_addr, len, nic->rx_cq_blk, GNI_MEM_READWRITE, -1, &nic->irq_mem_hndl); if (status != GNI_RC_SUCCESS) { ret = gnixu_to_fi_errno(status); GNIX_WARN(FI_LOG_EP_CTRL, "GNI_MemRegister returned %s\n", gni_err_str[status]); goto err_w_mmap; } #if 0 fprintf(stderr,"registered ireq memhndl 0x%016lx 0x%016lx\n", nic->irq_mem_hndl.qword1, nic->irq_mem_hndl.qword2); #endif return ret; err_w_mmap: munmap(mmap_addr, len); err: return ret; }
/*
 * Validate and normalize a user-supplied CQ attribute struct, and bind the
 * wait-object-dependent entries of the ops vectors.
 *
 * Mutates attr in place: fills in the default size and resolves
 * FI_CQ_FORMAT_UNSPEC.  Returns FI_SUCCESS or -FI_EINVAL.
 */
static int verify_cq_attr(struct fi_cq_attr *attr, struct fi_ops_cq *ops,
			  struct fi_ops *fi_cq_ops)
{
	GNIX_TRACE(FI_LOG_CQ, "\n");

	if (!attr || !ops || !fi_cq_ops)
		return -FI_EINVAL;

	if (!attr->size)
		attr->size = GNIX_CQ_DEFAULT_SIZE;

	switch (attr->format) {
	case FI_CQ_FORMAT_UNSPEC:
		attr->format = FI_CQ_FORMAT_CONTEXT;
		/* fallthrough: CONTEXT is the normalized default format */
	case FI_CQ_FORMAT_CONTEXT:
	case FI_CQ_FORMAT_MSG:
	case FI_CQ_FORMAT_DATA:
	case FI_CQ_FORMAT_TAGGED:
		break;
	default:
		GNIX_WARN(FI_LOG_CQ, "format: %d unsupported.\n",
			  attr->format);
		return -FI_EINVAL;
	}

	switch (attr->wait_obj) {
	case FI_WAIT_NONE:
		/* No wait object: stub out the blocking entry points. */
		ops->sread = fi_no_cq_sread;
		ops->signal = fi_no_cq_signal;
		ops->sreadfrom = fi_no_cq_sreadfrom;
		fi_cq_ops->control = fi_no_control;
		break;
	case FI_WAIT_SET:
		if (!attr->wait_set) {
			GNIX_WARN(FI_LOG_CQ,
				  "FI_WAIT_SET is set, but wait_set field doesn't reference a wait object.\n");
			return -FI_EINVAL;
		}
		break;
	case FI_WAIT_UNSPEC:
		break;
	case FI_WAIT_FD:
	case FI_WAIT_MUTEX_COND:
	default:
		GNIX_WARN(FI_LOG_CQ, "wait type: %d unsupported.\n",
			  attr->wait_obj);
		return -FI_EINVAL;
	}

	return FI_SUCCESS;
}
/** * Destroy a slab. * * @param[in] handle Handle to the allocator being used. * @param[in] slab Slab to be destroyed. * * @return FI_SUCCESS On successful slab destruction. * * @return -FI_EINVAL On invalid handle or slab being given as parameters. */ static int __destroy_slab(struct gnix_mbox_alloc_handle *handle, struct gnix_slab *slab) { size_t total_size; GNIX_TRACE(FI_LOG_EP_CTRL, "\n"); if (!handle || !slab) { GNIX_WARN(FI_LOG_EP_CTRL, "Invalid argument handle or slab.\n"); return -FI_EINVAL; } total_size = handle->page_size * __page_count(handle); _gnix_free_bitmap(slab->used); free(slab->used); COND_ACQUIRE(handle->nic_handle->requires_lock, &handle->nic_handle->lock); GNI_MemDeregister(handle->nic_handle->gni_nic_hndl, &slab->memory_handle); COND_RELEASE(handle->nic_handle->requires_lock, &handle->nic_handle->lock); munmap(slab->base, total_size); free(slab); return FI_SUCCESS; }
/******************************************************************************* * Helper functions. ******************************************************************************/ static int gnix_verify_av_attr(struct fi_av_attr *attr) { int ret = FI_SUCCESS; if (attr->rx_ctx_bits > GNIX_RX_CTX_MAX_BITS) { GNIX_WARN(FI_LOG_AV, "rx_ctx_bits too big\n"); return -FI_EINVAL; } switch (attr->type) { case FI_AV_TABLE: case FI_AV_MAP: case FI_AV_UNSPEC: break; default: ret = -FI_EINVAL; break; } if (attr->name != NULL) { ret = -FI_ENOSYS; } return ret; }
/*
 * Build the per-block-size free lists for a buddy allocator and seed the
 * largest-size list with every max-sized block in the managed region.
 *
 * @return FI_SUCCESS, or -FI_ENOMEM when the list array cannot be
 *         allocated.
 */
static inline int __gnix_buddy_create_lists(gnix_buddy_alloc_handle_t
					    *alloc_handle)
{
	uint32_t i, offset = 0;

	/* One list per power-of-two size from MIN_BLOCK_SIZE up to max. */
	alloc_handle->nlists = (uint32_t)
		__gnix_buddy_log2(alloc_handle->max / MIN_BLOCK_SIZE) + 1;
	/* Fix: use calloc(n, size) so the element-count multiplication is
	 * overflow-checked by the allocator. */
	alloc_handle->lists = calloc(alloc_handle->nlists,
				     sizeof(struct dlist_entry));
	if (unlikely(!alloc_handle->lists)) {
		GNIX_WARN(FI_LOG_EP_CTRL,
			  "Could not create buddy allocator lists.\n");
		return -FI_ENOMEM;
	}

	for (i = 0; i < alloc_handle->nlists; i++)
		dlist_init(alloc_handle->lists + i);

	/* Insert free blocks of size max in sorted order into last list */
	for (i = 0; i < alloc_handle->len / alloc_handle->max; i++) {
		dlist_insert_tail((void *) ((uint8_t *) alloc_handle->base +
					    offset),
				  alloc_handle->lists +
				  alloc_handle->nlists - 1);
		offset += alloc_handle->max;
	}

	return FI_SUCCESS;
}