void ptlrpc_ni_fini(void)
{
        wait_queue_head_t waitq;
        struct l_wait_info lwi;
        int rc;
        int retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */
        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        init_waitqueue_head(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}
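The -EBUSY branch above implements a plain two-second pause by waiting on a dummy wait queue with a LWI_TIMEOUT condition that can never be satisfied. A hedged sketch of the same delay written with a stock kernel primitive (this substitutes schedule_timeout_uninterruptible() for the l_wait_event() wrapper and is not the form used in the tree):

        /* Sketch only: a ~2 second pause between LNetEQFree() retries,
         * using a stock kernel primitive instead of the dummy wait
         * queue plus LWI_TIMEOUT idiom above. */
        schedule_timeout_uninterruptible(cfs_time_seconds(2));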
int ptlrpc_ni_init(void)
{
        int rc;
        lnet_pid_t pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
                return -ENOENT;
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */

        /* The kernel LNet calls our master callback when there are new
         * events; since we are guaranteed to get every event via the
         * callback, we set the EQ size to 0 to avoid the overhead of
         * serializing enqueue/dequeue operations in LNet. */
        rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
        if (rc == 0)
                return 0;

        CERROR("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();
        return -ENOMEM;
}
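These two routines are meant to be used as a matched pair around the lifetime of the ptlrpc layer: ptlrpc_ni_init() brings up the NI and allocates the event queue, and ptlrpc_ni_fini() frees the queue before dropping the NI. A minimal sketch of that pairing at module load/unload (the wrapper names and the surrounding init order are assumptions for illustration):

        /* Sketch only: pairing ptlrpc_ni_init() with ptlrpc_ni_fini()
         * across module lifetime. Wrapper names are illustrative. */
        static int __init ptlrpc_module_init(void)
        {
                int rc = ptlrpc_ni_init();

                if (rc != 0)
                        return rc;   /* NI and EQ already cleaned up on failure */
                /* ... register services, start threads ... */
                return 0;
        }

        static void __exit ptlrpc_module_exit(void)
        {
                /* ... stop threads, unregister services ... */
                ptlrpc_ni_fini();    /* frees the EQ, then calls LNetNIFini() */
        }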
int lnet_unconfigure(void)
{
        int refcount;

        LNET_MUTEX_LOCK(&lnet_config_mutex);

        if (the_lnet.ln_niinit_self) {
                the_lnet.ln_niinit_self = 0;
                LNetNIFini();
        }

        LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
        refcount = the_lnet.ln_refcount;
        LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);

        LNET_MUTEX_UNLOCK(&lnet_config_mutex);

        return (refcount == 0) ? 0 : -EBUSY;
}
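The LNET_MUTEX_LOCK()/LNET_MUTEX_UNLOCK() wrappers here are portability shims; in the kernel build they are assumed to reduce to plain mutex calls, which is why the next variant reads identically with the wrappers expanded. A hedged sketch of such definitions:

        /* Sketch only: assumed kernel-side definitions of the shims. */
        #define LNET_MUTEX_LOCK(m)      mutex_lock(m)
        #define LNET_MUTEX_UNLOCK(m)    mutex_unlock(m)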
static int lnet_unconfigure(void)
{
        int refcount;

        mutex_lock(&lnet_config_mutex);

        if (the_lnet.ln_niinit_self) {
                the_lnet.ln_niinit_self = 0;
                LNetNIFini();
        }

        mutex_lock(&the_lnet.ln_api_mutex);
        refcount = the_lnet.ln_refcount;
        mutex_unlock(&the_lnet.ln_api_mutex);

        mutex_unlock(&lnet_config_mutex);

        return (refcount == 0) ? 0 : -EBUSY;
}
static int lnet_unconfigure(void)
{
        int refcount;

        mutex_lock(&lnet_config_mutex);

        if (the_lnet.ln_niinit_self) {
                the_lnet.ln_niinit_self = 0;
                LNetNIFini();
                module_put(THIS_MODULE);
        }

        mutex_lock(&the_lnet.ln_api_mutex);
        refcount = the_lnet.ln_refcount;
        mutex_unlock(&the_lnet.ln_api_mutex);

        mutex_unlock(&lnet_config_mutex);

        return !refcount ? 0 : -EBUSY;
}
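The module_put(THIS_MODULE) added in this variant implies that the matching lnet_configure() pinned the module with try_module_get() when it brought the network up. A hedged sketch of that configure side (details beyond the reference pairing are assumptions):

        /* Sketch only: the configure path assumed to match the
         * module_put() above; exact error handling may differ. */
        static int lnet_configure(void *arg)
        {
                int rc = 0;

                mutex_lock(&lnet_config_mutex);
                if (!the_lnet.ln_niinit_self) {
                        if (!try_module_get(THIS_MODULE)) {
                                rc = -ENODEV;
                                goto out;
                        }
                        rc = LNetNIInit(LNET_PID_ANY);
                        if (rc >= 0) {
                                the_lnet.ln_niinit_self = 1;
                                rc = 0;
                        } else {
                                module_put(THIS_MODULE);
                        }
                }
        out:
                mutex_unlock(&lnet_config_mutex);
                return rc;
        }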
static int lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
{
        int rc;

        switch (cmd) {
        case IOC_LIBCFS_CONFIGURE: {
                struct libcfs_ioctl_data *data =
                        (struct libcfs_ioctl_data *)hdr;

                if (data->ioc_hdr.ioc_len < sizeof(*data))
                        return -EINVAL;

                the_lnet.ln_nis_from_mod_params = data->ioc_flags;
                return lnet_configure(NULL);
        }

        case IOC_LIBCFS_UNCONFIGURE:
                return lnet_unconfigure();

        case IOC_LIBCFS_ADD_NET:
                return lnet_dyn_configure(hdr);

        case IOC_LIBCFS_DEL_NET:
                return lnet_dyn_unconfigure(hdr);

        default:
                /* Passing LNET_PID_ANY only gives me a ref if the net is up
                 * already; I'll need it to ensure the net can't go down while
                 * I'm called into it */
                rc = LNetNIInit(LNET_PID_ANY);
                if (rc >= 0) {
                        rc = LNetCtl(cmd, hdr);
                        LNetNIFini();
                }
                return rc;
        }
}
int lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
        int rc;

        switch (cmd) {
        case IOC_LIBCFS_CONFIGURE:
                return lnet_configure(NULL);

        case IOC_LIBCFS_UNCONFIGURE:
                return lnet_unconfigure();

        default:
                /* Passing LNET_PID_ANY only gives me a ref if the net is up
                 * already; I'll need it to ensure the net can't go down while
                 * I'm called into it */
                rc = LNetNIInit(LNET_PID_ANY);
                if (rc >= 0) {
                        rc = LNetCtl(cmd, data);
                        LNetNIFini();
                }
                return rc;
        }
}
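Both versions of lnet_ioctl() share the same default-case idiom: LNetNIInit(LNET_PID_ANY) only takes a reference if the network is already up, LNetCtl() then runs under that reference, and LNetNIFini() drops it. A hedged sketch factoring the idiom into a helper (lnet_ctl_pinned is a hypothetical name, not from the tree):

        /* Sketch only: the pin-call-unpin idiom from the default
         * branches above; the helper name lnet_ctl_pinned is
         * hypothetical. */
        static int lnet_ctl_pinned(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
        {
                int rc;

                rc = LNetNIInit(LNET_PID_ANY);  /* ref only if net is up */
                if (rc < 0)
                        return rc;

                rc = LNetCtl(cmd, hdr);
                LNetNIFini();                   /* drop the reference */
                return rc;
        }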