void rpc_async_queue_free(rpc_async_queue *queue)
{
	pointer data;

	pthread_mutex_lock(&queue->mutex);
	while ((data = rpc_async_queue_try_pop(queue)) != NULL) {
		rpc_free(data);
	}
	pthread_mutex_unlock(&queue->mutex);
	rpc_free(queue);
}
static void
nul_destroy(struct rpc_auth *auth)
{
	dprintk("RPC: destroying NULL authenticator %p\n", auth);
	rpcauth_free_credcache(auth);
	rpc_free(auth);
}
static int start_rpc(int fd)
{
	LOG_ENTRY;
	struct rpc *rpc = NULL;

	rpc = rpc_alloc();
	if (!rpc) {
		RPC_ERROR("out of memory");
		goto fail;
	}

	if (rpc_init(fd, gps_rpc_handler, rpc)) {
		RPC_ERROR("failed to init RPC");
		goto fail;
	}

	if (rpc_start(rpc)) {
		RPC_ERROR("failed to start RPC");
		goto fail;
	}

	gps_rpc = rpc;
	LOG_EXIT;
	return 0;

fail:
	if (rpc) {
		rpc_free(rpc);
	}
	LOG_EXIT;
	return -1;
}
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
		  struct rpc_program *program, u32 vers, int flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	if (!xprt)
		goto out;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out;

	clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
	if (!clnt)
		goto out_no_clnt;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_server   = servname;
	clnt->cl_protname = program->name;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	INIT_RPC_WAITQ(&clnt->cl_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	rpc_init_rtt(&clnt->cl_rtt, xprt->timeout.to_initval);

	if (!rpcauth_create(flavor, clnt))
		goto out_no_auth;

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
	return clnt;

out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
	goto out;
out_no_auth:
	printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
		flavor);
	rpc_free(clnt);
	clnt = NULL;
	goto out;
}
void tsg_free(rdpTsg* tsg)
{
	if (tsg != NULL)
	{
		free(tsg->MachineName);
		rpc_free(tsg->rpc);
		free(tsg);
	}
}
void rpc_sv_free_tracked_objs(void* cl)
{
	rpc_client_state_t* c = (rpc_client_state_t*)cl;

	for (int i = 0; i < c->num_obj; i++) {
		rpc_free(c->obj[i]);
	}
	c->num_obj = 0;
}
void rpc_release_task(struct rpc_task *task)
{
	struct rpc_task	*next, *prev;

	dprintk("RPC: %4d release task\n", task->tk_pid);

	/* Remove from global task list */
	prev = task->tk_prev_task;
	next = task->tk_next_task;
	if (next)
		next->tk_prev_task = prev;
	if (prev)
		prev->tk_next_task = next;
	else
		all_tasks = next;

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_cred)
		rpcauth_releasecred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif

	if (task->tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %4d freeing task\n", task->tk_pid);
		task->tk_flags &= ~RPC_TASK_DYNAMIC;
		rpc_free(task);
	}
}
rpc_thread* rpc_thread_new(rpc_thread_func func)
{
	rpc_thread *th = rpc_new(rpc_thread, 1);
	pthread_t pid;

	th->func = func;

	/* pthread_create() returns 0 on success and a positive error
	 * number on failure; it never returns -1. */
	if (pthread_create(&pid, NULL, rpc_thread_inner, th) != 0) {
		rpc_free(th);
		return NULL;
	}

	th->pid = pid;
	th->finished = FALSE;	/* the worker has only just started */
	return th;
}
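The constructor above hands th to pthread_create() via rpc_thread_inner, which is not shown in this listing. The sketch below is a hypothetical reconstruction of that trampoline, included only to illustrate why finished should start out false; the assumption that rpc_thread_func is a zero-argument callback is mine, not the original code's.

/* Hypothetical sketch of the rpc_thread_inner trampoline passed to
 * pthread_create() above. Assumes rpc_thread_func takes no arguments;
 * the real signature is not shown in this listing. */
static void *rpc_thread_inner(void *arg)
{
	rpc_thread *th = (rpc_thread *)arg;

	th->func();		/* run the user-supplied worker */
	th->finished = TRUE;	/* completion flag for the owner to poll */
	return NULL;
}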
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
		clnt->cl_protname, clnt->cl_server);

	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	rpc_free(clnt);
	return 0;
}
/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sigmask_lock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	do {
		req = (struct nlm_rqst *) rpc_allocate(RPC_TASK_ASYNC, sizeof(*req));
	} while (req == NULL);
	req->a_host  = host;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);

	status = nlmclnt_async_call(req, NLMPROC_CANCEL, nlmclnt_cancel_callback);
	if (status < 0)
		rpc_free(req);

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	return status;
}
void rpc_response_free(rpc_response *rsp)
{
	memset(rsp, 0, sizeof(rpc_response));
	if (!rpc_array_add(rpc_response_freelist, rsp)) {
		rpc_free(rsp);
	}
}
/*
 * Destroy cred handle.
 */
static void
nul_destroy_cred(struct rpc_cred *cred)
{
	rpc_free(cred);
}
void rpc_request_free(rpc_request *req)
{
	memset(req, 0, sizeof(rpc_request));
	if (!rpc_array_add(rpc_request_freelist, req)) {
		rpc_free(req);
	}
}
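rpc_response_free() and rpc_request_free() above recycle objects onto a freelist rather than always releasing them. A matching allocation path would try the freelist first and only fall back to a fresh allocation. The following is a minimal sketch of that counterpart, under the assumption that rpc_array_pop() and rpc_malloc() exist with the obvious semantics; neither name appears in this listing.

/* Hypothetical allocation-side counterpart to rpc_request_free().
 * Assumes rpc_array_pop() returns NULL on an empty freelist and
 * rpc_malloc() is the allocator paired with rpc_free(). */
rpc_request *rpc_request_new(void)
{
	/* Prefer a recycled object; rpc_request_free() already zeroed it. */
	rpc_request *req = rpc_array_pop(rpc_request_freelist);

	if (req == NULL) {
		req = rpc_malloc(sizeof(rpc_request));
		if (req != NULL)
			memset(req, 0, sizeof(rpc_request));
	}
	return req;
}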
/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status;

	/* Always use NLM version 1 over UDP for now... */
	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), IPPROTO_UDP, 1)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sigmask_lock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((cmd == F_SETLK || cmd == F_SETLKW)
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);

		call = nlmclnt_alloc_call();
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
		call->a_flags = 0;
	}
	call->a_host = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (cmd == F_GETLK) {
		status = nlmclnt_test(call, fl);
	} else if ((cmd == F_SETLK || cmd == F_SETLKW) && fl->fl_type == F_UNLCK) {
		status = nlmclnt_unlock(call, fl);
	} else if (cmd == F_SETLK || cmd == F_SETLKW) {
		call->a_args.block = (cmd == F_SETLKW) ? 1 : 0;
		status = nlmclnt_lock(call, fl);
	} else {
		status = -EINVAL;
	}

	if (status < 0 && (call->a_flags & RPC_TASK_ASYNC))
		rpc_free(call);

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}