void xti_error(int fd, char *cmdname) { char error[100]; int n; snprintf(error, 100, "XTI error: %s", t_strerror(t_errno)); n = t_errno == TSYSERR ? errno : 0; if (fd >= 0) t_close(fd); unix_error(n, cmdname, copy_string(error)); }
/* * Takes a connection request off 'fd' in the form of a t_call structure * and returns a pointer to it. * Returns NULL on failure, else pointer to t_call structure on success. */ static struct t_call * get_new_conind(int fd) { struct t_call *call; debug_msg("Entering get_new_conind"); /* LINTED E_BAD_PTR_CAST_ALIGN */ if ((call = (struct t_call *)t_alloc(fd, T_CALL, T_ALL)) == NULL) { error_msg("t_alloc: %s", t_strerror(t_errno)); return (NULL); } if (t_listen(fd, call) < 0) { error_msg("t_listen: %s", t_strerror(t_errno)); (void) t_free((char *)call, T_CALL); return (NULL); } return (call); }
/* ARGSUSED1 */
int
_tx_error(const char *s, int api_semantics)
{
	int saved_errno = errno;	/* capture before write() can clobber it */
	const char *msg = t_strerror(t_errno);

	/* Optional caller-supplied prefix, as in perror(). */
	if (s != NULL && *s != '\0') {
		(void) write(2, s, strlen(s));
		(void) write(2, ": ", 2);
	}
	(void) write(2, msg, strlen(msg));

	/* For TSYSERR, append the underlying system error text. */
	if (t_errno == TSYSERR) {
		msg = strerror(saved_errno);
		(void) write(2, ": ", 2);
		(void) write(2, msg, strlen(msg));
	}
	(void) write(2, "\n", 1);

	return (0);
}
/*
 * Set option 'optname' at protocol level 'level' on endpoint 'fd' via the
 * XTI t_optmgmt(T_NEGOTIATE) interface.  'optval'/'optlen' give the option
 * value, which must fit in the local 256-byte staging buffer.
 * Returns -1 on failure, else 0.
 */
static int
tlx_setsockopt(int fd, int level, int optname, const void *optval,
    socklen_t optlen)
{
	struct t_optmgmt request, reply;
	struct {
		struct opthdr	sockopt;
		char		data[256];
	} optbuf;

	/*
	 * Fix: 'optval' is a pointer and must be printed with %p, not %x
	 * (which expects an unsigned int and truncates/misreads on LP64).
	 */
	debug_msg("Entering tlx_setsockopt, "
	    "fd: %d, level: %d, optname: %d, optval: %p, optlen: %d",
	    fd, level, optname, optval, (int)optlen);

	if (optlen > sizeof (optbuf.data)) {
		error_msg(gettext("t_optmgmt request too long"));
		return (-1);
	}

	/* Build the opthdr-plus-value request in the staging buffer. */
	optbuf.sockopt.level = level;
	optbuf.sockopt.name = optname;
	optbuf.sockopt.len = optlen;
	(void) memcpy(optbuf.data, optval, optlen);

	request.opt.len = sizeof (struct opthdr) + optlen;
	request.opt.buf = (char *)&optbuf;
	request.flags = T_NEGOTIATE;

	/* The reply reuses the same buffer; only maxlen matters on input. */
	reply.opt.maxlen = sizeof (struct opthdr) + optlen;
	reply.opt.buf = (char *)&optbuf;
	reply.flags = 0;

	if ((t_optmgmt(fd, &request, &reply) == -1) ||
	    (reply.flags != T_SUCCESS)) {
		error_msg("t_optmgmt: %s", t_strerror(t_errno));
		return (-1);
	}

	return (0);
}
/*
 * This call attempts to t_accept() an incoming/pending TLI connection.
 * If it is thwarted by a TLOOK, it is deferred and whatever is on the
 * file descriptor, removed after a t_look. (Incoming connect indications
 * get queued for later processing and disconnect indications remove a
 * queued connection request if a match is found).
 * Returns -1 on failure, else the fd of the newly accepted connection.
 */
int
tlx_accept(const char *fmri, tlx_info_t *tlx_info,
    struct sockaddr_storage *remote_addr)
{
	tlx_conn_ind_t	*conind;
	struct t_call	*call;
	int		fd;
	int		listen_fd = tlx_info->pr_info.listen_fd;

	debug_msg("Entering tlx_accept: instance: %s", fmri);

	/* Open a fresh endpoint on which to accept the connection. */
	if ((fd = t_open(tlx_info->dev_name, O_RDWR, NULL)) == -1) {
		error_msg("t_open: %s", t_strerror(t_errno));
		return (-1);
	}

	if (tlx_info->pr_info.v6only) {
		int	on = 1;

		/* restrict to IPv6 communications only */
		if (tlx_setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on,
		    sizeof (on)) == -1) {
			(void) t_close(fd);
			return (-1);
		}
	}

	if (t_bind(fd, NULL, NULL) == -1) {
		error_msg("t_bind: %s", t_strerror(t_errno));
		(void) t_close(fd);
		return (-1);
	}

	/*
	 * Get the next connection indication - first try the pending
	 * queue, then, if none there, get a new one from the file
	 * descriptor.
	 */
	if ((conind = uu_list_first(tlx_info->conn_ind_queue)) != NULL) {
		debug_msg("taking con off queue");
		call = conind->call;
	} else if ((call = get_new_conind(listen_fd)) == NULL) {
		(void) t_close(fd);
		return (-1);
	}

	/*
	 * Accept the connection indication on the newly created endpoint.
	 * If we fail, and it's the result of a tlook, queue the indication
	 * if it isn't already, and go and process the t_look.
	 */
	if (t_accept(listen_fd, fd, call) == -1) {
		if (t_errno == TLOOK) {
			if (uu_list_first(tlx_info->conn_ind_queue) == NULL) {
				/*
				 * We are first one to have to defer accepting
				 * and start the pending connections list.
				 */
				if (queue_conind(tlx_info->conn_ind_queue,
				    call) == -1) {
					error_msg(gettext(
					    "Failed to queue connection "
					    "indication for instance %s"),
					    fmri);
					(void) t_free((char *)call, T_CALL);
					/*
					 * Fix: close the accepting endpoint
					 * on this error path too; it was
					 * previously leaked here.
					 */
					(void) t_close(fd);
					return (-1);
				}
			}
			(void) process_tlook(fmri, tlx_info);
		} else {		/* non-TLOOK accept failure */
			error_msg("%s: %s", "t_accept failed",
			    t_strerror(t_errno));
			/*
			 * If we were accepting a queued connection, dequeue
			 * it.
			 */
			if (uu_list_first(tlx_info->conn_ind_queue) != NULL)
				(void) dequeue_conind(
				    tlx_info->conn_ind_queue);
			(void) t_free((char *)call, T_CALL);
		}

		(void) t_close(fd);
		return (-1);
	}

	/* Copy remote address into address parameter */
	(void) memcpy(remote_addr, call->addr.buf,
	    MIN(call->addr.len, sizeof (*remote_addr)));

	/* If we were accepting a queued connection, dequeue it. */
	if (uu_list_first(tlx_info->conn_ind_queue) != NULL)
		(void) dequeue_conind(tlx_info->conn_ind_queue);

	(void) t_free((char *)call, T_CALL);

	return (fd);
}
/*
 * Handle a TLOOK notification received during a t_accept() call.
 * Returns -1 on failure, else 0.
 */
static int
process_tlook(const char *fmri, tlx_info_t *tlx_info)
{
	int	event;
	int	fd = tlx_info->pr_info.listen_fd;

	debug_msg("Entering process_tlook:");

	switch (event = t_look(fd)) {
	case T_LISTEN: {
		struct t_call *call;

		debug_msg("process_tlook: T_LISTEN event");
		/* Queue the new connect indication for later acceptance. */
		if ((call = get_new_conind(fd)) == NULL)
			return (-1);
		if (queue_conind(tlx_info->conn_ind_queue, call) == -1) {
			error_msg(gettext("Failed to queue connection "
			    "indication for instance %s"), fmri);
			(void) t_free((char *)call, T_CALL);
			return (-1);
		}
		break;
	}
	case T_DISCONNECT: {
		/*
		 * Note: In Solaris 2.X (SunOS 5.X) bundled
		 * connection-oriented transport drivers
		 * [ e.g /dev/tcp and /dev/ticots and
		 * /dev/ticotsord (tl)] we do not send disconnect
		 * indications to listening endpoints.
		 * So this will not be seen with endpoints on Solaris
		 * bundled transport devices. However, Streams TPI
		 * allows for this (broken?) behavior and so we account
		 * for it here because of the possibility of unbundled
		 * transport drivers causing this.
		 */
		tlx_conn_ind_t	*cip;
		struct t_discon	*discon;

		debug_msg("process_tlook: T_DISCONNECT event");

		/* LINTED */
		if ((discon = (struct t_discon *)
		    t_alloc(fd, T_DIS, T_ALL)) == NULL) {
			error_msg("t_alloc: %s", t_strerror(t_errno));
			return (-1);
		}

		if (t_rcvdis(fd, discon) < 0) {
			error_msg("t_rcvdis: %s", t_strerror(t_errno));
			(void) t_free((char *)discon, T_DIS);
			return (-1);
		}

		/*
		 * Find any queued connection pending that matches this
		 * disconnect notice and remove from the pending queue.
		 */
		cip = uu_list_first(tlx_info->conn_ind_queue);
		while ((cip != NULL) &&
		    (cip->call->sequence != discon->sequence)) {
			cip = uu_list_next(tlx_info->conn_ind_queue, cip);
		}
		if (cip != NULL) {	/* match found */
			uu_list_remove(tlx_info->conn_ind_queue, cip);
			(void) t_free((char *)cip->call, T_CALL);
			free(cip);
		}

		(void) t_free((char *)discon, T_DIS);
		break;
	}
	case -1:
		/*
		 * Fix: t_errno is an integer code and was previously passed
		 * directly for a %s conversion (undefined behavior); it must
		 * be translated through t_strerror() first.
		 */
		error_msg("t_look: %s", t_strerror(t_errno));
		return (-1);
	default:
		error_msg(gettext("do_tlook: unexpected t_look event: %d"),
		    event);
		return (-1);
	}

	return (0);
}
/*
 * Create a tli/xti endpoint, either bound to the address specified in
 * 'instance' for non-RPC services, else a kernel chosen address.
 * Returns -1 on failure, else the file descriptor of the bound endpoint.
 * NOTE(review): this header previously claimed "else 0", but the function
 * returns the endpoint's fd (see the final return).
 */
int
create_bound_endpoint(const instance_t *inst, tlx_info_t *tlx_info)
{
	int fd;
	int qlen;
	const char *fmri = inst->fmri;
	struct netbuf *reqaddr;
	struct netbuf *retaddr;
	struct netbuf netbuf;	/* scratch netbuf for the non-RPC case */
	struct sockaddr_storage ss;
	rpc_info_t *rpc = tlx_info->pr_info.ri;

	debug_msg("Entering create_bound_endpoint");

	if ((fd = t_open(tlx_info->dev_name, O_RDWR, NULL)) == -1) {
		error_msg(gettext("Failed to open transport %s for "
		    "instance %s, proto %s: %s"), tlx_info->dev_name, fmri,
		    tlx_info->pr_info.proto, t_strerror(t_errno));
		return (-1);
	}

	if (tlx_info->pr_info.v6only) {
		int on = 1;

		/* restrict to IPv6 communications only */
		if (tlx_setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on,
		    sizeof (on)) == -1) {
			(void) t_close(fd);
			return (-1);
		}
	}

	/*
	 * Negotiate for the returning of the remote uid for loopback
	 * transports for RPC services. This needs to be done before the
	 * endpoint is bound using t_bind(), so that any requests to it
	 * contain the uid.
	 */
	if ((rpc != NULL) && (rpc->is_loopback))
		svc_fd_negotiate_ucred(fd);

	/*
	 * Bind the service's address to the endpoint and setup connection
	 * backlog. In the case of RPC services, we specify a NULL requested
	 * address and accept what we're given, storing the returned address
	 * for later RPC binding. In the case of non-RPC services we specify
	 * the service's associated address.
	 */
	if (rpc != NULL) {
		reqaddr = NULL;
		retaddr = &(rpc->netbuf);
	} else {
		reqaddr = &(tlx_info->local_addr);
		netbuf.buf = (char *)&ss;
		netbuf.maxlen = sizeof (ss);
		retaddr = &netbuf;
	}

	/* ignored for conn/less services */
	qlen = inst->config->basic->conn_backlog;

	/*
	 * For a non-NULL requested address the bind must come back with
	 * exactly that address, else the endpoint is unusable for this
	 * service and is torn down.
	 */
	if ((tlx_bind(fd, reqaddr, retaddr, qlen) == -1) ||
	    ((reqaddr != NULL) && !netbufs_equal(reqaddr, retaddr))) {
		error_msg(gettext("Failed to bind to the requested address "
		    "for instance %s, proto %s"), fmri,
		    tlx_info->pr_info.proto);
		(void) t_close(fd);
		return (-1);
	}

	return (fd);
}
/*
 * We use our own private version of svc_create() which registers our services
 * only on loopback transports and enables an option whereby Solaris ucreds
 * are associated with each connection, permitting us to check privilege bits.
 *
 * Returns the number of transports (TLI endpoints plus an optional door)
 * registered, or a negative fmd error via fmd_set_errno() on failure.
 */
static int
fmd_rpc_svc_create_local(void (*disp)(struct svc_req *, SVCXPRT *),
    rpcprog_t prog, rpcvers_t vers, uint_t ssz, uint_t rsz, int force)
{
	struct netconfig *ncp;
	struct netbuf buf;
	SVCXPRT *xprt;
	void *hdl;
	int fd, n = 0;	/* n counts successful registrations */
	char door[PATH_MAX];
	time_t tm;

	if ((hdl = setnetconfig()) == NULL) {
		fmd_error(EFMD_RPC_REG, "failed to iterate over "
		    "netconfig database: %s\n", nc_sperror());
		return (fmd_set_errno(EFMD_RPC_REG));
	}

	if (force)
		svc_unreg(prog, vers); /* clear stale rpcbind registrations */

	/* Scratch buffer for probing existing rpcbind registrations. */
	buf.buf = alloca(_SS_MAXSIZE);
	buf.maxlen = _SS_MAXSIZE;
	buf.len = 0;

	while ((ncp = getnetconfig(hdl)) != NULL) {
		/* Only register on loopback transports. */
		if (strcmp(ncp->nc_protofmly, NC_LOOPBACK) != 0)
			continue;

		/* Unless forced, refuse to usurp an existing registration. */
		if (!force && rpcb_getaddr(prog, vers, ncp, &buf, HOST_SELF)) {
			(void) endnetconfig(hdl);
			return (fmd_set_errno(EFMD_RPC_BOUND));
		}

		if ((fd = t_open(ncp->nc_device, O_RDWR, NULL)) == -1) {
			fmd_error(EFMD_RPC_REG, "failed to open %s: %s\n",
			    ncp->nc_device, t_strerror(t_errno));
			continue;
		}

		svc_fd_negotiate_ucred(fd); /* enable ucred option on xprt */

		/* svc_tli_create() owns 'fd' on success; close only on fail */
		if ((xprt = svc_tli_create(fd, ncp, NULL, ssz, rsz)) == NULL) {
			(void) t_close(fd);
			continue;
		}

		if (svc_reg(xprt, prog, vers, disp, ncp) == FALSE) {
			fmd_error(EFMD_RPC_REG, "failed to register "
			    "rpc service on %s\n", ncp->nc_netid);
			svc_destroy(xprt);
			continue;
		}

		n++;
	}

	(void) endnetconfig(hdl);

	/*
	 * If we failed to register services (n == 0) because rpcbind is down,
	 * then check to see if the RPC door file exists before attempting an
	 * svc_door_create(), which cleverly destroys any existing door file.
	 * The RPC APIs have no stable errnos, so we use rpcb_gettime() as a
	 * hack to determine if rpcbind itself is down.
	 */
	if (!force && n == 0 && rpcb_gettime(HOST_SELF, &tm) == FALSE &&
	    snprintf(door, sizeof (door), RPC_DOOR_RENDEZVOUS,
	    prog, vers) > 0 && access(door, F_OK) == 0)
		return (fmd_set_errno(EFMD_RPC_BOUND));

	/*
	 * Attempt to create a door server for the RPC program as well. Limit
	 * the maximum request size for the door transport to the receive size.
	 */
	if ((xprt = svc_door_create(disp, prog, vers, ssz)) == NULL) {
		fmd_error(EFMD_RPC_REG, "failed to create door for "
		    "rpc service 0x%lx/0x%lx\n", prog, vers);
	} else {
		(void) svc_control(xprt, SVCSET_CONNMAXREC, &rsz);
		n++;
	}

	return (n);
}