/** unblock one signal, so we can catch it */
void ub_thread_sig_unblock(int sig)
{
#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK)
#  if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS)
	int err;
#  endif
	sigset_t sigset;
	sigemptyset(&sigset);
	sigaddset(&sigset, sig);
#ifdef HAVE_PTHREAD
	if((err=pthread_sigmask(SIG_UNBLOCK, &sigset, NULL)))
		fatal_exit("pthread_sigmask: %s", strerror(err));
#else
#  ifdef HAVE_SOLARIS_THREADS
	if((err=thr_sigsetmask(SIG_UNBLOCK, &sigset, NULL)))
		fatal_exit("thr_sigsetmask: %s", strerror(err));
#  else
	/* have nothing, do single thread case */
	if(sigprocmask(SIG_UNBLOCK, &sigset, NULL))
		fatal_exit("sigprocmask: %s", strerror(errno));
#  endif /* HAVE_SOLARIS_THREADS */
#endif /* HAVE_PTHREAD */
#else
	(void)sig;
#endif /* have signal stuff */
}
static void
system_activity_monitor(void)
{
	struct sigaction act;
	sigset_t sigmask;

	/*
	 * Setup for gathering system's statistics.
	 */
	sysstat_init();

	/*
	 * In addition to the SIGQUIT, SIGINT and SIGTERM signals already
	 * being handled, this thread also needs to handle SIGHUP, SIGALRM
	 * and SIGTHAW signals.
	 */
	(void) sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	act.sa_handler = alarm_handler;
	(void) sigaction(SIGALRM, &act, NULL);
	act.sa_handler = work_handler;
	(void) sigaction(SIGHUP, &act, NULL);
	act.sa_handler = thaw_handler;
	(void) sigaction(SIGTHAW, &act, NULL);

	/*
	 * Invoke work_handler with a dummy SIGHUP signal to read
	 * cpr config file, get autoshutdown properties and schedule
	 * an alarm if needed.
	 */
	work_handler(SIGHUP);

	/*
	 * Wait for signal to read file
	 */
	(void) thr_sigsetmask(0, 0, &sigmask);
	(void) sigdelset(&sigmask, SIGHUP);
	(void) sigdelset(&sigmask, SIGALRM);
	(void) sigdelset(&sigmask, SIGTHAW);
	(void) thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
	do {
		(void) sigsuspend(&sigmask);
	} while (errno == EINTR);
}
void VMError::reset_signal_handlers() {
  // install signal handlers for all synchronous program error signals
  sigset_t newset;
  sigemptyset(&newset);

  for (int i = 0; i < NUM_SIGNALS; i++) {
    save_signal(i, SIGNALS[i]);
    os::signal(SIGNALS[i], CAST_FROM_FN_PTR(void *, crash_handler));
    sigaddset(&newset, SIGNALS[i]);
  }
  thr_sigsetmask(SIG_UNBLOCK, &newset, NULL);
}
static struct clnt_ops *
clnt_vc_ops(void)
{
	static struct clnt_ops ops;
	sigset_t mask, newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_vc_call;
		ops.cl_abort = clnt_vc_abort;
		ops.cl_geterr = clnt_vc_geterr;
		ops.cl_freeres = clnt_vc_freeres;
		ops.cl_destroy = clnt_vc_destroy;
		ops.cl_control = clnt_vc_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	return (&ops);
}
static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  // also unmask other synchronous signals
  for (int i = 0; i < NUM_SIGNALS; i++) {
    sigaddset(&newset, SIGNALS[i]);
  }
  thr_sigsetmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(NULL, sig, NULL, info, ucVoid);
  err.report_and_die();
}
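The handler above relies on the fact that the kernel blocks a signal while its handler runs; a crash handler that wants the default action (core dump) on re-raise must unblock the signal first. A minimal standalone sketch of that idea (my example, using single-threaded sigprocmask() rather than the VM's thr_sigsetmask()):

#include <signal.h>
#include <string.h>

static void
crash(int sig)
{
	sigset_t set;

	/* restore default disposition, then unblock the signal */
	signal(sig, SIG_DFL);
	sigemptyset(&set);
	sigaddset(&set, sig);
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* pthread_sigmask() in MT code */

	raise(sig);	/* now terminates with the default action */
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof (sa));
	sa.sa_handler = crash;
	sigaction(SIGSEGV, &sa, NULL);
	raise(SIGSEGV);
	return (0);
}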
void
switch_resolver_reset(int mt_disabled, sigset_t oldmask, int old_retry)
{
	if (mt_disabled) {
		(void) mutex_unlock(&one_lane);
		(void) thr_sigsetmask(SIG_SETMASK, &oldmask, NULL);
	} else {
		(void) (*disable_mt)();
	}

	(*unset_no_hosts_fallback)();
	(void) (*override_retry)(old_retry);
}
/*
 * Thread initialization. Mask out all signals we want our
 * signal handler to handle for us from any other threads.
 */
static void
thr_init(void)
{
	sigset_t sigset;
	long thr_flags = (THR_NEW_LWP|THR_DAEMON);

	/*
	 * Before we kick off any other threads, mask out desired
	 * signals from main thread so that any subsequent threads
	 * don't receive said signals.
	 */
	(void) thr_sigsetmask(NULL, NULL, &sigset);
	(void) sigaddset(&sigset, SIGHUP);
	(void) sigaddset(&sigset, SIGTERM);
	(void) sigaddset(&sigset, SIGINT);
	(void) thr_sigsetmask(SIG_SETMASK, &sigset, NULL);

	if (thr_create(NULL, 0, sig_handler, 0, thr_flags, &sig_thread)) {
		syslog(LOG_ERR,
		    gettext("Failed to create signal handling thread"));
		exit(4);
	}
}
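The sig_handler() thread created above is not shown in this section. A hypothetical sketch of the conventional shape of such a thread: the signals stay blocked in every thread, and the dedicated thread collects them synchronously. reload_config() and clean_exit() are placeholders of mine, and the two-argument POSIX form of sigwait() is assumed:

#include <signal.h>

static void *
sig_handler(void *arg)
{
	sigset_t set;
	int sig;

	(void) sigemptyset(&set);
	(void) sigaddset(&set, SIGHUP);
	(void) sigaddset(&set, SIGTERM);
	(void) sigaddset(&set, SIGINT);

	for (;;) {
		if (sigwait(&set, &sig) != 0)	/* assumed POSIX two-arg form */
			continue;
		if (sig == SIGHUP)
			reload_config();	/* hypothetical helper */
		else
			clean_exit(sig);	/* hypothetical helper */
	}
	/* NOTREACHED */
	return (NULL);
}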
/*
 * Get a free event and check to see if more are needed.
 */
robo_event_t *
get_free_event(
	library_t *library)
{
	robo_event_t *ret;
	char *ent_pnt = "get_free_event";

	mutex_lock(&library->free_mutex);
	if (library->free_count < 20 && !library->inc_free_running) {
		sigset_t signal_set;

		(void) sigemptyset(&signal_set);
		(void) sigaddset(&signal_set, SIGEMT);
		library->inc_free_running++;
		thr_sigsetmask(SIG_BLOCK, &signal_set, NULL);
		thr_create(NULL, MD_THR_STK, &inc_free, (void *)library,
		    (THR_DETACHED | THR_BOUND), NULL);
		thr_sigsetmask(SIG_UNBLOCK, &signal_set, NULL);
		thr_yield();
	}

	while (library->free_count <= 0) {
		mutex_unlock(&library->free_mutex);
		if (DBG_LVL(SAM_DBG_DEBUG))
			sam_syslog(LOG_DEBUG, "%s: Waiting for free event.",
			    ent_pnt);
		sleep(2);
		mutex_lock(&library->free_mutex);
	}

	ret = library->free;
	ETRACE((LOG_NOTICE, "EV:LfGf: %#x.", ret));
	library->free_count--;
	library->free = ret->next;
	mutex_unlock(&library->free_mutex);
	return (ret);
}
static void
clnt_dg_destroy(CLIENT *clnt)
{
	struct cx_data *cx = (struct cx_data *)clnt->cl_p1;
	struct rpc_dplx_rec *rec = (struct rpc_dplx_rec *)clnt->cl_p2;
	int cu_fd = CU_DATA(cx)->cu_fd;
	sigset_t mask, newmask;

	/* Handle our own signal mask here, the signal section is
	 * larger than the wait (not 100% clear why) */
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);

	/* barrier both channels */
	rpc_dplx_swc(clnt, rpc_flag_clear);
	rpc_dplx_rwc(clnt, rpc_flag_clear);

	if (CU_DATA(cx)->cu_closeit)
		(void)close(cu_fd);

	XDR_DESTROY(&(CU_DATA(cx)->cu_outxdrs));

	/* signal both channels */
	rpc_dplx_ssc(clnt, RPC_DPLX_FLAG_NONE);
	rpc_dplx_rsc(clnt, RPC_DPLX_FLAG_NONE);

	/* release */
	rpc_dplx_unref(rec, RPC_DPLX_FLAG_NONE);
	free_cx_data(cx);

	if (clnt->cl_netid && clnt->cl_netid[0])
		mem_free(clnt->cl_netid, strlen(clnt->cl_netid) + 1);
	if (clnt->cl_tp && clnt->cl_tp[0])
		mem_free(clnt->cl_tp, strlen(clnt->cl_tp) + 1);
	mem_free(clnt, sizeof (CLIENT));

	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
}
static bool_t
clnt_vc_freeres(
	CLIENT *cl,
	xdrproc_t xdr_res,
	caddr_t res_ptr
)
{
	struct ct_data *ct;
	XDR *xdrs;
	bool_t dummy;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;
	xdrs = &(ct->ct_xdrs);

	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
#endif
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct->ct_fd]);

	return dummy;
}
int
sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
	int error;

	/*
	 * Guard against children of vfork().
	 */
	if (curthread->ul_vfork)
		return (__sigprocmask(how, set, oset));

	if ((error = thr_sigsetmask(how, set, oset)) != 0) {
		errno = error;
		return (-1);
	}
	return (0);
}
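A small usage sketch of the convention this wrapper preserves (my example, not library source): callers see the classic sigprocmask() contract of 0 on success or -1 with errno set, even though thr_sigsetmask() reports errors through its return value:

#include <errno.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	if (sigprocmask(SIG_BLOCK, &set, &old) == 0)
		printf("SIGINT was %sblocked before\n",
		    sigismember(&old, SIGINT) ? "" : "not ");

	/* an invalid `how' exercises the error path: -1 with errno set */
	if (sigprocmask(-1, &set, NULL) == -1 && errno == EINVAL)
		printf("error path reported via errno, as expected\n");
	return (0);
}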
/** block all signals, masks them away. */
void ub_thread_blocksigs(void)
{
#if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS) || defined(HAVE_SIGPROCMASK)
#  if defined(HAVE_PTHREAD) || defined(HAVE_SOLARIS_THREADS)
	int err;
#  endif
	sigset_t sigset;
	sigfillset(&sigset);
#ifdef HAVE_PTHREAD
	if((err=pthread_sigmask(SIG_SETMASK, &sigset, NULL)))
		fatal_exit("pthread_sigmask: %s", strerror(err));
#else
#  ifdef HAVE_SOLARIS_THREADS
	if((err=thr_sigsetmask(SIG_SETMASK, &sigset, NULL)))
		fatal_exit("thr_sigsetmask: %s", strerror(err));
#  else
	/* have nothing, do single process signal mask */
	if(sigprocmask(SIG_SETMASK, &sigset, NULL))
		fatal_exit("sigprocmask: %s", strerror(errno));
#  endif /* HAVE_SOLARIS_THREADS */
#endif /* HAVE_PTHREAD */
#endif /* have signal stuff */
}
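A usage sketch for the two unbound helpers above (ub_thread_blocksigs() and ub_thread_sig_unblock()), assuming the HAVE_PTHREAD configuration; the worker function and the choice of SIGALRM are mine for illustration:

#include <signal.h>

/* worker thread body: catch only SIGALRM, nothing else */
static void *
worker(void *arg)
{
	ub_thread_blocksigs();			/* mask every signal first */
	ub_thread_sig_unblock(SIGALRM);		/* then allow just this one */
	/* ... event loop that is the only recipient of SIGALRM ... */
	return (arg);
}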
static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct;
	void *infop = info;
	sigset_t mask;
	sigset_t newmask;
	int rpc_lock_value;

	assert(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
		    htonl(*((u_int32_t *)info) + 1);
		/* increment by 1 as clnt_vc_call() decrements once */
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;
	case CLSET_VERS:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)info);
		break;
	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;
	case CLSET_PROG:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)info);
		break;
	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}
/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control()'s.
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set null authentication. Caller may wish to
 * set this something more useful.
 *
 * fd should be an open socket
 *
 * fd - open file descriptor
 * raddr - servers address
 * prog - program number
 * vers - version number
 * sendsz - buffer send size
 * recvsz - buffer recv size
 */
CLIENT *
clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
    const rpcvers_t vers, u_int sendsz, u_int recvsz)
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* client handle */
	struct timeval now;
	struct rpc_msg call_msg;
	static u_int32_t disrupt;
	sigset_t mask;
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	if (disrupt == 0)
		disrupt = (u_int32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
		(void) syslog(LOG_ERR, clnt_vc_errstr,
		    clnt_vc_str, __no_mem_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto err;
	}
	ct->ct_addr.buf = NULL;
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == (int *) NULL) {
		int cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = (int *) mem_alloc(fd_allocsz);
		if (vc_fd_locks == (int *) NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		assert(vc_cv == (cond_t *) NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = (cond_t *) mem_alloc(cv_allocsz);
		if (vc_cv == (cond_t *) NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = (int *) NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		assert(vc_cv != (cond_t *) NULL);

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
		if (_connect(fd, (struct sockaddr *)raddr->buf,
		    raddr->len) < 0) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto err;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto err;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc(raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto err;
	memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	(void)gettimeofday(&now, NULL);
	call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)_close(fd);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));
	assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	cl->cl_ops = clnt_vc_ops();
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    cl->cl_private, read_vc, write_vc);
	return (cl);

err:
	if (ct) {
		if (ct->ct_addr.len)
			mem_free(ct->ct_addr.buf, ct->ct_addr.len);
		mem_free(ct, sizeof (struct ct_data));
	}
	if (cl)
		mem_free(cl, sizeof (CLIENT));
	return ((CLIENT *)NULL);
}
static enum clnt_stat
clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
    xdrproc_t xdr_results, void *results_ptr, struct timeval timeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;	/* yuk */
	bool_t shipnow;
	int refreshes = 2;
	sigset_t mask, newmask;
	int rpc_lock_value;
	bool_t reply_stat;

	assert(cl != NULL);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	if (__isthreaded)
		rpc_lock_value = 1;
	else
		rpc_lock_value = 0;
	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0 &&
	    timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
		    (! XDR_PUTINT32(xdrs, &proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! (*xdr_args)(xdrs, args_ptr))) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	} else {
		*(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
		if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
		    ct->ct_mpos + sizeof(uint32_t),
		    xdrs, xdr_args, args_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTENCODEARGS;
			(void)xdrrec_endofrecord(xdrs, TRUE);
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_TIMEDOUT);
	}

	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	while (TRUE) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(cl->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else {
			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
				reply_stat = (*xdr_results)(xdrs, results_ptr);
			} else {
				reply_stat = __rpc_gss_unwrap(cl->cl_auth,
				    xdrs, xdr_results, results_ptr);
			}
			if (! reply_stat) {
				if (ct->ct_error.re_status == RPC_SUCCESS)
					ct->ct_error.re_status =
					    RPC_CANTDECODERES;
			}
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}  /* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
	}  /* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}
int
main(int argc, char *argv[])
{
	pid_t pid;
	int pm_fd;
	struct sigaction act;
	sigset_t sigmask;
	int c;
	char errmsg[PATH_MAX + 64];
	int pid_fd;

	prog = argv[0];
	if (geteuid() != 0) {
		(void) fprintf(stderr, "%s: Must be root\n", prog);
		exit(EXIT_FAILURE);
	}

	if ((pid_fd = open_pidfile(prog)) == -1)
		exit(EXIT_FAILURE);

	/*
	 * Process options
	 */
	broadcast = 1;
	while ((c = getopt(argc, argv, "n")) != EOF) {
		switch (c) {
		case 'n':
			broadcast = 0;
			break;
		case '?':
			(void) fprintf(stderr, "Usage: %s [-n]\n", prog);
			exit(EXIT_FAILURE);
		}
	}

	pm_fd = open(PM, O_RDWR);
	if (pm_fd == -1) {
		(void) sprintf(errmsg, "%s: %s", prog, PM);
		perror(errmsg);
		exit(EXIT_FAILURE);
	}
	(void) close(pm_fd);

	/*
	 * Initialize mutex lock used to ensure only one command
	 * runs at a time.
	 */
	if (mutex_init(&poweroff_mutex, USYNC_THREAD, NULL) != 0) {
		(void) fprintf(stderr,
		    "%s: Unable to initialize mutex lock\n", prog);
		exit(EXIT_FAILURE);
	}

	if ((info = (pwr_info_t *)malloc(sizeof (pwr_info_t))) == NULL) {
		(void) sprintf(errmsg, "%s: malloc", prog);
		perror(errmsg);
		exit(EXIT_FAILURE);
	}

	/*
	 * Daemon is set to go...
	 */
	if ((pid = fork()) < 0)
		exit(EXIT_FAILURE);
	else if (pid != 0)
		exit(EXIT_SUCCESS);

	pid = getpid();
	openlog(prog, 0, LOG_DAEMON);
	if (write_pidfile(pid_fd, pid) == -1)	/* logs errors on failure */
		exit(EXIT_FAILURE);
	(void) close(pid_fd);

	/*
	 * Close all the parent's file descriptors (Bug 1225843).
	 */
	closefrom(0);
	(void) setsid();
	(void) chdir("/");
	(void) umask(0);
#ifdef DEBUG
	/*
	 * Connect stdout to the console.
	 */
	if (dup2(open("/dev/console", O_WRONLY|O_NOCTTY), 1) == -1) {
		logerror("Unable to connect to the console.");
	}
#endif
	info->pd_flags = PD_AC;
	info->pd_idle_time = -1;
	info->pd_start_time = 0;
	info->pd_finish_time = 0;

	/*
	 * Allow SIGQUIT, SIGINT and SIGTERM signals to terminate us
	 * any time
	 */
	act.sa_handler = kill_handler;
	(void) sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	(void) sigaction(SIGQUIT, &act, NULL);
	(void) sigaction(SIGINT, &act, NULL);
	(void) sigaction(SIGTERM, &act, NULL);

	(void) sigfillset(&sigmask);
	(void) sigdelset(&sigmask, SIGQUIT);
	(void) sigdelset(&sigmask, SIGINT);
	(void) sigdelset(&sigmask, SIGTERM);
	(void) thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);

	/*
	 * If "power_button" device node can be opened, create a new
	 * thread to monitor the power button.
	 */
	if ((pb_fd = open(PB, O_RDONLY)) != -1) {
		if (thr_create(NULL, NULL,
		    (void *(*)(void *))power_button_monitor, NULL,
		    THR_DAEMON, NULL) != 0) {
			logerror("Unable to monitor system's power button.");
		}
	}

#ifdef sparc
	do_attach();
#endif

	/*
	 * Create a new thread to monitor system activity and suspend
	 * system if idle.
	 */
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))system_activity_monitor, NULL,
	    THR_DAEMON, NULL) != 0) {
		logerror("Unable to create thread to monitor system "
		    "activity.");
	}

	/*
	 * Block until we receive an explicit terminate signal
	 */
	(void) sigsuspend(&sigmask);

	return (1);
}
int
_tx_bind(
	int fd,
	const struct t_bind *req,
	struct t_bind *ret,
	int api_semantics
)
{
	struct T_bind_req *bind_reqp;
	struct T_bind_ack *bind_ackp;
	int size, sv_errno, retlen;
	struct _ti_user *tiptr;
	sigset_t mask;
	int didalloc;
	int use_xpg41tpi;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * We block all signals since TI_BIND, which sends a TPI message
	 * O_T_BIND_REQ down, is not an idempotent operation.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing for TLI may break existing applications
		 */
		if (tiptr->ti_state != T_UNBND) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			return (-1);
		}
	}
	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_bind_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	bind_reqp = (struct T_bind_req *)ctlbuf.buf;
	size = (int)sizeof (struct T_bind_req);

	use_xpg41tpi = (_T_IS_XTI(api_semantics)) &&
	    ((tiptr->ti_prov_flag & XPG4_1) != 0);
	if (use_xpg41tpi)
		/* XTI call and provider knows the XTI inspired TPI */
		bind_reqp->PRIM_type = T_BIND_REQ;
	else
		/* TLI caller old TPI provider */
		bind_reqp->PRIM_type = O_T_BIND_REQ;

	bind_reqp->ADDR_length = (req == NULL? 0: req->addr.len);
	bind_reqp->ADDR_offset = 0;
	bind_reqp->CONIND_number = (req == NULL? 0: req->qlen);
	if (bind_reqp->ADDR_length) {
		if (_t_aligned_copy(&ctlbuf, (int)bind_reqp->ADDR_length,
		    size, req->addr.buf, &bind_reqp->ADDR_offset) < 0) {
			/*
			 * Aligned copy will overflow buffer allocated based
			 * on transport maximum address length.
			 * return error.
			 */
			t_errno = TBADADDR;
			goto err_out;
		}
		size = bind_reqp->ADDR_offset + bind_reqp->ADDR_length;
	}

	if (_t_do_ioctl(fd, ctlbuf.buf, size, TI_BIND, &retlen) < 0) {
		goto err_out;
	}

	if (retlen < (int)sizeof (struct T_bind_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	bind_ackp = (struct T_bind_ack *)ctlbuf.buf;

	if ((req != NULL) && req->addr.len != 0 &&
	    (use_xpg41tpi == 0) && (_T_IS_XTI(api_semantics))) {
		/*
		 * Best effort to do XTI on old TPI.
		 *
		 * Match address requested or unbind and fail with
		 * TADDRBUSY.
		 *
		 * XXX - Hack alert ! Should we do this at all ?
		 * Not "supported" as may not work if encoding of
		 * address is different in the returned address. This
		 * will also have trouble with TCP/UDP wildcard port
		 * requests
		 */
		if ((req->addr.len != bind_ackp->ADDR_length) ||
		    (memcmp(req->addr.buf, ctlbuf.buf +
		    bind_ackp->ADDR_offset, req->addr.len) != 0)) {
			(void) _tx_unbind_locked(fd, tiptr, &ctlbuf);
			t_errno = TADDRBUSY;
			goto err_out;
		}
	}

	tiptr->ti_ocnt = 0;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	_T_TX_NEXTSTATE(T_BIND, tiptr, "t_bind: invalid state event T_BIND");

	if (ret != NULL) {
		if (_T_IS_TLI(api_semantics) || ret->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(bind_reqp->ADDR_length,
			    ret->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(ret->addr.buf, ctlbuf.buf +
			    bind_ackp->ADDR_offset,
			    (size_t)bind_ackp->ADDR_length);
			ret->addr.len = bind_ackp->ADDR_length;
		}
		ret->qlen = bind_ackp->CONIND_number;
	}

	tiptr->ti_qlen = (uint_t)bind_ackp->CONIND_number;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (0);
	/* NOTREACHED */

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	errno = sv_errno;
	return (-1);
}
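The discipline in _tx_bind() above, save the caller's mask, block everything across the non-idempotent TPI exchange, and restore on every exit path, can be distilled into a generic helper. A sketch under the assumption of Solaris threads (<thread.h>); the helper name is mine, not libnsl's:

#include <signal.h>
#include <thread.h>

/* run op(arg) with every signal blocked; restore the old mask after */
static int
with_all_signals_blocked(int (*op)(void *), void *arg)
{
	sigset_t fill, saved;
	int rc;

	(void) sigfillset(&fill);
	(void) thr_sigsetmask(SIG_SETMASK, &fill, &saved);
	rc = op(arg);	/* e.g. a TPI ioctl that must not be interrupted */
	(void) thr_sigsetmask(SIG_SETMASK, &saved, NULL);
	return (rc);
}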
/*
 * monitor_msg - thread routine to monitor messages.
 */
void *
monitor_msg(
	void *vlibrary)
{
	int exit_status = 0;
	sigset_t signal_set;
	library_t *library = (library_t *)vlibrary;
	robo_event_t *current_event;
	struct sigaction sig_action;
	message_request_t *message, shutdown;
	enum sam_mess_type mtype;

	/* dummy up a shutdown message */
	(void) memset(&shutdown, 0, sizeof (message_request_t));
	(void) memset(&sig_action, 0, sizeof (struct sigaction));
	shutdown.mtype = MESS_MT_SHUTDOWN;
	/* LINTED constant truncated by assignment */
	shutdown.message.magic = MESSAGE_MAGIC;
	shutdown.message.command = MESS_CMD_SHUTDOWN;

	/*
	 * Should have been called with all signals blocked,
	 * now let SIGEMT be delivered and just exit when it is
	 */
	sig_action.sa_handler = sig_catch;
	sig_action.sa_flags = 0;
	(void) sigemptyset(&signal_set);
	(void) sigaddset(&signal_set, SIGEMT);
	(void) sigaction(SIGEMT, &sig_action, (struct sigaction *)NULL);
	(void) thr_sigsetmask(SIG_UNBLOCK, &signal_set, NULL);

	mutex_lock(&library->mutex);	/* wait for initialize */
	mutex_unlock(&library->mutex);

	message = (message_request_t *)SHM_REF_ADDR(library->un->dt.rb.message);

	if (thr_create(NULL, MD_THR_STK, stk_acs_response, (void *)library,
	    (THR_BOUND | THR_NEW_LWP | THR_DETACHED), NULL)) {
		sam_syslog(LOG_CRIT,
		    "Unable to start stk_acs_response thread: %m.");
		thr_exit(NULL);
	}

	/* Main loop */
	for (;;) {
		current_event = get_free_event(library);
		/*
		 * Zeroing the struct has the effect of initializing
		 * the mutex and the condition to USYNC_THREAD, just
		 * what we want
		 */
		(void) memset(current_event, 0, sizeof (robo_event_t));
		current_event->status.bits = REST_FREEMEM;

		/* Wait for a message */
		mutex_lock(&message->mutex);
		while (message->mtype == MESS_MT_VOID)
			cond_wait(&message->cond_r, &message->mutex);

		/* Copy the request into the event */
		current_event->request.message = message->message;
		mtype = message->mtype;		/* capture message type */
		message->mtype = MESS_MT_VOID;	/* release the message area */
		message->message.exit_id.pid = 0;
		cond_signal(&message->cond_i);	/* and wake up anyone waiting */
		mutex_unlock(&message->mutex);

		if (mtype == MESS_MT_APIHELP) {
			current_event->next = NULL;
			mutex_lock(&stk_acs_mutex);
			/*
			 * If the list is NULL, this will be the only
			 * entry on the list. Set the head and last to current
			 */
			if (stk_acs_event_head == NULL) {
				stk_acs_event_head = stk_acs_event_last =
				    current_event;
				cond_signal(&stk_acs_cond);
			} else {
				/*
				 * If the head is not null, last points to the
				 * last entry on the list. Point last
				 * next to the current then set last = current
				 */
				stk_acs_event_last->next = current_event;
				stk_acs_event_last = current_event;
			}
			mutex_unlock(&stk_acs_mutex);
		} else {
			current_event->type = EVENT_TYPE_MESS;
			/*
			 * Put the event on the list and
			 * wake up the event handler
			 */
			add_to_end(library, current_event);
			if (message->mtype == MESS_MT_SHUTDOWN) {
				if (DBG_LVL(SAM_DBG_DEBUG))
					sam_syslog(LOG_DEBUG,
					    "shutdown request:%s:%d.",
					    __FILE__, __LINE__);
				threads[STK_MSG_THREAD] = (thread_t)-1;
				thr_exit(&exit_status);
				/* NOTREACHED */
				return (NULL);
			}
		}
	}
}
int
main(int ac, char *av[])
{
	char *dir = "/";
	int allflag = 0;
	int df_allflag = 0;
	int opt_cnt = 0;
	int maxservers = 1;	/* zero allows infinite number of threads */
	int maxservers_set = 0;
	int logmaxservers = 0;
	int pid;
	int i;
	char *provider = (char *)NULL;
	char *df_provider = (char *)NULL;
	struct protob *protobp0, *protobp;
	NETSELDECL(proto) = NULL;
	NETSELDECL(df_proto) = NULL;
	NETSELPDECL(providerp);
	char *defval;
	boolean_t can_do_mlp;
	uint_t dss_npaths = 0;
	char **dss_pathnames = NULL;
	sigset_t sgset;
	char name[PATH_MAX], value[PATH_MAX];
	int ret, bufsz;
	int pipe_fd = -1;

	MyName = *av;

	/*
	 * Initializations that require more privileges than we need to run.
	 */
	(void) _create_daemon_lock(NFSD, DAEMON_UID, DAEMON_GID);
	svcsetprio();

	can_do_mlp = priv_ineffect(PRIV_NET_BINDMLP);
	if (__init_daemon_priv(PU_RESETGROUPS|PU_CLEARLIMITSET,
	    DAEMON_UID, DAEMON_GID, PRIV_SYS_NFS,
	    can_do_mlp ? PRIV_NET_BINDMLP : NULL, NULL) == -1) {
		(void) fprintf(stderr, "%s should be run with"
		    " sufficient privileges\n", av[0]);
		exit(1);
	}

	(void) enable_extended_FILE_stdio(-1, -1);

	/*
	 * Read in the values from SMF first before we check
	 * command line options so the options override SMF values.
	 */
	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("max_connections", value, DEFAULT_INSTANCE,
	    SCF_TYPE_INTEGER, NFSD, &bufsz);
	if (ret == SA_OK) {
		errno = 0;
		max_conns_allowed = strtol(value, (char **)NULL, 10);
		if (errno != 0)
			max_conns_allowed = -1;
	}

	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("listen_backlog", value, DEFAULT_INSTANCE,
	    SCF_TYPE_INTEGER, NFSD, &bufsz);
	if (ret == SA_OK) {
		errno = 0;
		listen_backlog = strtol(value, (char **)NULL, 10);
		if (errno != 0) {
			listen_backlog = 32;
		}
	}

	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("protocol", value, DEFAULT_INSTANCE,
	    SCF_TYPE_ASTRING, NFSD, &bufsz);
	if ((ret == SA_OK) && strlen(value) > 0) {
		df_proto = strdup(value);
		opt_cnt++;
		if (strncasecmp("ALL", value, 3) == 0) {
			free(df_proto);
			df_proto = NULL;
			df_allflag = 1;
		}
	}

	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("device", value, DEFAULT_INSTANCE,
	    SCF_TYPE_ASTRING, NFSD, &bufsz);
	if ((ret == SA_OK) && strlen(value) > 0) {
		df_provider = strdup(value);
		opt_cnt++;
	}

	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("servers", value, DEFAULT_INSTANCE,
	    SCF_TYPE_INTEGER, NFSD, &bufsz);
	if (ret == SA_OK) {
		errno = 0;
		maxservers = strtol(value, (char **)NULL, 10);
		if (errno != 0)
			maxservers = 1;
		else
			maxservers_set = 1;
	}

	bufsz = 4;
	ret = nfs_smf_get_prop("server_versmin", value, DEFAULT_INSTANCE,
	    SCF_TYPE_INTEGER, NFSD, &bufsz);
	if (ret == SA_OK)
		nfs_server_vers_min = strtol(value, (char **)NULL, 10);

	bufsz = 4;
	ret = nfs_smf_get_prop("server_versmax", value, DEFAULT_INSTANCE,
	    SCF_TYPE_INTEGER, NFSD, &bufsz);
	if (ret == SA_OK)
		nfs_server_vers_max = strtol(value, (char **)NULL, 10);

	bufsz = PATH_MAX;
	ret = nfs_smf_get_prop("server_delegation", value, DEFAULT_INSTANCE,
	    SCF_TYPE_ASTRING, NFSD, &bufsz);
	if (ret == SA_OK)
		if (strncasecmp(value, "off", 3) == 0)
			nfs_server_delegation = FALSE;

	/*
	 * Conflicting options error messages.
	 */
	if (opt_cnt > 1) {
		(void) fprintf(stderr, "\nConflicting options, only one of "
		    "the following options can be specified\n"
		    "in SMF:\n"
		    "\tprotocol=ALL\n"
		    "\tprotocol=protocol\n"
		    "\tdevice=devicename\n\n");
		usage();
	}
	opt_cnt = 0;

	while ((i = getopt(ac, av, "ac:p:s:t:l:")) != EOF) {
		switch (i) {
		case 'a':
			free(df_proto);
			df_proto = NULL;
			free(df_provider);
			df_provider = NULL;
			allflag = 1;
			opt_cnt++;
			break;

		case 'c':
			max_conns_allowed = atoi(optarg);
			break;

		case 'p':
			proto = optarg;
			df_allflag = 0;
			opt_cnt++;
			break;

		/*
		 * DSS: NFSv4 distributed stable storage.
		 *
		 * This is a Contracted Project Private interface, for
		 * the sole use of Sun Cluster HA-NFS. See PSARC/2006/313.
		 */
		case 's':
			if (strlen(optarg) < MAXPATHLEN) {
				/* first "-s" option encountered? */
				if (dss_pathnames == NULL) {
					/*
					 * Allocate maximum possible space
					 * required given cmdline arg count;
					 * "-s <path>" consumes two args.
					 */
					size_t sz = (ac / 2) * sizeof (char *);
					dss_pathnames = (char **)malloc(sz);
					if (dss_pathnames == NULL) {
						(void) fprintf(stderr, "%s: "
						    "dss paths malloc failed\n",
						    av[0]);
						exit(1);
					}
					(void) memset(dss_pathnames, 0, sz);
				}
				dss_pathnames[dss_npaths] = optarg;
				dss_npaths++;
			} else {
				(void) fprintf(stderr,
				    "%s: -s pathname too long.\n", av[0]);
			}
			break;

		case 't':
			provider = optarg;
			df_allflag = 0;
			opt_cnt++;
			break;

		case 'l':
			listen_backlog = atoi(optarg);
			break;

		case '?':
			usage();
			/* NOTREACHED */
		}
	}

	allflag = df_allflag;
	if (proto == NULL)
		proto = df_proto;
	if (provider == NULL)
		provider = df_provider;

	/*
	 * Conflicting options error messages.
	 */
	if (opt_cnt > 1) {
		(void) fprintf(stderr, "\nConflicting options, only one of "
		    "the following options can be specified\n"
		    "on the command line:\n"
		    "\t-a\n"
		    "\t-p protocol\n"
		    "\t-t transport\n\n");
		usage();
	}

	if (proto != NULL &&
	    strncasecmp(proto, NC_UDP, strlen(NC_UDP)) == 0) {
		if (nfs_server_vers_max == NFS_V4) {
			if (nfs_server_vers_min == NFS_V4) {
				fprintf(stderr,
				    "NFS version 4 is not supported "
				    "with the UDP protocol.  Exiting\n");
				exit(3);
			} else {
				fprintf(stderr,
				    "NFS version 4 is not supported "
				    "with the UDP protocol.\n");
			}
		}
	}

	/*
	 * If there is exactly one more argument, it is the number of
	 * servers.
	 */
	if (optind == ac - 1) {
		maxservers = atoi(av[optind]);
		maxservers_set = 1;
	}
	/*
	 * If there are two or more arguments, then this is a usage error.
	 */
	else if (optind < ac - 1)
		usage();
	/*
	 * Check the ranges for min/max version specified
	 */
	else if ((nfs_server_vers_min > nfs_server_vers_max) ||
	    (nfs_server_vers_min < NFS_VERSMIN) ||
	    (nfs_server_vers_max > NFS_VERSMAX))
		usage();
	/*
	 * There are no additional arguments, and we haven't set maxservers
	 * explicitly via the config file, we use a default number of
	 * servers.  We will log this.
	 */
	else if (maxservers_set == 0)
		logmaxservers = 1;

	/*
	 * Basic Sanity checks on options
	 *
	 * max_conns_allowed must be positive, except for the special
	 * value of -1 which is used internally to mean unlimited, -1 isn't
	 * documented but we allow it anyway.
	 *
	 * maxservers must be positive
	 * listen_backlog must be positive or zero
	 */
	if (((max_conns_allowed != -1) && (max_conns_allowed <= 0)) ||
	    (listen_backlog < 0) || (maxservers <= 0)) {
		usage();
	}

	/*
	 * Set current dir to server root
	 */
	if (chdir(dir) < 0) {
		(void) fprintf(stderr, "%s: ", MyName);
		perror(dir);
		exit(1);
	}

#ifndef DEBUG
	pipe_fd = daemonize_init();
#endif

	openlog(MyName, LOG_PID | LOG_NDELAY, LOG_DAEMON);

	/*
	 * establish our lock on the lock file and write our pid to it.
	 * exit if some other process holds the lock, or if there's any
	 * error in writing/locking the file.
	 */
	pid = _enter_daemon_lock(NFSD);
	switch (pid) {
	case 0:
		break;
	case -1:
		fprintf(stderr, "error locking for %s: %s\n", NFSD,
		    strerror(errno));
		exit(2);
	default:
		/* daemon was already running */
		exit(0);
	}

	/*
	 * If we've been given a list of paths to be used for distributed
	 * stable storage, and provided we're going to run a version
	 * that supports it, setup the DSS paths.
	 */
	if (dss_pathnames != NULL && nfs_server_vers_max >= DSS_VERSMIN) {
		if (dss_init(dss_npaths, dss_pathnames) != 0) {
			fprintf(stderr, "%s", "dss_init failed. Exiting.\n");
			exit(1);
		}
	}

	/*
	 * Block all signals till we spawn other
	 * threads.
	 */
	(void) sigfillset(&sgset);
	(void) thr_sigsetmask(SIG_BLOCK, &sgset, NULL);

	if (logmaxservers) {
		fprintf(stderr,
		    "Number of servers not specified. Using default of %d.\n",
		    maxservers);
	}

	/*
	 * Make sure to unregister any previous versions in case the
	 * user is reconfiguring the server in interesting ways.
	 */
	svc_unreg(NFS_PROGRAM, NFS_VERSION);
	svc_unreg(NFS_PROGRAM, NFS_V3);
	svc_unreg(NFS_PROGRAM, NFS_V4);
	svc_unreg(NFS_ACL_PROGRAM, NFS_ACL_V2);
	svc_unreg(NFS_ACL_PROGRAM, NFS_ACL_V3);

	/*
	 * Set up kernel RPC thread pool for the NFS server.
	 */
	if (nfssvcpool(maxservers)) {
		fprintf(stderr, "Can't set up kernel NFS service: %s. "
		    "Exiting.\n", strerror(errno));
		exit(1);
	}

	/*
	 * Set up blocked thread to do LWP creation on behalf of the kernel.
	 */
	if (svcwait(NFS_SVCPOOL_ID)) {
		fprintf(stderr, "Can't set up NFS pool creator: %s. "
		    "Exiting.\n", strerror(errno));
		exit(1);
	}

	/*
	 * RDMA start and stop thread.
	 * Per pool RDMA listener creation and
	 * destructor thread.
	 *
	 * start rdma services and block in the kernel.
	 * (only if proto or provider is not set to TCP or UDP)
	 */
	if ((proto == NULL) && (provider == NULL)) {
		if (svcrdma(NFS_SVCPOOL_ID, nfs_server_vers_min,
		    nfs_server_vers_max, nfs_server_delegation)) {
			fprintf(stderr,
			    "Can't set up RDMA creator thread : %s\n",
			    strerror(errno));
		}
	}

	/*
	 * Now open up for signal delivery
	 */
	(void) thr_sigsetmask(SIG_UNBLOCK, &sgset, NULL);
	sigset(SIGTERM, sigflush);
	sigset(SIGUSR1, quiesce);

	/*
	 * Build a protocol block list for registration.
	 */
	protobp0 = protobp = (struct protob *)malloc(sizeof (struct protob));
	protobp->serv = "NFS";
	protobp->versmin = nfs_server_vers_min;
	protobp->versmax = nfs_server_vers_max;
	protobp->program = NFS_PROGRAM;

	protobp->next = (struct protob *)malloc(sizeof (struct protob));
	protobp = protobp->next;
	protobp->serv = "NFS_ACL";		/* not used */
	protobp->versmin = nfs_server_vers_min;
	/* XXX - this needs work to get the version just right */
	protobp->versmax = (nfs_server_vers_max > NFS_ACL_V3) ?
	    NFS_ACL_V3 : nfs_server_vers_max;
	protobp->program = NFS_ACL_PROGRAM;
	protobp->next = (struct protob *)NULL;

	if (allflag) {
		if (do_all(protobp0, nfssvc) == -1) {
			fprintf(stderr, "setnetconfig failed : %s\n",
			    strerror(errno));
			exit(1);
		}
	} else if (proto) {
		/* there's more than one match for the same protocol */
		struct netconfig *nconf;
		NCONF_HANDLE *nc;
		bool_t protoFound = FALSE;

		if ((nc = setnetconfig()) == (NCONF_HANDLE *) NULL) {
			fprintf(stderr, "setnetconfig failed : %s\n",
			    strerror(errno));
			goto done;
		}
		while (nconf = getnetconfig(nc)) {
			if (strcmp(nconf->nc_proto, proto) == 0) {
				protoFound = TRUE;
				do_one(nconf->nc_device, NULL,
				    protobp0, nfssvc);
			}
		}
		(void) endnetconfig(nc);
		if (protoFound == FALSE) {
			fprintf(stderr,
			    "couldn't find netconfig entry for protocol %s\n",
			    proto);
		}
	} else if (provider)
		do_one(provider, proto, protobp0, nfssvc);
	else {
		for (providerp = defaultproviders;
		    *providerp != NULL; providerp++) {
			provider = *providerp;
			do_one(provider, NULL, protobp0, nfssvc);
		}
	}

done:
	free(protobp);
	free(protobp0);

	if (num_fds == 0) {
		fprintf(stderr, "Could not start NFS service for any protocol."
		    " Exiting.\n");
		exit(1);
	}

	end_listen_fds = num_fds;

	/*
	 * nfsd is up and running as far as we are concerned.
	 */
	daemonize_fini(pipe_fd);

	/*
	 * Get rid of unneeded privileges.
	 */
	__fini_daemon_priv(PRIV_PROC_FORK, PRIV_PROC_EXEC, PRIV_PROC_SESSION,
	    PRIV_FILE_LINK_ANY, PRIV_PROC_INFO, (char *)NULL);

	/*
	 * Poll for non-data control events on the transport descriptors.
	 */
	poll_for_action();

	/*
	 * If we get here, something failed in poll_for_action().
	 */
	return (1);
}
int
main(int argc, char **argv)
{
	dsvcd_datastore_t **ds_table;
	dsvc_datastore_t dd;
	dsvc_synchtype_t synchtype;
	char **modules;
	unsigned int i, j;
	int debug_level = 0;
	boolean_t is_daemon = B_TRUE;
	boolean_t is_verbose = B_FALSE;
	int sig, nmodules, nsynchmods, c;
	sigset_t sigset;
	char signame[SIG2STR_MAX];
	char *progname;
	void *stackbase;
	unsigned int stacksize = 16 * 1024;
	struct rlimit rl;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	/*
	 * Mask all signals except SIGABRT; doing this here ensures that
	 * all threads created through door_create() have them masked too.
	 */
	(void) sigfillset(&sigset);
	(void) sigdelset(&sigset, SIGABRT);
	(void) thr_sigsetmask(SIG_BLOCK, &sigset, NULL);

	/*
	 * Figure out our program name; just keep the final piece so that
	 * our dhcpmsg() messages don't get too long.
	 */
	progname = strrchr(argv[0], '/');
	if (progname != NULL)
		progname++;
	else
		progname = argv[0];

	/*
	 * Set the door thread creation procedure so that all of our
	 * threads are created with thread stacks with backing store.
	 */
	(void) door_server_create(doorserv_create);

	while ((c = getopt(argc, argv, "d:fv")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'f':
			is_daemon = B_FALSE;
			break;
		case 'v':
			is_verbose = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr,
			    gettext("usage: %s [-d n] [-f] [-v]\n"), progname);
			return (EXIT_FAILURE);
		default:
			break;
		}
	}

	if (geteuid() != 0) {
		dhcpmsg_init(progname, B_FALSE, is_verbose, debug_level);
		dhcpmsg(MSG_ERROR, "must be super-user");
		dhcpmsg_fini();
		return (EXIT_FAILURE);
	}

	if (is_daemon && daemonize() == 0) {
		dhcpmsg_init(progname, B_FALSE, is_verbose, debug_level);
		dhcpmsg(MSG_ERROR, "cannot become daemon, exiting");
		dhcpmsg_fini();
		return (EXIT_FAILURE);
	}

	dhcpmsg_init(progname, is_daemon, is_verbose, debug_level);
	(void) atexit(dhcpmsg_fini);

	/*
	 * Max out the number available descriptors since we need to
	 * allocate two per held lock.
	 */
	rl.rlim_cur = RLIM_INFINITY;
	rl.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		dhcpmsg(MSG_ERR, "setrlimit failed");

	(void) enable_extended_FILE_stdio(-1, -1);

	if (enumerate_dd(&modules, &nmodules) != DSVC_SUCCESS) {
		dhcpmsg(MSG_ERROR, "cannot enumerate public modules, exiting");
		return (EXIT_FAILURE);
	}

	/*
	 * NOTE: this code assumes that a module that needs dsvclockd will
	 * always need it (even as the container version is ramped).  If
	 * this becomes bogus in a future release, we'll have to make this
	 * logic more sophisticated.
	 */
	nsynchmods = nmodules;
	for (i = 0; i < nmodules; i++) {
		dd.d_resource = modules[i];
		dd.d_conver = DSVC_CUR_CONVER;
		dd.d_location = "";
		if (module_synchtype(&dd, &synchtype) != DSVC_SUCCESS) {
			dhcpmsg(MSG_WARNING, "cannot determine synchronization "
			    "type for `%s', skipping", modules[i]);
			free(modules[i]);
			modules[i] = NULL;
			nsynchmods--;
			continue;
		}
		if ((synchtype & DSVC_SYNCH_STRATMASK) != DSVC_SYNCH_DSVCD) {
			free(modules[i]);
			modules[i] = NULL;
			nsynchmods--;
		}
	}

	if (nsynchmods == 0) {
		dhcpmsg(MSG_INFO, "no public modules need synchronization");
		return (EXIT_SUCCESS);
	}

	/*
	 * Allocate the datastore table; include one extra entry so that
	 * the table is NULL-terminated.
	 */
	ds_table = calloc(nsynchmods + 1, sizeof (dsvcd_datastore_t *));
	if (ds_table == NULL) {
		dhcpmsg(MSG_ERR, "cannot allocate datastore table, exiting");
		return (EXIT_FAILURE);
	}
	ds_table[nsynchmods] = NULL;

	/*
	 * Create the datastores (which implicitly creates the doors),
	 * then sit around and wait for requests to come in on the doors.
	 */
	for (i = 0, j = 0; i < nmodules; i++) {
		if (modules[i] != NULL) {
			ds_table[j] = ds_create(modules[i], svc_lock);
			if (ds_table[j] == NULL) {
				while (j-- > 0)
					ds_destroy(ds_table[j]);
				return (EXIT_FAILURE);
			}
			free(modules[i]);
			j++;
		}
	}
	free(modules);

	stackbase = stack_create(&stacksize);
	if (stackbase == NULL)
		dhcpmsg(MSG_ERR, "cannot create reaper stack; containers "
		    "will not be reaped");
	else {
		errno = thr_create(stackbase, stacksize, reaper, ds_table,
		    THR_DAEMON, NULL);
		if (errno != 0) {
			dhcpmsg(MSG_ERR, "cannot create reaper thread; "
			    "containers will not be reaped");
			stack_destroy(stackbase, stacksize);
		}
	}

	/*
	 * Synchronously wait for a QUIT, TERM, or INT, then shutdown.
	 */
	(void) sigemptyset(&sigset);
	(void) sigaddset(&sigset, SIGQUIT);
	(void) sigaddset(&sigset, SIGTERM);
	(void) sigaddset(&sigset, SIGINT);
	(void) sigwait(&sigset, &sig);
	if (sig != SIGTERM && sig != SIGQUIT && sig != SIGINT)
		dhcpmsg(MSG_WARNING, "received unexpected signal");

	if (sig2str(sig, signame) == -1)
		(void) strlcpy(signame, "???", sizeof (signame));

	dhcpmsg(MSG_INFO, "shutting down via SIG%s", signame);

	for (i = 0; i < nsynchmods; i++)
		ds_destroy(ds_table[i]);

	return (EXIT_SUCCESS);
}
static bool_t
clnt_msk_control(CLIENT *cl, u_int request, void *info)
{
	struct cm_data *cm = CM_DATA((struct cx_data *) cl->cl_private);
	sigset_t mask;
	bool_t result = TRUE;

	thr_sigsetmask(SIG_SETMASK, (sigset_t *) 0, &mask);	/* XXX */
	vc_fd_lock_c(cl, &mask);

	switch (request) {
	case CLSET_FD_CLOSE:
		cm->cm_closeit = TRUE;
		result = TRUE;
		goto unlock;
	case CLSET_FD_NCLOSE:
		cm->cm_closeit = FALSE;
		result = TRUE;
		goto unlock;
	}

	/* for other requests which use info */
	if (info == NULL) {
		result = FALSE;
		goto unlock;
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			result = FALSE;
			goto unlock;
		}
		cm->cm_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cm->cm_total;
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			result = FALSE;
			goto unlock;
		}
		cm->cm_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cm->cm_wait;
		break;
	case CLGET_FD:
		*(msk_trans_t **)info = cm->trans;
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure *.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info = cm->call_msg.rm_xid - 1;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		cm->call_msg.rm_xid = *(u_int32_t *)info;
		break;
	case CLGET_VERS:
		*(u_int32_t *)info = cm->call_msg.rm_call.cb_vers;
		break;
	case CLSET_VERS:
		cm->call_msg.rm_call.cb_vers = *(u_int32_t *)info;
		break;
	case CLGET_PROG:
		*(u_int32_t *)info = cm->call_msg.rm_call.cb_prog;
		break;
	case CLSET_PROG:
		cm->call_msg.rm_call.cb_prog = *(u_int32_t *)info;
		break;
	case CLSET_ASYNC:
		//FIXME
		cm->cm_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		//FIXME cm->cm_connect = *(int *)info;
		break;
	default:
		break;
	}

unlock:
	vc_fd_unlock_c(cl, &mask);

	return (result);
}
static enum clnt_stat
clnt_vc_call(
	CLIENT *h,
	rpcproc_t proc,
	xdrproc_t xdr_args,
	const char *args_ptr,
	xdrproc_t xdr_results,
	caddr_t results_ptr,
	struct timeval timeout
)
{
	struct ct_data *ct;
	XDR *xdrs;
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id;
	bool_t shipnow;
	int refreshes = 2;
#ifdef _REENTRANT
	sigset_t mask, newmask;
#endif

	_DIAGASSERT(h != NULL);

	ct = (struct ct_data *) h->cl_private;

#ifdef _REENTRANT
	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
#endif

	xdrs = &(ct->ct_xdrs);
	msg_x_id = &ct->ct_u.ct_mcalli;

	if (!ct->ct_waitset) {
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0 &&
	    timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));
	if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
	    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
	    (! AUTH_MARSHALL(h->cl_auth, xdrs)) ||
	    (! (*xdr_args)(xdrs, __UNCONST(args_ptr)))) {
		if (ct->ct_error.re_status == RPC_SUCCESS)
			ct->ct_error.re_status = RPC_CANTENCODEARGS;
		(void)xdrrec_endofrecord(xdrs, TRUE);
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status);
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_TIMEDOUT);
	}

	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	for (;;) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else if (! (*xdr_results)(xdrs, results_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTDECODERES;
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}  /* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(h->cl_auth))
			goto call_again;
	}  /* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}
int
_tx_open(const char *path, int flags, struct t_info *info, int api_semantics)
{
	int retval, fd, sv_errno;
	int sv_terrno;
	int sv_errno_global;
	struct _ti_user *tiptr;
	sigset_t mask;
	int t_create_first_attempt = 1;
	int ticap_ioctl_failed = 0;

	if (!(flags & O_RDWR)) {
		t_errno = TBADFLAG;
		return (-1);
	}

	sv_errno_global = errno;
	sv_terrno = t_errno;

retry:
	if ((fd = open(path, flags)) < 0) {
		t_errno = TSYSERR;
		if (_T_IS_XTI(api_semantics) && errno == ENOENT)
			/* XTI only */
			t_errno = TBADNAME;
		return (-1);
	}

	/*
	 * is module already pushed
	 */
	do {
		retval = ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0) {
		sv_errno = errno;
		t_errno = TSYSERR;
		(void) close(fd);
		errno = sv_errno;
		return (-1);
	}

	if (retval == 0) {
		/*
		 * "timod" not already on stream, then push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH is
			 * atomic w.r.t signals (EINTR error)
			 */
			retval = ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			int sv_errno = errno;

			t_errno = TSYSERR;
			(void) close(fd);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	/*
	 * Call to _t_create may fail either because transport doesn't
	 * understand T_CAPABILITY_REQ or for some other reason.  It is nearly
	 * impossible to distinguish between these cases so it is implicitly
	 * assumed that it is always safe to close and reopen the same stream
	 * and that open/close doesn't have side effects.  _t_create may fail
	 * only once if its failure is caused by unimplemented
	 * T_CAPABILITY_REQ.
	 */
	tiptr = _t_create(fd, info, api_semantics, &ticap_ioctl_failed);
	if (tiptr == NULL) {
		/*
		 * If _t_create failed due to fail of ti_capability_req we may
		 * try to reopen the stream in the hope that timod will emulate
		 * TI_CAPABILITY and it will succeed when called again.
		 */
		if (t_create_first_attempt == 1 && ticap_ioctl_failed == 1) {
			t_create_first_attempt = 0;
			(void) close(fd);
			errno = sv_errno_global;
			t_errno = sv_terrno;
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			goto retry;
		} else {
			int sv_errno = errno;

			(void) close(fd);
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create synchronizes state with kernel timod and
	 * already sets it to T_UNBND - what it needs to be
	 * on T_OPEN event. No _T_TX_NEXTSTATE needed here.
	 */
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);

	do {
		retval = ioctl(fd, I_FLUSH, FLUSHRW);
	} while (retval < 0 && errno == EINTR);

	/*
	 * We ignore other error cases (retval < 0) - assumption is
	 * that I_FLUSH failures are temporary (e.g. ENOSR) or
	 * otherwise benign failures on a newly opened file
	 * descriptor and not a critical failure.
	 */
	return (fd);
}
/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control()'s.
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set null authentication. Caller may wish to
 * set this something more useful.
 *
 * fd should be an open socket
 */
CLIENT *
clnt_vc_create(
	int fd,
	const struct netbuf *raddr,
	rpcprog_t prog,
	rpcvers_t vers,
	u_int sendsz,
	u_int recvsz
)
{
	CLIENT *h;
	struct ct_data *ct = NULL;
	struct rpc_msg call_msg;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	_DIAGASSERT(raddr != NULL);

	h = mem_alloc(sizeof(*h));
	if (h == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}
	ct = mem_alloc(sizeof(*ct));
	if (ct == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}

	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
#ifdef _REENTRANT
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == NULL) {
		size_t cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = mem_alloc(fd_allocsz);
		if (vc_fd_locks == NULL) {
			goto blooy;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		_DIAGASSERT(vc_cv == NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = mem_alloc(cv_allocsz);
		if (vc_cv == NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = NULL;
			goto blooy;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		_DIAGASSERT(vc_cv != NULL);
#endif

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			goto blooy;
		}
		if (connect(fd, (struct sockaddr *)raddr->buf,
		    raddr->len) < 0) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			goto blooy;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto fooy;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc((size_t)raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto fooy;
	memcpy(ct->ct_addr.buf, raddr->buf, (size_t)raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	call_msg.rm_xid = __RPC_GETXID();
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)close(fd);
		}
		goto fooy;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	h->cl_ops = clnt_vc_ops();
	h->cl_private = ct;
	h->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    h->cl_private, read_vc, write_vc);
	return (h);

blooy:
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
fooy:
	/*
	 * Something goofed, free stuff and barf
	 */
	if (ct)
		mem_free(ct, sizeof(struct ct_data));
	if (h)
		mem_free(h, sizeof(CLIENT));
	return (NULL);
}
static bool
clnt_rdma_control(CLIENT *cl, u_int request, void *info)
{
	struct cm_data *cm = CM_DATA((struct cx_data *) cl->cl_p1);
	sigset_t mask;
	bool result = TRUE;

	thr_sigsetmask(SIG_SETMASK, (sigset_t *) 0, &mask);	/* XXX */

	/* always take recv lock first if taking together */
	rpc_dplx_rlc(cl);	//receive lock clnt
	rpc_dplx_slc(cl);	//send lock clnt

	switch (request) {
	case CLSET_FD_CLOSE:
		cm->cm_closeit = TRUE;
		result = TRUE;
		goto unlock;
	case CLSET_FD_NCLOSE:
		cm->cm_closeit = FALSE;
		result = TRUE;
		goto unlock;
	}

	/* for other requests which use info */
	if (info == NULL) {
		result = FALSE;
		goto unlock;
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			result = FALSE;
			goto unlock;
		}
		cm->cm_total = *(struct timeval *)info;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)info = cm->cm_total;
		break;
	case CLSET_RETRY_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			result = FALSE;
			goto unlock;
		}
		cm->cm_wait = *(struct timeval *)info;
		break;
	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = cm->cm_wait;
		break;
	case CLGET_FD:
		*(RDMAXPRT **)info = cm->cm_xdrs.x_lib[1];
		break;
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure *.
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)info = cm->call_msg.rm_xid - 1;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		cm->call_msg.rm_xid = *(u_int32_t *)info;
		break;
	case CLGET_VERS:
		*(u_int32_t *)info = cm->call_msg.rm_call.cb_vers;
		break;
	case CLSET_VERS:
		cm->call_msg.rm_call.cb_vers = *(u_int32_t *)info;
		break;
	case CLGET_PROG:
		*(u_int32_t *)info = cm->call_msg.rm_call.cb_prog;
		break;
	case CLSET_PROG:
		cm->call_msg.rm_call.cb_prog = *(u_int32_t *)info;
		break;
	case CLSET_ASYNC:
		//FIXME
		cm->cm_async = *(int *)info;
		break;
	case CLSET_CONNECT:
		//FIXME cm->cm_connect = *(int *)info;
		break;
	default:
		break;
	}

unlock:
	rpc_dplx_ruc(cl);
	rpc_dplx_suc(cl);

	return (result);
}
/*
 * Body of the worker thread to log the zfd's stdout and stderr to a log file
 * and to perform interactive IO to the stdin, stdout and stderr zfd's.
 *
 * The stdin, stdout and stderr are from the perspective of the process inside
 * the zone, so the zoneadmd view is opposite (i.e. we write to the stdin fd
 * and read from the stdout/stderr fds).
 */
static void
srvr(void *modearg)
{
    zlog_mode_t mode = (zlog_mode_t)modearg;
    int gzoutfd = -1;
    int stdinfd = -1;
    int stdoutfd = -1;
    sigset_t blockset;
    int gzerrfd = -1;
    int stderrfd = -1;

    if (!shutting_down)
        open_logfile();

    /*
     * This thread should receive SIGHUP so that it can close the log
     * file, and reopen it, during log rotation.
     */
    sigset(SIGHUP, hup_handler);
    (void) sigfillset(&blockset);
    (void) sigdelset(&blockset, SIGHUP);
    (void) thr_sigsetmask(SIG_BLOCK, &blockset, NULL);

    if (!shutting_down) {
        if (pipe(eventstream) != 0) {
            zerror(zlogp, B_TRUE, "failed to open logger control pipe");
            return;
        }
    }

    while (!shutting_down) {
        if (init_server_sock(zlogp, &gzoutfd, "out") == -1) {
            zerror(zlogp, B_FALSE,
                "server setup: stdout socket init failed");
            goto death;
        }
        if (init_server_sock(zlogp, &gzerrfd, "err") == -1) {
            zerror(zlogp, B_FALSE,
                "server setup: stderr socket init failed");
            goto death;
        }

        if (mode == ZLOG_INTERACTIVE) {
            if ((stdinfd = open_fd(zlogp, 0, O_RDWR)) == -1)
                goto death;
            stdoutfd = stdinfd;
        } else {
            if ((stdinfd = open_fd(zlogp, 0, O_WRONLY)) == -1 ||
                (stdoutfd = open_fd(zlogp, 1, O_RDONLY)) == -1 ||
                (stderrfd = open_fd(zlogp, 2, O_RDONLY)) == -1)
                goto death;
        }

        do_zfd_io(gzoutfd, gzerrfd, stdinfd, stdoutfd, stderrfd);

death:
        destroy_server_sock(gzoutfd, "out");
        destroy_server_sock(gzerrfd, "err");
        (void) close(stdinfd);
        if (mode != ZLOG_INTERACTIVE) {
            (void) close(stdoutfd);
            (void) close(stderrfd);
        }
    }

    (void) close(eventstream[0]);
    eventstream[0] = -1;
    (void) close(eventstream[1]);
    eventstream[1] = -1;
    (void) close(logfd);
}
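/*
 * Aside: a minimal sketch of the SIGHUP log-rotation idiom the comment in
 * srvr() refers to.  hup_flag, logfd and reopen_logfile() are illustrative
 * stand-ins, not the actual zoneadmd internals (hup_handler's real body is
 * not shown above).  The handler only records the event; the reopen happens
 * later, outside signal context.
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t hup_flag;
static int logfd = -1;

static void
sketch_hup_handler(int sig)
{
    (void) sig;
    hup_flag = 1;       /* async-signal-safe: just record the event */
}

static void
reopen_logfile(void)
{
    if (logfd != -1)
        (void) close(logfd);
    logfd = open("/var/tmp/sketch.log",
        O_WRONLY | O_CREAT | O_APPEND, 0644);
}

/* called from the IO loop, outside signal context */
static void
check_rotate(void)
{
    if (hup_flag) {
        hup_flag = 0;
        reopen_logfile();
    }
}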
int
main(int argc, char **argv)
{
    int c;
    pid_t pid;
    extern char *optarg;
    sigset_t mask;
    struct sigaction act;

    (void) setlocale(LC_ALL, "");
#ifndef TEXT_DOMAIN
#define TEXT_DOMAIN "SYS_TEST"
#endif
    (void) textdomain(TEXT_DOMAIN);

    if ((prog = strrchr(argv[0], '/')) == NULL)
        prog = argv[0];
    else
        prog++;

    (void) enable_extended_FILE_stdio(-1, -1);

    /*
     * process arguments
     */
    if (argc > 3)
        usage();
    while ((c = getopt(argc, argv, "d:t:")) != EOF) {
        switch (c) {
        case 'd':
            debug_level = atoi(optarg);
            break;
        case 't':
            idle_timeout = atoi(optarg);
            break;
        case '?':
        default:
            usage();
            /*NOTREACHED*/
        }
    }

    /*
     * Check permission
     */
    if (getuid() != 0) {
        (void) fprintf(stderr, gettext("Must be root to run %s\n"), prog);
        exit(EPERM);
    }

    /*
     * When rcm_daemon is started by a call to librcm, it inherits file
     * descriptors from the DR initiator making the call.  The file
     * descriptors may correspond to devices that can be removed by DR.
     * Since keeping them open is problematic, close everything but
     * stdin/stdout/stderr.
     */
    closefrom(3);

    /*
     * When rcm_daemon is started by the caller, it will inherit the
     * signal block mask.  We unblock all signals to make sure the
     * signal handling will work normally.
     */
    (void) sigfillset(&mask);
    (void) thr_sigsetmask(SIG_UNBLOCK, &mask, NULL);

    /*
     * block SIGUSR1, use it for killing specific threads
     */
    (void) sigemptyset(&mask);
    (void) sigaddset(&mask, SIGUSR1);
    (void) thr_sigsetmask(SIG_BLOCK, &mask, NULL);

    /*
     * Setup signal handlers for SIGHUP and SIGUSR1.
     * SIGHUP - causes a "delayed" daemon exit, effectively the same
     *    as a daemon restart.
     * SIGUSR1 - causes a thr_exit().  Unblocked in selected threads.
     */
    (void) sigemptyset(&act.sa_mask);   /* don't leave sa_mask uninitialized */
    act.sa_flags = 0;
    act.sa_handler = catch_sighup;
    (void) sigaction(SIGHUP, &act, NULL);
    act.sa_handler = catch_sigusr1;
    (void) sigaction(SIGUSR1, &act, NULL);

    /*
     * Ignore SIGPIPE so that the rcm daemon does not exit when it
     * attempts to read or write from a pipe whose corresponding
     * rcm script process exited.
     */
    act.sa_handler = SIG_IGN;
    (void) sigaction(SIGPIPE, &act, NULL);

    /*
     * run in daemon mode
     */
    if (debug_level < DEBUG_LEVEL_FORK) {
        if (fork())
            exit(0);
        detachfromtty();
    }

    /* only one daemon can run at a time */
    if ((pid = enter_daemon_lock()) != getpid()) {
        rcm_log_message(RCM_DEBUG, "%s pid %d already running\n",
            prog, pid);
        exit(EDEADLK);
    }

    rcm_log_message(RCM_TRACE1, "%s started, debug level = %d\n",
        prog, debug_level);

    /*
     * Set daemon state to block RCM requests before rcm_daemon is
     * fully initialized.  See rcmd_thr_incr().
     */
    rcmd_set_state(RCMD_INIT);

    /*
     * create rcm_daemon door and set permission to 0400
     */
    if (create_event_service(RCM_SERVICE_DOOR, event_service) == -1) {
        rcm_log_message(RCM_ERROR,
            gettext("cannot create door service: %s\n"),
            strerror(errno));
        rcmd_exit(errno);
    }
    (void) chmod(RCM_SERVICE_DOOR, S_IRUSR);

    init_poll_thread();     /* initialize poll thread related data */

    /*
     * Initialize database by asking modules to register.
     */
    rcmd_db_init();

    /*
     * Initialize locking, including lock recovery in the event of
     * unexpected daemon failure.
     */
    rcmd_lock_init();

    /*
     * Start accepting normal requests
     */
    rcmd_set_state(RCMD_NORMAL);

    /*
     * Start cleanup thread
     */
    rcmd_db_clean();

    /*
     * Loop within daemon and return after a period of inactivity.
     */
    rcmd_start_timer(idle_timeout);

    rcmd_cleanup(0);
    return (0);
}
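/*
 * Aside: a self-contained sketch of the SIGUSR1 idiom main() sets up above.
 * Because SIGUSR1 is blocked process-wide before any threads are created,
 * only a thread that explicitly unblocks it can receive it, so
 * thr_kill(tid, SIGUSR1) terminates exactly the chosen thread.  worker()
 * and sketch_catch_sigusr1() are hypothetical; the real catch_sigusr1 is
 * installed in main() above but its body is not shown.
 */
#include <signal.h>
#include <thread.h>
#include <unistd.h>

static void
sketch_catch_sigusr1(int sig)
{
    (void) sig;
    thr_exit(NULL);     /* terminate only the signalled thread */
}

static void *
worker(void *arg)
{
    sigset_t set;

    (void) arg;
    /* opt in: this thread alone is willing to die on SIGUSR1 */
    (void) sigemptyset(&set);
    (void) sigaddset(&set, SIGUSR1);
    (void) thr_sigsetmask(SIG_UNBLOCK, &set, NULL);

    for (;;)
        (void) pause();     /* stand-in for real, interruptible work */
}

/* elsewhere, to kill one specific thread: (void) thr_kill(tid, SIGUSR1); */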
int
main(int argc, char **argv)
{
    extern char *optarg;
    char *proxystr = NULL;
    char *proxyhost, *proxyport;
    int rc, err_code;
    int sval;
    int sockfd;
    int pipefd = -1;
    int sleeptime = 0;
    boolean_t quit = B_FALSE;
    struct addrinfo hints;
    struct addrinfo *ai = NULL;
    sigset_t main_ss;

    while ((rc = getopt(argc, argv, "s:")) != -1) {
        switch (rc) {
        case 's':
            proxystr = optarg;
            break;
        case ':':
            (void) fprintf(stderr, "Option -%c requires operand\n",
                optopt);
            usage();
            break;
        case '?':
            (void) fprintf(stderr, "Unrecognized option -%c\n",
                optopt);
            usage();
            break;
        default:
            break;
        }
    }

    if (proxystr == NULL)
        usage();

    proxyhost = strtok(proxystr, ":");
    if (proxyhost == NULL) {
        (void) fprintf(stderr,
            "host must be of format hostname:port\n");
        usage();
    }
    proxyport = strtok(NULL, ":");
    if (proxyport == NULL) {
        (void) fprintf(stderr,
            "host must be of format hostname:port\n");
        usage();
    }

    (void) signal(SIGPIPE, SIG_IGN);

    if (daemonize_start() < 0) {
        (void) fprintf(stderr, "Unable to start daemon\n");
        exit(EXIT_FAILURE);
    }

    /*
     * Before doing anything else, check to see if it's possible to reach
     * the proxyd.  If not, sit in a loop waiting for a period of time.
     * If the proxyd doesn't come on-line after waiting, return an error
     * code that tells smf to enter this service into maintenance mode.
     */
    while ((rc = zp_ping_proxy()) < -1) {
        (void) sleep(SLEEP_INTERVAL);
        sleeptime += SLEEP_INTERVAL;
        if (sleeptime >= SLEEP_DURATION)
            break;
    }

    if (rc == -2) {
        /* never successfully reached proxy */
        (void) fprintf(stderr, "Timed out trying to reach proxy\n");
        exit(SMF_EXIT_ERR_FATAL);
    } else if (rc == -1) {
        /* got some other error */
        exit(EXIT_FAILURE);
    }

    (void) memset(&hints, 0, sizeof (struct addrinfo));
    hints.ai_flags = AI_ALL;
    hints.ai_family = PF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    if ((err_code = getaddrinfo(proxyhost, proxyport, &hints, &ai)) != 0) {
        (void) fprintf(stderr, "Unable to perform name lookup\n");
        (void) fprintf(stderr, "%s: %s\n", proxyhost,
            gai_strerror(err_code));
        exit(EXIT_FAILURE);
    }

    if ((sockfd = socket(ai->ai_family, SOCK_STREAM, 0)) < 0) {
        perror("socket");
        exit(EXIT_FAILURE);
    }

    sval = 1;
    if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&sval,
        sizeof (sval)) < 0) {
        perror("setsockopt");
        exit(EXIT_FAILURE);
    }

    if (bind(sockfd, (struct sockaddr *)ai->ai_addr, ai->ai_addrlen) < 0) {
        if (errno != EADDRINUSE) {
            perror("bind");
            exit(EXIT_FAILURE);
        }
        /*
         * If the socket is in use, call zoneproxyd and ask it to
         * un-register the current socket.  Then try again.
         */
        if (zp_unregister_zone() < 0)
            exit(EXIT_FAILURE);
        if (bind(sockfd, (struct sockaddr *)ai->ai_addr,
            ai->ai_addrlen) < 0) {
            perror("bind");
            exit(EXIT_FAILURE);
        }
    }

    if (listen(sockfd, 5) < 0) {
        perror("listen");
        exit(EXIT_FAILURE);
    }

    if (zp_register_socket(sockfd, &pipefd) < 0)
        exit(EXIT_FAILURE);

    /*
     * At this point, the proxyd has a copy of the socket and will answer
     * all incoming connection requests.  Close our reference to the
     * socket here.
     */
    (void) close(sockfd);
    freeaddrinfo(ai);

    daemonize_ready(0);

    (void) sigfillset(&main_ss);
    if (thr_sigsetmask(SIG_BLOCK, &main_ss, NULL) < 0) {
        perror("thr_sigsetmask");
        exit(EXIT_FAILURE);
    }

    /* create signal handling thread */
    if (thr_create(NULL, 0, (void *(*)(void *))s_handler, NULL,
        THR_BOUND, NULL) < 0) {
        perror("thr_create");
        exit(EXIT_FAILURE);
    }

    drop_privs();

    /* Wait for signal to quit */
    while (quit == B_FALSE) {
        struct pollfd pfd[1];
        boolean_t unexpected = B_FALSE;
        char value;

        /*
         * The pipe to the proxyd notifies the client when to quit.
         * If the proxy writes a byte to the pipe, or the pipe is
         * closed unexpectedly, POLLIN will be true, telling us to
         * exit.
         */
        pfd[0].fd = pipefd;
        pfd[0].events = POLLIN;

        if (poll(pfd, 1, INFTIM) < 0) {
            if (errno == EINTR)
                continue;
            perror("poll");
            exit(EXIT_FAILURE);
        }

        if (pfd[0].revents & POLLIN) {
            rc = read(pipefd, &value, 1);
            if (rc < 0) {
                perror("read");
                exit(EXIT_FAILURE);
            }
            quit = B_TRUE;
            if (rc == 0)
                unexpected = B_TRUE;
        } else if (pfd[0].revents & (POLLERR | POLLHUP | POLLNVAL)) {
            quit = B_TRUE;
            unexpected = B_TRUE;
        }

        if (quit && unexpected)
            exit(EXIT_DAEMON_TERM);
    }

    return (0);
}
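/*
 * Aside: a distilled, hypothetical version of the pipe-liveness protocol the
 * loop above implements.  The daemon holds the write end; the client polls
 * the read end.  One byte means "quit cleanly"; EOF or an error event on the
 * pipe means the peer died.  Returns 0 for a clean quit, -1 for an
 * unexpected termination.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static int
wait_for_quit(int pipefd)
{
    struct pollfd pfd;
    char value;

    pfd.fd = pipefd;
    pfd.events = POLLIN;

    for (;;) {
        if (poll(&pfd, 1, -1) < 0) {
            if (errno == EINTR)
                continue;       /* interrupted, retry */
            return (-1);
        }
        if (pfd.revents & POLLIN) {
            ssize_t rc = read(pipefd, &value, 1);

            if (rc == 1)
                return (0);     /* explicit quit byte */
            return (-1);        /* EOF or read error: peer vanished */
        }
        if (pfd.revents & (POLLERR | POLLHUP | POLLNVAL))
            return (-1);        /* pipe torn down unexpectedly */
    }
}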
/*
 * Main thread.  Sits on the message queue and waits for something to do.
 *
 * The transport thread for the ibm will issue a delayed request for
 * requests supporting delayed requests; otherwise it will issue the
 * request and wait for the response.
 */
void *
transport_thread(
    void *vxport)
{
    robo_event_t *event;
    xport_state_t *transport = (xport_state_t *)vxport;
    struct sigaction sig_action;
    sigset_t signal_set, full_block_set;

    sigfillset(&full_block_set);
    sigemptyset(&signal_set);       /* signals to catch */
    sigaddset(&signal_set, SIGCHLD);

    mutex_lock(&transport->mutex);  /* wait for go */
    mutex_unlock(&transport->mutex);

    l_mess = transport->library->un->dis_mes[DIS_MES_NORM];
    lc_mess = transport->library->un->dis_mes[DIS_MES_CRIT];

    thr_sigsetmask(SIG_SETMASK, &full_block_set, NULL);
    memset(&sig_action, 0, sizeof (struct sigaction));
    (void) sigemptyset(&sig_action.sa_mask);
    sig_action.sa_flags = SA_RESTART;
    sig_action.sa_handler = SIG_DFL;
    (void) sigaction(SIGCHLD, &sig_action, NULL);

    for (;;) {
        mutex_lock(&transport->list_mutex);
        if (transport->active_count == 0)
            cond_wait(&transport->list_condit, &transport->list_mutex);

        if (transport->active_count == 0) { /* check to make sure */
            mutex_unlock(&transport->list_mutex);
            continue;
        }

        event = transport->first;
        transport->first = unlink_list(event);
        transport->active_count--;
        mutex_unlock(&transport->list_mutex);

        ETRACE((LOG_NOTICE, "EvTr %#x(%#x) - \n", event,
            (event->type == EVENT_TYPE_MESS) ?
            event->request.message.command :
            event->request.internal.command));
        event->next = NULL;

        /* Everyone must take care of disposing of the event */
        switch (event->type) {
        case EVENT_TYPE_INTERNAL:
            switch (event->request.internal.command) {
            case ROBOT_INTRL_LOAD_MEDIA:
                if (transport->library->un->state <= DEV_IDLE) {
                    load(transport->library, event);
                } else {
                    disp_of_event(transport->library, event, EINVAL);
                }
                break;

            case ROBOT_INTRL_FORCE_MEDIA:
                force(transport->library, event);
                break;

            case ROBOT_INTRL_DISMOUNT_MEDIA:
                dismount(transport->library, event);
                break;

            case ROBOT_INTRL_INIT:
                init_transport(transport);
                disp_of_event(transport->library, event, 0);
                break;

            case ROBOT_INTRL_VIEW_DATABASE:
                view(transport->library, event);
                break;

            case ROBOT_INTRL_QUERY_DRIVE:
                query_drv(transport->library, event);
                break;

            case ROBOT_INTRL_QUERY_LIBRARY:
                query_lib(transport->library, event);
                break;

            case ROBOT_INTRL_SET_CATEGORY:
                set_category(transport->library, event);
                break;

            case ROBOT_INTRL_SHUTDOWN:
                transport->thread = (thread_t)-1;
                thr_exit((void *)NULL);
                break;

            default:
                disp_of_event(transport->library, event, EINVAL);
                break;
            }
            break;

        case EVENT_TYPE_MESS:
            if (event->request.message.magic != MESSAGE_MAGIC) {
                if (DBG_LVL(SAM_DBG_DEBUG))
                    sam_syslog(LOG_DEBUG,
                        "xpt_thr:bad magic: %s:%d.\n",
                        __FILE__, __LINE__);
                disp_of_event(transport->library, event, EINVAL);
                break;
            }

            switch (event->request.message.command) {
            default:
                if (DBG_LVL(SAM_DBG_DEBUG))
                    sam_syslog(LOG_DEBUG,
                        "xpt_thr:msq_bad: %s:%d.\n",
                        __FILE__, __LINE__);
                disp_of_event(transport->library, event, EINVAL);
                break;
            }
            break;  /* bug fix: without this break, EVENT_TYPE_MESS fell
                       through to default and disposed of the event twice */

        default:
            if (DBG_LVL(SAM_DBG_DEBUG))
                sam_syslog(LOG_DEBUG,
                    "xpt_thr:event_bad: %s:%d.\n",
                    __FILE__, __LINE__);
            disp_of_event(transport->library, event, EINVAL);
            break;
        }
    }
}
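/*
 * Aside: the double test around cond_wait() above guards against spurious
 * and stolen wakeups.  The canonical shape of that idiom, sketched here
 * with hypothetical names, re-checks the predicate in a loop so the caller
 * never proceeds on a wakeup that delivered no work.
 */
#include <synch.h>

static mutex_t list_mutex = DEFAULTMUTEX;
static cond_t list_condit = DEFAULTCV;
static int active_count;

static void
wait_for_work(void)
{
    (void) mutex_lock(&list_mutex);
    while (active_count == 0)       /* a loop, not a single test */
        (void) cond_wait(&list_condit, &list_mutex);
    /* predicate now holds while the mutex is held: dequeue here */
    active_count--;
    (void) mutex_unlock(&list_mutex);
}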
int
condvarWait(condvar_t *condvar, mutex_t *mutex, thread_state_t wtype)
{
    sigjmp_buf jmpbuf;
    int err;

    sys_thread_t *self = sysThreadSelf();

    /*
     * There is no threads interface to get a thread's state.  So, instead,
     * we use this hack so that the debugger agent can get at this thread's
     * state.  Of course, this is not very reliable, but when a thread goes
     * to sleep, it *will* be reported as sleeping.  During the transition
     * from running to sleep, it may be incorrectly reported, since the
     * setting of the state here is not atomic with the voluntary sleep.
     * The better fix is to extend the Solaris threads interface and have
     * the debugger agent call this interface OR to use libthread_db for
     * intra-process state reporting.
     *
     * Now, condition variables are used either for waiting to enter a
     * monitor (MONITOR_WAIT) or to execute a "wait()" method when already
     * holding a monitor (CONDVAR_WAIT).  So, when condvarWait() is called
     * it could be to wait for a monitor or for a condition within a
     * monitor.  This is indicated by the "wtype" argument to condvarWait().
     * This type is set in the thread state before going to sleep.
     */
    self->state = wtype;

#ifdef __linux__
    /*
     * Register our intrHandler as a cleanup handler.  If we get
     * interrupted (i.e. canceled), we longjmp out of this handler.
     */
    pthread_cleanup_push(intrHandler, NULL);
    if (setjmp(jmpbuf) == 0) {
        /*
         * Set the jmp buf and enable cancellation.
         */
        thr_setspecific(intrJmpbufkey, &jmpbuf);
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);

        /*
         * Note: pthread_cond_wait is _not_ interruptible on Linux
         */
#else
    thr_setspecific(sigusr1Jmpbufkey, &jmpbuf);
    if (sigsetjmp(jmpbuf, 1) == 0) {
        sigset_t osigset;

        thr_sigsetmask(SIG_UNBLOCK, &sigusr1Mask, &osigset);
    again:
#endif
        err = cond_wait((cond_t *) condvar, (mutex_t *) mutex);
        switch (err) {
        case 0:
            err = SYS_OK;
            break;
#ifndef __linux__
        case EINTR:
            /* Signals other than USR1 were received. */
            goto again;
#endif
        default:
            err = SYS_ERR;
        }

#ifdef __linux__
        /*
         * Disable cancellation and clear the jump buf.
         */
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        thr_setspecific(intrJmpbufkey, NULL);
#else
        thr_sigsetmask(SIG_SETMASK, &osigset, NULL);
#endif
    } else {
        /*
         * We've received a SIGUSR1 to interrupt our wait.  We just return
         * and something above us notices the change.  Clear the jump buf
         * just to be paranoid.
         */
#ifndef __linux__
        thr_setspecific(sigusr1Jmpbufkey, NULL);
#endif
        err = SYS_INTRPT;
    }

#ifdef __linux__
    pthread_cleanup_pop(0);
#endif

    /*
     * After having woken up, change the thread state to RUNNABLE, since
     * it is now runnable.
     */
    self->state = RUNNABLE;

    return err;
}

/*
 * Returns 0 if condition variable became true before timeout expired.
 * Returns 1 if timeout expired first.
 * Returns <0 if wait fails for any other reason.
 */
int
condvarTimedWait(condvar_t *condvar, mutex_t *mutex, jlong millis,
    thread_state_t wtype)
{
#ifdef __linux__
    jmp_buf jmpbuf;
#else
    sigjmp_buf jmpbuf;
#endif
    int err;
    struct timespec timeout;
    sys_thread_t *self;
    jlong end_time;

    if (millis < 0)
        return SYS_ERR;

    if (millis > (jlong)INT_MAX)
        return condvarWait(condvar, mutex, wtype);

    end_time = sysTimeMillis() + millis;

    self = sysThreadSelf();
    self->state = wtype;

#ifdef __linux__
    /*
     * Register our intrHandler as a cleanup handler.  If we get
     * interrupted (i.e. canceled), we longjmp out of this handler.
     */
    pthread_cleanup_push(intrHandler, NULL);
    if (setjmp(jmpbuf) == 0) {
        /*
         * Set the jmp buf and enable cancellation.
         */
        thr_setspecific(intrJmpbufkey, &jmpbuf);
        pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);

        /*
         * Calculate an absolute timeout value.
         */
        timeout.tv_sec = end_time / 1000;
        timeout.tv_nsec = (end_time % 1000) * 1000000;
    again:
#else
    thr_setspecific(sigusr1Jmpbufkey, &jmpbuf);
    if (sigsetjmp(jmpbuf, 1) == 0) {
        sigset_t osigset;

        thr_sigsetmask(SIG_UNBLOCK, &sigusr1Mask, &osigset);
    again:
        timeout.tv_sec = end_time / 1000;
        timeout.tv_nsec = (end_time % 1000) * 1000000;
#endif
        err = cond_timedwait((cond_t *)condvar, (mutex_t *)mutex,
            &timeout);
        switch (err) {
        case 0:
            err = SYS_OK;
            break;
        case EINTR:
            /* Signals other than USR1 were received. */
            if (sysTimeMillis() < end_time)
                goto again;
            /*FALLTHRU*/
#ifdef USE_PTHREADS
        case ETIMEDOUT:
#else
        case ETIME:
#endif
            err = SYS_TIMEOUT;
            break;
        default:
            err = SYS_ERR;
        }

#ifdef __linux__
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        thr_setspecific(intrJmpbufkey, NULL);
#else
        thr_sigsetmask(SIG_SETMASK, &osigset, NULL);
#endif
    } else {
        /*
         * We've received a SIGUSR1 to interrupt our wait.  We just return
         * and something above us notices the change.  Clear the jump buf
         * just to be paranoid.
         */
#ifndef __linux__
        thr_setspecific(sigusr1Jmpbufkey, NULL);
#endif
        err = SYS_INTRPT;
    }

#ifdef __linux__
    /* Remove intrHandler without calling it. */
    pthread_cleanup_pop(0);

    sysAssert(pthread_mutex_trylock(mutex) == EBUSY);

    /*
     * After having woken up, change the thread state to RUNNABLE, since
     * it is now runnable.
     */
#endif
    self->state = RUNNABLE;

    return err;
}

int
condvarSignal(condvar_t *condvar)
{
    int err;

    err = cond_signal((cond_t *) condvar);
    condvar->counter++;

    return (err == 0 ? SYS_OK : SYS_ERR);
}
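/*
 * Aside: a stripped-down, single-threaded illustration of the Solaris-side
 * interrupt mechanism used by condvarWait()/condvarTimedWait(): SIGUSR1 is
 * unblocked only around the wait, and its handler jumps out of the blocking
 * call.  The real code keys the jump buffer per-thread with
 * thr_setspecific(); this sketch uses one static buffer and a pause()
 * stand-in for cond_wait(), so it is illustrative only, not the JDK code.
 */
#include <setjmp.h>
#include <signal.h>
#include <unistd.h>

static sigjmp_buf wait_jmpbuf;

static void
usr1_handler(int sig)
{
    (void) sig;
    siglongjmp(wait_jmpbuf, 1);     /* abandon the blocked call */
}

static int
interruptible_wait(void)
{
    struct sigaction act;
    sigset_t usr1set, oset;

    act.sa_handler = usr1_handler;
    act.sa_flags = 0;
    (void) sigemptyset(&act.sa_mask);
    (void) sigaction(SIGUSR1, &act, NULL);

    (void) sigemptyset(&usr1set);
    (void) sigaddset(&usr1set, SIGUSR1);

    if (sigsetjmp(wait_jmpbuf, 1) == 0) {
        /* the window where the interrupt is allowed to land */
        (void) sigprocmask(SIG_UNBLOCK, &usr1set, &oset);
        (void) pause();             /* stand-in for cond_wait() */
        (void) sigprocmask(SIG_SETMASK, &oset, NULL);
        return (0);                 /* woke normally */
    }
    return (1);                     /* interrupted via SIGUSR1 */
}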