void
forcibly_timeout_mp(am_node *mp)
{
  mntfs *mf = mp->am_al->al_mnt;

  /*
   * Arrange to timeout this node
   */
  if (mf && ((mp->am_flags & AMF_ROOT) ||
	     (mf->mf_flags & (MFF_MOUNTING | MFF_UNMOUNTING)))) {
    /*
     * We aren't going to schedule a timeout, so we need to notify the
     * child here unless we are already unmounting, in which case that
     * process is responsible for notifying the child.
     */
    if (mf->mf_flags & MFF_UNMOUNTING)
      plog(XLOG_WARNING, "node %s is currently being unmounted, ignoring timeout request", mp->am_path);
    else {
      plog(XLOG_WARNING, "ignoring timeout request for active node %s", mp->am_path);
      notify_child(mp, AMQ_UMNT_FAILED, EBUSY, 0);
    }
  } else {
    plog(XLOG_INFO, "\"%s\" forcibly timed out", mp->am_path);
    mp->am_flags &= ~AMF_NOTIMEOUT;
    mp->am_ttl = clocktime(NULL);
    /*
     * Force mtime update of parent dir, to prevent DNLC/dcache from caching
     * the old entry, which could result in ESTALE errors, bad symlinks, and
     * more.
     */
    clocktime(&mp->am_parent->am_fattr.na_mtime);
    reschedule_timeout_mp();
  }
}
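/*
 * For orientation: the mount-retry functions below pass around a
 * "struct continuation" recording the state of an in-progress background
 * mount.  The sketch below is inferred from how the fields are used in
 * this file (cp->mp, cp->retry, cp->start, cp->callout, cp->al); treat the
 * field types as assumptions, not the authoritative definition:
 *
 *	struct continuation {
 *	  am_node *mp;		// node we are trying to mount
 *	  int retry;		// should amfs_bgmount try again?
 *	  time_t start;		// time the whole mount attempt began
 *	  int callout;		// callout id for the retry timer
 *	  am_loc **al;		// current entry in the list of alternate locations
 *	};
 */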
am_node *
amfs_generic_mount_child(am_node *new_mp, int *error_return)
{
  int error;
  struct continuation *cp;	/* Continuation structure if need to mount */

  dlog("in amfs_generic_mount_child");

  *error_return = error = 0;	/* Error so far */

  /* we have an errorfs attached to the am_node, free it */
  if (new_mp->am_al)
    free_loc(new_mp->am_al);
  new_mp->am_al = NULL;

  /*
   * Construct a continuation
   */
  cp = ALLOC(struct continuation);
  cp->callout = 0;
  cp->mp = new_mp;
  cp->retry = TRUE;
  cp->start = clocktime(NULL);
  cp->al = new_mp->am_alarray;

  /*
   * Try and mount the file system.  If this succeeds immediately (possible
   * for a ufs file system) then return the attributes, otherwise just
   * return an error.
   */
  error = amfs_bgmount(cp);
  reschedule_timeout_mp();
  if (!error)
    return new_mp;

  /*
   * Code for quick reply.  If current_transp is set, then it's the
   * transp that's been passed down from nfs_dispatcher() or from
   * autofs_program_[123]().
   * If new_mp->am_transp is not already set, set it by copying in
   * current_transp.  Once am_transp is set, nfs_quick_reply() and
   * autofs_mount_succeeded() can use it to send a reply to the
   * client that requested this mount.
   */
  if (current_transp && !new_mp->am_transp) {
    dlog("Saving RPC transport for %s", new_mp->am_path);
    new_mp->am_transp = (SVCXPRT *) xmalloc(sizeof(SVCXPRT));
    *(new_mp->am_transp) = *current_transp;
  }
  if (error && new_mp->am_al && new_mp->am_al->al_mnt &&
      (new_mp->am_al->al_mnt->mf_ops == &amfs_error_ops))
    new_mp->am_error = error;

  if (new_mp->am_error > 0)
    assign_error_mntfs(new_mp);

  ereturn(error);
}
#ifdef DEBUG
/*
 * Check that we are not burning resources
 */
static void
checkup(void)
{
  static int max_fd = 0;
  static char *max_mem = 0;

  int next_fd = dup(0);
  caddr_t next_mem = sbrk(0);

  close(next_fd);

  if (max_fd < next_fd) {
    dlog("%d new fds allocated; total is %d",
         next_fd - max_fd, next_fd);
    max_fd = next_fd;
  }
  if (max_mem < next_mem) {
#ifdef HAVE_GETPAGESIZE
    dlog("%#lx bytes of memory allocated; total is %#lx (%ld pages)",
         (long) (next_mem - max_mem),
         (unsigned long) next_mem,
         ((long) next_mem + getpagesize() - 1) / (long) getpagesize());
#else /* not HAVE_GETPAGESIZE */
    dlog("%#lx bytes of memory allocated; total is %#lx",
         (long) (next_mem - max_mem),
         (unsigned long) next_mem);
#endif /* not HAVE_GETPAGESIZE */
    max_mem = next_mem;
  }
}
#else /* not DEBUG */
#define checkup()
#endif /* not DEBUG */


static int
#ifdef HAVE_SIGACTION
do_select(sigset_t smask, int fds, fd_set *fdp, struct timeval *tvp)
#else /* not HAVE_SIGACTION */
do_select(int smask, int fds, fd_set *fdp, struct timeval *tvp)
#endif /* not HAVE_SIGACTION */
{
  int sig;
  int nsel;

  if ((sig = setjmp(select_intr))) {
    select_intr_valid = 0;
    /* Got a signal */
    switch (sig) {
    case SIGINT:
    case SIGTERM:
      amd_state = Finishing;
      reschedule_timeout_mp();
      break;
    }
    nsel = -1;
    errno = EINTR;
  } else {
    select_intr_valid = 1;
    /*
     * Invalidate the current clock value
     */
    clock_valid = 0;
    /*
     * Allow interrupts.  If a signal
     * occurs, then it will cause a longjmp
     * up above.
     */
#ifdef HAVE_SIGACTION
    sigprocmask(SIG_SETMASK, &smask, NULL);
#else /* not HAVE_SIGACTION */
    (void) sigsetmask(smask);
#endif /* not HAVE_SIGACTION */
    /*
     * Wait for input
     */
    nsel = select(fds, fdp, (fd_set *) 0, (fd_set *) 0,
                  tvp->tv_sec ? tvp : (struct timeval *) 0);
  }

#ifdef HAVE_SIGACTION
  sigprocmask(SIG_BLOCK, &masked_sigs, NULL);
#else /* not HAVE_SIGACTION */
  (void) sigblock(MASKED_SIGS);
#endif /* not HAVE_SIGACTION */

  /*
   * Perhaps reload the cache?
   */
  if (do_mapc_reload < clocktime()) {
    mapc_reload();
    do_mapc_reload = clocktime() + gopt.map_reload_interval;
  }
  return nsel;
}
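/*
 * A note on the setjmp()/longjmp() dance in do_select() above: the handler
 * installed for SIGINT/SIGTERM is expected to do, roughly,
 *
 *	if (select_intr_valid)
 *	  longjmp(select_intr, sig);
 *
 * so that a signal delivered while select(2) is blocked transfers control
 * back to the setjmp() in do_select() with the signal number as its value.
 * (This is a sketch inferred from how select_intr and select_intr_valid
 * are used here; the actual handler is not part of this excerpt.)
 */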
static void
free_map_if_success(int rc, int term, opaque_t arg)
{
  am_node *mp = (am_node *) arg;
  mntfs *mf = mp->am_mnt;
  wchan_t wchan = get_mntfs_wchan(mf);

  /*
   * Not unmounting any more
   */
  mf->mf_flags &= ~MFF_UNMOUNTING;

  /*
   * If a timeout was deferred because the underlying filesystem
   * was busy then arrange for a timeout as soon as possible.
   */
  if (mf->mf_flags & MFF_WANTTIMO) {
    mf->mf_flags &= ~MFF_WANTTIMO;
    reschedule_timeout_mp();
  }
  if (term) {
    plog(XLOG_ERROR, "unmount for %s got signal %d", mp->am_path, term);
#if defined(DEBUG) && defined(SIGTRAP)
    /*
     * dbx likes to put a trap on exit().
     * Pretend it succeeded for now...
     */
    if (term == SIGTRAP) {
      am_unmounted(mp);
    }
#endif /* DEBUG */
#ifdef HAVE_FS_AUTOFS
    if (mp->am_flags & AMF_AUTOFS)
      autofs_umount_failed(mp);
#endif /* HAVE_FS_AUTOFS */
    amd_stats.d_uerr++;
  } else if (rc) {
    if (mf->mf_ops == &amfs_program_ops || rc == EBUSY)
      plog(XLOG_STATS, "\"%s\" on %s still active", mp->am_path, mf->mf_mount);
    else
      plog(XLOG_ERROR, "%s: unmount: %s", mp->am_path, strerror(rc));
#ifdef HAVE_FS_AUTOFS
    if (mf->mf_flags & MFF_IS_AUTOFS)
      autofs_get_mp(mp);
    if (mp->am_flags & AMF_AUTOFS)
      autofs_umount_failed(mp);
#endif /* HAVE_FS_AUTOFS */
    amd_stats.d_uerr++;
  } else {
    am_unmounted(mp);
  }

  /*
   * Wakeup anything waiting for this unmount
   */
  wakeup(wchan);
}
/*
 * Timeout all nodes waiting on
 * a given Fserver.
 */
void
map_flush_srvr(fserver *fs)
{
  int i;
  int done = 0;

  for (i = last_used_map; i >= 0; --i) {
    am_node *mp = exported_ap[i];

    if (mp && mp->am_mnt && mp->am_mnt->mf_server == fs) {
      plog(XLOG_INFO, "Flushed %s; dependent on %s", mp->am_path, fs->fs_host);
      mp->am_ttl = clocktime(NULL);
      done = 1;
    }
  }
  if (done)
    reschedule_timeout_mp();
}
/*
 * Retry a mount
 */
static void
amfs_retry(int rc, int term, opaque_t arg)
{
  struct continuation *cp = (struct continuation *) arg;
  am_node *mp = cp->mp;
  int error = 0;

  dlog("Commencing retry for mount of %s", mp->am_path);

  new_ttl(mp);

  if ((cp->start + ALLOWED_MOUNT_TIME) < clocktime(NULL)) {
    /*
     * The entire mount has timed out.  Set the error code and skip past all
     * the mntfs's so that amfs_bgmount will not have any more
     * ways to try the mount, thus causing an error.
     */
    plog(XLOG_INFO, "mount of \"%s\" has timed out", mp->am_path);
    error = ETIMEDOUT;
    while (*cp->al)
      cp->al++;
    /* explicitly forbid further retries after timeout */
    cp->retry = FALSE;
  }
  if (error || !IN_PROGRESS(cp))
    error = amfs_bgmount(cp);
  else
    /*
     * Normally it's amfs_bgmount() which frees the continuation.  However,
     * if the mount is already in progress and we're in amfs_retry() for
     * another node we don't try mounting the filesystem once again.  Still,
     * we have to free the continuation as we won't get called again and
     * thus would leak the continuation structure and our am_loc references.
     */
    free_continuation(cp);

  reschedule_timeout_mp();
}
/*
 * The continuation function.  This is called by
 * the task notifier when a background mount attempt
 * completes.
 */
static void
amfs_cont(int rc, int term, opaque_t arg)
{
  struct continuation *cp = (struct continuation *) arg;
  am_node *mp = cp->mp;
  mntfs *mf = mp->am_al->al_mnt;

  dlog("amfs_cont: '%s'", mp->am_path);

  /*
   * Definitely not trying to mount at the moment
   */
  mf->mf_flags &= ~MFF_MOUNTING;

  /*
   * While we are mounting - try to avoid race conditions
   */
  new_ttl(mp);

  /*
   * Wakeup anything waiting for this mount
   */
  wakeup(get_mntfs_wchan(mf));

  /*
   * Check for termination signal or exit status...
   */
  if (rc || term) {
#ifdef HAVE_FS_AUTOFS
    if (mf->mf_flags & MFF_IS_AUTOFS &&
        !(mf->mf_flags & MFF_MOUNTED))
      autofs_release_fh(mp);
#endif /* HAVE_FS_AUTOFS */

    if (term) {
      /*
       * Not sure what to do for an error code.
       */
      mf->mf_error = EIO;	/* XXX ? */
      mf->mf_flags |= MFF_ERROR;
      plog(XLOG_ERROR, "mount for %s got signal %d", mp->am_path, term);
    } else {
      /*
       * Check for exit status...
       */
#ifdef __linux__
      /*
       * HACK ALERT!
       *
       * On Linux (and maybe not only) it's possible to run
       * an amd which "knows" how to mount certain combinations
       * of nfs_proto/nfs_version which the kernel doesn't grok.
       * So if we got an EINVAL and we have a server that's not
       * using NFSv2/UDP, try again with NFSv2/UDP.
       *
       * Too bad that there is no way to dynamically determine
       * what combinations the _client_ supports, as opposed to
       * what the _server_ supports...
       */
      if (rc == EINVAL &&
          mf->mf_server &&
          (mf->mf_server->fs_version != 2 ||
           !STREQ(mf->mf_server->fs_proto, "udp")))
        mf->mf_flags |= MFF_NFS_SCALEDOWN;
      else
#endif /* __linux__ */
      {
        mf->mf_error = rc;
        mf->mf_flags |= MFF_ERROR;
        errno = rc;		/* XXX */
        if (!STREQ(mp->am_al->al_mnt->mf_ops->fs_type, "linkx"))
          plog(XLOG_ERROR, "%s: mount (amfs_cont): %m", mp->am_path);
      }
    }

    if (!(mf->mf_flags & MFF_NFS_SCALEDOWN)) {
      /*
       * If we get here then that attempt didn't work, so
       * move the info vector pointer along by one and
       * call the background mount routine again
       */
      amd_stats.d_merr++;
      cp->al++;
    }
    amfs_bgmount(cp);
    if (mp->am_error > 0)
      assign_error_mntfs(mp);
  } else {
    /*
     * The mount worked.
     */
    dlog("Mounting %s returned success", cp->mp->am_path);
    am_mounted(cp->mp);
    free_continuation(cp);
  }

  reschedule_timeout_mp();
}
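/*
 * Note on the scaledown path above (an observation inferred from this
 * code, not from amfs_bgmount() itself, which is not in this excerpt):
 * when MFF_NFS_SCALEDOWN is set, cp->al is deliberately left pointing at
 * the same location, so the subsequent amfs_bgmount() call retries that
 * same location with NFSv2/UDP rather than moving on to the next
 * alternative in the list.
 */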
static serv_state
run_rpc(void)
{
#ifdef HAVE_SIGACTION
  sigset_t smask;
  sigprocmask(SIG_BLOCK, &masked_sigs, &smask);
#else /* not HAVE_SIGACTION */
  int smask = sigblock(MASKED_SIGS);
#endif /* not HAVE_SIGACTION */

  next_softclock = clocktime();

  amd_state = Run;

  /*
   * Keep on trucking while we are in Run mode.  This state
   * is switched to Quit after all the file systems have
   * been unmounted.
   */
  while ((int) amd_state <= (int) Finishing) {
    struct timeval tvv;
    int nsel;
    time_t now;
#ifdef HAVE_SVC_GETREQSET
    fd_set readfds;

    memmove(&readfds, &svc_fdset, sizeof(svc_fdset));
    FD_SET(fwd_sock, &readfds);
#else /* not HAVE_SVC_GETREQSET */
# ifdef FD_SET
    fd_set readfds;
    FD_ZERO(&readfds);
    readfds.fds_bits[0] = svc_fds;
    FD_SET(fwd_sock, &readfds);
# else /* not FD_SET */
    int readfds = svc_fds | (1 << fwd_sock);
# endif /* not FD_SET */
#endif /* not HAVE_SVC_GETREQSET */

    checkup();

    /*
     * If the full timeout code is not called,
     * then recompute the time delta manually.
     */
    now = clocktime();

    if (next_softclock <= now) {
      if (amd_state == Finishing)
        umount_exported();
      tvv.tv_sec = softclock();
    } else {
      tvv.tv_sec = next_softclock - now;
    }
    tvv.tv_usec = 0;

    if (amd_state == Finishing && last_used_map < 0) {
      flush_mntfs();
      amd_state = Quit;
      break;
    }

#ifdef HAVE_FS_AUTOFS
    autofs_add_fdset(&readfds);
#endif /* HAVE_FS_AUTOFS */

    if (tvv.tv_sec <= 0)
      tvv.tv_sec = SELECT_MAXWAIT;
    if (tvv.tv_sec) {
      dlog("Select waits for %ds", (int) tvv.tv_sec);
    } else {
      dlog("Select waits for Godot");
    }

    nsel = do_select(smask, FD_SETSIZE, &readfds, &tvv);

    switch (nsel) {
    case -1:
      if (errno == EINTR) {
        dlog("select interrupted");
        continue;
      }
      plog(XLOG_ERROR, "select: %m");
      break;

    case 0:
      break;

    default:
      /*
       * Read all pending NFS responses at once to avoid having responses
       * queue up as a consequence of retransmissions.
       */
#ifdef FD_SET
      if (FD_ISSET(fwd_sock, &readfds)) {
        FD_CLR(fwd_sock, &readfds);
#else /* not FD_SET */
      if (readfds & (1 << fwd_sock)) {
        readfds &= ~(1 << fwd_sock);
#endif /* not FD_SET */
        --nsel;
        do {
          fwd_reply();
        } while (rpc_pending_now() > 0);
      }

#ifdef HAVE_FS_AUTOFS
      if (nsel)
        nsel = autofs_handle_fdset(&readfds, nsel);
#endif /* HAVE_FS_AUTOFS */

      if (nsel) {
        /*
         * Anything left must be a normal
         * RPC request.
         */
#ifdef HAVE_SVC_GETREQSET
        svc_getreqset(&readfds);
#else /* not HAVE_SVC_GETREQSET */
# ifdef FD_SET
        svc_getreq(readfds.fds_bits[0]);
# else /* not FD_SET */
        svc_getreq(readfds);
# endif /* not FD_SET */
#endif /* not HAVE_SVC_GETREQSET */
      }
      break;
    }
  }

#ifdef HAVE_SIGACTION
  sigprocmask(SIG_SETMASK, &smask, NULL);
#else /* not HAVE_SIGACTION */
  (void) sigsetmask(smask);
#endif /* not HAVE_SIGACTION */

  if (amd_state == Quit)
    amd_state = Done;

  return amd_state;
}
int
mount_automounter(int ppid)
{
  /*
   * Old code replaced by rpc-trash patch.
   * Erez Zadok <*****@*****.**>
   *
   * int so = socket(AF_INET, SOCK_DGRAM, 0);
   */
  SVCXPRT *udp_amqp = NULL, *tcp_amqp = NULL;
  int nmount, ret;
  int soNFS;
  int udp_soAMQ, tcp_soAMQ;
  struct netconfig *udp_amqncp, *tcp_amqncp;

  /*
   * This must be done first, because it attempts to bind
   * to various UDP ports and we don't want anything else
   * potentially taking over those ports before we get a chance
   * to reserve them.
   */
  if (gopt.flags & CFM_RESTART_EXISTING_MOUNTS)
    restart_automounter_nodes();

  /*
   * Start RPC forwarding
   */
  if (fwd_init() != 0)
    return 3;

  /*
   * Construct the root automount node
   */
  make_root_node();

  /*
   * Pick up the pieces from a previous run.
   * This is likely to (indirectly) need the rpc_fwd package
   * so it *must* come after the call to fwd_init().
   */
  if (gopt.flags & CFM_RESTART_EXISTING_MOUNTS)
    restart();

  /*
   * Create the nfs service for amd.
   * If nfs_port is already initialized, it means we
   * already created the service during restart_automounter_nodes().
   */
  if (nfs_port == 0) {
    ret = create_nfs_service(&soNFS, &nfs_port, &nfsxprt, nfs_program_2);
    if (ret != 0)
      return ret;
  }
  xsnprintf(pid_fsname, sizeof(pid_fsname), "%s:(pid%ld,port%u)",
            am_get_hostname(), (long) am_mypid, nfs_port);

  /* security: if user sets -D noamq, don't even create listening socket */
  if (amuDebug(D_AMQ)) {
    ret = create_amq_service(&udp_soAMQ,
                             &udp_amqp,
                             &udp_amqncp,
                             &tcp_soAMQ,
                             &tcp_amqp,
                             &tcp_amqncp,
                             gopt.preferred_amq_port);
    if (ret != 0)
      return ret;
  }

#ifdef HAVE_FS_AUTOFS
  if (amd_use_autofs) {
    /*
     * Create the autofs service for amd.
     */
    ret = create_autofs_service();
    /* if autofs service fails it is OK if using a test amd */
    if (ret != 0) {
      plog(XLOG_WARNING, "autofs service registration failed, turning off autofs support");
      amd_use_autofs = 0;
    }
  }
#endif /* HAVE_FS_AUTOFS */

  /*
   * Mount the top-level auto-mountpoints
   */
  nmount = mount_exported();

  /*
   * Now safe to tell parent that we are up and running
   */
  if (ppid)
    kill(ppid, SIGQUIT);

  if (nmount == 0) {
    plog(XLOG_FATAL, "No work to do - quitting");
    amd_state = Done;
    return 0;
  }

  if (amuDebug(D_AMQ)) {
    /*
     * Complete registration of amq (first TCP service then UDP)
     */
    int tcp_ok = 0, udp_ok = 0;

    unregister_amq();		/* unregister leftover Amd, if any, just in case */

    tcp_ok = amu_svc_register(tcp_amqp, get_amd_program_number(), AMQ_VERSION,
                              amq_program_1, IPPROTO_TCP, tcp_amqncp);
    if (!tcp_ok)
      plog(XLOG_FATAL,
           "unable to register (AMQ_PROGRAM=%lu, AMQ_VERSION, tcp)",
           get_amd_program_number());

    udp_ok = amu_svc_register(udp_amqp, get_amd_program_number(), AMQ_VERSION,
                              amq_program_1, IPPROTO_UDP, udp_amqncp);
    if (!udp_ok)
      plog(XLOG_FATAL,
           "unable to register (AMQ_PROGRAM=%lu, AMQ_VERSION, udp)",
           get_amd_program_number());

    /* return error only if both failed */
    if (!tcp_ok && !udp_ok) {
      amd_state = Done;
      return 3;
    }
  }

  /*
   * Start timeout_mp rolling
   */
  reschedule_timeout_mp();

  /*
   * Start the server
   */
  if (run_rpc() != Done) {
    plog(XLOG_FATAL, "run_rpc failed");
    amd_state = Done;
  }

  return 0;
}