Example 1
static int
autofs_unmount_1_req(struct umntrequest *ul,
                     struct umntres *res,
                     struct authunix_parms *cred,
                     SVCXPRT *transp)
{
    int mapno, err;
    am_node *mp = NULL;

    dlog("UNMOUNT REQUEST: dev=%lx rdev=%lx %s",
         (u_long) ul->devid,
         (u_long) ul->rdevid,
         ul->isdirect ? "direct" : "indirect");

    /* by default, and if not found, succeed */
    res->status = 0;

    /* scan the exported mount points for one matching the device ids */
    for (mapno = 0; ; mapno++) {
        mp = get_exported_ap(mapno);
        if (!mp)
            break;
        if (mp->am_dev == ul->devid &&
                (ul->rdevid == 0 || mp->am_rdev == ul->rdevid))
            break;
    }

    if (mp) {
        /* save RPC context */
        if (!mp->am_transp && transp) {
            mp->am_transp = (SVCXPRT *) xmalloc(sizeof(SVCXPRT));
            *(mp->am_transp) = *transp;
        }

        mapno = mp->am_mapno;
        err = unmount_mp(mp);

        if (err)
            /* backgrounded, don't reply yet */
            return 1;

        if (get_exported_ap(mapno))
            /* unmounting failed, tell the kernel */
            res->status = 1;
    }

    dlog("UNMOUNT REPLY: status=%d", res->status);
    return 0;
}
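
The handler above returns 1 when the unmount was backgrounded, signalling the caller not to send an RPC reply yet. Below is a hedged sketch of a dispatcher honoring that convention; dispatch_unmount() and xdr_umntres are illustrative assumptions, not the actual amd dispatch code.

#include <rpc/rpc.h>

/*
 * Hypothetical dispatcher: send the reply only when the handler
 * completed synchronously (returned 0).
 */
static void
dispatch_unmount(struct umntrequest *ul, struct authunix_parms *cred,
                 SVCXPRT *transp)
{
    struct umntres res;

    if (autofs_unmount_1_req(ul, &res, cred, transp))
        return;                 /* backgrounded: reply deferred */

    /* encode and send the reply; report failure to the client */
    if (!svc_sendreply(transp, (xdrproc_t) xdr_umntres, (caddr_t) &res))
        svcerr_systemerr(transp);
}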
Example 2
/*
 * Return the entire tree of mount nodes
 */
amq_mount_tree_list *
amqproc_export_1_svc(voidp argp, struct svc_req *rqstp)
{
  static amq_mount_tree_list aml;
  static am_node *mp;

  mp = get_exported_ap(0);
  /* treat the address of the static mp as a one-element array */
  aml.amq_mount_tree_list_val = (amq_mount_tree_p *) ((void *) &mp);
  aml.amq_mount_tree_list_len = 1;	/* XXX: hardwired to a single root */

  return &aml;
}
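
For context, a caller might walk the returned list as sketched below; the loop is written generically even though this server hardwires a single entry. walk_export_reply() and print_tree() are hypothetical helpers, not part of am-utils.

/*
 * Iterate over the trees in an amq_mount_tree_list reply.
 */
static void
walk_export_reply(amq_mount_tree_list *aml)
{
  u_int i;

  for (i = 0; i < aml->amq_mount_tree_list_len; i++)
    print_tree(aml->amq_mount_tree_list_val[i]);
}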
Example 3
/*
 * Convert from file handle to automount node.
 */
static am_node *
fh_to_mp3(am_nfs_fh *fhp, int *rp, int vop)
{
  struct am_fh *fp = (struct am_fh *) fhp;
  am_node *ap = NULL;

  if (fp->u.s.fhh_type != 0) {
    /* New filehandle type */
    int len = sizeof(*fhp) - sizeof(fp->fhh_gen);
    char *path = xmalloc(len+1);
    /*
     * Because fhp is treated as a filehandle we use memcpy
     * instead of xstrlcpy.
     */
    memcpy(path, (char *) fp->u.fhh_path, len);
    path[len] = '\0';
    /* dlog("fh_to_mp3: new filehandle: %s", path); */

    ap = path_to_exported_ap(path);
    XFREE(path);
  } else {
    /* dlog("fh_to_mp3: old filehandle: %d", fp->u.s.fhh_id); */
    /*
     * Check that the process id matches.  If it doesn't, this is
     * probably an old kernel-cached filehandle which is now out
     * of date.
     */
    if (fp->u.s.fhh_pid != get_server_pid()) {
      dlog("fh_to_mp3: wrong pid %ld != my pid %ld",
	   (long) fp->u.s.fhh_pid, get_server_pid());
      goto drop;
    }

    /*
     * Get hold of the supposed mount node
     */
    ap = get_exported_ap(fp->u.s.fhh_id);
  }

  /*
   * Check the generation number in the node
   * matches the one from the kernel.  If not
   * then the old node has been timed out and
   * a new one allocated.
   */
  if (ap != NULL && ap->am_gen != fp->fhh_gen)
    ap = NULL;

  /*
   * If it doesn't exist, drop the request
   */
  if (!ap)
    goto drop;

#if 0
  /*
   * If the node is hung then locate a new node
   * for it.  This implements the replicated filesystem
   * retries.
   */
  if (ap->am_al->al_mnt && FSRV_ISDOWN(ap->am_al->al_mnt->mf_server) && ap->am_parent) {
    int error;
    am_node *orig_ap = ap;

    dlog("fh_to_mp3: %s (%s) is hung: lookup alternative file server",
	 orig_ap->am_path, orig_ap->am_al->al_mnt->mf_info);

    /*
     * Update modify time of parent node.
     * With any luck the kernel will re-stat
     * the child node and get new information.
     */
    clocktime(&orig_ap->am_fattr.na_mtime);

    /*
     * Call the parent's lookup routine for an object
     * with the same name.  This may return -1 in error
     * if a mount is in progress.  In any case, if no
     * mount node is returned the error code is propagated
     * to the caller.
     */
    if (vop == VLOOK_CREATE) {
      ap = orig_ap->am_parent->am_al->al_mnt->mf_ops->lookup_child(orig_ap->am_parent, orig_ap->am_name, &error, vop);
      if (ap && error < 0)
	ap = orig_ap->am_parent->am_al->al_mnt->mf_ops->mount_child(ap, &error);
    } else {
      ap = NULL;
      error = ESTALE;
    }
    if (ap == 0) {
      if (error < 0 && amd_state == Finishing)
	error = ENOENT;
      *rp = error;
      return 0;
    }

    /*
     * Update last access to original node.  This
     * avoids timing it out and so sending ESTALE
     * back to the kernel.
     * XXX - Not sure we need this anymore (jsp, 90/10/6).
     */
    new_ttl(orig_ap);

  }
#endif /* 0 */

  /*
   * Disallow references to objects being unmounted, unless
   * they are automount points.
   */
  if (ap->am_al->al_mnt && (ap->am_al->al_mnt->mf_flags & MFF_UNMOUNTING) &&
      !(ap->am_flags & AMF_ROOT)) {
    if (amd_state == Finishing)
      *rp = ENOENT;
    else
      *rp = -1;
    return 0;
  }
  new_ttl(ap);

drop:
  if (!ap || !ap->am_al->al_mnt) {
    /*
     * If we are shutting down then it is likely
     * that this node has disappeared because of
     * a fast timeout.  To avoid things thrashing
     * just pretend it doesn't exist at all.  If
     * ESTALE is returned, some NFS clients just
     * keep retrying (stupid or what - if it's
     * stale now, what's it going to be in 5 minutes?)
     */
    if (amd_state == Finishing)
      *rp = ENOENT;
    else {
      *rp = ESTALE;
      amd_stats.d_stale++;
    }
  }

  return ap;
}
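
For reference, the filehandle layout that fh_to_mp3() decodes looks roughly like the sketch below, reconstructed from the field accesses above (the real definition lives in amd's headers): a generation number, followed by a union holding either the old-style type/pid/id triple or a new-style path string.

#include <sys/types.h>          /* pid_t, u_int */
#ifndef NFS_FHSIZE
# define NFS_FHSIZE 32          /* NFSv2 filehandle size */
#endif

struct am_fh_sketch {
  u_int fhh_gen;                /* generation number */
  union {
    struct {
      int fhh_type;             /* 0 = old-style handle */
      pid_t fhh_pid;            /* pid of the amd that issued it */
      int fhh_id;               /* index into the exported nodes */
    } s;
    char fhh_path[NFS_FHSIZE - sizeof(u_int)];  /* new-style: path */
  } u;
};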
Example 4
static serv_state
run_rpc(void)
{
#ifdef HAVE_SIGACTION
  sigset_t smask;
  sigprocmask(SIG_BLOCK, &masked_sigs, &smask);
#else /* not HAVE_SIGACTION */
  int smask = sigblock(MASKED_SIGS);
#endif /* not HAVE_SIGACTION */

  next_softclock = clocktime(NULL);

  amd_state = Run;

  /*
   * Keep on trucking while we are in Run mode.  This state
   * is switched to Quit after all the file systems have
   * been unmounted.
   */
  while ((int) amd_state <= (int) Finishing) {
    struct timeval tvv;
    int nsel;
    time_t now;
    fd_set readfds;

#ifdef HAVE_SVC_GETREQSET
    memmove(&readfds, &svc_fdset, sizeof(svc_fdset));
#else /* not HAVE_SVC_GETREQSET */
    FD_ZERO(&readfds);
# ifdef HAVE_FD_SET_FDS_BITS
    readfds.fds_bits[0] = svc_fds;
# else /* not HAVE_FD_SET_FDS_BITS */
    readfds = svc_fds;
# endif  /* not HAVE_FD_SET_FDS_BITS */
#endif /* not HAVE_SVC_GETREQSET */
    FD_SET(fwd_sock, &readfds);

    checkup();

    /*
     * If the full timeout code is not called,
     * then recompute the time delta manually.
     */
    now = clocktime(NULL);

    if (next_softclock <= now) {
      if (amd_state == Finishing)
	umount_exported();
      tvv.tv_sec = softclock();
    } else {
      tvv.tv_sec = next_softclock - now;
    }
    tvv.tv_usec = 0;

    if (amd_state == Finishing && get_exported_ap(0) == NULL) {
      flush_mntfs();
      amd_state = Quit;
      break;
    }

#ifdef HAVE_FS_AUTOFS
    autofs_add_fdset(&readfds);
#endif /* HAVE_FS_AUTOFS */

    if (tvv.tv_sec <= 0)
      tvv.tv_sec = SELECT_MAXWAIT;
    if (tvv.tv_sec) {
      dlog("Select waits for %ds", (int) tvv.tv_sec);
    } else {
      dlog("Select waits for Godot");
    }

    nsel = do_select(smask, FD_SETSIZE, &readfds, &tvv);

    switch (nsel) {
    case -1:
      if (errno == EINTR) {
	dlog("select interrupted");
	continue;
      }
      plog(XLOG_ERROR, "select: %m");
      break;

    case 0:
      break;

    default:
      /*
       * Read all pending NFS responses at once to avoid having responses
       * queue up as a consequence of retransmissions.
       */
      if (FD_ISSET(fwd_sock, &readfds)) {
	FD_CLR(fwd_sock, &readfds);
	--nsel;
	do {
	  fwd_reply();
	} while (rpc_pending_now() > 0);
      }

#ifdef HAVE_FS_AUTOFS
      if (nsel)
	nsel = autofs_handle_fdset(&readfds, nsel);
#endif /* HAVE_FS_AUTOFS */

      if (nsel) {
	/*
	 * Anything left must be a normal
	 * RPC request.
	 */
#ifdef HAVE_SVC_GETREQSET
	svc_getreqset(&readfds);
#else /* not HAVE_SVC_GETREQSET */
# ifdef HAVE_FD_SET_FDS_BITS
	svc_getreq(readfds.fds_bits[0]);
# else /* not HAVE_FD_SET_FDS_BITS */
	svc_getreq(readfds);
# endif /* not HAVE_FD_SET_FDS_BITS */
#endif /* not HAVE_SVC_GETREQSET */
      }
      break;
    }
  }

#ifdef HAVE_SIGACTION
  sigprocmask(SIG_SETMASK, &smask, NULL);
#else /* not HAVE_SIGACTION */
  (void) sigsetmask(smask);
#endif /* not HAVE_SIGACTION */

  if (amd_state == Quit)
    amd_state = Done;

  return amd_state;
}
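
run_rpc() keeps its signal set blocked for the whole loop and hands the saved mask to do_select(), so handlers can only run while amd is waiting in the kernel. The sketch below shows one way to get that atomically with pselect(2), assuming the sigprocmask()/sigset_t variant; it illustrates the pattern and is not amd's actual do_select().

#include <sys/select.h>

/*
 * Wait for readable descriptors with the pre-block signal mask
 * installed only for the duration of the wait.
 */
static int
do_select_sketch(sigset_t smask, int nfds, fd_set *rfds, struct timeval *tvp)
{
  struct timespec ts;

  ts.tv_sec = tvp->tv_sec;
  ts.tv_nsec = (long) tvp->tv_usec * 1000;

  /* pselect() swaps in smask, waits, then restores the old mask */
  return pselect(nfds, rfds, NULL, NULL, &ts, &smask);
}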