Example #1
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_second + rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany)
	   && (time_second - last_adjusted_timeout >= rtq_timeout)
	   && rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2 * rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop;
	callout_reset(&rtq_timer, tvtohz(&atv), in6_rtqtimo, rock);
}
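
The comment in the function above describes an adaptive decay: whenever a sweep leaves more than rtq_toomany routes alive, the expiry window is multiplied by 2/3 and clamped at rtq_minreallyold, at most once per rtq_timeout seconds. The following is a minimal, self-contained userspace sketch of that policy; none of these names or constants come from the kernel.

#include <stdio.h>
#include <time.h>

static int reallyold = 3600;		/* current expiry window, seconds */
static const int minreallyold = 10;	/* floor for the decay */
static const int interval = 600;	/* at most one adjustment per interval */
static time_t last_adjusted;

/* Shrink the expiry window by one third when a sweep still finds an
 * excess of routes, rate-limited to once per interval. */
static void
maybe_decay(time_t now, int excess)
{
	if (excess > 0 && now - last_adjusted >= interval &&
	    reallyold > minreallyold) {
		reallyold = 2 * reallyold / 3;
		if (reallyold < minreallyold)
			reallyold = minreallyold;
		last_adjusted = now;
	}
}

int
main(void)
{
	time_t now = time(NULL);

	/* Simulate six sweeps, each still over the cap. */
	for (int i = 0; i < 6; i++) {
		maybe_decay(now + i * interval, 500);
		printf("sweep %d: expiry window %d s\n", i, reallyold);
	}
	return (0);
}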
Example #2
void
in_rtqdrain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);

		for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
			rnh = rt_tables_get_rnh(fibnum, AF_INET);
			arg.found = arg.killed = 0;
			arg.rnh = rnh;
			arg.nextstop = 0;
			arg.draining = 1;
			arg.updating = 0;
			RADIX_NODE_HEAD_LOCK(rnh);
			rnh->rnh_walktree(rnh, in_rtqkill, &arg);
			RADIX_NODE_HEAD_UNLOCK(rnh);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
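
Every example here drives the same visitor pattern: rnh_walktree() hands each radix_node plus one opaque context pointer to a callback, and a nonzero return from the callback aborts the walk. Below is a self-contained userspace analogue of that contract with illustrative types; the real in_rtqkill additionally applies the expiry, draining, and updating policy carried in struct rtqk_arg.

#include <stddef.h>
#include <stdio.h>

struct node { int expire; struct node *next; };
struct walk_arg { int found, killed, draining; };

/* Callback with the walktree shape: visit one node, return 0 to
 * continue the traversal or nonzero to stop it. */
static int
visit(struct node *n, void *w)
{
	struct walk_arg *ap = w;

	ap->found++;
	if (ap->draining || n->expire == 0)
		ap->killed++;		/* a real walker would unlink n here */
	return (0);
}

/* Stand-in for rnh_walktree(): apply f to every node with arg w. */
static void
walktree(struct node *head, int (*f)(struct node *, void *), void *w)
{
	for (struct node *n = head; n != NULL; n = n->next)
		if (f(n, w) != 0)
			break;
}

int
main(void)
{
	struct node c = { 5, NULL }, b = { 0, &c }, a = { 7, &b };
	struct walk_arg arg = { 0, 0, 1 };	/* draining, like in_rtqdrain */

	walktree(&a, visit, &arg);
	printf("found %d, killed %d\n", arg.found, arg.killed);
	return (0);
}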
Example #3
static void
in_rtqtimo_one(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_uptime + V_rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > V_rtq_toomany) &&
	    (time_uptime - last_adjusted_timeout >= V_rtq_timeout) &&
	    V_rtq_reallyold > V_rtq_minreallyold) {
		V_rtq_reallyold = 2 * V_rtq_reallyold / 3;
		if (V_rtq_reallyold < V_rtq_minreallyold) {
			V_rtq_reallyold = V_rtq_minreallyold;
		}

		last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
		    V_rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}
Example #4
void
in6_rtqdrain(void)
{
	struct radix_node_head *rnh = rt_tables[AF_INET6];
	struct rtqk_arg arg;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;
	arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);
}
Example #5
void
in_rtqdrain(void)
{
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fibnum;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		rnh = rt_tables_get_rnh(fibnum, AF_INET);
		arg.found = arg.killed = 0;
		arg.rnh = rnh;
		arg.nextstop = 0;
		arg.draining = 1;
		arg.updating = 0;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}
}
Example #6
static void
in6_mtutimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct mtuex_arg arg;
	struct timeval atv;

	arg.rnh = rnh;
	arg.nextstop = time_second + MTUTIMO_DEFAULT;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop;
	if (atv.tv_sec < time_second) {
		printf("invalid mtu expiration time on routing table\n");
		arg.nextstop = time_second + 30;	/* last resort */
		atv.tv_sec = arg.nextstop;
	}
	callout_reset(&rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
}
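
callout_reset(9) takes a relative tick count, so an absolute deadline such as arg.nextstop ultimately has to be reduced to a delta from the current time before conversion to ticks. A small sketch of that conversion under an assumed fixed tick rate; deadline_to_ticks and HZ here are illustrative, not kernel interfaces.

#include <time.h>

#define HZ	1000			/* assumed ticks per second */

/* Turn an absolute deadline (seconds) into a relative tick count,
 * clamping deadlines already in the past to a single tick. */
static long
deadline_to_ticks(time_t deadline, time_t now)
{
	if (deadline <= now)
		return (1);		/* fire as soon as possible */
	return ((long)(deadline - now) * HZ);
}

int
main(void)
{
	time_t now = time(NULL);

	return (deadline_to_ticks(now + 30, now) == 30L * HZ ? 0 : 1);
}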
Example #7
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;
	struct ucred *cred;

	for (i = 0; i <= AF_MAX; i++) {
		if ((rnh = nep->ne_rtable[i])) {
			RADIX_NODE_HEAD_LOCK(rnh);
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred, rnh);
			RADIX_NODE_HEAD_UNLOCK(rnh);
			RADIX_NODE_HEAD_DESTROY(rnh);
			free(rnh, M_RTABLE);
			nep->ne_rtable[i] = NULL;	/* not SMP safe XXX */
		}
	}
	cred = nep->ne_defexported.netc_anon;
	if (cred != NULL)
		crfree(cred);
}
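
The walker here, vfs_free_netcred (not shown), deletes and frees every entry while the tree is still being walked, which is why the head itself is passed as the walk argument: the callback needs it to call rnh_deladdr before freeing. Destructive traversal demands reading each successor before its node disappears; a self-contained userspace analogue of that discipline on a plain list follows.

#include <stdlib.h>

struct node { struct node *next; };

/* Free every node of a list: grab the successor before freeing, the
 * same ordering a destructive tree walk needs. */
static void
free_all(struct node *head)
{
	struct node *n, *next;

	for (n = head; n != NULL; n = next) {
		next = n->next;		/* read before n is gone */
		free(n);
	}
}

int
main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (n == NULL)
			return (1);
		n->next = head;
		head = n;
	}
	free_all(head);
	return (0);
}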
Example #8
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = NULL;
	struct domain *dom;
	int error;

	/*
	 * XXX: This routine converts from a `struct xucred'
	 * (argp->ex_anon) to a `struct ucred' (np->netc_anon).  This
	 * operation is questionable; for example, what should be done
	 * with fields like cr_uidinfo and cr_prison?  Currently, this
	 * routine does not touch them (leaves them as NULL).
	 */
	if (argp->ex_anon.cr_version != XUCRED_VERSION) {
		vfs_mount_error(mp, "ex_anon.cr_version: %d != %d",
		    argp->ex_anon.cr_version, XUCRED_VERSION);
		return (EINVAL);
	}

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED) {
			vfs_mount_error(mp,
			    "MNT_DEFEXPORTED already set for mount %p", mp);
			return (EPERM);
		}
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = crget();
		np->netc_anon->cr_uid = argp->ex_anon.cr_uid;
		crsetgroups(np->netc_anon, argp->ex_anon.cr_ngroups,
		    argp->ex_anon.cr_groups);
		np->netc_anon->cr_prison = &prison0;
		prison_hold(np->netc_anon->cr_prison);
		np->netc_numsecflavors = argp->ex_numsecflavors;
		bcopy(argp->ex_secflavors, np->netc_secflavors,
		    sizeof(np->netc_secflavors));
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_DEFEXPORTED;
		MNT_IUNLOCK(mp);
		return (0);
	}

#if MSIZE <= 256
	if (argp->ex_addrlen > MLEN) {
		vfs_mount_error(mp, "ex_addrlen %d is greater than %d",
		    argp->ex_addrlen, MLEN);
		return (EINVAL);
	}
#endif

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_family == AF_UNSPEC || saddr->sa_family > AF_MAX) {
		error = EINVAL;
		vfs_mount_error(mp, "Invalid saddr->sa_family: %d");
		goto out;
	}
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == NULL) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next) {
			KASSERT(((i == AF_INET) || (i == AF_INET6)), 
			    ("unexpected protocol in vfs_hang_addrlist"));
			if (dom->dom_family == i && dom->dom_rtattach) {
				/*
				 * XXX MRT 
				 * The INET and INET6 domains know the
				 * offset already. We don't need to send it
				 * So we just use it as a flag to say that
				 * we are or are not setting up a real routing
				 * table. Only IP and IPV6 need have this
				 * be 0 so all other protocols can stay the 
				 * same (ABI compatible).
				 */ 
				dom->dom_rtattach(
				    (void **) &nep->ne_rtable[i], 0);
				break;
			}
		}
		if ((rnh = nep->ne_rtable[i]) == NULL) {
			error = ENOBUFS;
			vfs_mount_error(mp, "%s %s %d",
			    "Unable to initialize radix node head ",
			    "for address family", i);
			goto out;
		}
	}
	RADIX_NODE_HEAD_LOCK(rnh);
	rn = (*rnh->rnh_addaddr)(saddr, smask, rnh, np->netc_rnodes);
	RADIX_NODE_HEAD_UNLOCK(rnh);
	if (rn == NULL || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		vfs_mount_error(mp, "Invalid radix node head, rn: %p %p",
		    rn, np);
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = crget();
	np->netc_anon->cr_uid = argp->ex_anon.cr_uid;
	crsetgroups(np->netc_anon, argp->ex_anon.cr_ngroups,
	    argp->ex_anon.cr_groups);
	np->netc_anon->cr_prison = &prison0;
	prison_hold(np->netc_anon->cr_prison);
	np->netc_numsecflavors = argp->ex_numsecflavors;
	bcopy(argp->ex_secflavors, np->netc_secflavors,
	    sizeof(np->netc_secflavors));
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}
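
Note the allocation near the top of vfs_hang_addrlist: one malloc carries the netcred header followed by the variable-length address and mask bytes, so saddr is simply (struct sockaddr *)(np + 1) and the single free() in the out: path releases everything. A self-contained userspace sketch of the same layout trick, with illustrative types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { unsigned flags; /* header: fixed-size bookkeeping */ };

/* Allocate header plus variable-length address and mask in one block;
 * the payloads live directly after the header. */
static struct entry *
entry_alloc(const void *addr, size_t alen, const void *mask, size_t mlen)
{
	struct entry *e;

	e = calloc(1, sizeof(*e) + alen + mlen);
	if (e == NULL)
		return (NULL);
	memcpy(e + 1, addr, alen);			/* address follows header */
	memcpy((char *)(e + 1) + alen, mask, mlen);	/* mask follows address */
	return (e);
}

int
main(void)
{
	const unsigned char addr[4] = { 192, 0, 2, 1 };
	const unsigned char mask[4] = { 255, 255, 255, 0 };
	struct entry *e = entry_alloc(addr, sizeof(addr), mask, sizeof(mask));

	if (e == NULL)
		return (1);
	printf("first address byte: %u\n", ((unsigned char *)(e + 1))[0]);
	free(e);		/* one free releases header, address, and mask */
	return (0);
}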