Example #1
void
tunable_quad_init(void *data)
{
	struct tunable_quad *d = (struct tunable_quad *)data;

	TUNABLE_QUAD_FETCH(d->path, d->var);
}
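Example #1 is the generic SYSINIT callback behind FreeBSD's registered quad tunables. For context, the registration record and macro it pairs with look roughly like the sketch below; the field names follow from the cast inside the callback, but the macro body is an approximation, not the verbatim <sys/kernel.h> definition.

/*
 * Sketch of the plumbing Example #1 plugs into.  In a real FreeBSD
 * tree these declarations live in <sys/kernel.h>; the identifiers and
 * macro details here are assumptions for illustration only.
 */
void	tunable_quad_init(void *);

struct tunable_quad {
	const char	*path;	/* loader tunable name, e.g. "kern.maxtsiz" */
	quad_t		*var;	/* kernel variable to overwrite at boot */
};

#define	TUNABLE_QUAD(path, var)						\
	static struct tunable_quad __CONCAT(__tunable_quad_, __LINE__) = {\
		(path),							\
		(var),							\
	};								\
	SYSINIT(__CONCAT(__tunable_quad_init_, __LINE__),		\
	    SI_SUB_TUNABLES, SI_ORDER_MIDDLE,				\
	    tunable_quad_init, &__CONCAT(__tunable_quad_, __LINE__))

/* Hypothetical usage: TUNABLE_QUAD("kern.demo.limit", &demo_limit); */

Running at SI_SUB_TUNABLES order means the fetch happens early in boot, before the subsystems that consume the variable are initialized.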
Example #2
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
	/* hax: force the tick counters to start at 1 */
	ticks = softticks = 1;

	/*
	 * global_hz[get_stack_id()] = 100;  (or HZ) -- already set up
	 * elsewhere, so the override stays commented out here.
	 */

	TUNABLE_INT_FETCH("kern.hz", &hz);
	tick = 1000000 / hz;

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("kern.maxbcache", &maxbcache);

	maxtsiz = MAXTSIZ;
	TUNABLE_QUAD_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_QUAD_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_QUAD_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_QUAD_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_QUAD_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_QUAD_FETCH("kern.sgrowsiz", &sgrowsiz);
}
Example #3
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
	hz = HZ;
	TUNABLE_INT_FETCH("kern.hz", &hz);
	stathz = hz * 128 / 100;
	profhz = stathz;
	ustick = 1000000 / hz;
	nstick = 1000000000 / hz;
	/* can adjust 30ms in 60s */
	ntp_default_tick_delta = howmany(30000000, 60 * hz);

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxbcache", &maxbcache);
	maxtsiz = MAXTSIZ;
	TUNABLE_QUAD_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_QUAD_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_QUAD_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_QUAD_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_QUAD_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_QUAD_FETCH("kern.sgrowsiz", &sgrowsiz);
}
Example #4
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
#ifndef __rtems__
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE,
	    vm_map_max(kmem_map) - vm_map_min(kmem_map));
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;
#else /* __rtems__ */
	maxmbufmem = rtems_bsd_get_allocator_domain_size(
	    RTEMS_BSD_ALLOCATOR_DOMAIN_MBUF);
#endif /* __rtems__ */

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
Example #5
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem, maxmbufmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE,
	    vm_map_max(kernel_map) - vm_map_min(kernel_map));
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
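Examples #4 through #6 all repeat the same three-step pattern: compute a default, let a TUNABLE_*_FETCH from the loader environment override it, then clamp the result to a sane range. Purely as an illustration, that pattern could be factored into a small helper like the one below; fetch_quad_clamped() is hypothetical and not part of any FreeBSD API.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

/*
 * Hypothetical helper showing the "default, fetch, clamp" pattern used
 * throughout the examples above.  TUNABLE_QUAD_FETCH() leaves the value
 * untouched when the tunable is not set in the loader environment.
 */
static quad_t
fetch_quad_clamped(const char *path, quad_t def, quad_t lo, quad_t hi)
{
	quad_t val = def;

	TUNABLE_QUAD_FETCH(path, &val);
	if (val < lo)
		val = lo;
	if (val > hi)
		val = hi;
	return (val);
}

/*
 * Rough equivalent of the maxmbufmem logic in tunable_mbinit():
 *
 *	maxmbufmem = fetch_quad_clamped("kern.ipc.maxmbufmem",
 *	    realmem / 2, 0, realmem / 4 * 3);
 */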
Example #6
/*
 * Boot time overrides that are scaled against main memory
 */
void
init_param2(long physpages)
{
	quad_t realmem;

	/* Base parameters */
	maxusers = MAXUSERS;
	TUNABLE_INT_FETCH("kern.maxusers", &maxusers);
	if (maxusers == 0) {
		maxusers = physpages / (2 * 1024 * 1024 / PAGE_SIZE);
		if (maxusers < 32)
			maxusers = 32;
#ifdef VM_MAX_AUTOTUNE_MAXUSERS
		if (maxusers > VM_MAX_AUTOTUNE_MAXUSERS)
			maxusers = VM_MAX_AUTOTUNE_MAXUSERS;
#endif
		/*
		 * Scale down the rate at which maxusers grows once
		 * we hit 384.
		 */
		if (maxusers > 384)
			maxusers = 384 + ((maxusers - 384) / 8);
	}

	/*
	 * The following can be overridden after boot via sysctl.  Note:
	 * unless overridden, these macros are ultimately based on maxusers.
	 * Limit maxproc so that kmap entries cannot be exhausted by
	 * processes.
	 */
	maxproc = NPROC;
	TUNABLE_INT_FETCH("kern.maxproc", &maxproc);
	if (maxproc > (physpages / 12))
		maxproc = physpages / 12;
	maxprocperuid = (maxproc * 9) / 10;

	/*
	 * The default limit for maxfiles is 1/8 of the number of
	 * physical pages, but not less than MAXFILES.
	 * At most it can be 1/4 the number of physical pages.
	 */
	maxfiles = imax(MAXFILES, physpages / 8);
	TUNABLE_INT_FETCH("kern.maxfiles", &maxfiles);
	if (maxfiles > (physpages / 4))
		maxfiles = physpages / 4;
	maxfilesperproc = (maxfiles / 10) * 9;
	
	/*
	 * Cannot be changed after boot.
	 */
	nbuf = NBUF;
	TUNABLE_INT_FETCH("kern.nbuf", &nbuf);

	/*
	 * XXX: Does the callout wheel have to be so big?
	 *
	 * Clip ncallout to the value the formula below yields at the
	 * maxusers cap of 384 applied above.  This is still huge, but
	 * acceptable.
	 */
	ncallout = imin(16 + maxproc + maxfiles, 18508);
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physpages * PAGE_SIZE,
	    VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > (realmem / 4) * 3)
		maxmbufmem = (realmem / 4) * 3;

	/*
	 * The default for maxpipekva is min(1/64 of the kernel address space,
	 * max(1/64 of main memory, 512KB)).  See sys_pipe.c for more details.
	 */
	maxpipekva = (physpages / 64) * PAGE_SIZE;
	TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva);
	if (maxpipekva < 512 * 1024)
		maxpipekva = 512 * 1024;
	if (maxpipekva > (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 64)
		maxpipekva = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
		    64;
}
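All of the overrides above originate from the loader environment (typically set in loader.conf). In the FreeBSD trees these examples come from, TUNABLE_QUAD_FETCH() is essentially a thin wrapper around getenv_quad(), so a fetch can also be written out by hand. A minimal sketch, using a hypothetical kern.demo.limit tunable rather than any of the real variables above:

#include <sys/param.h>
#include <sys/systm.h>

/* Hypothetical tunable with a compile-time default. */
static quad_t demo_limit = 64 * 1024 * 1024;

static void
demo_fetch_limit(void)
{
	quad_t val;

	/*
	 * Spelled-out equivalent of
	 *	TUNABLE_QUAD_FETCH("kern.demo.limit", &demo_limit);
	 * getenv_quad() returns non-zero and fills in val only when the
	 * loader environment contains e.g. kern.demo.limit="134217728",
	 * so the compile-time default survives when the tunable is unset.
	 */
	if (getenv_quad("kern.demo.limit", &val) != 0)
		demo_limit = val;
}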