Example #1
void
random_ident_hardware(struct random_systat *systat)
{

	/* Set default to software */
	*systat = random_yarrow;

	/* Then go looking for hardware */
#if defined(__amd64__) || (defined(__i386__) && !defined(PC98))
#ifdef PADLOCK_RNG
	if (via_feature_rng & VIA_HAS_RNG) {
		int enable;

		enable = 1;
		TUNABLE_INT_FETCH("hw.nehemiah_rng_enable", &enable);
		if (enable)
			*systat = random_nehemiah;
	}
#endif
#ifdef RDRAND_RNG
	if (cpu_feature2 & CPUID2_RDRAND) {
		int enable;

		enable = 1;
		TUNABLE_INT_FETCH("hw.ivy_rng_enable", &enable);
		if (enable)
			*systat = random_ivy;
	}
#endif
#endif
}
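Every example in this section follows the same idiom: set a compiled-in default, let the loader environment override it via TUNABLE_*_FETCH(), and then sanity-check the result. A minimal sketch of that pattern follows (the tunable name and bounds are hypothetical, chosen only for illustration):

/* Sketch only: "kern.example_limit" is a made-up tunable. */
static int example_limit;

static void
example_init(void)
{

	example_limit = 128;	/* compiled-in default */
	TUNABLE_INT_FETCH("kern.example_limit", &example_limit);
	if (example_limit < 1)	/* distrust loader input */
		example_limit = 1;
}

Tunables like these are normally set in /boot/loader.conf (e.g. kern.example_limit="256") or at the loader prompt before the kernel boots. TUNABLE_INT_FETCH() leaves the variable untouched when the tunable is not set, which is why every caller assigns a default first.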
Example #2
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
	/* XXX hack: force the tick counters to start at 1. */
	ticks = softticks = 1;

	/*
	 * global_hz[get_stack_id()] is already set up elsewhere, so it
	 * is no longer forced to 100 (HZ) here.
	 */

	TUNABLE_INT_FETCH("kern.hz", &hz);
	tick = 1000000 / hz;

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("kern.maxbcache", &maxbcache);

	maxtsiz = MAXTSIZ;
	TUNABLE_QUAD_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_QUAD_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_QUAD_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_QUAD_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_QUAD_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_QUAD_FETCH("kern.sgrowsiz", &sgrowsiz);
}
Example #3
/*
 * tunable_mbinit() has to be run before init_maxsockets() thus
 * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
 * runs at SI_ORDER_ANY.
 *
 * NB: This has to be done before VM init.
 */
static void
tunable_mbinit(void *dummy)
{

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
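The ordering the comment above describes is expressed at registration time. A sketch of that SYSINIT hookup (an assumption based on stock FreeBSD, where this function lives in kern_mbuf.c):

/* Run during SI_SUB_KMEM, before init_maxsockets() at SI_ORDER_ANY. */
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);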
Example #4
/*
 * IP6 initialization: fill in IP6 protocol switch table.
 * All protocols not implemented in kernel go to raw IP6 protocol handler.
 */
void
ip6_init(void)
{
	struct ip6protosw *pr;
	int i;

	TUNABLE_INT_FETCH("net.inet6.ip6.auto_linklocal",
	    &V_ip6_auto_linklocal);
	TUNABLE_INT_FETCH("net.inet6.ip6.accept_rtadv", &V_ip6_accept_rtadv);
	TUNABLE_INT_FETCH("net.inet6.ip6.no_radr", &V_ip6_no_radr);

	TAILQ_INIT(&V_in6_ifaddrhead);
	V_in6_ifaddrhashtbl = hashinit(IN6ADDR_NHASH, M_IFADDR,
	    &V_in6_ifaddrhmask);

	/* Initialize packet filter hooks. */
	V_inet6_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet6_pfil_hook.ph_af = AF_INET6;
	if ((i = pfil_head_register(&V_inet6_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
			"error %d\n", __func__, i);

	scope6_init();
	addrsel_policy_init();
	nd6_init();
	frag6_init();

	V_ip6_desync_factor = arc4random() % MAX_TEMP_DESYNC_FACTOR;

	/* Skip global initialization stuff for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

#ifdef DIAGNOSTIC
	if (sizeof(struct protosw) != sizeof(struct ip6protosw))
		panic("sizeof(protosw) != sizeof(ip6protosw)");
#endif
	pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip6_init");

	/* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip6_protox[i] = pr - inet6sw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip6_protox[].
	 */
	for (pr = (struct ip6protosw *)inet6domain.dom_protosw;
	    pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET6 &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip6_protox[pr->pr_protocol] = pr - inet6sw;
		}

	netisr_register(&ip6_nh);
}
Example #5
static void
dmar_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_DMAR *dmartbl;
	ACPI_DMAR_HARDWARE_UNIT *dmarh;
	ACPI_STATUS status;
	int haw, i, error;

	if (acpi_disabled("dmar"))
		return;
	TUNABLE_INT_FETCH("hw.dmar.enable", &dmar_enable);
	if (!dmar_enable)
		return;
#ifdef INVARIANTS
	TUNABLE_INT_FETCH("hw.dmar.check_free", &dmar_check_free);
#endif
	TUNABLE_INT_FETCH("hw.dmar.match_verbose", &dmar_match_verbose);
	status = AcpiGetTable(ACPI_SIG_DMAR, 1, (ACPI_TABLE_HEADER **)&dmartbl);
	if (ACPI_FAILURE(status))
		return;
	haw = dmartbl->Width + 1;
	if ((1ULL << (haw + 1)) > BUS_SPACE_MAXADDR)
		dmar_high = BUS_SPACE_MAXADDR;
	else
		dmar_high = 1ULL << (haw + 1);
	if (bootverbose) {
		printf("DMAR HAW=%d flags=<%b>\n", dmartbl->Width,
		    (unsigned)dmartbl->Flags,
		    "\020\001INTR_REMAP\002X2APIC_OPT_OUT");
	}
	AcpiPutTable((ACPI_TABLE_HEADER *)dmartbl);

	dmar_iterate_tbl(dmar_count_iter, NULL);
	if (dmar_devcnt == 0)
		return;
	dmar_devs = malloc(sizeof(device_t) * dmar_devcnt, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < dmar_devcnt; i++) {
		dmarh = dmar_find_by_index(i);
		if (dmarh == NULL) {
			printf("dmar_identify: cannot find HWUNIT %d\n", i);
			continue;
		}
		dmar_devs[i] = BUS_ADD_CHILD(parent, 1, "dmar", i);
		if (dmar_devs[i] == NULL) {
			printf("dmar_identify: cannot create instance %d\n", i);
			continue;
		}
		error = bus_set_resource(dmar_devs[i], SYS_RES_MEMORY,
		    DMAR_REG_RID, dmarh->Address, PAGE_SIZE);
		if (error != 0) {
			printf(
	"dmar%d: unable to alloc register window at 0x%08jx: error %d\n",
			    i, (uintmax_t)dmarh->Address, error);
			device_delete_child(parent, dmar_devs[i]);
			dmar_devs[i] = NULL;
		}
	}
}
Example #6
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
#ifndef XEN
	vm_guest = detect_virtual();
#else
	vm_guest = VM_GUEST_XEN;
#endif
	hz = -1;
	TUNABLE_INT_FETCH("kern.hz", &hz);
	if (hz == -1)
		hz = vm_guest > VM_GUEST_NO ? HZ_VM : HZ;
	tick = 1000000 / hz;

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxbcache", &maxbcache);
	msgbufsize = MSGBUF_SIZE;
	TUNABLE_INT_FETCH("kern.msgbufsize", &msgbufsize);

	maxtsiz = MAXTSIZ;
	TUNABLE_ULONG_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_ULONG_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_ULONG_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_ULONG_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_ULONG_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_ULONG_FETCH("kern.sgrowsiz", &sgrowsiz);

	/*
	 * Let the administrator set {NGROUPS_MAX}, but disallow values
	 * less than NGROUPS_MAX which would violate POSIX.1-2008 or
	 * greater than INT_MAX-1 which would result in overflow.
	 */
	ngroups_max = NGROUPS_MAX;
	TUNABLE_INT_FETCH("kern.ngroups", &ngroups_max);
	if (ngroups_max < NGROUPS_MAX)
		ngroups_max = NGROUPS_MAX;

	/*
	 * Only allow the maximum pid to be lowered.  Prevent the
	 * creation of an unbootable system if pid_max is set too low.
	 */
	TUNABLE_INT_FETCH("kern.pid_max", &pid_max);
	if (pid_max > PID_MAX)
		pid_max = PID_MAX;
	else if (pid_max < 300)
		pid_max = 300;
}
Example #7
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* The invalidation queue reads by DMARs are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract(unit->inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
Example #8
/*
 * Boot time overrides that are scaled against main memory
 */
void
init_param2(long physpages)
{

	/* Base parameters */
	maxusers = MAXUSERS;
	TUNABLE_INT_FETCH("kern.maxusers", &maxusers);
	if (maxusers == 0) {
		maxusers = physpages / (2 * 1024 * 1024 / PAGE_SIZE);
		if (maxusers < 32)
			maxusers = 32;
		if (maxusers > 384)
			maxusers = 384;
	}

	/*
	 * The following can be overridden after boot via sysctl.  Note:
	 * unless overridden, these macros are ultimately based on maxusers.
	 */
	maxproc = NPROC;
	TUNABLE_INT_FETCH("kern.maxproc", &maxproc);
	/*
	 * Limit maxproc so that kmap entries cannot be exhausted by
	 * processes.
	 */
	if (maxproc > (physpages / 12))
		maxproc = physpages / 12;
	maxfiles = MAXFILES;
	TUNABLE_INT_FETCH("kern.maxfiles", &maxfiles);
	maxprocperuid = (maxproc * 9) / 10;
	maxfilesperproc = (maxfiles * 9) / 10;
	
	/*
	 * Cannot be changed after boot.
	 */
	nbuf = NBUF;
	TUNABLE_INT_FETCH("kern.nbuf", &nbuf);

	ncallout = 16 + maxproc + maxfiles;
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);

	/*
	 * The default for maxpipekva is min(1/64 of the kernel address space,
	 * max(1/64 of main memory, 512KB)).  See sys_pipe.c for more details.
	 */
	maxpipekva = (physpages / 64) * PAGE_SIZE;
	if (maxpipekva < 512 * 1024)
		maxpipekva = 512 * 1024;
	if (maxpipekva > (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 64)
		maxpipekva = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
		    64;
	TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva);
}
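As a concrete check of the maxpipekva default described in the comment above: a machine with 4 GB of physical memory and 4 KB pages has physpages = 1048576, so (physpages / 64) * PAGE_SIZE works out to 64 MB, comfortably above the 512 KB floor and, on 64-bit systems, well below the kernel address space cap.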
Example #9
void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i],
			debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
	tcp_syncache.cache_limit -= 1;
}
Example #10
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
		break;
	}
	return (0);
}
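A handler like this is wired into the kernel with a moduledata_t and DECLARE_MODULE(). A sketch of that registration (the subsystem and order shown are assumptions, not taken from the drm sources):

static moduledata_t drm_mod = {
	"drm",		/* module name */
	drm_modevent,	/* event handler, called with MOD_LOAD etc. */
	NULL		/* extra data */
};
DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);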
Example #11
/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	int hashsize = TCBHASHSIZE;
	
	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
					&tcbinfo.porthashmask);
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	syncache_init();
}
Example #12
void
tunable_int_init(void *data)
{
	struct tunable_int *d = (struct tunable_int *)data;

	TUNABLE_INT_FETCH(d->path, d->var);
}
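This variant is data-driven: a TUNABLE_INT() macro records a path/variable pair and arranges for tunable_int_init() to run at boot. A simplified sketch of that machinery (an approximation of sys/kernel.h, not a verbatim copy):

struct tunable_int {
	const char *path;	/* kenv name, e.g. "kern.hz" */
	int *var;		/* kernel variable to overwrite */
};

#define	TUNABLE_INT(path, var)					\
	static struct tunable_int __CONCAT(__tunable_int_, __LINE__) = { \
		(path),						\
		(var),						\
	};							\
	SYSINIT(__CONCAT(__Tunable_init_, __LINE__),		\
	    SI_SUB_TUNABLES, SI_ORDER_MIDDLE,			\
	    tunable_int_init, &__CONCAT(__tunable_int_, __LINE__))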
Example #13
void
isci_interrupt_setup(struct isci_softc *isci)
{
	uint8_t max_msix_messages = SCI_MAX_MSIX_MESSAGES_PER_CONTROLLER *
	    isci->controller_count;
	BOOL use_msix = FALSE;
	uint32_t force_legacy_interrupts = 0;

	TUNABLE_INT_FETCH("hw.isci.force_legacy_interrupts",
	    &force_legacy_interrupts);

	if (!force_legacy_interrupts &&
	    pci_msix_count(isci->device) >= max_msix_messages) {

		isci->num_interrupts = max_msix_messages;
		pci_alloc_msix(isci->device, &isci->num_interrupts);
		if (isci->num_interrupts == max_msix_messages)
			use_msix = TRUE;
	}

	if (use_msix == TRUE)
		isci_interrupt_setup_msix(isci);
	else
		isci_interrupt_setup_legacy(isci);
}
Example #14
void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow to disable CLFLUSH feature manually by
	 * hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;
}
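As a worked example of the computation above: a CPU reporting 0x08 in bits 15-8 of %ebx for CPUID leaf 1 (a hypothetical but typical value) has a CLFLUSH line size of 8 * 8 = 64 bytes, the common x86 cache line size.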
Example #15
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
    if (SFBUF_OPTIONAL_DIRECT_MAP)
        return;
#endif

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_WAITOK | M_ZERO);
    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example #16
void
platform_mp_setmaxid(void)
{
	bus_space_handle_t scu;
	int hwcpu, ncpu;
	uint32_t val;

	/* If we've already set the global vars don't bother to do it again. */
	if (mp_ncpus != 0)
		return;

	if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0)
		panic("Couldn't map the SCU\n");
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONFIG_REG);
	hwcpu = (val & SCU_CONFIG_REG_NCPU_MASK) + 1;
	bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE);

	ncpu = hwcpu;
	TUNABLE_INT_FETCH("hw.ncpu", &ncpu);
	if (ncpu < 1 || ncpu > hwcpu)
		ncpu = hwcpu;

	mp_ncpus = ncpu;
	mp_maxid = ncpu - 1;
}
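In practice this lets an administrator cap SMP from the loader: setting hw.ncpu=1 in loader.conf brings up a single core on a multi-core part, while a value below 1 or above the SCU-reported count is ignored and the hardware count is used instead.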
Example #17
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

    /* Don't bother on systems with a direct map */
    if (hw_direct_map)
        return;

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_NOWAIT | M_ZERO);

    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example #18
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
	hz = HZ;
	TUNABLE_INT_FETCH("kern.hz", &hz);
	stathz = hz * 128 / 100;
	profhz = stathz;
	ustick = 1000000 / hz;
	nstick = 1000000000 / hz;
	/* can adjust 30ms in 60s */
	ntp_default_tick_delta = howmany(30000000, 60 * hz);

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxbcache", &maxbcache);
	maxtsiz = MAXTSIZ;
	TUNABLE_QUAD_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_QUAD_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_QUAD_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_QUAD_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_QUAD_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_QUAD_FETCH("kern.sgrowsiz", &sgrowsiz);
}
Example #19
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
#ifndef __rtems__
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE,
	    vm_map_max(kmem_map) - vm_map_min(kmem_map));
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;
#else /* __rtems__ */
	maxmbufmem = rtems_bsd_get_allocator_domain_size(
	    RTEMS_BSD_ALLOCATOR_DOMAIN_MBUF);
#endif /* __rtems__ */

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
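For a sense of scale under the limits described in the comment above: on a machine where realmem works out to 8 GB, maxmbufmem defaults to 4 GB, and a loader-supplied kern.ipc.maxmbufmem is silently clamped to 6 GB.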
Example #20
static void
tunable_mbinit(void *dummy)
{

	/* This has to be done before VM init. */
	nmbclusters = 1024 + maxusers * 64;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
}
Example #21
/*
 * Boot time overrides
 */
void
init_param(void)
{

	/* Base parameters */
	maxusers = MAXUSERS;
	TUNABLE_INT_FETCH("kern.maxusers", &maxusers);
	hz = HZ;
	TUNABLE_INT_FETCH("kern.hz", &hz);
	tick = 1000000 / hz;
	tickadj = howmany(30000, 60 * hz);	/* can adjust 30ms in 60s */

	/* The following can be overridden after boot via sysctl */
	maxproc = NPROC;
	TUNABLE_INT_FETCH("kern.maxproc", &maxproc);
	maxfiles = MAXFILES;
	TUNABLE_INT_FETCH("kern.maxfiles", &maxfiles);
	maxprocperuid = maxproc - 1;
	maxfilesperproc = maxfiles;

	/* Cannot be changed after boot */
	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
	nbuf = NBUF;
	TUNABLE_INT_FETCH("kern.nbuf", &nbuf);
	ncallout = 16 + maxproc + maxfiles;
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
}
Example #22
int
ept_init(int ipinum)
{
	int use_hw_ad_bits, use_superpages, use_exec_only;
	uint64_t cap;

	cap = rdmsr(MSR_VMX_EPT_VPID_CAP);

	/*
	 * Verify that:
	 * - page walk length is 4 steps
	 * - extended page tables can be laid out in write-back memory
	 * - invvpid instruction with all possible types is supported
	 * - invept instruction with all possible types is supported
	 */
	if (!EPT_PWL4(cap) ||
	    !EPT_MEMORY_TYPE_WB(cap) ||
	    !INVVPID_SUPPORTED(cap) ||
	    !INVVPID_ALL_TYPES_SUPPORTED(cap) ||
	    !INVEPT_SUPPORTED(cap) ||
	    !INVEPT_ALL_TYPES_SUPPORTED(cap))
		return (EINVAL);

	ept_pmap_flags = ipinum & PMAP_NESTED_IPIMASK;

	use_superpages = 1;
	TUNABLE_INT_FETCH("hw.vmm.ept.use_superpages", &use_superpages);
	if (use_superpages && EPT_PDE_SUPERPAGE(cap))
		ept_pmap_flags |= PMAP_PDE_SUPERPAGE;	/* 2MB superpage */

	use_hw_ad_bits = 1;
	TUNABLE_INT_FETCH("hw.vmm.ept.use_hw_ad_bits", &use_hw_ad_bits);
	if (use_hw_ad_bits && AD_BITS_SUPPORTED(cap))
		ept_enable_ad_bits = 1;
	else
		ept_pmap_flags |= PMAP_EMULATE_AD_BITS;

	use_exec_only = 1;
	TUNABLE_INT_FETCH("hw.vmm.ept.use_exec_only", &use_exec_only);
	if (use_exec_only && EPT_SUPPORTS_EXEC_ONLY(cap))
		ept_pmap_flags |= PMAP_SUPPORTS_EXEC_ONLY;

	return (0);
}
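All three feature knobs default to enabled and only take effect when the EPT capability MSR confirms hardware support; for instance, booting with hw.vmm.ept.use_superpages=0 keeps PMAP_PDE_SUPERPAGE out of ept_pmap_flags even on capable CPUs.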
Example #23
/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem, maxmbufmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE,
	    vm_map_max(kernel_map) - vm_map_min(kernel_map));
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
Example #24
/*
 * Boot time overrides that are scaled against main memory
 */
void
init_param2(long physpages)
{

	/* Base parameters */
	maxusers = MAXUSERS;
	TUNABLE_INT_FETCH("kern.maxusers", &maxusers);
	if (maxusers == 0) {
		maxusers = physpages / (2 * 1024 * 1024 / PAGE_SIZE);
		if (maxusers < 32)
			maxusers = 32;
		if (maxusers > 384)
			maxusers = 384;
	}

	/*
	 * The following can be overridden after boot via sysctl.  Note:
	 * unless overridden, these macros are ultimately based on maxusers.
	 */
	maxproc = NPROC;
	TUNABLE_INT_FETCH("kern.maxproc", &maxproc);
	/*
	 * Limit maxproc so that kmap entries cannot be exhausted by
	 * processes.
	 */
	if (maxproc > (physpages / 12))
		maxproc = physpages / 12;
	maxfiles = MAXFILES;
	TUNABLE_INT_FETCH("kern.maxfiles", &maxfiles);
	maxprocperuid = (maxproc * 9) / 10;
	maxfilesperproc = (maxfiles * 9) / 10;

	/*
	 * Cannot be changed after boot.
	 */
	nbuf = NBUF;
	TUNABLE_INT_FETCH("kern.nbuf", &nbuf);

	ncallout = 16 + maxproc + maxfiles;
	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
}
Example #25
/*
 * AMD nested page table init.
 */
int
svm_npt_init(int ipinum)
{
	int enable_superpage = 1;

	npt_flags = ipinum & NPT_IPIMASK;
	TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage);
	if (enable_superpage)
		npt_flags |= PMAP_PDE_SUPERPAGE; 
	
	return (0);
}
Example #26
/*
 * tunable_mbinit() has to be run before init_maxsockets() thus
 * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
 * runs at SI_ORDER_ANY.
 */
static void
tunable_mbinit(void *dummy)
{
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);

	/* This has to be done before VM init. */
	if (nmbclusters == 0)
		nmbclusters = 1024 + maxusers * 64;
	nmbjumbop = nmbclusters / 2;
	nmbjumbo9 = nmbjumbop / 2;
	nmbjumbo16 = nmbjumbo9 / 2;
}
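Tracing the cascade above with the maxusers cap of 384 seen elsewhere in these examples: nmbclusters defaults to 1024 + 384 * 64 = 25600, giving nmbjumbop = 12800, nmbjumbo9 = 6400, and nmbjumbo16 = 3200.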
Example #27
/*
 * Boot time overrides that are scaled against the kernel map
 */
void
init_param3(long kmempages)
{

	/*
	 * The default for maxpipekva is max(5% of the kernel map, 512KB).
	 * See sys_pipe.c for more details.
	 */
	maxpipekva = (kmempages / 20) * PAGE_SIZE;
	if (maxpipekva < 512 * 1024)
		maxpipekva = 512 * 1024;
	TUNABLE_INT_FETCH("kern.ipc.maxpipekva", &maxpipekva);
}
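For example, with a 2 GB kernel map and 4 KB pages, kmempages is 524288, so (kmempages / 20) * PAGE_SIZE comes to roughly 102 MB, well above the 512 KB floor.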
Example #28
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
Example #29
/*
 * Boot-time allocation of the DDB capture buffer, if any.  Force all buffer
 * sizes, including the maximum size, to be rounded to block sizes.
 */
static void
db_capture_sysinit(__unused void *dummy)
{

    TUNABLE_INT_FETCH("debug.ddb.capture.bufsize", &db_capture_bufsize);
    db_capture_maxbufsize = roundup(db_capture_maxbufsize,
                                    TEXTDUMP_BLOCKSIZE);
    db_capture_bufsize = roundup(db_capture_bufsize, TEXTDUMP_BLOCKSIZE);
    if (db_capture_bufsize > db_capture_maxbufsize)
        db_capture_bufsize = db_capture_maxbufsize;
    if (db_capture_bufsize != 0)
        db_capture_buf = malloc(db_capture_bufsize, M_DDB_CAPTURE,
                                M_WAITOK);
}
Example #30
/*
 * tunable_mbinit() has to be run before init_maxsockets() thus
 * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
 * runs at SI_ORDER_ANY.
 */
void
tunable_mbinit(void *dummy)
{

	int maxusers = 16;

	/* This has to be done before VM init. */
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = 1024 + maxusers * 64;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = nmbclusters / 2;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = nmbclusters / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = nmbclusters / 8;
}