Code Example #1
File: uinet_init.c  Project: diegows/wanproxy
int
uinet_init(void)
{
	struct thread *td;

	printf("uinet_init starting\n");

	/* XXX need to get this from OS */
	mp_ncpus = 1;

	/* vm_init bits */
	ncallout = 64;

	/* set up per-CPU data for the single emulated CPU */
	pcpup = malloc(sizeof(struct pcpu), M_DEVBUF, M_ZERO);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/* the callwheel gets a fixed 512KB buffer here; the later
	 * versions below size it properly with a two-pass call
	 */
	kern_timeout_callwheel_alloc(malloc(512*1024, M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_init_thread0();

	/* bootstrap the UMA allocator with 40 pages of backing memory */
	uma_startup(malloc(40*4096, M_DEVBUF, M_ZERO), 40);
	uma_startup2();

	/* XXX fix this magic 64 to something a bit more dynamic & sensible */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*64, M_DEVBUF, M_ZERO);
	uma_page_mask = 64-1;

	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
	mutex_init();
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing
	 */
	sleep(1);
	return (0);
}
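
A minimal caller sketch for this variant: uinet_init() takes no arguments and is invoked once before any other use of the stack. The header name uinet_api.h and the error-handling style are assumptions, not taken from the snippet above.

#include <stdio.h>
#include "uinet_api.h"	/* assumed header declaring uinet_init() */

int
main(void)
{
	/* bring up the userland stack once, before any socket work */
	if (uinet_init() != 0) {
		fprintf(stderr, "uinet_init failed\n");
		return (1);
	}
	/* stack initialized; uinet sockets can be created from here on */
	return (0);
}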
Code Example #2
File: uinet_init.c  Project: bigclouds/libuinet
int
uinet_init(unsigned int ncpus, unsigned int nmbclusters, struct uinet_instance_cfg *inst_cfg)
{
	struct thread *td;
	char tmpbuf[32];
	int boot_pages;
	int num_hash_buckets;
	caddr_t v;

	if (ncpus > MAXCPU) {
		printf("Limiting number of CPUs to %u\n", MAXCPU);
		ncpus = MAXCPU;
	} else if (0 == ncpus) {
		printf("Setting number of CPUs to 1\n");
		ncpus = 1;
	}

	printf("uinet starting: cpus=%u, nmbclusters=%u\n", ncpus, nmbclusters);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", nmbclusters);
	setenv("kern.ipc.nmbclusters", tmpbuf);

	/* The env var kern.ncallout will get read in proc0_init(), but
	 * that's after we init the callwheel below.  So we set it here for
	 * consistency, but the operative setting is the direct assignment
	 * below.
	 */
	ncallout = HZ * 3600;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", ncallout);
	setenv("kern.ncallout", tmpbuf);

	/* Assuming maxsockets will be set to nmbclusters, the following
	 * sets the TCP tcbhash size so that perfectly uniform hashing would
	 * result in a maximum bucket depth of about 16.
	 */
	num_hash_buckets = 1;
	while (num_hash_buckets < nmbclusters / 16)
		num_hash_buckets <<= 1;
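	/* e.g. nmbclusters = 100000 gives num_hash_buckets = 8192 (the
	 * smallest power of 2 >= 100000 / 16), so with maxsockets equal
	 * to nmbclusters the average chain depth is about 12.
	 */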
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", num_hash_buckets);	
	setenv("net.inet.tcp.tcbhashsize", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", 2048);
	setenv("net.inet.tcp.syncache.hashsize", tmpbuf);

	boot_pages = 16;  /* number of pages made available for uma to bootstrap itself */

	mp_ncpus = ncpus;
	mp_maxid = mp_ncpus - 1;

	uhi_set_num_cpus(mp_ncpus);

	/* vm_init bits */

	/* first get size required, then alloc memory, then give that memory to the second call */
	v = 0;
	v = kern_timeout_callwheel_alloc(v);
	kern_timeout_callwheel_alloc(malloc(round_page((vm_offset_t)v), M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_init_thread0();

	uma_startup(malloc(boot_pages*PAGE_SIZE, M_DEVBUF, M_ZERO), boot_pages);
	uma_startup2();

	/* XXX any need to tune this? */
	num_hash_buckets = 8192;  /* power of 2.  32 bytes per bucket on a 64-bit system, so no need to skimp */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*num_hash_buckets, M_DEVBUF, M_ZERO);
	uma_page_mask = num_hash_buckets - 1;

#if 0
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
#endif
	mutex_init();
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing
	 */
	sleep(1);

	uinet_instance_init(&uinst0, vnet0, inst_cfg);

	if (uhi_msg_init(&shutdown_helper_msg, 1, 0) != 0)
		printf("Failed to init shutdown helper message - there will be no shutdown helper thread\n");
	else if (kthread_add(shutdown_helper, &shutdown_helper_msg, NULL, &shutdown_helper_thread, 0, 0, "shutdown_helper"))
		printf("Failed to create shutdown helper thread\n");

	/*
	 * XXX This should be configurable - applications that arrange for a
	 * particular thread to process all signals will not want this.
	 */
	if (kthread_add(one_sighandling_thread, NULL, NULL, &at_least_one_sighandling_thread, 0, 0, "one_sighandler"))
		printf("Failed to create at least one signal handling thread\n");
	uhi_mask_all_signals();

#if 0
	printf("maxusers=%d\n", maxusers);
	printf("maxfiles=%d\n", maxfiles);
	printf("maxsockets=%d\n", maxsockets);
	printf("nmbclusters=%d\n", nmbclusters);
#endif

	return (0);
}
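
This variant moves the basic sizing into the arguments. A hedged usage sketch follows, assuming uinet_api.h as the header and that a NULL inst_cfg selects default instance settings; the values (one CPU, 128 * 1024 mbuf clusters) are illustrative only.

#include "uinet_api.h"	/* assumed header declaring uinet_init() */

int
main(void)
{
	/* one CPU, 128k mbuf clusters, NULL = assumed default instance cfg */
	if (uinet_init(1, 128 * 1024, NULL) != 0)
		return (1);
	/* uinst0, the shutdown helper, and a signal-handling thread now exist */
	return (0);
}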
Code Example #3
File: uinet_init.c  Project: BillTheBest/libuinet
int
uinet_init(struct uinet_global_cfg *cfg, struct uinet_instance_cfg *inst_cfg)
{
	struct thread *td;
	char tmpbuf[32];
	int boot_pages;
	caddr_t v;
	struct uinet_global_cfg default_cfg;
	unsigned int ncpus;
	unsigned int num_hash_buckets;

#if defined(__amd64__) || defined(__i386__)
	unsigned int regs[4];

	do_cpuid(1, regs);
	cpu_feature = regs[3];
	cpu_feature2 = regs[2];
#endif

	uinet_hz = HZ;

	if (cfg == NULL) {
		uinet_default_cfg(&default_cfg, UINET_GLOBAL_CFG_MEDIUM);
		cfg = &default_cfg;
	}

	epoch_number = cfg->epoch_number;
	
#if defined(VIMAGE_STS) || defined(VIMAGE_STS_ONLY)
	if (inst_cfg) {
		uinet_instance_init_vnet_sts(&vnet0_sts, inst_cfg);
	}
#endif

	printf("uinet starting\n");
	printf("requested configuration:\n");
	uinet_print_cfg(cfg);

	if_netmap_num_extra_bufs = cfg->netmap_extra_bufs;

	ncpus = cfg->ncpus;

	if (ncpus > MAXCPU) {
		printf("Limiting number of CPUs to %u\n", MAXCPU);
		ncpus = MAXCPU;
	} else if (0 == ncpus) {
		printf("Setting number of CPUs to 1\n");
		ncpus = 1;
	}

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->kern.ipc.maxsockets);
	setenv("kern.ipc.maxsockets", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->kern.ipc.nmbclusters);
	setenv("kern.ipc.nmbclusters", tmpbuf);

	/* The env var kern.ncallout will get read in proc0_init(), but
	 * that's after we init the callwheel below.  So we set it here for
	 * consistency, but the operative setting is the direct assignment
	 * below.
	 */
	ncallout = HZ * 3600;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", ncallout);
	setenv("kern.ncallout", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", roundup_nearest_power_of_2(cfg->net.inet.tcp.syncache.hashsize));
	setenv("net.inet.tcp.syncache.hashsize", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->net.inet.tcp.syncache.bucketlimit);
	setenv("net.inet.tcp.syncache.bucketlimit", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->net.inet.tcp.syncache.cachelimit);
	setenv("net.inet.tcp.syncache.cachelimit", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", roundup_nearest_power_of_2(cfg->net.inet.tcp.tcbhashsize));	
	setenv("net.inet.tcp.tcbhashsize", tmpbuf);

	boot_pages = 16;  /* number of pages made available for uma to bootstrap itself */

	mp_ncpus = ncpus;
	mp_maxid = mp_ncpus - 1;

	uhi_set_num_cpus(mp_ncpus);

	/* vm_init bits */

	/* first get size required, then alloc memory, then give that memory to the second call */
	v = 0;
	v = kern_timeout_callwheel_alloc(v);
	kern_timeout_callwheel_alloc(malloc(round_page((vm_offset_t)v), M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_thread_init();
	uinet_init_thread0();

	uma_startup(malloc(boot_pages*PAGE_SIZE, M_DEVBUF, M_ZERO), boot_pages);
	uma_startup2();

	/* XXX any need to tune this? */
	num_hash_buckets = 8192;  /* power of 2.  32 bytes per bucket on a 64-bit system, so no need to skimp */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*num_hash_buckets, M_DEVBUF, M_ZERO);
	uma_page_mask = num_hash_buckets - 1;

#if 0
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
#endif
	mutex_init();
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing
	 */
	sleep(1);

	kernel_sysctlbyname(curthread, "kern.ipc.somaxconn", NULL, NULL,
			    &cfg->kern.ipc.somaxconn, sizeof(cfg->kern.ipc.somaxconn), NULL, 0);

	uinet_instance_init(&uinst0, vnet0, inst_cfg);

	if (uhi_msg_init(&shutdown_helper_msg, 1, 0) != 0)
		printf("Failed to init shutdown helper message - there will be no shutdown helper thread\n");
	else if (kthread_add(shutdown_helper, &shutdown_helper_msg, NULL, &shutdown_helper_thread, 0, 0, "shutdown_helper"))
		printf("Failed to create shutdown helper thread\n");

	/*
	 * XXX This should be configurable - applications that arrange for a
	 * particular thread to process all signals will not want this.
	 */
	if (kthread_add(one_sighandling_thread, NULL, NULL, &at_least_one_sighandling_thread, 0, 0, "one_sighandler"))
		printf("Failed to create at least one signal handling thread\n");
	uhi_mask_all_signals();

	return (0);
}
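
Here the tunables move into struct uinet_global_cfg, and the function itself shows that a NULL cfg falls back to uinet_default_cfg() with UINET_GLOBAL_CFG_MEDIUM. The sketch below starts from that same profile and overrides a single field before initializing; the header name is an assumption.

#include "uinet_api.h"	/* assumed header declaring the uinet_* calls */

int
main(void)
{
	struct uinet_global_cfg cfg;

	/* start from the medium profile, the same default used for a NULL cfg */
	uinet_default_cfg(&cfg, UINET_GLOBAL_CFG_MEDIUM);
	cfg.ncpus = 2;		/* override an individual knob before init */
	if (uinet_init(&cfg, NULL) != 0)
		return (1);
	return (0);
}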