/*
 * pim6_init: initialize the PIMv6 (Protocol Independent Multicast)
 * input-path statistics.
 *
 * Sets up the net.inet6 PIM sysctl subtree and allocates the per-CPU
 * statistics block (PIM6_NSTATS uint64_t counters per CPU).
 */
void
pim6_init(void)
{

	sysctl_net_inet6_pim6_setup(NULL);
	pim6stat_percpu = percpu_alloc(sizeof(uint64_t) * PIM6_NSTATS);
}
/*
 * npf_init: bring up the NPF packet filter framework.
 *
 * Allocates the per-CPU statistics block, initializes each NPF
 * subsystem (BPF, worker, tablesets, sessions, NAT, ALGs, extensions),
 * registers the pfil(9) hooks with an empty configuration and, when
 * built as a module, attaches the /dev/npf device node.
 *
 * => Returns 0 on success, or the errno from devsw_attach().
 */
static int
npf_init(void)
{
#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
#endif
	int error = 0;

	npf_stats_percpu = percpu_alloc(NPF_STATS_SIZE);
	npf_sysctl = NULL;

	/* Bring up each NPF subsystem. */
	npf_bpf_sysinit();
	npf_worker_sysinit();
	npf_tableset_sysinit();
	npf_session_sysinit();
	npf_nat_sysinit();
	npf_alg_sysinit();
	npf_ext_sysinit();

	/* Load empty configuration. */
	npf_pfil_register(true);
	npf_config_init();

#ifdef _MODULE
	/* Attach /dev/npf device. */
	error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
	if (error) {
		/* It will call devsw_detach(), which is safe. */
		(void)npf_fini();
	}
#endif
	return error;
}
/* * IP initialization: fill in IP protocol switch table. * All protocols not implemented in kernel go to raw IP protocol handler. */ void ip_init(void) { const struct protosw *pr; int i; sysctl_net_inet_ip_setup(NULL); pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl", NULL, IPL_SOFTNET); pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); if (pr == 0) panic("ip_init"); for (i = 0; i < IPPROTO_MAX; i++) ip_protox[i] = pr - inetsw; for (pr = inetdomain.dom_protosw; pr < inetdomain.dom_protoswNPROTOSW; pr++) if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) ip_protox[pr->pr_protocol] = pr - inetsw; ip_reass_init(); ip_ids = ip_id_init(); ip_id = time_second & 0xfffff; ipintrq.ifq_maxlen = IFQ_MAXLEN; TAILQ_INIT(&in_ifaddrhead); in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, &in_ifaddrhash); in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, &in_multihash); ip_mtudisc_timeout_q = rt_timer_queue_create(ip_mtudisc_timeout); #ifdef GATEWAY ipflow_init(ip_hashsize); #endif /* Register our Packet Filter hook. */ inet_pfil_hook = pfil_head_create(PFIL_TYPE_AF, (void *)AF_INET); KASSERT(inet_pfil_hook != NULL); #ifdef MBUFTRACE MOWNER_ATTACH(&ip_tx_mowner); MOWNER_ATTACH(&ip_rx_mowner); #endif /* MBUFTRACE */ ipstat_percpu = percpu_alloc(sizeof(uint64_t) * IP_NSTATS); }
/*
 * `Attach' the random device.  We use the timing of this event as
 * another potential source of initial entropy.
 *
 * => num is the standard attach-count argument; it is unused here.
 */
void
rndattach(int num)
{
	uint32_t c;

	/* Trap unwary players who don't call rnd_init() early. */
	KASSERT(rnd_ready);

	/* Pool caches for transfer buffers and per-open rnd contexts. */
	rnd_temp_buffer_cache = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0,
	    "rndtemp", NULL, IPL_NONE, NULL, NULL, NULL);
	rnd_ctx_cache = pool_cache_init(sizeof(struct rnd_ctx), 0, 0, 0,
	    "rndctx", NULL, IPL_NONE, NULL, NULL, NULL);
	percpu_urandom_cprng = percpu_alloc(sizeof(struct cprng_strong *));

	/* Mix in another counter. */
	c = rndpseudo_counter();
	mutex_spin_enter(&rndpool_mtx);
	rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
	mutex_spin_exit(&rndpool_mtx);
}
/*
 * pim6_init: initialize the PIMv6 statistics.
 *
 * Allocates the per-CPU statistics block (PIM6_NSTATS uint64_t
 * counters per CPU).  NOTE(review): unlike the other pim6_init()
 * variant in this file, this one does not set up the sysctl subtree —
 * presumably a different revision; confirm which one is current.
 */
void
pim6_init(void)
{

	pim6stat_percpu = percpu_alloc(sizeof(uint64_t) * PIM6_NSTATS);
}
/*
 * pic_add: register an interrupt controller (PIC) in the global
 * pic_list[] and carve out its range of interrupt sources.
 *
 * => pic     is the controller to register; pic_name must be non-empty.
 * => irqbase is the first IRQ number the PIC should own, or negative to
 *    request automatic placement in the first free slot.
 *
 * Panics if the requested IRQ range overlaps an already-registered PIC.
 */
void
pic_add(struct pic_softc *pic, int irqbase)
{
	int slot, maybe_slot = -1;

	KASSERT(strlen(pic->pic_name) > 0);

	/*
	 * Scan for a free slot while checking every registered PIC for
	 * an IRQ-range collision with the incoming one.
	 */
	for (slot = 0; slot < PIC_MAXPICS; slot++) {
		struct pic_softc * const xpic = pic_list[slot];
		if (xpic == NULL) {
			/* Remember the first empty slot we see. */
			if (maybe_slot < 0)
				maybe_slot = slot;
			/* Auto-placement: first free slot is good enough. */
			if (irqbase < 0)
				break;
			continue;
		}
		/* Ranges can't conflict unless both are fixed. */
		if (irqbase < 0 || xpic->pic_irqbase < 0)
			continue;
		/* New range lies entirely above the existing one. */
		if (irqbase >= xpic->pic_irqbase + xpic->pic_maxsources)
			continue;
		/* New range lies entirely below the existing one. */
		if (irqbase + pic->pic_maxsources <= xpic->pic_irqbase)
			continue;
		panic("pic_add: pic %s (%zu sources @ irq %u) conflicts"
		    " with pic %s (%zu sources @ irq %u)",
		    pic->pic_name, pic->pic_maxsources, irqbase,
		    xpic->pic_name, xpic->pic_maxsources,
		    xpic->pic_irqbase);
	}
	slot = maybe_slot;
#if 0
	printf("%s: pic_sourcebase=%zu pic_maxsources=%zu\n",
	    pic->pic_name, pic_sourcebase, pic->pic_maxsources);
#endif
	KASSERTMSG(pic->pic_maxsources <= PIC_MAXSOURCES, "%zu",
	    pic->pic_maxsources);
	KASSERT(pic_sourcebase + pic->pic_maxsources <= PIC_MAXMAXSOURCES);

	/*
	 * Allocate a pointer to each cpu's evcnts and then, for each cpu,
	 * allocate its evcnts and then attach an evcnt for each pin.
	 * We can't allocate the evcnt structures directly since
	 * percpu will move the contents of percpu memory around and
	 * corrupt the pointers in the evcnts themselves.  Remember, any
	 * problem can be solved with sufficient indirection.
	 */
	pic->pic_percpu = percpu_alloc(sizeof(struct pic_percpu));
	KASSERT(pic->pic_percpu != NULL);

	/*
	 * Now allocate the per-cpu evcnts.
	 */
	percpu_foreach(pic->pic_percpu, pic_percpu_allocate, pic);

	/* Claim the source range and publish the PIC in its slot. */
	pic->pic_sources = &pic_sources[pic_sourcebase];
	pic->pic_irqbase = irqbase;
	pic_sourcebase += pic->pic_maxsources;
	pic->pic_id = slot;
#ifdef __HAVE_PIC_SET_PRIORITY
	/* Only the primary PIC (slot 0) may set priority. */
	KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
	/* Only the primary PIC (slot 0) may send IPIs. */
	KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
	pic_list[slot] = pic;
}