/*
 * Called by nd6_init() during initialization time.
 */
void
nd6_prproxy_init(void)
{
	ndprl_size = sizeof (struct nd6_prproxy_prelist);
	ndprl_zone = zinit(ndprl_size, NDPRL_ZONE_MAX * ndprl_size, 0,
	    NDPRL_ZONE_NAME);
	if (ndprl_zone == NULL)
		panic("%s: failed allocating ndprl_zone", __func__);

	zone_change(ndprl_zone, Z_EXPAND, TRUE);
	zone_change(ndprl_zone, Z_CALLERACCT, FALSE);

	solsrc_size = sizeof (struct nd6_prproxy_solsrc);
	solsrc_zone = zinit(solsrc_size, SOLSRC_ZONE_MAX * solsrc_size, 0,
	    SOLSRC_ZONE_NAME);
	if (solsrc_zone == NULL)
		panic("%s: failed allocating solsrc_zone", __func__);

	zone_change(solsrc_zone, Z_EXPAND, TRUE);
	zone_change(solsrc_zone, Z_CALLERACCT, FALSE);

	soltgt_size = sizeof (struct nd6_prproxy_soltgt);
	soltgt_zone = zinit(soltgt_size, SOLTGT_ZONE_MAX * soltgt_size, 0,
	    SOLTGT_ZONE_NAME);
	if (soltgt_zone == NULL)
		panic("%s: failed allocating soltgt_zone", __func__);

	zone_change(soltgt_zone, Z_EXPAND, TRUE);
	zone_change(soltgt_zone, Z_CALLERACCT, FALSE);
}
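/*
 * Illustrative sketch only (not part of the source above): once a zone has
 * been created with zinit(), elements are normally obtained and released
 * with zalloc()/zfree().  The ndprl_alloc_example()/ndprl_free_example()
 * wrappers below are hypothetical; zalloc() and zfree() are the real zone
 * allocator entry points.
 */
static struct nd6_prproxy_prelist *
ndprl_alloc_example(void)
{
	struct nd6_prproxy_prelist *ndprl;

	ndprl = zalloc(ndprl_zone);		/* grab one element from the zone */
	if (ndprl != NULL)
		bzero(ndprl, ndprl_size);	/* zones do not zero on allocation */
	return (ndprl);
}

static void
ndprl_free_example(struct nd6_prproxy_prelist *ndprl)
{
	zfree(ndprl_zone, ndprl);		/* return the element to its zone */
}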
/* initial setup done at time of sysinit */
void
pipeinit(void)
{
	nbigpipe = 0;
	vm_size_t zone_size;

	zone_size = 8192 * sizeof(struct pipe);
	pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone");

	/* allocate lock group attribute and group for pipe mutexes */
	pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
	pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);

	/* allocate the lock attribute for pipe mutexes */
	pipe_mtx_attr = lck_attr_alloc_init();

	/*
	 * Set up garbage collection for dead pipes
	 */
	zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) *
	    sizeof(struct pipe_garbage);
	pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage),
	    zone_size, 4096, "pipe garbage zone");
	pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr);
}
void
ipc_bootstrap(void)
{
	kern_return_t kr;

	ipc_port_multiple_lock_init();

	ipc_port_timestamp_lock_init();
	ipc_port_timestamp_data = 0;

	ipc_space_zone = zinit(sizeof(struct ipc_space), 0,
	    ipc_space_max * sizeof(struct ipc_space),
	    sizeof(struct ipc_space),
	    IPC_ZONE_TYPE, "ipc spaces");

	ipc_tree_entry_zone = zinit(sizeof(struct ipc_tree_entry), 0,
	    ipc_tree_entry_max * sizeof(struct ipc_tree_entry),
	    sizeof(struct ipc_tree_entry),
	    IPC_ZONE_TYPE, "ipc tree entries");

	ipc_object_zones[IOT_PORT] = zinit(sizeof(struct ipc_port), 0,
	    ipc_port_max * sizeof(struct ipc_port),
	    sizeof(struct ipc_port),
	    0, "ipc ports");

	ipc_object_zones[IOT_PORT_SET] = zinit(sizeof(struct ipc_pset), 0,
	    ipc_pset_max * sizeof(struct ipc_pset),
	    sizeof(struct ipc_pset),
	    IPC_ZONE_TYPE, "ipc port sets");

	/* create special spaces */

	kr = ipc_space_create_special(&ipc_space_kernel);
	assert(kr == KERN_SUCCESS);

	kr = ipc_space_create_special(&ipc_space_reply);
	assert(kr == KERN_SUCCESS);

	/* initialize modules with hidden data structures */

	ipc_table_init();
	ipc_notify_init();
	ipc_hash_init();
	ipc_marequest_init();
}
void
os_reason_init()
{
	int reasons_allocated = 0;

	/*
	 * Initialize OS reason group and lock attributes
	 */
	os_reason_lock_grp_attr = lck_grp_attr_alloc_init();
	os_reason_lock_grp = lck_grp_alloc_init("os_reason_lock",
	    os_reason_lock_grp_attr);
	os_reason_lock_attr = lck_attr_alloc_init();

	/*
	 * Create OS reason zone.
	 */
	os_reason_zone = zinit(sizeof(struct os_reason),
	    OS_REASON_MAX_COUNT * sizeof(struct os_reason),
	    OS_REASON_MAX_COUNT, "os reasons");
	if (os_reason_zone == NULL) {
		panic("failed to initialize os_reason_zone");
	}

	/*
	 * We pre-fill the OS reason zone to reduce the likelihood that
	 * the jetsam thread and others block when they create an exit
	 * reason.  This pre-filled memory is not-collectable since it's
	 * foreign memory crammed in as part of zfill().
	 */
	reasons_allocated = zfill(os_reason_zone, OS_REASON_RESERVE_COUNT);
	assert(reasons_allocated > 0);
}
/* Create the zone backing svm "struct change" records. */
void
svm_change_init(void)
{
	svm_change_zone = zinit(sizeof(struct change),
	    (vm_size_t) (512 * 1024),
	    sizeof(struct change),
	    "svm.change");
}
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL,
	    &fadv_thread) != KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
static FT_Error
ft_lzw_file_reset( FT_LZWFile  zip )
{
	FT_Stream  stream = zip->source;
	FT_Error   error;

	if ( !FT_STREAM_SEEK( zip->start ) )
	{
		s_zstate_t*  zstream = &zip->zstream;

		zinit( zstream );

		zstream->avail_in    = 0;
		zstream->next_in     = zip->input;
		zstream->total_in    = 0;
		zstream->avail_out   = 0;
		zstream->next_out    = zip->buffer;
		zstream->total_out   = 0;
		zstream->zs_in_count = zip->source->size - 2;

		zip->limit  = zip->buffer + FT_LZW_BUFFER_SIZE;
		zip->cursor = zip->limit;
		zip->pos    = 0;
	}

	return error;
}
/*
 * Initialize FP handling.
 */
void
fpu_module_init(void)
{
	ifps_zone = zinit(sizeof(struct i386_fpsave_state),
	    THREAD_MAX * sizeof(struct i386_fpsave_state),
	    THREAD_CHUNK * sizeof(struct i386_fpsave_state),
	    "i386 fpsave state");
}
/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	semaphore_zone = zinit(sizeof(struct semaphore),
	    semaphore_max * sizeof(struct semaphore),
	    sizeof(struct semaphore),
	    "semaphores");
}
/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	semaphore_zone = zinit(sizeof(struct semaphore),
	    semaphore_max * sizeof(struct semaphore),
	    sizeof(struct semaphore),
	    "semaphores");
	zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}
void
mk_timer_init(void)
{
	int s = sizeof (mk_timer_data_t);

	assert(!(mk_timer_zone != NULL));

	mk_timer_zone = zinit(s, (4096 * s), (16 * s), "mk_timer");
}
/*
 * Initialize the Audit subsystem: configuration state, work queue,
 * synchronization primitives, worker thread, and trigger device node.  Also
 * call into the BSM assembly code to initialize it.
 */
void
audit_init(void)
{
	audit_enabled = 0;
	audit_syscalls = 0;
	audit_kevent_mask = 0;
	audit_suspended = 0;
	audit_panic_on_write_fail = 0;
	audit_fail_stop = 0;
	audit_in_failure = 0;
	audit_argv = 0;
	audit_arge = 0;

	audit_fstat.af_filesz = 0;	/* '0' means unset, unbounded. */
	audit_fstat.af_currsz = 0;
	audit_nae_mask.am_success = 0;
	audit_nae_mask.am_failure = 0;

	TAILQ_INIT(&audit_q);
	audit_q_len = 0;
	audit_pre_q_len = 0;
	audit_qctrl.aq_hiwater = AQ_HIWATER;
	audit_qctrl.aq_lowater = AQ_LOWATER;
	audit_qctrl.aq_bufsz = AQ_BUFSZ;
	audit_qctrl.aq_minfree = AU_FS_MINFREE;

	audit_kinfo.ai_termid.at_type = AU_IPv4;
	audit_kinfo.ai_termid.at_addr[0] = INADDR_ANY;

	_audit_lck_grp_init();
	mtx_init(&audit_mtx, "audit_mtx", NULL, MTX_DEF);
	KINFO_LOCK_INIT();
	cv_init(&audit_worker_cv, "audit_worker_cv");
	cv_init(&audit_drain_cv, "audit_drain_cv");
	cv_init(&audit_watermark_cv, "audit_watermark_cv");
	cv_init(&audit_fail_cv, "audit_fail_cv");

	audit_record_zone = zinit(sizeof(struct kaudit_record),
	    AQ_HIWATER * sizeof(struct kaudit_record), 8192, "audit_zone");
#if CONFIG_MACF
	audit_mac_init();
#endif
	/* Init audit session subsystem. */
	audit_session_init();

	/* Initialize the BSM audit subsystem. */
	kau_init();

	/* audit_trigger_init(); */

	/* Initialize the audit pipe subsystem. */
	(void) audit_pipe_init();

	/* Start audit worker thread. */
	audit_worker_init();
}
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	thread_call_group_t	group = &thread_call_group0;
	kern_return_t		result;
	thread_t		thread;
	int			i;
	spl_t			s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues",
	    &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call",
	    &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp,
	    &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp,
	    &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[internal_call_count];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority(
	    (thread_continue_t)thread_call_daemon, group,
	    BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
void
mac_labelzone_init(void)
{
	zone_label = zinit(sizeof(struct label),
	    8192 * sizeof(struct label),
	    sizeof(struct label), "MAC Labels");
	zone_change(zone_label, Z_EXPAND, TRUE);
	zone_change(zone_label, Z_EXHAUST, FALSE);
}
/*ARGSUSED*/
int
smbfs_init(struct vfsconf *vfsp)
{
#ifdef SMBFS_USEZONE
	smbfsmount_zone = zinit("SMBFSMOUNT", sizeof(struct smbmount), 0, 0, 1);
#endif
	smbfs_pbuf_freecnt = nswbuf / 2 + 1;
	SMBVDEBUG("done.\n");
	return 0;
}
void
udp_init()
{
	LIST_INIT(&udb);
	udbinfo.listhead = &udb;
	udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
	udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
	    &udbinfo.porthashmask);
	udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
	    ZONE_INTERRUPT, 0);
}
static void
uthread_zone_init(void)
{
	if (!uthread_zone_inited) {
		uthread_zone = zinit(sizeof(struct uthread),
		    thread_max * sizeof(struct uthread),
		    THREAD_CHUNK * sizeof(struct uthread),
		    "uthreads");
		uthread_zone_inited = 1;
	}
}
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t		call;
	kern_return_t		result;
	thread_t		thread;
	int			i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues",
	    &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call",
	    &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp,
	    &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp,
	    &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS,
	    &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW],
	    THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER],
	    THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL],
	    THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH],
	    THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[INTERNAL_CALL_COUNT];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority(
	    (thread_continue_t)thread_call_daemon, NULL,
	    BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
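/*
 * Illustrative client sketch only (not part of this file): once
 * thread_call_initialize() has run, a subsystem can allocate a callout and
 * queue it for execution.  my_callout_fn() and my_subsystem_init() are
 * hypothetical names; thread_call_allocate() and thread_call_enter() are the
 * real thread-call interfaces.
 */
static void
my_callout_fn(thread_call_param_t param0, thread_call_param_t param1)
{
	/* Runs later on a thread-call worker thread. */
}

static void
my_subsystem_init(void)
{
	thread_call_t call;

	call = thread_call_allocate(my_callout_fn, NULL);
	(void) thread_call_enter(call);		/* queue for execution */
}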
void
wait_queue_bootstrap(void)
{
	wait_queues_init();
	_wait_queue_zone = zinit(sizeof(struct wait_queue),
	    WAIT_QUEUE_MAX * sizeof(struct wait_queue),
	    sizeof(struct wait_queue),
	    "wait queues");
	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
	    WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
	    sizeof(struct wait_queue_set),
	    "wait queue sets");
	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);

	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
	    WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
	    sizeof(struct _wait_queue_link),
	    "wait queue links");
	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
}
void
vnode_pager_bootstrap(void)
{
	register vm_size_t	size;

	size = (vm_size_t) sizeof(struct vnode_pager);
	vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size,
	    PAGE_SIZE, "vnode pager structures");
#ifdef __i386__
	apple_protect_pager_bootstrap();
#endif /* __i386__ */
	return;
}
static void
uthread_zone_init(void)
{
	if (!uthread_zone_inited) {
		uthread_zone = zinit(sizeof(struct uthread),
		    thread_max * sizeof(struct uthread),
		    THREAD_CHUNK * sizeof(struct uthread),
		    "uthreads");
		uthread_zone_inited = 1;

		zone_change(uthread_zone, Z_NOENCRYPT, TRUE);
	}
}
/* `fopen()' wrapper. */
FILE *zfile_fopen(const char *name, const char *mode)
{
    char *tmp_name;
    FILE *stream;
    enum compression_type type;
    int write_mode = 0;

    if (!zinit_done) {
        zinit();
    }

    if (name == NULL || name[0] == 0) {
        return NULL;
    }

    /* Do we want to write to this file? */
    if ((strchr(mode, 'w') != NULL) || (strchr(mode, '+') != NULL)) {
        write_mode = 1;
    }

    /* Check for write permissions. */
    if (write_mode && ioutil_access(name, IOUTIL_ACCESS_W_OK) < 0) {
        return NULL;
    }

    type = try_uncompress(name, &tmp_name, write_mode);
    if (type == COMPR_NONE) {
        stream = fopen(name, mode);
        if (stream == NULL) {
            return NULL;
        }
        zfile_list_add(NULL, name, type, write_mode, stream, NULL);
        return stream;
    } else if (*tmp_name == '\0') {
        errno = EACCES;
        return NULL;
    }

    /* Open the uncompressed version of the file. */
    stream = fopen(tmp_name, mode);
    if (stream == NULL) {
        return NULL;
    }

    zfile_list_add(tmp_name, name, type, write_mode, stream, NULL);

    /* now we don't need the archdep_tmpnam allocation any more */
    lib_free(tmp_name);

    return stream;
}
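/*
 * Illustrative usage sketch only (not part of the source above): open a
 * possibly compressed disk image read-only through the wrapper and close it
 * through the matching zfile call.  The file name and the example function
 * are hypothetical; zfile_fclose() is the wrapper's counterpart for closing.
 */
static int zfile_open_example(void)
{
    FILE *f = zfile_fopen("games.d64.gz", "rb");

    if (f == NULL) {
        return -1;
    }
    /* ... read from the transparently uncompressed stream ... */
    zfile_fclose(f);    /* close through the zfile layer, not plain fclose() */
    return 0;
}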
/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	struct kmzones	*kmz;

	if ((sizeof(kmzones)/sizeof(kmzones[0])) !=
	    (sizeof(memname)/sizeof(memname[0]))) {
		panic("kmeminit: kmzones has %lu elements but memname has %lu\n",
		    (sizeof(kmzones)/sizeof(kmzones[0])),
		    (sizeof(memname)/sizeof(memname[0])));
	}

	kmz = kmzones;
	while (kmz < &kmzones[M_LAST]) {
/* XXX */
		if (kmz->kz_elemsize == (size_t)(-1))
			;
		else
/* XXX */
		if (kmz->kz_zalloczone == KMZ_CREATEZONE ||
		    kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT) {
			kmz->kz_zalloczone = zinit(kmz->kz_elemsize,
			    1024 * 1024, PAGE_SIZE,
			    memname[kmz - kmzones]);
			zone_change(kmz->kz_zalloczone, Z_CALLERACCT,
			    (kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT));

			if (kmz->kz_noencrypt == TRUE)
				zone_change(kmz->kz_zalloczone, Z_NOENCRYPT, TRUE);
		}
		else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE)
			kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize);

		kmz++;
	}

	kmz = kmzones;
	while (kmz < &kmzones[M_LAST]) {
/* XXX */
		if (kmz->kz_elemsize == (size_t)(-1))
			;
		else
/* XXX */
		if (kmz->kz_zalloczone == KMZ_SHAREZONE) {
			kmz->kz_zalloczone =
			    kmzones[kmz->kz_elemsize].kz_zalloczone;
			kmz->kz_elemsize =
			    kmzones[kmz->kz_elemsize].kz_elemsize;
		}

		kmz++;
	}
}
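/*
 * Illustrative sketch only (not part of the source above): after kmeminit()
 * has wired each malloc type either to its own zone or to kalloc, BSD-side
 * kernel code allocates through the MALLOC/FREE macros with an M_* type tag.
 * kmem_malloc_example() and my_buf are hypothetical; MALLOC, FREE, M_TEMP
 * and M_WAITOK are existing BSD interfaces and tags.
 */
static void
kmem_malloc_example(void)
{
	char *my_buf;

	MALLOC(my_buf, char *, 128, M_TEMP, M_WAITOK);	/* served by a zone or kalloc */
	if (my_buf != NULL) {
		/* ... use the buffer ... */
		FREE(my_buf, M_TEMP);
	}
}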
void
rtalloc_init(void)
{
	kern_return_t retval;
	vm_offset_t min, addr;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, rtalloc_map_size,
	    FALSE, TRUE, &rtalloc_map);
	if (retval != KERN_SUCCESS)
		panic("rtalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with rtalloc, and messages up through size 8192 are common.
	 */
	rtalloc_max = 16 * 1024;
	rtalloc_max_prerounded = rtalloc_max / 2 + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.  Make zone exhaustible.
	 */
	for (i = 0, size = 1; size < rtalloc_max; i++, size <<= 1) {
		if (size < RTALLOC_MINSIZE) {
			rt_zone[i] = 0;
			continue;
		}
		if (size == RTALLOC_MINSIZE) {
			first_rt_zone = i;
		}
		rt_zone[i] = zinit(size, rt_zone_max[i] * size, size,
		    rt_zone_name[i]);
		zone_change(rt_zone[i], Z_EXHAUST, TRUE);
		zone_change(rt_zone[i], Z_COLLECT, FALSE);
		zone_change(rt_zone[i], Z_EXPAND, FALSE);

		/*
		 * Get space from the zone_map.  Since these zones are
		 * not collectable, no pages containing elements from these
		 * zones will ever be reclaimed by the garbage collection
		 * scheme below.
		 */
		zprealloc(rt_zone[i], rt_zone_max[i] * size);
	}
}
void
host_notify_init(void)
{
	int		i;

	for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++)
		queue_init(&host_notify_queue[i]);

	mutex_init(&host_notify_lock, ETAP_MISC_EVENT);

	i = sizeof (struct host_notify_entry);
	host_notify_zone =
	    zinit(i, (4096 * i), (16 * i), "host_notify");
}
void
priq_init(void)
{
	priq_size = sizeof (struct priq_if);
	priq_zone = zinit(priq_size, PRIQ_ZONE_MAX * priq_size,
	    0, PRIQ_ZONE_NAME);
	if (priq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_zone, Z_EXPAND, TRUE);
	zone_change(priq_zone, Z_CALLERACCT, TRUE);

	priq_cl_size = sizeof (struct priq_class);
	priq_cl_zone = zinit(priq_cl_size, PRIQ_CL_ZONE_MAX * priq_cl_size,
	    0, PRIQ_CL_ZONE_NAME);
	if (priq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, PRIQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(priq_cl_zone, Z_EXPAND, TRUE);
	zone_change(priq_cl_zone, Z_CALLERACCT, TRUE);
}
void
qfq_init(void)
{
	qfq_size = sizeof (struct qfq_if);
	qfq_zone = zinit(qfq_size, QFQ_ZONE_MAX * qfq_size,
	    0, QFQ_ZONE_NAME);
	if (qfq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, QFQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(qfq_zone, Z_EXPAND, TRUE);
	zone_change(qfq_zone, Z_CALLERACCT, TRUE);

	qfq_cl_size = sizeof (struct qfq_class);
	qfq_cl_zone = zinit(qfq_cl_size, QFQ_CL_ZONE_MAX * qfq_cl_size,
	    0, QFQ_CL_ZONE_NAME);
	if (qfq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, QFQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(qfq_cl_zone, Z_EXPAND, TRUE);
	zone_change(qfq_cl_zone, Z_CALLERACCT, TRUE);
}
void
fairq_init(void)
{
	fairq_size = sizeof (struct fairq_if);
	fairq_zone = zinit(fairq_size, FAIRQ_ZONE_MAX * fairq_size,
	    0, FAIRQ_ZONE_NAME);
	if (fairq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FAIRQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fairq_zone, Z_EXPAND, TRUE);
	zone_change(fairq_zone, Z_CALLERACCT, TRUE);

	fairq_cl_size = sizeof (struct fairq_class);
	fairq_cl_zone = zinit(fairq_cl_size, FAIRQ_CL_ZONE_MAX * fairq_cl_size,
	    0, FAIRQ_CL_ZONE_NAME);
	if (fairq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FAIRQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fairq_cl_zone, Z_EXPAND, TRUE);
	zone_change(fairq_cl_zone, Z_CALLERACCT, TRUE);
}
void
tcq_init(void)
{
	tcq_size = sizeof (struct tcq_if);
	tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size,
	    0, TCQ_ZONE_NAME);
	if (tcq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_zone, Z_EXPAND, TRUE);
	zone_change(tcq_zone, Z_CALLERACCT, TRUE);

	tcq_cl_size = sizeof (struct tcq_class);
	tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size,
	    0, TCQ_CL_ZONE_NAME);
	if (tcq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_cl_zone, Z_EXPAND, TRUE);
	zone_change(tcq_cl_zone, Z_CALLERACCT, TRUE);
}
void
sfb_init(void)
{
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof (struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}