/**
 * Purge all entries from a queue.
 *
 * @param rkq      queue to purge
 * @param do_lock  non-zero to acquire/release rkq->rkq_lock around the purge
 * @return the number of ops destroyed (or purged from the forwarded queue).
 */
int rd_kafka_q_purge0 (rd_kafka_q_t *rkq, int do_lock) {
        rd_kafka_op_t *rko, *next;
        TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
        int cnt = 0;

        if (do_lock)
                mtx_lock(&rkq->rkq_lock);

        /* If this queue forwards to another queue, purge that one instead. */
        if (rkq->rkq_fwdq) {
                cnt = rd_kafka_q_purge(rkq->rkq_fwdq);
                if (do_lock)
                        mtx_unlock(&rkq->rkq_lock);
                return cnt;
        }

        /* Move ops queue to tmpq to avoid lock-order issue
         * by locks taken from rd_kafka_op_destroy(). */
        TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link);

        /* Zero out queue */
        rd_kafka_q_reset(rkq);

        if (do_lock)
                mtx_unlock(&rkq->rkq_lock);

        /* Destroy the ops, with rkq_lock released (see lock-order note). */
        next = TAILQ_FIRST(&tmpq);
        while ((rko = next)) {
                /* Grab successor first: rko is freed by op_destroy(). */
                next = TAILQ_NEXT(next, rko_link);
                rd_kafka_op_destroy(rko);
                cnt++;
        }

        return cnt;
}
const struct t_backendQ * t_all_backends(void) { static int initialized = 0; static struct t_backendQ bQ = TAILQ_HEAD_INITIALIZER(bQ); if (!initialized) { /* add each available backend (order matter) */ /* FLAC files support using libflac */ if (t_ftflac_backend != NULL) TAILQ_INSERT_TAIL(&bQ, t_ftflac_backend(), entries); /* Ogg/Vorbis files support using libogg/libvorbis */ if (t_ftoggvorbis_backend != NULL) TAILQ_INSERT_TAIL(&bQ, t_ftoggvorbis_backend(), entries); /* Multiple files types support using TagLib */ if (t_fttaglib_backend != NULL) TAILQ_INSERT_TAIL(&bQ, t_fttaglib_backend(), entries); /* mp3 ID3v1.1 files types support */ if (t_ftid3v1_backend != NULL) TAILQ_INSERT_TAIL(&bQ, t_ftid3v1_backend(), entries); initialized = 1; } return (&bQ); }
/*
 * Wake up to `count` threads blocked on the futex word at `uaddr`.
 * Matching waiters are first unlinked from the global futex queue while
 * holding the lock, then made runnable outside the lock.  Returns 0.
 */
static inline int futex_wake(int *uaddr, int count)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Atomically grab all relevant futex blockers
  // from the global futex queue
  mcs_pdr_lock(&__futex.lock);
  e = TAILQ_FIRST(&__futex.queue);
  while(e != NULL) {
    if(count > 0) {
      // Save the successor before possibly unlinking e below.
      n = TAILQ_NEXT(e, link);
      if(e->uaddr == uaddr) {
        TAILQ_REMOVE(&__futex.queue, e, link);
        TAILQ_INSERT_TAIL(&q, e, link);
        count--;
      }
      e = n;
    }
    else break;  // Already collected `count` waiters; leave the rest queued.
  }
  mcs_pdr_unlock(&__futex.lock);

  // Unblock them outside the lock
  e = TAILQ_FIRST(&q);
  while(e != NULL) {
    n = TAILQ_NEXT(e, link);
    TAILQ_REMOVE(&q, e, link);
    // Spin until the waiter has published its pthread pointer
    // (NOTE(review): presumably set by the blocking side after it
    // enqueued this element - confirm against futex_wait()).
    while(e->pthread == NULL) cpu_relax();
    uthread_runnable((struct uthread*)e->pthread);
    e = n;
  }
  return 0;
}
/*
 * Free a single page by handing it to uvm_pglistfree() as a
 * one-element page list built on the stack.
 */
void
__free_page(struct page *page)
{
	struct pglist pglist = TAILQ_HEAD_INITIALIZER(pglist);

	TAILQ_INSERT_TAIL(&pglist, &page->p_vmp, pageq.queue);
	uvm_pglistfree(&pglist);
}
/*
 * Futex timeout ticker.  Once per millisecond it advances the global
 * futex clock, moves waiters whose deadline has arrived onto a local
 * queue (marking them timed out), wakes them outside the lock, and
 * parks itself when no timed waiters remain.  Never returns.
 */
static void *timer_thread(void *arg)
{
  struct futex_element *e,*n = NULL;
  struct futex_queue q = TAILQ_HEAD_INITIALIZER(q);

  // Do this forever...
  for(;;) {
    // Block for 1 millisecond
    sys_block(1000);

    // Then atomically do the following...
    mcs_pdr_lock(&__futex.lock);
    // Up the time
    __futex.time++;

    // Find all futexes that have timed out on this iteration,
    // and count those still waiting
    int waiting = 0;
    e = TAILQ_FIRST(&__futex.queue);
    while(e != NULL) {
      // Save the successor before possibly unlinking e below.
      n = TAILQ_NEXT(e, link);
      if(e->ms_timeout == __futex.time) {
        e->timedout = true;
        TAILQ_REMOVE(&__futex.queue, e, link);
        TAILQ_INSERT_TAIL(&q, e, link);
      }
      else if(e->ms_timeout != (uint64_t)-1)  // -1 means "no timeout"
        waiting++;
      e = n;
    }
    // If there are no more waiting, disable the timer
    if(waiting == 0) {
      __futex.time = 0;
      __futex.timer_enabled = false;
    }
    mcs_pdr_unlock(&__futex.lock);

    // Unblock any futexes that have timed out outside the lock
    e = TAILQ_FIRST(&q);
    while(e != NULL) {
      n = TAILQ_NEXT(e, link);
      TAILQ_REMOVE(&q, e, link);
      // Wait for the blocking side to publish its pthread pointer
      // (NOTE(review): presumably set after enqueue - confirm).
      while(e->pthread == NULL) cpu_relax();
      uthread_runnable((struct uthread*)e->pthread);
      e = n;
    }

    // If we have disabled the timer, park this thread
    futex_wait(&__futex.timer_enabled, false, -1);
  }
}
/* * Non-nsswitch.conf version with hard-coded order. */ struct sudo_nss_list * sudo_read_nss(void) { static struct sudo_nss_list snl = TAILQ_HEAD_INITIALIZER(snl); debug_decl(sudo_read_nss, SUDO_DEBUG_NSS) # ifdef HAVE_SSSD TAILQ_INSERT_TAIL(&snl, &sudo_nss_sss, entries); # endif # ifdef HAVE_LDAP TAILQ_INSERT_TAIL(&snl, &sudo_nss_ldap, entries); # endif TAILQ_INSERT_TAIL(&snl, &sudo_nss_file, entries); debug_return_ptr(&snl); }
/*
 * Plugin entry point: record the front-end's version and callbacks,
 * parse "debug_flags=" / "plugin_path=" settings, register the debug
 * subsystem, and hand off to sudoers_policy_init().
 * Returns the result of sudoers_policy_init() via debug_return_bool.
 */
static int
sudoers_policy_open(unsigned int version, sudo_conv_t conversation,
    sudo_printf_t plugin_printf, char * const settings[],
    char * const user_info[], char * const envp[], char * const args[])
{
    struct sudo_conf_debug_file_list debug_files = TAILQ_HEAD_INITIALIZER(debug_files);
    struct sudoers_policy_open_info info;
    const char *plugin_path = NULL;
    char * const *cur;
    debug_decl(sudoers_policy_open, SUDOERS_DEBUG_PLUGIN)

    /* Stash the front-end's version and I/O callbacks in globals. */
    sudo_version = version;
    sudo_conv = conversation;
    sudo_printf = plugin_printf;

    /* Plugin args are only specified for API version 1.2 and higher. */
    if (sudo_version < SUDO_API_MKVERSION(1, 2))
	args = NULL;

    /* Initialize the debug subsystem. */
    for (cur = settings; *cur != NULL; cur++) {
	/* sizeof("x=") - 1 == strlen("x="), computed at compile time. */
	if (strncmp(*cur, "debug_flags=", sizeof("debug_flags=") - 1) == 0) {
	    sudoers_debug_parse_flags(&debug_files,
		*cur + sizeof("debug_flags=") - 1);
	    continue;
	}
	if (strncmp(*cur, "plugin_path=", sizeof("plugin_path=") - 1) == 0) {
	    plugin_path = *cur + sizeof("plugin_path=") - 1;
	    continue;
	}
    }
    sudoers_debug_register(plugin_path, &debug_files);

    /* Call the sudoers init function. */
    info.settings = settings;
    info.user_info = user_info;
    info.plugin_args = args;
    debug_return_bool(sudoers_policy_init(&info, envp));
}
/**
 * Purge all entries from a queue with a rktp version smaller than `version`
 * This shaves off the head of the queue, up until the first rko with
 * a non-matching rktp or version.
 *
 * @param rkq     queue to purge (rkq_lock is taken internally)
 * @param rktp    only ops belonging to this toppar are purged
 * @param version ops with rko_version < version are purged
 */
void rd_kafka_q_purge_toppar_version (rd_kafka_q_t *rkq,
                                      rd_kafka_toppar_t *rktp, int version) {
        rd_kafka_op_t *rko, *next;
        TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
        int32_t cnt = 0;
        int64_t size = 0;

        mtx_lock(&rkq->rkq_lock);

        /* If this queue forwards to another queue, purge that one instead. */
        if (rkq->rkq_fwdq) {
                rd_kafka_q_purge_toppar_version(rkq->rkq_fwdq, rktp, version);
                mtx_unlock(&rkq->rkq_lock);
                return;
        }

        /* Move ops to temporary queue and then destroy them from there
         * without locks to avoid lock-ordering problems in op_destroy() */
        while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp &&
               rd_kafka_toppar_s2i(rko->rko_rktp) == rktp &&
               rko->rko_version < version) {
                TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
                TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
                cnt++;
                size += rko->rko_len;
        }

        /* Account for the removed ops in the queue's counters. */
        rkq->rkq_qlen -= cnt;
        rkq->rkq_qsize -= size;
        mtx_unlock(&rkq->rkq_lock);

        /* Destroy the purged ops outside the lock (see note above). */
        next = TAILQ_FIRST(&tmpq);
        while ((rko = next)) {
                /* Grab successor first: rko is freed by op_destroy(). */
                next = TAILQ_NEXT(next, rko_link);
                rd_kafka_op_destroy(rko);
        }
}
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;

TAILQ_HEAD(shared_driver_list, shared_driver);

/* Definition for shared object drivers. */
struct shared_driver {
	TAILQ_ENTRY(shared_driver) next;  /* linkage in solib_list */
	char name[PATH_MAX];              /* path to the shared object */
	void* lib_handle;                 /* presumably the dlopen() handle
	                                   * - confirm at the load site */
};

/* List of external loadable drivers */
static struct shared_driver_list solib_list =
TAILQ_HEAD_INITIALIZER(solib_list);

/* early configuration structure, when memory config is not mmapped */
static struct rte_mem_config early_mem_config;

/* define fd variable here, because file needs to be kept open for the
 * duration of the program, as we hold a write lock on it in the primary proc */
static int mem_cfg_fd = -1;

/* Write lock covering the memseg portion of the shared mem config. */
static struct flock wr_lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = offsetof(struct rte_mem_config, memseg),
		.l_len = sizeof(early_mem_config.memseg),
};
#include <infiniband/driver.h> #include "usnic_direct.h" #include "usd.h" #include "usd_ib_sysfs.h" #include "usd_ib_cmd.h" #include "usd_socket.h" #include "usd_device.h" static pthread_once_t usd_init_once = PTHREAD_ONCE_INIT; static struct usd_ib_dev *usd_ib_dev_list; static int usd_init_error; TAILQ_HEAD(,usd_device) usd_device_list = TAILQ_HEAD_INITIALIZER(usd_device_list); /* * Perform one-time initialization */ static void do_usd_init(void) { usd_init_error = usd_ib_get_devlist(&usd_ib_dev_list); } /* * Init routine */ static int usd_init(void)
#define VFS_MOUNTARG_SIZE_MAX (1024 * 64) static int vfs_domount(struct thread *td, const char *fstype, char *fspath, int fsflags, struct vfsoptlist **optlist); static void free_mntarg(struct mntarg *ma); static int usermount = 0; SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "Unprivileged users may mount and unmount file systems"); MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure"); MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); static uma_zone_t mount_zone; /* List of mounted filesystems. */ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* For any iteration/modification of mountlist */ struct mtx mountlist_mtx; MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF); /* * Global opts, taken by all filesystems */ static const char *global_opts[] = { "errmsg", "fstype", "fspath", "ro", "rw", "nosuid",
#include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> #include <machine/cpufunc.h> #include <machine/fiq.h> #include <vm/vm.h> #include <machine/pcb.h> #include <vm/pmap.h> #include <machine/cpu.h> TAILQ_HEAD(, fiqhandler) fiqhandler_stack = TAILQ_HEAD_INITIALIZER(fiqhandler_stack); extern char fiqvector[]; extern char fiq_nullhandler[], fiq_nullhandler_end[]; #define IRQ_BIT I32_bit #define FIQ_BIT F32_bit /* * fiq_installhandler: * * Actually install the FIQ handler down at the FIQ vector. * * Note: If the FIQ is invoked via an extra layer of * indirection, the actual FIQ code store lives in the * data segment, so there is no need to manipulate
unsigned int tq_nthreads; unsigned int tq_flags; const char *tq_name; struct mutex tq_mtx; TAILQ_HEAD(, task) tq_worklist; }; struct taskq taskq_sys = { TQ_S_CREATED, 0, 1, 0, "systq", MUTEX_INITIALIZER(IPL_HIGH), TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist) }; struct taskq taskq_sys_mp = { TQ_S_CREATED, 0, 1, TASKQ_MPSAFE, "systqmp", MUTEX_INITIALIZER(IPL_HIGH), TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist) }; typedef int (*sleepfn)(const volatile void *, struct mutex *, int, const char *, int);
struct filemon { TAILQ_ENTRY(filemon) link; /* Link into the in-use list. */ struct mtx mtx; /* Lock mutex for this filemon. */ struct cv cv; /* Lock condition variable for this filemon. */ struct file *fp; /* Output file pointer. */ struct thread *locker; /* Ptr to the thread locking this filemon. */ pid_t pid; /* The process ID being monitored. */ char fname1[MAXPATHLEN]; /* Temporary filename buffer. */ char fname2[MAXPATHLEN]; /* Temporary filename buffer. */ char msgbufr[1024]; /* Output message buffer. */ }; static TAILQ_HEAD(, filemon) filemons_inuse = TAILQ_HEAD_INITIALIZER(filemons_inuse); static TAILQ_HEAD(, filemon) filemons_free = TAILQ_HEAD_INITIALIZER(filemons_free); static int n_readers = 0; static struct mtx access_mtx; static struct cv access_cv; static struct thread *access_owner = NULL; static struct thread *access_requester = NULL; static struct cdev *filemon_dev; #include "filemon_lock.c" #include "filemon_wrapper.c" static void filemon_dtr(void *data) {
do { \ if (t->size != exp) { \ errx(1, "unexpected size for token \"%.*s\" " \ "in \"%s\"", T_PRINTFSTAR(t,data), fun); \ } \ } while (/*CONSTCOND*/0) static char * token2cstr(jsmntok_t *t, char *data) { *(T_STR(t, data) + T_SIZE(t)) = '\0'; return T_STR(t, data); } struct rumprun_execs rumprun_execs = TAILQ_HEAD_INITIALIZER(rumprun_execs); static void makeargv(char *argvstr) { struct rumprun_exec *rre; char **argv; int nargs; rumprun_parseargs(argvstr, &nargs, 0); rre = malloc(sizeof(*rre) + (nargs+1) * sizeof(*argv)); if (rre == NULL) err(1, "could not allocate rre"); rumprun_parseargs(argvstr, &nargs, rre->rre_argv); rre->rre_argv[nargs] = NULL;
void rtflushclone(struct radix_node_head *, struct rtentry *); int rt_if_remove_rtdelete(struct radix_node *, void *); #ifndef SMALL_KERNEL int rt_if_linkstate_change(struct radix_node *, void *); #endif #define LABELID_MAX 50000 struct rt_label { TAILQ_ENTRY(rt_label) rtl_entry; char rtl_name[RTLABEL_LEN]; u_int16_t rtl_id; int rtl_ref; }; TAILQ_HEAD(rt_labels, rt_label) rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels); #ifdef IPSEC struct ifaddr * encap_findgwifa(struct sockaddr *gw) { return (TAILQ_FIRST(&encif[0].sc_if.if_addrlist)); } #endif int rtable_init(struct radix_node_head ***table) { void **p; struct domain *dom;
/** * A notification message to store in the queue of message to go out to the * mgmt component */ typedef struct notification_msg_s { conf_msg_type_e action; ///< action to send subscriber_status_t message; ///< data to send TAILQ_ENTRY(notification_msg_s) entries; ///< ptrs to next/prev } notification_msg_t; /** * List of notification messages to buffer until main thread processes msgs */ static TAILQ_HEAD(notification_buffer_s, notification_msg_s) notification_msgs = TAILQ_HEAD_INITIALIZER(notification_msgs); static pthread_mutex_t msgs_lock; ///< lock for accessing notification_msgs static pconn_client_t * mgmt_client; ///< client cnx to mgmt component static evTimerID mgmt_timer_id; ///< timerID for retrying cnx to mgmt static evContext main_ctx; ///< event context for main thread /*** STATIC/INTERNAL Functions ***/ /** * Fwd declaration of function to setup the connection to the mgmt component */ static void connect_mgmt(evContext ctx, void * uap, struct timespec due, struct timespec inter);
#include "wireless.h" #include "EAPLog.h" struct scanCallbackEntry_s; typedef struct scanCallbackEntry_s scanCallbackEntry, * scanCallbackEntryRef; typedef TAILQ_HEAD(scanCallbackHead_s, scanCallbackEntry_s) scanCallbackHead; #if 0 static __inline not_used() {}; /* this is here so that emacs indent doesn't get confused - what a pain*/ #endif typedef struct scanCallbackHead_s * scanCallbackHeadRef; static scanCallbackHead S_head = TAILQ_HEAD_INITIALIZER(S_head); static scanCallbackHeadRef S_scanCallbackHead_p = &S_head; enum { kScanCallbackStateNone = 0, kScanCallbackStateStarted = 1, kScanCallbackStateComplete = 2 }; struct scanCallbackEntry_s { Apple80211Ref wref; wireless_scan_callback_t func; void * arg; CFStringRef ssid; uint32_t state; TAILQ_ENTRY(scanCallbackEntry_s) link;
/*
 * Structure to hold a route
 */
struct natm_route {
	TAILQ_ENTRY(natm_route) link;   /* linkage in natm_route_list */
	struct in_addr host;            /* destination host address */
	struct diagif *aif;             /* interface for this route */
	u_int flags;
	int llcsnap;                    /* LLC/SNAP encapsulation flag */
	u_int vpi, vci;                 /* ATM virtual path/channel ids */
	u_int traffic;
	/* NOTE(review): remaining fields look like ATM traffic parameters
	 * (rates, burst sizes, etc.) - confirm against their producers. */
	u_int pcr, scr, mbs, icr, mcr;
	u_int tbe, nrm, trm, adtf, rif, rdf, cdf;
};

/* All known routes, filled in from routing-socket messages. */
static TAILQ_HEAD(, natm_route) natm_route_list =
    TAILQ_HEAD_INITIALIZER(natm_route_list);

/*
 * Allocate and record a route from a routing-socket message.
 */
static void
store_route(struct rt_msghdr *rtm)
{
	u_int i;
	struct natm_route *r;
	char *cp;
	struct sockaddr *sa;
	struct sockaddr_in *sain;
	struct sockaddr_dl *sdl;
	struct diagif *aif;
	u_int n;

	r = malloc(sizeof(*r));
	if (r == NULL)
/* Hash from SIP Call-id to SDP parser * (in order not to depend upon the transport socket pair as would be the case if SIP parser was a mux) */ struct callid_2_sdp { HASH_ENTRY(callid_2_sdp) entry; // entry in the hash TAILQ_ENTRY(callid_2_sdp) used_entry; // entry in the used list char call_id[SIP_CALLID_LEN+1]; struct parser *sdp_parser; struct timeval last_used; }; // The hash itself static HASH_TABLE(callids_2_sdps, callid_2_sdp) callids_2_sdps; // A list of all entries, in LRU order static TAILQ_HEAD(callids_2_sdps_tq, callid_2_sdp) callids_2_sdps_used = TAILQ_HEAD_INITIALIZER(callids_2_sdps_used); // A mutex to protect both the hash and the tailqueue static struct mutex callids_2_sdps_mutex; static void callid_2_sdp_dtor(struct callid_2_sdp *c2s) { SLOG(LOG_DEBUG, "Destruct callid_2_sdp@%p for callid '%s'", c2s, c2s->call_id); HASH_REMOVE(&callids_2_sdps, c2s, entry); TAILQ_REMOVE(&callids_2_sdps_used, c2s, used_entry); parser_unref(&c2s->sdp_parser); } static void callid_2_sdp_del(struct callid_2_sdp *c2s) { callid_2_sdp_dtor(c2s); objfree(c2s);
#include <stdlib.h> #include <string.h> #include <err.h> #include <sysexits.h> #include <unistd.h> #include <sys/queue.h> #include "bus_autoconf.h" #include "bus_sections.h" #include "bus_usb.h" struct usb_blob; typedef TAILQ_HEAD(,usb_blob) usb_blob_head_t; typedef TAILQ_ENTRY(usb_blob) usb_blob_entry_t; static usb_blob_head_t usb_blob_head = TAILQ_HEAD_INITIALIZER(usb_blob_head); static uint32_t usb_blob_count; struct usb_blob { usb_blob_entry_t entry; struct usb_device_id temp; }; /* * To ensure that the correct USB driver is loaded, the driver having * the most information about the device must be probed first. Then * more generic drivers shall be probed. */ static int usb_compare(const void *_a, const void *_b) {
#endif /* __rtems__ */ static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage"); struct mtx devmtx; static void destroy_devl(struct cdev *dev); #ifndef __rtems__ static int destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg); #endif /* __rtems__ */ static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, va_list ap); static struct cdev_priv_list cdevp_free_list = TAILQ_HEAD_INITIALIZER(cdevp_free_list); static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list = SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list); void dev_lock(void) { mtx_lock(&devmtx); } /* * Free all the memory collected while the cdev mutex was * locked. Since devmtx is after the system map mutex, free() cannot * be called immediately and is postponed until cdev mutex can be * dropped.
#include "config.h" #include "finit.h" #include "private.h" #include "helpers.h" #include "plugin.h" #define is_io_plugin(p) ((p)->io.cb && (p)->io.fd > 0) #define SEARCH_PLUGIN(str) \ PLUGIN_ITERATOR(p, tmp) { \ if (!strcmp(p->name, str)) \ return p; \ } static char *plugpath = NULL; /* Set by first load. */ static TAILQ_HEAD(plugin_head, plugin) plugins = TAILQ_HEAD_INITIALIZER(plugins); #ifndef ENABLE_STATIC static void check_plugin_depends(plugin_t *plugin); #endif static char *trim_ext(char *name) { char *ptr; if (name) { ptr = strstr(name, ".so"); if (!ptr) ptr = strstr(name, ".c"); if (ptr)
#include <stdlib.h> #include <string.h> #include <sysexits.h> #include <unistd.h> #include <fcntl.h> #include <net/if.h> /* IFNAMSIZ */ #include <net/pfvar.h> #include <netinet/in.h> /* in_addr */ #include <netinet/ip_fw.h> /* * Map between current altq queue id numbers and names. */ static TAILQ_HEAD(, pf_altq) altq_entries = TAILQ_HEAD_INITIALIZER(altq_entries); void altq_set_enabled(int enabled) { int pffd; pffd = open("/dev/pf", O_RDWR); if (pffd == -1) err(EX_UNAVAILABLE, "altq support opening pf(4) control device"); if (enabled) { if (ioctl(pffd, DIOCSTARTALTQ) != 0 && errno != EEXIST) err(EX_UNAVAILABLE, "enabling altq"); } else { if (ioctl(pffd, DIOCSTOPALTQ) != 0 && errno != ENOENT)
#include <unistd.h> #include <event.h> #include <openssl/ssl.h> #include "relayd.h" struct carpgroup { TAILQ_ENTRY(carpgroup) entry; char *group; int do_demote; int changed_by; }; TAILQ_HEAD(carpgroups, carpgroup) carpgroups = TAILQ_HEAD_INITIALIZER(carpgroups); struct carpgroup *carp_group_find(char *group); int carp_demote_ioctl(char *, int); struct carpgroup * carp_group_find(char *group) { struct carpgroup *c; TAILQ_FOREACH(c, &carpgroups, entry) if (!strcmp(c->group, group)) return (c); return (NULL); }
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: provider.c,v 1.5 2004/01/13 01:54:39 max Exp $ * $FreeBSD$ */ #include <sys/queue.h> #define L2CAP_SOCKET_CHECKED #include <bluetooth.h> #include <string.h> #include <stdlib.h> #include "profile.h" #include "provider.h" static TAILQ_HEAD(, provider) providers = TAILQ_HEAD_INITIALIZER(providers); static uint32_t change_state = 0; static uint32_t handle = 0; /* * Register Service Discovery provider. * Should not be called more the once. */ int32_t provider_register_sd(int32_t fd) { extern profile_t sd_profile_descriptor; extern profile_t bgd_profile_descriptor; provider_p sd = calloc(1, sizeof(*sd));
#include <sys/proc.h> #include <sys/kthread.h> #include <sys/lock.h> #include <sys/mutex.h> #include <sys/errno.h> #include <sys/sbuf.h> #include <geom/geom.h> #include <geom/geom_int.h> #include <machine/stdarg.h> #ifdef DDB #include <ddb/ddb.h> #endif struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; struct g_hh00 { struct g_class *mp; int error; int post; }; /* * This event offers a new class a chance to taste all preexisting providers. */ static void g_load_class(void *arg, int flag) { struct g_hh00 *hh;
* * $FreeBSD: src/sys/kern/subr_eventhandler.c,v 1.3 1999/11/16 16:28:57 phk Exp $ */ #include <sys/param.h> #include <sys/kernel.h> #include <sys/malloc.h> #include <sys/systm.h> #include <sys/eventhandler.h> #include <sys/mplock2.h> MALLOC_DEFINE(M_EVENTHANDLER, "eventhandler", "Event handler records"); /* List of 'slow' lists */ static TAILQ_HEAD(, eventhandler_list) eventhandler_lists = TAILQ_HEAD_INITIALIZER(eventhandler_lists); static struct lwkt_token evlist_token = LWKT_TOKEN_INITIALIZER(evlist_token); struct eventhandler_entry_generic { struct eventhandler_entry ee; void (* func)(void); }; /* * Insertion is O(n) due to the priority scan, but optimises to O(1) * if all priorities are identical. * * MPSAFE */ eventhandler_tag
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback"); static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status"); static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token); /* * callout list for things to do at exit time */ struct exitlist { exitlist_fn function; TAILQ_ENTRY(exitlist) next; }; TAILQ_HEAD(exit_list_head, exitlist); static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list); /* * LWP reaper data */ struct task *deadlwp_task[MAXCPU]; struct lwplist deadlwp_list[MAXCPU]; /* * exit -- * Death of process. * * SYS_EXIT_ARGS(int rval) */ int sys_exit(struct exit_args *uap)
* * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94 */ #include <sys/cdefs.h> __KERNEL_RCSID(0, "$NetBSD: subr_evcnt.c,v 1.12 2014/02/25 18:30:11 pooka Exp $"); #include <sys/param.h> #include <sys/evcnt.h> #include <sys/kmem.h> #include <sys/mutex.h> #include <sys/sysctl.h> #include <sys/systm.h> /* list of all events */ struct evcntlist allevents = TAILQ_HEAD_INITIALIZER(allevents); static kmutex_t evcnt_lock __cacheline_aligned; static bool init_done; static uint32_t evcnt_generation; /* * We need a dummy object to stuff into the evcnt link set to * ensure that there always is at least one object in the set. */ static struct evcnt dummy_static_evcnt; __link_set_add_bss(evcnts, dummy_static_evcnt); /* * Initialize event counters. This does the attach procedure for * each of the static event counters in the "evcnts" link set. */