#include <vm/vm_extern.h>
#include <sys/user.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

/* Forward declarations: LWP teardown/reaping helpers defined later in this file. */
static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

/* Allocation tags for atexit callback records and saved zombie status. */
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

/*
 * Token serializing access to the dead-LWP lists
 * (exact protected structures are declared outside this fragment).
 */
static struct lwkt_token deadlwp_token = LWKT_TOKEN_INITIALIZER(deadlwp_token);

/*
 * Callout list for things to do at exit time.  Entries are kept on a
 * tail queue so registration order is preserved.
 */
struct exitlist {
	exitlist_fn function;		/* callback invoked at process exit */
	TAILQ_ENTRY(exitlist) next;	/* linkage on exit_list */
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * LWP reaper data
 */
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/jail.h>
#include <sys/thread.h>
#include <sys/thread2.h>

/* Allocation tag for all SysV semaphore structures in this file. */
static MALLOC_DEFINE(M_SEM, "sem", "SVID compatible semaphores");

/* Forward declarations for helpers defined later in this file. */
static void seminit (void *);
static struct sem_undo *semu_alloc (struct proc *p);
static int semundo_adjust (struct proc *p, int semid, int semnum, int adjval);
static void semundo_clear (int semid, int semnum);

/* Token serializing access to semu_list (per the name; scope not fully visible here). */
static struct lwkt_token semu_token = LWKT_TOKEN_INITIALIZER(semu_token);
static int	semtot = 0;			/* running count of semaphores in use */
static struct semid_pool *sema;			/* semaphore id pool */
static TAILQ_HEAD(, sem_undo) semu_list = TAILQ_HEAD_INITIALIZER(semu_list);
static struct lock sema_lk;

struct sem {
	u_short	semval;		/* semaphore value */
	pid_t	sempid;		/* pid of last operation */
	u_short	semncnt;	/* # awaiting semval > cval */
	u_short	semzcnt;	/* # awaiting semval = 0 */
};

/*
 * Undo structure (one per process)
 */
 * $FreeBSD: src/sys/kern/subr_eventhandler.c,v 1.3 1999/11/16 16:28:57 phk Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/mplock2.h>

/* Allocation tag for event handler records. */
MALLOC_DEFINE(M_EVENTHANDLER, "eventhandler", "Event handler records");

/* List of 'slow' lists */
static TAILQ_HEAD(, eventhandler_list) eventhandler_lists =
	TAILQ_HEAD_INITIALIZER(eventhandler_lists);

/* Token serializing access to eventhandler_lists. */
static struct lwkt_token evlist_token = LWKT_TOKEN_INITIALIZER(evlist_token);

/* Generic entry wrapping an untyped callback pointer. */
struct eventhandler_entry_generic {
	struct eventhandler_entry	ee;
	void				(* func)(void);
};

/*
 * Insertion is O(n) due to the priority scan, but optimises to O(1)
 * if all priorities are identical.
 *
 * MPSAFE
 */
/* NOTE(review): signature continues past this fragment. */
eventhandler_tag
eventhandler_register(struct eventhandler_list *list, const char *name,
&IDTVEC(ioapic_intr191) }; #define IOAPIC_HWI_SYSCALL (IDT_OFFSET_SYSCALL - IDT_OFFSET) static struct ioapic_irqmap { int im_type; /* IOAPIC_IMT_ */ enum intr_trigger im_trig; enum intr_polarity im_pola; int im_gsi; int im_msi_base; uint32_t im_flags; /* IOAPIC_IMF_ */ } ioapic_irqmaps[MAXCPU][IOAPIC_HWI_VECTORS]; static struct lwkt_token ioapic_irqmap_tok = LWKT_TOKEN_INITIALIZER(ioapic_irqmap_token); #define IOAPIC_IMT_UNUSED 0 #define IOAPIC_IMT_RESERVED 1 #define IOAPIC_IMT_LEGACY 2 #define IOAPIC_IMT_SYSCALL 3 #define IOAPIC_IMT_MSI 4 #define IOAPIC_IMT_MSIX 5 #define IOAPIC_IMT_ISHWI(map) ((map)->im_type != IOAPIC_IMT_RESERVED && \ (map)->im_type != IOAPIC_IMT_SYSCALL) #define IOAPIC_IMF_CONF 0x1 extern void IOAPIC_INTREN(int); extern void IOAPIC_INTRDIS(int);
#include <sys/sysproto.h>
#include <sys/thread.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/ktr.h>
#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/mplock2.h>

/*
 * Global token for kqueue subsystem
 * (disabled: retained for reference, compiled out via #if 0).
 */
#if 0
struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
    CTLFLAG_RW, &kq_token.t_collisions, 0,
    "Collision counter of kq_token");
#endif

/* Allocation tag for kqueue structures. */
MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/* Cookie passed through the kevent copyin path; tracks progress in ka. */
struct kevent_copyin_args {
	struct kevent_args	*ka;	/* original syscall arguments */
	int			pchanges;	/* changes processed so far */
};

/* Forward declarations for the scan/sleep core defined later in this file. */
static int	kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
static int	kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
		    struct knote *marker);
/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lockup both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time, the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_INITIALIZER(kvm_token);
struct lwkt_token sigio_token = LWKT_TOKEN_INITIALIZER(sigio_token);
struct lwkt_token tty_token = LWKT_TOKEN_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_INITIALIZER(vnode_token);

/* Tunables controlling token decontention behavior, exposed via sysctl. */
static int lwkt_token_spin = 5;
SYSCTL_INT(_lwkt, OID_AUTO, token_spin, CTLFLAG_RW,
    &lwkt_token_spin, 0, "Decontention spin loops");
static int lwkt_token_delay = 0;
SYSCTL_INT(_lwkt, OID_AUTO, token_delay, CTLFLAG_RW,
    &lwkt_token_delay, 0, "Decontention spin delay in ns");
#include <sys/kernel.h>
#include <sys/fnv_hash.h>
#include <sys/objcache.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "nfsmount.h"
#include "nfsnode.h"

/* Allocation tag for nfsnode structures. */
static MALLOC_DEFINE(M_NFSNODE, "NFS node", "NFS node");

static struct objcache *nfsnode_objcache;	/* object cache backing nfsnodes */
static LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;
static u_long nfsnodehash;			/* hash mask for nfsnodehashtbl */
/* Token serializing access to the nfsnode hash table. */
static lwkt_token nfsnhash_token = LWKT_TOKEN_INITIALIZER(nfsnhash_token);
static struct lock nfsnhash_lock;

#define TRUE	1
#define	FALSE	0

/* Map a file-handle checksum to its hash chain head. */
#define NFSNOHASH(fhsum)	(&nfsnodehashtbl[(fhsum) & nfsnodehash])

/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
/* NOTE(review): function body continues past this fragment. */
void
nfs_nhinit(void)
{
	nfsnode_objcache = objcache_create_simple(M_NFSNODE, sizeof(struct nfsnode));
/* In-kernel representation of a loaded module. */
struct module {
	TAILQ_ENTRY(module)	link;	/* chain together all modules */
	TAILQ_ENTRY(module)	flink;	/* all modules in a file */
	struct linker_file*	file;	/* file which contains this module */
	int			refs;	/* reference count */
	int			id;	/* unique id number */
	char			*name;	/* module name */
	modeventhand_t		handler;	/* event handler */
	void			*arg;	/* argument for handler */
	modspecific_t		data;	/* module specific data */
};

/* Invoke a module's event handler with its registered argument. */
#define MOD_EVENT(mod, type) (mod)->handler((mod), (type), (mod)->arg)

static modulelist_t modules = TAILQ_HEAD_INITIALIZER(modules);
/* Token serializing access to the modules list and nextid. */
static struct lwkt_token mod_token = LWKT_TOKEN_INITIALIZER(mod_token);
static int nextid = 1;		/* next module id to hand out */

static void module_shutdown(void*, int);

/* Default no-op event handler for modules that register none. */
static int
modevent_nop(module_t mod, int what, void* arg)
{
	return 0;
}

/* NOTE(review): function body continues past this fragment. */
static void
module_init(void* arg)
{
	TAILQ_INIT(&modules);
static long numnfsrvcache;	/* current number of cache entries */
static long desirednfsrvcache;	/* target size of the cache */

/* Map an RPC xid to its hash chain; folds high bits in to spread the hash. */
#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
static LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
static TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;	/* LRU of cache entries */
static u_long nfsrvhash;	/* hash mask for nfsrvhashtbl */

#define	TRUE	1
#define	FALSE	0

/* Address family recorded for a cache entry (INET vs ISO). */
#define	NETFAMILY(rp) \
	(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

/* Token serializing access to the server reply cache. */
struct lwkt_token srvcache_token = LWKT_TOKEN_INITIALIZER(srvcache_token);

/*
 * Static array that defines which nfs rpc's are nonidempotent
 */
/* NOTE(review): initializer continues past this fragment. */
static int nonidempotent[NFS_NPROCS] = {
	FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
 * $DragonFly: src/sys/kern/kern_kthread.c,v 1.13 2006/12/18 20:41:01 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ptrace.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/unistd.h>
#include <sys/wait.h>

#include <machine/stdarg.h>

/* Token serializing kthread suspend state (per the name; scope not visible here). */
static struct lwkt_token kpsus_token = LWKT_TOKEN_INITIALIZER(kpsus_token);

/*
 * Create a kernel process/thread/whatever.  It shares it's address space
 * with proc0 - ie: kernel only.  5.x compatible.
 *
 * All kthreads are created as MPSAFE threads.
 */
/* NOTE(review): function body continues past this fragment. */
int
kthread_create(void (*func)(void *), void *arg,
	       struct thread **tdp, const char *fmt, ...)
{
	thread_t td;
	__va_list ap;