#define BUCKET(rel, abs) \ (timeout_wheel[ \ ((rel) <= (1 << (2*WHEELBITS))) \ ? ((rel) <= (1 << WHEELBITS)) \ ? MASKWHEEL(0, (abs)) \ : MASKWHEEL(1, (abs)) + WHEELSIZE \ : ((rel) <= (1 << (3*WHEELBITS))) \ ? MASKWHEEL(2, (abs)) + 2*WHEELSIZE \ : MASKWHEEL(3, (abs)) + 3*WHEELSIZE]) #define MOVEBUCKET(wheel, time) \ CIRCQ_APPEND(&timeout_todo, \ &timeout_wheel[MASKWHEEL((wheel), (time)) + (wheel)*WHEELSIZE]) /* All wheels are locked with the same mutex. */ struct mutex timeout_mutex = MUTEX_INITIALIZER(IPL_HIGH); /* * Circular queue definitions. */ #define CIRCQ_INIT(elem) do { \ (elem)->next = (elem); \ (elem)->prev = (elem); \ } while (0) #define CIRCQ_INSERT(elem, list) do { \ (elem)->prev = (list)->prev; \ (elem)->next = (list); \ (list)->prev->next = (elem); \ (list)->prev = (elem); \
#include <sys/protosw.h> #include <sys/pool.h> #include <sys/socket.h> #include <sys/socketvar.h> #include <net/if.h> #include <uvm/uvm_extern.h> #ifdef DDB #include <machine/db_machdep.h> #endif struct mbstat mbstat; /* mbuf stats */ struct mutex mbstatmtx = MUTEX_INITIALIZER(IPL_NET); struct pool mbpool; /* mbuf pool */ struct pool mtagpool; /* mbuf cluster pools */ u_int mclsizes[] = { MCLBYTES, /* must be at slot 0 */ 4 * 1024, 8 * 1024, 9 * 1024, 12 * 1024, 16 * 1024, 64 * 1024 }; static char mclnames[MCLPOOLS][8]; struct pool mclpools[MCLPOOLS];
struct io_handler *handler_list; spinlock vector_lock; int32 enable_count; bool no_lock_vector; #if DEBUG_INTERRUPTS int64 handled_count; int64 unhandled_count; int trigger_count; int ignored_count; #endif }; static struct io_vector sVectors[NUM_IO_VECTORS]; static bool sAllocatedIOInterruptVectors[NUM_IO_VECTORS]; static mutex sIOInterruptVectorAllocationLock = MUTEX_INITIALIZER("io_interrupt_vector_allocation"); #if DEBUG_INTERRUPTS static int dump_int_statistics(int argc, char **argv) { int i; for (i = 0; i < NUM_IO_VECTORS; i++) { struct io_handler *io; if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) && sVectors[i].enable_count == 0 && sVectors[i].handled_count == 0 && sVectors[i].unhandled_count == 0 && sVectors[i].handler_list == NULL)
= "drivers/disk/virtual/checksum_device/raw/device_v1"; static const char* const kControlDeviceName = "disk/virtual/checksum_device/control"; static const char* const kRawDeviceBaseName = "disk/virtual/checksum_device"; static const char* const kFilePathItem = "checksum_device/file_path"; struct RawDevice; typedef DoublyLinkedList<RawDevice> RawDeviceList; struct device_manager_info* sDeviceManager; static RawDeviceList sDeviceList; static mutex sDeviceListLock = MUTEX_INITIALIZER("checksum device list"); struct CheckSumBlock : public DoublyLinkedListLinkImpl<CheckSumBlock> { uint64 blockIndex; bool used; bool dirty; CheckSum checkSums[kCheckSumsPerBlock]; CheckSumBlock() : used(false) { } };
enum { EXTENDED_KEY = 0xe0, LEFT_ALT_KEY = 0x38, RIGHT_ALT_KEY = 0xb8, SYS_REQ_KEY = 0x54 }; struct keyboard_cookie { bool is_reader; bool is_debugger; }; static mutex sInitializeLock = MUTEX_INITIALIZER("keyboard init"); static int32 sKeyboardOpenCount = 0; static bool sHasKeyboardReader = false; static bool sHasDebugReader = false; static sem_id sKeyboardSem; static struct packet_buffer *sKeyBuffer; static bool sIsExtended = false; static int32 sKeyboardRepeatRate; static bigtime_t sKeyboardRepeatDelay; static status_t set_leds(led_info *ledInfo) { uint8 leds = 0;
unsigned int tq_running; unsigned int tq_nthreads; unsigned int tq_flags; const char *tq_name; struct mutex tq_mtx; TAILQ_HEAD(, task) tq_worklist; }; struct taskq taskq_sys = { TQ_S_CREATED, 0, 1, 0, "systq", MUTEX_INITIALIZER(IPL_HIGH), TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist) }; struct taskq taskq_sys_mp = { TQ_S_CREATED, 0, 1, TASKQ_MPSAFE, "systqmp", MUTEX_INITIALIZER(IPL_HIGH), TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist) }; typedef int (*sleepfn)(const volatile void *, struct mutex *, int, const char *, int);
 * Copyright 2010, Michael Lotz, [email protected]. All Rights Reserved.
 * Distributed under the terms of the MIT license.
 */

#include <arch/x86/apic.h>
#include <arch/x86/arch_int.h>
#include <arch/x86/msi.h>

#include <debug.h>
#include <lock.h>

static bool sMSISupported = false;
static const uint32 kVectorCount = 256 - ARCH_INTERRUPT_BASE;
static bool sAllocatedVectors[kVectorCount];
static mutex sMSIAllocationLock = MUTEX_INITIALIZER("msi_allocation");

void
msi_init()
{
	if (!apic_available()) {
		dprintf("disabling msi due to missing apic\n");
		return;
	}

	for (uint16 i = 0; i < kVectorCount; i++)
		sAllocatedVectors[i] = false;

	// the first 24 vectors are addressable with a single ioapic config
	for (uint16 i = 0; i < 24; i++)
/*
 * Memory Mapped Configuration space access.
 *
 * Since mapping the whole configuration space will cost us up to
 * 256MB of kernel virtual memory, we use separate mappings per bus.
 * The mappings are created on-demand, such that we only use kernel
 * virtual memory for busses that are actually present.
 */
bus_addr_t pci_mcfg_addr;
int pci_mcfg_min_bus, pci_mcfg_max_bus;
bus_space_tag_t pci_mcfgt = X86_BUS_SPACE_MEM;
bus_space_handle_t pci_mcfgh[256];
void pci_mcfg_map_bus(int);

struct mutex pci_conf_lock = MUTEX_INITIALIZER(IPL_HIGH);

#define PCI_CONF_LOCK() \
do { \
	mtx_enter(&pci_conf_lock); \
} while (0)

#define PCI_CONF_UNLOCK() \
do { \
	mtx_leave(&pci_conf_lock); \
} while (0)

#define PCI_MODE1_ENABLE	0x80000000UL
#define PCI_MODE1_ADDRESS_REG	0x0cf8
#define PCI_MODE1_DATA_REG	0x0cfc
#include <pthread.h> #include <stdlib.h> #include <string.h> #include <OS.h> #include <libroot_private.h> #include <locks.h> #include <runtime_loader.h> #include <syscall_utils.h> #include <user_runtime.h> static const char* const kEnvLockName = "env lock"; static mutex sEnvLock = MUTEX_INITIALIZER(kEnvLockName); static char **sManagedEnviron; char **environ = NULL; static inline void lock_variables(void) { mutex_lock(&sEnvLock); } static inline void unlock_variables(void) {
// sMaxPorts must be power of 2 static int32 sMaxPorts = 4096; static int32 sUsedPorts = 0; static struct port_entry* sPorts; static area_id sPortArea; static heap_allocator* sPortAllocator; static ConditionVariable sNoSpaceCondition; static vint32 sTotalSpaceInUse; static vint32 sAreaChangeCounter; static vint32 sAllocatingArea; static bool sPortsActive = false; static port_id sNextPort = 1; static int32 sFirstFreeSlot = 1; static mutex sPortsLock = MUTEX_INITIALIZER("ports list"); static PortNotificationService sNotificationService; // #pragma mark - TeamNotificationService PortNotificationService::PortNotificationService() : DefaultNotificationService("ports") { } void
#include <sys/param.h> #include <sys/systm.h> #include <sys/mutex.h> #include <uvm/uvm_extern.h> #include <machine/gdt.h> #include <machine/pcb.h> union descriptor bootstrap_gdt[NGDT]; union descriptor *gdt = bootstrap_gdt; int gdt_next; /* next available slot for sweeping */ int gdt_free; /* next free slot; terminated with GNULL_SEL */ struct mutex gdt_lock_store = MUTEX_INITIALIZER(IPL_HIGH); int gdt_get_slot(void); void gdt_put_slot(int); /* * Lock and unlock the GDT. */ #define gdt_lock() (mtx_enter(&gdt_lock_store)) #define gdt_unlock() (mtx_leave(&gdt_lock_store)) /* XXX needs spinlocking if we ever mean to go finegrained. */ void setgdt(int sel, void *base, size_t limit, int type, int dpl, int def32, int gran) {
static const char* const kControlDeviceName = RAM_DISK_CONTROL_DEVICE_NAME; static const char* const kRawDeviceBaseName = RAM_DISK_RAW_DEVICE_BASE_NAME; static const char* const kFilePathItem = "ram_disk/file_path"; static const char* const kDeviceSizeItem = "ram_disk/device_size"; static const char* const kDeviceIDItem = "ram_disk/id"; struct RawDevice; typedef DoublyLinkedList<RawDevice> RawDeviceList; struct device_manager_info* sDeviceManager; static RawDeviceList sDeviceList; static mutex sDeviceListLock = MUTEX_INITIALIZER("ram disk device list"); static uint64 sUsedRawDeviceIDs = 0; static int32 allocate_raw_device_id(); static void free_raw_device_id(int32 id); struct Device { Device(device_node* node) : fNode(node) { mutex_init(&fLock, "ram disk device"); }
#include <pthread_private.h> #include <runtime_loader.h> #include <syscalls.h> #include <user_thread.h> typedef struct fork_hook { struct fork_hook *next; void (*function)(void); } fork_hook; #define FORK_LOCK_NAME "fork lock" static fork_hook *sPrepareHooks, *sParentHooks, *sChildHooks; static fork_hook *sLastParentHook, *sLastChildHook; static mutex sForkLock = MUTEX_INITIALIZER(FORK_LOCK_NAME); extern thread_id __main_thread_id; /** Adds a hook to the specified list. * If \a _lastHook is NULL, the hook will be added at the head of the list, * else it will be added at the tail of the list. * Since this function allocates memory, it can fail, and returns B_NO_MEMORY * in that case. It returns B_OK on success. */ static status_t add_fork_hook(fork_hook **_hooks, fork_hook **_lastHook, void (*function)(void)) { fork_hook *hook = (fork_hook *)malloc(sizeof(struct fork_hook));
#include <vfs.h> #include "DebugSupport.h" #include "PackageLinksDirectory.h" #include "StringConstants.h" //#define TRACE_DEPENDENCIES_ENABLED #ifdef TRACE_DEPENDENCIES_ENABLED # define TRACE_DEPENDENCIES(x...) TPRINT(x) #else # define TRACE_DEPENDENCIES(x...) do {} while (false) #endif mutex PackageFSRoot::sRootListLock = MUTEX_INITIALIZER("packagefs root list"); PackageFSRoot::RootList PackageFSRoot::sRootList; PackageFSRoot::PackageFSRoot(dev_t deviceID, ino_t nodeID) : fDeviceID(deviceID), fNodeID(nodeID), fSystemVolume(NULL), fPackageLinksDirectory(NULL) { rw_lock_init(&fLock, "packagefs root"); } PackageFSRoot::~PackageFSRoot()
#include "Inode.h" #include "NFS4Defs.h" #include "RequestBuilder.h" #include "ReplyInterpreter.h" #include "RootInode.h" #include "RPCCallbackServer.h" #include "RPCServer.h" #include "VnodeToInode.h" #include "WorkQueue.h" #ifdef DEBUG #define TRACE_NFS4 #endif #ifdef TRACE_NFS4 static mutex gTraceLock = MUTEX_INITIALIZER(NULL); #define TRACE(x...) \ { \ mutex_lock(&gTraceLock); \ dprintf("nfs4: %s(): ", __FUNCTION__); \ dprintf(x); \ dprintf("\n"); \ mutex_unlock(&gTraceLock); \ } #else #define TRACE(x...) (void)0 #endif extern fs_volume_ops gNFSv4VolumeOps; extern fs_vnode_ops gNFSv4VnodeOps;
bool Compare(addr_t key, const UserMutexEntry* value) const { return value->address == key; } UserMutexEntry*& GetLink(UserMutexEntry* value) const { return value->hashNext; } }; typedef BOpenHashTable<UserMutexHashDefinition> UserMutexTable; static UserMutexTable sUserMutexTable; static mutex sUserMutexTableLock = MUTEX_INITIALIZER("user mutex table"); static void add_user_mutex_entry(UserMutexEntry* entry) { UserMutexEntry* firstEntry = sUserMutexTable.Lookup(entry->address); if (firstEntry != NULL) firstEntry->otherEntries.Add(entry); else sUserMutexTable.Insert(entry); } static bool remove_user_mutex_entry(UserMutexEntry* entry)
#include <machine/instr.h> #include <machine/cpu.h> #include <machine/openfirm.h> #include <machine/ctlreg.h> #include <machine/pmap.h> #ifdef notyet #include "fb.h" #include "esp_sbus.h" #endif #include "tda.h" #ifdef MULTIPROCESSOR struct mutex ddb_mp_mutex = MUTEX_INITIALIZER(IPL_HIGH); volatile int ddb_state = DDB_STATE_NOT_RUNNING; volatile cpuid_t ddb_active_cpu; boolean_t db_switch_cpu; struct cpu_info *db_switch_to_cpu; #endif db_regs_t ddb_regs; /* register state */ extern void OF_enter(void); extern struct traptrace { unsigned short tl:3, /* Trap level */ ns:4, /* PCB nsaved */ tt:9; /* Trap type */ unsigned short pid; /* PID */
#include <vm/VMAddressSpace.h> #include "HashedObjectCache.h" #include "MemoryManager.h" #include "slab_private.h" #include "SmallObjectCache.h" typedef DoublyLinkedList<ObjectCache> ObjectCacheList; typedef DoublyLinkedList<ObjectCache, DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> > MaintenanceQueue; static ObjectCacheList sObjectCaches; static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list"); static mutex sMaintenanceLock = MUTEX_INITIALIZER("object cache resize requests"); static MaintenanceQueue sMaintenanceQueue; static ConditionVariable sMaintenanceCondition; #if SLAB_OBJECT_CACHE_TRACING namespace SlabObjectCacheTracing { class ObjectCacheTraceEntry : public AbstractTraceEntry { public: ObjectCacheTraceEntry(ObjectCache* cache)
char eventBuffer[128]; KMessage event; event.SetTo(eventBuffer, sizeof(eventBuffer), IMAGE_MONITOR); event.AddInt32("event", eventCode); event.AddInt32("image", image->info.basic_info.id); event.AddPointer("imageStruct", image); DefaultNotificationService::Notify(event, eventCode); } }; } // namespace static image_id sNextImageID = 1; static mutex sImageMutex = MUTEX_INITIALIZER("image"); static ImageTable* sImageTable; static ImageNotificationService sNotificationService; /*! Registers an image with the specified team. */ static image_id register_image(Team *team, extended_image_info *info, size_t size, bool locked) { image_id id = atomic_add(&sNextImageID, 1); struct image *image; image = (struct image*)malloc(sizeof(struct image)); if (image == NULL) return B_NO_MEMORY;
#define MAX_SOCKET_ADDRESS_LENGTH (sizeof(sockaddr_storage)) #define MAX_SOCKET_OPTION_LENGTH 128 #define MAX_ANCILLARY_DATA_LENGTH 1024 #define GET_SOCKET_FD_OR_RETURN(fd, kernel, descriptor) \ do { \ status_t getError = get_socket_descriptor(fd, kernel, descriptor); \ if (getError != B_OK) \ return getError; \ } while (false) static net_stack_interface_module_info* sStackInterface = NULL; static vint32 sStackInterfaceInitialized = 0; static mutex sLock = MUTEX_INITIALIZER("stack interface"); struct FDPutter { FDPutter(file_descriptor* descriptor) : descriptor(descriptor) { } ~FDPutter() { if (descriptor != NULL) put_fd(descriptor); } file_descriptor* descriptor;