/** * Get a unique magic number (ioctl). * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a resulting drm_auth structure. * \return zero on success, or a negative number on failure. * * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p * file_priv. * This ioctl needs protection by the drm_global_mutex, which protects * struct drm_file::magic and struct drm_magic_entry::priv. */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; static struct spinlock lock = SPINLOCK_INITIALIZER(&lock, "drm_gm"); struct drm_auth *auth = data; /* Find unique magic */ if (file_priv->magic) { auth->magic = file_priv->magic; } else { do { spin_lock(&lock); if (!sequence) ++sequence; /* reserve 0 */ auth->magic = sequence++; spin_unlock(&lock); } while (drm_find_file(dev, auth->magic)); file_priv->magic = auth->magic; drm_add_magic(dev, file_priv, auth->magic); } DRM_DEBUG("%u\n", auth->magic); return 0; }
*/ #ifdef AH_SUPPORT_AR5312 #define BUSTAG(ah) \ ((bus_space_tag_t) ((struct ar531x_config *)((ah)->ah_st))->tag) #else #define BUSTAG(ah) ((ah)->ah_st) #endif /* * This lock is used to seralise register access for chips which have * problems w/ SMP CPUs issuing concurrent PCI transactions. * * XXX This is a global lock for now; it should be pushed to * a per-device lock in some platform-independent fashion. */ struct spinlock ah_regser_spin = SPINLOCK_INITIALIZER(ah_regser_spin); extern void ath_hal_printf(struct ath_hal *, const char*, ...) __printflike(2,3); extern void ath_hal_vprintf(struct ath_hal *, const char*, __va_list) __printflike(2, 0); extern const char* ath_hal_ether_sprintf(const u_int8_t *mac); extern void *ath_hal_malloc(size_t); extern void ath_hal_free(void *); #ifdef AH_ASSERT extern void ath_hal_assert_failed(const char* filename, int lineno, const char* msg); #endif #ifdef AH_DEBUG extern void DO_HALDEBUG(struct ath_hal *ah, u_int mask, const char* fmt, ...) __printflike(3, 4);
*/ #define PAGER_MAP_SIZE (8 * 1024 * 1024) TAILQ_HEAD(swqueue, buf); int pager_map_size = PAGER_MAP_SIZE; struct vm_map pager_map; static int bswneeded_raw; static int bswneeded_kva; static long nswbuf_raw; static struct buf *swbuf_raw; static vm_offset_t swapbkva; /* swap buffers kva */ static struct swqueue bswlist_raw; /* without kva */ static struct swqueue bswlist_kva; /* with kva */ static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin); static int pbuf_raw_count; static int pbuf_kva_count; SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0, "Kernel virtual address space reservations"); SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0, "Kernel raw address space reservations"); /* * Initialize the swap buffer list. * * Called from the low level boot code only. */ static void vm_pager_init(void *arg __unused)
#ifdef DDB
#include <ddb/ddb.h>
#endif

extern int lwkt_sched_debug;

/*
 * Number of slots in the pool-token array; kept prime, presumably so the
 * address hash spreads evenly — TODO confirm against the hashing code.
 */
#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS 4001 /* prime number */
#endif

/* Each pool token is cache-line aligned (__cachealign). */
struct lwkt_pool_token {
	struct lwkt_token token;
} __cachealign;

static struct lwkt_pool_token pool_tokens[LWKT_NUM_POOL_TOKENS];

/*
 * NOTE(review): named two-argument SPINLOCK_INITIALIZER form; judging by
 * the name this guards token debug output — confirm against its users.
 */
struct spinlock tok_debug_spin =
	SPINLOCK_INITIALIZER(&tok_debug_spin, "tok_debug_spin");

/* KTR trace-record formats for token acquisition events. */
#define TOKEN_STRING "REF=%p TOK=%p TD=%p"
#define TOKEN_ARGS lwkt_tokref_t ref, lwkt_token_t tok, struct thread *td
#define CONTENDED_STRING TOKEN_STRING " (contention started)"
#define UNCONTENDED_STRING TOKEN_STRING " (contention stopped)"
#if !defined(KTR_TOKENS)
#define KTR_TOKENS KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, TOKEN_ARGS);
/* Disabled trace points kept for reference. */
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, TOKEN_ARGS);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, TOKEN_ARGS);
#define VACT_INC 2 static void vnode_terminate(struct vnode *vp); static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures"); /* * The vnode free list hold inactive vnodes. Aged inactive vnodes * are inserted prior to the mid point, and otherwise inserted * at the tail. */ TAILQ_HEAD(freelst, vnode); static struct freelst vnode_active_list; static struct freelst vnode_inactive_list; static struct vnode vnode_active_rover; static struct spinlock vfs_spin = SPINLOCK_INITIALIZER(vfs_spin, "vfs_spin"); int activevnodes = 0; SYSCTL_INT(_debug, OID_AUTO, activevnodes, CTLFLAG_RD, &activevnodes, 0, "Number of active nodes"); int cachedvnodes = 0; SYSCTL_INT(_debug, OID_AUTO, cachedvnodes, CTLFLAG_RD, &cachedvnodes, 0, "Number of total cached nodes"); int inactivevnodes = 0; SYSCTL_INT(_debug, OID_AUTO, inactivevnodes, CTLFLAG_RD, &inactivevnodes, 0, "Number of inactive nodes"); static int batchfreevnodes = 5; SYSCTL_INT(_debug, OID_AUTO, batchfreevnodes, CTLFLAG_RW, &batchfreevnodes, 0, "Number of vnodes to free at once"); #ifdef TRACKVNODE static u_long trackvnode;
#include <sys/md5.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>
#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

/* Software crypto driver state. */
static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

/*
 * Fixed to use the two-argument SPINLOCK_INITIALIZER(head, name) form
 * used by the other spinlock initializers in this tree.
 * NOTE(review): presumably guards the swcr_sessions table — confirm
 * against the lock's users.
 */
static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(&swcr_spin, "swcr_spin");

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

/* Forward declarations for the per-descriptor processing helpers. */
static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
    int);
static int swcr_combined(struct cryptop *);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int