/*
 * net/sched built-in "noop" qdisc: enqueue drops every packet and
 * dequeue/peek always return NULL, so a device without a real qdisc
 * still has valid qdisc pointers.  noop_netdev_queue gives such
 * devices a netdev_queue to point at, and the "noqueue" ops reuse the
 * same no-op handlers for queueless devices.  (The noqueue_netdev_queue
 * initialiser continues past this excerpt.)
 */
.peek = noop_dequeue, .owner = THIS_MODULE, }; static struct netdev_queue noop_netdev_queue = { .qdisc = &noop_qdisc, .qdisc_sleeping = &noop_qdisc, }; struct Qdisc noop_qdisc = { .enqueue = noop_enqueue, .dequeue = noop_dequeue, .flags = TCQ_F_BUILTIN, .ops = &noop_qdisc_ops, .list = LIST_HEAD_INIT(noop_qdisc.list), .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), .dev_queue = &noop_netdev_queue, }; EXPORT_SYMBOL(noop_qdisc); static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = { .id = "noqueue", .priv_size = 0, .enqueue = noop_enqueue, .dequeue = noop_dequeue, .peek = noop_dequeue, .owner = THIS_MODULE, }; static struct Qdisc noqueue_qdisc; static struct netdev_queue noqueue_netdev_queue = {
/*
 * Sun-3 auto-vector interrupt support: sun3_vec255 acknowledges the
 * vector-255 interrupt (the intersil clock clear is intentionally
 * commented out), and sun3_inthandle clears the pending bit in the
 * Sun-3 interrupt register before handing the IRQ to the m68k core.
 * sun3_init_IRQ (continued past this excerpt) wires it all up.
 */
static irqreturn_t sun3_vec255(int irq, void *dev_id) { // intersil_clear(); return IRQ_HANDLED; } static void sun3_inthandle(unsigned int irq, struct pt_regs *fp) { *sun3_intreg &= ~(1 << irq); __m68k_handle_int(irq, fp); } static struct irq_controller sun3_irq_controller = { .name = "sun3", .lock = __SPIN_LOCK_UNLOCKED(sun3_irq_controller.lock), .startup = m68k_irq_startup, .shutdown = m68k_irq_shutdown, .enable = sun3_enable_irq, .disable = sun3_disable_irq, }; void __init sun3_init_IRQ(void) { *sun3_intreg = 1; m68k_setup_auto_interrupt(sun3_inthandle); m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7); m68k_setup_user_interrupt(VEC_USER, 128, NULL); if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
void __init init_consistent_dma_size(unsigned long size) { unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M); BUG_ON(consistent_pte); /* Check we're called before DMA region init */ BUG_ON(base < VMALLOC_END); /* Grow region to accommodate specified size */ if (base < consistent_base) consistent_base = base; } #include "vmregion.h" static struct arm_vmregion_head consistent_head = { .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), .vm_end = CONSISTENT_END, }; #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif /* * Initialise the consistent memory allocation. */ static int __init consistent_init(void) { int ret = 0; pgd_t *pgd;
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/trace.h>
#include <asm/pda.h>

/* Count of spurious interrupts, reported through ack_bad_irq(). */
static atomic_t irq_err_count;

/*
 * Called when an interrupt fires that no handler claims: count it and
 * log the vector number.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

/*
 * Catch-all descriptor used for out-of-range IRQ numbers.
 *
 * Fix: name the lockdep class after this object's own lock
 * (bad_irq_desc.lock) rather than irq_desc->lock, which belongs to the
 * generic descriptor array and gave the lock a misleading class name.
 */
static struct irq_desc bad_irq_desc = {
	.handle_irq = handle_bad_irq,
	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};

#ifdef CONFIG_CPUMASK_OFFSTACK
/* We are not allocating a variable-sized bad_irq_desc.affinity */
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif

#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i < NR_IRQS) {
/*
 * Atari interrupt controller: shutting an IRQ down disables and turns
 * it off, then lets the m68k core finish; IRQ_AUTO_4 additionally
 * re-installs the Falcon horizontal-blank handler in the vector table.
 */
return 0; } static void atari_shutdown_irq(unsigned int irq) { atari_disable_irq(irq); atari_turnoff_irq(irq); m68k_irq_shutdown(irq); if (irq == IRQ_AUTO_4) vectors[VEC_INT4] = falcon_hblhandler; } static struct irq_controller atari_irq_controller = { .name = "atari", .lock = __SPIN_LOCK_UNLOCKED(atari_irq_controller.lock), .startup = atari_startup_irq, .shutdown = atari_shutdown_irq, .enable = atari_enable_irq, .disable = atari_disable_irq, }; /* * void atari_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the atari IRQ handling routines.
/*
 * NEC VR41xx SIU: IrDA/serial mode-select register bits followed by the
 * static uart_port table.  NOTE(review): every port's lock is
 * initialised with siu_uart_ports->lock (element 0's lock), so all
 * ports share one lockdep class name -- presumably intentional, verify.
 * irq = -1 marks ports as not yet probed.
 */
#define RX_MAX_COUNT 256 #define TX_MAX_COUNT 15 #define SIUIRSEL 0x08 #define TMICMODE 0x20 #define TMICTX 0x10 #define IRMSEL 0x0c #define IRMSEL_HP 0x08 #define IRMSEL_TEMIC 0x04 #define IRMSEL_SHARP 0x00 #define IRUSESEL 0x02 #define SIRSEL 0x01 static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = { [0 ... SIU_PORTS_MAX-1] = { .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock), .irq = -1, }, }; #ifdef CONFIG_SERIAL_VR41XX_CONSOLE static uint8_t lsr_break_flag[SIU_PORTS_MAX]; #endif #define siu_read(port, offset) readb((port)->membase + (offset)) #define siu_write(port, offset, value) writeb((value), (port)->membase + (offset)) void vr41xx_select_siu_interface(siu_interface_t interface) { struct uart_port *port; unsigned long flags;
/*
 * Nintendo EXI bus: per-channel state table, statically initialised so
 * each channel's lock, io_lock and wait queue are usable before any
 * probe code runs.  (The initialiser for channel 2 continues past this
 * excerpt.)
 */
static void exi_tasklet(unsigned long param); /* io memory base for EXI */ static void __iomem *exi_io_mem; /* * These are the available exi channels. */ static struct exi_channel exi_channels[EXI_MAX_CHANNELS] = { [0] = { .channel = 0, .lock = __SPIN_LOCK_UNLOCKED(exi_channels[0].lock), .io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[0].io_lock), .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER( exi_channels[0].wait_queue), }, [1] = { .channel = 1, .lock = __SPIN_LOCK_UNLOCKED(exi_channels[1].lock), .io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[1].io_lock), .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER( exi_channels[1].wait_queue), }, [2] = { .channel = 2, .lock = __SPIN_LOCK_UNLOCKED(exi_channels[2].lock), .io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[2].io_lock),
/*
 * MN10300 trap handling: the interrupt vector base must sit on a 16MiB
 * boundary (checked at build time), die_lock serialises fatal-exception
 * reporting, and exception_to_signal_map translates exception codes
 * (indexed by code >> 3) into the signal number / si_code to deliver;
 * zeroed entries (MMU faults) are handled elsewhere.
 */
#include <asm/cacheflush.h> #include <asm/cpu-regs.h> #include <asm/busctl-regs.h> #include <unit/leds.h> #include <asm/fpu.h> #include <asm/sections.h> #include <asm/debugger.h> #include "internal.h" #if (CONFIG_INTERRUPT_VECTOR_BASE & 0xffffff) #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!" #endif int kstack_depth_to_print = 24; spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock); struct exception_to_signal_map { u8 signo; u32 si_code; }; static const struct exception_to_signal_map exception_to_signal_map[256] = { /* MMU exceptions */ [EXCEP_ITLBMISS >> 3] = { 0, 0 }, [EXCEP_DTLBMISS >> 3] = { 0, 0 }, [EXCEP_IAERROR >> 3] = { 0, 0 }, [EXCEP_DAERROR >> 3] = { 0, 0 }, /* system exceptions */ [EXCEP_TRAP >> 3] = { SIGTRAP, TRAP_BRKPT },
/*
 * PVR debug output buffers: gszBufferIRQ may be written from interrupt
 * context and is guarded by gsDebugLockIRQ; gszBufferNonIRQ (when
 * always-spinlock mode is off) is guarded by a mutex.  On pre-2.6.39
 * kernels the spinlock is initialised with an empty-argument
 * __SPIN_LOCK_UNLOCKED() -- already flagged to QAC above;
 * NOTE(review): those kernels also accept a lock name argument, so
 * passing gsDebugLockIRQ would give a meaningful lockdep class.
 * USE_SPIN_LOCK selects the spinlock whenever sleeping is not allowed.
 */
/* Message buffer for non-IRQ messages */ static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1]; #endif /* Message buffer for IRQ messages */ static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1]; #if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) /* The lock is used to control access to gszBufferNonIRQ */ static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ; #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) /* The lock is used to control access to gszBufferIRQ */ /* PRQA S 0671,0685 1 */ /* ignore warnings about C99 style initialisation */ static spinlock_t gsDebugLockIRQ = __SPIN_LOCK_UNLOCKED(); #else static DEFINE_SPINLOCK(gsDebugLockIRQ); #endif #if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) #if !defined (USE_SPIN_LOCK) /* to keep QAC happy */ #define USE_SPIN_LOCK (in_interrupt() || !preemptible()) #endif #endif static inline void GetBufferLock(unsigned long *pulLockFlags) { #if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) if (USE_SPIN_LOCK) #endif
/*
 * serial_ir: module parameters and the per-hardware-type table.  Each
 * entry has its own statically initialised lock, the MSR bit used to
 * sense the receive signal, and the MCR values that switch the
 * transmitter on/off; the homebrew transmit helpers exist only with
 * CONFIG_IR_SERIAL_TRANSMITTER.  (The IR_IRDEO entry continues past
 * this excerpt.)
 */
static bool softcarrier = true; static bool share_irq; static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */ static bool txsense; /* 0 = active high, 1 = active low */ /* forward declarations */ static void send_pulse_irdeo(unsigned int length, ktime_t edge); static void send_space_irdeo(void); #ifdef CONFIG_IR_SERIAL_TRANSMITTER static void send_pulse_homebrew(unsigned int length, ktime_t edge); static void send_space_homebrew(void); #endif static struct serial_ir_hw hardware[] = { [IR_HOMEBREW] = { .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_HOMEBREW].lock), .signal_pin = UART_MSR_DCD, .signal_pin_change = UART_MSR_DDCD, .on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR), .off = (UART_MCR_RTS | UART_MCR_OUT2), #ifdef CONFIG_IR_SERIAL_TRANSMITTER .send_pulse = send_pulse_homebrew, .send_space = send_space_homebrew, .set_send_carrier = true, .set_duty_cycle = true, #endif }, [IR_IRDEO] = { .lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO].lock), .signal_pin = UART_MSR_DSR,
/*
 * tboot (Intel TXT) shutdown support: tboot_mm is a minimal mm_struct
 * whose only job is to carry tboot_pg_dir so switch_to_tboot_pt() can
 * load it into CR3 for the shutdown path.  NOTE(review): the
 * mmap_sem/page_table_lock/mmlist initialisers name init_mm's members
 * rather than tboot_mm's own; for the initialiser macros this only
 * affects the lockdep class names, but verify it is intentional.
 */
boot_params.tboot_addr); pr_debug("version: %d\n", tboot->version); pr_debug("log_addr: 0x%08x\n", tboot->log_addr); pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); } static pgd_t *tboot_pg_dir; static struct mm_struct tboot_mm = { .mm_rb = RB_ROOT, .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), }; static inline void switch_to_tboot_pt(void) { write_cr3(virt_to_phys(tboot_pg_dir)); } static int map_tboot_page(unsigned long vaddr, unsigned long pfn, pgprot_t prot) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte;
/*
 * Amiga interrupt controller glue: forward declarations for the
 * enable/disable callbacks and the int1/3/4/5 demultiplexing handlers,
 * plus the irq_controller structure the m68k core dispatches through.
 */
#include <asm/irq.h> #include <asm/traps.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #include <asm/amipcmcia.h> static void amiga_enable_irq(unsigned int irq); static void amiga_disable_irq(unsigned int irq); static irqreturn_t ami_int1(int irq, void *dev_id); static irqreturn_t ami_int3(int irq, void *dev_id); static irqreturn_t ami_int4(int irq, void *dev_id); static irqreturn_t ami_int5(int irq, void *dev_id); static struct irq_controller amiga_irq_controller = { .name = "amiga", .lock = __SPIN_LOCK_UNLOCKED(amiga_irq_controller.lock), .enable = amiga_enable_irq, .disable = amiga_disable_irq, }; /* * void amiga_init_IRQ(void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the amiga IRQ handling routines. */
int apollo_irq_startup(unsigned int irq) { if (irq < 8) *(volatile unsigned char *)(pica+1) &= ~(1 << irq); else *(volatile unsigned char *)(picb+1) &= ~(1 << (irq - 8)); return 0; } void apollo_irq_shutdown(unsigned int irq) { if (irq < 8) *(volatile unsigned char *)(pica+1) |= (1 << irq); else *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8)); } static struct irq_controller apollo_irq_controller = { .name = "apollo", .lock = __SPIN_LOCK_UNLOCKED(apollo_irq_controller.lock), .startup = apollo_irq_startup, .shutdown = apollo_irq_shutdown, }; void __init dn_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int); m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16); }
/*
 * Tegra memory-controller error reporting: throttling state for error
 * prints, EMEM arbitration interrupt bookkeeping (arb_intr_info carries
 * its own lock), and module parameters exposing the moving-average
 * window and the interrupt/spurious counters; smmu_page_attrib names
 * SMMU page attribute combinations.
 */
trace_printk(fmt, ##__VA_ARGS__); \ pr_err(fmt, ##__VA_ARGS__); \ } \ } while (0) static bool mcerr_throttle_enabled = true; static u32 mcerr_silenced; static int arb_intr_mma_set(const char *arg, const struct kernel_param *kp); static int arb_intr_mma_get(char *buff, const struct kernel_param *kp); static void unthrottle_prints(struct work_struct *work); static int spurious_intrs; static struct arb_emem_intr_info arb_intr_info = { .lock = __SPIN_LOCK_UNLOCKED(arb_intr_info.lock), }; static int arb_intr_count; static struct kernel_param_ops arb_intr_mma_ops = { .get = arb_intr_mma_get, .set = arb_intr_mma_set, }; module_param_cb(arb_intr_mma_in_ms, &arb_intr_mma_ops, &arb_intr_info.arb_intr_mma, S_IRUGO | S_IWUSR); module_param(arb_intr_count, int, S_IRUGO | S_IWUSR); module_param(spurious_intrs, int, S_IRUGO | S_IWUSR); static const char *const smmu_page_attrib[] = { "nr-nw-s",
/*
 * Q40 interrupt controller: ISA IRQs 1, 2, 8, 9, 11, 12 and 13 are not
 * wired on this hardware, so startup refuses them with -ENXIO;
 * shutdown needs no hardware action.
 */
switch (irq) { case 1: case 2: case 8: case 9: case 11: case 12: case 13: printk("%s: ISA IRQ %d not implemented by HW\n", __FUNCTION__, irq); return -ENXIO; } return 0; } static void q40_irq_shutdown(unsigned int irq) { } static struct irq_controller q40_irq_controller = { .name = "q40", .lock = __SPIN_LOCK_UNLOCKED(q40_irq_controller.lock), .startup = q40_irq_startup, .shutdown = q40_irq_shutdown, .enable = q40_enable_irq, .disable = q40_disable_irq, }; /* * void q40_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function is called during kernel startup to initialize * the q40 IRQ handling routines.
/*
 * Freescale local bus controller (eLBC): fsl_lbc_lock serialises access
 * to the controller registers, and fsl_lbc_ctrl_dev is the singleton
 * controller instance exported to the flash/upm drivers that share the
 * bus.  fsl_lbc_addr() (continued past this excerpt) converts a bank
 * base address into BR-register format.
 */
#include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/io.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/syscore_ops.h> #include <asm/prom.h> #include <asm/fsl_lbc.h> static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; EXPORT_SYMBOL(fsl_lbc_ctrl_dev); /** * fsl_lbc_addr - convert the base address * @addr_base: base address of the memory bank * * This function converts a base address of lbc into the right format for the * BR register. If the SOC has eLBC then it returns 32bit physical address * else it convers a 34bit local bus physical address to correct format of * 32bit address for BR register (Example: MPC8641). */ u32 fsl_lbc_addr(phys_addr_t addr_base) { struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
/*
 * vt keyboard: the FN_HANDLERS name list is expanded twice -- once into
 * prototypes and once into the fn_handler[] dispatch table indexed by
 * function-key value.  vt_spawn_con is the lock/pid/signal triple used
 * by the spawn-console ioctl; kbd_table holds per-console keyboard
 * state and max_vals bounds each key-handler's argument.
 */
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\ fn_show_state, fn_send_intr, fn_lastcons, fn_caps_toggle,\ fn_num, fn_hold, fn_scroll_forw, fn_scroll_back,\ fn_boot_it, fn_caps_on, fn_compose, fn_SAK,\ fn_dec_console, fn_inc_console, fn_spawn_con, fn_bare_num typedef void (fn_handler_fn)(struct vc_data *vc); static fn_handler_fn FN_HANDLERS; static fn_handler_fn *fn_handler[] = { FN_HANDLERS }; /* * Variables exported for vt_ioctl.c */ struct vt_spawn_console vt_spawn_con = { .lock = __SPIN_LOCK_UNLOCKED(vt_spawn_con.lock), .pid = NULL, .sig = 0, }; /* * Internal Data. */ static struct kbd_struct kbd_table[MAX_NR_CONSOLES]; static struct kbd_struct *kbd = kbd_table; /* maximum values each key_handler can handle */ static const int max_vals[] = { 255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1,
return; pTrackBuf = &TrackBuf; pTrackBuf->RingBufCtr = 0; pTrackBuf->RingBufInIdx = 0; pTrackBuf->RingBufOutIdx = 0; tracking_initialize = true; } void ist30xx_tracking_deinit(void) { } static spinlock_t mr_lock = __SPIN_LOCK_UNLOCKED(); int ist30xx_get_track(u32 *track, int cnt) { int i; u8 *buf = (u8 *)track; unsigned long flags; cnt *= sizeof(track[0]); spin_lock_irqsave(&mr_lock, flags); if (pTrackBuf->RingBufCtr < (u16)cnt) { spin_unlock_irqrestore(&mr_lock, flags); return IST30XX_RINGBUF_NOT_ENOUGH; }
/*
 * Atmel SSC audio: tail of the per-SSC PDC/mask table, then the
 * ssc_info array -- one entry per SSC device, each with its own lock
 * and its direction mask marked unused until a stream claims it.
 * (The array's closing brace lies past this excerpt.)
 */
.name = "SSC2 PCM out", .pdc = &pdc_tx_reg, .mask = &ssc_tx_mask, }, { .name = "SSC2 PCM in", .pdc = &pdc_rx_reg, .mask = &ssc_rx_mask, } }, }; static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = { { .name = "ssc0", .lock = __SPIN_LOCK_UNLOCKED(ssc_info[0].lock), .dir_mask = SSC_DIR_MASK_UNUSED, .initialized = 0, }, { .name = "ssc1", .lock = __SPIN_LOCK_UNLOCKED(ssc_info[1].lock), .dir_mask = SSC_DIR_MASK_UNUSED, .initialized = 0, }, { .name = "ssc2", .lock = __SPIN_LOCK_UNLOCKED(ssc_info[2].lock), .dir_mask = SSC_DIR_MASK_UNUSED, .initialized = 0, },
/*
 * dst garbage collection: dst_garbage deliberately packs the lock
 * together with the list it protects so taking the lock also primes
 * the cache line holding the data; timer_inc starts at DST_GC_MAX and
 * the delayed work dst_gc_work reclaims entries, while long-lived
 * entries live on dst_busy_list under dst_gc_mutex.
 */
#if RT_CACHE_DEBUG >= 2 static atomic_t dst_total = ATOMIC_INIT(0); #endif /* * We want to keep lock & list close together * to dirty as few cache lines as possible in __dst_free(). * As this is not a very strong hint, we dont force an alignment on SMP. */ static struct { spinlock_t lock; struct dst_entry *list; unsigned long timer_inc; unsigned long timer_expires; } dst_garbage = { .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock), .timer_inc = DST_GC_MAX, }; static void dst_gc_task(struct work_struct *work); static void ___dst_free(struct dst_entry *dst); static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); static DEFINE_MUTEX(dst_gc_mutex); /* * long lived entries are maintained in this list, guarded by dst_gc_mutex */ static struct dst_entry *dst_busy_list; static void dst_gc_task(struct work_struct *work) {
/*
 * Average-runqueue sampling: per-CPU time-weighted accumulators of
 * nr_running and iowait counts, each guarded by a per-CPU spinlock;
 * sched_get_nr_running_avg() (continued past this excerpt) drains them
 * relative to last_get_time and must not be called concurrently with
 * itself.
 */
*/ /* * Scheduler hook for average runqueue determination */ #include <linux/module.h> #include <linux/percpu.h> #include <linux/hrtimer.h> #include <linux/sched.h> #include <linux/math64.h> static DEFINE_PER_CPU(u64, nr_prod_sum); static DEFINE_PER_CPU(u64, last_time); static DEFINE_PER_CPU(u64, nr); static DEFINE_PER_CPU(unsigned long, iowait_prod_sum); static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock); static u64 last_get_time; /** * sched_get_nr_running_avg * @return: Average nr_running and iowait value since last poll. * Returns the avg * 100 to return up to two decimal points * of accuracy. * * Obtains the average nr_running value since the last poll. * This function may not be called concurrently with itself */ void sched_get_nr_running_avg(int *avg, int *iowait_avg) { int cpu; u64 curr_time = sched_clock(); 
/*
 * Middle of the static initialiser for the ia64 unwinder state 'unw':
 * tail of the statistics struct, then the unwind-table list head, the
 * global unwind lock, the order preserved registers are saved in, and
 * preg_index -- offsets (in 8-byte words) of each preserved register's
 * save location within struct unw_frame_info.
 */
int builds; int news; int collisions; int runs; } script; struct { unsigned long init_time; unsigned long unwind_time; int inits; int unwinds; } api; } stat; # endif } unw = { .tables = &unw.kernel_table, .lock = __SPIN_LOCK_UNLOCKED(unw.lock), .save_order = { UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR }, .preg_index = { offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */ offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */ offsetof(struct unw_frame_info, bsp_loc)/8, offsetof(struct unw_frame_info, bspstore_loc)/8, offsetof(struct unw_frame_info, pfs_loc)/8, offsetof(struct unw_frame_info, rnat_loc)/8, offsetof(struct unw_frame_info, psp)/8, offsetof(struct unw_frame_info, rp_loc)/8, offsetof(struct unw_frame_info, r4)/8, offsetof(struct unw_frame_info, r5)/8,
/*
 * Samsung S3C24xx serial: the uart_driver descriptor (device name,
 * major/minor and console hook), followed by the statically initialised
 * per-port array -- each port gets its own lock, memory-mapped I/O and
 * RX IRQ; uartclk is 0 here and filled in later from the clock
 * framework.  (The array continues past this excerpt.)
 */
static struct uart_driver s3c24xx_uart_drv = { .owner = THIS_MODULE, // .dev_name = "s3c2410_serial", .dev_name = S3C24XX_SERIAL_NAME, .nr = CONFIG_SERIAL_SAMSUNG_UARTS, .cons = S3C24XX_SERIAL_CONSOLE, .driver_name = S3C24XX_SERIAL_NAME, .major = S3C24XX_SERIAL_MAJOR, .minor = S3C24XX_SERIAL_MINOR, }; static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = { [0] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock), .iotype = UPIO_MEM, .irq = IRQ_S3CUART_RX0, .uartclk = 0, .fifosize = 16, .ops = &s3c24xx_serial_ops, .flags = UPF_BOOT_AUTOCONF, .line = 0, } }, [1] = { .port = { .lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock), .iotype = UPIO_MEM, .irq = IRQ_S3CUART_RX1, .uartclk = 0,
/*
 * XMD rmnet: past_packet carries partial-packet reassembly state across
 * receive calls, and rmnet_channels statically describes the three
 * XMD_NET SMD channels -- positional initialisers, with the trailing
 * field being each channel's spinlock (hence the note about
 * SPIN_LOCK_UNLOCKED's removal in 3.0).
 */
typedef enum { RMNET_FULL_PACKET, RMNET_PARTIAL_PACKET, RMNET_PARTIAL_HEADER, } RMNET_PAST_STATE; static struct { RMNET_PAST_STATE state; char buf[MAX_PART_PKT_SIZE]; int size; int type; } past_packet; //f00171359, fenghaiming begin, SPIN_LOCK_UNLOCKED has been removed from kernel3.0. static struct xmd_ch_info rmnet_channels[MAX_SMD_NET] = { {0, "CHANNEL13", 0, XMD_NET, NULL, 0, __SPIN_LOCK_UNLOCKED(rmnet_channels[0].lock)}, {1, "CHANNEL14", 0, XMD_NET, NULL, 0, __SPIN_LOCK_UNLOCKED(rmnet_channels[1].lock)}, {2, "CHANNEL15", 0, XMD_NET, NULL, 0, __SPIN_LOCK_UNLOCKED(rmnet_channels[2].lock)}, }; //f00171359, fenghaiming end struct rmnet_private { struct xmd_ch_info *ch; struct net_device_stats stats; const char *chname; struct wake_lock wake_lock; #ifdef CONFIG_MSM_RMNET_DEBUG ktime_t last_packet; short active_countdown; /* Number of times left to check */ short restart_count; /* Number of polls seems so far */ unsigned long wakeups_xmit;
/*
 * sunrpc auth_domain cache: a name-indexed hash table guarded by
 * auth_domain_lock.  auth_domain_put() drops a reference and, when it
 * removes the last one, unhashes the entry and calls the flavour's
 * domain_release callback while still holding the lock taken by
 * atomic_dec_and_lock() (domain_release is expected to drop/free).
 */
/************************************************** * 'auth_domains' are stored in a hash table indexed by name. * When the last reference to an 'auth_domain' is dropped, * the object is unhashed and freed. * If auth_domain_lookup fails to find an entry, it will return * it's second argument 'new'. If this is non-null, it will * have been atomically linked into the table. */ #define DN_HASHBITS 6 #define DN_HASHMAX (1<<DN_HASHBITS) static struct hlist_head auth_domain_table[DN_HASHMAX]; static spinlock_t auth_domain_lock = __SPIN_LOCK_UNLOCKED(auth_domain_lock); void auth_domain_put(struct auth_domain *dom) { if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) { hlist_del(&dom->hash); dom->flavour->domain_release(dom); spin_unlock(&auth_domain_lock); } } EXPORT_SYMBOL_GPL(auth_domain_put); struct auth_domain * auth_domain_lookup(char *name, struct auth_domain *new) { struct auth_domain *hp;
/*
 * This structure is used so that all the data protected by lock
 * can be placed in the same cache line as the lock. This primes
 * the cache line to have the data after getting the lock.
 */
struct acct_glbs {
	spinlock_t lock;
	volatile int active;
	volatile int needcheck;
	struct file *file;
	struct pid_namespace *ns;
	struct timer_list timer;
};

/*
 * Use a designated initialiser so the lock member is named explicitly
 * and the initialiser survives any future reordering of acct_glbs.
 */
static struct acct_glbs acct_globals __cacheline_aligned =
	{ .lock = __SPIN_LOCK_UNLOCKED(acct_globals.lock) };

/*
 * Called whenever the timer says to check the free space.
 */
static void acct_timeout(unsigned long unused)
{
	acct_globals.needcheck = 1;
}

/*
 * Check the amount of free space and suspend/resume accordingly.
 */
static int check_free_space(struct file *file)
{
	struct kstatfs sbuf;
/*
 * process_keys: mutexes serialising session-keyring join and per-user
 * keyring creation, plus root_key_user -- the statically initialised
 * key quota/tracking record for UID 0 (usage/nkeys/nikeys pre-counted
 * for the built-in root keyrings).
 */
#include <linux/security.h> #include <linux/user_namespace.h> #include <asm/uaccess.h> #include "internal.h" /* Session keyring create vs join semaphore */ static DEFINE_MUTEX(key_session_mutex); /* User keyring creation semaphore */ static DEFINE_MUTEX(key_user_keyring_mutex); /* The root user's tracking struct */ struct key_user root_key_user = { .usage = ATOMIC_INIT(3), .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock), .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), .nkeys = ATOMIC_INIT(2), .nikeys = ATOMIC_INIT(2), .uid = GLOBAL_ROOT_UID, }; /* * Install the user and user session keyrings for the current process's UID. */ int install_user_keyrings(void) { struct user_struct *user; const struct cred *cred; struct key *uid_keyring, *session_keyring; key_perm_t user_keyring_perm; char buf[20];
/*
 * speakup core: the table of registered synthesizers and the currently
 * active one; speakup_info.spinlock guards the speech state shared
 * with the synth drivers (exported for them).  serial_synth_probe()
 * (continued past this excerpt) opens the synth's serial port.
 */
#include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "speakup.h" #include "serialio.h" #define MAXSYNTHS 16 /* Max number of synths in array. */ static struct spk_synth *synths[MAXSYNTHS]; struct spk_synth *synth; char pitch_buff[32] = ""; static int module_status; bool quiet_boot; struct speakup_info_t speakup_info = { .spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock), .flushing = 0, }; EXPORT_SYMBOL_GPL(speakup_info); static int do_synth_init(struct spk_synth *in_synth); int serial_synth_probe(struct spk_synth *synth) { const struct old_serial_port *ser; int failed = 0; if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) { ser = spk_serial_init(synth->ser); if (ser == NULL) { failed = -1;
/*
 * swap address space: swap_aops routes writepage through
 * swap_writepage, and swapper_space is the single address_space that
 * indexes every swap-cache page (atomic radix-tree allocation, no
 * dirty accounting on its backing_dev_info).  INC_CACHE_INFO updates
 * the swap_cache_info counters.
 */
* vmscan's shrink_page_list. */ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, .set_page_dirty = __set_page_dirty_no_writeback, .migratepage = migrate_page, }; static struct backing_dev_info swap_backing_dev_info = { .name = "swap", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, }; struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, }; EXPORT_SYMBOL(swapper_space); #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) static struct { unsigned long add_total; unsigned long del_total; unsigned long find_success; unsigned long find_total; } swap_cache_info;
/*
 * STREAMS spx driver: per-direction module statistics and the spx
 * instance list.  spx_lock is initialised with whichever spinlock
 * initialiser the target kernel provides (DEFINE_SPINLOCK, the
 * __SPIN_LOCK_UNLOCKED initialiser, or the legacy SPIN_LOCK_UNLOCKED
 * constant), failing the build if none exists.  spx_rput (continued
 * past this excerpt) handles read-side STREAMS messages.
 */
static struct module_stat spx_rstat __attribute__ ((__aligned__(SMP_CACHE_BYTES))); static struct module_stat spx_wstat __attribute__ ((__aligned__(SMP_CACHE_BYTES))); typedef struct spx { struct spx *next; struct spx **prev; int init; queue_t *q; dev_t dev; } spx_t; #if defined DEFINE_SPINLOCK static DEFINE_SPINLOCK(spx_lock); #elif defined __SPIN_LOCK_UNLOCKED static spinlock_t spx_lock = __SPIN_LOCK_UNLOCKED(spx_lock); #elif defined SPIN_LOCK_UNLOCKED static spinlock_t spx_lock = SPIN_LOCK_UNLOCKED; #else #error cannot initialize spin locks #endif static struct spx *spx_list = NULL; static streamscall int spx_rput(queue_t *q, mblk_t *mp) { switch (mp->b_datap->db_type) { case M_FLUSH: if (mp->b_rptr[0] & FLUSHR) { if (mp->b_rptr[0] & FLUSHBAND) flushband(q, mp->b_rptr[1], FLUSHDATA);