static void s2idle_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);

	raw_spin_lock_irq(&s2idle_lock);
	if (pm_wakeup_pending())
		goto out;

	s2idle_state = S2IDLE_STATE_ENTER;
	raw_spin_unlock_irq(&s2idle_lock);

	get_online_cpus();
	cpuidle_resume();

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();

	/* Make the current CPU wait so it can enter the idle loop too. */
	swait_event_exclusive(s2idle_wait_head,
			      s2idle_state == S2IDLE_STATE_WAKE);

	cpuidle_pause();
	put_online_cpus();

	raw_spin_lock_irq(&s2idle_lock);

 out:
	s2idle_state = S2IDLE_STATE_NONE;
	raw_spin_unlock_irq(&s2idle_lock);

	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
}
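/*
 * For context: a minimal sketch of the wake-side counterpart that
 * completes the swait_event_exclusive() above. Based on the mainline
 * s2idle_wake(); details may differ between kernel versions.
 */
void s2idle_wake(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&s2idle_lock, flags);
	if (s2idle_state > S2IDLE_STATE_NONE) {
		s2idle_state = S2IDLE_STATE_WAKE;
		swake_up_one(&s2idle_wait_head);
	}
	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
}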
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	irq_compat_set_progress(desc);
	desc->istate |= IRQS_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->istate &= ~IRQS_INPROGRESS;
	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
static int stlog_print(char __user *buf, int size)
{
	char *text;
	struct ringbuf *msg;
	int len = 0;

#ifdef DEBUG_STLOG
	printk("[STLOG] %s stlog_seq %llu stlog_idx %lu ringbuf_first_seq %llu ringbuf_first_idx %lu ringbuf_next_seq %llu ringbuf_next_idx %lu\n",
	       __func__, stlog_seq, stlog_idx,
	       ringbuf_first_seq, ringbuf_first_idx,
	       ringbuf_next_seq, ringbuf_next_idx);
#endif

	text = kmalloc(RINGBUF_LINE_MAX + S_PREFIX_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	while (size > 0) {
		size_t n;

		raw_spin_lock_irq(&ringbuf_lock);
		if (stlog_seq < ringbuf_first_seq) {
			/* messages are gone, move to first one */
			stlog_seq = ringbuf_first_seq;
			stlog_idx = ringbuf_first_idx;
			ringbuf_prev = 0;
		}
		if (stlog_seq == ringbuf_next_seq) {
			raw_spin_unlock_irq(&ringbuf_lock);
			break;
		}

		msg = ringbuf_from_idx(stlog_idx);
		n = stlog_print_text(msg, ringbuf_prev, false, text,
				     RINGBUF_LINE_MAX + S_PREFIX_MAX);
		if (n <= size) {
			/* message fits into buffer, move forward */
			stlog_idx = ringbuf_next(stlog_idx);
			stlog_seq++;
			ringbuf_prev = msg->flags;
		} else if (!len) {
			/* nothing copied yet: copy a truncated message */
			n = size;
		} else {
			n = 0;
		}
		raw_spin_unlock_irq(&ringbuf_lock);

		if (!n)
			break;

		if (copy_to_user(buf, text, n)) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	}

	kfree(text);
	return len;
}
static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
{
	struct irq_desc *desc = irq_to_desc(irq);
	uint16_t intassign0, intassign1;
	unsigned int pin;

	pin = SYSINT1_IRQ_TO_PIN(irq);

	raw_spin_lock_irq(&desc->lock);

	intassign0 = icu1_read(INTASSIGN0);
	intassign1 = icu1_read(INTASSIGN1);

	switch (pin) {
	case 0:
		intassign0 &= ~INTASSIGN_MASK;
		intassign0 |= (uint16_t)assign;
		break;
	case 1:
		intassign0 &= ~(INTASSIGN_MASK << 3);
		intassign0 |= (uint16_t)assign << 3;
		break;
	case 2:
		intassign0 &= ~(INTASSIGN_MASK << 6);
		intassign0 |= (uint16_t)assign << 6;
		break;
	case 3:
		intassign0 &= ~(INTASSIGN_MASK << 9);
		intassign0 |= (uint16_t)assign << 9;
		break;
	case 8:
		intassign0 &= ~(INTASSIGN_MASK << 12);
		intassign0 |= (uint16_t)assign << 12;
		break;
	case 9:
		intassign1 &= ~INTASSIGN_MASK;
		intassign1 |= (uint16_t)assign;
		break;
	case 11:
		intassign1 &= ~(INTASSIGN_MASK << 6);
		intassign1 |= (uint16_t)assign << 6;
		break;
	case 12:
		intassign1 &= ~(INTASSIGN_MASK << 9);
		intassign1 |= (uint16_t)assign << 9;
		break;
	default:
		raw_spin_unlock_irq(&desc->lock);
		return -EINVAL;
	}

	sysint1_assign[pin] = assign;
	icu1_write(INTASSIGN0, intassign0);
	icu1_write(INTASSIGN1, intassign1);

	raw_spin_unlock_irq(&desc->lock);

	return 0;
}
/*****************************************************************************
 Function    : mlog_log_read
 Description :
 Input       :
 Output      :
 Return      : No
 History     :
 *****************************************************************************/
static ssize_t mlog_log_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t ret = -EINVAL;
	int error = 0;
	int i = 0;
	int bwr_eq_rd = 0;
	char c;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&mlog_log_wait, &wait, TASK_INTERRUPTIBLE);

		raw_spin_lock_irq(&mlog_log_buf_lock);
		bwr_eq_rd = (log_wr_off == log_rd_off);
		raw_spin_unlock_irq(&mlog_log_buf_lock);

		if (!bwr_eq_rd)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}
	finish_wait(&mlog_log_wait, &wait);

	if (bwr_eq_rd)
		return ret;

	raw_spin_lock_irq(&mlog_log_buf_lock);
	i = 0;
	while (!error && (log_wr_off != log_rd_off) && i < count) {
		c = LOG_BUF(log_rd_off);
		log_rd_off++;
		error = __put_user(c, buf);
		buf++;
		i++;
	}
	if (!error)
		ret = i;
	raw_spin_unlock_irq(&mlog_log_buf_lock);

	return ret;
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	if (!user_mode(regs))
		bug_type = report_bug(regs->pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";

	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler. Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}
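/*
 * For context: a minimal sketch of how a caller reaches __down_common().
 * Modeled on kernel/semaphore.c's down_interruptible(); exact wrapper
 * names and details may differ between kernel versions.
 */
static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;			/* fast path: count available */
	else
		result = __down_interruptible(sem);	/* slow path: sleep */
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}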
/*
 * Copy a range of characters from the log buffer.
 */
int log_buf_copy(char *dest, int idx, int len)
{
	int ret, max;
	bool took_lock = false;

	if (!oops_in_progress) {
		raw_spin_lock_irq(&logbuf_lock);
		took_lock = true;
	}

	max = log_buf_get_len();
	if (idx < 0 || idx >= max) {
		ret = -1;
	} else {
		if (len > max - idx)
			len = max - idx;
		ret = len;
		idx += (log_end - max);
		while (len-- > 0)
			dest[len] = LOG_BUF(idx + len);
	}

	if (took_lock)
		raw_spin_unlock_irq(&logbuf_lock);

	return ret;
}
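/*
 * Usage sketch (hypothetical helper): snapshot the tail of the log into
 * a local buffer. dump_log_tail() is an assumed name, not part of the
 * original code.
 */
static void dump_log_tail(void)
{
	static char snap[256];
	int max = log_buf_get_len();
	int start = max > (int)sizeof(snap) ? max - (int)sizeof(snap) : 0;
	int n = log_buf_copy(snap, start, sizeof(snap));

	if (n > 0)
		pr_info("captured %d bytes of log tail\n", n);
}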
/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	struct irq_desc *desc;
	unsigned long mask = 0;
	int i;

	/*
	 * quiesce the kernel, or at least the asynchronous portion
	 */
	async_synchronize_full();
	mutex_lock(&probing_active);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for_each_irq_desc_reverse(i, desc) {
		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && irq_settings_can_probe(desc)) {
			/*
			 * Some chips need to know about probing in
			 * progress:
			 */
			if (desc->irq_data.chip->irq_set_type)
				desc->irq_data.chip->irq_set_type(&desc->irq_data,
								  IRQ_TYPE_PROBE);
			irq_startup(desc, false);
		}
		raw_spin_unlock_irq(&desc->lock);
	}
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for_each_irq_desc_reverse(i, desc) {
		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && irq_settings_can_probe(desc)) {
			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
			if (irq_startup(desc, false))
				desc->istate |= IRQS_PENDING;
		}
		raw_spin_unlock_irq(&desc->lock);
	}
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev: pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	/* this variant blocks on a completion rather than a wait queue */
	do {
		raw_spin_unlock_irq(&pci_lock);
		vmm_completion_wait(&__pci_cfg_completion);
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
}
static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
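/*
 * For context: a minimal sketch of the unlock side that wakes the
 * waiters above. Modeled on drivers/pci/access.c's
 * pci_cfg_access_unlock(); details may differ between kernel versions.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	dev->block_cfg_access = 0;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	wake_up_all(&pci_cfg_wait);
}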
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		mask_this_irq = 1;
		if (!(desc->status & IRQ_LEVEL))
			desc->status |= IRQ_PENDING;
		goto out_unlock;
	}

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

out_unlock:
	if (unlikely(mask_this_irq)) {
		chip_bus_lock(irq, desc);
		mask_irq(desc, irq);
		chip_bus_sync_unlock(irq, desc);
	}
	raw_spin_unlock_irq(&desc->lock);
}
static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
	/*
	 * Now filter out any obviously spurious interrupts
	 */
	for_each_irq_desc(i, desc) {
		raw_spin_lock_irq(&desc->lock);

		if (desc->istate & IRQS_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(desc->istate & IRQS_WAITING)) {
				desc->istate &= ~IRQS_AUTODETECT;
				irq_shutdown(desc);
			} else if (i < 32)
				mask |= 1 << i;
		}
		raw_spin_unlock_irq(&desc->lock);
	}
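/*
 * Usage sketch: the classic autoprobe sequence from a driver's point of
 * view. trigger_device_interrupt() is a hypothetical helper standing in
 * for whatever register poke makes the device raise its interrupt.
 */
static int example_probe_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();
	trigger_device_interrupt();	/* hypothetical: assert the device irq */
	msleep(10);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* positive: the probed irq number */
	if (irq <= 0)
		pr_warn("irq autoprobe failed (%d)\n", irq);

	return irq;
}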
static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
int dev_read_kmsg(char __user *buf, int len)
{
	int error = -EINVAL;
	unsigned i;
	char c;

	if (!buf || len < 0)
		goto out;
	error = 0;
	if (!len)
		goto out;
	if (!access_ok(VERIFY_WRITE, buf, len)) {
		error = -EFAULT;
		goto out;
	}

	error = wait_event_interruptible(dev_wait, (dev_start - log_end));
	if (error)
		goto out;

	i = 0;
	raw_spin_lock_irq(&logbuf_lock);
	while (!error && (dev_start != log_end) && i < len) {
		c = LOG_BUF(dev_start);
		dev_start++;
		raw_spin_unlock_irq(&logbuf_lock);
		error = __put_user(c, buf);
		buf++;
		i++;
		cond_resched();
		raw_spin_lock_irq(&logbuf_lock);
	}
	raw_spin_unlock_irq(&logbuf_lock);

	if (!error)
		error = i;
out:
	return error;
}
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		mask_this_irq = 1;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);

	if (unlikely(mask_this_irq)) {
		chip_bus_lock(desc);
		mask_irq(desc);
		chip_bus_sync_unlock(desc);
	}
}
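/*
 * Usage sketch: handle_nested_irq() is meant to be called from the
 * threaded handler of a slow-bus (e.g. I2C) interrupt controller to
 * demultiplex its child interrupts. The my_expander struct and
 * read_pending_mask() are hypothetical stand-ins for a real driver's
 * device state and bus access.
 */
struct my_expander {			/* hypothetical device struct */
	struct irq_domain *domain;
	unsigned int nr_irqs;
};

static irqreturn_t my_expander_irq_thread(int irq, void *data)
{
	struct my_expander *chip = data;
	unsigned long pending;
	int bit;

	pending = read_pending_mask(chip);	/* hypothetical I2C read */
	for_each_set_bit(bit, &pending, chip->nr_irqs)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return IRQ_HANDLED;
}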
/* see cgroup_stat_flush() */
static void cgroup_stat_flush_locked(struct cgroup *cgrp)
{
	int cpu;

	lockdep_assert_held(&cgroup_stat_mutex);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock_irq(cpu_lock);
		while ((pos = cgroup_cpu_stat_pop_updated(pos, cgrp, cpu)))
			cgroup_cpu_stat_flush_one(pos, cpu);
		raw_spin_unlock_irq(cpu_lock);
	}
}
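/*
 * For context: the wrapper referenced by the "see cgroup_stat_flush()"
 * comment above would look roughly like this - a minimal sketch based
 * on the early cgroup stat code; details may differ by kernel version.
 */
void cgroup_stat_flush(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_stat_mutex);
	cgroup_stat_flush_locked(cgrp);
	mutex_unlock(&cgroup_stat_mutex);
}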
/*****************************************************************************
 Function    : mlog_log_poll
 Description :
 Input       :
 Output      :
 Return      : No
 History     :
 *****************************************************************************/
static unsigned int mlog_log_poll(struct file *file, poll_table *wait)
{
	unsigned int ret = 0;

	poll_wait(file, &mlog_log_wait, wait);

	raw_spin_lock_irq(&mlog_log_buf_lock);
	if (log_wr_off != log_rd_off)
		ret = POLLIN | POLLRDNORM;
	raw_spin_unlock_irq(&mlog_log_buf_lock);

	return ret;
}
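/*
 * Wiring sketch: how the mlog read and poll handlers above would
 * typically be hooked up. The mlog_log_fops name is an assumption,
 * not part of the original code.
 */
static const struct file_operations mlog_log_fops = {
	.owner	= THIS_MODULE,
	.read	= mlog_log_read,
	.poll	= mlog_log_poll,
	.llseek	= noop_llseek,
};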
int log_buf_copy2(char *dest, int dest_len, int log_copy_start,
		  int log_copy_end)
{
	bool took_lock = false;
	int count;

	if (!oops_in_progress) {
		raw_spin_lock_irq(&logbuf_lock);
		took_lock = true;
	}

	for (count = 0;
	     (log_copy_start != log_end) &&
	     (log_copy_start != log_copy_end) && (count < dest_len);
	     log_copy_start++) {
		dest[count++] = LOG_BUF(log_copy_start);
	}

	if (took_lock)
		raw_spin_unlock_irq(&logbuf_lock);

	return count;
}
static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
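/*
 * Registration sketch: how the *_show() callbacks above are exposed as
 * per-irq sysfs attributes. Modeled on kernel/irq/irqdesc.c; the helper
 * macro and the array contents may differ between kernel versions.
 */
#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

IRQ_ATTR_RO(hwirq);
IRQ_ATTR_RO(type);
IRQ_ATTR_RO(wakeup);
IRQ_ATTR_RO(name);
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};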
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	the rt_mutex to take
 * @state:	the state the task should block in (TASK_INTERRUPTIBLE
 *		or TASK_UNINTERRUPTIBLE)
 * @timeout:	the pre-initialized and started timer, or NULL for none
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter,
		    unsigned long flags)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
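/*
 * Caller sketch: roughly how the slow path above is entered. A condensed,
 * hedged outline of rt_mutex_slowlock(); the exact sequence (waiter setup,
 * timer start, deadlock handling) varies across kernel versions.
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags = 0;
	int ret;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock_irq(&lock->wait_lock);

	set_current_state(state);
	ret = task_blocks_on_rt_mutex(lock, &waiter, current, 0);
	if (likely(!ret))
		/* wait_lock held: sleep in the wait-wake-try-to-take loop */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, flags);

	__set_current_state(TASK_RUNNING);
	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	raw_spin_unlock_irq(&lock->wait_lock);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}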
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
	printk_level_setup(LOGLEVEL_DEBUG);
#endif

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

#ifdef CONFIG_HISI_BB
	set_exception_info(instruction_pointer(regs));
#endif

	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
	printk_level_setup(sysctl_printk_level);
#endif
}
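/*
 * Caller sketch: die() is invoked from fatal trap paths. A hypothetical,
 * condensed example of an undefined-instruction handler; real entry
 * points differ per architecture and kernel version.
 */
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
	/* try fixups / instruction emulation first (omitted) ... */

	if (user_mode(regs))
		force_sig(SIGILL);	/* user fault: signal the task */
	else
		die("Oops - undefined instruction", regs, 0);
}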