/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}
static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/*
	 * Family 6, model 15 parts may report an architectural perfmon
	 * version of 0; treat them as v2 with two 40-bit counters.
	 */
	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
	    __this_cpu_read(cpu_info.x86_model) == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.bit_width = 40;
	}

	num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time. */
	blocked += __this_cpu_read(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__this_cpu_write(xen_residual_blocked, blocked);
	account_idle_ticks(ticks);
}
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
/**
 * tick_oneshot_mode_active - check whether the system is in oneshot mode
 *
 * returns 1 when either nohz or highres are enabled. otherwise 0.
 */
int tick_oneshot_mode_active(void)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
	local_irq_restore(flags);

	return ret;
}
u64 xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	u64 ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
#ifdef CONFIG_SEC_DEBUG
				sec_debug_irq_sched_log(-1, t->func, 3);
				t->func(t->data);
				sec_debug_irq_sched_log(-1, t->func, 4);
#else
				t->func(t->data);
#endif
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				   &dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				   &dev->cpu);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
/*S:010
 * We approach the Switcher.
 *
 * Remember that each CPU has two pages which are visible to the Guest when it
 * runs on that CPU.  This has to contain the state for that Guest: we copy the
 * state in just before we run the Guest.
 *
 * Each Guest has "changed" flags which indicate what has changed in the Guest
 * since it last ran.  We saw this set in interrupts_and_traps.c and
 * segments.c.
 */
static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/*
	 * Copying all this data can be quite expensive.  We usually run the
	 * same Guest we ran last time (and that Guest hasn't run anywhere else
	 * meanwhile).  If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
		__this_cpu_write(lg_last_cpu, cpu);
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}

	/*
	 * These copies are pretty cheap, so we do them unconditionally:
	 */
	/* Save the current Host top-level page directory. */
#ifdef CONFIG_PAX_PER_CPU_PGD
	pages->state.host_cr3 = read_cr3();
#else
	pages->state.host_cr3 = __pa(current->mm->pgd);
#endif
	/*
	 * Set up the Guest's page tables to see this CPU's pages (and no
	 * other CPU's pages).
	 */
	map_switcher_in_guest(cpu, pages);

	/*
	 * Set up the two "TSS" members which tell the CPU what stack to use
	 * for traps which go directly into the Guest (ie. traps at privilege
	 * level 1).
	 */
	pages->state.guest_tss.sp1 = cpu->esp1;
	pages->state.guest_tss.ss1 = cpu->ss1;

	/* Copy direct-to-Guest trap entries. */
	if (cpu->changed & CHANGED_IDT)
		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries which the Guest can change. */
	if (cpu->changed & CHANGED_GDT)
		copy_gdt(cpu, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (cpu->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(cpu, pages->state.guest_gdt);

	/* Mark the Guest as unchanged for next time. */
	cpu->changed = 0;
}
unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");

		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}
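/*
 * For context (not taken from the snippet above): a minimal sketch of the
 * per-CPU storm state machine the switch statement walks through, assuming
 * the usual three states. The names mirror those used in the code; treat the
 * exact definition and ordering as an assumption, not the canonical one.
 */
enum {
	CMCI_STORM_NONE,	/* normal interrupt-driven CMCI handling */
	CMCI_STORM_ACTIVE,	/* storm detected: CMCI disabled, polling */
	CMCI_STORM_SUBSIDED,	/* poll timer quiet, waiting for all CPUs */
};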
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic) {
			trigger_all_cpu_backtrace();
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		} else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
asmlinkage __visible void xen_maybe_preempt_hcall(void)
{
	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
		     && need_resched())) {
		/*
		 * Clear flag as we may be rescheduled on a different
		 * cpu.
		 */
		__this_cpu_write(xen_in_preemptible_hcall, false);
		_cond_resched();
		__this_cpu_write(xen_in_preemptible_hcall, true);
	}
}
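/*
 * For context (a sketch, not quoted from this file): the per-CPU flag tested
 * above is expected to be set and cleared around a long-running preemptible
 * hypercall by a pair of helpers along these lines.
 */
static inline void xen_preemptible_hcall_begin(void)
{
	/* Mark this CPU as being inside a preemptible hypercall. */
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	/* Leaving the hypercall: preemption checks no longer apply. */
	__this_cpu_write(xen_in_preemptible_hcall, false);
}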
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	if (__this_cpu_read(tee_active))
		return XT_CONTINUE;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the TEE
	 * --gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential TEE loops
	 * between two hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		__this_cpu_write(tee_active, true);
		ip_local_out(skb);
		__this_cpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	hrtimer_peek_ahead_timers();
#endif

	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (need_resched()) {
		dev->last_residency = 0;
		local_irq_enable();
		entered_state = next_state;
		goto exit;
	}

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

exit:
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);

	blocked += __this_cpu_read(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__this_cpu_write(xen_residual_blocked, blocked);
	account_idle_ticks(ticks);
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				exynos_ss_softirq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
						  t->func, irqs_disabled(),
						  ESS_FLAG_IN);
				t->func(t->data);
				exynos_ss_softirq(ESS_FLAG_SOFTIRQ_HI_TASKLET,
						  t->func, irqs_disabled(),
						  ESS_FLAG_OUT);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
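/*
 * The comment above covers both increment and decrement, but only the
 * increment is shown. A minimal sketch of the matching decrement path,
 * mirroring the logic above; treat the exact body as an assumption, not the
 * canonical implementation.
 */
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		/* Fold the underflowed differential back into the zone counter. */
		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}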
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
			      HW_IRQ_MX_BASE), MIENGSET);
	} else {
		mask |= __this_cpu_read(cached_irq_mask);
		__this_cpu_write(cached_irq_mask, mask);
		set_sr(mask, intenable);
	}
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;

	RCU_NONIDLE(
		trace_power_start(POWER_CSTATE, next_state, dev->cpu);
		trace_cpu_idle(next_state, dev->cpu)
	);

	dev->last_residency = target_state->enter(dev, target_state);

	RCU_NONIDLE(
		trace_power_end(dev->cpu);
		trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
	);
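	/*
	 * The snippet ends mid-function above. A hedged sketch of the usual
	 * tail for this older cpuidle API (stats update, governor reflect,
	 * return) -- an assumption for completeness, not the missing original.
	 */
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);

	return 0;
}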
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/*
	 * Reset the counter if we've logged an error in the last poll
	 * during the storm.
	 */
	if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);

	return true;
}
int notrace __ipipe_check_percpu_access(void)
{
	struct ipipe_percpu_domain_data *p;
	struct ipipe_domain *this_domain;
	unsigned long flags;
	int ret = 0;

	flags = hard_local_irq_save_notrace();

	/*
	 * Don't use __ipipe_current_domain here, this would recurse
	 * indefinitely.
	 */
	this_domain = __this_cpu_read(ipipe_percpu.curr)->domain;

	/*
	 * Only the root domain may implement preemptive CPU migration
	 * of tasks, so anything above in the pipeline should be fine.
	 */
	if (this_domain != ipipe_root_domain)
		goto out;

	if (raw_irqs_disabled_flags(flags))
		goto out;

	/*
	 * Last chance: hw interrupts were enabled on entry while
	 * running over the root domain, but the root stage might be
	 * currently stalled, in which case preemption would be
	 * disabled, and no migration could occur.
	 */
	if (this_domain == ipipe_root_domain) {
		p = ipipe_this_cpu_root_context();
		if (test_bit(IPIPE_STALL_FLAG, &p->status))
			goto out;
	}

	/*
	 * Our caller may end up accessing the wrong per-cpu variable
	 * instance due to CPU migration; tell it to complain about
	 * this.
	 */
	ret = 1;
out:
	hard_local_irq_restore_notrace(flags);

	return ret;
}
/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}
int psci_cpu_suspend_enter(unsigned long index)
{
	int ret;
	u32 *state = __this_cpu_read(psci_power_state);
	/*
	 * idle state index 0 corresponds to wfi, should never be called
	 * from the cpu_suspend operations
	 */
	if (WARN_ON_ONCE(!index))
		return -EINVAL;

	if (!psci_power_state_loses_context(state[index - 1]))
		ret = psci_ops.cpu_suspend(state[index - 1], 0);
	else
		ret = cpu_suspend(index, psci_suspend_finisher);

	return ret;
}
/**
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount,
			      int32_t batch)
{
	int64_t count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
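/*
 * For illustration (a sketch, not quoted from the source): callers that do
 * not care about the batch size typically go through a thin wrapper that
 * forwards a global default. The name percpu_counter_batch is assumed here.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc,
				      int64_t amount)
{
	/* percpu_counter_batch: assumed tunable default batch size. */
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}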
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);
	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}