/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	static char __cpuinitdata cpu_base_done[NR_CPUS];
	int i;
	unsigned long flags;

	if (!cpu_base_done[cpu]) {
		raw_spin_lock_init(&cpu_base->lock);
		cpu_base_done[cpu] = 1;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	hrtimer_init_hres(cpu_base);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
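/*
 * Usage sketch (assumed caller, not part of the snippet above): in
 * kernels of this vintage, a per-cpu init helper like this is driven
 * from a CPU hotplug notifier on CPU_UP_PREPARE. The notifier below is
 * illustrative; only init_hrtimers_cpu() comes from the code above.
 */
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* set up the per-cpu bases before the CPU goes online */
		init_hrtimers_cpu(scpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}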
/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}
static void pfair_task_exit(struct task_struct *t)
{
	unsigned long flags;

	BUG_ON(!is_realtime(t));

	/* Remove task from release or ready queue, and ensure
	 * that it is not the scheduled task for ANY CPU. We
	 * do this blanket check because occasionally when
	 * tasks exit while blocked, the task_cpu of the task
	 * might not be the same as the CPU that the PFAIR scheduler
	 * has chosen for it.
	 */
	raw_spin_lock_irqsave(&pfair_lock, flags);

	TRACE_TASK(t, "RIP, state:%d\n", t->state);
	drop_all_references(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);

	kfree(t->rt_param.pfair);
	t->rt_param.pfair = NULL;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}
/*
 * mt65xx_mon_enable: Enable hardware monitors.
 * Return 0.
 */
int mt65xx_mon_enable(void)
{
	unsigned long flags;

	p_pmu->reset();

	// enable & start ARM performance monitors
	p_pmu->enable();
	p_pmu->start();

	// reset and enable L2C event counters
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__raw_writel(7, PL310_BASE + L2X0_EVENT_CNT_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	// stopping EMI monitors will reset all counters
	BM_Enable(0);
	// start EMI monitor counting
	BM_Enable(1);

	return 0;
}
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
#ifdef CONFIG_ILOCKDEP
	ilockdep_acquire(&sem->idep_map, _RET_IP_, (void *)sem);
#endif
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
#ifdef CONFIG_ILOCKDEP
	if (result)
		ilockdep_clear_locking(current);
	else
		ilockdep_acquired(&sem->idep_map, _RET_IP_, (void *)sem);
#endif
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
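/*
 * Usage sketch for down_interruptible() above; illustrative, not from
 * the original source. The my_dev_* names and the semaphore are
 * hypothetical, and returning -ERESTARTSYS rather than propagating
 * -EINTR is a per-driver policy choice.
 */
#include <linux/semaphore.h>
#include <linux/errno.h>

static struct semaphore my_dev_sem;

static void my_dev_setup(void)
{
	sema_init(&my_dev_sem, 1);	/* binary semaphore */
}

static int my_dev_open(void)
{
	/* may sleep; fails with -EINTR if a signal interrupts the wait */
	if (down_interruptible(&my_dev_sem))
		return -ERESTARTSYS;

	/* ... exclusive access to the device ... */

	up(&my_dev_sem);
	return 0;
}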
/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->lock, flags);

	/* mask compare A interrupt */
	em_sti_write(p, STI_INTENCLR, 1);

	/* update compare A value */
	em_sti_write(p, STI_COMPA_H, next >> 32);
	em_sti_write(p, STI_COMPA_L, next & 0xffffffff);

	/* clear compare A interrupt source */
	em_sti_write(p, STI_INTFFCLR, 1);

	/* unmask compare A interrupt */
	em_sti_write(p, STI_INTENSET, 1);

	raw_spin_unlock_irqrestore(&p->lock, flags);

	return next;
}
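/*
 * Sketch of the clockevents ->set_next_event() hook that would drive
 * em_sti_set_next(): program "current count + delta" and tell the core
 * whether the deadline may already have passed. This is an assumed
 * caller; em_sti_count() and ced_to_em_sti() are assumed helpers from
 * the same driver, not defined in the snippet above.
 */
static int em_sti_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct em_sti_priv *p = ced_to_em_sti(ced);
	cycle_t next;
	int safe;

	next = em_sti_set_next(p, em_sti_count(p) + delta);

	/* did the counter already run past the compare value? */
	safe = em_sti_count(p) < (next - 1);

	return !safe;	/* non-zero asks the core to retry */
}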
/**
 * of_remove_property - Remove a property from a node.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;
	unsigned long flags;
	int found = 0;
	int rc;

	rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop);
	if (rc)
		return rc;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = &np->properties;
	while (*next) {
		if (*next == prop) {
			/* found the node */
			*next = prop->next;
			prop->next = np->deadprops;
			np->deadprops = prop;
			found = 1;
			break;
		}
		next = &(*next)->next;
	}
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!found)
		return -ENODEV;

#ifdef CONFIG_PROC_DEVICETREE
	/* try to remove the proc node as well */
	if (np->pde)
		proc_device_tree_remove_prop(np->pde, prop);
#endif /* CONFIG_PROC_DEVICETREE */

	return 0;
}
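/*
 * Usage sketch (illustrative, not from the original source): look a
 * property up by name with of_find_property(), then retire it to the
 * dead-properties list. The "status" property and the function name
 * are examples only.
 */
#include <linux/of.h>

static int my_drop_status_property(struct device_node *np)
{
	struct property *prop;

	prop = of_find_property(np, "status", NULL);
	if (!prop)
		return -ENODEV;

	return of_remove_property(np, prop);
}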
static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
				     unsigned long *mask, unsigned long *bits)
{
	struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
	unsigned int i;
	const unsigned int gpio_reg_size = 8;
	unsigned int port;
	unsigned int out_port;
	unsigned int bitmask;
	unsigned long flags;

	/* set bits are evaluated a gpio register size at a time */
	for (i = 0; i < chip->ngpio; i += gpio_reg_size) {
		/* no more set bits in this mask word; skip to the next word */
		if (!mask[BIT_WORD(i)]) {
			i = (BIT_WORD(i) + 1) * BITS_PER_LONG - gpio_reg_size;
			continue;
		}

		port = i / gpio_reg_size;
		out_port = (port > 2) ? port + 1 : port;
		bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];

		raw_spin_lock_irqsave(&dio48egpio->lock, flags);

		/* update output state data and set device gpio register */
		dio48egpio->out_state[port] &= ~mask[BIT_WORD(i)];
		dio48egpio->out_state[port] |= bitmask;
		outb(dio48egpio->out_state[port], dio48egpio->base + out_port);

		raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);

		/* prepare for next gpio register set */
		mask[BIT_WORD(i)] >>= gpio_reg_size;
		bits[BIT_WORD(i)] >>= gpio_reg_size;
	}
}
static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;
	unsigned int __maybe_unused copy;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare case, multiple concurrent IPIs sent to same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpus can be
	 * "vectored" (multiple bits sets) as opposed to typical single bit
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (c != __ffs(copy))
		pr_info("IPIs from %x coalesced to %x\n",
			copy, raw_smp_processor_id());
#endif
}
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
static void dio48e_irq_mask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
	struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
	const unsigned long offset = irqd_to_hwirq(data);
	unsigned long flags;

	/* only bit 3 on each respective Port C supports interrupts */
	if (offset != 19 && offset != 43)
		return;

	raw_spin_lock_irqsave(&dio48egpio->lock, flags);

	if (offset == 19)
		dio48egpio->irq_mask &= ~BIT(0);
	else
		dio48egpio->irq_mask &= ~BIT(1);

	if (!dio48egpio->irq_mask)
		/* disable interrupts */
		inb(dio48egpio->base + 0xB);

	raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	if (likely(rt_mutex_owner(lock) != current)) {

		init_lists(lock);

		ret = try_to_take_rt_mutex(lock, current, NULL);

		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}
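/*
 * Caller-side sketch: the slow path above backs the public
 * rt_mutex_trylock() API. The lock name and function are illustrative,
 * not from the original source.
 */
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(my_rtm);

static void my_try_fast_path(void)
{
	if (rt_mutex_trylock(&my_rtm)) {
		/* ... short critical section ... */
		rt_mutex_unlock(&my_rtm);
	}
	/* else: fall back without blocking */
}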
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
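/*
 * Usage sketch (illustrative names): queue a callback that runs when
 * the task heads back to user space, then cancel it if it has not run.
 * The bool "notify" form of task_work_add() matches the era of the
 * code above; later kernels take an enum instead.
 */
#include <linux/task_work.h>

static void my_twork_fn(struct callback_head *head)
{
	/* executes in task context, on the way back to user space */
}

static struct callback_head my_twork;

static void my_queue_and_cancel(void)
{
	init_task_work(&my_twork, my_twork_fn);
	if (task_work_add(current, &my_twork, true))
		return;		/* task is exiting; work was not queued */

	/* ... changed our mind before it ran ... */
	if (task_work_cancel(current, my_twork_fn))
		pr_debug("work was still pending and is now removed\n");
}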
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		/* restore the interrupt state saved by the irqsave above */
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock,
				      flags, 0);

	if (ret == -EDEADLK && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter, flags);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
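/*
 * Sketch of the FUTEX_REQUEUE_PI pairing named in the kernel-doc
 * above: the requeueing side starts the acquisition on the sleeping
 * waiter's behalf, and the woken waiter finishes it with
 * rt_mutex_finish_proxy_lock(). All my_* names are illustrative; see
 * futex_requeue()/futex_wait_requeue_pi() for the real callers, and
 * note the rt_mutex_finish_proxy_lock() signature shown here matches
 * kernels of this vintage.
 */
static int my_requeue_side(struct rt_mutex *pi_mutex,
			   struct rt_mutex_waiter *rt_waiter,
			   struct task_struct *task)
{
	int ret = rt_mutex_start_proxy_lock(pi_mutex, rt_waiter, task, 0);

	if (ret == 1)
		wake_up_process(task);	/* lock acquired on its behalf */
	return ret;
}

static int my_waiter_side(struct rt_mutex *pi_mutex,
			  struct hrtimer_sleeper *to,
			  struct rt_mutex_waiter *rt_waiter)
{
	/* woken up: complete what the requeueing side started */
	return rt_mutex_finish_proxy_lock(pi_mutex, to, rt_waiter, 0);
}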
/**
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Spin until the hard IRQ handler is no longer marked
		 * in progress; this unlocked check can race, so it is
		 * only a hint.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Recheck under desc->lock to get a stable answer. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	} while (inprogress);

	/* Also wait for any threaded handlers to finish. */
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));
}
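/*
 * Usage sketch (hypothetical driver; my_dev and its fields are
 * illustrative): make sure the hard IRQ handler has finished before
 * tearing down state it might touch.
 */
struct my_dev {
	unsigned int irq;
	bool shutting_down;
};

static void my_dev_stop(struct my_dev *dev)
{
	dev->shutting_down = true;	/* checked by the IRQ handler */

	/* wait out any handler invocation still in flight */
	synchronize_irq(dev->irq);

	/* the handler can no longer be using our state; free it now */
}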
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
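/*
 * Registration sketch (illustrative names): how a driver arranges for
 * the work item above to call back into it, via
 * irq_set_affinity_notifier(). Only that API and the two callback
 * signatures come from the kernel; everything my_* is hypothetical.
 */
#include <linux/interrupt.h>

static void my_affinity_changed(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	/* re-balance queues, rebind per-cpu state, etc. */
}

static void my_affinity_release(struct kref *ref)
{
	/* last reference dropped; nothing dynamic to free here */
}

static struct irq_affinity_notify my_affinity_notify = {
	.notify		= my_affinity_changed,
	.release	= my_affinity_release,
};

static int my_register_affinity_notify(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &my_affinity_notify);
}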
/*
 * Check, if the new registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
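/*
 * Pairing sketch: lock_hrtimer_base()/unlock_hrtimer_base() are both
 * internal hrtimer.c helpers, so a caller like this would live in that
 * file; my_inspect_timer() is illustrative. Holding the base lock
 * pins timer->base against concurrent migration.
 */
static int my_inspect_timer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int active;

	base = lock_hrtimer_base(timer, &flags);
	/* timer->base cannot change while the base lock is held */
	active = hrtimer_active(timer);
	unlock_hrtimer_base(timer, &flags);

	return active;
}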
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
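/*
 * Pairing sketch: core code reaches __irq_put_desc_unlock() with
 * bus == true through the irq_get_desc_buslock()/irq_put_desc_busunlock()
 * wrappers. my_poke_irq() is illustrative, and the third "check"
 * argument to irq_get_desc_buslock() assumes the era of this API.
 */
static int my_poke_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	/* ... modify chip/descriptor state under desc->lock ... */

	irq_put_desc_busunlock(desc, flags);
	return 0;
}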
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		chain_walk = 1;
	}

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * It will get the lock because of one of these conditions:
	 * 1) there is no waiter
	 * 2) higher priority than waiters
	 * 3) it is top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter(if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}

		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hickup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
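/*
 * Usage sketch (illustrative): a per-cpu timer driver whose counter
 * stops in deep idle asks for broadcast coverage as each CPU comes up.
 * The tick_broadcast_enable()/tick_broadcast_disable() inlines wrap
 * tick_broadcast_control(TICK_BROADCAST_ON / TICK_BROADCAST_OFF); the
 * my_local_timer_* hotplug callbacks are hypothetical.
 */
#include <linux/tick.h>

static int my_local_timer_starting_cpu(unsigned int cpu)
{
	/* ... register this CPU's clock_event_device here ... */
	tick_broadcast_enable();
	return 0;
}

static int my_local_timer_dying_cpu(unsigned int cpu)
{
	tick_broadcast_disable();
	return 0;
}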