/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc,
			  struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* Make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
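/*
 * Note: irq_do_set_affinity(), called just above, is not part of this
 * listing. The sketch below is a rough reconstruction of what it plausibly
 * does in kernels of this vintage, inferred from the open-coded variants
 * further down (which call chip->irq_set_affinity() and handle
 * IRQ_SET_MASK_OK / IRQ_SET_MASK_OK_NOCOPY themselves). Treat it as an
 * illustration, not the authoritative implementation.
 */
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		/* The chip did not cache the new mask itself, do it here */
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		/* In both OK cases, propagate the mask to the irq thread(s) */
		irq_set_thread_affinity(desc);
		ret = 0;
	}
	return ret;
}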
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc,
			  struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
static int init_cpu_pmu(void)
{
	int i, irqs, err = 0;
	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];

	if (!pdev)
		return -ENODEV;

	irqs = pdev->num_resources;

	/*
	 * If we have a single PMU interrupt that we can't shift, assume that
	 * we're running on a uniprocessor machine and continue.
	 */
	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
		return 0;

	for (i = 0; i < irqs; ++i) {
		err = set_irq_affinity(platform_get_irq(pdev, i), i);
		if (err)
			break;
	}

	return err;
}
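/*
 * set_irq_affinity() as used above (and in init_pmu_single() below) is an
 * ARM perf-event helper that is not shown in this listing. A minimal sketch
 * of what such a helper looks like, assuming the historical
 * arch/arm/kernel/perf_event.c behaviour (warn and return the error when the
 * IRQ cannot be routed to the requested CPU):
 */
static int set_irq_affinity(int irq, unsigned int cpu)
{
#ifdef CONFIG_SMP
	int err = irq_set_affinity(irq, cpumask_of(cpu));

	if (err)
		pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
			irq, cpu);
	return err;
#else
	/* On UP there is nothing to route */
	return 0;
#endif
}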
/*
 * Generic version of the affinity autoselector.
 */
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
{
	cpumask_t mask;

	if (!irq_can_set_affinity(irq))
		return 0;

	cpus_and(mask, cpu_online_map, irq_default_affinity);

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpus_intersects(desc->affinity, cpu_online_map))
			mask = desc->affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	desc->affinity = mask;
	desc->chip->set_affinity(irq, mask);

	return 0;
}
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
	if (desc->node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(desc->node);

		/* Make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(desc->affinity, nodemask))
			cpumask_and(desc->affinity, desc->affinity, nodemask);
	}

set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}
int init_pmu_single(int cpu)
{
	int irqs, err = 0;
	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];

	if (!pdev)
		return -ENODEV;

	irqs = pdev->num_resources;

	/*
	 * If we have a single PMU interrupt that we can't shift, assume that
	 * we're running on a uniprocessor machine and continue.
	 */
	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
		return 0;

	err = set_irq_affinity(platform_get_irq(pdev, cpu), cpu);

	return err;
}
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->irq_data.affinity, cpu_online_mask,
		    irq_default_affinity);
set_affinity:
	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
					      desc->irq_data.affinity, false);

	return 0;
}
static int setup_affinity(unsigned int irq, struct irq_desc *desc,
			  struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* Make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}

	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
/*
 * Check if the newly registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt cannot
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we already have a cpu local device, do not replace it
		 * by a non cpu local device.
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices!
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the possibly existing device by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return ret;
}
static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * Prep the low-level portal struct with the mapped addresses from
	 * the config; everything that follows depends on it, and "config"
	 * is more for (de)reference.
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled; we enable them as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}
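/*
 * Hypothetical stand-alone example (not taken from the sources above): the
 * common driver pattern is the one bman_create_portal() uses, i.e. request
 * the IRQ first and only try to pin it when the line is actually movable.
 * The function name, the "my_dev" string and the dev cookie below are
 * illustrative assumptions.
 */
static int my_request_and_pin_irq(int irq, int cpu, irq_handler_t handler,
				  void *dev)
{
	int err;

	err = request_irq(irq, handler, 0, "my_dev", dev);
	if (err)
		return err;

	/* Pinning is skipped for PER_CPU/NO_BALANCE lines */
	if (cpu >= 0 && irq_can_set_affinity(irq)) {
		err = irq_set_affinity(irq, cpumask_of(cpu));
		if (err) {
			free_irq(irq, dev);
			return err;
		}
	}
	return 0;
}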