static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1,2,4,8... */

        /*
         * In rare case, multiple concurrent IPIs sent to same target can
         * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
         * "vectored" (multiple bits set) as opposed to typical single bit
         */
        do {
                c = __ffs(cpu);                         /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;
        unsigned int __maybe_unused copy;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
        copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);       /* 1,2,4,8... */

        /*
         * In rare case, multiple concurrent IPIs sent to same target can
         * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
         * "vectored" (multiple bits set) as opposed to typical single bit
         */
        do {
                c = __ffs(cpu);                         /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
        if (c != __ffs(copy))
                pr_info("IPIs from %x coalesced to %x\n",
                        copy, raw_smp_processor_id());
#endif
}
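The acknowledgement loop in both versions walks the sender bitmap one set bit at a time, because MCIP may coalesce several concurrent IPIs into a single asserted IRQ. Below is a minimal userspace sketch of that same pattern; it is illustrative only, stands in for the kernel's __ffs() with GCC's __builtin_ctz(), and replaces the CMD_INTRPT_GENERATE_ACK command with a printf().

/*
 * Hedged sketch only: walking a coalesced sender bitmap, as
 * mcip_ipi_clear() does. Not kernel code; names are illustrative.
 */
#include <stdio.h>

static void ack_senders(unsigned int cpu_mask)
{
        unsigned int c;

        /* cpu_mask must be non-zero, mirroring the do/while in the driver */
        do {
                c = (unsigned int)__builtin_ctz(cpu_mask); /* lowest set bit: 0,1,2,3... */
                printf("ACK IPI from cpu %u\n", c);        /* driver issues CMD_INTRPT_GENERATE_ACK here */
                cpu_mask &= ~(1U << c);                    /* clear that sender and repeat */
        } while (cpu_mask);
}

int main(void)
{
        ack_senders(0x5);       /* bits 0 and 2 set: two coalesced senders */
        return 0;
}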
static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /* ARConnect can only send IPI to others */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If receiver already has a pending interrupt, elide sending this one.
         * Linux cross core calling works well with concurrent IPIs
         * coalesced into one
         * see arch/arc/kernel/smp.c: ipi_send_msg_one()
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /*
         * NOTE: We must spin here if the other cpu hasn't yet
         * serviced a previous message. This can burn lots
         * of time, but we MUST follow this protocol or
         * ipi messages can be lost!!!
         * Also, we must release the lock in this loop because
         * the other side may get to this same loop and not
         * be able to ack -- thus causing deadlock.
         */
        do {
                raw_spin_lock_irqsave(&mcip_lock, flags);
                __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
                ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
                if (ipi_was_pending == 0)
                        break;  /* break out but keep lock */
                raw_spin_unlock_irqrestore(&mcip_lock, flags);
        } while (1);

        __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
        if (ipi_was_pending)
                pr_info("IPI ACK delayed from cpu %d\n", cpu);
#endif
}
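The NOTE in this older sender is the key point: the lock must be dropped on every failed poll, otherwise two CPUs spinning on each other's pending IPIs would deadlock. The following is a minimal pthread sketch of that acquire/check/release-and-retry shape; fake_mcip_lock and the ipi_pending flag are illustrative stand-ins, not the kernel's locking or MCIP commands.

/*
 * Hedged sketch only: the "poll under lock, drop lock and retry" pattern
 * used by the older mcip_ipi_send(). Names here are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fake_mcip_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool ipi_pending;       /* stands in for CMD_INTRPT_READ_STATUS readback */

static void send_ipi_when_clear(void)
{
        for (;;) {
                pthread_mutex_lock(&fake_mcip_lock);
                if (!ipi_pending)
                        break;          /* exit the loop but keep the lock held */
                /*
                 * Drop the lock before retrying: if the receiver needs the
                 * same lock to ack its pending IPI, holding it here would
                 * deadlock both sides.
                 */
                pthread_mutex_unlock(&fake_mcip_lock);
        }

        ipi_pending = true;             /* stands in for CMD_INTRPT_GENERATE_IRQ */
        printf("IPI sent\n");
        pthread_mutex_unlock(&fake_mcip_lock);
}

int main(void)
{
        send_ipi_when_clear();
        return 0;
}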
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        /* Read IDU BCR to confirm nr_irqs */
        int nr_irqs = of_irq_count(intc);
        int i, irq;

        if (!idu_detected)
                panic("IDU not detected, but DeviceTree using it");

        pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */
        for (i = 0; i < nr_irqs; i++) {
                /*
                 * Return parent uplink IRQs (towards core intc) 24,25,.....
                 * this step has been done before already
                 * however we need it to get the parent virq and set IDU handler
                 * as first level isr
                 */
                irq = irq_of_parse_and_map(intc, i);
                if (!i)
                        idu_first_irq = irq;

                irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
static cycle_t arc_read_gfrc(struct clocksource *cs)
{
        unsigned long flags;
        u32 l, h;

        local_irq_save(flags);

        __mcip_cmd(CMD_GFRC_READ_LO, 0);
        l = read_aux_reg(ARC_REG_MCIP_READBACK);

        __mcip_cmd(CMD_GFRC_READ_HI, 0);
        h = read_aux_reg(ARC_REG_MCIP_READBACK);

        local_irq_restore(flags);

        return (((cycle_t)h) << 32) | l;
}
static cycle_t arc_counter_read(struct clocksource *cs)
{
        unsigned long flags;
        union {
#ifdef CONFIG_CPU_BIG_ENDIAN
                struct { u32 h, l; };
#else
                struct { u32 l, h; };
#endif
                cycle_t full;
        } stamp;

        local_irq_save(flags);

        __mcip_cmd(CMD_GRTC_READ_LO, 0);
        stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);

        __mcip_cmd(CMD_GRTC_READ_HI, 0);
        stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);

        local_irq_restore(flags);

        return stamp.full;
}
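Both readers fetch the 64-bit counter as two 32-bit readback halves, with interrupts disabled so the per-CPU command/readback pairs cannot be interleaved by an interrupt handler issuing its own MCIP commands, and then reassemble the value: arc_read_gfrc() with an explicit shift-or, arc_counter_read() with an endianness-aware union. Below is a small standalone sketch of just the reassembly step; it is illustrative only and hard-codes the little-endian union layout, as noted in the comments.

/*
 * Hedged sketch only: reassembling a 64-bit timestamp from 32-bit
 * low/high halves, mirroring the two styles used above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t combine_shift(uint32_t lo, uint32_t hi)
{
        return (((uint64_t)hi) << 32) | lo;     /* style of arc_read_gfrc() */
}

static uint64_t combine_union(uint32_t lo, uint32_t hi)
{
        union {
                struct { uint32_t l, h; } s;    /* little-endian layout; the kernel
                                                   version flips the order for big-endian */
                uint64_t full;
        } stamp;

        stamp.s.l = lo;
        stamp.s.h = hi;
        return stamp.full;                      /* style of arc_counter_read() */
}

int main(void)
{
        uint32_t lo = 0xdeadbeef, hi = 0x1;

        /* Both forms print 0x1deadbeef on a little-endian host */
        printf("%#llx %#llx\n",
               (unsigned long long)combine_shift(lo, hi),
               (unsigned long long)combine_union(lo, hi));
        return 0;
}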
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        int nr_irqs;
        int i, virq;
        struct mcip_bcr mp;
        struct mcip_idu_bcr idu_bcr;

        READ_BCR(ARC_REG_MCIP_BCR, mp);
        if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");

        READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
        nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
        pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */
        for (i = 0; i < nr_irqs; i++) {
                /* Mask all common interrupts by default */
                idu_irq_mask_raw(i);

                /*
                 * Return parent uplink IRQs (towards core intc) 24,25,.....
                 * this step has been done before already
                 * however we need it to get the parent virq and set IDU handler
                 * as first level isr
                 */
                virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
                BUG_ON(!virq);
                irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}