/*
 * ps3_event_receive_port_destroy - Mask and destroy an LV1 event receive port.
 * @virq: virtual irq number of the port to destroy
 *
 * Masks the interrupt first, then asks the hypervisor to destruct the
 * underlying event receive port.  Returns the LV1 result code (0 on success).
 */
int ps3_event_receive_port_destroy(unsigned int virq)
{
	int result;

	DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);

	/* Stop delivery before the port goes away. */
	ps3_chip_mask(irq_get_irq_data(virq));

	result = lv1_destruct_event_receive_port(virq_to_hw(virq));

	if (result)
		FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
		     __func__, __LINE__, ps3_result(result));

	/*
	 * NOTE(review): the irq plug itself is not destroyed here --
	 * presumably the caller is responsible; confirm against callers.
	 */

	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/*
 * Chained handler for the Balloon3 auxiliary interrupt.  Dispatches every
 * pending enabled sub-interrupt, then re-reads the pending mask so that
 * interrupts raised while handling are not lost.
 */
static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
					balloon3_irq_enabled;
	do {
		/* clear useless edge notification */
		if (desc->irq_data.chip->irq_ack) {
			struct irq_data *d;

			d = irq_get_irq_data(BALLOON3_AUX_NIRQ);
			desc->irq_data.chip->irq_ack(d);
		}
		while (pending) {
			/* dispatch lowest pending bit first */
			irq = BALLOON3_IRQ(0) + __ffs(pending);
			generic_handle_irq(irq);
			pending &= pending - 1; /* clear lowest set bit */
		}
		pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
				balloon3_irq_enabled;
	} while (pending);
}
/*
 * migrate_platform_irqs - move platform interrupts off a cpu going offline
 * @cpu: the cpu being removed
 *
 * If @cpu is the CPEI (corrected platform error interrupt) target, attempt
 * to retarget the CPEI to another online cpu.  Returns 0 on success, or
 * -EBUSY when the CPEI cannot be retargeted (the cpu must stay online).
 */
int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_data *data = NULL;
	const struct cpumask *mask;
	int retval = 0;

	/*
	 * dont permit CPEI target to removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 * (assumes @cpu is already out of cpu_online_mask
			 * here -- verify against the hotplug call site).
			 */
			new_cpei_cpu = cpumask_any(cpu_online_mask);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			data = irq_get_irq_data(ia64_cpe_irq);
			/*
			 * Switch for now, immediately, we need to do fake intr
			 * as other interrupts, but need to study CPEI behaviour with
			 * polling before making changes.
			 */
			if (data && data->chip) {
				data->chip->irq_disable(data);
				data->chip->irq_set_affinity(data, mask, false);
				data->chip->irq_enable(data);
				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		/* data is still NULL if retargeting was impossible or failed */
		if (!data) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/*
 * Unmask a BCM63xx external interrupt: set the mask-enable bit in the
 * external irq config register (read-modify-write under epic_lock).  When
 * external irqs are cascaded through the internal controller, the parent
 * internal irq is unmasked as well.
 */
static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	/* the 6348 has a different bit layout for the mask field */
	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start), NULL);
}
/*
 * max8973_thermal_init - register the MAX77621 thermal zone and alert irq
 * @mchip: chip instance
 *
 * Only the MAX77621 variant has the thermal sensor; other ids return 0.
 * The irq is optional: when absent the sensor is still registered but no
 * alert interrupt is requested.  Returns 0 on success or a negative errno.
 */
static int max8973_thermal_init(struct max8973_chip *mchip)
{
	struct thermal_zone_device *tzd;
	struct irq_data *irq_data;
	unsigned long irq_flags = 0;
	int ret;

	if (mchip->id != MAX77621)
		return 0;

	tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
						   &max77621_tz_ops);
	if (IS_ERR(tzd)) {
		ret = PTR_ERR(tzd);
		dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
			ret);
		return ret;
	}

	if (mchip->irq <= 0)
		return 0;

	/* inherit the trigger type already configured on the irq line */
	irq_data = irq_get_irq_data(mchip->irq);
	if (irq_data)
		irq_flags = irqd_get_trigger_type(irq_data);

	ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
					max8973_thermal_irq,
					IRQF_ONESHOT | IRQF_SHARED | irq_flags,
					dev_name(mchip->dev), mchip);
	if (ret < 0) {
		dev_err(mchip->dev, "Failed to request irq %d, %d\n",
			mchip->irq, ret);
		return ret;
	}

	return 0;
}
/*
 * idu_of_init - set up the ARC MCIP IDU interrupt controller from DT
 * @intc: IDU device tree node
 * @parent: parent interrupt controller node (core-intc)
 *
 * Creates a linear irq domain for the IDU and installs the IDU cascade
 * handler on every parent (core-intc) interrupt.  Panics if the hardware
 * does not actually have an IDU.  Always returns 0.
 */
static int __init idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, virq;
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 * as first level isr
		 */
		virq = irq_of_parse_and_map(intc, i);
		if (!i)
			/* remember the first hwirq as the IDU base offset */
			idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));

		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
int irq_select_affinity(unsigned int irq) { struct irq_data *data = irq_get_irq_data(irq); struct irq_chip *chip; static int last_cpu; int cpu = last_cpu + 1; if (!data) return 1; chip = irq_data_get_irq_chip(data); if (!chip->irq_set_affinity || irq_user_affinity[irq]) return 1; while (!cpu_possible(cpu) || !cpumask_test_cpu(cpu, irq_default_affinity)) cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); last_cpu = cpu; cpumask_copy(data->affinity, cpumask_of(cpu)); chip->irq_set_affinity(data, cpumask_of(cpu), false); return 0; }
/*
 * ps3_io_irq_destroy - Tear down an LV1 I/O interrupt outlet.
 * @virq: virtual irq number bound to the outlet
 *
 * Masks the interrupt, destroys the irq plug, then asks the hypervisor to
 * destruct the outlet.  Returns the LV1 result code (0 on success).
 */
int ps3_io_irq_destroy(unsigned int virq)
{
	int result;
	unsigned long outlet = virq_to_hw(virq);

	ps3_chip_mask(irq_get_irq_data(virq));

	/*
	 * lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
	 * so call ps3_irq_plug_destroy() first.
	 */

	result = ps3_irq_plug_destroy(virq);
	BUG_ON(result);

	result = lv1_destruct_io_irq_outlet(outlet);

	if (result)
		pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	return result;
}
/*
 * ps3_io_irq_destroy - Tear down an LV1 I/O interrupt outlet.
 * @virq: virtual irq number bound to the outlet
 *
 * Masks the interrupt, destroys the irq plug, then asks the hypervisor to
 * destruct the outlet.  Returns the LV1 result code (0 on success).
 */
int ps3_io_irq_destroy(unsigned int virq)
{
	int result;
	unsigned long outlet = virq_to_hw(virq);

	ps3_chip_mask(irq_get_irq_data(virq));

	/*
	 * lv1_destruct_io_irq_outlet() destroys the IRQ plug as a side
	 * effect, so the plug must be destroyed first.
	 */

	result = ps3_irq_plug_destroy(virq);
	BUG_ON(result);

	result = lv1_destruct_io_irq_outlet(outlet);

	if (result)
		FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	return result;
}
/*
 * stmp3xxx_pinmux_init - register the three STMP3xxx GPIO banks
 * @virtual_irq_start: base virq for bank 0 (banks occupy 32 virqs each)
 *
 * For each of the three GPIO-capable banks this fills in the gpio_chip
 * callbacks, masks and configures every per-pin interrupt, registers the
 * chip, and installs the chained bank handler.  Returns 0 on success or
 * the first gpiochip_add() error.
 *
 * Bug fix: the per-pin irq setup loop previously used the condition
 * "virq < pm->virq", which is false on entry, so the loop body never ran
 * and no per-pin irq chip/handler was ever installed.  The bank spans 32
 * pins (chip.ngpio = 32), so the bound is pm->virq + 32.
 */
int __init stmp3xxx_pinmux_init(int virtual_irq_start)
{
	int b, r = 0;
	struct stmp3xxx_pinmux_bank *pm;
	int virq;

	for (b = 0; b < 3; b++) {
		/* only banks 0,1,2 are allowed to GPIO */
		pm = pinmux_banks + b;
		pm->chip.base = 32 * b;
		pm->chip.ngpio = 32;
		pm->chip.owner = THIS_MODULE;
		pm->chip.can_sleep = 1;
		pm->chip.exported = 1;
		pm->chip.to_irq = stmp3xxx_gpio_to_irq;
		pm->chip.direction_input = stmp3xxx_gpio_input;
		pm->chip.direction_output = stmp3xxx_gpio_output;
		pm->chip.get = stmp3xxx_gpio_get;
		pm->chip.set = stmp3xxx_gpio_set;
		pm->chip.request = stmp3xxx_gpio_request;
		pm->chip.free = stmp3xxx_gpio_free;
		pm->virq = virtual_irq_start + b * 32;

		/* set up one virq per pin in this 32-pin bank */
		for (virq = pm->virq; virq < pm->virq + 32; virq++) {
			gpio_irq_chip.irq_mask(irq_get_irq_data(virq));
			set_irq_chip(virq, &gpio_irq_chip);
			set_irq_handler(virq, handle_level_irq);
			set_irq_flags(virq, IRQF_VALID);
		}
		r = gpiochip_add(&pm->chip);
		if (r < 0)
			break;
		set_irq_chained_handler(pm->irq, stmp3xxx_gpio_irq);
		set_irq_data(pm->irq, pm);
	}
	return r;
}
/*
 * deferred_fiq - IRQ-context follow-up for GPIO events counted by the FIQ
 *
 * The FIQ handler only increments per-gpio counters; this handler replays
 * the missed generic_handle_irq() calls until the IRQ counters catch up
 * with the FIQ counters.
 */
static irqreturn_t deferred_fiq(int irq, void *dev_id)
{
	int gpio, irq_num, fiq_count;
	struct irq_chip *irq_chip;

	/* all keyboard-range GPIOs share the same irq_chip */
	irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));

	/*
	 * For each handled GPIO interrupt, keep calling its interrupt handler
	 * until the IRQ counter catches the FIQ incremented interrupt counter.
	 */
	for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
			gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
		irq_num = gpio_to_irq(gpio);
		fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];

		if (irq_counter[gpio] < fiq_count &&
				gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
			struct irq_data *d = irq_get_irq_data(irq_num);

			/*
			 * handle_simple_irq() that OMAP GPIO edge
			 * interrupts default to since commit 80ac93c27441
			 * requires interrupt already acked and unmasked.
			 */
			if (irq_chip) {
				if (irq_chip->irq_ack)
					irq_chip->irq_ack(d);
				if (irq_chip->irq_unmask)
					irq_chip->irq_unmask(d);
			}
		}
		/* replay every event the FIQ saw but the IRQ side missed */
		for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
			generic_handle_irq(irq_num);
	}
	return IRQ_HANDLED;
}
/* Called from the FIQ asm handler */
/*
 * msm7k_fiq_handler - debug FIQ: mask the source, dump a backtrace from
 * the saved FIQ-entry register context, and flush caches so the dump
 * survives a reset.  Runs in FIQ context; keep it self-contained.
 */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs ctx_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	/* mask first so the FIQ does not retrigger while we dump state */
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();

	/* rebuild a pt_regs from the context captured at FIQ entry */
	ctx_regs.ARM_pc = msm_dump_cpu_ctx.fiq_r14;
	ctx_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	ctx_regs.ARM_sp = msm_dump_cpu_ctx.svc_r13;
	ctx_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11;

#ifdef CONFIG_SEC_DEBUG
	do {
		extern void sec_save_final_context(void);
		sec_save_final_context();
	} while (0);
#endif

	unwind_backtrace(&ctx_regs, current);

#ifdef CONFIG_SMP
	smp_send_all_cpu_backtrace();
#endif

	/* push the dump out of the caches before any reset */
	flush_cache_all();
	outer_flush_all();
	return;
}
/**
 * of_irq_to_resource - Decode a node's IRQ and return it as a resource
 * @dev: pointer to device tree node
 * @index: zero-based index of the irq
 * @r: pointer to resource structure to return result into.
 *
 * Returns the mapped virq (0 when the mapping failed).  @r is only filled
 * in when both @r and the irq are valid.
 */
int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
{
	int irq = irq_of_parse_and_map(dev, index);

	/* Only dereference the resource if both the
	 * resource and the irq are valid. */
	if (r && irq) {
		const char *name = NULL;

		memset(r, 0, sizeof(*r));
		/*
		 * Get optional "interrupt-names" property to add a name
		 * to the resource.
		 */
		of_property_read_string_index(dev, "interrupt-names", index,
					      &name);

		r->start = r->end = irq;
		/* carry the DT-configured trigger type into the resource */
		r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
		r->name = name ? name : of_node_full_name(dev);
	}

	return irq;
}
/*
 * gpio_keys_gpio_isr - interrupt handler for a gpio-keys button
 *
 * Restores the configured trigger type if a deep-sleep wake changed it,
 * keeps the system awake for wakeup buttons, then defers debouncing to a
 * timer or the workqueue.
 */
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
	struct gpio_button_data *bdata = dev_id;

	BUG_ON(irq != bdata->irq);

	/*
	 * Wake from deep sleep arrives as an edge interrupt; switch the
	 * line back to the originally requested trigger type.
	 */
	if (irqd_get_trigger_type(irq_get_irq_data(irq)) != bdata->irqflags) {
		pr_info("wake up cpu from deepsleep by gpio edge interrupt!");
		irq_set_irq_type(bdata->irq, bdata->irqflags);
	}
	if (bdata->is_deepsleep) {
		bdata->is_deepsleep = false;
	}

	if (bdata->button->wakeup)
		pm_stay_awake(bdata->input->dev.parent);

	/* debounce via timer when configured, otherwise handle in work */
	if (bdata->timer_debounce)
		mod_timer(&bdata->timer,
			jiffies + msecs_to_jiffies(bdata->timer_debounce));
	else
		schedule_work(&bdata->work);

	return IRQ_HANDLED;
}
/**
 * irq_domain_add() - Register an irq_domain
 * @domain: ptr to initialized irq_domain structure
 *
 * Registers an irq_domain structure.  The irq_domain must at a minimum be
 * initialized with an ops structure pointer, and either a ->to_irq hook or
 * a valid irq_base value.  Everything else is optional.
 */
void irq_domain_add(struct irq_domain *domain)
{
	struct irq_data *d;
	int hwirq, irq;

	/*
	 * This assumes that the irq_domain owner has already allocated
	 * the irq_descs. This block will be removed when support for dynamic
	 * allocation of irq_descs is added to irq_domain.
	 */
	irq_domain_for_each_irq(domain, hwirq, irq) {
		d = irq_get_irq_data(irq);
		if (!d) {
			WARN(1, "error: assigning domain to non existant irq_desc");
			return;
		}
		if (d->domain) {
			/* things are broken; just report, don't clean up */
			WARN(1, "error: irq_desc already assigned to a domain");
			return;
		}
		/* claim the descriptor for this domain */
		d->domain = domain;
		d->hwirq = hwirq;
	}
	/*
	 * NOTE(review): this snippet appears truncated here -- the
	 * function's closing brace (and any domain-list registration)
	 * is not visible in this chunk.
	 */
/* MMP (ARMv5) */
/*
 * icu_init_irq - legacy (non-DT) bring-up of the MMP interrupt controller
 *
 * Maps the ICU registers, fills in the single controller instance, masks
 * and configures all 64 interrupts, and makes its legacy domain the
 * default host.
 */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		/* start masked; drivers unmask on request_irq() */
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_PXA910
	icu_irq_chip.irq_set_wake = pxa910_set_wake;
#endif
}
/*
 * pm8901_resume - restore PM8901 irq state after suspend
 *
 * Re-unmasks every configured, non-wakeup sub-interrupt that is not fully
 * masked, then re-enables the summary irq if no wakeup sources kept it
 * enabled across suspend.  The per-iteration lock keeps each config slot
 * consistent with the irq code.
 */
static int pm8901_resume(struct platform_device *pdev)
{
	struct pm8901_chip *chip;
	int i;
	unsigned long irqsave;

	chip = platform_get_drvdata(pdev);

	for (i = 0; i < MAX_PM_IRQ; i++) {
		spin_lock_irqsave(&chip->pm_lock, irqsave);
		if (chip->config[i] && !chip->wake_enable[i]) {
			/* skip interrupts that are deliberately fully masked */
			if (!((chip->config[i] & PM8901_IRQF_MASK_ALL)
			      == PM8901_IRQF_MASK_ALL))
				pm8901_irq_unmask(
					irq_get_irq_data(i + chip->pdata.irq_base));
		}
		spin_unlock_irqrestore(&chip->pm_lock, irqsave);
	}

	/* summary irq was disabled at suspend if nothing could wake us */
	if (!chip->count_wakeable)
		enable_irq(chip->pdata.irq);

	return 0;
}
/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq: linux irq number to be destroyed
 * @dest: cpumask of cpus which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system
 * destroying all virqs associated with them.
 *
 * Return 0 on success or error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		/* one virq per cpu: translate @dest into a virq range */
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
/*
 * wl1271_probe - SDIO probe for TI wl12xx/wl18xx WLAN chips
 *
 * Allocates glue state, reads irq/wakeirq configuration from DT, selects
 * the chip family by SDIO revision (hardware reports identical device IDs
 * for wl12xx and wl18xx), and spawns the matching core platform device
 * with the irqs exposed as IORESOURCE_IRQ resources.
 */
static int wl1271_probe(struct sdio_func *func,
				  const struct sdio_device_id *id)
{
	struct wlcore_platdev_data *pdev_data;
	struct wl12xx_sdio_glue *glue;
	struct resource res[2];
	mmc_pm_flag_t mmcflags;
	int ret = -ENOMEM;
	int irq, wakeirq, num_irqs;
	const char *chip_family;

	/* We are only able to handle the wlan function */
	if (func->num != 0x02)
		return -ENODEV;

	pdev_data = devm_kzalloc(&func->dev, sizeof(*pdev_data), GFP_KERNEL);
	if (!pdev_data)
		return -ENOMEM;

	pdev_data->if_ops = &sdio_ops;

	glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	glue->dev = &func->dev;

	/* Grab access to FN0 for ELP reg. */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* Use block mode for transferring over one block size of data */
	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;

	ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data);
	if (ret)
		goto out;

	/* if sdio can keep power while host is suspended, enable wow */
	mmcflags = sdio_get_host_pm_caps(func);
	dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);

	if (mmcflags & MMC_PM_KEEP_POWER)
		pdev_data->pwr_in_suspend = true;

	sdio_set_drvdata(func, glue);

	/* Tell PM core that we don't need the card to be powered now */
	pm_runtime_put_noidle(&func->dev);

	/*
	 * Due to a hardware bug, we can't differentiate wl18xx from
	 * wl12xx, because both report the same device ID. The only
	 * way to differentiate is by checking the SDIO revision,
	 * which is 3.00 on the wl18xx chips.
	 */
	if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
		chip_family = "wl18xx";
	else
		chip_family = "wl12xx";

	glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO);
	if (!glue->core) {
		dev_err(glue->dev, "can't allocate platform_device");
		ret = -ENOMEM;
		goto out;
	}

	glue->core->dev.parent = &func->dev;

	memset(res, 0x00, sizeof(res));

	/* propagate the DT-configured trigger types with the irq numbers */
	res[0].start = irq;
	res[0].flags = IORESOURCE_IRQ |
		       irqd_get_trigger_type(irq_get_irq_data(irq));
	res[0].name = "irq";

	if (wakeirq > 0) {
		res[1].start = wakeirq;
		res[1].flags = IORESOURCE_IRQ |
			       irqd_get_trigger_type(irq_get_irq_data(wakeirq));
		res[1].name = "wakeirq";
		num_irqs = 2;
	} else {
		num_irqs = 1;
	}
	ret = platform_device_add_resources(glue->core, res, num_irqs);
	if (ret) {
		dev_err(glue->dev, "can't add resources\n");
		goto out_dev_put;
	}

	ret = platform_device_add_data(glue->core, pdev_data,
				       sizeof(*pdev_data));
	if (ret) {
		dev_err(glue->dev, "can't add platform data\n");
		goto out_dev_put;
	}

	ret = platform_device_add(glue->core);
	if (ret) {
		dev_err(glue->dev, "can't add platform device\n");
		goto out_dev_put;
	}
	return 0;

out_dev_put:
	platform_device_put(glue->core);

out:
	return ret;
}
/*
 * asc_receive_chars - drain the ASC receive FIFO into the tty layer
 *
 * Reads characters while the RX-buffer-full flag is set, classifying
 * break/frame/parity/overrun conditions, and pushes them to the tty.
 * Parity errors are ignored in 8-bit modes because the hardware leaves
 * the bit undefined there.
 */
static void asc_receive_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	unsigned long status, mode;
	unsigned long c = 0;
	char flag;
	bool ignore_pe = false;

	/*
	 * Datasheet states: If the MODE field selects an 8-bit frame then
	 * this [parity error] bit is undefined. Software should ignore this
	 * bit when reading 8-bit frames.
	 */
	mode = asc_in(port, ASC_CTL) & ASC_CTL_MODE_MSK;
	if (mode == ASC_CTL_MODE_8BIT || mode == ASC_CTL_MODE_8BIT_PAR)
		ignore_pe = true;

	/*
	 * NOTE(review): tport->tty->dev is dereferenced without a NULL
	 * check; assumes the port cannot wake us before a tty is attached
	 * -- confirm.
	 */
	if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
		pm_wakeup_event(tport->tty->dev, 0);

	while ((status = asc_in(port, ASC_STA)) & ASC_STA_RBF) {
		c = asc_in(port, ASC_RXBUF) | ASC_RXBUF_DUMMY_RX;
		flag = TTY_NORMAL;
		port->icount.rx++;

		if (status & ASC_STA_OE || c & ASC_RXBUF_FE ||
		    (c & ASC_RXBUF_PE && !ignore_pe)) {

			if (c & ASC_RXBUF_FE) {
				/* frame error with NUL data means break */
				if (c == (ASC_RXBUF_FE | ASC_RXBUF_DUMMY_RX)) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
					c |= ASC_RXBUF_DUMMY_BE;
				} else {
					port->icount.frame++;
				}
			} else if (c & ASC_RXBUF_PE) {
				port->icount.parity++;
			}
			/*
			 * Reading any data from the RX FIFO clears the
			 * overflow error condition.
			 */
			if (status & ASC_STA_OE) {
				port->icount.overrun++;
				c |= ASC_RXBUF_DUMMY_OE;
			}

			c &= port->read_status_mask;

			if (c & ASC_RXBUF_DUMMY_BE)
				flag = TTY_BREAK;
			else if (c & ASC_RXBUF_PE)
				flag = TTY_PARITY;
			else if (c & ASC_RXBUF_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, c & 0xff))
			continue;

		uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag);
	}

	/* Tell the rest of the system the news. New characters! */
	tty_flip_buffer_push(tport);
}
/*
 * init_port - claim and configure the LIRC in/out GPIO pins
 *
 * Requests both pins, resolves the input pin's irq chip, optionally
 * auto-detects receiver polarity by sampling the idle line, and finally
 * converts the absolute GPIO numbers into chip-relative offsets.
 * Returns 0 on success or -ENODEV with all claimed pins released.
 */
static int init_port(void)
{
	int i, nlow, nhigh, ret, irq;
	unsigned int base = gpio_calc_base();

	/* Translate our GPIO number into absolute number */
	gpio_out_pin = gpio_out_pin + base;
	gpio_in_pin = gpio_in_pin + base;

	if (gpio_request(gpio_out_pin, LIRC_DRIVER_NAME " ir/out")) {
		printk(KERN_ALERT LIRC_DRIVER_NAME
		       ": cant claim gpio pin %d\n", gpio_out_pin);
		ret = -ENODEV;
		goto exit_init_port;
	}

	if (gpio_request(gpio_in_pin, LIRC_DRIVER_NAME " ir/in")) {
		printk(KERN_ALERT LIRC_DRIVER_NAME
		       ": cant claim gpio pin %d\n", gpio_in_pin);
		ret = -ENODEV;
		goto exit_gpio_free_out_pin;
	}

	gpio_direction_input(gpio_in_pin);
	gpio_direction_output(gpio_out_pin, 1);
	gpio_set_value(gpio_out_pin, invert);

	irq = gpio_to_irq(gpio_in_pin);
	dprintk("to_irq %d\n", irq);
	/* we need the irq chip to ack/mask edges from the handler */
	irqdata = irq_get_irq_data(irq);

	if (irqdata && irqdata->chip) {
		irqchip = irqdata->chip;
	} else {
		ret = -ENODEV;
		goto exit_gpio_free_in_pin;
	}

	/* if pin is high, then this must be an active low receiver. */
	if (sense == -1) {
		/*
		 * probe 9 times every 0.04s, collect "votes" for
		 * active high/low
		 */
		nlow = 0;
		nhigh = 0;
		for (i = 0; i < 9; i++) {
			if (gpio_get_value(gpio_in_pin))
				nlow++;
			else
				nhigh++;
			msleep(40);
		}
		sense = (nlow >= nhigh ? 1 : 0);
		printk(KERN_INFO LIRC_DRIVER_NAME
		       ": auto-detected active %s receiver on GPIO pin %d\n",
		       sense ? "low" : "high", gpio_in_pin);
	} else {
		printk(KERN_INFO LIRC_DRIVER_NAME
		       ": manually using active %s receiver on GPIO pin %d\n",
		       sense ? "low" : "high", gpio_in_pin);
	}

	/* we have the gpio absolute number, translate into descriptor and
	 * get the gpio chip it belongs to. In some devices there is more
	 * than one gpio chip. "gco" can be equal to "gci" or not depending on
	 * the device and the chosen GPIOs */
	gco = gpiod_to_chip(gpio_to_desc(gpio_out_pin));
	gci = gpiod_to_chip(gpio_to_desc(gpio_in_pin));

	/* get the offset at the gpio chip
	 * (we will use offsets for better performance) */
	gpio_out_pin = gpio_out_pin - gco->base;
	gpio_in_pin = gpio_in_pin - gci->base;

	printk(KERN_INFO LIRC_DRIVER_NAME
	       " IN: %s, base = %d, offset = %d\n",
	       gci->label, gci->base, gpio_in_pin);
	printk(KERN_INFO LIRC_DRIVER_NAME
	       " OUT: %s, base = %d, offset = %d\n",
	       gco->label, gco->base, gpio_out_pin);

	return 0;

exit_gpio_free_in_pin:
	gpio_free(gpio_in_pin);

exit_gpio_free_out_pin:
	gpio_free(gpio_out_pin);

exit_init_port:
	return ret;
}
/*
 * baboon_irq_disable - disable the Baboon cascade interrupt
 * @irq: irq number (currently unused)
 *
 * NOTE(review): the @irq argument is ignored and the NuBus slot C
 * interrupt is always disabled.  Presumably the Baboon controller
 * cascades off IRQ_NUBUS_C so disabling the parent is intentional --
 * confirm against the Baboon interrupt wiring.
 */
void baboon_irq_disable(int irq)
{
	mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
}
/*
 * tda998x_create - initialize a TDA998x HDMI encoder instance
 *
 * Binds the companion CEC i2c device, resets the chip, identifies the
 * revision, enables DDC, sets up the optional hardware IRQ (HPD/EDID),
 * and parses optional DT video-port configuration.
 *
 * NOTE(review): this snippet appears truncated -- the "fail:" error
 * path and the final return are not visible in this chunk.
 */
static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
{
	struct device_node *np = client->dev.of_node;
	u32 video;
	int rev_lo, rev_hi, ret;
	unsigned short cec_addr;

	/* default video-port swizzling; may be overridden from DT below */
	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
	priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);

	priv->current_page = 0xff;
	priv->hdmi = client;
	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
	cec_addr = 0x34 + (client->addr & 0x03);
	priv->cec = i2c_new_dummy(client->adapter, cec_addr);
	if (!priv->cec)
		return -ENODEV;

	priv->dpms = DRM_MODE_DPMS_OFF;

	mutex_init(&priv->mutex);	/* protect the page access */
	init_waitqueue_head(&priv->edid_delay_waitq);
	setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
		    (unsigned long)priv);
	INIT_WORK(&priv->detect_work, tda998x_detect_work);

	/* wake up the device: */
	cec_write(priv, REG_CEC_ENAMODS,
			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);

	tda998x_reset(priv);

	/* read version: */
	rev_lo = reg_read(priv, REG_VERSION_LSB);
	rev_hi = reg_read(priv, REG_VERSION_MSB);
	if (rev_lo < 0 || rev_hi < 0) {
		ret = rev_lo < 0 ? rev_lo : rev_hi;
		goto fail;
	}

	priv->rev = rev_lo | rev_hi << 8;

	/* mask off feature bits: */
	priv->rev &= ~0x30; /* not-hdcp and not-scalar bit */

	switch (priv->rev) {
	case TDA9989N2:
		dev_info(&client->dev, "found TDA9989 n2");
		break;
	case TDA19989:
		dev_info(&client->dev, "found TDA19989");
		break;
	case TDA19989N2:
		dev_info(&client->dev, "found TDA19989 n2");
		break;
	case TDA19988:
		dev_info(&client->dev, "found TDA19988");
		break;
	default:
		dev_err(&client->dev, "found unsupported device: %04x\n",
			priv->rev);
		goto fail;
	}

	/* after reset, enable DDC: */
	reg_write(priv, REG_DDC_DISABLE, 0x00);

	/* set clock on DDC channel: */
	reg_write(priv, REG_TX3, 39);

	/* if necessary, disable multi-master: */
	if (priv->rev == TDA19989)
		reg_set(priv, REG_I2C_MASTER, I2C_MASTER_DIS_MM);

	cec_write(priv, REG_CEC_FRO_IM_CLK_CTRL,
			CEC_FRO_IM_CLK_CTRL_GHOST_DIS |
			CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);

	/* initialize the optional IRQ */
	if (client->irq) {
		int irqf_trigger;

		/* init read EDID waitqueue and HDP work */
		init_waitqueue_head(&priv->wq_edid);

		/* clear pending interrupts */
		reg_read(priv, REG_INT_FLAGS_0);
		reg_read(priv, REG_INT_FLAGS_1);
		reg_read(priv, REG_INT_FLAGS_2);

		/* reuse the trigger type already configured on the line */
		irqf_trigger =
			irqd_get_trigger_type(irq_get_irq_data(client->irq));
		ret = request_threaded_irq(client->irq, NULL,
					   tda998x_irq_thread,
					   irqf_trigger | IRQF_ONESHOT,
					   "tda998x", priv);
		if (ret) {
			dev_err(&client->dev,
				"failed to request IRQ#%u: %d\n",
				client->irq, ret);
			goto fail;
		}

		/* enable HPD irq */
		cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
	}

	/* enable EDID read irq: */
	reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);

	if (!np)
		return 0;	/* non-DT */

	/* get the optional video properties */
	ret = of_property_read_u32(np, "video-ports", &video);
	if (ret == 0) {
		priv->vip_cntrl_0 = video >> 16;
		priv->vip_cntrl_1 = video >> 8;
		priv->vip_cntrl_2 = video;
	}
static int mag3110_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter; struct input_dev *idev; struct mag3110_data *data; int ret = 0; struct regulator *vdd, *vdd_io; u32 pos = 0; struct device_node *of_node = client->dev.of_node; #if MAG3110_IRQ_USED struct irq_data *irq_data = irq_get_irq_data(client->irq); u32 irq_flag; bool shared_irq = of_property_read_bool(of_node, "shared-interrupt"); #endif vdd = NULL; vdd_io = NULL; vdd = devm_regulator_get(&client->dev, "vdd"); if (!IS_ERR(vdd)) { ret = regulator_enable(vdd); if (ret) { dev_err(&client->dev, "vdd set voltage error\n"); return ret; } } vdd_io = devm_regulator_get(&client->dev, "vddio"); if (!IS_ERR(vdd_io)) { ret = regulator_enable(vdd_io); if (ret) { dev_err(&client->dev, "vddio set voltage error\n"); return ret; } } adapter = to_i2c_adapter(client->dev.parent); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) return -EIO; dev_info(&client->dev, "check mag3110 chip ID\n"); ret = mag3110_read_reg(client, MAG3110_WHO_AM_I); if (MAG3110_ID != ret) { dev_err(&client->dev, "read chip ID 0x%x is not equal to 0x%x!\n", ret, MAG3110_ID); return -EINVAL; } data = kzalloc(sizeof(struct mag3110_data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; i2c_set_clientdata(client, data); /* Init queue */ init_waitqueue_head(&data->waitq); data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { dev_err(&client->dev, "hwmon register failed!\n"); ret = PTR_ERR(data->hwmon_dev); goto error_rm_dev_sysfs; } /*input poll device register */ data->poll_dev = input_allocate_polled_device(); if (!data->poll_dev) { dev_err(&client->dev, "alloc poll device failed!\n"); ret = -ENOMEM; goto error_rm_hwmon_dev; } data->poll_dev->poll = mag3110_dev_poll; data->poll_dev->poll_interval = POLL_INTERVAL; data->poll_dev->poll_interval_max = POLL_INTERVAL_MAX; idev = data->poll_dev->input; 
idev->name = MAG3110_DRV_NAME; idev->id.bustype = BUS_I2C; idev->evbit[0] = BIT_MASK(EV_ABS); input_set_abs_params(idev, ABS_X, -15000, 15000, 0, 0); input_set_abs_params(idev, ABS_Y, -15000, 15000, 0, 0); input_set_abs_params(idev, ABS_Z, -15000, 15000, 0, 0); ret = input_register_polled_device(data->poll_dev); if (ret) { dev_err(&client->dev, "register poll device failed!\n"); goto error_free_poll_dev; } /*create device group in sysfs as user interface */ ret = sysfs_create_group(&idev->dev.kobj, &mag3110_attr_group); if (ret) { dev_err(&client->dev, "create device file failed!\n"); ret = -EINVAL; goto error_rm_poll_dev; } #if MAG3110_IRQ_USED irq_flag = irqd_get_trigger_type(irq_data); irq_flag |= IRQF_ONESHOT; if (shared_irq) irq_flag |= IRQF_SHARED; ret = request_threaded_irq(client->irq, NULL, mag3110_irq_handler, irq_flag, client->dev.driver->name, idev); if (ret < 0) { dev_err(&client->dev, "failed to register irq %d!\n", client->irq); goto error_rm_dev_sysfs; } #endif /* Initialize mag3110 chip */ mag3110_init_client(client); mag3110_pdata = data; mag3110_pdata->active = MAG_STANDBY; ret = of_property_read_u32(of_node, "position", &pos); if (ret) pos = DEFAULT_POSITION; mag3110_pdata->position = (int)pos; dev_info(&client->dev, "mag3110 is probed\n"); return 0; error_rm_dev_sysfs: sysfs_remove_group(&client->dev.kobj, &mag3110_attr_group); error_rm_poll_dev: input_unregister_polled_device(data->poll_dev); error_free_poll_dev: input_free_polled_device(data->poll_dev); error_rm_hwmon_dev: hwmon_device_unregister(data->hwmon_dev); kfree(data); mag3110_pdata = NULL; return ret; }
/*
 * da9052_device_exit - tear down a DA9052 chip instance
 * @da9052: chip state
 *
 * Removes the regmap irq chip registered at init time and then the MFD
 * child devices.
 *
 * NOTE(review): irq_get_irq_data(da9052->irq_base) is dereferenced
 * without a NULL check -- safe only if the irq mapping is guaranteed to
 * still exist here; confirm against the init path.
 */
void da9052_device_exit(struct da9052 *da9052)
{
	regmap_del_irq_chip(da9052->chip_irq,
			    irq_get_irq_data(da9052->irq_base)->chip_data);
	mfd_remove_devices(da9052->dev);
}
/*
 * irq_to_max8997_irq - map a virq back to its max8997 irq descriptor
 * @max8997: chip instance (unused; kept for the call signature)
 * @irq: linux irq number
 *
 * The hwirq stored in the irq_data indexes directly into the static
 * max8997_irqs[] table.
 */
static inline const struct max8997_irq_data *
irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return &max8997_irqs[d->hwirq];
}
/*
 * st_sensors_allocate_trigger - allocate and register the data-ready trigger
 * @indio_dev: iio device the trigger belongs to
 * @trigger_ops: trigger ops to install
 *
 * Allocates an iio trigger, configures the interrupt polarity to match
 * the requested trigger type (falling back to rising edge when the
 * hardware cannot do active-low), requests the data-ready irq and
 * registers the trigger.  Returns 0 on success or a negative errno.
 */
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
				const struct iio_trigger_ops *trigger_ops)
{
	int err, irq;
	struct st_sensor_data *sdata = iio_priv(indio_dev);
	unsigned long irq_trig;

	sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
	if (sdata->trig == NULL) {
		dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
		return -ENOMEM;
	}

	irq = sdata->get_irq_data_ready(indio_dev);
	/*
	 * NOTE(review): the result of irqd_get_trigger_type() (IRQ_TYPE_*)
	 * is compared against IRQF_TRIGGER_* constants below; they are
	 * numerically identical for the edge types used here.
	 */
	irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
	/*
	 * If the IRQ is triggered on falling edge, we need to mark the
	 * interrupt as active low, if the hardware supports this.
	 */
	if (irq_trig == IRQF_TRIGGER_FALLING) {
		if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
			dev_err(&indio_dev->dev,
				"falling edge specified for IRQ but hardware "
				"only support rising edge, will request "
				"rising edge\n");
			irq_trig = IRQF_TRIGGER_RISING;
		} else {
			/* Set up INT active low i.e. falling edge */
			err = st_sensors_write_data_with_mask(indio_dev,
				sdata->sensor_settings->drdy_irq.addr_ihl,
				sdata->sensor_settings->drdy_irq.mask_ihl, 1);
			if (err < 0)
				goto iio_trigger_free;
			dev_info(&indio_dev->dev,
				 "interrupts on the falling edge\n");
		}
	} else if (irq_trig == IRQF_TRIGGER_RISING) {
		dev_info(&indio_dev->dev,
			 "interrupts on the rising edge\n");
	} else {
		dev_err(&indio_dev->dev,
			"unsupported IRQ trigger specified (%lx), only "
			"rising and falling edges supported, enforce "
			"rising edge\n", irq_trig);
		irq_trig = IRQF_TRIGGER_RISING;
	}

	/*
	 * If the interrupt pin is Open Drain, by definition this
	 * means that the interrupt line may be shared with other
	 * peripherals. But to do this we also need to have a status
	 * register and mask to figure out if this sensor was firing
	 * the IRQ or not, so we can tell the interrupt handle that
	 * it was "our" interrupt.
	 */
	if (sdata->int_pin_open_drain &&
	    sdata->sensor_settings->drdy_irq.addr_stat_drdy)
		irq_trig |= IRQF_SHARED;

	err = request_threaded_irq(irq,
				   iio_trigger_generic_data_rdy_poll,
				   NULL,
				   irq_trig,
				   sdata->trig->name,
				   sdata->trig);
	if (err) {
		dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
		goto iio_trigger_free;
	}

	iio_trigger_set_drvdata(sdata->trig, indio_dev);
	sdata->trig->ops = trigger_ops;
	sdata->trig->dev.parent = sdata->dev;
	err = iio_trigger_register(sdata->trig);
	if (err < 0) {
		dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
		goto iio_trigger_register_error;
	}
	indio_dev->trig = iio_trigger_get(sdata->trig);

	return 0;

iio_trigger_register_error:
	free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
iio_trigger_free:
	iio_trigger_free(sdata->trig);
	return err;
}
/*
 * xgene_gpio_sb_probe - probe the X-Gene standby-domain GPIO controller
 *
 * Maps the registers, derives the parent irq domain from the first
 * platform irq, initializes the generic gpio chip, reads optional
 * irq/gpio-count properties, creates a hierarchical irq domain on top of
 * the parent, and registers the gpio chip (plus ACPI event irqs when
 * irqs are present).  Returns 0 on success or a negative errno.
 */
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
	struct xgene_gpio_sb *priv;
	int ret;
	struct resource *res;
	void __iomem *regs;
	struct irq_domain *parent_domain = NULL;
	struct fwnode_handle *fwnode;
	u32 val32;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	priv->regs = regs;

	/* the first platform irq identifies the parent irq domain */
	ret = platform_get_irq(pdev, 0);
	if (ret > 0) {
		priv->parent_irq_base = irq_get_irq_data(ret)->hwirq;
		parent_domain = irq_get_irq_data(ret)->domain;
	}
	if (!parent_domain) {
		dev_err(&pdev->dev, "unable to obtain parent domain\n");
		return -ENODEV;
	}

	ret = bgpio_init(&priv->gc, &pdev->dev, 4,
			regs + MPA_GPIO_IN_ADDR,
			regs + MPA_GPIO_OUT_ADDR, NULL,
			regs + MPA_GPIO_OE_ADDR, NULL, 0);
	if (ret)
		return ret;

	priv->gc.to_irq = xgene_gpio_sb_to_irq;

	/* Retrieve start irq pin, use default if property not found */
	priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
	if (!device_property_read_u32(&pdev->dev,
				      XGENE_IRQ_START_PROPERTY, &val32))
		priv->irq_start = val32;

	/* Retrieve number irqs, use default if property not found */
	priv->nirq = XGENE_DFLT_MAX_NIRQ;
	if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
		priv->nirq = val32;

	/* Retrieve number gpio, use default if property not found */
	priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
	if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
		priv->gc.ngpio = val32;

	dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
		 priv->gc.ngpio, priv->nirq, priv->irq_start);

	platform_set_drvdata(pdev, priv);

	if (pdev->dev.of_node)
		fwnode = of_node_to_fwnode(pdev->dev.of_node);
	else
		fwnode = pdev->dev.fwnode;

	priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
					0, priv->nirq, fwnode,
					&xgene_gpio_sb_domain_ops, priv);
	if (!priv->irq_domain)
		return -ENODEV;

	priv->gc.irqdomain = priv->irq_domain;

	ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register X-Gene GPIO Standby driver\n");
		irq_domain_remove(priv->irq_domain);
		return ret;
	}

	dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");

	if (priv->nirq > 0) {
		/* Register interrupt handlers for gpio signaled acpi events */
		acpi_gpiochip_request_interrupts(&priv->gc);
	}

	return ret;
}
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);

	/*
	 * Mask the ack and err interrupts so the acks can be polled
	 * synchronously below instead of being taken by the isr.
	 */
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		/* undo the ack mask before bailing out */
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	/* wait out any request the isr path still owns */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) //p14291_121102
	pantech_debug_rpm_log(1, req->id, req->value);
#endif
#endif
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	/* restore interrupt delivery in reverse order of masking */
	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));

	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
/*
 * arizona_irq_init() - Set up interrupt handling for an Arizona device.
 *
 * Selects the chip-variant AOD and main regmap IRQ tables, creates a
 * two-line virtual IRQ domain that fans the single physical interrupt
 * out to those two regmap IRQ chips, and requests the boot-done,
 * control-interface-error (on variants that need it) and primary IRQ
 * handlers.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound through the goto chain at the bottom.
 */
int arizona_irq_init(struct arizona *arizona)
{
	int flags = IRQF_ONESHOT;
	int ret, i;
	const struct regmap_irq_chip *aod, *irq;
	struct irq_data *irq_data;

	/* Default to handling ctrlif errors in the core; variants opt out. */
	arizona->ctrlif_error = true;

	switch (arizona->type) {
#ifdef CONFIG_MFD_WM5102
	case WM5102:
		aod = &wm5102_aod;
		irq = &wm5102_irq;

		arizona->ctrlif_error = false;
		break;
#endif
#ifdef CONFIG_MFD_WM5110
	case WM5110:
	case WM8280:
		aod = &wm5110_aod;

		/* Rev D parts use a different main IRQ table. */
		switch (arizona->rev) {
		case 0 ... 2:
			irq = &wm5110_irq;
			break;
		default:
			irq = &wm5110_revd_irq;
			break;
		}

		arizona->ctrlif_error = false;
		break;
#endif
#ifdef CONFIG_MFD_WM8997
	case WM8997:
		aod = &wm8997_aod;
		irq = &wm8997_irq;

		arizona->ctrlif_error = false;
		break;
#endif
#ifdef CONFIG_MFD_WM8998
	case WM8998:
	case WM1814:
		aod = &wm8998_aod;
		irq = &wm8998_irq;

		arizona->ctrlif_error = false;
		break;
#endif
	default:
		BUG_ON("Unknown Arizona class device" == NULL);
		return -EINVAL;
	}

	/* Disable all wake sources by default */
	regmap_write(arizona->regmap, ARIZONA_WAKE_CONTROL, 0);

	/* Read the flags from the interrupt controller if not specified */
	if (!arizona->pdata.irq_flags) {
		irq_data = irq_get_irq_data(arizona->irq);
		if (!irq_data) {
			dev_err(arizona->dev, "Invalid IRQ: %d\n",
				arizona->irq);
			return -EINVAL;
		}

		arizona->pdata.irq_flags = irqd_get_trigger_type(irq_data);
		switch (arizona->pdata.irq_flags) {
		case IRQF_TRIGGER_LOW:
		case IRQF_TRIGGER_HIGH:
		case IRQF_TRIGGER_RISING:
		case IRQF_TRIGGER_FALLING:
			break;

		case IRQ_TYPE_NONE:
		default:
			/* Device default */
			arizona->pdata.irq_flags = IRQF_TRIGGER_LOW;
			break;
		}
	}

	/* Active-high triggering requires inverting the IRQ line polarity. */
	if (arizona->pdata.irq_flags & (IRQF_TRIGGER_HIGH |
					IRQF_TRIGGER_RISING)) {
		ret = regmap_update_bits(arizona->regmap, ARIZONA_IRQ_CTRL_1,
					 ARIZONA_IRQ_POL, 0);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Couldn't set IRQ polarity: %d\n", ret);
			goto err;
		}
	}

	flags |= arizona->pdata.irq_flags;

	/* Allocate a virtual IRQ domain to distribute to the regmap domains */
	arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
					      arizona);
	if (!arizona->virq) {
		dev_err(arizona->dev, "Failed to add core IRQ domain\n");
		ret = -EINVAL;
		goto err;
	}

	/* Line 0 of the virtual domain carries the AOD interrupts. */
	ret = regmap_add_irq_chip(arizona->regmap,
				  irq_create_mapping(arizona->virq, 0),
				  IRQF_ONESHOT, 0, aod,
				  &arizona->aod_irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev,
			"Failed to add AOD IRQs: %d\n", ret);
		goto err_domain;
	}

	/* Line 1 carries the variant's main interrupt block. */
	ret = regmap_add_irq_chip(arizona->regmap,
				  irq_create_mapping(arizona->virq, 1),
				  IRQF_ONESHOT, 0, irq,
				  &arizona->irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev,
			"Failed to add main IRQs: %d\n", ret);
		goto err_aod;
	}

	/* Make sure the boot done IRQ is unmasked for resumes */
	i = arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE);
	ret = request_threaded_irq(i, NULL, arizona_boot_done, IRQF_ONESHOT,
				   "Boot done", arizona);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to request boot done %d: %d\n",
			arizona->irq, ret);
		goto err_boot_done;
	}

	/* Handle control interface errors in the core */
	if (arizona->ctrlif_error) {
		i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR);
		ret = request_threaded_irq(i, NULL, arizona_ctrlif_err,
					   IRQF_ONESHOT,
					   "Control interface error", arizona);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Failed to request CTRLIF_ERR %d: %d\n",
				arizona->irq, ret);
			goto err_ctrlif;
		}
	}

	/* Used to emulate edge trigger and to work around broken pinmux */
	if (arizona->pdata.irq_gpio) {
		if (gpio_to_irq(arizona->pdata.irq_gpio) != arizona->irq) {
			dev_warn(arizona->dev, "IRQ %d is not GPIO %d (%d)\n",
				 arizona->irq, arizona->pdata.irq_gpio,
				 gpio_to_irq(arizona->pdata.irq_gpio));
			arizona->irq = gpio_to_irq(arizona->pdata.irq_gpio);
		}

		ret = devm_gpio_request_one(arizona->dev,
					    arizona->pdata.irq_gpio,
					    GPIOF_IN, "arizona IRQ");
		if (ret != 0) {
			/* Non-fatal: fall back to the existing IRQ line. */
			dev_err(arizona->dev,
				"Failed to request IRQ GPIO %d:: %d\n",
				arizona->pdata.irq_gpio, ret);
			arizona->pdata.irq_gpio = 0;
		}
	}

	ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
				   flags, "arizona", arizona);

	if (ret != 0) {
		dev_err(arizona->dev,
			"Failed to request primary IRQ %d: %d\n",
			arizona->irq, ret);
		goto err_main_irq;
	}

	return 0;

	/*
	 * Unwind in reverse order of acquisition; each label releases the
	 * resource obtained just before the corresponding goto and falls
	 * through to the next.
	 */
err_main_irq:
	if (arizona->ctrlif_error)
		free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
			 arizona);
err_ctrlif:
	free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
err_boot_done:
	regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
			    arizona->irq_chip);
err_aod:
	regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
			    arizona->aod_irq_chip);
err_domain:
	/* NOTE(review): the virq domain is not freed here — verify whether
	 * irq_domain_remove() is intentionally omitted on this path. */
err:
	return ret;
}