/*
 * init_IRQ - boot-time interrupt setup for the PKUnity (PUV3) SoC.
 *
 * Masks every source in the interrupt controller, clears stale GPIO edge
 * state, then installs irq_chip/flow handlers for the three IRQ ranges
 * visible here (low GPIOs, normal on-chip sources, high GPIOs) and chains
 * the high-GPIO demux handler.  Ordering matters: hardware is quiesced
 * before any handler is installed.
 */
void __init init_IRQ(void)
{
	unsigned int irq;

	request_resource(&iomem_resource, &irq_resource);

	/* disable all IRQs */
	writel(0, INTC_ICMR);
	/* all IRQs are IRQ, not REAL */
	writel(0, INTC_ICLR);

	/* clear all GPIO edge detects */
	writel(FMASK(8, 0) & ~FIELD(1, 1, GPI_SOFF_REQ), GPIO_GPIR);
	writel(0, GPIO_GFER);
	writel(0, GPIO_GRER);
	writel(0x0FFFFFFF, GPIO_GEDR);

	writel(1, INTC_ICCR);

	/*
	 * Low GPIO lines: edge-triggered, requestable and probe-able
	 * (all three NO* flags are cleared, nothing is set).
	 */
	for (irq = 0; irq < IRQ_GPIOHIGH; irq++) {
		irq_set_chip(irq, &puv3_low_gpio_chip);
		irq_set_handler(irq, handle_edge_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);
	}

	/*
	 * Normal on-chip sources: level-triggered.  Note these are marked
	 * IRQ_NOPROBE (set), unlike the GPIO ranges.
	 */
	for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) {
		irq_set_chip(irq, &puv3_normal_chip);
		irq_set_handler(irq, handle_level_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	/* High GPIO lines (GPIO0..GPIO27): edge-triggered. */
	for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) {
		irq_set_chip(irq, &puv3_high_gpio_chip);
		irq_set_handler(irq, handle_edge_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);
	}

	/*
	 * Install handler for GPIO 0-27 edge detect interrupts
	 * (IRQ_GPIOHIGH is the parent line that demuxes them).
	 */
	irq_set_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
	irq_set_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);

#ifdef CONFIG_PUV3_GPIO
	puv3_init_gpio();
#endif
}
/*
 * rpc_init_irq - boot-time interrupt setup for the Acorn RiscPC (IOMD).
 *
 * Masks all IRQ/FIQ/DMA sources, installs the default FIQ handler, then
 * walks every IRQ number and assigns the matching irq_chip:
 *   0-7   -> IOMD bank A,  8-15 -> IOMD bank B (level-triggered)
 *   16-21 -> IOMD DMA      (level-triggered)
 *   64-71 -> IOMD FIQ chip (no flow handler)
 * IRQs outside those ranges are left untouched.
 */
void __init rpc_init_irq(void)
{
	unsigned int irq, clr, set;

	/* Quiesce all sources before any handler is installed. */
	iomd_writeb(0, IOMD_IRQMASKA);
	iomd_writeb(0, IOMD_IRQMASKB);
	iomd_writeb(0, IOMD_FIQMASK);
	iomd_writeb(0, IOMD_DMAMASK);

	set_fiq_handler(&rpc_default_fiq_start,
		&rpc_default_fiq_end - &rpc_default_fiq_start);

	for (irq = 0; irq < NR_IRQS; irq++) {
		clr = IRQ_NOREQUEST;
		/*
		 * Fix: 'set' must be recomputed for each IRQ.  It was
		 * previously initialised once before the loop and only
		 * OR-ed inside it, so after the first IRQ matching the
		 * condition below, IRQ_NOAUTOEN stuck to every following
		 * IRQ (e.g. 20, 22+ and the FIQ range) as well.
		 */
		set = 0;

		if (irq <= 6 || (irq >= 9 && irq <= 15))
			clr |= IRQ_NOPROBE;

		/* DMA lines and the keyboard-TX line must not auto-enable. */
		if (irq == 21 || (irq >= 16 && irq <= 19) ||
		    irq == IRQ_KEYBOARDTX)
			set |= IRQ_NOAUTOEN;

		switch (irq) {
		case 0 ... 7:
			irq_set_chip_and_handler(irq, &iomd_a_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 8 ... 15:
			irq_set_chip_and_handler(irq, &iomd_b_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 16 ... 21:
			irq_set_chip_and_handler(irq, &iomd_dma_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 64 ... 71:
			irq_set_chip(irq, &iomd_fiq_chip);
			irq_modify_status(irq, clr, set);
			break;
		}
	}

	init_FIQ(FIQ_START);
}
/** * irq_sim_init - Initialize the interrupt simulator: allocate a range of * dummy interrupts. * * @sim: The interrupt simulator object to initialize. * @num_irqs: Number of interrupts to allocate * * Returns 0 on success and a negative error number on failure. */ int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs) { int i; sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL); if (!sim->irqs) return -ENOMEM; sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0); if (sim->irq_base < 0) { kfree(sim->irqs); return sim->irq_base; } for (i = 0; i < num_irqs; i++) { sim->irqs[i].irqnum = sim->irq_base + i; sim->irqs[i].enabled = false; irq_set_chip(sim->irq_base + i, &irq_sim_irqchip); irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]); irq_set_handler(sim->irq_base + i, &handle_simple_irq); irq_modify_status(sim->irq_base + i, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); } init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq); sim->irq_count = num_irqs; return 0; }
static struct mcuio_soft_hc *__setup_shc(const struct mcuio_soft_hc_ops *ops, void *priv) { struct mcuio_soft_hc *shc = kzalloc(sizeof(*shc), GFP_KERNEL); if (!shc) return ERR_PTR(-ENOMEM); init_kthread_worker(&shc->irq_kworker); shc->irq_kworker_task = kthread_run(kthread_worker_fn, &shc->irq_kworker, "shc_irq"); if (IS_ERR(shc->irq_kworker_task)) { pr_err("failed to create irq tsk for shc\n"); return ERR_PTR(PTR_ERR(shc->irq_kworker_task)); } init_kthread_work(&shc->do_irq, __do_irq); shc->ops = ops; shc->priv = priv; shc->rx_circ_buf.head = shc->rx_circ_buf.tail = 0; shc->rx_circ_buf.buf = shc->rx_buf; shc->chip.name = "MCUIO-SHC"; shc->chip.irq_mask = mcuio_soft_hc_irq_mask; shc->chip.irq_unmask = mcuio_soft_hc_irq_unmask; shc->irqno = irq_alloc_desc(0); irq_set_chip(shc->irqno, &shc->chip); irq_set_handler(shc->irqno, &handle_simple_irq); irq_modify_status(shc->irqno, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); return shc; }
/*
 * arc_request_percpu_irq - request (once) and enable (per-cpu) a percpu IRQ.
 *
 * The boot CPU (cpu == 0) performs the one-time registration; every CPU,
 * including the boot CPU, then enables the line locally.  Registration
 * failure is fatal (panic), matching how core percpu IRQ setup behaves.
 */
void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm,
			    void *percpu_dev)
{
	if (cpu == 0) {
		int ret;

		/*
		 * These 2 calls are essential to making percpu IRQ APIs work
		 * Ideally these details could be hidden in irq chip map
		 * function but the issue is IPIs IRQs being static (non-DT)
		 * and platform specific, so we can't identify them there.
		 */
		irq_set_percpu_devid(irq);
		irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */

		ret = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
		if (ret)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);
}
/*
 * mx25_tsadc_domain_map - irq_domain map callback for the mx25 tsadc.
 *
 * Associates a freshly mapped virq with the tsadc instance, gives it the
 * dummy chip with level flow handling, and marks it non-requestable /
 * probe-excluded.  Always succeeds.
 */
static int mx25_tsadc_domain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mx25_tsadc *priv = d->host_data;

	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, priv);
	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);

	return 0;
}
static int adp5588_irq_setup(struct adp5588_gpio *dev) { struct i2c_client *client = dev->client; struct adp5588_gpio_platform_data *pdata = dev_get_platdata(&client->dev); unsigned gpio; int ret; adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC); adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */ adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */ dev->irq_base = pdata->irq_base; mutex_init(&dev->irq_lock); for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) { int irq = gpio + dev->irq_base; irq_set_chip_data(irq, dev); irq_set_chip_and_handler(irq, &adp5588_irq_chip, handle_level_irq); irq_set_nested_thread(irq, 1); irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE); } ret = request_threaded_irq(client->irq, NULL, adp5588_irq_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev_name(&client->dev), dev); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", client->irq); goto out; } dev->gpio_chip.to_irq = adp5588_gpio_to_irq; adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT); return 0; out: dev->irq_base = 0; return ret; }
/*
 * set_irq_flags - translate legacy IRQF_* validity flags to IRQ_NO* status.
 *
 * Starts from "everything disabled" (NOREQUEST | NOPROBE | NOAUTOEN) and
 * clears the bits the caller's iflags re-enable: IRQF_VALID allows
 * request_irq(), IRQF_PROBE allows autoprobing, and the absence of
 * IRQF_NOAUTOEN allows auto-enable on request.  Out-of-range IRQ numbers
 * are rejected with an error message.
 */
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	unsigned long set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	unsigned long clr = 0;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	clr |= (iflags & IRQF_VALID) ? IRQ_NOREQUEST : 0;
	clr |= (iflags & IRQF_PROBE) ? IRQ_NOPROBE : 0;
	clr |= (iflags & IRQF_NOAUTOEN) ? 0 : IRQ_NOAUTOEN;

	/* Order is clear bits in "clr" then set bits in "set" */
	irq_modify_status(irq, clr, set & ~clr);
}
/*
 * sunxi_gpio_irq_init - set up the EINT_NUM external-interrupt lines.
 *
 * Registers the sunxi GPIO irq_chip with simple flow handling for each
 * line starting at sgpio->irq_base.  Platforms without EINT support have
 * a negative irq_base and are silently skipped.  Always returns 0.
 */
static int __devinit sunxi_gpio_irq_init(struct sunxi_gpio_chip *sgpio)
{
	int n;

	/* irq_base < 0 on unsupported platforms */
	if (sgpio->irq_base < 0)
		return 0;

	for (n = 0; n < EINT_NUM; n++) {
		int virq = sgpio->irq_base + n;

		irq_set_chip_data(virq, sgpio);
		irq_set_chip(virq, &sunxi_gpio_irq_chip);
		irq_set_handler(virq, handle_simple_irq);
		irq_modify_status(virq,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return 0;
}
static int ks8695uart_startup(struct uart_port *port) { int retval; irq_modify_status(KS8695_IRQ_UART_TX, IRQ_NOREQUEST, IRQ_NOAUTOEN); tx_enable(port, 0); rx_enable(port, 1); ms_enable(port, 1); /* * Allocate the IRQ */ retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, 0, "UART TX", port); if (retval) goto err_tx; retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, 0, "UART RX", port); if (retval) goto err_rx; retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, 0, "UART LineStatus", port); if (retval) goto err_ls; retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, 0, "UART ModemStatus", port); if (retval) goto err_ms; return 0; err_ms: free_irq(KS8695_IRQ_UART_LINE_STATUS, port); err_ls: free_irq(KS8695_IRQ_UART_RX, port); err_rx: free_irq(KS8695_IRQ_UART_TX, port); err_tx: return retval; }
/*
 * sirfsoc_uart_startup - uart_ops .startup for SiRF SoC UART/USP ports.
 *
 * Requests the port interrupt (kept disabled until the end via the
 * NOAUTOEN status set here), programs the FIFO/DMA registers in a fixed
 * order, optionally claims a CTS GPIO interrupt for USP flow control,
 * arms the RX polling hrtimer when RX DMA is in use, and finally starts
 * RX (DMA or PIO interrupt mode) before enabling the port IRQ.
 * Returns 0 on success or a negative errno.
 */
static int sirfsoc_uart_startup(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned int index = port->line;
	int ret;

	/* Keep the IRQ off until setup completes; enable_irq() at the end. */
	irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
	ret = request_irq(port->irq,
				sirfsoc_uart_isr,
				0,
				SIRFUART_PORT_NAME,
				sirfport);
	if (ret != 0) {
		dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
							index, port->irq);
		goto irq_err;
	}

	/* initial hardware settings: IO mode, no pending DMA flush/length */
	wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
		SIRFUART_IO_MODE);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_RX_DMA_FLUSH);
	wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
	/* USP ports additionally need endian control and the USP enable bit. */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
		wr_regl(port, ureg->sirfsoc_mode1,
			SIRFSOC_USP_ENDIAN_CTRL_LSBF |
			SIRFSOC_USP_EN);
	/* Reset both FIFOs, then program the threshold levels. */
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
	if (sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
			SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
	if (sirfport->tx_dma_chan) {
		sirfport->tx_dma_state = TX_DMA_IDLE;
		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
			SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
			SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
			SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
	}
	sirfport->ms_enabled = false;
	/* USP hardware flow control uses a dedicated CTS GPIO interrupt. */
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
		sirfport->hw_flow_ctrl) {
		irq_modify_status(gpio_to_irq(sirfport->cts_gpio),
			IRQ_NOREQUEST, IRQ_NOAUTOEN);
		ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
			sirfsoc_uart_usp_cts_handler,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
			"usp_cts_irq", sirfport);
		if (ret != 0) {
			dev_err(port->dev, "UART-USP:request gpio irq fail\n");
			goto init_rx_err;
		}
	}
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
		sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_swh_dma_io,
			SIRFUART_CLEAR_RX_ADDR_EN);
	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
		sirfport->rx_dma_chan)
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFSOC_USP_FRADDR_CLR_EN);
	/*
	 * RX-DMA mode polls the DMA position with an hrtimer; arm it once.
	 * rx_period_time is in nanoseconds (20 ms period).
	 */
	if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
		sirfport->is_hrt_enabled = true;
		sirfport->rx_period_time = 20000000;
		sirfport->rx_last_pos = -1;
		sirfport->pio_fetch_cnt = 0;
		sirfport->rx_dma_items.xmit.tail =
			sirfport->rx_dma_items.xmit.head = 0;
		hrtimer_start(&sirfport->hrt,
			ns_to_ktime(sirfport->rx_period_time),
			HRTIMER_MODE_REL);
	}
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
	if (sirfport->rx_dma_chan)
		sirfsoc_uart_start_next_rx_dma(port);
	else {
		/* PIO mode: enable the RX interrupts for this port type. */
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				SIRFUART_RX_IO_INT_EN(uint_en,
					sirfport->uart_reg->uart_type));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				SIRFUART_RX_IO_INT_EN(uint_en,
					sirfport->uart_reg->uart_type));
	}
	enable_irq(port->irq);

	return 0;
init_rx_err:
	free_irq(port->irq, sirfport);
irq_err:
	return ret;
}
static int __init CoreServicesInit(void) { int ret_val = 0; int result = 0; uint32_t currentSettings = 0; struct resource* r = request_mem_region(ALLOY_RAM_BASE, ALLOY_DEDICATED_RAM_SIZE, "AlloyRAM"); if(r == NULL) { printk("request_mem_region failed.\n"); } else { printk("request_mem_region ok.\n"); } r = request_mem_region(0x40000000, 4096, "AlloyPeripherals"); if(r == NULL) { printk("request_mem_region failed.\n"); } else { printk("request_mem_region ok.\n"); } /* * Register the character device (atleast try) */ ret_val = register_chrdev(MAJOR_NUM, DEVICE_NAME, &Fops); /* * Negative values signify an error */ if (ret_val < 0) { printk(KERN_ALERT "%s failed with %d\n", "Sorry, registering the character device ", ret_val); return ret_val; } set_irq_flags(IRQ_ARM_LOCAL_MAILBOX2, IRQF_VALID); irq_clear_status_flags(IRQ_ARM_LOCAL_MAILBOX2, IRQ_PER_CPU); irq_clear_status_flags(IRQ_ARM_LOCAL_MAILBOX2, IRQ_LEVEL); irq_modify_status(IRQ_ARM_LOCAL_MAILBOX0,0xffffffff,0x00000000); irq_modify_status(IRQ_ARM_LOCAL_MAILBOX1,0xffffffff,0x00000000); irq_modify_status(IRQ_ARM_LOCAL_MAILBOX2,0xffffffff,0x00000000); irq_modify_status(IRQ_ARM_LOCAL_MAILBOX3,0xffffffff,0x00000000); // // Register the interrupt handler for mailbox IRQs. 
// set_irq_flags(IRQ_ARM_LOCAL_MAILBOX2, IRQF_VALID); result = request_threaded_irq( IRQ_ARM_LOCAL_MAILBOX0, // The interrupt number requested (irq_handler_t) MailboxIRQHandler0, // The pointer to the handler function (above) NULL, IRQF_SHARED, // Interrupt is on rising edge (button press in Fig.1) "MailboxIRQHandler", // Used in /proc/interrupts to identify the owner DEVICE_NAME); // The *dev_id for shared interrupt lines, NULL here result = request_threaded_irq( IRQ_ARM_LOCAL_MAILBOX1, // The interrupt number requested (irq_handler_t) MailboxIRQHandler1, // The pointer to the handler function (above) NULL, IRQF_SHARED, // Interrupt is on rising edge (button press in Fig.1) "MailboxIRQHandler", // Used in /proc/interrupts to identify the owner DEVICE_NAME); // The *dev_id for shared interrupt lines, NULL here result = request_threaded_irq( IRQ_ARM_LOCAL_MAILBOX2, // The interrupt number requested (irq_handler_t) MailboxIRQHandler2, // The pointer to the handler function (above) NULL, IRQF_SHARED, // Interrupt is on rising edge (button press in Fig.1) "MailboxIRQHandler", // Used in /proc/interrupts to identify the owner DEVICE_NAME); // The *dev_id for shared interrupt lines, NULL here result = request_threaded_irq( IRQ_ARM_LOCAL_MAILBOX3, // The interrupt number requested (irq_handler_t) MailboxIRQHandler3, // The pointer to the handler function (above) NULL, IRQF_SHARED, // Interrupt is on rising edge (button press in Fig.1) "MailboxIRQHandler", // Used in /proc/interrupts to identify the owner DEVICE_NAME); // The *dev_id for shared interrupt lines, NULL here if(result == 0) { printk(KERN_INFO "Mailbox ISR registered ok.\n"); } else { printk(KERN_INFO "Mailbox ISR registration failed (%d).\n", result); } // // Enable the interupt. // We're on Core0 and we want to enable the Mailbox 1 interrupt. 
// currentSettings = readl( __io_address(ARM_LOCAL_MAILBOX_INT_CONTROL0) ); currentSettings |= 0x0000000f; writel( currentSettings, __io_address(ARM_LOCAL_MAILBOX_INT_CONTROL0) ); // // // alloyRam = ioremap_nocache( ALLOY_RAM_BASE, ALLOY_DEDICATED_RAM_SIZE ); printk(KERN_INFO "%s The major device number is %d.\n", "Registeration is a success", MAJOR_NUM); printk(KERN_INFO "If you want to talk to the device driver,\n"); printk(KERN_INFO "you'll have to create a device file. \n"); printk(KERN_INFO "We suggest you use:\n"); printk(KERN_INFO "mknod %s c %d 0\n", DEVICE_FILE_NAME, MAJOR_NUM); printk(KERN_INFO "The device file name is important, because\n"); printk(KERN_INFO "the ioctl program assumes that's the\n"); printk(KERN_INFO "file you'll use.\n"); return 0; }