/*
 * a5k_floppy_enable_dma - install the FIQ-based floppy pseudo-DMA handler.
 * @channel: DMA channel identifier (unused by this implementation).
 * @dma: DMA descriptor carrying buffer, transfer mode and FIQ number.
 *
 * Selects the read or write FIQ handler depending on dma->dma_mode,
 * copies it directly onto the FIQ vector at 0x1c, loads the banked FIQ
 * registers (r9 = length, r10 = buffer address, fp = controller base)
 * and finally enables the FIQ source.
 *
 * BUG FIX: the call was corrupted to "set_fiq_regs(®s)" — an HTML
 * entity mangling of "&regs" ("&reg" -> "®") — restored to &regs.
 */
static void a5k_floppy_enable_dma(dmach_t channel, dma_t *dma)
{
	struct pt_regs regs;
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	extern void floppy_fiqsetup(unsigned long len, unsigned long addr,
				    unsigned long port);

	if (dma->dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;

		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;

		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	/* Copy the handler code straight onto the FIQ vector (0x1c). */
	memcpy((void *)0x1c, fiqhandler_start, fiqhandler_length);

	regs.ARM_r9 = dma->buf.length;
	regs.ARM_r10 = (unsigned long)dma->buf.address;
	regs.ARM_fp = FLOPPYDMA_BASE;
	set_fiq_regs(&regs);
	enable_fiq(dma->dma_irq);
}
/*
 * floppy_enable_dma - set up FIQ-driven pseudo-DMA for a floppy transfer.
 * @channel: DMA channel identifier (unused by this implementation).
 * @dma: DMA descriptor carrying buffer, transfer mode and FIQ number.
 *
 * Scatter-gather is not supported (BUG() guard).  Chooses the read or
 * write FIQ handler, primes the banked FIQ registers (r9 = length,
 * r10 = buffer address, fp = controller base), installs the handler via
 * set_fiq_handler() and enables the FIQ source.
 *
 * BUG FIX: the call was corrupted to "set_fiq_regs(®s)" — an HTML
 * entity mangling of "&regs" ("&reg" -> "®") — restored to &regs.
 */
static void floppy_enable_dma(dmach_t channel, dma_t *dma)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs regs;

	if (dma->using_sg)
		BUG();

	if (dma->dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;

		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;

		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	regs.ARM_r9 = dma->buf.length;
	regs.ARM_r10 = (unsigned long)dma->buf.__address;
	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	set_fiq_handler(fiqhandler_start, fiqhandler_length);
	set_fiq_regs(&regs);
	enable_fiq(dma->dma_irq);
}
/*
 * msm_setup_fiq_handler - install the MSM7k FIQ handler and per-cpu stacks.
 *
 * Claims the FIQ, installs the handler code, allocates one FIQ stack per
 * possible CPU, then routes/enables the FIQ interrupt line.  Returns 0 on
 * success, -ENOMEM if any per-cpu stack allocation failed.
 *
 * NOTE(review): the claim_fiq() return value is not checked — presumably
 * acceptable at early init, but worth confirming.
 */
static int __init msm_setup_fiq_handler(void)
{
	int i, ret = 0;

	spin_lock_init(&msm_fiq_lock);
	claim_fiq(&msm7k_fh);
	set_fiq_handler(&msm7k_fiq_start, msm7k_fiq_length);

	/* One FIQ-mode stack per possible CPU; bail out on first failure. */
	for_each_possible_cpu(i) {
		msm7k_fiq_stack[i] = (void *)__get_free_pages(GFP_KERNEL,
				THREAD_SIZE_ORDER);
		if (msm7k_fiq_stack[i] == NULL)
			break;
	}

	/*
	 * After a complete for_each_possible_cpu() walk the iterator ends at
	 * nr_cpumask_bits; any other value means the loop broke out early on
	 * an allocation failure, so free whatever was already allocated.
	 */
	if (i != nr_cpumask_bits) {
		pr_err("FIQ STACK SETUP IS NOT SUCCESSFUL\n");
		for (i = 0; i < nr_cpumask_bits && msm7k_fiq_stack[i] != NULL;
				i++)
			free_pages((unsigned long)msm7k_fiq_stack[i],
					THREAD_SIZE_ORDER);
		return -ENOMEM;
	}

	fiq_set_type(msm_fiq_no, IRQF_TRIGGER_RISING);
	/* 8625/8625Q route the FIQ via the secure GIC; older parts via VIC. */
	if (cpu_is_msm8625() || cpu_is_msm8625q())
		gic_set_irq_secure(msm_fiq_no);
	else
		msm_fiq_select(msm_fiq_no);
	enable_irq(msm_fiq_no);
	pr_info("%s : MSM FIQ handler setup--done\n", __func__);
	return ret;
}
int fiq_glue_register_handler(struct fiq_glue_handler *handler) { int ret; int cpu; if (!handler || !handler->fiq) return -EINVAL; mutex_lock(&fiq_glue_lock); if (fiq_stack) { ret = -EBUSY; goto err_busy; } for_each_possible_cpu(cpu) { void *stack; stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); if (WARN_ON(!stack)) { ret = -ENOMEM; goto err_alloc_fiq_stack; } per_cpu(fiq_stack, cpu) = stack; } ret = claim_fiq(&fiq_debbuger_fiq_handler); if (WARN_ON(ret)) goto err_claim_fiq; current_handler = handler; on_each_cpu(fiq_glue_setup_helper, handler, true); set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); mutex_unlock(&fiq_glue_lock); return 0; err_claim_fiq: err_alloc_fiq_stack: for_each_possible_cpu(cpu) { __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); per_cpu(fiq_stack, cpu) = NULL; } err_busy: mutex_unlock(&fiq_glue_lock); return ret; }
/*
 * msm_setup_fiq_handler - install the msm7k FIQ handler on MSM8625.
 *
 * Claims the FIQ, installs the handler code, allocates a FIQ-mode stack
 * and configures/enables the A9_M2A_2 interrupt as a secure (FIQ) line.
 *
 * Returns 0 on success, the claim_fiq() error, or -ENOMEM if the stack
 * allocation failed.
 *
 * FIX: claim_fiq()'s return value was ignored, and on stack-allocation
 * failure the FIQ stayed claimed with a stale handler installed.  Now
 * the claim is checked and released on the error path.
 */
static int __init msm_setup_fiq_handler(void)
{
	int ret;

	ret = claim_fiq(&msm7k_fh);
	if (ret) {
		pr_err("%s: couldn't claim FIQ, ret=%d\n", __func__, ret);
		return ret;
	}

	set_fiq_handler(&msm7k_fiq_start, msm7k_fiq_length);

	msm7k_fiq_stack = (void *)__get_free_pages(GFP_KERNEL,
						   THREAD_SIZE_ORDER);
	if (msm7k_fiq_stack == NULL) {
		pr_err("FIQ STACK SETUP IS NOT SUCCESSFUL\n");
		release_fiq(&msm7k_fh);	/* don't hold the FIQ on failure */
		return -ENOMEM;
	}

	fiq_set_type(MSM8625_INT_A9_M2A_2, IRQF_TRIGGER_RISING);
	gic_set_irq_secure(MSM8625_INT_A9_M2A_2);
	enable_irq(MSM8625_INT_A9_M2A_2);
	pr_info("%s : msm7k fiq setup--done\n", __func__);
	return 0;
}
int msm_setup_fiq_handler(void) { int ret = 0; //void *stack = NULL; claim_fiq(&msm_7k_fh); set_fiq_handler(&msm_7k_fiq_start, msm_7k_fiq_length); msm_fiq_stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); printk(" %s : free pages available -%p ::\n",__func__,msm_fiq_stack); if (msm_fiq_stack == NULL) { printk("No free pages available - %s fails\n", __func__); panic("FIQ STACK SETUP IS NOT SUCCESSFUL"); return -ENOMEM; } //msm_7k_fiq_setup(stack); irq_set_irq_type(MSM8625_INT_A9_M2A_2, IRQF_TRIGGER_RISING); gic_set_irq_secure(MSM8625_INT_A9_M2A_2); enable_irq(MSM8625_INT_A9_M2A_2); printk("%s : setup_fiq_handler --done \n", __func__); return ret; }
struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev, struct imx_ssi *ssi) { int ret = 0; ret = claim_fiq(&fh); if (ret) { dev_err(&pdev->dev, "failed to claim fiq: %d", ret); return ERR_PTR(ret); } mxc_set_irq_fiq(ssi->irq, 1); imx_pcm_fiq = ssi->irq; imx_ssi_fiq_base = (unsigned long)ssi->base; ssi->dma_params_tx.burstsize = 4; ssi->dma_params_rx.burstsize = 6; return &imx_soc_platform_fiq; }
/** * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer * @hw: The hardware state. * * Claim the FIQ handler (only one can be active at any one time) and * then setup the correct transfer code for this transfer. * * This call updates all the necessary state information if successful, * so the caller does not need to do anything more than start the transfer * as normal, since the IRQ will have been re-routed to the FIQ handler. */ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) { struct pt_regs regs; enum spi_fiq_mode mode; struct spi_fiq_code *code; int ret; if (!hw->fiq_claimed) { /* try and claim fiq if we haven't got it, and if not * then return and simply use another transfer method */ ret = claim_fiq(&hw->fiq_handler); if (ret) return; } if (hw->tx && !hw->rx) mode = FIQ_MODE_TX; else if (hw->rx && !hw->tx) mode = FIQ_MODE_RX; else mode = FIQ_MODE_TXRX; regs.uregs[fiq_rspi] = (long)hw->regs; regs.uregs[fiq_rrx] = (long)hw->rx; regs.uregs[fiq_rtx] = (long)hw->tx + 1; regs.uregs[fiq_rcount] = hw->len - 1; regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; set_fiq_regs(®s); if (hw->fiq_mode != mode) { u32 *ack_ptr; hw->fiq_mode = mode; switch (mode) { case FIQ_MODE_TX: code = &s3c24xx_spi_fiq_tx; break; case FIQ_MODE_RX: code = &s3c24xx_spi_fiq_rx; break; case FIQ_MODE_TXRX: code = &s3c24xx_spi_fiq_txrx; break; default: code = NULL; } BUG_ON(!code); ack_ptr = (u32 *)&code->data[code->ack_offset]; *ack_ptr = ack_bit(hw->irq); set_fiq_handler(&code->data, code->length); } s3c24xx_set_fiq(hw->irq, true); hw->fiq_mode = mode; hw->fiq_inuse = 1; }
/*
 * mx1_camera_probe - probe the MX1 CSI camera host.
 * @pdev: platform device describing the CSI block.
 *
 * Maps the CSI registers, requests a DMA channel, installs the
 * start-of-frame FIQ handler and registers the soc-camera host.
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on the goto-based error paths.
 *
 * BUG FIX: the register-load call was corrupted to "set_fiq_regs(®s)"
 * — an HTML entity mangling of "&regs" — restored to &regs.
 */
static int __init mx1_camera_probe(struct platform_device *pdev)
{
	struct mx1_camera_dev *pcdev;
	struct resource *res;
	struct pt_regs regs;
	struct clk *clk;
	void __iomem *base;
	unsigned int irq;
	int err = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || (int)irq <= 0) {
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "csi_clk");
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		goto exit;
	}

	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
	if (!pcdev) {
		dev_err(&pdev->dev, "Could not allocate pcdev\n");
		err = -ENOMEM;
		goto exit_put_clk;
	}

	pcdev->res = res;
	pcdev->clk = clk;

	pcdev->pdata = pdev->dev.platform_data;
	if (pcdev->pdata)
		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;

	if (!pcdev->mclk) {
		dev_warn(&pdev->dev,
			 "mclk_10khz == 0! Please, fix your platform data. "
			 "Using default 20MHz\n");
		pcdev->mclk = 20000000;
	}

	INIT_LIST_HEAD(&pcdev->capture);
	spin_lock_init(&pcdev->lock);

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
		err = -EBUSY;
		goto exit_kfree;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		err = -ENOMEM;
		goto exit_release;
	}
	pcdev->irq = irq;
	pcdev->base = base;

	/* request dma */
	pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
	if (pcdev->dma_chan < 0) {
		dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
		err = -EBUSY;
		goto exit_iounmap;
	}
	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);

	imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
			       pcdev);
	imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
			       IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
	/* burst length : 16 words = 64 bytes */
	imx_dma_config_burstlen(pcdev->dma_chan, 0);

	/* request irq */
	err = claim_fiq(&fh);
	if (err) {
		dev_err(&pdev->dev, "Camera interrupt register failed\n");
		goto exit_free_dma;
	}

	set_fiq_handler(&mx1_camera_sof_fiq_start,
			&mx1_camera_sof_fiq_end - &mx1_camera_sof_fiq_start);

	/* Banked FIQ registers: DMA mask/control, CSI control/status,
	 * and the DMA-channel bit the handler pokes. */
	regs.ARM_r8 = (long)MX1_DMA_DIMR;
	regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
	regs.ARM_r10 = (long)pcdev->base + CSICR1;
	regs.ARM_fp = (long)pcdev->base + CSISR;
	regs.ARM_sp = 1 << pcdev->dma_chan;
	set_fiq_regs(&regs);

	mxc_set_irq_fiq(irq, 1);
	enable_fiq(irq);

	pcdev->soc_host.drv_name = DRIVER_NAME;
	pcdev->soc_host.ops = &mx1_soc_camera_host_ops;
	pcdev->soc_host.priv = pcdev;
	pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
	pcdev->soc_host.nr = pdev->id;
	err = soc_camera_host_register(&pcdev->soc_host);
	if (err)
		goto exit_free_irq;

	dev_info(&pdev->dev, "MX1 Camera driver loaded\n");

	return 0;

exit_free_irq:
	disable_fiq(irq);
	mxc_set_irq_fiq(irq, 0);
	release_fiq(&fh);
exit_free_dma:
	imx_dma_free(pcdev->dma_chan);
exit_iounmap:
	iounmap(base);
exit_release:
	release_mem_region(res->start, resource_size(res));
exit_kfree:
	kfree(pcdev);
exit_put_clk:
	clk_put(clk);
exit:
	return err;
}
/*
 * ams_delta_init_fiq - install the Amstrad Delta keyboard FIQ handler.
 *
 * Claims the FIQ, hooks a deferred IRQ used to drain keystrokes out of
 * FIQ context, installs the handler, zeroes the shared fiq_buffer and
 * finally redirects the GPIO bank 1 interrupt to FIQ.  On any failure
 * the FIQ claim is released and the function returns with the FIQ
 * mechanism disabled.
 */
void __init ams_delta_init_fiq(void)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs FIQ_regs;
	unsigned long val, offset;
	int i, retval;

	fiqhandler_start = &qwerty_fiqin_start;
	fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
	pr_info("Installing fiq handler from %p, length 0x%x\n",
			fiqhandler_start, fiqhandler_length);

	retval = claim_fiq(&fh);
	if (retval) {
		pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
				retval);
		return;
	}

	/* Normal IRQ that the FIQ handler raises to hand work to the
	 * regular interrupt level. */
	retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
			IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL);
	if (retval < 0) {
		pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
		release_fiq(&fh);
		return;
	}
	/*
	 * Since no set_type() method is provided by OMAP irq chip,
	 * switch to edge triggered interrupt type manually.
	 */
	offset = IRQ_ILR0_REG_OFFSET +
			((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
	val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
	omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);

	set_fiq_handler(fiqhandler_start, fiqhandler_length);

	/*
	 * Initialise the buffer which is shared
	 * between FIQ mode and IRQ mode
	 */
	fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
	fiq_buffer[FIQ_MASK] = 0;
	fiq_buffer[FIQ_STATE] = 0;
	fiq_buffer[FIQ_KEY] = 0;
	fiq_buffer[FIQ_KEYS_CNT] = 0;
	fiq_buffer[FIQ_KEYS_HICNT] = 0;
	fiq_buffer[FIQ_TAIL_OFFSET] = 0;
	fiq_buffer[FIQ_HEAD_OFFSET] = 0;
	fiq_buffer[FIQ_BUF_LEN] = 256;
	fiq_buffer[FIQ_MISSED_KEYS] = 0;
	fiq_buffer[FIQ_BUFFER_START] =
			(unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];

	/* Per-interrupt counters start at zero. */
	for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
		fiq_buffer[i] = 0;

	/*
	 * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
	 * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
	 * only means of communication with the IRQ level and other kernel
	 * context code.
	 */
	FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
	set_fiq_regs(&FIQ_regs);

	pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);

	/*
	 * Redirect GPIO interrupts to FIQ
	 */
	offset = IRQ_ILR0_REG_OFFSET +
			(INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
	val = omap_readl(OMAP_IH1_BASE + offset) | 1;
	omap_writel(val, OMAP_IH1_BASE + offset);
}
/*
 * stmp3xxx_bat_init - register the STMP3xxx battery driver, optionally
 * installing the power-fault FIQ handler first.
 *
 * When POWER_FIQ is configured: claims the FIQ, installs the handler,
 * pins the vector page and power-register block in the TLB, then walks
 * the five power-fault interrupt sources through disable -> mark-as-FIQ
 * -> global-FIQ-enable -> re-enable.  A failed claim_fiq() only logs;
 * the battery platform driver is registered regardless.
 */
static int __init stmp3xxx_bat_init(void)
{
#ifdef POWER_FIQ
	int ret;

	ret = claim_fiq(&power_fiq);
	if (ret) {
		pr_err("Can't claim fiq");
	} else {
		get_fiq_regs(&fiq_regs);
		set_fiq_handler(power_fiq_start, power_fiq_end-power_fiq_start);
		/* Keep the vector page and power registers TLB-resident so
		 * the FIQ never faults. */
		lock_vector_tlb((void *)0xffff0000);
		lock_vector_tlb(REGS_POWER_BASE);

		/* disable interrupts to be configured as FIQs */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDD5V_DROOP));

		/* Enable these interrupts as FIQs */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD5V_DROOP));

		/* enable FIQ functionality */
		__raw_writel(BM_ICOLL_CTRL_FIQ_FINAL_ENABLE,
				HW_ICOLL_CTRL_SET_ADDR);

		/* enable these interrupts */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD5V_DROOP));
	}
#endif
	return platform_driver_register(&stmp3xxx_batdrv);
}