static int imx_pcm_fiq_new(struct snd_card *card, struct snd_soc_dai *dai, struct snd_pcm *pcm) { int ret; ret = imx_pcm_new(card, dai, pcm); if (ret) return ret; if (dai->playback.channels_min) { struct snd_pcm_substream *substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; imx_ssi_fiq_tx_buffer = (unsigned long)buf->area; } if (dai->capture.channels_min) { struct snd_pcm_substream *substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; imx_ssi_fiq_rx_buffer = (unsigned long)buf->area; } set_fiq_handler(&imx_ssi_fiq_start, &imx_ssi_fiq_end - &imx_ssi_fiq_start); return 0; }
/*
 * Start a floppy FIQ-driven pseudo-DMA transfer.
 *
 * Picks the FIQ handler matching the transfer direction, loads the
 * FIQ-banked registers with the buffer description (r9 = length,
 * r10 = address, fp = controller base), installs the handler and
 * enables the FIQ source.  Bails out if the FIQ cannot be claimed.
 */
static void floppy_enable_dma(dmach_t channel, dma_t *dma)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs regs;

	/* Scatter-gather is not supported by the FIQ handlers. */
	if (dma->using_sg)
		BUG();

	if (dma->dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	regs.ARM_r9 = dma->buf.length;
	regs.ARM_r10 = (unsigned long)dma->buf.__address;
	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	set_fiq_handler(fiqhandler_start, fiqhandler_length);
	/* Fixed: source was garbled to "set_fiq_regs(®s)" — the "&re"
	 * of "&regs" had been eaten by an HTML-entity conversion. */
	set_fiq_regs(&regs);
	enable_fiq(dma->dma_irq);
}
/*
 * Default FIQ ownership hook: when asked to reacquire control
 * (relinquish == 0), reinstall the benign no-op FIQ instruction.
 * Relinquishing requires no action here.
 */
static int fiq_def_op(void *ref, int relinquish)
{
	if (relinquish)
		return 0;

	set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
	return 0;
}
/*
 * Early setup of the MSM FIQ handler: claim the FIQ, install the
 * handler code, allocate one FIQ-mode stack per possible CPU, then
 * route and enable the FIQ interrupt line.
 *
 * Returns 0 on success, -ENOMEM if any per-CPU stack allocation fails.
 */
static int __init msm_setup_fiq_handler(void)
{
	int i, ret = 0;

	spin_lock_init(&msm_fiq_lock);
	/* NOTE(review): claim_fiq() return value is ignored — if another
	 * driver already owns the FIQ this proceeds silently; consider
	 * checking it. */
	claim_fiq(&msm7k_fh);
	set_fiq_handler(&msm7k_fiq_start, msm7k_fiq_length);
	/* One FIQ stack per possible CPU; stop at the first failure. */
	for_each_possible_cpu(i) {
		msm7k_fiq_stack[i] = (void *)__get_free_pages(GFP_KERNEL,
				THREAD_SIZE_ORDER);
		if (msm7k_fiq_stack[i] == NULL)
			break;
	}
	/* Assumes for_each_possible_cpu() leaves i == nr_cpumask_bits on
	 * normal completion, so any other value means an allocation broke
	 * out early — TODO confirm this holds for the target kernel's
	 * cpumask iterator before porting. */
	if (i != nr_cpumask_bits) {
		pr_err("FIQ STACK SETUP IS NOT SUCCESSFUL\n");
		/* Unwind only the stacks that were actually allocated. */
		for (i = 0; i < nr_cpumask_bits && msm7k_fiq_stack[i] != NULL; i++)
			free_pages((unsigned long)msm7k_fiq_stack[i],
					THREAD_SIZE_ORDER);
		return -ENOMEM;
	}
	fiq_set_type(msm_fiq_no, IRQF_TRIGGER_RISING);
	/* 8625-class SoCs route the FIQ as a secure GIC interrupt;
	 * other targets go through the MSM-specific FIQ select. */
	if (cpu_is_msm8625() || cpu_is_msm8625q())
		gic_set_irq_secure(msm_fiq_no);
	else
		msm_fiq_select(msm_fiq_no);
	enable_irq(msm_fiq_no);
	pr_info("%s : MSM FIQ handler setup--done\n", __func__);
	return ret;
}
/*
 * RiscPC IOMD interrupt controller initialisation: mask every IRQ/FIQ/
 * DMA source, park the FIQ vector on a safe default handler, then set
 * up the irq chip, flow handler and status flags for each line.
 */
void __init rpc_init_irq(void)
{
	unsigned int irq, clr, set;

	/* Mask all sources before wiring anything up. */
	iomd_writeb(0, IOMD_IRQMASKA);
	iomd_writeb(0, IOMD_IRQMASKB);
	iomd_writeb(0, IOMD_FIQMASK);
	iomd_writeb(0, IOMD_DMAMASK);

	set_fiq_handler(&rpc_default_fiq_start,
		&rpc_default_fiq_end - &rpc_default_fiq_start);

	for (irq = 0; irq < NR_IRQS; irq++) {
		/*
		 * Fixed: `set` was initialised once before the loop and
		 * only ever OR-ed inside it, so after the first IRQ that
		 * requested IRQ_NOAUTOEN every subsequent IRQ inherited
		 * the flag too.  Both accumulators must be reset per IRQ.
		 */
		clr = IRQ_NOREQUEST;
		set = 0;

		if (irq <= 6 || (irq >= 9 && irq <= 15))
			clr |= IRQ_NOPROBE;

		if (irq == 21 || (irq >= 16 && irq <= 19) ||
		    irq == IRQ_KEYBOARDTX)
			set |= IRQ_NOAUTOEN;

		switch (irq) {
		case 0 ... 7:
			irq_set_chip_and_handler(irq, &iomd_a_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 8 ... 15:
			irq_set_chip_and_handler(irq, &iomd_b_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 16 ... 21:
			irq_set_chip_and_handler(irq, &iomd_dma_chip,
						 handle_level_irq);
			irq_modify_status(irq, clr, set);
			break;

		case 64 ... 71:
			irq_set_chip(irq, &iomd_fiq_chip);
			irq_modify_status(irq, clr, set);
			break;
		}
	}

	init_FIQ(FIQ_START);
}
/*
 * Default FIQ ownership hook.  On reacquire (relinquish == 0) the
 * default FIQ register state and handler are restored with FIQs
 * briefly masked; relinquishing control needs no action here.
 */
static int fiq_def_op(void *ref, int relinquish)
{
	if (!relinquish) {
		/* Restore default handler and registers.  FIQs are masked
		 * across the swap so the handler and its register state
		 * are never observed half-updated. */
		local_fiq_disable();
		set_fiq_regs(&dfl_fiq_regs);
		set_fiq_handler(&dfl_fiq_insn, sizeof(dfl_fiq_insn));
		local_fiq_enable();

		/* FIXME: notify irq controller to standard enable FIQs */
	}

	return 0;
}
/*
 * Install a FIQ callback via the fiq_glue trampoline.  Only one
 * handler may ever be registered.  Returns 0 on success, or -ENOMEM
 * if a handler is already installed.
 * NOTE(review): -EBUSY would describe the "already installed" case
 * better than -ENOMEM — verify callers before changing.
 */
int msm_fiq_set_handler(void (*func)(void *data, void *regs), void *data)
{
	unsigned long flags;
	int ret = -ENOMEM;

	/* IRQs off so the check-and-set of fiq_func is atomic on this CPU. */
	local_irq_save(flags);
	if (fiq_func == 0) {
		fiq_func = func;
		/* assumes fiq_stack + 255 is the top-of-stack the glue
		 * expects — TODO confirm fiq_stack's element size/length
		 * against the fiq_glue_setup() contract. */
		fiq_glue_setup(func, data, fiq_stack + 255);
		set_fiq_handler(&fiq_glue, (&fiq_glue_end - &fiq_glue));
		ret = 0;
	}
	local_irq_restore(flags);
	return ret;
}
int fiq_glue_register_handler(struct fiq_glue_handler *handler) { int ret; int cpu; if (!handler || !handler->fiq) return -EINVAL; mutex_lock(&fiq_glue_lock); if (fiq_stack) { ret = -EBUSY; goto err_busy; } for_each_possible_cpu(cpu) { void *stack; stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); if (WARN_ON(!stack)) { ret = -ENOMEM; goto err_alloc_fiq_stack; } per_cpu(fiq_stack, cpu) = stack; } ret = claim_fiq(&fiq_debbuger_fiq_handler); if (WARN_ON(ret)) goto err_claim_fiq; current_handler = handler; on_each_cpu(fiq_glue_setup_helper, handler, true); set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); mutex_unlock(&fiq_glue_lock); return 0; err_claim_fiq: err_alloc_fiq_stack: for_each_possible_cpu(cpu) { __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); per_cpu(fiq_stack, cpu) = NULL; } err_busy: mutex_unlock(&fiq_glue_lock); return ret; }
/*
 * Early setup of the msm7k FIQ handler for MSM8625: claim the FIQ,
 * allocate the FIQ-mode stack, install the handler, then route the
 * A9_M2A_2 interrupt as a secure (FIQ) source and enable it.
 *
 * Returns 0 on success, the claim_fiq() error, or -ENOMEM if the
 * stack cannot be allocated.
 */
static int __init msm_setup_fiq_handler(void)
{
	int ret;

	/* Fixed: claim_fiq()'s result was previously ignored — if the
	 * FIQ is owned by someone else we must not install a handler. */
	ret = claim_fiq(&msm7k_fh);
	if (ret) {
		pr_err("%s: could not claim FIQ, ret=%d\n", __func__, ret);
		return ret;
	}

	/* Fixed: allocate the FIQ stack before installing the handler,
	 * and release the claimed FIQ if the allocation fails. */
	msm7k_fiq_stack = (void *)__get_free_pages(GFP_KERNEL,
						   THREAD_SIZE_ORDER);
	if (msm7k_fiq_stack == NULL) {
		pr_err("FIQ STACK SETUP IS NOT SUCCESSFUL\n");
		release_fiq(&msm7k_fh);
		return -ENOMEM;
	}

	set_fiq_handler(&msm7k_fiq_start, msm7k_fiq_length);
	fiq_set_type(MSM8625_INT_A9_M2A_2, IRQF_TRIGGER_RISING);
	gic_set_irq_secure(MSM8625_INT_A9_M2A_2);
	enable_irq(MSM8625_INT_A9_M2A_2);
	pr_info("%s : msm7k fiq setup--done\n", __func__);
	return 0;
}
int msm_setup_fiq_handler(void) { int ret = 0; //void *stack = NULL; claim_fiq(&msm_7k_fh); set_fiq_handler(&msm_7k_fiq_start, msm_7k_fiq_length); msm_fiq_stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); printk(" %s : free pages available -%p ::\n",__func__,msm_fiq_stack); if (msm_fiq_stack == NULL) { printk("No free pages available - %s fails\n", __func__); panic("FIQ STACK SETUP IS NOT SUCCESSFUL"); return -ENOMEM; } //msm_7k_fiq_setup(stack); irq_set_irq_type(MSM8625_INT_A9_M2A_2, IRQF_TRIGGER_RISING); gic_set_irq_secure(MSM8625_INT_A9_M2A_2); enable_irq(MSM8625_INT_A9_M2A_2); printk("%s : setup_fiq_handler --done \n", __func__); return ret; }
/*
 * Install a FIQ callback via the fiq_glue trampoline, lazily
 * allocating the FIQ stack on first use.  Returns 0 on success, or
 * -ENOMEM if the stack cannot be allocated or a handler is already
 * installed.
 */
int msm_fiq_set_handler(void (*func)(void *data, void *regs, void *svc_sp),
		void *data)
{
	unsigned long flags;
	int ret = -ENOMEM;

	/* Stack is allocated once and kept for the life of the kernel.
	 * NOTE(review): this allocation happens outside the irq-disabled
	 * section, so two racing callers could both allocate and leak one
	 * buffer — verify callers are serialised. */
	if (!fiq_stack)
		fiq_stack = kzalloc(THREAD_SIZE, GFP_KERNEL);
	if (!fiq_stack)
		return -ENOMEM;

	/* IRQs off so the check-and-set of fiq_func is atomic on this CPU. */
	local_irq_save(flags);
	if (fiq_func == 0) {
		fiq_func = func;
		fiq_data = data;
		/* presumably fiq_stack + THREAD_START_SP is the initial
		 * (top-of-stack) SP the glue expects — confirm against
		 * fiq_glue_setup(). */
		fiq_glue_setup(func, data, fiq_stack + THREAD_START_SP);
		set_fiq_handler(&fiq_glue, (&fiq_glue_end - &fiq_glue));
		ret = 0;
	}
	local_irq_restore(flags);
	return ret;
}
/** * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer * @hw: The hardware state. * * Claim the FIQ handler (only one can be active at any one time) and * then setup the correct transfer code for this transfer. * * This call updates all the necessary state information if successful, * so the caller does not need to do anything more than start the transfer * as normal, since the IRQ will have been re-routed to the FIQ handler. */ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) { struct pt_regs regs; enum spi_fiq_mode mode; struct spi_fiq_code *code; int ret; if (!hw->fiq_claimed) { /* try and claim fiq if we haven't got it, and if not * then return and simply use another transfer method */ ret = claim_fiq(&hw->fiq_handler); if (ret) return; } if (hw->tx && !hw->rx) mode = FIQ_MODE_TX; else if (hw->rx && !hw->tx) mode = FIQ_MODE_RX; else mode = FIQ_MODE_TXRX; regs.uregs[fiq_rspi] = (long)hw->regs; regs.uregs[fiq_rrx] = (long)hw->rx; regs.uregs[fiq_rtx] = (long)hw->tx + 1; regs.uregs[fiq_rcount] = hw->len - 1; regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; set_fiq_regs(®s); if (hw->fiq_mode != mode) { u32 *ack_ptr; hw->fiq_mode = mode; switch (mode) { case FIQ_MODE_TX: code = &s3c24xx_spi_fiq_tx; break; case FIQ_MODE_RX: code = &s3c24xx_spi_fiq_rx; break; case FIQ_MODE_TXRX: code = &s3c24xx_spi_fiq_txrx; break; default: code = NULL; } BUG_ON(!code); ack_ptr = (u32 *)&code->data[code->ack_offset]; *ack_ptr = ack_bit(hw->irq); set_fiq_handler(&code->data, code->length); } s3c24xx_set_fiq(hw->irq, true); hw->fiq_mode = mode; hw->fiq_inuse = 1; }
/*
 * mx1_camera_probe - set up the i.MX1 CSI camera host.
 *
 * Maps the CSI registers, takes the CSI clock and a DMA channel,
 * installs the start-of-frame FIQ handler with its banked register
 * state, then registers the soc-camera host.  All acquired resources
 * are released in reverse order on failure.
 */
static int __init mx1_camera_probe(struct platform_device *pdev)
{
	struct mx1_camera_dev *pcdev;
	struct resource *res;
	struct pt_regs regs;
	struct clk *clk;
	void __iomem *base;
	unsigned int irq;
	int err = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || (int)irq <= 0) {
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "csi_clk");
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		goto exit;
	}

	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
	if (!pcdev) {
		dev_err(&pdev->dev, "Could not allocate pcdev\n");
		err = -ENOMEM;
		goto exit_put_clk;
	}

	pcdev->res = res;
	pcdev->clk = clk;

	pcdev->pdata = pdev->dev.platform_data;
	if (pcdev->pdata)
		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;

	if (!pcdev->mclk) {
		dev_warn(&pdev->dev,
			 "mclk_10khz == 0! Please, fix your platform data. "
			 "Using default 20MHz\n");
		pcdev->mclk = 20000000;
	}

	INIT_LIST_HEAD(&pcdev->capture);
	spin_lock_init(&pcdev->lock);

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
		err = -EBUSY;
		goto exit_kfree;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		err = -ENOMEM;
		goto exit_release;
	}
	pcdev->irq = irq;
	pcdev->base = base;

	/* request dma */
	pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
	if (pcdev->dma_chan < 0) {
		dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
		err = -EBUSY;
		goto exit_iounmap;
	}
	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);

	imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
			       pcdev);
	imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
			       IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
	/* burst length : 16 words = 64 bytes */
	imx_dma_config_burstlen(pcdev->dma_chan, 0);

	/* request irq */
	err = claim_fiq(&fh);
	if (err) {
		dev_err(&pdev->dev, "Camera interrupt register failed\n");
		goto exit_free_dma;
	}

	set_fiq_handler(&mx1_camera_sof_fiq_start,
			&mx1_camera_sof_fiq_end - &mx1_camera_sof_fiq_start);

	/* FIQ-banked registers carry the DMA/CSI register addresses and
	 * the DMA channel bit for the start-of-frame handler. */
	regs.ARM_r8 = (long)MX1_DMA_DIMR;
	regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
	regs.ARM_r10 = (long)pcdev->base + CSICR1;
	regs.ARM_fp = (long)pcdev->base + CSISR;
	regs.ARM_sp = 1 << pcdev->dma_chan;
	/* Fixed: source was garbled to "set_fiq_regs(®s)" — the "&re"
	 * of "&regs" had been eaten by an HTML-entity conversion. */
	set_fiq_regs(&regs);

	mxc_set_irq_fiq(irq, 1);
	enable_fiq(irq);

	pcdev->soc_host.drv_name	= DRIVER_NAME;
	pcdev->soc_host.ops		= &mx1_soc_camera_host_ops;
	pcdev->soc_host.priv		= pcdev;
	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
	pcdev->soc_host.nr		= pdev->id;
	err = soc_camera_host_register(&pcdev->soc_host);
	if (err)
		goto exit_free_irq;

	dev_info(&pdev->dev, "MX1 Camera driver loaded\n");

	return 0;

exit_free_irq:
	disable_fiq(irq);
	mxc_set_irq_fiq(irq, 0);
	release_fiq(&fh);
exit_free_dma:
	imx_dma_free(pcdev->dma_chan);
exit_iounmap:
	iounmap(base);
exit_release:
	release_mem_region(res->start, resource_size(res));
exit_kfree:
	kfree(pcdev);
exit_put_clk:
	clk_put(clk);
exit:
	return err;
}
/*
 * Install the Amstrad Delta QWERTY keyboard FIQ handler and its
 * deferred-FIQ IRQ, initialise the buffer shared between FIQ and IRQ
 * context, and finally redirect the GPIO bank 1 interrupt to FIQ.
 * Errors are logged and the function bails out, releasing what it
 * already claimed.
 */
void __init ams_delta_init_fiq(void)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs FIQ_regs;
	unsigned long val, offset;
	int i, retval;

	fiqhandler_start = &qwerty_fiqin_start;
	fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
	pr_info("Installing fiq handler from %p, length 0x%x\n",
			fiqhandler_start, fiqhandler_length);

	retval = claim_fiq(&fh);
	if (retval) {
		pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
				retval);
		return;
	}

	/* The deferred-FIQ IRQ hands work from the FIQ handler back to
	 * normal IRQ context. */
	retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
			IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL);
	if (retval < 0) {
		pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
		release_fiq(&fh);
		return;
	}
	/*
	 * Since no set_type() method is provided by OMAP irq chip,
	 * switch to edge triggered interrupt type manually.
	 */
	offset = IRQ_ILR0_REG_OFFSET +
			((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
	val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
	omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);

	set_fiq_handler(fiqhandler_start, fiqhandler_length);

	/*
	 * Initialise the buffer which is shared
	 * between FIQ mode and IRQ mode
	 */
	fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
	fiq_buffer[FIQ_MASK] = 0;
	fiq_buffer[FIQ_STATE] = 0;
	fiq_buffer[FIQ_KEY] = 0;
	fiq_buffer[FIQ_KEYS_CNT] = 0;
	fiq_buffer[FIQ_KEYS_HICNT] = 0;
	fiq_buffer[FIQ_TAIL_OFFSET] = 0;
	fiq_buffer[FIQ_HEAD_OFFSET] = 0;
	fiq_buffer[FIQ_BUF_LEN] = 256;
	fiq_buffer[FIQ_MISSED_KEYS] = 0;
	fiq_buffer[FIQ_BUFFER_START] =
			(unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];

	/* Zero the per-GPIO-line interrupt counters. */
	for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
		fiq_buffer[i] = 0;

	/*
	 * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
	 * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
	 * only means of communication with the IRQ level and other kernel
	 * context code.
	 */
	FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
	set_fiq_regs(&FIQ_regs);

	pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);

	/*
	 * Redirect GPIO interrupts to FIQ
	 */
	offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
	val = omap_readl(OMAP_IH1_BASE + offset) | 1;
	omap_writel(val, OMAP_IH1_BASE + offset);
}
/*
 * Battery driver init.  When POWER_FIQ is configured, claim the FIQ,
 * install the power FIQ handler, pin the vector and power-register
 * pages in the TLB, and reroute the brown-out/droop interrupt sources
 * to FIQ before enabling them.  Finally registers the platform driver.
 */
static int __init stmp3xxx_bat_init(void)
{
#ifdef POWER_FIQ
	int ret;

	ret = claim_fiq(&power_fiq);
	if (ret) {
		/* Best-effort: log and continue without the power FIQ. */
		pr_err("Can't claim fiq");
	} else {
		get_fiq_regs(&fiq_regs);
		set_fiq_handler(power_fiq_start, power_fiq_end-power_fiq_start);
		/* Pin the FIQ vector page and the power register block so
		 * the handler never takes a TLB miss. */
		lock_vector_tlb((void *)0xffff0000);
		lock_vector_tlb(REGS_POWER_BASE);

		/* disable interrupts to be configured as FIQs */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_CLR_ADDR(IRQ_VDD5V_DROOP));

		/* Enable these interrupts as FIQs */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENFIQ,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD5V_DROOP));

		/* enable FIQ functionality */
		__raw_writel(BM_ICOLL_CTRL_FIQ_FINAL_ENABLE,
			HW_ICOLL_CTRL_SET_ADDR);

		/* enable these interrupts */
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_DCDC4P2_BO));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_BATT_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDDD_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD18_BRNOUT));
		__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			HW_ICOLL_INTERRUPTn_SET_ADDR(IRQ_VDD5V_DROOP));
	}
#endif
	return platform_driver_register(&stmp3xxx_batdrv);
}