/*
 * floppy_enable_dma - start floppy "pseudo-DMA" by installing a FIQ handler.
 *
 * Selects the read or write FIQ routine based on dma->dma_mode, loads the
 * FIQ-mode register bank with the transfer length (r9), buffer address (r10)
 * and controller base (fp), then claims the FIQ, installs the handler and
 * enables the FIQ source.  Scatter-gather is not supported (BUG on using_sg).
 *
 * Fix: the call was garbled to set_fiq_regs(®s) by an HTML-entity
 * mangling of "&regs"; restored to pass the address of the local pt_regs.
 */
static void floppy_enable_dma(dmach_t channel, dma_t *dma)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs regs;

	/* This driver only handles a single linear buffer. */
	if (dma->using_sg)
		BUG();

	if (dma->dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	/* Parameters the FIQ assembly reads from its banked registers. */
	regs.ARM_r9 = dma->buf.length;
	regs.ARM_r10 = (unsigned long)dma->buf.__address;
	regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	set_fiq_handler(fiqhandler_start, fiqhandler_length);
	set_fiq_regs(&regs);
	enable_fiq(dma->dma_irq);
}
/*
 * a5k_floppy_enable_dma - A5000 variant of the floppy pseudo-DMA setup.
 *
 * Like floppy_enable_dma(), but copies the FIQ handler code directly to
 * the FIQ vector at address 0x1c instead of using set_fiq_handler(),
 * then loads the FIQ register bank (r9 = length, r10 = buffer, fp = port
 * base) and enables the FIQ source.
 *
 * Fix: set_fiq_regs(®s) was HTML-entity mojibake for "&regs";
 * restored to pass the address of the local pt_regs.
 */
static void a5k_floppy_enable_dma(dmach_t channel, dma_t *dma)
{
	struct pt_regs regs;
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	extern void floppy_fiqsetup(unsigned long len, unsigned long addr,
				    unsigned long port);

	if (dma->dma_mode == DMA_MODE_READ) {
		extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
		fiqhandler_start = &floppy_fiqin_start;
		fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
	} else {
		extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
		fiqhandler_start = &floppy_fiqout_start;
		fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
	}

	if (claim_fiq(&fh)) {
		printk("floppydma: couldn't claim FIQ.\n");
		return;
	}

	/* Install the handler straight onto the FIQ vector. */
	memcpy((void *)0x1c, fiqhandler_start, fiqhandler_length);

	regs.ARM_r9 = dma->buf.length;
	regs.ARM_r10 = (unsigned long)dma->buf.address;
	regs.ARM_fp = FLOPPYDMA_BASE;
	set_fiq_regs(&regs);
	enable_fiq(dma->dma_irq);
}
/*
 * Default FIQ owner callback.
 *
 * A relinquish request is always granted (return 0 without touching any
 * state).  On reacquire (!relinquish) the default FIQ handler and the
 * default register bank are reinstated with FIQs masked during the swap.
 */
static int fiq_def_op(void *ref, int relinquish)
{
	if (relinquish)
		return 0;

	/* Restore default handler and registers */
	local_fiq_disable();
	set_fiq_regs(&dfl_fiq_regs);
	set_fiq_handler(&dfl_fiq_insn, sizeof(dfl_fiq_insn));
	local_fiq_enable();

	/* FIXME: notify irq controller to standard enable FIQs */
	return 0;
}
/*
 * snd_imx_pcm_prepare - program the FIQ transfer's end-of-buffer index.
 *
 * Reads the current FIQ register bank, stores (period * periods - 1)
 * shifted into the upper 16 bits — r8 for playback, r9 for capture —
 * and writes the bank back.  (The <<16 scaling suggests the FIQ asm
 * keeps a 16.16 fixed-point position; confirm against the handler.)
 *
 * Fix: get_fiq_regs(®s)/set_fiq_regs(®s) were HTML-entity
 * mojibake for "&regs"; restored.
 *
 * Returns 0 unconditionally.
 */
static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	struct pt_regs regs;

	get_fiq_regs(&regs);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		regs.ARM_r8 = (iprtd->period * iprtd->periods - 1) << 16;
	else
		regs.ARM_r9 = (iprtd->period * iprtd->periods - 1) << 16;

	set_fiq_regs(&regs);

	return 0;
}
/** * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer * @hw: The hardware state. * * Claim the FIQ handler (only one can be active at any one time) and * then setup the correct transfer code for this transfer. * * This call updates all the necessary state information if successful, * so the caller does not need to do anything more than start the transfer * as normal, since the IRQ will have been re-routed to the FIQ handler. */ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) { struct pt_regs regs; enum spi_fiq_mode mode; struct spi_fiq_code *code; int ret; if (!hw->fiq_claimed) { /* try and claim fiq if we haven't got it, and if not * then return and simply use another transfer method */ ret = claim_fiq(&hw->fiq_handler); if (ret) return; } if (hw->tx && !hw->rx) mode = FIQ_MODE_TX; else if (hw->rx && !hw->tx) mode = FIQ_MODE_RX; else mode = FIQ_MODE_TXRX; regs.uregs[fiq_rspi] = (long)hw->regs; regs.uregs[fiq_rrx] = (long)hw->rx; regs.uregs[fiq_rtx] = (long)hw->tx + 1; regs.uregs[fiq_rcount] = hw->len - 1; regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; set_fiq_regs(®s); if (hw->fiq_mode != mode) { u32 *ack_ptr; hw->fiq_mode = mode; switch (mode) { case FIQ_MODE_TX: code = &s3c24xx_spi_fiq_tx; break; case FIQ_MODE_RX: code = &s3c24xx_spi_fiq_rx; break; case FIQ_MODE_TXRX: code = &s3c24xx_spi_fiq_txrx; break; default: code = NULL; } BUG_ON(!code); ack_ptr = (u32 *)&code->data[code->ack_offset]; *ack_ptr = ack_bit(hw->irq); set_fiq_handler(&code->data, code->length); } s3c24xx_set_fiq(hw->irq, true); hw->fiq_mode = mode; hw->fiq_inuse = 1; }
/*
 * mx1_camera_probe - platform probe for the MX1 CSI camera host.
 *
 * Acquires MMIO region, clock, DMA channel and the FIQ (used to service
 * start-of-frame), programs the FIQ register bank with the DMA/CSI
 * register addresses, and registers the soc_camera host.  All resources
 * are released in reverse order on failure via goto-cleanup.
 *
 * Fix: set_fiq_regs(®s) was HTML-entity mojibake for "&regs";
 * restored to pass the address of the local pt_regs.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init mx1_camera_probe(struct platform_device *pdev)
{
	struct mx1_camera_dev *pcdev;
	struct resource *res;
	struct pt_regs regs;
	struct clk *clk;
	void __iomem *base;
	unsigned int irq;
	int err = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || (int)irq <= 0) {
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "csi_clk");
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		goto exit;
	}

	pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
	if (!pcdev) {
		dev_err(&pdev->dev, "Could not allocate pcdev\n");
		err = -ENOMEM;
		goto exit_put_clk;
	}

	pcdev->res = res;
	pcdev->clk = clk;

	pcdev->pdata = pdev->dev.platform_data;
	if (pcdev->pdata)
		pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;

	if (!pcdev->mclk) {
		dev_warn(&pdev->dev,
			 "mclk_10khz == 0! Please, fix your platform data. "
			 "Using default 20MHz\n");
		pcdev->mclk = 20000000;
	}

	INIT_LIST_HEAD(&pcdev->capture);
	spin_lock_init(&pcdev->lock);

	/*
	 * Request the regions.
	 */
	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
		err = -EBUSY;
		goto exit_kfree;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		err = -ENOMEM;
		goto exit_release;
	}
	pcdev->irq = irq;
	pcdev->base = base;

	/* request dma */
	pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
	if (pcdev->dma_chan < 0) {
		dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
		err = -EBUSY;
		goto exit_iounmap;
	}
	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);

	imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
			       pcdev);
	imx_dma_config_channel(pcdev->dma_chan, IMX_DMA_TYPE_FIFO,
			       IMX_DMA_MEMSIZE_32, MX1_DMA_REQ_CSI_R, 0);
	/* burst length : 16 words = 64 bytes */
	imx_dma_config_burstlen(pcdev->dma_chan, 0);

	/* request irq */
	err = claim_fiq(&fh);
	if (err) {
		dev_err(&pdev->dev, "Camera interrupt register failed\n");
		goto exit_free_dma;
	}

	set_fiq_handler(&mx1_camera_sof_fiq_start,
			&mx1_camera_sof_fiq_end - &mx1_camera_sof_fiq_start);

	/* Addresses/masks the start-of-frame FIQ assembly works with. */
	regs.ARM_r8 = (long)MX1_DMA_DIMR;
	regs.ARM_r9 = (long)MX1_DMA_CCR(pcdev->dma_chan);
	regs.ARM_r10 = (long)pcdev->base + CSICR1;
	regs.ARM_fp = (long)pcdev->base + CSISR;
	regs.ARM_sp = 1 << pcdev->dma_chan;
	set_fiq_regs(&regs);

	mxc_set_irq_fiq(irq, 1);
	enable_fiq(irq);

	pcdev->soc_host.drv_name = DRIVER_NAME;
	pcdev->soc_host.ops = &mx1_soc_camera_host_ops;
	pcdev->soc_host.priv = pcdev;
	pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
	pcdev->soc_host.nr = pdev->id;
	err = soc_camera_host_register(&pcdev->soc_host);
	if (err)
		goto exit_free_irq;

	dev_info(&pdev->dev, "MX1 Camera driver loaded\n");

	return 0;

exit_free_irq:
	disable_fiq(irq);
	mxc_set_irq_fiq(irq, 0);
	release_fiq(&fh);
exit_free_dma:
	imx_dma_free(pcdev->dma_chan);
exit_iounmap:
	iounmap(base);
exit_release:
	release_mem_region(res->start, resource_size(res));
exit_kfree:
	kfree(pcdev);
exit_put_clk:
	clk_put(clk);
exit:
	return err;
}
/*
 * ams_delta_init_fiq - install the Amstrad Delta keyboard FIQ handler.
 *
 * Claims the FIQ, requests the deferred-FIQ IRQ used to hand work back
 * to IRQ level, installs the qwerty FIQ handler, initialises the shared
 * fiq_buffer, points FIQ-mode r9 at it, and finally redirects the GPIO
 * bank 1 interrupt to FIQ.  On any failure it logs and backs out.
 */
void __init ams_delta_init_fiq(void)
{
	void *fiqhandler_start;
	unsigned int fiqhandler_length;
	struct pt_regs FIQ_regs;
	unsigned long val, offset;
	int i, retval;

	/* Handler code assembled elsewhere; length from start/end markers. */
	fiqhandler_start = &qwerty_fiqin_start;
	fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
	pr_info("Installing fiq handler from %p, length 0x%x\n",
		fiqhandler_start, fiqhandler_length);

	retval = claim_fiq(&fh);
	if (retval) {
		pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
		       retval);
		return;
	}

	retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
			     IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL);
	if (retval < 0) {
		pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
		release_fiq(&fh);
		return;
	}
	/*
	 * Since no set_type() method is provided by OMAP irq chip,
	 * switch to edge triggered interrupt type manually.
	 */
	offset = IRQ_ILR0_REG_OFFSET +
		 ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
	val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
	omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);

	set_fiq_handler(fiqhandler_start, fiqhandler_length);

	/*
	 * Initialise the buffer which is shared
	 * between FIQ mode and IRQ mode
	 */
	fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
	fiq_buffer[FIQ_MASK] = 0;
	fiq_buffer[FIQ_STATE] = 0;
	fiq_buffer[FIQ_KEY] = 0;
	fiq_buffer[FIQ_KEYS_CNT] = 0;
	fiq_buffer[FIQ_KEYS_HICNT] = 0;
	fiq_buffer[FIQ_TAIL_OFFSET] = 0;
	fiq_buffer[FIQ_HEAD_OFFSET] = 0;
	fiq_buffer[FIQ_BUF_LEN] = 256;
	fiq_buffer[FIQ_MISSED_KEYS] = 0;
	fiq_buffer[FIQ_BUFFER_START] =
			(unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];

	/* Zero the per-interrupt counters. */
	for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
		fiq_buffer[i] = 0;

	/*
	 * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
	 * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
	 * only means of communication with the IRQ level and other kernel
	 * context code.
	 */
	FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
	set_fiq_regs(&FIQ_regs);

	pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);

	/*
	 * Redirect GPIO interrupts to FIQ
	 */
	offset = IRQ_ILR0_REG_OFFSET +
		 (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
	val = omap_readl(OMAP_IH1_BASE + offset) | 1;
	omap_writel(val, OMAP_IH1_BASE + offset);
}