static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
/* Unmap every IVP register base that is still mapped and clear the pointer. */
static void ivp_release_iores(struct platform_device *plat_devp)
{
    struct device *dev = &plat_devp->dev;
    struct ivp_device *ivp_dev =
         (struct ivp_device *) platform_get_drvdata(plat_devp);

    ivp_info("enter");

    if (!ivp_dev) {
        ivp_err("%s: pdev is null", __func__);
        return;
    }

    if (ivp_dev->io_res.gic_base_addr) {
        devm_iounmap(dev, ivp_dev->io_res.gic_base_addr);
        ivp_dev->io_res.gic_base_addr = NULL;
    }

    if (ivp_dev->io_res.pericrg_base_addr) {
        devm_iounmap(dev, ivp_dev->io_res.pericrg_base_addr);
        ivp_dev->io_res.pericrg_base_addr = NULL;
    }

    if (ivp_dev->io_res.pctrl_base_addr) {
        devm_iounmap(dev, ivp_dev->io_res.pctrl_base_addr);
        ivp_dev->io_res.pctrl_base_addr = NULL;
    }

    if (ivp_dev->io_res.cfg_base_addr) {
        devm_iounmap(dev, ivp_dev->io_res.cfg_base_addr);
        ivp_dev->io_res.cfg_base_addr = NULL;
    }
}
/*
 * Undo cp_watchdog_probe(): run the backend's exit hook, unmap the
 * registers (except for the CP-timer variant, which maps none) and free
 * the global state.
 *
 * Fix: guard the global cp_watchdog (and its ops) against NULL so remove
 * is safe after a failed probe or a double remove.
 */
int cp_watchdog_remove(struct platform_device *pdev)
{
#ifdef REQUEST_MEM_REGION
	struct resource *res = NULL;
#endif

	/* Nothing to do if probe never succeeded (or remove ran twice). */
	if (!cp_watchdog)
		return 0;

	if (cp_watchdog->ops && cp_watchdog->ops->exit)
		cp_watchdog->ops->exit(cp_watchdog->data);

	/* The CP timer type does not own a register mapping. */
	if (cp_wdt_type_cp_timer != cp_watchdog->type) {
		devm_iounmap(&pdev->dev, cp_watchdog->base);
#ifdef REQUEST_MEM_REGION
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (res)
			devm_release_mem_region(&pdev->dev, res->start,
				resource_size(res));
#endif
	}
	devm_kfree(&pdev->dev, cp_watchdog->data);
	devm_kfree(&pdev->dev, cp_watchdog);
	cp_watchdog = NULL;
	platform_set_drvdata(pdev, NULL);

	return 0;
}
Example #4
0
/* Release the register region/mapping claimed for this UART port. */
static void meson_uart_release_port(struct uart_port *port)
{
    int res_size = meson_uart_res_size(port);

    /* Only ports we ioremapped ourselves have anything to undo. */
    if (!(port->flags & UPF_IOREMAP))
        return;

    devm_release_mem_region(port->dev, port->mapbase, res_size);
    devm_iounmap(port->dev, port->membase);
    port->membase = NULL;
}
Example #5
0
/*
 * Free up to @count SCPI channels, stopping at the first slot whose
 * mailbox channel was never acquired (partially-initialized arrays).
 */
static void
scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
{
	struct scpi_chan *ch = pchan;
	int idx;

	for (idx = 0; idx < count; idx++, ch++) {
		if (!ch->chan)
			break;
		mbox_free_channel(ch->chan);
		devm_kfree(dev, ch->xfers);
		devm_iounmap(dev, ch->rx_payload);
	}
}
/*
 * Remove the UART port and release its resources.
 *
 * Fix: detach the port from the serial core *before* unmapping its
 * registers — uart_remove_one_port() can still shut the port down
 * through port->membase, so unmapping first risked a use-after-unmap.
 */
static int sirfsoc_uart_remove(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;

	platform_set_drvdata(pdev, NULL);
	if (sirfport->hw_flow_ctrl)
		pinctrl_put(sirfport->p);
	uart_remove_one_port(&sirfsoc_uart_drv, port);
	devm_iounmap(&pdev->dev, port->membase);
	return 0;
}
/* Unwind mmp_apical_probe(): debug interface, mappings, region, state. */
static int mmp_apical_remove(struct platform_device *pdev)
{
	struct mmp_apical *apical = platform_get_drvdata(pdev);
	struct resource *res0, *res1;

	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res0 || !res1) {
		dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
		return -ENOENT;
	}

	apical_dbg_uninit(apical->dev);
	/* Unmap in reverse order of mapping; only res0's region was claimed. */
	devm_iounmap(apical->dev, apical->lcd_reg_base);
	devm_iounmap(apical->dev, apical->reg_base);
	devm_release_mem_region(apical->dev, res0->start,
			resource_size(res0));
	devm_kfree(apical->dev, apical);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
Example #8
0
/* Power down the Synopsys SATA PHY and drop its register mapping. */
static void msm_sata_phy_deinit(struct device *dev)
{
	struct msm_sata_hba *hba = dev_get_drvdata(dev);
	u32 val;

	/* Synopsys PHY specific Power Down Sequence: assert PHY_RESET. */
	val = readl_relaxed(hba->phy_base + SATA_PHY_P0_PARAM4);
	writel_relaxed(val | 0x01, hba->phy_base + SATA_PHY_P0_PARAM4);

	devm_iounmap(dev, hba->phy_base);
}
/* Unwind the hi3630 core regulator probe: unmap, release region, free. */
static int hisi_regulator_hi3630_core_remove(struct platform_device *pdev)
{
	struct hisi_regulator_hi3630_core *pmic = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	devm_iounmap(dev, pmic->regs);
	devm_release_mem_region(dev, pmic->res->start,
				resource_size(pmic->res));
	devm_kfree(dev, pmic);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Power down the SATA PHY blocks and PLL, then drop the register mapping. */
static void msm_sata_phy_deinit(struct device *dev)
{
	struct msm_sata_hba *hba = dev_get_drvdata(dev);

	/* Assert the PHY power-down controls.  NOTE(review): the 0xF8/0xFE
	 * bit patterns come from the PHY datasheet — confirm against it.
	 */
	writel_relaxed(0xF8, hba->phy_base + SATA_PHY_POW_DWN_CTRL0);
	writel_relaxed(0xFE, hba->phy_base + SATA_PHY_POW_DWN_CTRL1);

	/* Disable the UNIPHY PLL; the barrier makes sure all the relaxed
	 * writes reach the hardware before the mapping is torn down.
	 */
	writel_relaxed(0x00, hba->phy_base + UNIPHY_PLL_GLB_CFG);
	mb();

	devm_iounmap(dev, hba->phy_base);
}
Example #11
0
/**
 * fsl_destroy_mc_io - destroy an MC I/O object
 * @mc_io: MC I/O object to destroy
 *
 * Detaches the DPMCP device (if any), unmaps and releases the MC portal,
 * and frees the object itself.
 */
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
	struct fsl_mc_device *dpmcp_dev;

	dpmcp_dev = mc_io->dpmcp_dev;
	if (dpmcp_dev != NULL)
		fsl_mc_io_unset_dpmcp(mc_io);

	devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
	devm_release_mem_region(mc_io->dev,
				mc_io->portal_phys_addr,
				mc_io->portal_size);
	mc_io->portal_virt_addr = NULL;

	devm_kfree(mc_io->dev, mc_io);
}
Example #12
0
/*
 * Unwind bcm63xx_spi_probe(): quiesce the SPI block, release IRQ,
 * mapping, region and clock.
 *
 * Fix: the clock was obtained with devm_clk_get() in probe, so it must
 * be released with devm_clk_put(), not plain clk_put().
 */
static int bcm63xx_spi_remove(struct platform_device *pdev)
{
   bcm_mpi_dev_data_t *bs = platform_get_drvdata(pdev);

   bcm_pr_debug("%s()\n", __func__);

   bcm_assert(1 == bs->ref_count);

   /* reset spi block: mask all interrupts */
   bcm_spi_writeb(bs, 0, SPI_INT_MASK);

   /* HW shutdown */
   clk_disable(bs->clk);
   devm_free_irq(&(pdev->dev), bs->irq, bs);
   devm_iounmap(&(pdev->dev), bs->regs);
   devm_release_mem_region(&(pdev->dev), bs->res_start, bs->res_size);
   platform_set_drvdata(pdev, NULL);
   devm_clk_put(&(pdev->dev), bs->clk);
   bs->ref_count = 0;

   return (0);
}
/*
 * Tear down the hi6401 IRQ device in strict reverse order of probe:
 * workqueues, IRQ/GPIO, clocks, pinctrl, register mapping, then state.
 */
static int hi6401_irq_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hi6401_irq *irq = platform_get_drvdata(pdev);

	/* Drain and destroy the PLL delayed-work queue, if created. */
	if(irq->pll_delay_wq) {
		cancel_delayed_work(&irq->pll_delay_work);
		flush_workqueue(irq->pll_delay_wq);
		destroy_workqueue(irq->pll_delay_wq);
	}
	/* Same for the IRQ delayed-work queue. */
	if(irq->hi6401_irq_delay_wq) {
		cancel_delayed_work(&irq->hi6401_irq_delay_work);
		flush_workqueue(irq->hi6401_irq_delay_wq);
		destroy_workqueue(irq->hi6401_irq_delay_wq);
	}
	free_irq(irq->irq, irq);
	gpio_free(irq->gpio);

	/* Disable clocks before dropping their references. */
	clk_disable_unprepare(irq->pmu_audio_clk);
	devm_clk_put(dev, irq->pmu_audio_clk);
	clk_disable_unprepare(irq->codec_ssi_clk);
	devm_clk_put(dev, irq->codec_ssi_clk);
	/* Park the codec SSI pins in their idle state, then release pinctrl. */
	codec_ssi_iomux_idle(irq->pctrl);
	pinctrl_put(irq->pctrl);

	devm_iounmap(dev, irq->reg_base_addr);
	devm_release_mem_region(dev, irq->res->start,
				resource_size(irq->res));
	devm_kfree(dev, irq);
	platform_set_drvdata(pdev, NULL);
	/* Free the global dump buffer (allocated elsewhere in this driver). */
	if (g_dump_buf)
	{
		kfree(g_dump_buf);
		g_dump_buf = NULL;
	}

	return 0;
}
Example #14
0
/*
 * Undo ath10k_ahb_resource_init(): drop the register mappings (the main
 * window was devm-mapped, GCC/TCSR were plain ioremaps), then shut down
 * clocks and reset control.
 */
static void ath10k_ahb_resource_deinit(struct ath10k *ar)
{
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
	struct device *dev = &ar_ahb->pdev->dev;

	if (ar_ahb->mem)
		devm_iounmap(dev, ar_ahb->mem);
	ar_ahb->mem = NULL;

	if (ar_ahb->gcc_mem)
		iounmap(ar_ahb->gcc_mem);
	ar_ahb->gcc_mem = NULL;

	if (ar_ahb->tcsr_mem)
		iounmap(ar_ahb->tcsr_mem);
	ar_ahb->tcsr_mem = NULL;

	ath10k_ahb_clock_deinit(ar);
	ath10k_ahb_rst_ctrl_deinit(ar);
}
/*
 * Probe the Exynos AHCI SATA controller: map its registers, bring up the
 * sclk/sata clocks and the gen-3 PHY, then register an ATA host.
 *
 * Fixes:
 *  - pr_info(KERN_INFO "...") embedded the level prefix into the message
 *    (pr_info() already supplies KERN_INFO) — dropped the stray level.
 *  - a missing "sata frequency" DT property returned -ENOMEM; it is a
 *    bad-DT condition, so return -EINVAL.
 */
static int __init exynos_sata_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info pi = ahci_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ahci_host_priv *hpriv;
	struct exynos_sata *sata;
	struct ata_host *host;
	struct resource *mem;
	int n_ports, i, ret;

	sata = devm_kzalloc(dev, sizeof(*sata), GFP_KERNEL);
	if (!sata) {
		dev_err(dev, "can't alloc sata\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		ret = -ENOMEM;
		goto err1;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		ret = -EINVAL;
		goto err2;
	}

	sata->irq = platform_get_irq(pdev, 0);
	if (sata->irq <= 0) {
		dev_err(dev, "no irq\n");
		ret = -EINVAL;
		goto err2;
	}

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		ret = -ENOMEM;
		goto err2;
	}

	exynos_sata_parse_dt(dev->of_node, sata);
	if (!sata->freq) {
		dev_err(dev, "can't determine sata frequency \n");
		ret = -EINVAL;	/* bad/missing DT property, not OOM */
		goto err2;
	}

	sata->sclk = devm_clk_get(dev, "sclk_sata");
	if (IS_ERR(sata->sclk)) {
		dev_err(dev, "failed to get sclk_sata\n");
		ret = PTR_ERR(sata->sclk);
		goto err3;
	}
	clk_enable(sata->sclk);

	clk_set_rate(sata->sclk, sata->freq * MHZ);

	sata->clk = devm_clk_get(dev, "sata");
	if (IS_ERR(sata->clk)) {
		dev_err(dev, "failed to get sata clock\n");
		ret = PTR_ERR(sata->clk);
		goto err4;
	}
	clk_enable(sata->clk);

	/*  Get a gen 3 PHY controller */

	sata->phy = sata_get_phy(SATA_PHY_GENERATION3);
	if (!sata->phy) {
		dev_err(dev, "failed to get sata phy\n");
		ret = -EPROBE_DEFER;
		goto err5;
	}

	/* Initialize the controller */

	ret = sata_init_phy(sata->phy);
	if (ret < 0) {
		dev_err(dev, "failed to initialize sata phy\n");
		goto err6;
	}

	ahci_save_initial_config(dev, hpriv, 0, 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		ret = -ENOMEM;
		goto err7;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		pr_info("ahci: SSS flag set, parallel bus scan disabled\n");

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	ret = ahci_reset_controller(host);
	if (ret)
		goto err7;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	ret = ata_host_activate(host, sata->irq, ahci_interrupt, IRQF_SHARED,
				&ahci_platform_sht);
	if (ret)
		goto err7;

	platform_set_drvdata(pdev, sata);

	return 0;

	/* Error unwinding, strictly in reverse order of acquisition. */
 err7:
	sata_shutdown_phy(sata->phy);

 err6:
	sata_put_phy(sata->phy);

 err5:
	clk_disable(sata->clk);
	devm_clk_put(dev, sata->clk);

 err4:
	clk_disable(sata->sclk);
	devm_clk_put(dev, sata->sclk);

 err3:
	devm_iounmap(dev, hpriv->mmio);

 err2:
	devm_kfree(dev, hpriv);

 err1:
	devm_kfree(dev, sata);

	return ret;
}
Example #16
0
/*
 * Probe the BCM63xx SPI controller: claim its register region, IRQ and
 * clock, then initialize the hardware.
 *
 * Fixes:
 *  - removed two unreachable statements that sat between "return 0" and
 *    the first error label (dead remnants of a cleanup path);
 *  - the error path called clk_put() on a clock obtained with
 *    devm_clk_get(); use devm_clk_put() instead.
 */
static int bcm63xx_spi_probe(struct platform_device *pdev)
{
   struct device *dev = &(pdev->dev);
   int ret;
   struct resource *r;
   int irq;
   bcm_mpi_dev_data_t *bs;

   bcm_pr_debug("%s()\n", __func__);

   if (
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
       (!pdev->id_entry->driver_data)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */
       (!BCMCPU_IS_6358())
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */
      ) {
      return -EINVAL;
   }

   bs = &(bcm_mpi_dev_data);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
   bcm_mpi_dev_data_init(bs, (const unsigned long *)pdev->id_entry->driver_data);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */
   bcm_mpi_dev_data_init(bs, bcm6358_spi_reg_offsets);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */
   bs->ref_count = 1;
   bs->pdev = pdev;

   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   if (!r) {
      dev_err(dev, "no iomem\n");
      ret = -ENXIO;
      goto fail_get_res;
   }
   bs->res_start = r->start;
   bs->res_size = resource_size(r);

   irq = platform_get_irq(pdev, 0);
   if (irq < 0) {
      dev_err(dev, "no irq\n");
      ret = -ENXIO;
      goto fail_get_irq;
   }
   bs->irq = irq;

   bs->clk = devm_clk_get(dev, "spi");
   if (IS_ERR(bs->clk)) {
      dev_err(dev, "no clock for device\n");
      ret = PTR_ERR(bs->clk);
      goto fail_get_clk;
   }

   platform_set_drvdata(pdev, bs);

   if (!devm_request_mem_region(&(pdev->dev), bs->res_start, bs->res_size, driver_name)) {
      dev_err(dev, "iomem request failed\n");
      ret = -ENXIO;
      goto fail_req_reg;
   }

   bs->regs = devm_ioremap_nocache(&(pdev->dev), bs->res_start, bs->res_size);
   if (!bs->regs) {
      dev_err(dev, "unable to ioremap regs\n");
      ret = -ENOMEM;
      goto fail_io_remap;
   }
   bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
   bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]);

   ret = devm_request_irq(&(pdev->dev), irq, bcm63xx_spi_interrupt, 0,
                     pdev->name, bs);
   if (ret) {
      dev_err(dev, "unable to request irq\n");
      goto fail_req_irq;
   }

   /* Initialize hardware */
   clk_enable(bs->clk);
   /* Read interrupts and clear them immediately */
   bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
   bcm_spi_writeb(bs, 0, SPI_INT_MASK);

   bcm_mpi_set_clk_cfg(bs, SPI_CLK_0_391MHZ);
   bcm_mpi_set_fill_byte(bs, 0);

   dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n",
       bs->res_start, bs->irq, bs->fifo_size);

   return 0;

fail_req_irq:
   devm_iounmap(&(pdev->dev), bs->regs);
fail_io_remap:
   devm_release_mem_region(&(pdev->dev), bs->res_start, bs->res_size);
fail_req_reg:
   platform_set_drvdata(pdev, NULL);
   devm_clk_put(dev, bs->clk);
fail_get_clk:
fail_get_irq:
fail_get_res:
   bs->ref_count = 0;
   return ret;
}
Example #17
0
/*
 * Map the device register window plus the fixed GCC/TCSR SoC regions,
 * set 32-bit DMA masks, and bring up clocks, reset control and the
 * legacy IRQ.  The error labels deliberately fall through one another so
 * each failure point unwinds everything acquired before it.
 */
static int ath10k_ahb_resource_init(struct ath10k *ar)
{
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int ret;

	pdev = ar_ahb->pdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath10k_err(ar, "failed to get memory resource\n");
		ret = -ENXIO;
		goto out;
	}

	/* Device-managed mapping; the explicit devm_iounmap below is only
	 * for the early-failure path.
	 */
	ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ar_ahb->mem)) {
		ath10k_err(ar, "mem ioremap error\n");
		ret = PTR_ERR(ar_ahb->mem);
		goto out;
	}

	ar_ahb->mem_len = resource_size(res);

	/* GCC and TCSR live at fixed SoC addresses; plain (non-devm)
	 * ioremaps, undone with iounmap() on error.
	 */
	ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
					  ATH10K_GCC_REG_SIZE);
	if (!ar_ahb->gcc_mem) {
		ath10k_err(ar, "gcc mem ioremap error\n");
		ret = -ENOMEM;
		goto err_mem_unmap;
	}

	ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
					   ATH10K_TCSR_REG_SIZE);
	if (!ar_ahb->tcsr_mem) {
		ath10k_err(ar, "tcsr mem ioremap error\n");
		ret = -ENOMEM;
		goto err_gcc_mem_unmap;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
		goto err_tcsr_mem_unmap;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
			   ret);
		goto err_tcsr_mem_unmap;
	}

	ret = ath10k_ahb_clock_init(ar);
	if (ret)
		goto err_tcsr_mem_unmap;

	ret = ath10k_ahb_rst_ctrl_init(ar);
	if (ret)
		goto err_clock_deinit;

	ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
	if (ar_ahb->irq < 0) {
		ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
		ret = ar_ahb->irq;
		goto err_clock_deinit;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
		   ar_ahb->mem, ar_ahb->mem_len,
		   ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
	return 0;

	/* Fall-through unwind: each label also executes all labels below it,
	 * clearing each pointer right after its mapping is dropped.
	 */
err_clock_deinit:
	ath10k_ahb_clock_deinit(ar);

err_tcsr_mem_unmap:
	iounmap(ar_ahb->tcsr_mem);

err_gcc_mem_unmap:
	ar_ahb->tcsr_mem = NULL;
	iounmap(ar_ahb->gcc_mem);

err_mem_unmap:
	ar_ahb->gcc_mem = NULL;
	devm_iounmap(&pdev->dev, ar_ahb->mem);

out:
	ar_ahb->mem = NULL;
	return ret;
}
/*
 * Probe the MMP display controller: claim its MMIO region, IRQ and
 * clock, then initialize and register each display path.
 *
 * Fixes:
 *  - clk cleanup called devm_clk_put() before clk_disable_unprepare();
 *    a clock must be disabled before its reference is dropped;
 *  - the old shared "failed:" label released the mem region (and unmapped
 *    reg_base) regardless of how far probe had gotten, so a failed
 *    devm_request_mem_region() still triggered a release of a region that
 *    was never claimed.  The unwind is now a conventional goto chain with
 *    one label per acquired resource.
 */
static int mmphw_probe(struct platform_device *pdev)
{
	struct mmp_mach_plat_info *mi;
	struct resource *res;
	int ret, i, size, irq;
	struct mmphw_path_plat *path_plat;
	struct mmphw_ctrl *ctrl = NULL;

	/* get resources from platform data */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
		ret = -ENOENT;
		goto failed;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "%s: no IRQ defined\n", __func__);
		ret = -ENOENT;
		goto failed;
	}

	/* get configs from platform data */
	mi = pdev->dev.platform_data;
	if (mi == NULL || !mi->path_num || !mi->paths) {
		dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
		ret = -EINVAL;
		goto failed;
	}

	/* allocate controller state plus one path_plat per path */
	size = sizeof(struct mmphw_ctrl) + sizeof(struct mmphw_path_plat) *
	       mi->path_num;
	ctrl = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto failed;
	}

	ctrl->name = mi->name;
	ctrl->path_num = mi->path_num;
	ctrl->dev = &pdev->dev;
	ctrl->irq = irq;
	platform_set_drvdata(pdev, ctrl);
	mutex_init(&ctrl->access_ok);

	/* map registers.*/
	if (!devm_request_mem_region(ctrl->dev, res->start,
			resource_size(res), ctrl->name)) {
		dev_err(ctrl->dev,
			"can't request region for resource %pR\n", res);
		ret = -EINVAL;
		goto failed_free_ctrl;
	}

	ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
			res->start, resource_size(res));
	if (ctrl->reg_base == NULL) {
		dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
			res->start, res->end);
		ret = -ENOMEM;
		goto failed_release_region;
	}

	/* request irq */
	ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
		IRQF_SHARED, "lcd_controller", ctrl);
	if (ret < 0) {
		dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
				__func__, ctrl->irq);
		ret = -ENXIO;
		goto failed_iounmap;
	}

	/* get clock */
	ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
	if (IS_ERR(ctrl->clk)) {
		dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name);
		ret = -ENOENT;
		goto failed_free_irq;
	}
	clk_prepare_enable(ctrl->clk);

	/* init global regs */
	ctrl_set_default(ctrl);

	/* init paths from machine info and register them */
	for (i = 0; i < ctrl->path_num; i++) {
		/* get from config and machine info */
		path_plat = &ctrl->path_plats[i];
		path_plat->id = i;
		path_plat->ctrl = ctrl;

		/* path init */
		if (!path_init(path_plat, &mi->paths[i])) {
			ret = -EINVAL;
			goto failed_path_init;
		}
	}

#ifdef CONFIG_MMP_DISP_SPI
	ret = lcd_spi_register(ctrl);
	if (ret < 0)
		goto failed_path_init;
#endif

	dev_info(ctrl->dev, "device init done\n");

	return 0;

failed_path_init:
	for (i = 0; i < ctrl->path_num; i++) {
		path_plat = &ctrl->path_plats[i];
		path_deinit(path_plat);
	}
	/* disable the clock before dropping its reference */
	clk_disable_unprepare(ctrl->clk);
	devm_clk_put(ctrl->dev, ctrl->clk);
failed_free_irq:
	devm_free_irq(ctrl->dev, ctrl->irq, ctrl);
failed_iounmap:
	devm_iounmap(ctrl->dev, ctrl->reg_base);
failed_release_region:
	devm_release_mem_region(ctrl->dev, res->start, resource_size(res));
failed_free_ctrl:
	devm_kfree(ctrl->dev, ctrl);
failed:
	platform_set_drvdata(pdev, NULL);
	dev_err(&pdev->dev, "device init failed\n");

	return ret;
}
/*
 * Probe the AMD XGBE PHY: allocate private state, locate the PHY's
 * platform device (via the DT phy-handle in the OF case, or the shared
 * XGBE platform device in the ACPI case), map its three register regions
 * (rxtx, sir0, sir1), fetch the AN interrupt and the SerDes tuning
 * properties.  Error labels unwind in reverse order of acquisition.
 */
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *phy_pdev;
	struct device *dev, *phy_dev;
	unsigned int phy_resnum, phy_irqnum;
	int ret;

	if (!phydev->bus || !phydev->bus->parent)
		return -EINVAL;

	dev = phydev->bus->parent;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdev = to_platform_device(dev);
	priv->adev = ACPI_COMPANION(dev);
	priv->dev = dev;
	priv->phydev = phydev;
	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);

	/* OF path: the PHY has its own platform device, referenced by the
	 * "phy-handle" property; its resources start at index 0.
	 */
	if (!priv->adev || acpi_disabled) {
		struct device_node *bus_node;
		struct device_node *phy_node;

		bus_node = priv->dev->of_node;
		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
		if (!phy_node) {
			dev_err(dev, "unable to parse phy-handle\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);

		if (!phy_pdev) {
			dev_err(dev, "unable to obtain phy device\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_resnum = 0;
		phy_irqnum = 0;
	} else {
		/* In ACPI, the XGBE and PHY resources are the grouped
		 * together with the PHY resources at the end
		 */
		phy_pdev = priv->pdev;
		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_MEM) - 3;
		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_IRQ) - 1;
	}
	phy_dev = &phy_pdev->dev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_put;
	}

	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_sir1;
	}
	priv->an_irq = ret;

	/* Get the device speed set property */
	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
				       &priv->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		goto err_sir1;
	}

	/* Only the two known speed-set values are accepted. */
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_sir1;
	}

	/* Each SerDes tuning property is optional: read the per-speed
	 * array when present, otherwise fall back to the driver defaults.
	 */
	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_BLWC_PROPERTY,
						     priv->serdes_blwc,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_BLWC_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
		       sizeof(priv->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_CDR_RATE_PROPERTY,
						     priv->serdes_cdr_rate,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_CDR_RATE_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
		       sizeof(priv->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_PQ_SKEW_PROPERTY,
						     priv->serdes_pq_skew,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_PQ_SKEW_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
		       sizeof(priv->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_TX_AMP_PROPERTY,
						     priv->serdes_tx_amp,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_TX_AMP_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
		       sizeof(priv->serdes_tx_amp));
	}

	phydev->priv = priv;

	/* Drop the reference taken by of_find_device_by_node() (OF only). */
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

	return 0;

err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_put:
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

err_priv:
	devm_kfree(dev, priv);

	return ret;
}
/*
 * Probe the MMP apical block: read channel count from DT (or platform
 * data), map the apical and LCD register regions and register the debug
 * interface.
 *
 * Fix: the apical_dbg_init() failure path jumped to ioremap1_fail and
 * never unmapped lcd_reg_base (which had just been mapped), and left
 * stale drvdata behind; a dedicated dbg_init_fail label handles both.
 */
static int mmp_apical_probe(struct platform_device *pdev)
{
	struct mmp_mach_apical_info *mi;
	struct mmp_mach_apical_info dt_mi;
	struct device_node *np = pdev->dev.of_node;
	struct resource *res0, *res1;
	int ret = 0;

	/* get resources from platform data */
	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res0 == NULL || res1 == NULL) {
		dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
		return -ENOENT;
	}

	if (IS_ENABLED(CONFIG_OF)) {
		if (of_property_read_u32(np, "marvell,apical-num",
					&dt_mi.apical_channel_num)) {
			dev_err(&pdev->dev, "%s: apical get num fail\n",
					__func__);
			return -EINVAL;
		}
		mi = &dt_mi;
	} else {
		/* get configs from platform data */
		mi = pdev->dev.platform_data;
		if (mi == NULL) {
			dev_err(&pdev->dev, "%s: no platform data defined\n",
					__func__);
			return -EINVAL;
		}
	}

	/* one mmp_apical_info per channel, trailing the main struct */
	apical = devm_kzalloc(&pdev->dev, sizeof(struct mmp_apical) +
			sizeof(struct mmp_apical_info) * mi->apical_channel_num,
			GFP_KERNEL);
	if (apical == NULL) {
		dev_err(&pdev->dev, "apical alloc fail\n");
		return -ENOMEM;
	}
	apical_init(mi);
	apical->dev = &pdev->dev;

	/* map registers.*/
	if (!devm_request_mem_region(apical->dev, res0->start,
			resource_size(res0), apical->name)) {
		dev_err(apical->dev,
			"can't request region for resource %pR\n", res0);
		ret = -EINVAL;
		goto mem_fail;
	}

	apical->reg_base = devm_ioremap_nocache(apical->dev,
			res0->start, resource_size(res0));
	if (apical->reg_base == NULL) {
		dev_err(apical->dev, "%s: res0%lx - %lx map failed\n", __func__,
			(unsigned long)res0->start, (unsigned long)res0->end);
		ret = -ENOMEM;
		goto ioremap_fail;
	}

	apical->lcd_reg_base = devm_ioremap_nocache(apical->dev,
			res1->start, resource_size(res1));
	if (apical->lcd_reg_base == NULL) {
		dev_err(apical->dev, "%s: res1%lx - %lx map failed\n", __func__,
			(unsigned long)res1->start, (unsigned long)res1->end);
		ret = -ENOMEM;
		goto ioremap1_fail;
	}
	platform_set_drvdata(pdev, apical);
	ret = apical_dbg_init(apical->dev);
	if (ret < 0) {
		dev_err(apical->dev, "%s: Failed to register apical dbg interface\n", __func__);
		goto dbg_init_fail;
	}

	dev_info(&pdev->dev, "apical probe succeed\n");

	return 0;

dbg_init_fail:
	/* lcd_reg_base was mapped and drvdata set — undo both. */
	platform_set_drvdata(pdev, NULL);
	devm_iounmap(&pdev->dev, apical->lcd_reg_base);
ioremap1_fail:
	devm_iounmap(&pdev->dev, apical->reg_base);
ioremap_fail:
	devm_release_mem_region(&pdev->dev, res0->start,
			resource_size(res0));
mem_fail:
	devm_kfree(&pdev->dev, apical);
	dev_err(&pdev->dev, "apical device init failed\n");

	return ret;
}
/*
 * Probe the Exynos SATA PHY: allocate the generic and SoC-specific PHY
 * objects, map the PHY registers, fetch the clock, then register the PHY
 * and its companion I2C driver.  Error labels unwind in reverse order.
 */
static int __init sata_phy_probe(struct platform_device *pdev)
{
	struct exynos_sata_phy *sataphy;
	struct sata_phy *phy;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret = 0;

	phy = kzalloc(sizeof(struct sata_phy), GFP_KERNEL);
	if (!phy) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		ret = -ENOMEM;
		goto out;
	}

	sataphy = kzalloc(sizeof(struct exynos_sata_phy), GFP_KERNEL);
	if (!sataphy) {
		dev_err(dev, "failed to allocate memory\n");
		ret = -ENOMEM;
		goto err0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Could not find IO resource\n");
		ret = -EINVAL;
		goto err1;
	}

	sataphy->mem = devm_request_mem_region(dev, res->start,
					resource_size(res), pdev->name);
	if (!sataphy->mem) {
		dev_err(dev, "Could not request IO resource\n");
		ret = -EINVAL;
		goto err1;
	}

	sataphy->mmio =
	    devm_ioremap(dev, res->start, resource_size(res));
	if (!sataphy->mmio) {
		dev_err(dev, "failed to remap IO\n");
		ret = -ENOMEM;
		goto err2;
	}

	/* Clock is only looked up here; NOTE(review): err4 below calls
	 * clk_disable() although this function never enables the clock —
	 * presumably sataphy_init() enables it; confirm before relying on it.
	 */
	sataphy->clk = devm_clk_get(dev, "sata-phy");
	if (IS_ERR(sataphy->clk)) {
		dev_err(dev, "failed to get clk for PHY\n");
		ret = PTR_ERR(sataphy->clk);
		goto err3;
	}

	/* Hand the generic PHY object its callbacks and private state. */
	phy->init = sataphy_init;
	phy->shutdown = sataphy_shutdown;
	phy->priv_data = (void *)sataphy;
	phy->dev = dev;

	ret = sata_add_phy(phy, SATA_PHY_GENERATION3);
	if (ret < 0)
		goto err4;

	ret = i2c_add_driver(&sataphy_i2c_driver);
	if (ret < 0)
		goto err5;

	platform_set_drvdata(pdev, phy);

	return ret;

 err5:
	sata_remove_phy(phy);

 err4:
	clk_disable(sataphy->clk);
	devm_clk_put(dev, sataphy->clk);

 err3:
	devm_iounmap(dev, sataphy->mmio);

 err2:
	devm_release_mem_region(dev, res->start, resource_size(res));

 err1:
	kfree(sataphy);

 err0:
	kfree(phy);

 out:
	return ret;
}
int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;

	if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
		dev_err(&pdev->dev,
			"Unable to find cell-index in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	sirfport = &sirfsoc_uart_ports[pdev->id];
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;

	if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
		sirfport->hw_flow_ctrl = 1;

	if (of_property_read_u32(pdev->dev.of_node,
			"fifosize",
			&port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto irq_err;
	}
	port->irq = res->start;

	if (sirfport->hw_flow_ctrl) {
		sirfport->p = pinctrl_get_select_default(&pdev->dev);
		ret = IS_ERR(sirfport->p);
		if (ret)
			goto pin_err;
	}

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto port_err;
	}

	return 0;

port_err:
	platform_set_drvdata(pdev, NULL);
	if (sirfport->hw_flow_ctrl)
		pinctrl_put(sirfport->p);
pin_err:
irq_err:
	devm_iounmap(&pdev->dev, port->membase);
err:
	return ret;
}
/*
 * hwevent_store_setreg() - sysfs store handler: "<addr> <val>".
 *
 * Parses a register address and value from @buf.  If @addr falls inside
 * one of the hardware-event mux windows, the window is temporarily
 * ioremapped and the value written there; otherwise the write is routed
 * through coresight_csr_hwctrl_set().  The hwevent block is enabled for
 * the duration of the operation and the driver mutex is held throughout.
 *
 * Returns @size on success or a negative errno.
 */
static ssize_t hwevent_store_setreg(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	struct hwevent_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long long addr;
	unsigned long val;
	void *mapped;
	int rc, idx;

	if (sscanf(buf, "%llx %lx", &addr, &val) != 2)
		return -EINVAL;

	mutex_lock(&drvdata->mutex);
	rc = hwevent_enable(drvdata);
	if (rc) {
		mutex_unlock(&drvdata->mutex);
		return rc;
	}

	for (idx = 0; idx < drvdata->nr_hmux; idx++) {
		/* Skip windows that do not contain the target address. */
		if (addr < drvdata->hmux[idx].start ||
		    addr >= drvdata->hmux[idx].end)
			continue;

		mapped = devm_ioremap(dev,
				      drvdata->hmux[idx].start,
				      drvdata->hmux[idx].end -
				      drvdata->hmux[idx].start);
		if (!mapped) {
			dev_err(dev, "unable to map address 0x%llx\n",
				addr);
			rc = -ENOMEM;
			goto out;
		}
		writel_relaxed(val, mapped + addr -
			       drvdata->hmux[idx].start);
		/* Ensure writes to hwevent control registers
		   are completed before unmapping the address
		*/
		mb();
		devm_iounmap(dev, mapped);
		break;
	}

	/* No mux window matched: fall back to the CSR control path. */
	if (idx == drvdata->nr_hmux) {
		rc = coresight_csr_hwctrl_set(addr, val);
		if (rc) {
			dev_err(dev, "invalid mux control register address\n");
			rc = -EINVAL;
			goto out;
		}
	}

	rc = 0;
out:
	hwevent_disable(drvdata);
	mutex_unlock(&drvdata->mutex);
	return rc ? rc : size;
}
/*
 * cp_watchdog_probe() - allocate and initialize the global cp_watchdog
 * instance according to the device tree.
 *
 * The "watchdog-type" DT property selects one of three backends:
 *   cp_wdt_type_wdt_timer - dedicated watchdog timer (wdt_timer_ops)
 *   cp_wdt_type_soc_timer - shared SoC timer (soc_timer_ops); also
 *                           requires "timer-num" and "match-num"
 *   cp_wdt_type_cp_timer  - CP-side timer (cp_timer_ops); has no MMIO
 *                           region of its own, so no ioremap is done
 *
 * Returns 0 on success or a negative errno.  On any failure every devm
 * allocation is released explicitly and the global cp_watchdog pointer
 * is reset to NULL.
 */
int cp_watchdog_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	int ret = -EINVAL;
	u32 type;

	/* Single module-global instance, published at the end of probe. */
	cp_watchdog = devm_kzalloc(&pdev->dev,
		sizeof(struct cp_watchdog), GFP_KERNEL);

	if (!cp_watchdog)
		return -ENOMEM;

	if (of_property_read_u32(np, "watchdog-type", &type)) {
		dev_err(&pdev->dev, "%s: no watchdog type defined\n", __func__);
		ret = -EINVAL;
		goto freemem;
	}

	dev_info(&pdev->dev, "%s: watchdog type %d\n", __func__, type);

	/* Allocate the backend-private data and bind the matching ops. */
	switch (type) {
	case cp_wdt_type_wdt_timer:

		cp_watchdog->data = devm_kzalloc(&pdev->dev,
			sizeof(struct wdt_timer), GFP_KERNEL);
		if (!cp_watchdog->data) {
			ret = -ENOMEM;
			goto freemem;
		}
		cp_watchdog->ops = &wdt_timer_ops;
		break;

	case cp_wdt_type_soc_timer:

		cp_watchdog->data = devm_kzalloc(&pdev->dev,
			sizeof(struct soc_timer), GFP_KERNEL);
		if (!cp_watchdog->data) {
			ret = -ENOMEM;
			goto freemem;
		}
		cp_watchdog->ops = &soc_timer_ops;

		/* The SoC timer is shared: DT must say which timer and
		 * which match/compare channel this watchdog owns. */
		if (of_property_read_u32(np, "timer-num",
				&cp_watchdog->timer_num)) {
			dev_err(&pdev->dev,
				"%s: no timer num defined\n", __func__);
			ret = -EINVAL;
			goto freemem;
		}
		if (of_property_read_u32(np, "match-num",
				&cp_watchdog->match_num)) {
			dev_err(&pdev->dev,
				"%s: no match num defined\n", __func__);
			ret = -EINVAL;
			goto freemem;
		}
		dev_info(&pdev->dev,
			"%s: timer-num %u, match-num %u\n", __func__,
			cp_watchdog->timer_num, cp_watchdog->match_num);
		break;

	case cp_wdt_type_cp_timer:
		cp_watchdog->data = devm_kzalloc(&pdev->dev,
			sizeof(struct cp_timer), GFP_KERNEL);
		if (!cp_watchdog->data) {
			ret = -ENOMEM;
			goto freemem;
		}

		cp_watchdog->ops = &cp_timer_ops;
		break;

	default:
		dev_err(&pdev->dev, "%s: wrong watchdog type %u\n",
			__func__, type);
		ret = -EINVAL;
		goto freemem;
	}

	cp_watchdog->irq = platform_get_irq(pdev, 0);
	if (cp_watchdog->irq < 0) {
		dev_err(&pdev->dev, "%s: no irq defined\n", __func__);
		ret = -ENXIO;
		goto freemem;
	}

	/* Only the wdt/soc timer backends have their own register
	 * window; the cp_timer backend is accessed by the CP itself. */
	if (cp_wdt_type_cp_timer != type) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			dev_err(&pdev->dev, "%s: no iomem defined\n", __func__);
			ret = -ENOMEM;
			goto freemem;
		}

#ifdef REQUEST_MEM_REGION
		/* Optional (compile-time) exclusive claim of the region;
		 * mirrored by devm_release_mem_region in the unwind path
		 * and in cp_watchdog_remove(). */
		if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), "cp-watchdog")) {
			dev_err(&pdev->dev,
				"%s: can't request region for resource %pR\n",
				__func__, res);
			ret = -EINVAL;
			goto freemem;
		}
#endif

		cp_watchdog->base = devm_ioremap_nocache(&pdev->dev,
			res->start, resource_size(res));
		if (!cp_watchdog->base) {
			dev_err(&pdev->dev,
				"%s: map res %lx - %lx failed\n",
				__func__, (unsigned long)res->start,
				(unsigned long)res->end);
			ret = -ENOMEM;
			goto freememreg;
		}
	}
	/* Backend-specific initialization (optional callback). */
	if (cp_watchdog->ops->init && cp_watchdog->ops->init(cp_watchdog) < 0) {
		pr_err("%s: init watchdog error\n", __func__);
		ret = -ENODEV;
		goto freeiomap;
	}

	cp_watchdog->type = type;
	platform_set_drvdata(pdev, cp_watchdog);

	dev_info(&pdev->dev, "%s: init watchdog success\n", __func__);

	return 0;

	/* Unwind in reverse acquisition order; the cp_timer type never
	 * mapped anything, hence the guards below. */
freeiomap:
	if (cp_wdt_type_cp_timer != type)
		devm_iounmap(&pdev->dev, cp_watchdog->base);
freememreg:
#ifdef REQUEST_MEM_REGION
	if (cp_wdt_type_cp_timer != type)
		devm_release_mem_region(&pdev->dev, res->start,
			resource_size(res));
#endif
freemem:
	/* NOTE(review): ->data may still be NULL here (e.g. when the
	 * "watchdog-type" lookup fails before the switch) — confirm
	 * devm_kfree(dev, NULL) is tolerated on this kernel version. */
	devm_kfree(&pdev->dev, cp_watchdog->data);
	devm_kfree(&pdev->dev, cp_watchdog);
	cp_watchdog = NULL;
	return ret;
}
static int hi6401_irq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct hi6401_irq *irq = NULL;
	enum of_gpio_flags flags;
	unsigned int virq;
	int ret = 0;
	int i;

	irq = devm_kzalloc(dev, sizeof(*irq), GFP_KERNEL);
	if (!irq) {
		dev_err(dev, "cannot allocate hi6401_irq device info\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, irq);

	/* get resources */
	irq->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!irq->res) {
		dev_err(dev, "platform_get_resource err\n");
		goto err_exit;
	}

	if (!devm_request_mem_region(dev, irq->res->start,
				     resource_size(irq->res),
				     pdev->name)) {
		dev_err(dev, "cannot claim register memory\n");
		goto err_exit;
	}

	irq->reg_base_addr = devm_ioremap(dev, irq->res->start,
					  resource_size(irq->res));
	if (!irq->reg_base_addr) {
		dev_err(dev, "cannot map register memory\n");
		goto ioremap_err;
	}


	/* get pinctrl */
	irq->pctrl = devm_pinctrl_get(dev);
	if (IS_ERR(irq->pctrl)) {
		dev_err(dev, "could not get pinctrl\n");
		goto codec_ssi_get_err;
	}
	ret = codec_ssi_iomux_default(irq->pctrl);
	if (0 != ret)
		goto codec_ssi_iomux_err;

	/* get codec ssi clk */
	irq->codec_ssi_clk = devm_clk_get(dev, "clk_codecssi");
	if (IS_ERR(irq->codec_ssi_clk)) {
		pr_err("clk_get: codecssi clk not found!\n");
		ret = PTR_ERR(irq->codec_ssi_clk);
		goto codec_ssi_clk_err;
	}
	ret = clk_prepare_enable(irq->codec_ssi_clk);
	if (0 != ret) {
		pr_err("codec_ssi_clk :clk prepare enable failed !\n");
		goto codec_ssi_clk_enable_err;
	}

	/* get pmu audio clk */
	irq->pmu_audio_clk = devm_clk_get(dev, "clk_pmuaudioclk");
	if (IS_ERR(irq->pmu_audio_clk)) {
		pr_err("_clk_get: pmu_audio_clk not found!\n");
		ret = PTR_ERR(irq->pmu_audio_clk);
		goto pmu_audio_clk_err;
	}
	ret = clk_prepare_enable(irq->pmu_audio_clk);
	if (0 != ret) {
		pr_err("pmu_audio_clk :clk prepare enable failed !\n");
		goto pmu_audio_clk_enable_err;
	}

	spin_lock_init(&irq->lock);
	spin_lock_init(&irq->rw_lock);
	mutex_init(&irq->sr_mutex);
	mutex_init(&irq->pll_mutex);
	wake_lock_init(&irq->wake_lock, WAKE_LOCK_SUSPEND, "hi6401-irq");

	irq->dev = dev;

	/* clear IRQ status */
	hi6401_irq_write(irq, HI6401_REG_IRQ_0, 0xFF);
	hi6401_irq_write(irq, HI6401_REG_IRQ_1, 0xFF);
	/* mask all irqs */
	hi6401_irq_write(irq, HI6401_REG_IRQM_0, 0xFF);
	hi6401_irq_write(irq, HI6401_REG_IRQM_1, 0xFF);

	irq->gpio = of_get_gpio_flags(np, 0, &flags);
	if (0 > irq->gpio) {
		dev_err(dev, "get gpio flags error\n");
		ret = irq->gpio;
		goto get_gpio_err;
	}

	if (!gpio_is_valid(irq->gpio)) {
		dev_err(dev, "gpio is invalid\n");
		ret = -EINVAL;
		goto get_gpio_err;
	}

	ret = gpio_request_one(irq->gpio, GPIOF_IN, "hi6401_irq");
	if (0 > ret) {
		dev_err(dev, "failed to request gpio%d\n", irq->gpio);
		goto get_gpio_err;
	}

	irq->irq = gpio_to_irq(irq->gpio);

	irq->domain = irq_domain_add_simple(np, HI6401_MAX_IRQS, 0,
					    &hi6401_domain_ops, irq);
	if (!irq->domain) {
		dev_err(dev, "irq domain error\n");
		ret = -ENODEV;
		goto gpio_err;
	}

	for (i = 0; i < HI6401_MAX_IRQS; i++) {
		virq = irq_create_mapping(irq->domain, i);
		if (virq == NO_IRQ) {
			dev_err(dev, "Failed mapping hwirq\n");
			ret = -ENOSPC;
			goto gpio_err;
		}
		irq->irqs[i] = virq;
	}

	ret = request_irq(irq->irq, hi6401_irq_handler,
				   IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND,
				   "hi6401_irq", irq);
	if (0 > ret) {
		dev_err(dev, "could not claim irq %d\n", ret);
		ret = -ENODEV;
		goto gpio_err;
	}
	irq->hi6401_irq_delay_wq = create_singlethread_workqueue("hi6401_irq_delay_wq");
	if (!(irq->hi6401_irq_delay_wq)) {
		pr_err("%s(%u) : workqueue create failed", __FUNCTION__,__LINE__);
		ret = -ENOMEM;
		goto irq_delay_wq_err;
	}
	INIT_DELAYED_WORK(&irq->hi6401_irq_delay_work, hi6401_irq_work_func);

	irq->pll_delay_wq = create_singlethread_workqueue("pll_delay_wq");
	if (!(irq->pll_delay_wq)) {
		pr_err("%s : pll_delay_wq create failed", __FUNCTION__);
		ret = -ENOMEM;
		goto pll_delay_wq_err;
	}
	INIT_DELAYED_WORK(&irq->pll_delay_work, hi6401_pll_work_func);

	g_dump_buf = (char*)kmalloc(sizeof(char)*Hi6401_SIZE_MAX, GFP_KERNEL);
	if (!g_dump_buf)
	{
		pr_err("%s : couldn't malloc buffer.\n",__FUNCTION__);
		ret = -ENOMEM;
		goto g_dump_buf_kmalloc_err;
	}
	memset(g_dump_buf, 0, Hi6401_SIZE_MAX);
	/* populate sub nodes */
	of_platform_populate(np, of_hi6401_irq_child_match_tbl, NULL, dev);

	if (!hi6401_client) {
		hi6401_client = dsm_register_client(&dsm_hi6401);
	}
	return 0;

g_dump_buf_kmalloc_err:
	if(irq->pll_delay_wq) {
		cancel_delayed_work(&irq->pll_delay_work);
		flush_workqueue(irq->pll_delay_wq);
		destroy_workqueue(irq->pll_delay_wq);
	}
pll_delay_wq_err:
	if(irq->hi6401_irq_delay_wq) {
		cancel_delayed_work(&irq->hi6401_irq_delay_work);
		flush_workqueue(irq->hi6401_irq_delay_wq);
		destroy_workqueue(irq->hi6401_irq_delay_wq);
	}
irq_delay_wq_err:
	free_irq(irq->irq, irq);
gpio_err:
	gpio_free(irq->gpio);
get_gpio_err:

	clk_disable_unprepare(irq->pmu_audio_clk);
pmu_audio_clk_enable_err:
	devm_clk_put(dev, irq->pmu_audio_clk);
pmu_audio_clk_err:
	clk_disable_unprepare(irq->codec_ssi_clk);
codec_ssi_clk_enable_err:
	devm_clk_put(dev, irq->codec_ssi_clk);
codec_ssi_clk_err:
	codec_ssi_iomux_idle(irq->pctrl);
codec_ssi_iomux_err:
	pinctrl_put(irq->pctrl);
codec_ssi_get_err:

	devm_iounmap(dev, irq->reg_base_addr);
ioremap_err:
	devm_release_mem_region(dev, irq->res->start,
				resource_size(irq->res));
err_exit:
	devm_kfree(dev, irq);

	return ret;
}