Example No. 1
File: pm33xx.c Project: krzk/linux
static int am33xx_push_sram_idle(void)
{
	struct am33xx_pm_ro_sram_data ro_sram_data;
	int ret;
	u32 table_addr, ro_data_addr;
	void *copy_addr;

	ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
	ro_sram_data.amx3_pm_sram_data_phys =
		gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);

	/* Save physical address to calculate resume offset during pm init */
	am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
							ocmcram_location);

	am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
					    pm_sram->do_wfi,
					    *pm_sram->do_wfi_sz);
	if (!am33xx_do_wfi_sram) {
		dev_err(pm33xx_dev,
			"PM: %s: am33xx_do_wfi copy to sram failed\n",
			__func__);
		return -ENODEV;
	}

	table_addr =
		sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
	ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
	if (ret) {
		dev_dbg(pm33xx_dev,
			"PM: %s: EMIF function copy failed\n", __func__);
		return -EPROBE_DEFER;
	}

	ro_data_addr =
		sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
	copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
				   &ro_sram_data,
				   sizeof(ro_sram_data));
	if (!copy_addr) {
		dev_err(pm33xx_dev,
			"PM: %s: ro_sram_data copy to sram failed\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}
static void *__alloc_from_pool(size_t size, struct page **ret_pages, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;
	int count = size >> PAGE_SHIFT;
	int i;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
		for (i = 0; i < count ; i++) {
			ret_pages[i] = phys_to_page(phys);
			phys += 1 << PAGE_SHIFT;
		}
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}
static int allocate_sram(struct snd_pcm_substream *substream,
		struct gen_pool *sram_pool, unsigned size,
		struct snd_pcm_hardware *ppcm)
{
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	struct snd_dma_buffer *iram_dma = NULL;
	dma_addr_t iram_phys = 0;
	void *iram_virt = NULL;

	if (buf->private_data || !size)
		return 0;

	ppcm->period_bytes_max = size;
	iram_virt = (void *)gen_pool_alloc(sram_pool, size);
	if (!iram_virt)
		goto exit1;
	iram_phys = gen_pool_virt_to_phys(sram_pool, (unsigned)iram_virt);
	iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
	if (!iram_dma)
		goto exit2;
	iram_dma->area = iram_virt;
	iram_dma->addr = iram_phys;
	memset(iram_dma->area, 0, size);
	iram_dma->bytes = size;
	buf->private_data = iram_dma;
	return 0;
exit2:
	if (iram_virt)
		gen_pool_free(sram_pool, (unsigned)iram_virt, size);
exit1:
	return -ENOMEM;
}
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr = gen_pool_alloc(pool, size);

	if (vaddr)
		*dma = gen_pool_virt_to_phys(pool, vaddr);
	return (void *)vaddr;
}
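The gen_pool_dma_alloc() helper just above (from lib/genalloc.c) is the generic routine that several of the driver-specific examples effectively open-code: allocate from the pool, then translate the returned virtual address with gen_pool_virt_to_phys(). Below is a minimal usage sketch; the pool argument, buffer size, and the my_drv_* helper names are hypothetical and are not taken from any of the examples on this page.

#include <linux/genalloc.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: get a zeroed, DMA-able buffer from an existing pool. */
static void *my_drv_get_dma_buf(struct gen_pool *pool, size_t size,
				dma_addr_t *dma)
{
	/*
	 * gen_pool_dma_alloc() returns the kernel virtual address and fills
	 * *dma with the matching physical address via gen_pool_virt_to_phys().
	 */
	void *vaddr = gen_pool_dma_alloc(pool, size, dma);

	if (!vaddr)
		return NULL;

	memset(vaddr, 0, size);
	return vaddr;
}

/* Hypothetical counterpart: free with the same size that was allocated. */
static void my_drv_put_dma_buf(struct gen_pool *pool, void *vaddr, size_t size)
{
	gen_pool_free(pool, (unsigned long)vaddr, size);
}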
int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
{
	phys_addr_t offset;

	offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
						   sizeof(HGSMIBUFFERHEADER));
	outl(offset, VGA_PORT_HGSMI_GUEST);
	/* Make the compiler aware that the host has changed memory. */
	mb();

	return 0;
}
Example No. 6
/**
 * zynq_pm_remap_ocm() - Remap OCM
 * Returns a pointer to the mapped memory or NULL.
 *
 * Remap the OCM.
 */
static void __iomem *zynq_pm_remap_ocm(void)
{
	struct device_node *np;
	const char *comp = "xlnx,zynq-ocmc-1.0";
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, comp);
	if (np) {
		struct device *dev;
		unsigned long pool_addr;
		unsigned long pool_addr_virt;
		struct gen_pool *pool;

		of_node_put(np);

		dev = &(of_find_device_by_node(np)->dev);

		/* Get OCM pool from device tree or platform data */
		pool = dev_get_gen_pool(dev);
		if (!pool) {
			pr_warn("%s: OCM pool is not available\n", __func__);
			return NULL;
		}

		pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
		if (!pool_addr_virt) {
			pr_warn("%s: Can't get OCM poll\n", __func__);
			return NULL;
		}
		pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
		if (!pool_addr) {
			pr_warn("%s: Can't get physical address of OCM pool\n",
				__func__);
			return NULL;
		}
		base = __arm_ioremap(pool_addr, zynq_sys_suspend_sz,
				     MT_MEMORY_RWX);
		if (!base) {
			pr_warn("%s: IOremap OCM pool failed\n", __func__);
			return NULL;
		}
		pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
			 pool_addr_virt, (unsigned long)base);
	} else {
		pr_warn("%s: no compatible node found for '%s'\n", __func__,
				comp);
	}

	return base;
}
Example No. 7
struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = sram_get_gpool("asram");
	if (!gpool)
		return NULL;

	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
	if (!tdmac->desc_arr)
		return NULL;

	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
			(unsigned long)tdmac->desc_arr);

	return tdmac->desc_arr;
}
Example No. 8
static int am33xx_prepare_push_sram_idle(void)
{
	struct device_node *np;
	int ret;

	ret = ti_emif_copy_pm_function_table(pm_sram->emif_sram_table);
	if (ret) {
		pr_err("PM: %s: EMIF function copy failed\n", __func__);
		return -EPROBE_DEFER;
	}

	np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");

	if (!np) {
		np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
		if (!np) {
			pr_warn("PM: %s: Unable to find device node for mpu\n",
				__func__);
			return -ENODEV;
		}
	}

	sram_pool = of_get_named_gen_pool(np, "sram", 0);

	if (!sram_pool) {
		pr_warn("PM: %s: Unable to get sram pool for ocmcram\n",
			__func__);
		return -ENODEV;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
	if (!ocmcram_location) {
		pr_warn("PM: %s: Unable to allocate memory from ocmcram\n",
			__func__);
		return -EINVAL;
	}

	/* Save physical address to calculate resume offset during pm init */
	am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
							ocmcram_location);

	return 0;
}
Example No. 9
static int ti_emif_push_sram(struct device *dev)
{
	struct device_node *np = dev->of_node;

	sram_pool = of_gen_pool_get(np, "sram", 0);

	if (!sram_pool) {
		dev_err(dev, "Unable to get sram pool for ocmcram\n");
		return -ENODEV;
	}

	ocmcram_location = gen_pool_alloc(sram_pool, ti_emif_sram_sz);
	if (!ocmcram_location) {
		dev_err(dev, "Unable to allocate memory from ocmcram\n");
		return -EINVAL;
	}

	/* Save physical address to calculate resume offset during pm init */
	ti_emif_sram_phys = gen_pool_virt_to_phys(sram_pool,
						  ocmcram_location);
	ti_emif_sram_virt = fncpy((void *)ocmcram_location,
				  &ti_emif_sram,
				  ti_emif_sram_sz);

	/*
	 * These functions are called during suspend path while MMU is
	 * still on so add virtual base to offset for absolute address
	 */
	ti_emif_pm.save_context = sram_suspend_address(ti_emif_pm.save_context);
	ti_emif_pm.enter_sr = sram_suspend_address(ti_emif_pm.enter_sr);
	ti_emif_pm.abort_sr = sram_suspend_address(ti_emif_pm.abort_sr);

	/*
	 * These are called during resume path when MMU is not enabled
	 * so physical address is used instead
	 */
	ti_emif_pm.restore_context =
		sram_resume_address(ti_emif_pm.restore_context);
	ti_emif_pm.exit_sr = sram_resume_address(ti_emif_pm.exit_sr);

	return 0;
}
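The two comment blocks in ti_emif_push_sram() rely on sram_suspend_address()/sram_resume_address() rebasing a symbol from the original ti_emif_sram blob into the copy placed in OCM SRAM. The sketch below illustrates the arithmetic those comments imply (fncpy() preserves relative offsets inside the copied code); it is only an illustration derived from the example above, not the driver's actual helper definitions, and the extern declarations and their types are assumed.

#include <linux/types.h>

/* Assumed declarations; the real types live in the TI EMIF SRAM driver. */
extern unsigned long ti_emif_sram;	/* start of the original code blob */
extern unsigned long ti_emif_sram_virt;	/* virtual address of the SRAM copy */
extern phys_addr_t ti_emif_sram_phys;	/* physical address of the SRAM copy */

/* Illustration: suspend runs with the MMU on, so rebase to the virtual copy. */
static unsigned long example_sram_suspend_address(unsigned long addr)
{
	return ti_emif_sram_virt + (addr - (unsigned long)&ti_emif_sram);
}

/* Illustration: resume runs with the MMU off, so rebase to the physical copy. */
static unsigned long example_sram_resume_address(unsigned long addr)
{
	return (unsigned long)ti_emif_sram_phys +
	       (addr - (unsigned long)&ti_emif_sram);
}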
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}
Example No. 11
static int __devinit pruss_probe(struct platform_device *dev)
{
	struct uio_info *p;
	struct uio_pruss_dev *gdev;
	struct resource *regs_prussio;
	int ret = -ENODEV, cnt = 0, len;
	struct uio_pruss_pdata *pdata = dev->dev.platform_data;

	gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
	if (!gdev->info) {
		kfree(gdev);
		return -ENOMEM;
	}
	/* Power on PRU in case it's not done as part of boot-loader */
	gdev->pruss_clk = clk_get(&dev->dev, "pruss");
	if (IS_ERR(gdev->pruss_clk)) {
		dev_err(&dev->dev, "Failed to get clock\n");
		kfree(gdev->info);
		kfree(gdev);
		ret = PTR_ERR(gdev->pruss_clk);
		return ret;
	} else {
		clk_enable(gdev->pruss_clk);
	}

	regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!regs_prussio) {
		dev_err(&dev->dev, "No PRUSS I/O resource specified\n");
		goto out_free;
	}

	if (!regs_prussio->start) {
		dev_err(&dev->dev, "Invalid memory resource\n");
		goto out_free;
	}

	gdev->sram_vaddr = (void *)gen_pool_alloc(davinci_gen_pool,
						  sram_pool_sz);
	if (!gdev->sram_vaddr) {
		dev_err(&dev->dev, "Could not allocate SRAM pool\n");
		goto out_free;
	}

	gdev->sram_paddr = gen_pool_virt_to_phys(davinci_gen_pool,
					(unsigned long)gdev->sram_vaddr);

	gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
				&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
	if (!gdev->ddr_vaddr) {
		dev_err(&dev->dev, "Could not allocate external memory\n");
		goto out_free;
	}

	len = resource_size(regs_prussio);
	gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
	if (!gdev->prussio_vaddr) {
		dev_err(&dev->dev, "Can't remap PRUSS I/O  address range\n");
		goto out_free;
	}

	gdev->pintc_base = pdata->pintc_base;
	gdev->hostirq_start = platform_get_irq(dev, 0);

	for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
		p->mem[0].addr = regs_prussio->start;
		p->mem[0].size = resource_size(regs_prussio);
		p->mem[0].memtype = UIO_MEM_PHYS;

		p->mem[1].addr = gdev->sram_paddr;
		p->mem[1].size = sram_pool_sz;
		p->mem[1].memtype = UIO_MEM_PHYS;

		p->mem[2].addr = gdev->ddr_paddr;
		p->mem[2].size = extram_pool_sz;
		p->mem[2].memtype = UIO_MEM_PHYS;

		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
		p->version = DRV_VERSION;

		/* Register PRUSS IRQ lines */
		p->irq = gdev->hostirq_start + cnt;
		p->handler = pruss_handler;
		p->priv = gdev;

		ret = uio_register_device(&dev->dev, p);
		if (ret < 0)
			goto out_free;
	}

	platform_set_drvdata(dev, gdev);
	return 0;

out_free:
	pruss_cleanup(dev, gdev);
	return ret;
}
Example No. 12
int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev)
{
	struct platform_device *ocram_dev;
	unsigned int iram_paddr;
	struct device_node *node;
	struct gen_pool *iram_pool;

	busfreq_dev = &busfreq_pdev->dev;
	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n");
		return -EINVAL;
	}
	mmdc_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n");
		return -EINVAL;
	}
	ccm_base = of_iomap(node, 0);
	WARN(!ccm_base, "unable to map ccm registers\n");

	node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	l2_base = of_iomap(node, 0);
	WARN(!l2_base, "unable to map PL310 registers\n");

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
	if (!node) {
		printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	anatop_base = of_iomap(node, 0);
	WARN(!anatop_base, "unable to map anatop registers\n");

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		dev_err(busfreq_dev, "%s: failed to find ocram node\n",
			__func__);
		return -EINVAL;
	}

	ocram_dev = of_find_device_by_node(node);
	if (!ocram_dev) {
		dev_err(busfreq_dev, "failed to find ocram device!\n");
		return -EINVAL;
	}

	iram_pool = dev_get_gen_pool(&ocram_dev->dev);
	if (!iram_pool) {
		dev_err(busfreq_dev, "iram pool unavailable!\n");
		return -EINVAL;
	}

	reg_addrs[0] = (unsigned long)anatop_base;
	reg_addrs[1] = (unsigned long)ccm_base;
	reg_addrs[2] = (unsigned long)mmdc_base;
	reg_addrs[3] = (unsigned long)l2_base;

	ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool,
						LPDDR2_FREQ_CHANGE_SIZE);
	if (!ddr_freq_change_iram_base) {
		dev_err(busfreq_dev,
			"Cannot alloc iram for ddr freq change code!\n");
		return -ENOMEM;
	}

	iram_paddr = gen_pool_virt_to_phys(iram_pool,
				(unsigned long)ddr_freq_change_iram_base);
	/*
	 * Need to remap the area here since we want
	 * the memory region to be executable.
	 */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
						LPDDR2_FREQ_CHANGE_SIZE,
						MT_MEMORY_NONCACHED);
	mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base,
		&mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE);

	curr_ddr_rate = ddr_normal_rate;

	return 0;
}
Example No. 13
int init_mmdc_settings(struct platform_device *busfreq_pdev)
{
	struct device *dev = &busfreq_pdev->dev;
	struct platform_device *ocram_dev;
	unsigned int iram_paddr;
	int i, err;
	u32 cpu;
	struct device_node *node;
	struct gen_pool *iram_pool;

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-mmdc device tree data!\n");
		return -EINVAL;
	}
	mmdc_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = NULL;
	if (cpu_is_imx6q())
		node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc");
	if (cpu_is_imx6dl())
		node = of_find_compatible_node(NULL, NULL,
			"fsl,imx6dl-iomuxc");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-iomux device tree data!\n");
		return -EINVAL;
	}
	iomux_base = of_iomap(node, 0);
	WARN(!iomux_base, "unable to map iomux registers\n");

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-ccm device tree data!\n");
		return -EINVAL;
	}
	ccm_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	l2_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-a9-gic device tree data!\n");
		return -EINVAL;
	}
	gic_dist_base = of_iomap(node, 0);
	WARN(!gic_dist_base, "unable to map gic dist registers\n");

	if (cpu_is_imx6q())
		ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) +
			ARRAY_SIZE(ddr3_calibration);
	if (cpu_is_imx6dl())
		ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) +
			ARRAY_SIZE(ddr3_calibration);

	normal_mmdc_settings = kmalloc((ddr_settings_size * 8), GFP_KERNEL);
	if (cpu_is_imx6q()) {
		memcpy(normal_mmdc_settings, ddr3_dll_mx6q,
			sizeof(ddr3_dll_mx6q));
		memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)),
			ddr3_calibration, sizeof(ddr3_calibration));
	}
	if (cpu_is_imx6dl()) {
		memcpy(normal_mmdc_settings, ddr3_dll_mx6dl,
			sizeof(ddr3_dll_mx6dl));
		memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)),
			ddr3_calibration, sizeof(ddr3_calibration));
	}
	/* store the original DDR settings at boot. */
	for (i = 0; i < ddr_settings_size; i++) {
		/*
		 * writes via command mode register cannot be read back.
		 * hence hardcode them in the initial static array.
		 * this may require modification on a per customer basis.
		 */
		if (normal_mmdc_settings[i][0] != 0x1C)
			normal_mmdc_settings[i][1] =
				readl_relaxed(mmdc_base
				+ normal_mmdc_settings[i][0]);
	}

	irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(),
					GFP_KERNEL);

	for_each_present_cpu(cpu) {
		int irq;

		/*
		 * set up a reserved interrupt to get all
		 * the active cores into a WFE state
		 * before changing the DDR frequency.
		 */
		irq = platform_get_irq(busfreq_pdev, cpu);
		err = request_irq(irq, wait_in_wfe_irq,
			IRQF_PERCPU, "mmdc_1", NULL);
		if (err) {
			dev_err(dev,
				"Busfreq:request_irq failed %d, err = %d\n",
				irq, err);
			return err;
		}
		err = irq_set_affinity(irq, cpumask_of(cpu));
		if (err) {
			dev_err(dev,
				"Busfreq: Cannot set irq affinity irq=%d,\n",
				irq);
			return err;
		}
		irqs_used[cpu] = irq;
	}

	node = NULL;
	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		dev_err(dev, "%s: failed to find ocram node\n",
			__func__);
		return -EINVAL;
	}

	ocram_dev = of_find_device_by_node(node);
	if (!ocram_dev) {
		dev_err(dev, "failed to find ocram device!\n");
		return -EINVAL;
	}

	iram_pool = dev_get_gen_pool(&ocram_dev->dev);
	if (!iram_pool) {
		dev_err(dev, "iram pool unavailable!\n");
		return -EINVAL;
	}

	iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q);
	iram_iomux_settings = gen_pool_alloc(iram_pool,
						(iomux_settings_size * 8) + 8);
	if (!iram_iomux_settings) {
		dev_err(dev, "unable to alloc iram for IOMUX settings!\n");
		return -ENOMEM;
	}

	/*
	 * Allocate extra space to store the number of entries in
	 * ddr_settings plus 4 extra register addresses that need to be
	 * passed to the frequency change code.
	 * sizeof(iram_ddr_settings) = sizeof(ddr_settings) +
	 *				entries in ddr_settings + 16.
	 * The last 4 entries store the addresses of the registers:
	 * CCM_BASE_ADDR
	 * MMDC_BASE_ADDR
	 * IOMUX_BASE_ADDR
	 * L2X0_BASE_ADDR
	 */
	iram_ddr_settings = gen_pool_alloc(iram_pool,
					(ddr_settings_size * 8) + 8 + 32);
	if (!iram_ddr_settings) {
		dev_err(dev, "unable to alloc iram for ddr settings!\n");
		return -ENOMEM;
	}
	i = ddr_settings_size + 1;
	iram_ddr_settings[i][0] = (unsigned long)mmdc_base;
	iram_ddr_settings[i+1][0] = (unsigned long)ccm_base;
	iram_ddr_settings[i+2][0] = (unsigned long)iomux_base;
	iram_ddr_settings[i+3][0] = (unsigned long)l2_base;

	if (cpu_is_imx6q()) {
		/* store the IOMUX settings at boot. */
		for (i = 0; i < iomux_settings_size; i++) {
			iomux_offsets_mx6q[i][1] =
				readl_relaxed(iomux_base +
					iomux_offsets_mx6q[i][0]);
			iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0];
			iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1];
		}
	}

	if (cpu_is_imx6dl()) {
		for (i = 0; i < iomux_settings_size; i++) {
			iomux_offsets_mx6dl[i][1] =
				readl_relaxed(iomux_base +
					iomux_offsets_mx6dl[i][0]);
			iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0];
			iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1];
		}
	}

	ddr_freq_change_iram_base = gen_pool_alloc(iram_pool,
						DDR_FREQ_CHANGE_SIZE);
	if (!ddr_freq_change_iram_base) {
		dev_err(dev, "Cannot alloc iram for ddr freq change code!\n");
		return -ENOMEM;
	}

	iram_paddr = gen_pool_virt_to_phys(iram_pool,
				(unsigned long)ddr_freq_change_iram_base);
	/*
	 * need to remap the area here since we want
	 * the memory region to be executable.
	 */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
						DDR_FREQ_CHANGE_SIZE,
						MT_MEMORY_RWX_NONCACHED);
	mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base,
		&mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE);

	curr_ddr_rate = ddr_normal_rate;

	return 0;
}
Example No. 14
struct pie_chunk *__pie_load_data(struct gen_pool *pool, bool phys,
		void *code_start, void *code_end,
		void *rel_start, void *rel_end)
{
	struct pie_chunk *chunk;
	unsigned long offset;
	int ret;
	char *tail;
	size_t common_sz;
	size_t code_sz;
	size_t tail_sz;

	/* Calculate the tail size */
	ret = pie_arch_fill_tail(NULL, __pie_common_start, __pie_common_end,
				__pie_overlay_start, code_start, code_end,
				rel_start, rel_end);
	if (ret < 0)
		goto err;
	tail_sz = ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk) {
		ret = -ENOMEM;
		goto err;
	}

	common_sz = code_start - (void *)__pie_common_start;
	code_sz = code_end - code_start;

	chunk->pool = pool;
	chunk->sz = common_sz + code_sz + tail_sz;

	chunk->addr = gen_pool_alloc(pool, chunk->sz);
	if (!chunk->addr) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* Copy common code/data */
	tail = (char *) chunk->addr;
	memcpy(tail, __pie_common_start, common_sz);
	tail += common_sz;

	/* Copy chunk specific code/data */
	memcpy(tail, code_start, code_sz);
	tail += code_sz;

	/* Fill in tail data */
	ret = pie_arch_fill_tail(tail, __pie_common_start, __pie_common_end,
				__pie_overlay_start, code_start, code_end,
				rel_start, rel_end);
	if (ret < 0)
		goto err_alloc;

	/* Calculate initial offset */
	if (phys)
		offset = gen_pool_virt_to_phys(pool, chunk->addr);
	else
		offset = chunk->addr;

	/* Perform arch specific code fixups */
	ret = pie_arch_fixup(chunk, (void *) chunk->addr, tail, offset);
	if (ret < 0)
		goto err_alloc;

	flush_icache_range(chunk->addr, chunk->addr + chunk->sz);

	return chunk;

err_alloc:
	gen_pool_free(chunk->pool, chunk->addr, chunk->sz);

err_free:
	kfree(chunk);
err:
	return ERR_PTR(ret);
}
Example No. 15
phys_addr_t pie_to_phys(struct pie_chunk *chunk, unsigned long addr)
{
	return gen_pool_virt_to_phys(chunk->pool, addr);
}
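All of the examples above assume a pool that already knows the physical address of its backing memory: gen_pool_virt_to_phys() only yields a meaningful result for chunks registered with gen_pool_add_virt(), while chunks added with plain gen_pool_add() carry -1 as their physical address. The setup sketch below shows that registration step; the my_* names are placeholders, not code from any example on this page.

#include <linux/genalloc.h>
#include <linux/mm.h>

/* Hypothetical setup: create a pool over one SRAM region so that later
 * gen_pool_alloc()/gen_pool_virt_to_phys() calls behave as in the examples. */
static struct gen_pool *my_pool_setup(unsigned long my_virt,
				      phys_addr_t my_phys, size_t my_size)
{
	struct gen_pool *pool;

	/* page-sized minimum allocation order, no NUMA node preference */
	pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool)
		return NULL;

	/* register the chunk with both its virtual and physical address */
	if (gen_pool_add_virt(pool, my_virt, my_phys, my_size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}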