Example #1
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
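
Every example in this listing follows the same pattern: store one or more first-level section descriptors, then call flush_pmd_entry() so the cleaned cache line is visible to the hardware table walker. On classic (non-LPAE) ARM a Linux pmd covers 2MB while each hardware entry covers 1MB, which is why entries are written in aligned pairs before a single flush. A minimal sketch of the pattern (the helper name is hypothetical, not a kernel API):

/* Hedged sketch: write one 2MB-aligned pair of 1MB section entries,
 * then clean the cache line so the MMU table walk sees the update.
 */
static void write_section_pair(pmd_t *pmd, unsigned long phys,
			       unsigned long sect_prot)
{
	pmd[0] = __pmd(phys | sect_prot);		/* [phys, phys + 1MB) */
	pmd[1] = __pmd((phys + SZ_1M) | sect_prot);	/* [phys + 1MB, phys + 2MB) */
	flush_pmd_entry(pmd);
}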
Example #2
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

pgd_t *idmap_pgd;

#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
#ifdef  CONFIG_TIMA_RKP_L1_TABLES
	unsigned long cmd_id = 0x3f809221;	/* vendor-defined TIMA command id */
#endif
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
#ifdef  CONFIG_TIMA_RKP_L1_TABLES
	if (tima_is_pg_protected((unsigned long) pmd) != 0) {
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
		clean_dcache_area(pmd, 8);
		tima_cache_flush((unsigned long)pmd);
#endif
		tima_send_cmd5((unsigned long)__pa(pmd), (unsigned long)__pmd(addr),
				(unsigned long)__pmd(addr+SECTION_SIZE), 0, 0, cmd_id);
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
		tima_cache_inval((unsigned long)pmd);
#endif
		tima_tlb_inval_is(0);
	} else {
		pmd[0] = __pmd(addr);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr);
	}
#else	/* CONFIG_TIMA_RKP_L1_TABLES */
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
#endif	/* CONFIG_TIMA_RKP_L1_TABLES */
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */
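
Example #2 is the idmap code with Samsung's TIMA RKP (Real-time Kernel Protection) hooks added: when the secure world has write-protected the L1 table page, the kernel cannot store the descriptors itself, so it requests the update from the secure side with tima_send_cmd5() and brackets the call with explicit cache maintenance on non-coherent configurations; only unprotected tables take the plain pmd[0]/pmd[1] store path. The tima_* calls are vendor primitives, not mainline kernel API.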
Example #3
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
    unsigned long addr = virt, end = virt + size;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    unmap_area_sections(virt, size);

    pgd = pgd_offset_k(virt);
    pud = pud_offset(pgd, addr);
    pmd = pmd_offset(pud, addr);
    do {
        unsigned long super_pmd_val, i;

        super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                        PMD_SECT_SUPER;
        super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

        for (i = 0; i < 8; i++) {
            pmd[0] = __pmd(super_pmd_val);
            pmd[1] = __pmd(super_pmd_val);
            flush_pmd_entry(pmd);

            addr += PMD_SIZE;
            pmd += 2;
        }

        pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    } while (addr < end);

    return 0;
}
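
A supersection maps 16MB, and the ARM architecture requires its descriptor to be replicated in all 16 consecutive first-level slots, which is exactly what the loop above does (8 iterations, each writing a pmd pair). Bits [23:20] of the descriptor extend the physical address to 36 bits, and (pfn >> (32 - PAGE_SHIFT)) & 0xf extracts precisely PA[35:32]. A worked example with an illustrative (hypothetical) address:

/* Hedged arithmetic sketch: phys = 0x230000000ULL (36-bit), PAGE_SHIFT = 12.
 * pfn              = phys >> 12      = 0x230000
 * pfn >> (32 - 12) = 0x230000 >> 20  = 0x2          (= PA[35:32])
 * descriptor       = PA[31:0] section base | prot_sect | PMD_SECT_SUPER
 *                    | (0x2 << 20)
 * next supersection: pfn += SUPERSECTION_SIZE >> PAGE_SHIFT = 0x1000 pages
 */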
Example #4
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
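
setup_mm_for_reboot() builds a flat 1:1 section mapping over the user address range so that the code which turns the MMU off keeps executing from predictable translations. With PGDIR_SHIFT = 21 on classic ARM, each iteration covers 2MB: pmd[0] maps i << 21 and pmd[1] the following megabyte, since 1 << (PGDIR_SHIFT - 1) is 1MB.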
Example #5
void remove_pmd_flag(mmuhack_t *mmuhack, unsigned long mask)
{
    pmd_t *pmd = mmuhack->pmd;
    pmd_t *pmd_to_flush = mmuhack->pmd;

    /* Sections are managed as 2MB-aligned pairs; pick the 1MB entry
     * that actually covers addr. */
    if (mmuhack->addr & SECTION_SIZE)
        pmd++;

    mmuhack->origpmd = *pmd;

    if ((mmuhack->origpmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
        return;

    if (*pmd & mask) {
        *pmd &= ~mask;
    } else {
        printk(KERN_INFO "Page 0x%08lX - 0x%08lX: flag 0x%08lX is already clear.\n",
               mmuhack->addr & PAGE_MASK,
               (mmuhack->addr & PAGE_MASK) + (~PAGE_MASK), mask);
        return;
    }

    flush_pmd_entry(pmd_to_flush);
    my_flush_tlb_kernel_page(mmuhack->addr & PAGE_MASK);

    printk(KERN_INFO "Page 0x%08lX - 0x%08lX: pmd flag 0x%08lX removed.\n",
           mmuhack->addr & PAGE_MASK,
           (mmuhack->addr & PAGE_MASK) + (~PAGE_MASK), mask);
}
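
Examples #5 and #9 come from an out-of-tree "mmuhack" module; neither mmuhack_t nor my_flush_tlb_kernel_page() appears in this listing. A plausible minimal definition, inferred purely from the fields used above (and assuming a build where pmd_t is a plain integer type, since the code applies & and ~ to it directly):

/* Hypothetical reconstruction, not from the original module. */
typedef struct {
	unsigned long addr;	/* kernel virtual address being patched */
	pmd_t *pmd;		/* first entry of the 2MB-aligned pmd pair */
	pmd_t origpmd;		/* saved descriptor for restore_pmd_flag() */
} mmuhack_t;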
Example #6
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
Example #7
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif	/* CONFIG_ARM_LPAE */
Example #8
/*
 * On Assabet, we must probe for the Neponset board _before_
 * paging_init() has occurred to actually determine the amount
 * of RAM available.  To do so, we map the appropriate IO section
 * in the page table here in order to access GPIO registers.
 */
static void __init map_sa1100_gpio_regs(void)
{
	unsigned long phys = __PREG(GPLR) & PMD_MASK;
	unsigned long virt = io_p2v(phys);
	int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
	pmd_t *pmd;

	pmd = pmd_offset(pgd_offset_k(virt), virt);
	*pmd = __pmd(phys | prot);
	flush_pmd_entry(pmd);
}
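
The SA-1100 register macros already expand to io_p2v() virtual addresses, so once this single section descriptor is written and flushed, GPLR can be read through the normal macros even though this code runs before paging_init(), when ioremap() is not yet usable.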
Example #9
void restore_pmd_flag(mmuhack_t *mmuhack)
{
    pmd_t *pmd = mmuhack->pmd;

    if (mmuhack->addr & SECTION_SIZE)
        pmd++;

    printk(KERN_INFO "Restoring PMD 0x%08lX at 0x%08lX\n",
           (unsigned long)mmuhack->origpmd, mmuhack->addr);

    *pmd = mmuhack->origpmd;

    flush_pmd_entry(pmd);
    my_flush_tlb_kernel_page(mmuhack->addr & PAGE_MASK);

    printk(KERN_INFO "Page 0x%08lX - 0x%08lX restored.\n",
           mmuhack->addr & PAGE_MASK,
           (mmuhack->addr & PAGE_MASK) + (~PAGE_MASK));
}
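
remove_pmd_flag() and restore_pmd_flag() are meant to bracket a temporary modification. A hedged usage sketch, assuming the hypothetical mmuhack_t sketched after Example #5 (the target symbol, the walk to the pmd, and the choice of PMD_SECT_APX as the bit to clear are illustrative assumptions):

	unsigned long addr = (unsigned long)target_kernel_addr;	/* hypothetical symbol */
	mmuhack_t hack = { .addr = addr };

	hack.pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
	remove_pmd_flag(&hack, PMD_SECT_APX);	/* clear APX: section becomes writable */
	/* ... patch the now-writable section ... */
	restore_pmd_flag(&hack);		/* put the saved descriptor back */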
Example #10
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
Example #11
void __init mt_map_io(void)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;

    iotable_init(mt_io_desc, ARRAY_SIZE(mt_io_desc));

    /* set NS=1 for NS_GIC_CPU_BASE */
    pgd = pgd_offset(&init_mm, NS_GIC_CPU_BASE);
    pud = pud_offset(pgd, NS_GIC_CPU_BASE);
    pmd = pmd_offset(pud, NS_GIC_CPU_BASE);
    if ((pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
        __raw_writel(__raw_readl(pmd) | PMD_TABLE_NS, pmd);
    } else {
        __raw_writel(__raw_readl(pmd) | PMD_SECT_NS, pmd);
    }
    flush_pmd_entry(pmd);
}
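
mt_map_io() flips the non-secure bit on the first-level entry that covers NS_GIC_CPU_BASE after iotable_init() has created the mapping: PMD_TABLE_NS when the entry is a page-table descriptor, PMD_SECT_NS when it is a section, so accesses through this mapping are treated as non-secure. The __raw_readl()/__raw_writel() pair works because a classic ARM first-level descriptor is a plain 32-bit word.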
Example #12
void setup_mm_for_kdump(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	pgd = init_mm.pgd;

	cpu_switch_mm(pgd, &init_mm);

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_offset(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
Example #13
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(NULL, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
Example #14
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
Example #15
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
Example #16
/*
 * fill_spte - fill a shadow page table entry
 * @entry: pointer to the shadow entry to write
 * @value: descriptor value to store
 */
inline void fill_spte(u32 *entry, u32 value)
{
	*entry = value;
	flush_pmd_entry((pmd_t *)entry);
}
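
fill_spte() reuses flush_pmd_entry() purely for its cache-maintenance side effect: a shadow first-level descriptor is a 32-bit word just like a real one, so cleaning the cache line that holds it is exactly the same operation, and the cast to pmd_t * is safe on classic ARM.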
Example #17
/*
 * Initialize the power management subsystem.
 *
 * Return value:
 *      -ENODEV: initialization failed
 *      0: success
 */
static int __init msm_pm_init(void)
{
	int ret;
	int val;
	enum msm_pm_time_stats_id enable_stats[] = {
		MSM_PM_STAT_REQUESTED_IDLE,
		MSM_PM_STAT_IDLE_SPIN,
		MSM_PM_STAT_IDLE_WFI,
		MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
		MSM_PM_STAT_SUSPEND,
		MSM_PM_STAT_FAILED_SUSPEND,
		MSM_PM_STAT_NOT_IDLE,
	};

#ifdef CONFIG_CPU_V7
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned long exit_phys;

	exit_phys = virt_to_phys(msm_pm_collapse_exit);

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;
	pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
			 exit_phys);
	pmdval = (exit_phys & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		     virt_to_phys(&msm_pm_pc_pgd));
#endif

	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
		sizeof(*msm_pm_smem_data));
	if (msm_pm_smem_data == NULL) {
		printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
		return -ENODEV;
	}

	ret = msm_timer_init_time_sync(msm_pm_timeout);
	if (ret)
		return ret;

	ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	if (cpu_is_msm8625()) {
		target_type = TARGET_IS_8625;
		clean_caches((unsigned long)&target_type, sizeof(target_type),
				virt_to_phys(&target_type));

		/*
		 * Configure the MPA5_GDFS_CNT_VAL register for
		 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
		 * DBGNOPOWERDN for each cpu.
		 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
		 * GDFS control.
		 */
		val = 0x00030002;
		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));

		l2x0_base_addr = MSM_L2CC_BASE;
	}

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	/* The wakeup_reason field is overloaded during initialization time
	   to signal Modem that Apps will control the low power modes of
	   the memory.
	 */
	msm_pm_smem_data->wakeup_reason = 1;
	smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
#endif

	BUG_ON(msm_pm_modes == NULL);

	suspend_set_ops(&msm_pm_ops);

	msm_pm_mode_sysfs_add();
	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));

	atomic_set(&msm_pm_init_done, 1);
	return 0;
}
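
For this listing, the interesting part of msm_pm_init() is the page-table setup at the top: pmd[0] and pmd[1] identity-map the two megabytes around exit_phys, and pmd[2] adds the following megabyte because msm_pm_collapse_exit() may spill into it, as the in-code comment explains. Only after all three entries are written does flush_pmd_entry() clean them, and the physical address of the pgd is then published in msm_pm_pc_pgd for the waking CPU.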