Example #1
void arm_mmu_init(void)
{
	int i;

	/* set some mmu specific control bits */
	arm_write_cr1(arm_read_cr1() & ~((1<<29)|(1<<28)|(1<<0))); // access flag disabled, TEX remap disabled, mmu disabled

	/* set up an identity-mapped translation table with cache disabled */
	for (i=0; i < 4096; i++) {
#ifdef WITH_DMA_ZONE
		#if defined(PLATFORM_TCC892X)
		arm_mmu_map_section(i * MB, i * MB,  MMU_FLAG_READWRITE | MMU_FLAG_CACHED | MMU_FLAG_BUFFERED);
		#else
		arm_mmu_map_section(i * MB, i * MB,  MMU_FLAG_READWRITE | MMU_FLAG_CACHED);
		#endif
#else
		arm_mmu_map_section(i * MB, i * MB,  MMU_FLAG_READWRITE); // map everything uncached
#endif
	}

	/* set up the translation table base */
	arm_write_ttbr((uint32_t)tt);

	/* set up the domain access register */
	arm_write_dacr(0x00000001);

	/* turn on the mmu */
	arm_write_cr1(arm_read_cr1() | 0x1);
}
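This example (and Example #11 below) assumes a statically allocated first-level translation table named tt. A minimal sketch of that declaration, assuming a GCC-style attribute; the exact alignment macro spelling varies by project:

/* First-level translation table: 4096 32-bit entries, one per 1 MB
 * section, covering the full 4 GB address space. The ARMv7
 * short-descriptor format requires the table to be 16 KB aligned. */
static uint32_t tt[4096] __attribute__((aligned(16384)));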
Example #2
/* Setup MMU mapping for this platform */
void platform_init_mmu_mappings(void)
{
	uint32_t i;
	uint32_t sections;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);
	uint32_t ddr_start = get_ddr_start();
	uint32_t smem_addr = platform_get_smem_base_addr();

	/* Map about 90 MB from the DDR start address, used for loading the kernel */
	sections = 90;
	while (sections--)
	{
		arm_mmu_map_section(ddr_start + sections * MB, ddr_start + sections * MB, SCRATCH_MEMORY);
	}

	/* Mapping the SMEM addr */
	arm_mmu_map_section(smem_addr, smem_addr, COMMON_MEMORY);

	/* Configure the MMU page entries for memory read from the
	   mmu_section_table */
	for (i = 0; i < table_size; i++)
	{
		sections = mmu_section_table[i].num_of_sections;

		while (sections--)
		{
			arm_mmu_map_section(mmu_section_table[i].paddress +
								sections * MB,
								mmu_section_table[i].vaddress +
								sections * MB,
								mmu_section_table[i].flags);
		}
	}
}
Example #3
void platform_init_mmu_mappings(void)
{
	/* alias the start of physical SDRAM at virtual address 0, then
	   identity-map all of SDRAM in 1 MB sections */
	addr_t addr;
	arm_mmu_map_section(SDRAM_BASE, 0, MMU_FLAG_CACHED|MMU_FLAG_BUFFERED);
	for (addr = SDRAM_BASE; addr < SDRAM_BASE + SDRAM_SIZE; addr += (1024*1024)) {
		arm_mmu_map_section(addr, addr, MMU_FLAG_CACHED|MMU_FLAG_BUFFERED|MMU_FLAG_READWRITE);
	}
}
Example #4
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
	uint32_t i;
	uint32_t sections;
	ram_partition ptn_entry;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);
	uint32_t len = 0;

	ASSERT(smem_ram_ptable_init_v1());

	len = smem_get_ram_ptable_len();

	/* Configure the MMU page entries for SDRAM and IMEM memory read
	   from the smem ram table*/
	for(i = 0; i < len; i++)
	{
		smem_get_ram_ptable_entry(&ptn_entry, i);
		if(ptn_entry.type == SYS_MEMORY)
		{
			if((ptn_entry.category == SDRAM) ||
			   (ptn_entry.category == IMEM))
			{
				/* Check to ensure that start address is 1MB aligned */
				ASSERT((ptn_entry.start & (MB-1)) == 0);

				sections = (ptn_entry.size) / MB;
				while(sections--)
				{
					arm_mmu_map_section(ptn_entry.start +
										sections * MB,
										ptn_entry.start +
										sections * MB,
										(MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH |
										 MMU_MEMORY_AP_READ_WRITE |
										 MMU_MEMORY_XN));
				}
			}
		}
	}

	/* Configure the MMU page entries for memory read from the
	   mmu_section_table */
	for (i = 0; i < table_size; i++)
	{
		sections = mmu_section_table[i].num_of_sections;

		while (sections--)
		{
			arm_mmu_map_section(mmu_section_table[i].paddress +
								sections * MB,
								mmu_section_table[i].vaddress +
								sections * MB,
								mmu_section_table[i].flags);
		}
	}
}
Example #5
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
	uint32_t i;
	uint32_t sections;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);

	ASSERT(smem_ram_ptable_init(&ram_ptable));

	/* Configure the MMU page entries for SDRAM and IMEM memory read
	   from the smem ram table*/
	for(i = 0; i < ram_ptable.len; i++)
	{
		if(ram_ptable.parts[i].type == SYS_MEMORY)
		{
			if((ram_ptable.parts[i].category == SDRAM) ||
			   (ram_ptable.parts[i].category == IMEM))
			{
				/* Check to ensure that start address is 1MB aligned */
				ASSERT((ram_ptable.parts[i].start & 0xFFFFF) == 0);

				sections = (ram_ptable.parts[i].size) / MB;
				while(sections--)
				{
					arm_mmu_map_section(ram_ptable.parts[i].start +
										sections * MB,
										ram_ptable.parts[i].start +
										sections * MB,
									(MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH |
									 MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN));
				}
			}
		}
	}

	/* Configure the MMU page entries for memory read from the
	   mmu_section_table */
	for (i = 0; i < table_size; i++)
	{
		sections = mmu_section_table[i].num_of_sections;

		while (sections--)
		{
			arm_mmu_map_section(mmu_section_table[i].paddress +
								sections * MB,
								mmu_section_table[i].vaddress +
								sections * MB,
								mmu_section_table[i].flags);
		}
	}
}
Example #6
void platform_init_mmu_mappings(void)
{
	/* alias the start of physical SDRAM at virtual address 0; the
	   privileged/user no-access permission means any access through
	   this alias faults */
	addr_t addr;
	arm_mmu_map_section(SDRAM_BASE, 0,
			MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE |
			MMU_MEMORY_L1_AP_P_NA_U_NA);

	for (addr = SDRAM_BASE; addr < SDRAM_BASE + SDRAM_SIZE; addr += (1024*1024)) {
		arm_mmu_map_section(addr, addr,
				MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE |
				MMU_MEMORY_L1_AP_P_RW_U_NA);
	}
}
Example #7
void platform_init_mmu_mappings(void)
{
	struct smem_ram_ptable *ram_ptable;
	uint32_t i;
	uint32_t sections;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);
	uint32_t last_fixed_addr = SDRAM_BANK0_LAST_FIXED_ADDR;

	ram_ptable = target_smem_ram_ptable_init();

	/* Configure the MMU page entries for SDRAM and IMEM memory read
	   from the smem ram table*/
	for(i = 0; i < ram_ptable->len; i++)
	{
		if((ram_ptable->parts[i].category == IMEM) || (ram_ptable->parts[i].category == SDRAM))
		{
			/* The first bank is already covered by the static
			   mmu_section_table below, so skip any partition that
			   contains the last fixed address. */
			if((ram_ptable->parts[i].start <= last_fixed_addr) &&
			   ((ram_ptable->parts[i].start + ram_ptable->parts[i].size) >= last_fixed_addr))
					continue;

			/* Check to ensure that start address is 1MB aligned */
			ASSERT((ram_ptable->parts[i].start & 0xFFFFF) == 0);

			sections = (ram_ptable->parts[i].size) / MB;

			while(sections--)
			{
				arm_mmu_map_section(ram_ptable->parts[i].start + sections * MB,
									ram_ptable->parts[i].start + sections * MB,
									SCRATCH_MEMORY);
			}
		}
	}

	/* Configure the MMU page entries for memory read from the
	   mmu_section_table */
	for (i = 0; i < table_size; i++)
	{
		sections = mmu_section_table[i].num_of_sections;

		while (sections--)
		{
			arm_mmu_map_section(mmu_section_table[i].paddress + sections * MB,
								mmu_section_table[i].vaddress + sections * MB,
								mmu_section_table[i].flags);
		}
	}
}
Example #8
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
    uint32_t i;
    uint32_t sections;
    struct smem_ram_ptable ram_ptable;
    uint32_t vaddress = 0;

    if (smem_ram_ptable_init(&ram_ptable)) {
        for (i = 0; i < ram_ptable.len; i++) {
             if ((ram_ptable.parts[i].attr == READWRITE)
                 && (ram_ptable.parts[i].domain == APPS_DOMAIN)
                 && (ram_ptable.parts[i].start != 0x0)
                 && (!(ram_ptable.parts[i].size < MB))) {
                sections = ram_ptable.parts[i].size >> 20;
                if (vaddress == 0) {
                    vaddress = ROUND_TO_MB(ram_ptable.parts[i].start);
                }

                while (sections--) {
                    arm_mmu_map_section(ROUND_TO_MB(ram_ptable.parts[i].start) + sections*MB,
                    vaddress + sections*MB, ALL_MEMORY);
                }
                vaddress += ROUND_TO_MB(ram_ptable.parts[i].size);
                available_scratch_mem += ROUND_TO_MB(ram_ptable.parts[i].size);
            }
        }
    } else {
        /* ram ptable init failed; error handling elided */
    }
}
Example #9
void platform_init_mmu_mappings(void)
{
    uint32_t sections = 1152;

    /* Map io mapped peripherals as device non-shared memory */
    while (sections--)
    {
        arm_mmu_map_section(0x88000000 + (sections << 20),
                            0x88000000 + (sections << 20),
                            (MMU_MEMORY_TYPE_DEVICE_NON_SHARED |
                             MMU_MEMORY_AP_READ_WRITE));
    }
}
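These 1152 one-megabyte sections cover the physical range 0x88000000 through 0xCFFFFFFF: 0x88000000 + 1152 * 0x100000 = 0xD0000000.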
Example #10
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
	uint32_t i;
	uint32_t sections;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);

	for (i = 0; i < table_size; i++) {
		sections = mmu_section_table[i].num_of_sections;

		while (sections--) {
			arm_mmu_map_section(mmu_section_table[i].paddress +
					    sections * MB,
					    mmu_section_table[i].vaddress +
					    sections * MB,
					    mmu_section_table[i].flags);
		}
	}
}
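Several of the examples above iterate over a platform-defined mmu_section_table. A minimal sketch of what such a table might look like; the struct layout is inferred from the field names used in these loops, and the type name, addresses, and flags are purely illustrative:

typedef struct {
	uint32_t paddress;          /* 1 MB aligned physical base */
	uint32_t vaddress;          /* 1 MB aligned virtual base */
	uint32_t num_of_sections;   /* region size in MB */
	uint32_t flags;             /* attributes passed to arm_mmu_map_section() */
} mmu_section_t;

static mmu_section_t mmu_section_table[] = {
	/* paddress,  vaddress,   sections, flags (illustrative values) */
	{ 0x80000000, 0x80000000, 128,
	  MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH | MMU_MEMORY_AP_READ_WRITE },
};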
Example #11
File: mmu.c Project: machinaut/lk
void arm_mmu_init(void)
{
	/* set some mmu specific control bits */
	arm_write_sctlr(arm_read_sctlr() & ~((1<<29)|(1<<28)|(1<<0))); // access flag disabled, TEX remap disabled, mmu disabled

	/* set up an identity-mapped translation table with cache disabled */
	for (addr_t i=0; i < 4096; i++) {
		arm_mmu_map_section(i * MB, i * MB,  MMU_FLAG_READWRITE); // map everything uncached
	}

	/* set up the translation table base */
	arm_write_ttbr((uint32_t)tt);

	/* set up the domain access register */
	arm_write_dacr(0x00000001);

	/* turn on the mmu */
	arm_write_sctlr(arm_read_sctlr() | 0x1);
}
Example #12
void platform_init_mmu_mappings(void)
{
  /* configure available RAM banks */
  dram_init();

  /* identity-map all of DRAM as write-back cacheable so the D-cache
     can be enabled */
  unsigned int addr;
  unsigned int dram_size = memory_size();

  for (addr = 0; addr < dram_size; addr += (1024*1024))
  {
    /* virtual to physical 1-1 mapping */
    arm_mmu_map_section(bi_dram[0].start + addr, bi_dram[0].start + addr,
                        MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_ALLOCATE | MMU_MEMORY_AP_READ_WRITE);
  }
}
Example #13
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
	uint32_t i;
	uint32_t sections;
	uint32_t table_size = ARRAY_SIZE(mmu_section_table);

	/* Configure the MMU page entries for memory read from the
	   mmu_section_table */
	for (i = 0; i < table_size; i++)
	{
		sections = mmu_section_table[i].num_of_sections;

		while (sections--)
		{
			arm_mmu_map_section(mmu_section_table[i].paddress +
								sections * MB,
								mmu_section_table[i].vaddress +
								sections * MB,
								mmu_section_table[i].flags);
		}
	}
}
Example #14
File: mmu.c Project: chenyuwen/lk
int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count, uint flags)
{
    LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags);

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(aspace->tt_virt);

    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
    if (!is_valid_vaddr(aspace, vaddr))
        return ERR_OUT_OF_RANGE;

#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & ARCH_MMU_FLAG_NS) {
        /* WITH_ARCH_MMU_PICK_SPOT is required to support NS memory */
        panic("NS mem is not supported\n");
    }
#endif

    /* paddr and vaddr must be aligned */
    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));
    if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr))
        return ERR_INVALID_ARGS;

    if (count == 0)
        return NO_ERROR;

    /* see what kind of mapping we can use */
    int mapped = 0;
    while (count > 0) {
        if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) {
            /* we can use a section */

            /* compute the arch flags for L1 sections */
            uint arch_flags = mmu_flags_to_l1_arch_flags(flags) |
                              MMU_MEMORY_L1_DESCRIPTOR_SECTION;

            /* map it */
            arm_mmu_map_section(aspace, paddr, vaddr, arch_flags);
            count -= SECTION_SIZE / PAGE_SIZE;
            mapped += SECTION_SIZE / PAGE_SIZE;
            vaddr += SECTION_SIZE;
            paddr += SECTION_SIZE;
        } else {
            /* will have to use a L2 mapping */
            uint l1_index = vaddr / SECTION_SIZE;
            uint32_t tt_entry = aspace->tt_virt[l1_index];

            LTRACEF("tt_entry 0x%x\n", tt_entry);
            switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) {
                case MMU_MEMORY_L1_DESCRIPTOR_SECTION:
                    // XXX will have to break L1 mapping into a L2 page table
                    PANIC_UNIMPLEMENTED;
                    break;
                case MMU_MEMORY_L1_DESCRIPTOR_INVALID: {
                    paddr_t l2_pa = 0;
                    if (get_l2_table(aspace, l1_index, &l2_pa) != NO_ERROR) {
                        TRACEF("failed to allocate pagetable\n");
                        goto done;
                    }
                    tt_entry = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE;
                    if (flags & ARCH_MMU_FLAG_NS)
                        tt_entry |= MMU_MEMORY_L1_PAGETABLE_NON_SECURE;

                    aspace->tt_virt[l1_index] = tt_entry;
                }
                    /* fallthrough */
                case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: {
                    uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry));
                    LTRACEF("l2_table at %p\n", l2_table);

                    DEBUG_ASSERT(l2_table);

                    // XXX handle 64K pages here

                    /* compute the arch flags for L2 4K pages */
                    uint arch_flags = mmu_flags_to_l2_arch_flags_small_page(flags);

                    uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE;
                    do {
                        l2_table[l2_index++] = paddr | arch_flags;
                        count--;
                        mapped++;
                        vaddr += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                    } while (count && (l2_index != (SECTION_SIZE / PAGE_SIZE)));
                    break;
                }
                default:
                    PANIC_UNIMPLEMENTED;
            }
        }
    }

done:
    DSB;
    return mapped;
}
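A hedged usage sketch for this routine: the call below would identity-map 2 MB of device registers, taking the section path where the addresses are 1 MB aligned and the L2 small-page path otherwise. The address-space variable and the register base address are hypothetical; ARCH_MMU_FLAG_UNCACHED_DEVICE is the device-memory flag used elsewhere in LK.

/* Hypothetical call site: identity-map 2 MB of device registers into
 * an already-initialized address space. Returns the number of pages
 * mapped (2 MB / PAGE_SIZE on success) or a negative error code for
 * bad arguments. */
int mapped = arch_mmu_map(&my_aspace,            /* hypothetical aspace */
                          0x1f000000,            /* vaddr (hypothetical) */
                          0x1f000000,            /* paddr (hypothetical) */
                          (2 * MB) / PAGE_SIZE,  /* count, in pages */
                          ARCH_MMU_FLAG_UNCACHED_DEVICE);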