/*
 * Append ATAG_MEM entries for apps-owned RAM banks to the kernel tag list.
 *
 * ptr: current write position in the ATAG list.
 * Returns the advanced write position.
 *
 * Each emitted tag is 4 words: <size=4> <tag id> <bank size> <bank start>.
 * Halts via ASSERT if the SMEM RAM partition table cannot be read.
 */
unsigned* target_atag_mem(unsigned* ptr)
{
    struct smem_ram_ptable ram_ptable;
    unsigned i = 0;

    if (smem_ram_ptable_init(&ram_ptable)) {
        for (i = 0; i < ram_ptable.len; i++) {
            /* Apps-owned, writable, general-purpose memory banks */
            if ((ram_ptable.parts[i].attr == READWRITE)
                && (ram_ptable.parts[i].domain == APPS_DOMAIN)
                && (ram_ptable.parts[i].type == APPS_MEMORY)) {
                /* ATAG_MEM */
                *ptr++ = 4;
                /* Tag EBI-1 memory as unstable. */
                if(ram_ptable.parts[i].category == EBI1_CS0) {
                    /* If EBI-1 CS-0 is 256MB then this is a 2x256 target and
                     * the kernel can reserve this mem region as unstable.
                     * This memory region can be activated when the kernel
                     * receives a request from Android init scripts. */
                    if(ram_ptable.parts[i].size == SIZE_256M)
                        *ptr++ = 0x5441000A; /* Deep-Power-Down tag */
                    /* If EBI-1 CS-0 is 128MB then this is a 2x128 target.
                     * Android + kernel + PMEM regions account for more than
                     * 128MB and the target will not be able to boot with just
                     * one memory bank active while the second is reserved,
                     * so the tag is set to Self-Refresh only. */
                    else if(ram_ptable.parts[i].size == SIZE_128M)
                        *ptr++ = 0x5441000B; /* Self-Refresh tag */
                    /* NOTE(review): if EBI1_CS0 size is neither 256MB nor
                     * 128MB, no tag id word is written after the size word
                     * above, producing a malformed tag — confirm all
                     * supported targets use one of these two sizes. */
                }
                else
                    *ptr++ = 0x54410002; /* standard ATAG_MEM */
                *ptr++ = ram_ptable.parts[i].size;
                *ptr++ = ram_ptable.parts[i].start;
            }
            /* Check for modem bootloader memory that can be reclaimed */
            if ((ram_ptable.parts[i].attr == READWRITE)
                && (ram_ptable.parts[i].domain == APPS_DOMAIN)
                && (ram_ptable.parts[i].type == BOOT_REGION_MEMORY1)) {
                /* ATAG_MEM_OSBL */
                *ptr++ = 4;
                *ptr++ = 0x5441000C;
                *ptr++ = ram_ptable.parts[i].size;
                *ptr++ = ram_ptable.parts[i].start;
            }
        }
    }
    else {
        dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n");
        ASSERT(0);
    }

    return ptr;
}
/* Funtion to add the ram partition entries into device tree. * The function assumes that all the entire fixed memory regions should * be listed in the first bank of the passed in ddr regions. */ uint32_t target_dev_tree_mem(void *fdt, uint32_t memory_node_offset) { struct smem_ram_ptable ram_ptable; uint32_t i; int ret = 0; /* Make sure RAM partition table is initialized */ ASSERT(smem_ram_ptable_init(&ram_ptable)); /* Calculating the size of the mem_info_ptr */ for (i = 0 ; i < ram_ptable.len; i++) { if((ram_ptable.parts[i].category == SDRAM) && (ram_ptable.parts[i].type == SYS_MEMORY)) { /* Pass along all other usable memory regions to Linux */ ret = dev_tree_add_mem_info(fdt, memory_node_offset, ram_ptable.parts[i].start, ram_ptable.parts[i].size); if (ret) { dprintf(CRITICAL, "Failed to add secondary banks memory addresses\n"); goto target_dev_tree_mem_err; } } } target_dev_tree_mem_err: return ret; }
void *target_get_scratch_address(void) { struct smem_ram_ptable ram_ptable; unsigned i = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { if ((ram_ptable.parts[i].attr == READWRITE) && (ram_ptable.parts[i].domain == APPS_DOMAIN) && (ram_ptable.parts[i].start != 0x0)) { if (ram_ptable.parts[i].size >= FASTBOOT_BUF_SIZE) { scratch_addr = ram_ptable.parts[i].start; break; } } } } else { dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n"); ASSERT(0); } return (void *)((scratch_addr == -1) ? EBI1_ADDR_128M : scratch_addr); }
unsigned* target_atag_mem(unsigned* ptr) { struct smem_ram_ptable ram_ptable; unsigned i = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { if ((ram_ptable.parts[i].attr == READWRITE) && (ram_ptable.parts[i].domain == APPS_DOMAIN) && (ram_ptable.parts[i].start != 0x0) && (!(ROUND_TO_MB(ram_ptable.parts[i].size) <= SIZE_1M))) { /* ATAG_MEM */ *ptr++ = 4; *ptr++ = 0x54410002; /* RAM parition are reported correctly by NON-HLOS Use the size passed directly */ if (target_is_emmc_boot()) *ptr++ = ROUND_TO_MB(ram_ptable.parts[i].size); else *ptr++ = ram_ptable.parts[i].size; *ptr++ = ram_ptable.parts[i].start; } } } else { dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n"); ASSERT(0); } return ptr; }
/* Setup memory for this platform */ void platform_init_mmu_mappings(void) { uint32_t i; uint32_t sections; struct smem_ram_ptable ram_ptable; uint32_t vaddress = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { if ((ram_ptable.parts[i].attr == READWRITE) && (ram_ptable.parts[i].domain == APPS_DOMAIN) && (ram_ptable.parts[i].start != 0x0) && (!(ram_ptable.parts[i].size < MB))) { sections = ram_ptable.parts[i].size >> 20; if (vaddress == 0) { vaddress = ROUND_TO_MB(ram_ptable.parts[i].start); } while (sections--) { arm_mmu_map_section(ROUND_TO_MB(ram_ptable.parts[i].start) + sections*MB, vaddress + sections*MB, ALL_MEMORY); } vaddress += ROUND_TO_MB(ram_ptable.parts[i].size); available_scratch_mem += ROUND_TO_MB(ram_ptable.parts[i].size); } } } else {
/*
 * Initialize and return the platform's SMEM RAM partition table.
 *
 * Returns a pointer to the file-scope ram_ptable after populating it
 * from SMEM. Halts via ASSERT if the table cannot be read.
 * NOTE(review): 'ram_ptable' is a file-scope variable here, not a local,
 * so the returned pointer remains valid after this call.
 */
struct smem_ram_ptable* target_smem_ram_ptable_init()
{
    /* Make sure RAM partition table is initialized */
    ASSERT(smem_ram_ptable_init(&ram_ptable));

    return &ram_ptable;
}
unsigned* target_atag_mem(unsigned* ptr) { struct smem_ram_ptable ram_ptable; uint8_t i = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { if (ram_ptable.parts[i].category == SDRAM && ram_ptable.parts[i].type == SYS_MEMORY && ram_ptable.parts[i].start == 0x40000000) { ASSERT(ram_ptable.parts[i].size >= SIZE_10M); *ptr++ = 4; *ptr++ = 0x54410002; *ptr++ = SIZE_10M; *ptr++ = ram_ptable.parts[i].start + SIZE_8M; *ptr++ = 4; *ptr++ = 0x54410002; *ptr++ = SIZE_21M; *ptr++ = ram_ptable.parts[i].start + SIZE_83M; } } } else { dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n"); ASSERT(0); } return ptr; }
/* Setup memory for this platform */
void platform_init_mmu_mappings(void)
{
    uint32_t i;
    uint32_t sections;
    uint32_t table_size = ARRAY_SIZE(mmu_section_table);

    /* Halt if the SMEM RAM partition table cannot be read. */
    ASSERT(smem_ram_ptable_init(&ram_ptable));

    /* Configure the MMU page entries for SDRAM and IMEM memory read
     * from the smem ram table. Mappings are identity (paddr == vaddr),
     * one 1MB section at a time, walked from the top down. */
    for (i = 0; i < ram_ptable.len; i++) {
        if (ram_ptable.parts[i].type == SYS_MEMORY) {
            if ((ram_ptable.parts[i].category == SDRAM) ||
                (ram_ptable.parts[i].category == IMEM)) {
                /* Check to ensure that start address is 1MB aligned */
                ASSERT((ram_ptable.parts[i].start & 0xFFFFF) == 0);

                sections = (ram_ptable.parts[i].size) / MB;
                while (sections--) {
                    /* Normal write-through, read/write, execute-never */
                    arm_mmu_map_section(ram_ptable.parts[i].start + sections * MB,
                                        ram_ptable.parts[i].start + sections * MB,
                                        (MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH |
                                         MMU_MEMORY_AP_READ_WRITE |
                                         MMU_MEMORY_XN));
                }
            }
        }
    }

    /* Configure the MMU page entries for memory read from the
     * mmu_section_table (fixed platform regions with per-entry flags
     * and possibly non-identity virtual addresses). */
    for (i = 0; i < table_size; i++) {
        sections = mmu_section_table[i].num_of_sections;

        while (sections--) {
            arm_mmu_map_section(mmu_section_table[i].paddress + sections * MB,
                                mmu_section_table[i].vaddress + sections * MB,
                                mmu_section_table[i].flags);
        }
    }
}
unsigned *target_atag_mem(unsigned *ptr) { struct smem_ram_ptable ram_ptable; uint8_t i = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { /* Use only 140M from memory bank starting at 0x80000000 */ if (ram_ptable.parts[i].category == SDRAM && ram_ptable.parts[i].type == SYS_MEMORY && ram_ptable.parts[i].start == 0x80000000) { ASSERT(ram_ptable.parts[i].size >= SIZE_256M); *ptr++ = 4; *ptr++ = 0x54410002; *ptr++ = SIZE_140M; *ptr++ = ram_ptable.parts[i].start + SIZE_2M; if (ram_ptable.parts[i].size > SIZE_256M) { *ptr++ = 4; *ptr++ = 0x54410002; *ptr++ = ram_ptable.parts[i].size - SIZE_256M; *ptr++ = ram_ptable.parts[i].start + SIZE_256M; } } /* Pass along all other usable memory regions to Linux */ if (ram_ptable.parts[i].category == SDRAM && ram_ptable.parts[i].type == SYS_MEMORY && ram_ptable.parts[i].start != 0x80000000) { *ptr++ = 4; *ptr++ = 0x54410002; *ptr++ = ram_ptable.parts[i].size; *ptr++ = ram_ptable.parts[i].start; } } } else { dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n"); ASSERT(0); } return ptr; }
unsigned *target_atag_mem(unsigned *ptr) { struct smem_ram_ptable ram_ptable; uint8_t i = 0; /* Make sure RAM partition table is initialized */ ASSERT(smem_ram_ptable_init(&ram_ptable)); for (i = 0; i < ram_ptable.len; i++) { if (ram_ptable.parts[i].category == SDRAM && (ram_ptable.parts[i].type == SYS_MEMORY) && ((ram_ptable.parts[i].start == SDRAM_START_ADDR)|| (ram_ptable.parts[i].start == SDRAM_SEC_BANK_START_ADDR))) { ASSERT(ram_ptable.parts[i].size >= SIZE_256M); if (ram_ptable.parts[i].start == SDRAM_START_ADDR) ptr = target_create_atag(ptr, copper_default_first_256M); else ptr = target_create_atag(ptr, copper_default_second_256M); if (ram_ptable.parts[i].size > SIZE_256M) { ptr = target_mem_atag_create(ptr, (ram_ptable.parts[i].size - SIZE_256M), (ram_ptable.parts[i].start + SIZE_256M)); } } /* Pass along all other usable memory regions to Linux */ if (ram_ptable.parts[i].category == SDRAM && (ram_ptable.parts[i].type == SYS_MEMORY) && (ram_ptable.parts[i].start != SDRAM_START_ADDR)) { ptr = target_mem_atag_create(ptr, ram_ptable.parts[i].size, ram_ptable.parts[i].start); } } return ptr; }
unsigned *target_atag_mem(unsigned *ptr) { struct smem_ram_ptable ram_ptable; uint8_t i = 0; if (smem_ram_ptable_init(&ram_ptable)) { for (i = 0; i < ram_ptable.len; i++) { if (ram_ptable.parts[i].category == SDRAM && (ram_ptable.parts[i].type == SYS_MEMORY) && (ram_ptable.parts[i].start == PHYS_MEM_START_ADDR)) { ASSERT(ram_ptable.parts[i].size >= SIZE_256M); ptr = target_first_256M_atag(ptr); if (ram_ptable.parts[i].size > SIZE_256M) { ptr = target_mem_atag_create(ptr, (ram_ptable.parts[i].size - SIZE_256M), (ram_ptable.parts[i].start + SIZE_256M)); } } /* Pass along all other usable memory regions to Linux */ if (ram_ptable.parts[i].category == SDRAM && (ram_ptable.parts[i].type == SYS_MEMORY) && (ram_ptable.parts[i].start != PHYS_MEM_START_ADDR)) { ptr = target_mem_atag_create(ptr, ram_ptable.parts[i].size, ram_ptable.parts[i].start); } } } else { dprintf(CRITICAL, "ERROR: Unable to read RAM partition\n"); ASSERT(0); } return ptr; }
/*
 * Compute total DRAM size for U-Boot.
 *
 * Sums every SDRAM system-memory partition reported by the SMEM RAM
 * partition table into gd->ram_size and mirrors it to the board
 * parameters; falls back to the board's static ddr_size when the SMEM
 * table is unavailable. Always returns 0.
 */
int dram_init(void)
{
    struct smem_ram_ptable rtable;
    uint32_t i;
    uint32_t max_parts = ARRAY_SIZE(rtable.parts);

    if (smem_ram_ptable_init(&rtable) > 0) {
        gd->ram_size = 0;
        /* Only walk the entries the table actually reports (len) —
         * the previous code iterated the full array capacity, summing
         * slots the SMEM copy may never have filled. The clamp to
         * max_parts guards against a bogus length from SMEM. */
        for (i = 0; i < rtable.len && i < max_parts; i++) {
            if (rtable.parts[i].category == RAM_PARTITION_SDRAM &&
                rtable.parts[i].type == RAM_PARTITION_SYS_MEMORY) {
                gd->ram_size += rtable.parts[i].size;
            }
        }
        gboard_param->ddr_size = gd->ram_size;
    } else {
        /* SMEM table unavailable: use the board's static DDR size */
        gd->ram_size = gboard_param->ddr_size;
    }

    return 0;
}