/*
 * Read 'len' bytes from the SMEM item 'type', starting 'offset' bytes into
 * the item, copying them into 'buf'.
 *
 * buf MUST be 4-byte aligned and len MUST be a multiple of 4.
 * Returns 0 on success, 1 on any error (bad args, invalid type, item not
 * allocated, or requested window outside the allocated item).
 */
unsigned smem_read_alloc_entry_offset(smem_mem_type_t type, void *buf,
				      int len, int offset)
{
	struct smem_alloc_info *ainfo;
	unsigned *dest = buf;
	unsigned src;
	unsigned size = len;
	uint32_t smem_addr = 0;

#if DYNAMIC_SMEM
	smem_addr = smem_get_base_addr();
#else
	smem_addr = platform_get_smem_base_addr();
#endif
	smem = (struct smem *)smem_addr;

	/* Reject negative sizes/offsets before they are converted to
	 * (huge) unsigned values below. */
	if (len < 0 || offset < 0)
		return 1;

	if (((len & 0x3) != 0) || (((unsigned)buf & 0x3) != 0))
		return 1;

	if (type < SMEM_FIRST_VALID_TYPE || type > SMEM_LAST_VALID_TYPE)
		return 1;

	ainfo = &smem->alloc_info[type];
	if (readl(&ainfo->allocated) == 0)
		return 1;

	/* Bounds check: the requested window must lie entirely inside the
	 * allocated item (smem_read_alloc_entry performs the equivalent
	 * check; it was missing here).  The first comparison catches
	 * unsigned wrap-around of offset + size. */
	if ((unsigned)offset + size < size ||
	    (unsigned)offset + size > readl(&ainfo->size))
		return 1;

	src = smem_addr + readl(&ainfo->offset) + offset;
	for (; size > 0; src += 4, size -= 4)
		*(dest++) = readl(src);

	return 0;
}
/* Setup MMU mapping for this platform */ void platform_init_mmu_mappings(void) { uint32_t i; uint32_t sections; uint32_t table_size = ARRAY_SIZE(mmu_section_table); uint32_t ddr_start = get_ddr_start(); uint32_t smem_addr = platform_get_smem_base_addr(); /*Mapping the ddr start address for loading the kernel about 90 MB*/ sections = 90; while(sections--) { arm_mmu_map_section(ddr_start + sections * MB, ddr_start + sections* MB, SCRATCH_MEMORY); } /* Mapping the SMEM addr */ arm_mmu_map_section(smem_addr, smem_addr, COMMON_MEMORY); /* Configure the MMU page entries for memory read from the mmu_section_table */ for (i = 0; i < table_size; i++) { sections = mmu_section_table[i].num_of_sections; while (sections--) { arm_mmu_map_section(mmu_section_table[i].paddress + sections * MB, mmu_section_table[i].vaddress + sections * MB, mmu_section_table[i].flags); } } }
/*
 * Copy the contents of the SMEM item 'type' into 'buf'.
 *
 * buf MUST be 4-byte aligned, and len MUST be a multiple of 8.
 * Returns 0 on success, 1 on any error (misaligned args, invalid type,
 * item not allocated, or item smaller than the rounded-up length).
 */
unsigned smem_read_alloc_entry(smem_mem_type_t type, void *buf, int len)
{
	struct smem_alloc_info *ainfo;
	unsigned *out = buf;
	unsigned addr;
	unsigned item_size;
	uint32_t smem_addr = 0;

#if DYNAMIC_SMEM
	smem_addr = smem_get_base_addr();
#else
	smem_addr = platform_get_smem_base_addr();
#endif
	smem = (struct smem *)smem_addr;

	/* Both the destination pointer and the length must be word aligned. */
	if ((len & 0x3) != 0 || ((unsigned)buf & 0x3) != 0)
		return 1;

	if (type < SMEM_FIRST_VALID_TYPE || type > SMEM_LAST_VALID_TYPE)
		return 1;

	/* TODO: Use smem spinlocks */
	ainfo = &smem->alloc_info[type];
	if (readl(&ainfo->allocated) == 0)
		return 1;

	/* The item must hold at least len bytes rounded up to 8. */
	item_size = readl(&ainfo->size);
	if (item_size < (unsigned)((len + 7) & ~0x00000007))
		return 1;

	addr = smem_addr + readl(&ainfo->offset);
	while (len > 0) {
		*(out++) = readl(addr);
		addr += 4;
		len -= 4;
	}

	return 0;
}
/*
 * Return a pointer to the SMEM item 'type', storing its size in *size.
 *
 * Returns NULL if 'type' is out of range, the item is not allocated, or
 * 'size' is NULL.  On success *size holds the allocated item size and the
 * returned pointer addresses the item (in the extended base region when
 * base_ext is set, otherwise relative to the SMEM base).
 */
void *smem_get_alloc_entry(smem_mem_type_t type, uint32_t *size)
{
	struct smem_alloc_info *ainfo = NULL;
	uint32_t smem_addr = 0;
	uint32_t base_ext = 0;
	uint32_t offset = 0;
	void *ret = NULL;

#if DYNAMIC_SMEM
	smem_addr = smem_get_base_addr();
#else
	smem_addr = platform_get_smem_base_addr();
#endif
	smem = (struct smem *)smem_addr;

	/* Guard the out-parameter before it is written below. */
	if (size == NULL)
		return ret;

	if (type < SMEM_FIRST_VALID_TYPE || type > SMEM_LAST_VALID_TYPE)
		return ret;

	ainfo = &smem->alloc_info[type];
	if (readl(&ainfo->allocated) == 0)
		return ret;

	*size = readl(&ainfo->size);
	base_ext = readl(&ainfo->base_ext);
	offset = readl(&ainfo->offset);

	/* Compute the address in integer arithmetic and cast once: the
	 * original added an offset to a void* (a GCC extension, not
	 * standard C). */
	if (base_ext)
		ret = (void *)(uintptr_t)(base_ext + offset);
	else
		ret = (void *)(uintptr_t)(smem_addr + offset);

	return ret;
}