/*
 * @brief MPU default configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU): it validates the static region table, programs the
 * fixed regions, optionally programs the application-memory region, and
 * enables the MPU.
 *
 * @param arg device argument (unused)
 * @return 0 on success, -1 if more regions are requested than the hardware
 *         supports.
 */
static int arm_mpu_init(struct device *arg)
{
	u32_t r_index;

	/* Device argument is not used; suppress the unused-parameter
	 * warning (mirrors the ARG_UNUSED() in nxp_mpu_init()).
	 */
	(void)arg;

	if (mpu_config.num_regions > _get_num_regions()) {
		/* Attempt to configure more MPU regions than
		 * what is supported by hardware. As this operation
		 * is executed during system (pre-kernel) initialization,
		 * we want to ensure we can detect an attempt to
		 * perform invalid configuration.
		 */
		__ASSERT(0,
			"Request to configure: %u regions (supported: %u)\n",
			mpu_config.num_regions,
			_get_num_regions()
		);
		return -1;
	}

	SYS_LOG_DBG("total region count: %d", _get_num_regions());

	arm_core_mpu_disable();

	/* Architecture-specific configuration */
	_mpu_init();

	/* Configure regions */
	for (r_index = 0; r_index < mpu_config.num_regions; r_index++) {
		_region_init(r_index, &mpu_config.mpu_regions[r_index]);
	}

#if defined(CONFIG_APPLICATION_MEMORY)
	u32_t index, size;
	struct arm_mpu_region region_conf;

	/* configure app data portion */
	index = _get_region_index_by_type(THREAD_APP_DATA_REGION);
	size = (u32_t)&__app_ram_end - (u32_t)&__app_ram_start;
	_get_region_attr_by_type(&region_conf.attr, THREAD_APP_DATA_REGION,
				 (u32_t)&__app_ram_start, size);
	region_conf.base = (u32_t)&__app_ram_start;
	/* A zero-size application RAM section must not be programmed. */
	if (size > 0) {
		_region_init(index, &region_conf);
	}
#endif

	arm_core_mpu_enable();

	/* Sanity check for number of regions in Cortex-M0+, M3, and M4. */
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
	defined(CONFIG_CPU_CORTEX_M3) || \
	defined(CONFIG_CPU_CORTEX_M4)
	__ASSERT(
		(MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos == 8,
		"Invalid number of MPU regions\n");
#endif

	return 0;
}
/**
 * This internal function disables a given MPU region.
 */
static inline void _disable_region(u32_t r_index)
{
	/* Writing an invalid region number to MPU_RNR has unpredictable
	 * behavior, so the requested index is validated before the
	 * hardware is touched.
	 */
	__ASSERT(r_index < _get_num_regions(),
		"Index 0x%x out-of-bound (supported regions: 0x%x)\n",
		r_index,
		_get_num_regions());

	SYS_LOG_DBG("disable region 0x%x", r_index);

	/* Clear the region's enable bit via CMSIS */
	ARM_MPU_ClrRegion(r_index);
}
/**
 * This internal function is utilized by the MPU driver to parse the intent
 * type (i.e. THREAD_STACK_REGION) and return the correct region index.
 */
static inline u32_t _get_region_index_by_type(u32_t type)
{
	u32_t index;

	__ASSERT(type < THREAD_MPU_REGION_LAST, "unsupported region type");

	/* Intent-type regions are laid out immediately after the
	 * statically configured regions.
	 */
	index = mpu_config.num_regions + type;

	__ASSERT(index < _get_num_regions(),
		"out of MPU regions, requested %u max is %u",
		index, _get_num_regions() - 1);

	return index;
}
/** * @brief configure MPU regions for the memory partitions of the memory domain * * @param mem_domain memory domain that thread belongs to */ void arm_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain) { u32_t region_index = _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION); u32_t num_partitions; struct k_mem_partition *pparts; struct arm_mpu_region region_conf; if (mem_domain) { SYS_LOG_DBG("configure domain: %p", mem_domain); num_partitions = mem_domain->num_partitions; pparts = mem_domain->partitions; } else { SYS_LOG_DBG("disable domain partition regions"); num_partitions = 0; pparts = NULL; } for (; region_index < _get_num_regions(); region_index++) { if (num_partitions && pparts->size) { SYS_LOG_DBG("set region 0x%x 0x%x 0x%x", region_index, pparts->start, pparts->size); region_conf.base = pparts->start; _get_ram_region_attr_by_conf(®ion_conf.attr, pparts->attr, pparts->start, pparts->size); _region_init(region_index, ®ion_conf); num_partitions--; } else { _disable_region(region_index); } pparts++; } }
/** * @brief get the maximum number of free regions for memory domain partitions */ int arm_core_mpu_get_max_domain_partition_regions(void) { /* * Subtract the start of domain partition regions from total regions * will get the maximum number of free regions for memory domain * partitions. */ return _get_num_regions() - _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION); }
/*
 * @brief MPU default configuration
 *
 * This function provides the default configuration mechanism for the Memory
 * Protection Unit (MPU).
 */
static int nxp_mpu_init(struct device *arg)
{
	ARG_UNUSED(arg);

	u32_t r_index;

	if (mpu_config.num_regions > _get_num_regions()) {
		/* Attempt to configure more MPU regions than
		 * what is supported by hardware. As this operation
		 * may be executed during system (pre-kernel) initialization,
		 * we want to ensure we can detect an attempt to
		 * perform invalid configuration.
		 */
		__ASSERT(0,
			"Request to configure: %u regions (supported: %u)\n",
			mpu_config.num_regions,
			_get_num_regions()
		);
		return -1;
	}

	LOG_DBG("total region count: %d", _get_num_regions());

	/* The MPU must be disabled while its regions are (re)programmed. */
	arm_core_mpu_disable();

	/* Architecture-specific configuration */
	_mpu_init();

	/* Program fixed regions configured at SOC definition. */
	for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
		_region_init(r_index, &mpu_config.mpu_regions[r_index]);
	}

	/* Update the number of programmed MPU regions.
	 * Dynamic region programming starts at this index.
	 */
	static_regions_num = mpu_config.num_regions;

	arm_core_mpu_enable();

	return 0;
}
/**
 * @brief update configuration of an active memory partition
 *
 * Locates the MPU region currently programmed for the given partition (by
 * matching base address and size) and re-programs it with the new attribute.
 * ASSERTs if no matching region is found.
 */
void arm_core_mpu_mem_partition_config_update(
	struct k_mem_partition *partition,
	k_mem_partition_attr_t *new_attr)
{
	/* Find the partition. ASSERT if not found. */
	u8_t reg_index = _get_num_regions();
	u8_t i;

	/* Scan the dynamically programmed regions for one whose base
	 * address and size both match the partition being updated.
	 */
	for (i = static_regions_num; i < _get_num_regions(); i++) {
		if (!_is_enabled_region(i)) {
			continue;
		}
		if (_mpu_region_get_base(i) != partition->start) {
			continue;
		}
		if (_mpu_region_get_size(i) != partition->size) {
			continue;
		}
		/* Region found */
		reg_index = i;
		break;
	}
	__ASSERT(reg_index != _get_num_regions(),
		 "Memory domain partition not found\n");

	/* Modify the permissions */
	partition->attr = *new_attr;
	_mpu_configure_region(reg_index, partition);
}
/**
 * @brief Program an MPU region at the given index.
 *
 * @param index MPU region index to program
 * @param region_conf region configuration to apply
 * @return the programmed region index on success, -EINVAL if @p index is
 *         not a valid MPU region index.
 */
static int _region_allocate_and_init(const u8_t index,
	const struct nxp_mpu_region *region_conf)
{
	/* Attempt to allocate new region index.
	 * Use ">= count" rather than "> (count - 1)": the latter
	 * underflows if the unsigned region count were ever zero.
	 */
	if (index >= _get_num_regions()) {
		/* No available MPU region index. */
		LOG_ERR("Failed to allocate new MPU region %u\n", index);
		return -EINVAL;
	}

	LOG_DBG("Program MPU region at index 0x%x", index);

	/* Program region */
	_region_init(index, region_conf);

	return index;
}
/**
 * @brief configure the base address and size for an MPU region
 *
 * Silently returns (without programming anything) if the region index is
 * out of range or no valid attribute encoding exists for the request.
 *
 * @param type MPU region type
 * @param base base address in RAM
 * @param size size of the region
 */
void arm_core_mpu_configure(u8_t type, u32_t base, u32_t size)
{
	struct arm_mpu_region region_conf;

	SYS_LOG_DBG("Region info: 0x%x 0x%x", base, size);

	u32_t region_index = _get_region_index_by_type(type);

	/* Check the index bound first, so region attributes are not
	 * computed for a region that can never be programmed.
	 */
	if (region_index >= _get_num_regions()) {
		return;
	}
	if (_get_region_attr_by_type(&region_conf.attr, type, base, size)) {
		/* No valid attribute encoding for this request. */
		return;
	}
	region_conf.base = base;

	_region_init(region_index, &region_conf);
}
/* This internal function programs the dynamic MPU regions.
 *
 * It returns the number of MPU region indices configured (i.e. the next
 * free region index), or a negative error code.
 *
 * Note:
 * If the dynamic MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
static int _mpu_configure_dynamic_mpu_regions(
	const struct k_mem_partition dynamic_regions[], u8_t regions_num)
{
	/* Reset MPU regions inside which dynamic memory regions may
	 * be programmed.
	 *
	 * Re-programming these regions will temporarily leave memory areas
	 * outside all MPU regions.
	 * This might trigger memory faults if ISRs occurring during
	 * re-programming perform access in those areas.
	 */
	arm_core_mpu_disable();
	_region_init(mpu_config.sram_region, (const struct nxp_mpu_region *)
		&mpu_config.mpu_regions[mpu_config.sram_region]);
	arm_core_mpu_enable();

	/* Use a signed type: _mpu_configure_regions() may return the
	 * negative error code -EINVAL, which must not be stored in an
	 * unsigned variable before the comparison and return below.
	 */
	int mpu_reg_index = static_regions_num;

	/* In NXP MPU architecture the dynamic regions are
	 * programmed on top of existing SRAM region configuration.
	 */
	mpu_reg_index = _mpu_configure_regions(dynamic_regions, regions_num,
		mpu_reg_index, false);

	if (mpu_reg_index != -EINVAL) {
		/* Disable the non-programmed MPU regions. */
		for (int i = mpu_reg_index; i < _get_num_regions(); i++) {
			LOG_DBG("disable region 0x%x", i);
			/* Disable region */
			SYSMPU->WORD[i][0] = 0;
			SYSMPU->WORD[i][1] = 0;
			SYSMPU->WORD[i][2] = 0;
			SYSMPU->WORD[i][3] = 0;
		}
	}

	return mpu_reg_index;
}
/**
 * @brief configure MPU region for a single memory partition
 *
 * @param part_index memory partition index
 * @param part memory partition info (NULL to disable the region)
 */
void arm_core_mpu_configure_mem_partition(u32_t part_index,
					  struct k_mem_partition *part)
{
	u32_t region_index =
		_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
	struct arm_mpu_region region_conf;

	SYS_LOG_DBG("configure partition index: %u", part_index);

	/* Never touch a region index beyond what the hardware supports:
	 * disabling an out-of-range region is as unpredictable as
	 * programming one (the original code fell through to
	 * _disable_region() with the invalid index).
	 */
	if (region_index + part_index >= _get_num_regions()) {
		return;
	}

	if (part) {
		SYS_LOG_DBG("set region 0x%x 0x%x 0x%x",
			region_index + part_index, part->start, part->size);
		_get_ram_region_attr_by_conf(&region_conf.attr, part->attr,
			part->start, part->size);
		region_conf.base = part->start;
		_region_init(region_index + part_index, &region_conf);
	} else {
		_disable_region(region_index + part_index);
	}
}
/**
 * @brief validate the given buffer is user accessible or not
 */
int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
	u8_t r_index;

	/* Iterate through all MPU regions */
	for (r_index = 0U; r_index < _get_num_regions(); r_index++) {
		if (!_is_enabled_region(r_index)) {
			continue;
		}
		if (!_is_in_region(r_index, (u32_t)addr, size)) {
			continue;
		}
		/* For NXP MPU, priority is given to granting permission over
		 * denying access for overlapping region.
		 * So we can stop the iteration immediately once we find the
		 * matched region that grants permission.
		 */
		if (_is_user_accessible_region(r_index, write)) {
			return 0;
		}
	}

	return -EPERM;
}
/**
 * @brief get the maximum number of available (free) MPU region indices
 * for configuring dynamic MPU partitions
 */
int arm_core_mpu_get_max_available_dyn_regions(void)
{
	/* All region indices above the statically programmed ones are
	 * free for dynamic partitions.
	 */
	return _get_num_regions() - static_regions_num;
}