Example 1
void vm_enable_paging(void)
{
	u32_t sctlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= SCTLR_M;

	/* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits. */
	sctlr &= ~SCTLR_TRE;

	/* AFE set to zero (default reset value): not using simplified model. */
	sctlr &= ~SCTLR_AFE;

	/* Enable instruction and data cache */
	sctlr |= SCTLR_C;
	sctlr |= SCTLR_I;
	write_sctlr(sctlr);
}
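All of these snippets rely on project-local SCTLR accessors and bit masks. As a reference point, here is a minimal sketch of what those helpers typically look like on ARMv7-A; the names mirror the snippets above, but each project ships its own definitions. Bit positions are from the ARMv7-A Architecture Reference Manual.

/* Minimal sketch of ARMv7-A SCTLR accessors and masks (CP15 c1, c0, 0). */
#include <stdint.h>

#define SCTLR_M   (1u << 0)   /* MMU enable */
#define SCTLR_C   (1u << 2)   /* Data/unified cache enable */
#define SCTLR_Z   (1u << 11)  /* Branch prediction enable */
#define SCTLR_I   (1u << 12)  /* Instruction cache enable */
#define SCTLR_V   (1u << 13)  /* High exception vectors (0xFFFF0000) */
#define SCTLR_TRE (1u << 28)  /* TEX remap enable */
#define SCTLR_AFE (1u << 29)  /* Access flag enable */
#define SCTLR_TE  (1u << 30)  /* Take exceptions in Thumb state */

static inline uint32_t read_sctlr(void)
{
	uint32_t val;

	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (val));
	return val;
}

static inline void write_sctlr(uint32_t val)
{
	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (val) : "memory");
	__asm__ volatile("isb");
}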
Example 2
int __init arch_cpu_irq_setup(void)
{
	static const struct cpu_page zero_filled_cpu_page = { 0 };

	int rc;
	extern u32 _start_vect[];
	u32 *vectors, *vectors_data;
	u32 vec;
	struct cpu_page vec_page;

#if defined(CONFIG_ARM32_HIGHVEC)
	/* Enable high vectors in SCTLR */
	write_sctlr(read_sctlr() | SCTLR_V_MASK);
	vectors = (u32 *) CPU_IRQ_HIGHVEC_BASE;
#else
#if defined(CONFIG_ARMV7A_SECUREX)
	write_vbar(CPU_IRQ_LOWVEC_BASE);
#endif
	vectors = (u32 *) CPU_IRQ_LOWVEC_BASE;
#endif
	vectors_data = vectors + CPU_IRQ_NR;

	/* If vectors are at correct location then do nothing */
	if ((u32) _start_vect == (u32) vectors) {
		return VMM_OK;
	}

	/* If vectors are not mapped in virtual memory then map them. */
	vec_page = zero_filled_cpu_page;
	rc = cpu_mmu_get_reserved_page((virtual_addr_t)vectors, &vec_page);
	if (rc) {
		rc = vmm_host_ram_alloc(&vec_page.pa, 
					TTBL_L2TBL_SMALL_PAGE_SIZE, 
					TRUE);
		if (rc) {
			return rc;
		}

		vec_page.va = (virtual_addr_t)vectors;
		vec_page.sz = TTBL_L2TBL_SMALL_PAGE_SIZE;
		vec_page.dom = TTBL_L1TBL_TTE_DOM_RESERVED;
		vec_page.ap = TTBL_AP_SRW_U;

		if ((rc = cpu_mmu_map_reserved_page(&vec_page))) {
			return rc;
		}
	}

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < CPU_IRQ_NR; vec++) {
		vectors[vec] = _start_vect[vec];
		vectors_data[vec] = _start_vect[vec + CPU_IRQ_NR];
	}

	return VMM_OK;
}
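Note that arch_cpu_irq_setup() writes instructions into the vector page at runtime, so a real port must also clean the data cache and invalidate the instruction cache before the new vectors can be fetched. A hedged sketch of that step follows; clean_dcache_range() is a hypothetical helper standing in for whatever range-clean primitive the codebase provides.

/* Sketch only: make freshly copied vector code visible to instruction
 * fetch. clean_dcache_range() is illustrative, not this project's API. */
static void sync_copied_vectors(u32 *vectors, u32 count)
{
	/* Push the copied words (insns + data) out to the point of unification. */
	clean_dcache_range((virtual_addr_t)vectors, 2 * count * sizeof(u32));

	/* Invalidate the I-cache and branch predictor, then resynchronize. */
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); /* ICIALLU */
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); /* BPIALL */
	__asm__ volatile("dsb" : : : "memory");
	__asm__ volatile("isb");
}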
Example 3
void bootblock_soc_init(void)
{
	uint32_t sctlr;

	/* enable dcache */
	sctlr = read_sctlr();
	sctlr |= SCTLR_C;
	write_sctlr(sctlr);
}
Example 4
bool arm_mmu_is_enabled(void)
{
	u32 sctlr = read_sctlr();

	if (sctlr & SCTLR_M_MASK) {
		return TRUE;
	}

	return FALSE;
}
Example 5
bool cpu_mmu_enabled(void)
{
	uint32_t sctlr;

#ifdef ARM32
	sctlr = read_sctlr();
#else
	sctlr = read_sctlr_el1();
#endif

	return sctlr & SCTLR_M ? true : false;
}
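On the AArch64 side, read_sctlr_el1() is normally a single MRS; a minimal sketch, assuming the code runs at EL1:

static inline uint32_t read_sctlr_el1(void)
{
	uint64_t val;

	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
	return (uint32_t)val;
}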
Example 6
void main(void)
{
    const char *stage_name = "fallback/romstage";
    void *entry;
    uint32_t sctlr;

    /* Globally disable MMU, caches, and branch prediction (these should
     * be disabled by default on reset) */
    sctlr = read_sctlr();
    sctlr &= ~(SCTLR_M | SCTLR_C | SCTLR_Z | SCTLR_I);
    write_sctlr(sctlr);

    armv7_invalidate_caches();

    /*
     * Re-enable caches and branch prediction. MMU will be set up later.
     * Note: If booting from USB, we need to disable branch prediction
     * before copying from USB into RAM (FIXME: why?)
     */
    sctlr = read_sctlr();
    sctlr |= SCTLR_C | SCTLR_Z | SCTLR_I;
    write_sctlr(sctlr);

    if (boot_cpu()) {
        bootblock_cpu_init();
        bootblock_mainboard_init();
    }

    console_init();
    printk(BIOS_INFO, "hello from bootblock\n");
    printk(BIOS_INFO, "bootblock main(): loading romstage\n");
    entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);

    printk(BIOS_INFO, "bootblock main(): jumping to romstage\n");
    if (entry) stage_exit(entry);
    hlt();
}
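armv7_invalidate_caches() discards whatever the caches held across reset; the full routine also walks the data cache by set/way. A simplified sketch of the instruction-side half only:

/* Sketch: invalidate the I-cache and branch predictor. The data-cache
 * half (a set/way walk driven by CLIDR/CCSIDR) is omitted for brevity. */
static void invalidate_icache_and_bp(void)
{
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); /* ICIALLU */
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); /* BPIALL */
	__asm__ volatile("dsb" : : : "memory");
	__asm__ volatile("isb");
}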
Example 7
void arm_mmu_cleanup(void)
{
	u32 sctlr = read_sctlr();

	/* If MMU already disabled then return */
	if (!(sctlr & SCTLR_M_MASK)) {
		return;
	}

	/* Disable MMU */
	sctlr &= ~SCTLR_M_MASK;
	write_sctlr(sctlr);

	return;
}
Example 8
void exception_init(void)
{
	uint32_t sctlr = read_sctlr();
	/* Handle exceptions in ARM mode. */
	sctlr &= ~SCTLR_TE;
	/* Set V=0 in SCTLR so VBAR points to the exception vector table. */
	sctlr &= ~SCTLR_V;
	write_sctlr(sctlr);

	extern uint32_t exception_table[];
	set_vbar((uintptr_t)exception_table);

	exception_stack_end = exception_stack + ARRAY_SIZE(exception_stack);
	exception_state_ptr = &exception_state;
}
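With SCTLR.V clear, exceptions vector through VBAR, which on ARMv7-A with the Security Extensions lives in CP15 c12. A minimal sketch of a set_vbar() along those lines:

static inline void set_vbar(uintptr_t base)
{
	/* VBAR bits [4:0] are reserved, so the table must be 32-byte aligned. */
	__asm__ volatile("mcr p15, 0, %0, c12, c0, 0" : : "r" (base));
	__asm__ volatile("isb");
}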
Example 9
void arm_mmu_page_test(u32 * total, u32 * pass, u32 * fail)
{
	int setup_required = 0;
	u32 ite, pos, free_page[TEST_PAGE_COUNT];
	u32 sctlr = read_sctlr();

	if (!(sctlr & SCTLR_M_MASK)) {
		setup_required = 1;
	}

	if (setup_required) {
		arm_mmu_setup();
	}

	/* Initialize statistics */
	*total = 0x0;
	*pass = 0x0;
	*fail = 0x0;

	/* Prepare list of free sections */
	pos = 0;
	for (ite = 0; ite < (TTBL_L2TBL_SIZE / 4); ite++) {
		if ((l2[ite] & TTBL_L2TBL_TTE_TYPE_MASK) == 
						TTBL_L2TBL_TTE_TYPE_FAULT) {
			free_page[pos] = ite;
			pos++;
		}
		if (pos == TEST_PAGE_COUNT) {
			break;
		}
	}

	/* Run a fixed set of test for all free sections */
	for (ite = 0; ite < TEST_PAGE_COUNT; ite++) {
		arm_mmu_page_test_iter(free_page[ite], 
					free_page[(ite + 1) % TEST_PAGE_COUNT],
					total, pass, fail);
	}

	if (setup_required) {
		arm_mmu_cleanup();
	}

	return;
}
Example 10
void vm_enable_paging(void)
{
	u32_t sctlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= SCTLR_M;

	/* Enable instruction and data cache */
	sctlr |= SCTLR_C;
	sctlr |= SCTLR_I;
	write_sctlr(sctlr);
}
Example 11
/**
 * This function is called when the OS makes a firmware call with the 
 * function code APPF_POWER_DOWN_CPU
 */
static int power_down_cpu(unsigned cstate, unsigned rstate, unsigned flags)
{
    struct appf_cpu *cpu;
    struct appf_cluster *cluster;
    int cpu_index, cluster_index;
    int i, rc, cluster_can_enter_cstate1;
    struct appf_main_table* pmaintable = (struct appf_main_table*)reloc_addr((unsigned)&main_table);
#ifdef USE_REALVIEW_EB_RESETS
    int system_reset = FALSE, last_cpu = FALSE;
#endif
    cpu_index = appf_platform_get_cpu_index();
    cluster_index = appf_platform_get_cluster_index();

    cluster = pmaintable->cluster_table;
    cluster += cluster_index;

    dbg_print("cluster:", cluster);
    
    cpu = cluster->cpu_table;
    cpu += cpu_index;

    dbg_print("cpu:", cpu_index);
    dbg_print("cluster_index:", cluster_index);

    /* Validate arguments */
    if (cstate > 3)
    {
        return APPF_BAD_CSTATE;
    }
    if (rstate > 3)
    {
        return APPF_BAD_RSTATE;
    }
    /* If we're just entering standby mode, we don't mark the CPU as inactive */
    if (cstate == 1)
    {
        get_spinlock(cpu_index, cluster->context->lock);
        cpu->power_state = 1;
        
        /* See if we can make the cluster standby too */
        if (rstate == 1)
        {
            cluster_can_enter_cstate1 = TRUE;
            for(i=0; i<cluster->num_cpus; ++i)
            {
                if (cluster->cpu_table[i].power_state != 1)
                {
                    cluster_can_enter_cstate1 = FALSE;
                    break;
                }
            }
            if (cluster_can_enter_cstate1)
            {
                cluster->power_state = 1;
            }
        }
                
        rc = appf_platform_enter_cstate1(cpu_index, cpu, cluster);

        if (rc == 0)
        {
            release_spinlock(cpu_index, cluster->context->lock);
            dsb();
            wfi();
            get_spinlock(cpu_index, cluster->context->lock);
            rc = appf_platform_leave_cstate1(cpu_index, cpu, cluster);
        }
        
        cpu->power_state = 0;
        cluster->power_state = 0;
        release_spinlock(cpu_index, cluster->context->lock);
        return rc;
    }

    /* Ok, we're not just entering standby, so we are going to lose the context on this CPU */
    dbg_prints("step1\n");
    get_spinlock(cpu_index, cluster->context->lock);
    --cluster->active_cpus;
    dbg_prints("step2\n");

    cpu->power_state = cstate;
    if (cluster->active_cpus == 0)
    {
        cluster->power_state = rstate;
#ifdef USE_REALVIEW_EB_RESETS
        /* last CPU down must not issue WFI, or we get stuck! */
        last_cpu = TRUE;
        if (rstate > 1)
        {
            system_reset = TRUE;
        }
#endif
    }
  
    /* add flags as required by hardware (e.g. APPF_SAVE_L2 if L2 is on) */
    flags |= cpu->context->flags;
    appf_platform_save_context(cluster, cpu, flags);

    dbg_prints("step3\n");

    /* Call the platform-specific shutdown code */
    rc = appf_platform_enter_cstate(cpu_index, cpu, cluster);
   
     /* Did the power down succeed? */
    if (rc == APPF_OK)
    {

        release_spinlock(cpu_index, cluster->context->lock);

        while (1) 
        {
#if 0
#if defined(NO_PCU) || defined(USE_REALVIEW_EB_RESETS)
            extern void platform_reset_handler(unsigned, unsigned, unsigned, unsigned);
            void (*reset)(unsigned, unsigned, unsigned, unsigned) = platform_reset_handler;

#ifdef USE_REALVIEW_EB_RESETS
            /* Unlock system registers */
            *(volatile unsigned *)0x10000020 = 0xa05f;
            if (system_reset)
            {
                /* Tell the Realview EB to do a system reset */
                *(volatile unsigned *)0x10000040 = 6;
                /* goto reset vector! */
            }
            else
            {
                if (!last_cpu)
                {
                    /* Tell the Realview EB to put this CPU into reset */
                    *(volatile unsigned *)0x10000074 &= ~(1 << (6 + cpu_index));
                    /* goto reset vector! (when another CPU takes us out of reset) */
                }
            }
#endif
            /*
             * If we get here, either we are the last CPU, or the EB resets 
             * aren't present (e.g. Emulator). So, fake a reset: Turn off MMU, 
             * corrupt registers, wait for a while, jump to warm reset entry point
             */
            write_sctlr(read_sctlr() & ~0x10001807); /* clear TRE, I Z C M */
            dsb();
            for (i=0; i<10000; ++i)
            {
                __nop();
            }
            reset(0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef);
#endif
#endif

            dsb();    
            wfi(); /* This signals the power controller to cut the power */
            /* Next stop, reset vector! */
        }
    }
    else
    {
        /* Power down failed for some reason, return to the OS */
        appf_platform_restore_context(cluster, cpu);
        cpu->power_state = 0;
        cluster->power_state = 0;
        ++cluster->active_cpus;
        release_spinlock(cpu_index, cluster->context->lock);
    }
    return rc;
}
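The power-down path leans on dsb() and wfi(); on ARMv7-A both are single instructions, so the primitives reduce to one-line wrappers. A minimal sketch:

static inline void dsb(void)
{
	__asm__ volatile("dsb" : : : "memory");
}

static inline void wfi(void)
{
	__asm__ volatile("wfi");
}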
Example 12
static int stm32mp1_ddr_setup(void)
{
	struct ddr_info *priv = &ddr_priv_data;
	int ret;
	struct stm32mp1_ddr_config config;
	int node, len;
	uint32_t uret, idx;
	void *fdt;

#define PARAM(x, y)							\
	{								\
		.name = x,						\
		.offset = offsetof(struct stm32mp1_ddr_config, y),	\
		.size = sizeof(config.y) / sizeof(uint32_t)		\
	}

#define CTL_PARAM(x) PARAM("st,ctl-"#x, c_##x)
#define PHY_PARAM(x) PARAM("st,phy-"#x, p_##x)

	const struct {
		const char *name; /* Name in DT */
		const uint32_t offset; /* Offset in config struct */
		const uint32_t size;   /* Size of parameters */
	} param[] = {
		CTL_PARAM(reg),
		CTL_PARAM(timing),
		CTL_PARAM(map),
		CTL_PARAM(perf),
		PHY_PARAM(reg),
		PHY_PARAM(timing),
		PHY_PARAM(cal)
	};

	if (fdt_get_address(&fdt) == 0) {
		return -ENOENT;
	}

	node = fdt_node_offset_by_compatible(fdt, -1, DT_DDR_COMPAT);
	if (node < 0) {
		ERROR("%s: Cannot read DDR node in DT\n", __func__);
		return -EINVAL;
	}

	config.info.speed = fdt_read_uint32_default(node, "st,mem-speed", 0);
	if (!config.info.speed) {
		VERBOSE("%s: no st,mem-speed\n", __func__);
		return -EINVAL;
	}
	config.info.size = fdt_read_uint32_default(node, "st,mem-size", 0);
	if (!config.info.size) {
		VERBOSE("%s: no st,mem-size\n", __func__);
		return -EINVAL;
	}
	config.info.name = fdt_getprop(fdt, node, "st,mem-name", &len);
	if (config.info.name == NULL) {
		VERBOSE("%s: no st,mem-name\n", __func__);
		return -EINVAL;
	}
	INFO("RAM: %s\n", config.info.name);

	for (idx = 0; idx < ARRAY_SIZE(param); idx++) {
		ret = fdt_read_uint32_array(node, param[idx].name,
					    (void *)((uintptr_t)&config +
						     param[idx].offset),
					    param[idx].size);

		VERBOSE("%s: %s[0x%x] = %d\n", __func__,
			param[idx].name, param[idx].size, ret);
		if (ret != 0) {
			ERROR("%s: Cannot read %s\n",
			      __func__, param[idx].name);
			return -EINVAL;
		}
	}

	/* Disable axidcg clock gating during init */
	mmio_clrbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	stm32mp1_ddr_init(priv, &config);

	/* Enable axidcg clock gating */
	mmio_setbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	priv->info.size = config.info.size;

	VERBOSE("%s : ram size(%x, %x)\n", __func__,
		(uint32_t)priv->info.base, (uint32_t)priv->info.size);

	write_sctlr(read_sctlr() & ~SCTLR_C_BIT);
	dcsw_op_all(DC_OP_CISW);

	uret = ddr_test_data_bus();
	if (uret != 0U) {
		ERROR("DDR data bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	uret = ddr_test_addr_bus();
	if (uret != 0U) {
		ERROR("DDR addr bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	uret = ddr_check_size();
	if (uret < config.info.size) {
		ERROR("DDR size: 0x%x does not match DT config: 0x%x\n",
		      uret, config.info.size);
		panic();
	}

	write_sctlr(read_sctlr() | SCTLR_C_BIT);

	return 0;
}
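The DDR test sequence shows a recurring pattern: clear SCTLR.C, clean and invalidate the data cache so loads and stores hit DRAM directly, run the tests, then re-enable the cache. A hedged sketch of that pattern as a reusable helper, reusing the TF-A names from the snippet above:

/* Sketch: run fn() with the data cache off so memory tests see DRAM,
 * not cache. dcsw_op_all()/DC_OP_CISW are the TF-A names used above. */
static void run_with_dcache_off(void (*fn)(void))
{
	write_sctlr(read_sctlr() & ~SCTLR_C_BIT);
	dcsw_op_all(DC_OP_CISW); /* clean + invalidate all levels by set/way */

	fn();

	write_sctlr(read_sctlr() | SCTLR_C_BIT);
}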
Example 13
void arm_mmu_setup(void)
{
	u32 s, sec, sec_tmpl = 0x0, sec_start = 0x0, sec_end = 0x0;
	u32 sctlr = read_sctlr();

	/* If MMU already enabled then return */
	if (sctlr & SCTLR_M_MASK) {
		return;
	}

	/* Reset memory for L2 */
	for (sec = 0; sec < (TTBL_L2TBL_SIZE / 4); sec++) {
		l2[sec] = 0x0;
	}

	/* Reset memory for L1 */
	for (sec = 0; sec < (TTBL_L1TBL_SIZE / 4); sec++) {
		l1[sec] = 0x0;
	}

	/* Section entry template for code */
	sec_tmpl = 0x0;
	sec_tmpl |= (TTBL_L1TBL_TTE_DOM_CHECKAP << TTBL_L1TBL_TTE_DOM_SHIFT);
	sec_tmpl |= (TTBL_AP_SRW_URW << TTBL_L1TBL_TTE_AP_SHIFT);
	sec_tmpl |= TTBL_L1TBL_TTE_C_MASK;
	sec_tmpl |= TTBL_L1TBL_TTE_TYPE_SECTION;

	/* Create section entries for code */
	sec_start = ((u32)&_code_start) & ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
	sec_end = ((u32)&_code_end) & ~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
	for (sec = sec_start; 
	     sec <= sec_end; 
	     sec += TTBL_L1TBL_SECTION_PAGE_SIZE) {
		l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	}
	sec_end += TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Create section entries for exception vectors */
	if (sec_start > 0x0) {
		l1[0] = sec_tmpl | 0x0;
	}

	/* Map an additional section after code */
	sec = sec_end;
	l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	sec_end += TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Section entry template for I/O */
	sec_tmpl &= ~TTBL_L1TBL_TTE_C_MASK;
	sec_tmpl |= TTBL_L1TBL_TTE_XN_MASK;

	/* Create section entries for IO */
	for (s = 0; s < arm_board_iosection_count(); s++) {
		sec = arm_board_iosection_addr(s) & 
				~(TTBL_L1TBL_SECTION_PAGE_SIZE - 1);
		l1[sec / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | sec;
	}

	/* Map an l2 table after (code + additional section) */
	sec_tmpl = 0x0;
	sec_tmpl |= TTBL_L1TBL_TTE_TYPE_L2TBL;
	l2_mapva = sec_end;
	l1[l2_mapva / TTBL_L1TBL_SECTION_PAGE_SIZE] = sec_tmpl | (u32)(&l2);

	/* Setup test area in physical RAM */
	test_area_pa = sec_end;
	test_area_size = TTBL_L1TBL_SECTION_PAGE_SIZE;

	/* Write DACR */
	sec = 0x0;
	sec |= (TTBL_DOM_CLIENT << (2 * TTBL_L1TBL_TTE_DOM_CHECKAP));
	sec |= (TTBL_DOM_MANAGER << (2 * TTBL_L1TBL_TTE_DOM_BYPASSAP));
	sec |= (TTBL_DOM_NOACCESS << (2 * TTBL_L1TBL_TTE_DOM_NOACCESS));
	write_dacr(sec);

	/* Write TTBR0 */
	write_ttbr0((u32)&l1);

	/* Enable MMU */
	sctlr |= SCTLR_M_MASK;
	write_sctlr(sctlr);

	return;
}
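The TTBL_* constants above encode the ARMv7 short-descriptor L1 section format. For reference, a sketch of the fields involved; the names here are illustrative stand-ins for this project's definitions, with bit positions per the ARMv7-A Architecture Reference Manual:

/* ARMv7 short-descriptor L1 section entry fields (sketch). */
#define SECTION_TYPE      (0x2u)             /* bits[1:0] = 0b10: section */
#define SECTION_B         (1u << 2)          /* Bufferable */
#define SECTION_C         (1u << 3)          /* Cacheable */
#define SECTION_XN        (1u << 4)          /* Execute-never */
#define SECTION_DOM(d)    ((u32)(d) << 5)    /* Domain, bits[8:5] */
#define SECTION_AP(ap)    ((u32)(ap) << 10)  /* AP[1:0], bits[11:10] */
#define SECTION_TEX(t)    ((u32)(t) << 12)   /* TEX[2:0], bits[14:12] */
#define SECTION_BASE(pa)  ((u32)(pa) & 0xFFF00000u) /* 1 MiB-aligned base */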
Example 14
/*******************************************************************************
 * Function to perform late architectural and platform specific initialization.
 * It also locates and loads the BL2 raw binary image in the trusted DRAM. Only
 * called by the primary cpu after a cold boot.
 * TODO: Add support for alternative image load mechanism e.g using virtio/elf
 * loader etc.
 ******************************************************************************/
void bl1_main(void)
{
	unsigned long sctlr_el3 = read_sctlr();
	unsigned long bl2_base;
	unsigned int load_type = TOP_LOAD, spsr;
	meminfo *bl1_tzram_layout;
	meminfo *bl2_tzram_layout = 0x0;

	/*
	 * Ensure that MMU/Caches and coherency are turned on
	 */
	assert(sctlr_el3 & SCTLR_M_BIT);
	assert(sctlr_el3 & SCTLR_C_BIT);
	assert(sctlr_el3 & SCTLR_I_BIT);

	/* Perform remaining generic architectural setup from EL3 */
	bl1_arch_setup();

	/* Perform platform setup in BL1. */
	bl1_platform_setup();

	/* Announce our arrival */
	printf(FIRMWARE_WELCOME_STR);
	printf("Built : %s, %s\n\r", __TIME__, __DATE__);

	/*
	 * Find out how much free trusted ram remains after BL1 load
	 * & load the BL2 image at its top
	 */
	bl1_tzram_layout = bl1_plat_sec_mem_layout();
	bl2_base = load_image(bl1_tzram_layout,
			      (const char *) BL2_IMAGE_NAME,
			      load_type, BL2_BASE);

	/*
	 * Create a new layout of memory for BL2 as seen by BL1 i.e.
	 * tell it the amount of total and free memory available.
	 * This layout is created at the first free address visible
	 * to BL2. BL2 will read the memory layout before using its
	 * memory for other purposes.
	 */
	bl2_tzram_layout = (meminfo *) bl1_tzram_layout->free_base;
	init_bl2_mem_layout(bl1_tzram_layout,
			    bl2_tzram_layout,
			    load_type,
			    bl2_base);

	if (bl2_base) {
		bl1_arch_next_el_setup();
		spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64);
		printf("Booting trusted firmware boot loader stage 2\n\r");
#if DEBUG
		printf("BL2 address = 0x%llx \n\r", (unsigned long long) bl2_base);
		printf("BL2 cpsr = 0x%x \n\r", spsr);
		printf("BL2 memory layout address = 0x%llx \n\r",
		       (unsigned long long) bl2_tzram_layout);
#endif
		run_image(bl2_base, spsr, SECURE, bl2_tzram_layout, 0);
	}

	/*
	 * TODO: print failure to load BL2 but also add a tzwdog timer
	 * which will reset the system eventually.
	 */
	printf("Failed to load boot loader stage 2 (BL2) firmware.\n\r");
	return;
}