/*******************************************************************************
 * Perform the very early platform-specific architectural setup here. At the
 * moment this only does basic initialization. Later architectural setup
 * (bl1_arch_setup()) does not do anything platform-specific.
 ******************************************************************************/
void bl1_plat_arch_setup(void)
{
	unsigned long cci_setup;

	/*
	 * Enable CCI-400 coherency for this cluster. There is no need
	 * for locks as no other CPU is active at the moment.
	 */
	cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
	if (cci_setup) {
		cci_enable_coherency(read_mpidr());
	}

	configure_mmu(&bl1_tzram_layout,
			TZROM_BASE,
			TZROM_BASE + TZROM_SIZE,
			BL1_COHERENT_RAM_BASE,
			BL1_COHERENT_RAM_LIMIT);
}
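
/*
 * Illustrative sketch only: the configuration query used above can be backed
 * by a simple per-platform array that is filled in during early boot. The
 * array, the CONFIG_LIMIT bound and the accessor shown here are assumptions
 * made for the example, not necessarily the platform's actual implementation.
 */
static unsigned long platform_config[CONFIG_LIMIT];

unsigned long platform_get_cfgvar(unsigned int var_id)
{
	/* Return the value cached for this configuration variable */
	assert(var_id < CONFIG_LIMIT);
	return platform_config[var_id];
}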
Example #2
/*******************************************************************************
 * FVP handler called when an affinity instance has just been powered on after
 * being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' argument allows the platform to decide whether the
 * cluster was turned off prior to wakeup and to do what's necessary to set it
 * up correctly.
 ******************************************************************************/
int fvp_affinst_on_finish(unsigned long mpidr,
			  unsigned int afflvl,
			  unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long linear_id, cpu_setup, cci_setup;
	mailbox *fvp_mboxes;
	unsigned int gicd_base, gicc_base, reg_val, ectlr;

	switch (afflvl) {

	case MPIDR_AFFLVL1:
		/* Enable coherency if this cluster was off */
		if (state == PSCI_STATE_OFF) {

			/*
			 * This CPU might have woken up whilst the
			 * cluster was attempting to power down. In
			 * this case the FVP power controller will
			 * have a pending cluster power off request
			 * which needs to be cleared by writing to the
			 * PPONR register. This prevents the power
			 * controller from interpreting a subsequent
			 * entry of this cpu into a simple wfi as a
			 * power down request.
			 */
			fvp_pwrc_write_pponr(mpidr);

			cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
			if (cci_setup) {
				cci_enable_coherency(mpidr);
			}
		}
		break;

	case MPIDR_AFFLVL0:
		/*
		 * Ignore the state passed for a CPU; it could only have
		 * been off if we have reached this point.
		 */

		/*
		 * Turn on intra-cluster coherency if the FVP flavour supports
		 * it.
		 */
		cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
		if (cpu_setup) {
			ectlr = read_cpuectlr();
			ectlr |= CPUECTLR_SMP_BIT;
			write_cpuectlr(ectlr);
		}

		/*
		 * Clear the PWKUPR.WEN bit to ensure interrupts do not
		 * interfere with a CPU power down unless the bit is set again.
		 */
		fvp_pwrc_clr_wen(mpidr);

		/* Zero the jump address in the mailbox for this cpu */
		fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
		linear_id = platform_get_core_pos(mpidr);
		fvp_mboxes[linear_id].value = 0;
		flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
				   sizeof(unsigned long));

		gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR);
		gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);

		/* Enable the gic cpu interface */
		gic_cpuif_setup(gicc_base);

		/* TODO: This setup is needed only after a cold boot */
		gic_pcpu_distif_setup(gicd_base);

		/* Allow access to the System counter timer module */
		reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
		reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
		reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);

		reg_val = (1 << CNTNSAR_NS_SHIFT(0)) |
			(1 << CNTNSAR_NS_SHIFT(1));
		mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);

		break;

	default:
		assert(0);
	}

	return rc;
}
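
/*
 * For illustration: the mailbox zeroed above is the same per-CPU slot that
 * fvp_affinst_suspend() later programs with a secure entry point. A minimal
 * sketch of how a woken CPU's warm-boot path might consume it is shown below.
 * It assumes the mailbox type wraps a single volatile jump-address word; the
 * helper name and the wfe() wrapper are also assumptions for the example.
 */
static unsigned long wait_for_mailbox(unsigned long mpidr)
{
	mailbox *mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
	unsigned long linear_id = platform_get_core_pos(mpidr);

	/* Spin until a non-zero jump address is published for this CPU */
	while (mboxes[linear_id].value == 0)
		wfe();

	return mboxes[linear_id].value;
}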
Example #3
/*******************************************************************************
 * FVP handler called when an affinity instance is about to be suspended. The
 * level and mpidr determine the affinity instance. The 'state' arg. allows the
 * platform to decide whether the cluster is being turned off and take apt
 * actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write and read global variables across calls. It would
 * be wise to flush any write to a global to prevent unpredictable results.
 ******************************************************************************/
int fvp_affinst_suspend(unsigned long mpidr,
			unsigned long sec_entrypoint,
			unsigned long ns_entrypoint,
			unsigned int afflvl,
			unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int gicc_base, ectlr;
	unsigned long cpu_setup, cci_setup, linear_id;
	mailbox *fvp_mboxes;

	switch (afflvl) {
	case MPIDR_AFFLVL1:
		if (state == PSCI_STATE_OFF) {
			/*
			 * Disable coherency if this cluster is to be
			 * turned off
			 */
			cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
			if (cci_setup) {
				cci_disable_coherency(mpidr);
			}

			/*
			 * Program the power controller to turn the
			 * cluster off
			 */
			fvp_pwrc_write_pcoffr(mpidr);

		}
		break;

	case MPIDR_AFFLVL0:
		if (state == PSCI_STATE_OFF) {
			/*
			 * Take this cpu out of intra-cluster coherency if
			 * the FVP flavour supports the SMP bit.
			 */
			cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
			if (cpu_setup) {
				ectlr = read_cpuectlr();
				ectlr &= ~CPUECTLR_SMP_BIT;
				write_cpuectlr(ectlr);
			}

			/* Program the jump address for the target cpu */
			linear_id = platform_get_core_pos(mpidr);
			fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF);
			fvp_mboxes[linear_id].value = sec_entrypoint;
			flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
					   sizeof(unsigned long));

			/*
			 * Prevent interrupts from spuriously waking up
			 * this cpu
			 */
			gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
			gic_cpuif_deactivate(gicc_base);

			/*
			 * Program the power controller to power this
			 * cpu off and enable wakeup interrupts.
			 */
			fvp_pwrc_set_wen(mpidr);
			fvp_pwrc_write_ppoffr(mpidr);
		}
		break;

	default:
		assert(0);
	}

	return rc;
}
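
/*
 * Concrete illustration of the CAUTION note above: if a handler running on
 * coherent stacks must publish a value through a global, follow the write
 * with a cache flush so that observers running with their data caches off
 * (or with coherency disabled) still see it. The global and helper below are
 * hypothetical and exist only to show the pattern.
 */
static unsigned long fvp_last_suspended_mpidr;

static void publish_suspended_mpidr(unsigned long mpidr)
{
	fvp_last_suspended_mpidr = mpidr;

	/* Push the update to memory for readers whose caches may be off */
	flush_dcache_range((unsigned long) &fvp_last_suspended_mpidr,
			   sizeof(fvp_last_suspended_mpidr));
}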
Example #4
/*******************************************************************************
 * FVP handler called when an affinity instance is about to be turned off. The
 * level and mpidr determine the affinity instance. The 'state' arg. allows the
 * platform to decide whether the cluster is being turned off and take apt
 * actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write and read global variables across calls. It would
 * be wise to flush any write to a global to prevent unpredictable results.
 ******************************************************************************/
int fvp_affinst_off(unsigned long mpidr,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int gicc_base, ectlr;
	unsigned long cpu_setup, cci_setup;

	switch (afflvl) {
	case MPIDR_AFFLVL1:
		if (state == PSCI_STATE_OFF) {
			/*
			 * Disable coherency if this cluster is to be
			 * turned off
			 */
			cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI);
			if (cci_setup) {
				cci_disable_coherency(mpidr);
			}

			/*
			 * Program the power controller to turn the
			 * cluster off
			 */
			fvp_pwrc_write_pcoffr(mpidr);

		}
		break;

	case MPIDR_AFFLVL0:
		if (state == PSCI_STATE_OFF) {

			/*
			 * Take this cpu out of intra-cluster coherency if
			 * the FVP flavour supports the SMP bit.
			 */
			cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP);
			if (cpu_setup) {
				ectlr = read_cpuectlr();
				ectlr &= ~CPUECTLR_SMP_BIT;
				write_cpuectlr(ectlr);
			}

			/*
			 * Prevent interrupts from spuriously waking up
			 * this cpu
			 */
			gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR);
			gic_cpuif_deactivate(gicc_base);

			/*
			 * Program the power controller to power this
			 * cpu off
			 */
			fvp_pwrc_write_ppoffr(mpidr);
		}
		break;

	default:
		assert(0);
	}

	return rc;
}
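
/*
 * The handlers above are normally gathered into an ops table that the generic
 * PSCI layer dispatches through. The sketch below shows one way such a
 * registration could look; the structure name, field names and the
 * registration hook are assumptions for the example and may not match the
 * platform port exactly (the on and suspend-finish handlers referenced here
 * are the counterparts of the functions shown in this file).
 */
static const plat_pm_ops fvp_ops = {
	.affinst_on		= fvp_affinst_on,
	.affinst_off		= fvp_affinst_off,
	.affinst_suspend	= fvp_affinst_suspend,
	.affinst_on_finish	= fvp_affinst_on_finish,
	.affinst_suspend_finish	= fvp_affinst_suspend_finish,
};

int platform_setup_pm(const plat_pm_ops **plat_ops)
{
	/* Hand the FVP-specific handlers to the generic PSCI code */
	*plat_ops = &fvp_ops;
	return 0;
}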
/*
 * For the moment we assume that all security programming is done by the
 * primary core.
 * TODO:
 * Might want to enable an interrupt on violations when supported.
 */
void plat_security_setup(void)
{
	struct tzc_instance controller;

	/*
	 * The Base FVP has a TrustZone address space controller; the Foundation
	 * FVP does not. Trying to program the device on the Foundation FVP will
	 * cause an abort.
	 *
	 * If the platform had additional peripheral-specific security
	 * configurations, those would be configured here.
	 */

	if (!platform_get_cfgvar(CONFIG_HAS_TZC))
		return;

	/*
	 * The TrustZone controller gates access to main DRAM. For the moment,
	 * give full non-secure (NS) access so it can be used by the OS.
	 */
	INFO("Configuring TrustZone Controller\n");

	/*
	 * The driver does some error checking and will assert.
	 * - Provide base address of device on platform.
	 * - Provide width of ACE-Lite IDs on platform.
	 */
	controller.base = TZC400_BASE;
	controller.aid_width = FVP_AID_WIDTH;
	tzc_init(&controller);

	/*
	 * Currently only filters 0 and 2 are connected on Base FVP.
	 * Filter 0 : CPU clusters (no access to DRAM by default)
	 * Filter 1 : not connected
	 * Filter 2 : LCDs (access to VRAM allowed by default)
	 * Filter 3 : not connected
	 * Programming unconnected filters will have no effect at the
	 * moment. These filters could, however, be connected in the future,
	 * so care should be taken not to configure the unused filters.
	 */

	/* Disable all filters before programming. */
	tzc_disable_filters(&controller);

	/*
	 * For the moment, allow the supported devices full access to all
	 * DRAM. Give access to the CPUs and Virtio. Some devices
	 * would normally use the default ID, so allow that too. We use
	 * three different regions to cover the three separate blocks of
	 * memory in the FVPs. We allow secure access to DRAM to load NS
	 * software.
	 * FIXME: In current models Virtio uses a reserved ID. This is
	 * not correct and will be fixed.
	 */

	/* Set to cover 2GB block of DRAM */
	tzc_configure_region(&controller, FILTER_SHIFT(0), 1,
			DRAM_BASE, 0xFFFFFFFF, TZC_REGION_S_RDWR,
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));

	/* Set to cover the 30GB block */
	tzc_configure_region(&controller, FILTER_SHIFT(0), 2,
			0x880000000, 0xFFFFFFFFF, TZC_REGION_S_RDWR,
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));

	/* Set to cover 480GB block */
	tzc_configure_region(&controller, FILTER_SHIFT(0), 3,
			0x8800000000, 0xFFFFFFFFFF, TZC_REGION_S_RDWR,
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_RES5));

	/*
	 * TODO: Interrupts are not currently supported. The only
	 * options we have are for access errors to occur quietly or to
	 * cause an exception. We choose to cause an exception.
	 */
	tzc_set_action(&controller, TZC_ACTION_ERR);

	/* Enable filters. */
	tzc_enable_filters(&controller);
}
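
/*
 * For reference, the filter selector and per-NSAID access masks passed to
 * tzc_configure_region() above could be built along the following lines.
 * These definitions are illustrative assumptions; the real driver header may
 * encode the fields differently.
 */
#define FILTER_SHIFT(filter)		(1 << (filter))		/* one-hot filter enable */

#define TZC_REGION_ACCESS_RD(id)	(1 << (id))		/* NSAID read enable bit */
#define TZC_REGION_ACCESS_WR(id)	(1 << ((id) + 16))	/* NSAID write enable bit */
#define TZC_REGION_ACCESS_RDWR(id)	\
	(TZC_REGION_ACCESS_RD(id) | TZC_REGION_ACCESS_WR(id))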
Example #6
/*
 * For the moment we assume that all security programming is done by the
 * primary core.
 * TODO:
 * Might want to enable an interrupt on violations when supported.
 */
void plat_security_setup(void)
{
	tzc_instance_t controller;

	/*
	 * The Base FVP has a TrustZone address space controller; the Foundation
	 * FVP does not. Trying to program the device on the Foundation FVP will
	 * cause an abort.
	 *
	 * If the platform had additional peripheral-specific security
	 * configurations, those would be configured here.
	 */

	if (!platform_get_cfgvar(CONFIG_HAS_TZC))
		return;

	/*
	 * The TrustZone controller gates access to main DRAM. For the moment,
	 * give full non-secure (NS) access so it can be used by the OS.
	 */
	INFO("Configuring TrustZone Controller\n");

	/*
	 * The driver does some error checking and will assert.
	 * - Provide base address of device on platform.
	 * - Provide width of ACE-Lite IDs on platform.
	 */
	controller.base = TZC400_BASE;
	controller.aid_width = FVP_AID_WIDTH;
	tzc_init(&controller);

	/*
	 * Currently only filters 0 and 2 are connected on Base FVP.
	 * Filter 0 : CPU clusters (no access to DRAM by default)
	 * Filter 1 : not connected
	 * Filter 2 : LCDs (access to VRAM allowed by default)
	 * Filter 3 : not connected
	 * Programming unconnected filters will have no effect at the
	 * moment. These filters could, however, be connected in the future,
	 * so care should be taken not to configure the unused filters.
	 */

	/* Disable all filters before programming. */
	tzc_disable_filters(&controller);

	/*
	 * Allow the supported devices only non-secure access to all DRAM.
	 * Give access to the CPUs and Virtio. Some devices
	 * would normally use the default ID, so allow that too. We use
	 * two regions to cover the blocks of physical memory in the FVPs.
	 *
	 * Software executing in the secure state, such as a secure
	 * boot-loader, can access the DRAM by using the NS attributes in
	 * the MMU translation tables and descriptors.
	 */

	/* Set to cover the first block of DRAM */
	tzc_configure_region(&controller, FILTER_SHIFT(0), 1,
			DRAM_BASE, 0xFFFFFFFF, TZC_REGION_S_NONE,
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));

	/* Set to cover the second block of DRAM */
	tzc_configure_region(&controller, FILTER_SHIFT(0), 2,
			0x880000000, 0xFFFFFFFFF, TZC_REGION_S_NONE,
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_DEFAULT) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_PCI) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_AP) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO) |
			TZC_REGION_ACCESS_RDWR(FVP_NSAID_VIRTIO_OLD));

	/*
	 * TODO: Interrupts are not currently supported. The only
	 * options we have are for access errors to occur quietly or to
	 * cause an exception. We choose to cause an exception.
	 */
	tzc_set_action(&controller, TZC_ACTION_ERR);

	/* Enable filters. */
	tzc_enable_filters(&controller);
}
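
/*
 * The comment above notes that secure software reaches this DRAM by mapping
 * it with non-secure attributes in its own translation tables. A minimal
 * sketch of such a mapping is shown below, assuming an mmap-style interface
 * and MT_* attribute flags; the helper name, its signature, the attribute
 * flags and DRAM_SIZE are all assumptions made for the example.
 */
static void map_ns_dram(void)
{
	/* Identity-map the first DRAM block as non-secure, read-write memory */
	mmap_add_region(DRAM_BASE,		/* physical base */
			DRAM_BASE,		/* virtual base */
			DRAM_SIZE,		/* size of the first DRAM block */
			MT_MEMORY | MT_RW | MT_NS);
}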