/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

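	/*
	 * Carve the SDP backing store (1 MB) and the backup extension store
	 * (128 KB) out of the top of the workspace; what remains is the PA
	 * workspace.
	 */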
	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	tf_clock_timer_start();

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dpr_err("%s(%p): The SMC PA is already started\n",
			__func__, comm);

		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dpr_err("%s(%p): The L1 structure size is incorrect!\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
		goto error1;
	}

	l1_shared_buffer =
		(struct tf_l1_shared_buffer *)
			internal_get_zeroed_page(GFP_KERNEL);

	if (l1_shared_buffer == NULL) {
		dpr_err("%s(%p): Ouf of memory!\n", __func__, comm);

		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
		__func__, comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
			current->mm);
	pa_info.certificate = (void *) workspace_addr;
	pa_info.parameters = (void *) __pa(l1_shared_buffer);
	pa_info.results = (void *) __pa(l1_shared_buffer);

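	/*
	 * Only the attribute bits (bits [11:0]) of the L2 descriptor are
	 * passed here; the physical address of the buffer is supplied
	 * separately through pa_info.
	 */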
	l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;

	l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	l1_shared_buffer->workspace_addr = workspace_addr;
	l1_shared_buffer->workspace_size = workspace_size;

	dpr_info("%s(%p): System Configuration (%d bytes)\n",
		__func__, comm, conf_size);
	dpr_info("%s(%p): Starting PA (%d bytes)...\n",
		__func__, comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)l1_shared_buffer,
		(void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(l1_shared_buffer),
		__pa(l1_shared_buffer) + PAGE_SIZE);

	if (pa_size > workspace_size) {
		dpr_err("%s(%p): PA size is incorrect (%x)\n",
			__func__, comm, pa_size);
		ret = -EFAULT;
		goto error1;
	}

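	/*
	 * Copy the PA binary from user space into the physical workspace
	 * through a temporary uncached mapping.
	 */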
	{
		void *tmp;
		tmp = ioremap_nocache(workspace_addr, pa_size);
		if (copy_from_user(tmp, pa_buffer, pa_size)) {
			iounmap(tmp);
			dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
				__func__, comm, (void *) pa_buffer);
			ret = -EFAULT;
			goto error1;
		}
		iounmap(tmp);
	}

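	/* Likewise make the pa_info structure visible to the secure world. */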
	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->l1_buffer = l1_shared_buffer;
	comm->l1_buffer->conf_descriptor = conf_descriptor;
	comm->l1_buffer->conf_offset     = conf_offset;
	comm->l1_buffer->conf_size       = conf_size;
	spin_unlock(&(comm->lock));
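	/*
	 * The page is now owned by comm->l1_buffer; clear the local pointer
	 * so the error paths below do not free it twice.
	 */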
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, comm->l1_buffer->rpc_command);

		switch (comm->l1_buffer->rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dpr_info("%s: RPC_CMD_INIT\n", __func__);
			comm->l1_buffer->rpc_status = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			comm->l1_buffer->rpc_status = RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm);
	if (ret != 0) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->l1_buffer;
	comm->l1_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
	tf_clock_timer_stop();
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}
/*
 * OMAP4 MPUSS Low Power Entry Function
 *
 * The purpose of this function is to manage low power programming
 * of OMAP4 MPUSS subsystem
 * Parameters:
 *	cpu : CPU ID
 *	power_state: Targeted low power state.
 *
 * MPUSS Low power states
 * The basic rule is that the MPUSS power domain must be in a power state
 * higher than or equal to (i.e. consuming more power than) the higher of
 * the two CPU states. For example, it is illegal for the system power to
 * be OFF while the power state of one or both CPUs is DORMANT. When an
 * illegal state is entered, the hardware behavior is unpredictable.
 *
 * MPUSS state for the context save
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 */
void omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	unsigned int save_state, wakeup_cpu, inst_clk_enab = 0;

	if (cpu >= NR_CPUS)
		return;

	/*
	 * Low power state not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		wmb();
		do_wfi();
		return;
	}

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 1;
		setup_wakeup_routine(cpu);
		save_local_timers(cpu);
		break;
	case PWRDM_POWER_RET:
		/*
		 * CPUx CSWR is an invalid hardware state. Additionally,
		 * CPUx OSWR doesn't give any gain vs CPUx OFF and hence
		 * is not supported.
		 */
	default:
		/* Invalid state */
		pr_debug("Invalid CPU low power state\n");
		return;
	}

	/*
	 * MPUSS bookkeeping should be executed only by the master
	 * CPU, which is the last CPU to go down
	 */
	if (cpu)
		goto cpu_prepare;
	/*
	 * Check the MPUSS next state and save the GIC if needed;
	 * the GIC is lost during MPU OFF and OSWR
	 */
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	if (omap4_device_off_read_next_state() &&
			 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
		/* FIXME: Check if this can be optimised */

		/* l3_main inst clock must be enabled for
		 * a save ram operation
		 */
		if (!l3_main_3_ick->usecount) {
			inst_clk_enab = 1;
			clk_enable(l3_main_3_ick);
		}

		save_secure_all();
		save_gic_wakeupgen_secure();

		if (inst_clk_enab == 1)
			clk_disable(l3_main_3_ick);

		save_ivahd_tesla_regs();
		save_l3instr_regs();

		save_state = 3;
		goto cpu_prepare;
	}

	switch (pwrdm_read_next_pwrst(mpuss_pd)) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		/* No need to save MPUSS context */
		break;
	case PWRDM_POWER_RET:
		/* MPUSS OSWR, logic lost */
		if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) {
			if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
				save_gic_wakeupgen_secure();
				save_l3instr_regs();
			} else {
				save_gic();
				omap4_wakeupgen_save();
			}
			save_state = 2;
		}
		break;
	case PWRDM_POWER_OFF:
		/* MPUSS OFF */
		if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
			/* l3_main inst clock must be enabled for
			 * a save ram operation
			 */
			if (!l3_main_3_ick->usecount) {
				inst_clk_enab = 1;
				clk_enable(l3_main_3_ick);
			}
			save_secure_ram();

			if (inst_clk_enab == 1)
				clk_disable(l3_main_3_ick);

			save_gic_wakeupgen_secure();
			save_ivahd_tesla_regs();
			save_l3instr_regs();
		} else {
			save_gic();
			omap4_wakeupgen_save();
		}
		save_state = 3;
		break;
	default:
		/* Fall through */
		;
	}

	/*
	 * Program the CPU targeted state
	 */
cpu_prepare:
	clear_cpu_prev_pwrst(cpu);
	if (cpu)
		pwrdm_set_next_pwrst(cpu1_pwrdm, power_state);
	else
		pwrdm_set_next_pwrst(cpu0_pwrdm, power_state);
	scu_pwrst_prepare(cpu, power_state);

	if (regset_save_on_suspend && !cpu)
		pm_dbg_regset_save(1);

	/*
	 * Call low level routine to enter to
	 * targeted power state
	 */
	__omap4_cpu_suspend(cpu, save_state);
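	/*
	 * Execution resumes here once the CPU wakes up (or immediately if
	 * the low-power state was not entered); identify which CPU woke up.
	 */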
	wakeup_cpu = hard_smp_processor_id();

	if (regset_save_on_suspend && !wakeup_cpu)
		pm_dbg_regset_save(2);

	/*
	 * Restore the CPUx and MPUSS power state to ON, otherwise the
	 * CPUx power domain can transition to the programmed low-power
	 * state while doing WFI outside the low-power code. On HS devices,
	 * CPUx can do WFI outside the idle thread, which can result in a
	 * power domain transition if the previous state was programmed
	 * to OFF/RET.
	 */
	if (wakeup_cpu) {
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
	} else {
		pwrdm_set_next_pwrst(cpu0_pwrdm, PWRDM_POWER_ON);
		pwrdm_set_next_pwrst(mpuss_pd, PWRDM_POWER_ON);
		omap4_secure_dispatcher(0x21, 4, 0, 0, 0, 0, 0);
	}

	/*
	 * Check the CPUx previous power state
	 */
	if (read_cpu_prev_pwrst(wakeup_cpu) == PWRDM_POWER_OFF) {
		cpu_init();
		restore_mmu_table_entry();
		restore_local_timers(wakeup_cpu);
	}

	/*
	 * Check MPUSS previous power state and enable
	 * GIC if needed.
	 */
	switch (pwrdm_read_prev_pwrst(mpuss_pd)) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		/* No need to restore */
		break;
	case PWRDM_POWER_RET:
		if (pwrdm_read_prev_logic_pwrst(mpuss_pd) != PWRDM_POWER_OFF)
			break;
		/* fall through if hit MPU OSWR */
	case PWRDM_POWER_OFF:
		/*
		 * Enable GIC distributor
		 */
		if (!wakeup_cpu) {
			if ((omap_type() == OMAP2_DEVICE_TYPE_GP)
					&& omap4_device_off_read_prev_state()) {
				restore_gic();
				omap4_wakeupgen_restore();
			}
			enable_gic_distributor();
			if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
				if (omap4_device_off_read_prev_state())
					restore_ivahd_tesla_regs();
				restore_l3instr_regs();
			}
		}
		/*
		 * Enable GIC CPU interface
		 */
		enable_gic_cpu_interface();
		break;
	default:
		;
	}

}
/* Initializes the SE (SDP, SRAM resize, RPC handler) */
static int tf_se_init(struct tf_comm *comm,
	u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr)
{
	int error;
	unsigned int crc;

	if (comm->se_initialized) {
		dpr_info("%s: SE already initialized... nothing to do\n",
			__func__);
		return 0;
	}

	/* Secure CRC read */
	dpr_info("%s: Secure CRC Read...\n", __func__);

	crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
		0, 0, 0, 0, 0, 0);
	pr_info("SMC: SecureCRC=0x%08X\n", crc);

	/*
	 * Flush caches before resize, just to be sure there is no
	 * pending public data writes back to SRAM that could trigger a
	 * security violation once their address space is marked as
	 * secure.
	 */
#define OMAP4_SRAM_PA   0x40300000
#define OMAP4_SRAM_SIZE 0xe000
	flush_cache_all();
	outer_flush_range(OMAP4_SRAM_PA,
			OMAP4_SRAM_PA + OMAP4_SRAM_SIZE);
	wmb();

	/* SRAM resize */
	dpr_info("%s: SRAM resize (52KB)...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		SEC_RAM_SIZE_52KB, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SRAM resize OK\n", __func__);
	} else {
		dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
		goto error;
	}

	/* SDP init */
	dpr_info("%s: SDP runtime init..."
		"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
		__func__,
		sdp_backing_store_addr, sdp_bkext_store_addr);
	error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
		sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SDP runtime init OK\n", __func__);
	} else {
		dpr_err("%s: SDP runtime init failed [0x%x]\n",
			__func__, error);
		goto error;
	}

	/* RPC init */
	dpr_info("%s: RPC init...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
		FLAG_START_HAL_CRITICAL, 1,
		(u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: RPC init OK\n", __func__);
	} else {
		dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
		goto error;
	}

	comm->se_initialized = true;

	return 0;

error:
	return -EFAULT;
}
/* Yields the Secure World */
int tf_schedule_secure_world(struct tf_comm *comm)
{
	int status = 0;
	int ret;
	unsigned long iflags;
	u32 appli_id;

	tf_set_current_time(comm);

	local_irq_save(iflags);

	switch (g_RPC_advancement) {
	case  RPC_ADVANCEMENT_NONE:
		/* Return from IRQ */
		appli_id = SMICODEPUB_IRQ_END;
		break;
	case  RPC_ADVANCEMENT_PENDING:
		/* nothing to do in this case */
		goto exit;
	default:
	case RPC_ADVANCEMENT_FINISHED:
		appli_id = SMICODEPUB_RPC_END;
		g_RPC_advancement = RPC_ADVANCEMENT_NONE;
		break;
	}

	tf_clock_timer_start();

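	/*
	 * g_service_end is assumed to be cleared by the RPC handler on a
	 * normal yield; if it is still set after the dispatcher returns, the
	 * secure world has terminated and the PA is unloaded below.
	 */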
	g_service_end = 1;
	/* yield to the Secure World */
	ret = omap4_secure_dispatcher(appli_id, /* app_id */
	   0, 0,        /* flags, nargs */
	   0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */
	if (g_service_end != 0) {
		dpr_err("Service End ret=%X\n", ret);

		if (ret == 0) {
			dmac_flush_range((void *)comm->l1_buffer,
				(void *)(((u32)(comm->l1_buffer)) +
					PAGE_SIZE));
			outer_inv_range(__pa(comm->l1_buffer),
				__pa(comm->l1_buffer) +
				PAGE_SIZE);

			ret = comm->l1_buffer->exit_code;

			dpr_err("SMC PA failure ret=%X\n", ret);
			if (ret == 0)
				ret = -EFAULT;
		}
		clear_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
		omap4_secure_dispatcher(API_HAL_LM_PAUNLOADALL_INDEX,
			FLAG_START_HAL_CRITICAL, 0, 0, 0, 0, 0);
		status = ret;
	}

	tf_clock_timer_stop();

exit:
	local_irq_restore(iflags);

	return status;
}
u32 tf_try_disabling_secure_hwa_clocks(u32 mask)
{
	return omap4_secure_dispatcher(API_HAL_HWATURNOFF_INDEX,
		FLAG_START_HAL_CRITICAL, 1, mask, 0, 0, 0);
}
Example #6
static int __init omap_l2_cache_init(void)
{
	u32 aux_ctrl = 0;
	u32 por_ctrl = 0;
	u32 lockdown = 0;
	bool mpu_prefetch_disable_errata = false;

	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

#ifdef CONFIG_OMAP_ALLOW_OSWR
	if (omap_rev() == OMAP4460_REV_ES1_0)
		mpu_prefetch_disable_errata = true;
#endif
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
	if (WARN_ON(!l2cache_base))
		return -ENODEV;
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
	/*
	 * 16-way associativity, parity disabled
	 * Way size - 32KB (es1.0)
	 * Way size - 64KB (es2.0 +)
	 */
	aux_ctrl = readl_relaxed(l2cache_base + L2X0_AUX_CTRL);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
		goto skip_aux_por_api;
	}
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/*
	 * Drop the instruction prefetch hint since it degrades
	 * performance.
	 */
	aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
		(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
		(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	if (!mpu_prefetch_disable_errata)
		aux_ctrl |= (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

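	/* Program the PL310 auxiliary control register via the secure monitor. */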
	omap_smc1(0x109, aux_ctrl);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/* Setup POR Control register */
	por_ctrl = readl_relaxed(l2cache_base + L2X0_PREFETCH_CTRL);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/*
	 * Double linefill is available only on OMAP4460 L2X0.
	 * It may cause single cache line memory corruption; leave it
	 * disabled on all devices.
	 */
	por_ctrl &= ~(1 << L2X0_PREFETCH_DOUBLE_LINEFILL_SHIFT);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
	if (!mpu_prefetch_disable_errata) {
		por_ctrl &= ~L2X0_POR_OFFSET_MASK;
		por_ctrl |= L2X0_POR_OFFSET_VALUE;
	}
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/* Set POR through PPA service only in EMU/HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		omap4_secure_dispatcher(PPA_SERVICE_PL310_POR, 0x7, 1,
				por_ctrl, 0, 0, 0);
	else if (omap_rev() >= OMAP4430_REV_ES2_2)
		omap_smc1(0x113, por_ctrl);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);


	/*
	 * FIXME: Temporary WA for OMAP4460 stability issue.
	 * Lock down specific L2 cache ways, which makes the effective
	 * L2 size 512 KB instead of 1 MB.
	 */
	if (omap_rev() == OMAP4460_REV_ES1_0) {
		lockdown = 0xa5a5;
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D0);
		printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D1);
		printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I0);
		printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I1);
	}
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

skip_aux_por_api:
	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	/*
	 * Override the default outer_cache.disable with an OMAP4
	 * specific one
	 */
	outer_cache.disable = omap4_l2x0_disable;
	outer_cache.set_debug = omap4_l2x0_set_debug;
	printk(KERN_INFO "===[%s(%d)]===\n", __func__, __LINE__);

	return 0;
}
Example #7
static int __init omap_l2_cache_init(void)
{
	u32 l2x0_auxctrl;
	u32 l2x0_por;
	u32 l2x0_lockdown;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	BUG_ON(!l2cache_base);

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE_ES1;
		goto skip_auxctlr;
	}

	if (cpu_is_omap446x()) {
		if (omap_rev() == OMAP4460_REV_ES1_0) {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE_ES1;
			l2x0_por = OMAP446X_PL310_POR_ES1;
			l2x0_lockdown = 0xa5a5;
		} else {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE;
			l2x0_por = OMAP446X_PL310_POR;
			l2x0_lockdown = 0;
		}
	} else {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE;
		l2x0_por = OMAP443X_PL310_POR;
		l2x0_lockdown = 0;
	}

	/* Set POR through PPA service only in EMU/HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap4_secure_dispatcher(
				PPA_SERVICE_PL310_POR, 0x7, 1,
				l2x0_por, 0, 0, 0);
	} else if (omap_rev() > OMAP4430_REV_ES2_1)
			omap_smc1(0x113, l2x0_por);

	/*
	 * FIXME: Temporary WA for the OMAP4460 stability issue.
	 * For OMAP4460 the effective L2X0 size is 512 KB with this WA.
	 */
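	/*
	 * 0x900/0x908 and 0x904/0x90C are the PL310 data and instruction
	 * lockdown-by-way registers (the L2X0_LOCKDOWN_WAY_* registers used
	 * by name in the previous example).
	 */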
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x900);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x908);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x904);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x90C);

	/*
	 * Double Linefill, BRESP enabled, $I and $D prefetch ON,
	 * Share-override = 1, NS lockdown enabled
	 */
	omap_smc1(0x109, l2x0_auxctrl);

skip_auxctlr:
	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	/*
	 * 32KB way size, 16-way associativity,
	 * parity disabled
	 */
	l2x0_init(l2cache_base, l2x0_auxctrl, 0xd0000fff);

	return 0;
}