Example No. 1
0
int config_L2(int size)
{
    int i;
    struct cpumask mask; 
    int cur_size = get_l2c_size();
	
    if (size != SZ_256K && size != SZ_512K) {
        printk("inlvalid input size %x\n", size);
        return -1;
    }
    if (in_interrupt()) {
        printk(KERN_ERR "Cannot use %s in interrupt/softirq context\n",
               __func__);
        return -1;
    }
    if (size == cur_size) {
        printk("Config L2 size %x is equal to current L2 size %x\n",
               size, cur_size);
        return 0;
    }
    cpumask_clear(&mask);
    for (i = 0; i < get_cluster_core_count(); i++)
        cpumask_set_cpu(i, &mask);
	
    atomic_set(&L1_flush_done, 0);
    get_online_cpus();
    //printk("[Config L2] Config L2 start, on line cpu = %d\n",num_online_cpus());    
    
    /* disable cache and flush L1 on Cluster0*/
    on_each_cpu_mask(&mask, (smp_call_func_t)atomic_flush, NULL, true);
    //while(atomic_read(&L1_flush_done) != num_online_cpus());    
    //printk("[Config L2] L1 flush done\n");
    
    /* Only need to flush Cluster0's L2 */    
    smp_call_function_any(&mask, (smp_call_func_t)inner_dcache_flush_L2, NULL, true);
    //printk("[Config L2] L2 flush done\n");
    
    /* change L2 size */    
    config_L2_size(size);
    //printk("[Config L2] Change L2 flush size done(size = %d)\n",size);
        
    /* enable Cluster0's cache */
    atomic_set(&L1_flush_done, 0);
    on_each_cpu_mask(&mask, (smp_call_func_t)__enable_cache, NULL, true);
    
    //update cr_alignment for other kernel function usage 
    cr_alignment = cr_alignment | (0x4); //C1_CBIT
    put_online_cpus();
    printk("Config L2 size %x done\n", size);
    return 0;
}
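For reference, the dispatch pattern at the heart of this example can be reduced to a short sketch: build a cpumask covering one cluster, then run a handler on every CPU in that mask while CPU hotplug is held off. Only the kernel APIs below are from the example; run_on_cluster0, cluster_flush_fn and CLUSTER0_CORES are illustrative placeholders.
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical sketch of the on_each_cpu_mask() pattern used in
 * config_L2(); CLUSTER0_CORES and cluster_flush_fn are placeholders. */
#define CLUSTER0_CORES 4

static void cluster_flush_fn(void *info)
{
    /* Runs on each CPU named in the mask, in SMP-call context. */
}

static void run_on_cluster0(void)
{
    struct cpumask mask;
    int i;

    cpumask_clear(&mask);
    for (i = 0; i < CLUSTER0_CORES; i++)
        cpumask_set_cpu(i, &mask);

    get_online_cpus();          /* keep the online set stable */
    on_each_cpu_mask(&mask, cluster_flush_fn, NULL, true);
    put_online_cpus();
}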
Example No. 2
0
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}
void ion_flush_cache_all(void)
{
    int cpu;
    unsigned int cluster0_stat;
    unsigned int cluster1_stat;
    unsigned int stat;
    cpumask_t mask;
    preempt_disable();
    cluster0_stat = readl(cluster0_resume_bit) & 0x0f;
    cluster1_stat = readl(cluster1_resume_bit) & 0x0f;

    stat = ~(cluster0_stat | cluster1_stat << 4) & 0xff;
    if (cluster1_stat == 0x0f) {
        stat = stat | CLUSTER1_CPU4;
    }

    cpumask_clear(&mask);
    for_each_online_cpu(cpu) {
        if (stat & (1 << cpu))
            cpumask_set_cpu(cpu, &mask);
    }

    on_each_cpu_mask(&mask, hisi_ion_flush_cache_all, NULL, 1);
    preempt_enable();
    return;
}
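If the per-cluster resume-bit filtering above were not needed, the same broadcast could be expressed with plain on_each_cpu(), which targets every online CPU. This is a simplified sketch for comparison, not a drop-in replacement, since it discards the cluster-state logic.
/* Simplified sketch: broadcast the flush to all online CPUs without
 * consulting the cluster resume bits. */
void ion_flush_cache_all_unfiltered(void)
{
    on_each_cpu(hisi_ion_flush_cache_all, NULL, 1);
}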
Example No. 4
0
static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies
	 * fastsleep workaround needs to be left in 'applied' state on all
	 * the cores. Do this by-
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit path
	 * 2. Sending ipi to all the cores which have at least one online thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 * path
	 * There is no need to send ipi to cores which have all threads
	 * offlined, as last thread of the core entering fastsleep or deeper
	 * state would have applied workaround.
	 */
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
		goto fail;
	}

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
				pnv_fastsleep_workaround_apply,
				&err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}
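The on_each_cpu_mask() call above passes &err as the info argument so that every targeted core can report failure through one shared location. A hedged sketch of what such a per-CPU worker can look like follows; the body is illustrative only, the real pnv_fastsleep_workaround_apply() lives in the powernv platform code.
/* Illustrative sketch: each CPU runs this via on_each_cpu_mask() and
 * flags failure through the shared int passed as info. */
static void fastsleep_workaround_apply_sketch(void *info)
{
	int rc = 0;

	/* ... apply the workaround on this CPU, setting rc on failure ... */

	if (rc)
		*(int *)info = rc;
}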
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
	else
		local_flush_tlb_mm(mm);
	broadcast_tlb_mm_a15_erratum(mm);
}
Example No. 6
0
/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
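For context, the reader side that the smp_store_release() pairs with looks roughly like the sketch below; the real load_mm_ldt() in arch/x86 has additional detail, so treat this as an approximation.
/* Approximate reader side: lockless_dereference() orders the pointer load
 * against subsequent loads from *ldt, pairing with the smp_store_release()
 * in install_ldt(). Field names follow the era of this example. */
static void load_mm_ldt_sketch(struct mm_struct *mm)
{
	struct ldt_struct *ldt = lockless_dereference(mm->context.ldt);

	if (ldt)
		set_ldt(ldt->entries, ldt->size);   /* program the LDT */
	else
		clear_LDT();                        /* this mm has no LDT */
}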
Example No. 7
0
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}
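The IPI handlers named in these TLB examples conventionally unpack struct tlb_args and replay the flush locally on each CPU in the mask. The sketch below matches the fields used here; exact bodies vary by architecture and kernel version.
/* Sketch of typical IPI handlers for the TLB examples: each runs on one
 * CPU of the mask and performs the corresponding local flush. */
static void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}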
Example No. 8
0
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}
Example No. 9
0
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}
Example No. 10
0
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}
Example No. 11
0
void flush_tlb_mm(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_L4)) {
		l4x_unmap_sync_mm(mm);
		l4x_del_task(mm);
		return;
	}

	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
	else
		local_flush_tlb_mm(mm);
	broadcast_tlb_mm_a15_erratum(mm);
}
Example No. 12
0
static void __exit tegra_cpuidle_exit(void)
{
	int cpu;
	struct cpuidle_driver *drv;

	unregister_pm_notifier(&tegra_cpuidle_pm_notifier);

	for_each_possible_cpu(cpu) {
		drv = &per_cpu(cpuidle_drv, cpu);

		on_each_cpu_mask(drv->cpumask, tegra_cpuidle_setup_bctimer,
				(void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);

		cpuidle_unregister(drv);
	}
}
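Both this example and Example No. 14 below pass a CLOCK_EVT_NOTIFY_BROADCAST_* value through the info pointer. A hedged sketch of what such a callback can look like follows; the real tegra_cpuidle_setup_bctimer() may differ in detail.
/* Illustrative sketch: the broadcast-timer notification value travels
 * through the void *arg and is forwarded for the current CPU. */
static void setup_bctimer_sketch(void *arg)
{
	int cpu = smp_processor_id();

	clockevents_notify((long)arg, &cpu);
}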
Example No. 13
0
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (IS_ENABLED(CONFIG_L4)) {
		l4x_unmap_sync_mm(vma->vm_mm);
		l4x_unmap_page(vma->vm_mm, uaddr);
		return;
	}

	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
					&ta, 1);
	} else
		local_flush_tlb_page(vma, uaddr);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
Example No. 14
0
static int tegra_cpuidle_register(unsigned int cpu)
{
	struct cpuidle_driver *drv;
	struct cpuidle_state *state;

	drv = &per_cpu(cpuidle_drv, cpu);
	drv->name = driver_name;
	drv->owner = owner;
	drv->cpumask = &per_cpu(idle_mask, cpu);
	cpumask_set_cpu(cpu, drv->cpumask);
	drv->state_count = 0;

	state = &drv->states[CPUIDLE_STATE_CLKGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "clock-gated");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU clock gated");
	state->exit_latency = 10;
	state->target_residency = 10;
	state->power_usage = 600;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_clock_gating;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	drv->safe_state_index = 0;
#endif
	drv->state_count++;

#ifdef CONFIG_PM_SLEEP
	state = &drv->states[CPUIDLE_STATE_POWERGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "powered-down");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power gated");
	state->exit_latency = tegra_cpu_power_good_time();
	state->target_residency = tegra_cpu_power_off_time() +
		tegra_cpu_power_good_time();
	if (state->target_residency < tegra_pd_min_residency)
		state->target_residency = tegra_pd_min_residency;
	state->power_usage = 100;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_pd;
	drv->state_count++;

	if (cpu == 0) {
		state = &drv->states[CPUIDLE_STATE_MC_CLK_STOP];
		snprintf(state->name, CPUIDLE_NAME_LEN, "mc-clock");
		snprintf(state->desc, CPUIDLE_DESC_LEN, "MC clock stop");
		state->exit_latency = tegra_cpu_power_good_time() +
			DRAM_SELF_REFRESH_EXIT_LATENCY;
		state->target_residency = tegra_cpu_power_off_time() +
			tegra_cpu_power_good_time() + DRAM_SELF_REFRESH_EXIT_LATENCY;
		if (state->target_residency < tegra_mc_clk_stop_min_residency())
			state->target_residency =
					tegra_mc_clk_stop_min_residency();
		state->power_usage = 0;
		state->flags = CPUIDLE_FLAG_TIME_VALID;
		state->enter = tegra_idle_enter_pd;
		state->disabled = true;
		drv->state_count++;
	}
#endif

	if (cpuidle_register(drv, NULL)) {
		pr_err("CPU%u: failed to register driver\n", cpu);
		return -EIO;
	}

	on_each_cpu_mask(drv->cpumask, tegra_cpuidle_setup_bctimer,
				(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);

	return 0;
}
Example No. 15
0
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}
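The cast of local_flush_tlb_mm to smp_call_func_t in this last example relies on the argument being pointer-sized; a cast-free alternative wraps the call in a handler with the expected void * signature, as in this sketch.
/* Sketch: a thin wrapper avoids calling local_flush_tlb_mm() through an
 * incompatible function-pointer type. */
static void ipi_local_flush_tlb_mm(void *info)
{
	local_flush_tlb_mm((struct mm_struct *)info);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), ipi_local_flush_tlb_mm, mm, 1);
}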