Example #1
int
vmap_page_range (struct vm_area_struct *vma, unsigned long from, unsigned long size, unsigned long vaddr)
{
	int error = 0;
	pgd_t * dir;
	unsigned long beg = from;
	unsigned long end = from + size;

	vaddr -= from;	/* bias vaddr so that (vaddr + from) yields the source address */
	dir = pgd_offset(current->mm, from);
	flush_cache_range(vma, beg, end);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(current->mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		error = vmap_pmd_range(pmd, from, end - from, vaddr + from);
		if (error)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	local_flush_tlb_range(current->mm, beg, end);
	return error;
}
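The pmd-level helper vmap_pmd_range() called above is not shown in this project. As a point of reference, here is a minimal sketch of what that level typically looked like in 2.4-era kernels, mirroring the classic remap_pmd_range() walk; vmap_pte_range() and the exact signature are assumptions, not code from the original source:

static inline int
vmap_pmd_range(pmd_t *pmd, unsigned long address, unsigned long size, unsigned long vaddr)
{
	unsigned long end;

	/* Confine the walk to the current PGD slot. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	vaddr -= address;
	do {
		pte_t *pte = pte_alloc(current->mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		/* vmap_pte_range() would fill in the individual ptes;
		 * it is assumed here, mirroring remap_pte_range(). */
		vmap_pte_range(pte, address, end - address, address + vaddr);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}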
Example #2
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	if (mm->context != NO_CONTEXT) {
		/* Cross-call the flush to the other CPUs using this mm,
		 * then flush the local TLB. */
		if (mm->cpu_vm_mask != (1 << smp_processor_id()))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) mm, start, end);
		local_flush_tlb_range(mm, start, end);
	}
}
Example #3
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}
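The struct tlb_args used to marshal the arguments here (and again in Examples 9 and 12) is defined alongside these handlers in the ARM SMP code; a minimal sketch matching the fields the examples dereference:

struct tlb_args {
	struct vm_area_struct *ta_vma;	/* VMA whose range is being flushed */
	unsigned long ta_start;		/* first address of the range */
	unsigned long ta_end;		/* end of the range */
};

Note also that this example uses the older ARM-local on_each_cpu_mask(func, info, wait, mask) argument order, while Example 9 uses the later generic on_each_cpu_mask(mask, func, info, wait); each is correct for its kernel version.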
Example #4
/*
 * Flush all the (user) entries for the address space described
 * by mm.  We can't rely on mm->mmap describing all the entries
 * that might be in the hash table.
 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	if (Hash == 0) {
		_tlbia();
		return;
	}

	if (mm->map_count) {
		struct vm_area_struct *mp;
		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
			local_flush_tlb_range(mp, mp->vm_start, mp->vm_end);
	} else {
		struct vm_area_struct vma;
		vma.vm_mm = mm;
		local_flush_tlb_range(&vma, 0, TASK_SIZE);
	}

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
}
Example #5
File: smp.c Project: maliyu/SOM2416
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}
Example #6
void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* The mm may be live on other CPUs: broadcast the flush. */
		struct flush_tlb_data fd;

		fd.mm = mm;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		/* Only this CPU uses the mm: just invalidate the stale
		 * contexts recorded for the other CPUs. */
		int i;
		for (i = 0; i < smp_num_cpus; i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(mm, start, end);
}
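The struct flush_tlb_data filled in here carries the arguments across the IPI to the handler shown in Example 11. A minimal sketch consistent with the fields this mm-based variant uses:

struct flush_tlb_data {
	struct mm_struct *mm;	/* address space to flush */
	unsigned long addr1;	/* start of the range */
	unsigned long addr2;	/* end of the range */
};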
Example #7
/*
 * This routine is called from the page fault handler to remove a
 * range of active mappings at this point
 */
void
remove_mapping (struct task_struct *task, unsigned long start, unsigned long end)
{
	unsigned long beg = start;
	pgd_t *dir;

	down_write (&task->mm->mmap_sem);
	dir = pgd_offset (task->mm, start);
	flush_cache_range (task->mm->mmap, beg, end);
	while (start < end){
		remove_mapping_pmd_range (dir, start, end - start);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	local_flush_tlb_range (task->mm, beg, end);
	up_write (&task->mm->mmap_sem);
}
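remove_mapping_pmd_range() is not shown in this project; the sketch below follows the classic pgd-to-pmd walk of the same vintage (compare Example 1), with remove_mapping_pte_range() and the exact signature as assumptions rather than code from the original source:

static void
remove_mapping_pmd_range (pgd_t *dir, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none (*dir))
		return;
	pmd = pmd_offset (dir, address);
	/* Confine the walk to the current PGD slot. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		/* Hypothetical pte-level helper mirroring the classic
		 * zap_pte_range() walk. */
		remove_mapping_pte_range (pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}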
Example #8
/*
 * Flush all tlb/hash table entries (except perhaps for those
 * mapping RAM starting at PAGE_OFFSET, since they never change).
 */
void
local_flush_tlb_all(void)
{
	struct vm_area_struct vma;

	/* aargh!!! */
	/*
	 * Just flush the kernel part of the address space, that's
	 * all that the current callers of this require.
	 * Eventually I hope to persuade the powers that be that
	 * we can and should dispense with flush_tlb_all().
	 *  -- paulus.
	 */
	vma.vm_mm = &init_mm;
	local_flush_tlb_range(&vma, TASK_SIZE, ~0UL);

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
}
Example #9
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (IS_ENABLED(CONFIG_L4)) {
		l4x_unmap_sync_mm(vma->vm_mm);
		l4x_unmap_page_range(vma->vm_mm, start, end);
		return;
	}

	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
					&ta, 1);
	} else
		local_flush_tlb_range(vma, start, end);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
Example #10
File: smp.c Project: Einheri/wl500g
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
Example #11
static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->mm, fd->addr1, fd->addr2);
}
Example #12
static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}