Example No. 1
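A seq_file ->next handler, apparently from an older fs/proc/task_mmu.c: it steps to the next VMA in the task's list and, once the list is exhausted, releases the mm via m_stop() and returns the gate VMA exactly once before ending the walk.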
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *map = v;
	(*pos)++;
	if (map->vm_next)
		return map->vm_next;
	m_stop(m, v);
	if (map != get_gate_vma(task))
		return get_gate_vma(task);
	return NULL;
}
Example No. 2
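The task_struct-keyed in_gate_area(): an address is in the gate area exactly when it falls inside the gate VMA that get_gate_vma() reports for the task.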
int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
Example No. 3
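The ->show callback that formats one line of /proc/<pid>/maps: the address range, permission bits, file offset, device, and inode, followed by the backing file's path or a [heap], [stack], or [vdso] marker. The %n conversion records the line length so the name column can be padded.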
static int show_map(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *map = v;
	struct mm_struct *mm = map->vm_mm;
	struct file *file = map->vm_file;
	int flags = map->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = map->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			map->vm_start,
			map->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			map->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and
	 * special [heap], [stack] and [vdso] markers for
	 * the anonymous ones:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "");
	} else if (mm) {
		if (map->vm_start <= mm->start_brk &&
				map->vm_end >= mm->brk) {
			pad_len_spaces(m, len);
			seq_puts(m, "[heap]");
		} else if (map->vm_start <= mm->start_stack &&
				map->vm_end >= mm->start_stack) {
			pad_len_spaces(m, len);
			seq_puts(m, "[stack]");
		}
	} else {
		pad_len_spaces(m, len);
		seq_puts(m, "[vdso]");
	}
	seq_putc(m, '\n');
	if (m->count < m->size)  /* map is copied successfully */
		m->version = (map != get_gate_vma(task))? map->vm_start: 0;
	return 0;
}
Example No. 4
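An earlier ->start handler for the maps file. It resumes from the cached last_addr hint when it can, otherwise scans the VMA list to the requested index, and hands back the gate VMA as a tail entry after the regular list.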
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *map, *tail_map;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit the
	 * mmap_cache most of the time. last_addr is zero at the
	 * beginning and after an lseek, and -1 after the end of
	 * the maps.
	 */

	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_map = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (map = find_vma(mm, last_addr))) {
		map = map->vm_next;
		goto out;
	}

	/*
	 * Check that the map index is within range and scan
	 * the list sequentially up to the requested position.
	 */
	map = NULL;
	if ((unsigned long)l < mm->map_count) {
		map = mm->mmap;
		while (l-- && map)
			map = map->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_map = NULL; /* After gate map */

out:
	if (map)
		return map;

	/* The end of the maps has been reached */
	m->version = (tail_map != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_map;
}
Example No. 5
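The matching ->stop handler. It drops mmap_sem and the mm reference only for real VMAs; the gate VMA does not belong to the mm, so there is nothing to release when the walk ends on it.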
static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *map = v;
	if (map && map != get_gate_vma(task)) {
		struct mm_struct *mm = map->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
Example No. 6
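From the x86 exec-shield patch: on a general protection fault this helper recomputes the highest executable address in the process, including the gate VMA, refreshes the per-mm user code-segment descriptor, and reloads it when it disagrees with the CPU's GDT entry.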
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
	struct desc_struct *desc1, *desc2;
	struct vm_area_struct *vma;
	unsigned long limit;

	if (current->mm == NULL)
		return 0;

	limit = -1UL;
	if (current->mm->context.exec_limit != -1UL) {
		limit = PAGE_SIZE;
		spin_lock(&current->mm->page_table_lock);
		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;
		vma = get_gate_vma(current->mm);
		if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
			limit = vma->vm_end;
		spin_unlock(&current->mm->page_table_lock);
		if (limit >= TASK_SIZE)
			limit = -1UL;
		current->mm->context.exec_limit = limit;
	}
	set_user_cs(&current->mm->context.user_cs, limit);

	desc1 = &current->mm->context.user_cs;
	desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;

	if (__compare_user_cs_desc(desc1, desc2)) {
		/*
		 * The CS was not in sync - reload it and retry the
		 * instruction. If the instruction still faults then
		 * we won't hit this branch next time around.
		 */
		if (print_fatal_signals >= 2) {
			printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
				error_code, error_code/8, regs->ip,
				smp_processor_id());
			printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
				current->mm->context.exec_limit,
				desc1->a, desc1->b, desc2->a, desc2->b);
		}

		load_user_cs_desc(cpu, current->mm);

		return 1;
	}

	return 0;
}
Example No. 7
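An in_gate_area() variant from an L4 port: under CONFIG_L4 it reports that no address is in the gate area, so the core-dump path in gup.c never tries PTE lookups on the vsyscall page; otherwise it is the usual range test.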
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_L4
	/* Prevent the core-dump (gup.c) from doing PTE lookups on the
	 * vsyscall page */
	return 0;
#else
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
#endif /* L4 */
}
Example No. 8
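From the get_user_pages() machinery in mm/gup.c: a faultless lookup of the gate page. It walks the page tables by hand (pgd, p4d, pud, pmd, pte), using the kernel page tables for addresses above TASK_SIZE, refuses writes since gate pages are read-only, and takes a reference on the page it finds.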
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
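Example No. 9

A later revision of the ->start handler from Example No. 4: the task and mm now live in a proc_maps_private structure, the mm is pinned with atomic_inc_not_zero(), and the gate VMA is stashed in priv->tail_vma rather than in a local variable.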
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}
Example No. 10
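A minimal early version of the same ->start handler: walk the list to position *pos; if the walk runs exactly one step past the last VMA (l == -1), return the gate VMA as the final element.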
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct mm_struct *mm = get_task_mm(task);
	struct vm_area_struct *map;
	loff_t l = *pos;

	if (!mm)
		return NULL;

	down_read(&mm->mmap_sem);
	map = mm->mmap;
	while (l-- && map)
		map = map->vm_next;
	if (!map) {
		up_read(&mm->mmap_sem);
		mmput(mm);
		if (l == -1)
			map = get_gate_vma(task);
	}
	return map;
}
Example No. 11
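Two generations of the same check side by side: the older in_gate_area() keyed by task_struct and the newer one keyed by mm_struct, both reducing to a range test against the gate VMA.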
int in_gate_area(struct task_struct *task, unsigned long addr)
{
	const struct vm_area_struct *vma = get_gate_vma(task);

	return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	const struct vm_area_struct *vma = get_gate_vma(mm);

	return vma && addr >= vma->vm_start && addr < vma->vm_end;
}
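To show how the helpers above fit together, here is a minimal sketch, modeled loosely on the kernel's get_user_pages() path: a caller falls back to the gate page only when no ordinary VMA covers the address. follow_page_and_pin() is a hypothetical stand-in for the normal VMA-backed lookup; in_gate_area() and get_gate_page() are the mm_struct-keyed helpers from Examples No. 8 and No. 11.

/* Sketch only: resolve a single user page, falling back to the gate
 * page (e.g. the vsyscall page) when no VMA covers the address. */
static int resolve_one_page(struct mm_struct *mm, unsigned long addr,
		unsigned int gup_flags, struct page **page)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* An ordinary mapping covers the address: take the normal path. */
	if (vma && addr >= vma->vm_start)
		return follow_page_and_pin(vma, addr, gup_flags, page); /* hypothetical */

	/* No VMA covers the address: the gate area is the only
	 * legitimate mapping still possible, and get_gate_page()
	 * resolves it without faulting. */
	if (in_gate_area(mm, addr))
		return get_gate_page(mm, addr & PAGE_MASK, gup_flags, &vma, page);

	return -EFAULT;
}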
}