Example #1
void lkdtm_ACCESS_USERSPACE(void)
{
	unsigned long user_addr, tmp = 0;
	unsigned long *ptr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
		pr_warn("copy_to_user failed\n");
		vm_munmap(user_addr, PAGE_SIZE);
		return;
	}

	ptr = (unsigned long *)user_addr;

	pr_info("attempting bad read at %p\n", ptr);
	tmp = *ptr;
	tmp += 0xc0dec0de;

	pr_info("attempting bad write at %p\n", ptr);
	*ptr = tmp;

	vm_munmap(user_addr, PAGE_SIZE);
}
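A minimal sketch of the same allocate/probe/release pattern, kept separate from the lkdtm code above: it assumes only the exported vm_mmap()/vm_munmap() helpers and checks the result with IS_ERR_VALUE(), which works because vm_mmap() returns either a user address or a negative errno (the TASK_SIZE comparison above exploits the same fact).

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>

/* Sketch: map one anonymous user page, seed it, and unmap it again. */
static int map_probe_unmap(void)
{
	unsigned long user_addr;
	unsigned long tmp = 0;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR_VALUE(user_addr))
		return (int)user_addr;		/* negative errno */

	if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
		vm_munmap(user_addr, PAGE_SIZE);
		return -EFAULT;
	}

	vm_munmap(user_addr, PAGE_SIZE);
	return 0;
}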
Example #2
_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
{
	/* args->ctx ignored here; args->ukk_private required instead */
	/* we need to lock the mmap semaphore before calling the do_mmap function */
    down_write(&current->mm->mmap_sem);

    args->mapping = (void __user *)vm_mmap(
											(struct file *)args->ukk_private,
											0, /* start mapping from any address after NULL */
											args->size,
											PROT_READ | PROT_WRITE,
											MAP_SHARED,
											args->phys_addr
										   );

	/* and unlock it after the call */
	up_write(&current->mm->mmap_sem);

	/* No cookie required here */
	args->cookie = 0;
	/* uku_private meaningless, so zero */
	args->uku_private = NULL;

	if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
	{
		return _MALI_OSK_ERR_FAULT;
	}

	/* Success */
	return _MALI_OSK_ERR_OK;
}
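One caveat with this example: the comment talks about do_mmap(), but vm_mmap() takes mmap_sem (today mmap_lock) internally, so calling it with the semaphore already write-held, as above, would self-deadlock on kernels where vm_mmap() exists. A hedged sketch of the same mapping without the explicit locking, assuming the same _mali_uk_mem_mmap_s layout:

/* Sketch only: let vm_mmap() handle its own locking. */
static _mali_osk_errcode_t mali_indirect_mmap_sketch(_mali_uk_mem_mmap_s *args)
{
	unsigned long addr;

	addr = vm_mmap((struct file *)args->ukk_private, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED, args->phys_addr);
	if (IS_ERR_VALUE(addr))
		return _MALI_OSK_ERR_FAULT;

	args->mapping = (void __user *)addr;
	args->cookie = 0;
	args->uku_private = NULL;
	return _MALI_OSK_ERR_OK;
}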
Example #3
static int map_som_binary(struct file *file,
		const struct som_exec_auxhdr *hpuxhdr)
{
	unsigned long code_start, code_size, data_start, data_size;
	unsigned long bss_start, som_brk;
	int retval;
	int prot = PROT_READ | PROT_EXEC;
	int flags = MAP_FIXED|MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

	mm_segment_t old_fs = get_fs();
	set_fs(get_ds());

	code_start = SOM_PAGESTART(hpuxhdr->exec_tmem);
	code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize);
	current->mm->start_code = code_start;
	current->mm->end_code = code_start + code_size;
	retval = vm_mmap(file, code_start, code_size, prot,
			flags, SOM_PAGESTART(hpuxhdr->exec_tfile));
	if (retval < 0 && retval > -1024)
		goto out;

	data_start = SOM_PAGESTART(hpuxhdr->exec_dmem);
	data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize);
	current->mm->start_data = data_start;
	current->mm->end_data = bss_start = data_start + data_size;
	retval = vm_mmap(file, data_start, data_size,
			prot | PROT_WRITE, flags,
			SOM_PAGESTART(hpuxhdr->exec_dfile));
	if (retval < 0 && retval > -1024)
		goto out;

	som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize);
	current->mm->start_brk = current->mm->brk = som_brk;
	retval = vm_mmap(NULL, bss_start, som_brk - bss_start,
			prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0);
	if (retval > 0 || retval < -1024)
		retval = 0;
out:
	set_fs(old_fs);
	return retval;
}
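The repeated `retval < 0 && retval > -1024` tests are how this loader tells a negative errno apart from a legitimately mapped address; IS_ERR_VALUE() expresses the same idea against -MAX_ERRNO. A small sketch of one MAP_FIXED mapping step written that way (the helper name is illustrative; prot/flags mirror the loader above):

static int map_segment(struct file *file, unsigned long start,
		       unsigned long len, unsigned long prot,
		       unsigned long flags, unsigned long off)
{
	unsigned long addr = vm_mmap(file, start, len, prot, flags, off);

	if (IS_ERR_VALUE(addr))
		return (int)addr;	/* negative errno */
	if (addr != start)
		return -ENOMEM;		/* MAP_FIXED must land exactly here */
	return 0;
}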
Example #4
/**
 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
 * an empty user space mapping.
 *
 * The caller takes care of acquiring the mmap_sem of the task.
 *
 * @returns Pointer to the mapping.
 *          (void *)-1 on failure.
 * @param   R3PtrFixed  (RTR3PTR)-1 if anywhere, otherwise a specific location.
 * @param   cb          The size of the mapping.
 * @param   uAlignment  The alignment of the mapping.
 * @param   pTask       The Linux task to create this mapping in.
 * @param   fProt       The RTMEM_PROT_* mask.
 */
static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
{
    unsigned fLnxProt;
    unsigned long ulAddr;

    /*
     * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
     */
    fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
    if (fProt == RTMEM_PROT_NONE)
        fLnxProt = PROT_NONE;
    else
    {
        fLnxProt = 0;
        if (fProt & RTMEM_PROT_READ)
            fLnxProt |= PROT_READ;
        if (fProt & RTMEM_PROT_WRITE)
            fLnxProt |= PROT_WRITE;
        if (fProt & RTMEM_PROT_EXEC)
            fLnxProt |= PROT_EXEC;
    }

    if (R3PtrFixed != (RTR3PTR)-1)
        ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
    else
    {
        ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
        if (    !(ulAddr & ~PAGE_MASK)
            &&  (ulAddr & (uAlignment - 1)))
        {
            /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
             * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
             * ourselves) and further by there being two mmap strategies (top / bottom). */
            /* For now, just ignore uAlignment requirements... */
        }
    }
    if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
        return (void *)-1;
    return (void *)ulAddr;
}
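The final `ulAddr & ~PAGE_MASK` test works because a successful vm_mmap() always returns a page-aligned address, while a negative errno cast to unsigned long never is. Spelled out as a helper (the name is made up here, not a VirtualBox API):

/* Illustrative only: a non-aligned result means vm_mmap() failed. */
static inline bool rtR0MemObjLinuxMmapFailed(unsigned long ulAddr)
{
    return (ulAddr & ~PAGE_MASK) != 0;
}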
Example #5
void lkdtm_EXEC_USERSPACE(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}
	execute_user_location((void *)user_addr);
	vm_munmap(user_addr, PAGE_SIZE);
}
Example #6
int elf_initialize_userspace_stack(struct elfhdr elf_ex,unsigned long aux_addr,unsigned long tmp_stack, unsigned long stack_len,unsigned long load_addr) {
	unsigned long *aux_vec, aux_index;
	Elf64_Addr p_entry;

	p_entry = elf_ex.e_entry;
	vm_mmap(0, USERSTACK_ADDR, USERSTACK_LEN, PROT_READ | PROT_WRITE, MAP_ANONYMOUS, 0, "userstack");
	if (stack_len > 0) {
		aux_vec = (unsigned long *) aux_addr;
		if (aux_vec != 0) {
			int aux_last;

			aux_index = 0;
			aux_last = (MAX_AUX_VEC_ENTRIES - 2) * 2;

			AUX_ENT(AT_HWCAP, 0xbfebfbff); /* TODO: need to modify*/
			AUX_ENT(AT_PAGESZ, PAGE_SIZE);
			AUX_ENT(AT_CLKTCK, 100);
			AUX_ENT(AT_PHDR, load_addr + elf_ex.e_phoff);
			AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
			AUX_ENT(AT_PHNUM, elf_ex.e_phnum);
			AUX_ENT(AT_BASE, 0);
			AUX_ENT(AT_FLAGS, 0);
			AUX_ENT(AT_ENTRY, p_entry);

			AUX_ENT(AT_UID, 0x1f4); /* TODO : remove  UID hard coded to 0x1f4 for the next four entries  */
			AUX_ENT(AT_EUID, 0x1f4);
			AUX_ENT(AT_GID, 0x1f4);
			AUX_ENT(AT_EGID, 0x1f4);

			AUX_ENT(AT_SECURE, 0x0);

			aux_vec[aux_last] = 0x1234567887654321;
			aux_vec[aux_last + 1] = 0x1122334455667788;
			AUX_ENT(AT_RANDOM,
					userstack_addr((unsigned long)&aux_vec[aux_last]));

			aux_vec[aux_last + 2] = 0x34365f363878; /* This is string "x86_64" */
			AUX_ENT(AT_PLATFORM,
					userstack_addr((unsigned long)&aux_vec[aux_last+2]));
		}

		if (stack_len > 0){
			ut_memcpy((unsigned char *) USERSTACK_ADDR + USERSTACK_LEN - stack_len,
				(unsigned char *) tmp_stack, stack_len);
		}

		return JSUCCESS;
	}
	return JFAIL;
}
Example #7
/*the following three mem funcs are for check_huge_pages_in_practice()*/
void * mmap(void *addr, size_t len, int prot, int flags,int fd, off_t offset){
    struct file * file  = NULL;
    unsigned long retval = - EBADF;
    if (offset_in_page(offset) != 0)
        return (void *) -EINVAL;

    flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

    retval = vm_mmap(file, (unsigned long)addr, len, prot, flags, offset);

    if (file)
            fput(file);
    return (void *) retval;

}
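A sketch of how the check_huge_pages_in_practice() caller mentioned in the comment might use this wrapper; the size and the probing step are placeholders, only the mmap()/vm_munmap() pairing is the point:

static void check_huge_pages_in_practice(void)
{
	size_t len = 4UL << 20;		/* arbitrary test size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (IS_ERR_VALUE((unsigned long)p))
		return;

	/* ... fault the pages in and inspect the resulting mapping ... */

	vm_munmap((unsigned long)p, len);
}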
Example #8
static unsigned long map_to_user(struct file *filp, void *kptr,
	unsigned int size)
{
	/* the user space address to be returned */
	unsigned long uptr;
	/* the mmap struct to hold the semaphore of */
	struct mm_struct *mm;
	/* flags to pass to vm_mmap */
	unsigned long flags;
	/* old value in private field */
	void *oldval;

	/* print some debug info... */
	PR_DEBUG("size is (d) %d", size);

	mm = current->mm;
	/* must NOT add MAP_LOCKED to the flags (it causes a hang) */
	flags = MAP_POPULATE | MAP_SHARED;
	/* flags=MAP_POPULATE|MAP_PRIVATE; */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	/*
	 * vm_mmap does not need the semaphore to be held
	 * down_write(&mm->mmap_sem);
	 */
	oldval = filp->private_data;
	filp->private_data = kptr;
	uptr = vm_mmap(
		filp, /* file pointer for which filp->mmap will be called */
		0, /* address - this is the address we recommend for user
			space - best not to ... */
		size, /* size */
		PROT_READ | PROT_WRITE, /* protection */
		flags, /* flags */
		0 /* pg offset */
	);
	filp->private_data = oldval;
	/*
	 * vm_mmap does not need the semaphore to be held
	 * up_write(&mm->mmap_sem);
	 */
	if (IS_ERR_VALUE(uptr))
		PR_ERROR("ERROR: problem calling do_mmap_pgoff");
	else
		PR_DEBUG("addr for user space is (lu) %lu / (p) %p", uptr,
			(void *)uptr);
	return uptr;
}
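map_to_user() only works because the same driver provides a file_operations ->mmap handler that retrieves the buffer from filp->private_data; that handler is not shown in this example. A hedged sketch of what it could look like for a physically contiguous buffer (kmalloc()/__get_free_pages()):

/* Hypothetical ->mmap handler matching map_to_user(): kptr was stashed in
 * filp->private_data just before vm_mmap() was invoked. */
static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	void *kptr = filp->private_data;
	unsigned long pfn = virt_to_phys(kptr) >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}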
Example #9
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}
Example #10
//unsigned long fs_loadElfLibrary(struct file *file, unsigned long tmp_stack, unsigned long stack_len, unsigned long aux_addr) {
unsigned long fs_elf_load(struct file *file,unsigned long tmp_stack, unsigned long stack_len, unsigned long aux_addr) {
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss_start, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	Elf64_Addr p_entry;
	unsigned long *aux_vec, aux_index, load_addr;
	struct task_struct *task=g_current_task;

	error = 0;
	fs_lseek(file, 0, 0);
	retval = fs_read(file, (unsigned char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex)) {
		error = -1;
		goto out;
	}

	if (ut_memcmp((unsigned char *) elf_ex.e_ident, (unsigned char *) ELFMAG, SELFMAG) != 0) {
		error = -2;
		goto out;
	}

	if (elf_ex.e_type == ET_DYN)  elf_ex.e_type=ET_EXEC;
	/* First of all, some simple consistency checks */
	//if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	if (elf_ex.e_type != ET_EXEC || !elf_check_arch(&elf_ex)) {
		DEBUG("error:(not executable type or mismatch in architecture %x  %x %x \n",elf_ex.e_type,elf_ex.e_phnum,elf_check_arch(&elf_ex));
		error = -3;
		goto out;
	}

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	elf_phdata = mm_malloc(j, 0);
	if (!elf_phdata) {
		error = -4;
		goto out;
	}

	eppnt = elf_phdata;
	fs_lseek(file, (unsigned long) elf_ex.e_phoff, 0);
	retval = fs_read(file, (unsigned char *) eppnt, j);
	if (retval != j) {
		error = -5;
		goto out;
	}
	DEBUG("START address : %x offset :%x \n",ELF_PAGESTART(eppnt->p_vaddr),eppnt->p_offset);
	for (j = 0, i = 0; i < elf_ex.e_phnum; i++){
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	}
	if (j == 0) {
		error = -6;
		goto out;
	}
	load_addr = ELF_PAGESTART(eppnt->p_vaddr);
	p_entry = elf_ex.e_entry;
	task->mm->start_code = 0;
	task->mm->end_code =0;
	for (i = 0; i < elf_ex.e_phnum; i++, eppnt++) /* mmap all loadable program headers */
	{
		if (eppnt->p_type != PT_LOAD)
			continue;
		//ut_log("%d: LOAD section: vaddr:%x filesz:%x offset:%x flags:%x  \n",i,ELF_PAGESTART(eppnt->p_vaddr),eppnt->p_filesz,eppnt->p_offset,eppnt->p_flags);
		/* Now use mmap to map the library into memory. */
		error = 1;
		if (eppnt->p_filesz > 0) {
			unsigned long addr;
			unsigned long start_addr = ELF_PAGESTART(eppnt->p_vaddr);
			unsigned long end_addr= eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
			addr = vm_mmap(file, start_addr, end_addr, eppnt->p_flags, 0, (eppnt->p_offset
					- ELF_PAGEOFFSET(eppnt->p_vaddr)),"text");
			if (addr == 0)
				error = 0;
			if (task->mm->start_code ==0  || task->mm->start_code > start_addr ) task->mm->start_code = start_addr;
			if (task->mm->end_code < end_addr ) task->mm->end_code = end_addr;
		}
		//if (error != ELF_PAGESTART(eppnt->p_vaddr))
		if (error != 1) {
			error = -6;
			goto out;
		}

		elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
		//	padzero(elf_bss);

		/* TODO :  bss start address in not at the PAGE_ALIGN or ELF_MIN_ALIGN , need to club this partial page with the data */
	//	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
		bss_start = eppnt->p_filesz + eppnt->p_vaddr;
		bss = eppnt->p_memsz + eppnt->p_vaddr;
		//ut_log(" bss start :%x end:%x memsz:%x elf_bss:%x \n",bss_start, bss,eppnt->p_memsz,elf_bss);
		if (bss > bss_start) {
			vm_setupBrk(bss_start, bss - bss_start);
		}
		error = 0;
	}

 out:
 	if (elf_phdata) {
 		mm_free(elf_phdata);
 	}
	if (error != 0) {
		ut_log(" ERROR in elf loader filename :%s :%d\n",file->filename,-error);
	} else {
		task->mm->stack_bottom = USERSTACK_ADDR+USERSTACK_LEN;
		 elf_initialize_userspace_stack(elf_ex,aux_addr,tmp_stack, stack_len,load_addr);

		vm_mmap(0, USER_SYSCALL_PAGE, 0x1000, PROT_READ | PROT_EXEC |PROT_WRITE, MAP_ANONYMOUS, 0,"fst_syscal");
			//ut_memset((unsigned char *)SYSCALL_PAGE,(unsigned char )0xcc,0x1000);
		ut_memcpy((unsigned char *)USER_SYSCALL_PAGE,(unsigned char *)&__vsyscall_page,0x1000);
		if (g_conf_syscall_debug==1){
			//pagetable_walk(4,g_current_task->mm->pgd,1,0);
		}
	}
	DEBUG(" Program start address(autod) : %x \n",elf_ex.e_entry);

	if (error == 0)
		return p_entry;
	else
		return 0;
}
Example #11
int init_host_shm(pci_dev_header_t *pci_hdr, pci_bar_t bars[], uint32_t len, int *msi_vector) {
	uint32_t i, *p;
	long ret;
	host_shm_pci_hdr = *pci_hdr;

	ut_printf(" Initialising HOST SHM .. \n");
	for (i = 0; i < len && i < 4; i++) {
		host_shm_pci_bar[i] = bars[i];
		ut_printf("Host_shm bar addr :%x  len: %x \n", host_shm_pci_bar[i].addr,
				host_shm_pci_bar[i].len);
	}
	if (bars[0].addr != 0) {
		if ((ret = vm_mmap(0, HOST_SHM_CTL_ADDR, 0x1000, PROT_WRITE, MAP_FIXED,
				bars[0].addr)) == 0) {
			ut_printf(
					"ERROR : mmap fails for Host_ctl addr :%x len:%x ret:%x \n",
					bars[0].addr, bars[0].len, ret);
			return 0;
		} else {
			p = (uint32_t *) HOST_SHM_CTL_ADDR;
			*p = 0xffffffff; /* set the proper mask */
			// g_hostShmLen=bars[0].len;
		}
	}
	if (bars[2].addr == 0)
		i=1; /* No MSI */
	else
      i=2;		/* MSI present in bar-1 */
	if (bars[i].addr != 0) {
		if ((ret = vm_mmap(0, HOST_SHM_ADDR, bars[i].len, PROT_WRITE, MAP_FIXED,
				bars[i].addr)) == 0) {
			ut_printf(
					"ERROR : mmap fails for Host_shm addr :%x len:%x ret:%x \n",
					bars[i].addr, bars[i].len, ret);
			return 0;
		} else {
			g_hostShmPhyAddr = bars[i].addr;
			g_hostShmLen = bars[i].len;
		}
	}else{
		return 0;
	}
	sc_register_waitqueue(&g_hfs_waitqueue,"Hfs");
	if (pci_hdr->interrupt_line > 0) {
		int k;
#if 1
			p = (uint32_t *) HOST_SHM_CTL_ADDR;
			*p=0xffffffff;
			p=p+1;
			k=*p;
#endif
		ut_printf(" Interrupt NUMBER : %i k:%d \n", pci_hdr->interrupt_line,k);
		ar_registerInterrupt(32 + pci_hdr->interrupt_line, host_shm_interrupt,
				"host_shm");
	}
	init_HostFs();
	if (*msi_vector > 0) {
		ar_registerInterrupt(*msi_vector , host_shm_interrupt,"hostshm_msi");
	}

	return 1;
}
Example #12
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
		  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_offset;
	vm_offset_t map_addr;
	int error;
	unsigned char *data_buf = 0;
	size_t copy_len;

	map_offset = trunc_page(offset);
	map_addr = trunc_page((vm_offset_t)vmaddr);

	if (memsz > filsz) {
		/*
		 * We have the stupid situation that
		 * the section is longer than it is on file,
		 * which means it has zero-filled areas, and
		 * we have to work for it.  Stupid iBCS!
		 */
		map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
	} else {
		/*
		 * The only stuff we care about is on disk, and we
		 * don't care if we map in more than is really there.
		 */
		map_len = round_page(offset + filsz) - trunc_page(map_offset);
	}

	DPRINTF(("%s(%d):  vm_mmap(&vmspace->vm_map, &0x%08lx, 0x%x, 0x%x, "
		"VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
		__FILE__, __LINE__, map_addr, map_len, prot, map_offset));

	if ((error = vm_mmap(&vmspace->vm_map,
			     &map_addr,
			     map_len,
			     prot,
			     VM_PROT_ALL,
			     MAP_PRIVATE | MAP_FIXED,
			     OBJT_VNODE,
			     vp,
			     map_offset)) != 0)
		return error;

	if (memsz == filsz) {
		/* We're done! */
		return 0;
	}

	/*
	 * Now we have screwball stuff, to accommodate stupid COFF.
	 * We have to map the remaining bit of the file into the kernel's
	 * memory map, allocate some anonymous memory, copy that last
	 * bit into it, and then we're done. *sigh*
	 * For clean-up reasons, we actually map in the file last.
	 */

	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08lx,0x%x, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, map_addr, map_len));

	if (map_len != 0) {
		error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
				    map_len, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			return error;
	}

	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &data_buf,
			    PAGE_SIZE,
			    VM_PROT_READ,
			    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    trunc_page(offset + filsz))) != 0)
		return error;

	error = copyout(data_buf, (caddr_t) map_addr, copy_len);

	if (vm_map_remove(kernel_map,
			  (vm_offset_t) data_buf,
			  (vm_offset_t) data_buf + PAGE_SIZE))
		panic("load_coff_section vm_map_remove failed");

	return error;
}
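Note that this vm_mmap() is the BSD kernel-internal primitive, not the Linux helper of the same name used in the earlier examples: it operates on an explicit vm_map, returns the chosen address through a pointer argument, and its return value is 0 or an errno. Reconstructed from the calls above (exact types vary between releases; the DragonFly variant in the later examples drops the object-type argument), the prototype is roughly:

int vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size,
	    vm_prot_t prot, vm_prot_t maxprot, int flags,
	    objtype_t handle_type, void *handle, vm_ooffset_t foff);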
Example #13
/* 
 * mmap_args(void *addr, size_t len, int prot, int flags, int fd,
 *		long pad, off_t pos)
 *
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * character device.
 *
 * No requirements
 */
int
kern_mmap(struct vmspace *vms, caddr_t uaddr, size_t ulen,
	  int uprot, int uflags, int fd, off_t upos, void **res)
{
	struct thread *td = curthread;
 	struct proc *p = td->td_proc;
	struct file *fp = NULL;
	struct vnode *vp;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	off_t pos;
	vm_object_t obj;

	KKASSERT(p);

	addr = (vm_offset_t) uaddr;
	size = ulen;
	prot = uprot & VM_PROT_ALL;
	flags = uflags;
	pos = upos;

	/*
	 * Make sure mapping fits into numeric range etc.
	 *
	 * NOTE: We support the full unsigned range for size now.
	 */
	if (((flags & MAP_ANON) && (fd != -1 || pos != 0)))
		return (EINVAL);

	if (size == 0)
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Virtual page tables cannot be used with MAP_STACK.  Apart from
	 * it not making any sense, the aux union is used by both
	 * types.
	 *
	 * Because the virtual page table is stored in the backing object
	 * and might be updated by the kernel, the mapping must be R+W.
	 */
	if (flags & MAP_VPAGETABLE) {
		if (vkernel_enable == 0)
			return (EOPNOTSUPP);
		if (flags & MAP_STACK)
			return (EINVAL);
		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
			return (EINVAL);
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
	if (size < ulen)			/* wrap */
		return(EINVAL);

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & (MAP_FIXED | MAP_TRYFIXED)) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);

		/*
		 * Address range must be all in user VM space and not wrap.
		 */
		tmpaddr = addr + size;
		if (tmpaddr < addr)
			return (EINVAL);
		if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return (EINVAL);
	} else {
		/*
		 * Get a hint of where to map. It also provides mmap offset
		 * randomization if enabled.
		 */
		addr = vm_map_hint(p, addr, prot);
	}

	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
	} else {
		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
		vp = (struct vnode *) fp->f_data;

		/*
		 * Validate the vnode for the operation.
		 */
		switch(vp->v_type) {
		case VREG:
			/*
			 * Get the proper underlying object
			 */
			if ((obj = vp->v_object) == NULL) {
				error = EINVAL;
				goto done;
			}
			KKASSERT((struct vnode *)obj->handle == vp);
			break;
		case VCHR:
			/*
			 * Make sure a device has not been revoked.  
			 * Mappability is handled by the device layer.
			 */
			if (vp->v_rdev == NULL) {
				error = EBADF;
				goto done;
			}
			break;
		default:
			/*
			 * Nothing else is mappable.
			 */
			error = EINVAL;
			goto done;
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs does not provide private mappings of any kind.
			 */
			if (vp->v_type == VCHR &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 || vp->v_type == VCHR) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;
					if ((error = VOP_GETATTR(vp, &va))) {
						goto done;
					}
					if ((va.va_flags &
					    (IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}

	lwkt_gettoken(&vms->vm_map.token);

	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if (max_proc_mmap && 
	    vms->vm_map.nentries >= max_proc_mmap * vmspace_getrefs(vms)) {
		error = ENOMEM;
		lwkt_reltoken(&vms->vm_map.token);
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
			flags, handle, pos);
	if (error == 0)
		*res = (void *)(addr + pageoff);

	lwkt_reltoken(&vms->vm_map.token);
done:
	if (fp)
		fdrop(fp);

	return (error);
}
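kern_mmap() already applies the page-offset adjustment described in the comment block and hands the result back through *res. A sketch of a thin wrapper in the shape of the mmap_args structure quoted at the top of that comment (illustrative only, not the actual syscall glue):

/* Feed the mmap_args fields straight into kern_mmap(). */
static int
mmap_args_wrapper(const struct mmap_args *uap, void **resp)
{
	return (kern_mmap(curproc->p_vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags, uap->fd, uap->pos, resp));
}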
Example #14
/*
* This is the ioctl implementation.
*/
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* int i; */
	char str[256];
	void *ptr;
	unsigned int order;

	unsigned long private;
	unsigned long adjusted;
	unsigned int diff;
	int ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	void *kernel_addr;
	unsigned long flags;
	PR_DEBUG("start");
	switch (cmd) {
	/*
	*	Exploring VMA issues
	*/
	case IOCTL_MMAP_PRINT:
		ptr = (void *)arg;
		PR_DEBUG("ptr is %p", ptr);
		vma = find_vma(current->mm, arg);
		PR_DEBUG("vma is %p", vma);
		diff = arg - vma->vm_start;
		PR_DEBUG("diff is %d", diff);
		private = (unsigned long)vma->vm_private_data;
		PR_DEBUG("private (ul) is %lu", private);
		PR_DEBUG("private (p) is %p", (void *)private);
		adjusted = private + diff;
		PR_DEBUG("adjusted (ul) is %lu", adjusted);
		PR_DEBUG("adjusted (p) is %p", (void *)adjusted);
		return 0;

	/*
	*	This is asking the kernel to read the memory
	*/
	case IOCTL_MMAP_READ:
		PR_DEBUG("starting to read");
		memcpy(str, vaddr, 256);
		str[255] = '\0';
		PR_DEBUG("data is %s", str);
		return 0;

	/*
	*	This is asking the kernel to write the memory
	*/
	case IOCTL_MMAP_WRITE:
		PR_DEBUG("starting to write");
		memset(vaddr, arg, size);
		return 0;

	/*
	*	This demos how to take the user space pointer and turn it
	*	into a kernel space pointer
	*/
	case IOCTL_MMAP_WRITE_USER:
		PR_DEBUG("starting to write using us pointer");
		ptr = (void *)arg;
		PR_DEBUG("ptr is %p", ptr);
		return 0;

	/*
	*	mmap a region from an ioctl
	*/
	case IOCTL_MMAP_MMAP:
		PR_DEBUG("trying to mmap");

		/*
		* if(do_kmalloc) {
		*	kaddr=kmalloc(ioctl_size,GFP_KERNEL);
		* } else {
		*	order=get_order(ioctl_size);
		*	kaddr=(void*)__get_free_pages(GFP_KERNEL,order);
		* }
		*/
		mm = current->mm;
		flags = MAP_POPULATE | MAP_SHARED;
		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
		/*
		 * vm_mmap will change the memory layout of the process and
		 * may sleep, so this code must be in a path that can sleep.
		 * vm_mmap takes the mmap semaphore itself, so it must NOT be
		 * held here (no down_write(&mm->mmap_sem)).
		 */
		addr = vm_mmap(
			filp,/* file pointer */
			0,/* recommended use space address */
			ioctl_size,/* size */
			PROT_READ | PROT_WRITE,/* protection */
			flags,/* flags */
			0/* pg offset */
		);
		/*
		 * Nothing to release: we never took mmap_sem ourselves
		 * (no up_write(&mm->mmap_sem) needed).
		 */
		/*
		PR_DEBUG("kaddr is (p) %p",kaddr);
		PR_DEBUG("real size is (d) %d",ioctl_size);
		*/
		PR_DEBUG(
			"addr for user space is (lu) %lu / (p) %p",
			addr, (void *)addr);
		return addr;

	/*
	*	unmap a region
	*/
	case IOCTL_MMAP_UNMAP:
		PR_DEBUG("trying to unmap");
		vma = find_vma(current->mm, addr);
		kernel_addr = vma->vm_private_data;
		size = vma->vm_end - vma->vm_start;
		PR_DEBUG("deduced kernel_addr is %p", kernel_addr);
		PR_DEBUG("deduced size is (d) %d", size);
		PR_DEBUG("real size is (d) %d", ioctl_size);
		PR_DEBUG("real kaddr is (p) %p", kaddr);
		ret = do_munmap(current->mm, addr, ioctl_size);
		if (ret) {
			PR_ERROR("error from do_munmap");
			return ret;
		}
		if (do_kmalloc)
			kfree(kernel_addr);
		else {
			order = get_order(size);
			free_pages((unsigned long)kernel_addr, order);
		}
		return ret;

	/*
	*	Set the size of the region
	*/
	case IOCTL_MMAP_SETSIZE:
		PR_DEBUG("setting the size");
		ioctl_size = arg;
		PR_DEBUG("size is %d", ioctl_size);
		return 0;
	}
	/* unknown ioctl command */
	return -EINVAL;
}
Example #15
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	unsigned long value = 0x5A;

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/* Legitimate usage: none of these should fail. */
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	ret |= test(get_user(value, (unsigned long __user *)usermem),
		    "legitimate get_user failed");
	ret |= test(put_user(value, (unsigned long __user *)usermem),
		    "legitimate put_user failed");

	/* Invalid usage: none of these should succeed. */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");
	ret |= test(!get_user(value, (unsigned long __user *)kmem),
		    "illegal get_user passed");
	ret |= test(!put_user(value, (unsigned long __user *)kmem),
		    "illegal put_user passed");

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
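Both this and the next example lean on a small test() helper that logs the message when the condition indicates a failure and folds the result into ret; in lib/test_user_copy.c it is, roughly, the following macro:

/* Roughly the helper assumed by these tests. */
#define test(condition, msg)			\
({						\
	int cond = (condition);			\
	if (cond)				\
		pr_warn("%s\n", msg);		\
	cond;					\
})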
Example #16
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");

#define test_legit(size, check)						  \
	do {								  \
		val_##size = check;					  \
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		  \
		val_##size = 0;						  \
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		  \
		ret |= test(val_##size != check,			  \
		    "legitimate get_user (" #size ") failed to do copy"); \
		if (val_##size != check) {				  \
			pr_info("0x%llx != 0x%llx\n",			  \
				(unsigned long long)val_##size,		  \
				(unsigned long long)check);		  \
		}							  \
	} while (0)

	test_legit(u8,  0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");

#define test_illegal(size, check)					    \
	do {								    \
		val_##size = (check);					    \
		ret |= test(!get_user(val_##size, (size __user *)kmem),	    \
		    "illegal get_user (" #size ") passed");		    \
		ret |= test(val_##size != (size)0,			    \
		    "zeroing failure for illegal get_user (" #size ")");    \
		if (val_##size != (size)0) {				    \
			pr_info("0x%llx != 0\n",			    \
				(unsigned long long)val_##size);	    \
		}							    \
		ret |= test(!put_user(val_##size, (size __user *)kmem),	    \
		    "illegal put_user (" #size ") passed");		    \
	} while (0)

	test_illegal(u8,  0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
Example #17
static int
coff_load_file(struct thread *td, char *name)
{
	struct proc *p = td->td_proc;
  	struct vmspace *vmspace = p->p_vmspace;
  	int error;
  	struct nameidata nd;
  	struct vnode *vp;
  	struct vattr attr;
  	struct filehdr *fhdr;
  	struct aouthdr *ahdr;
  	struct scnhdr *scns;
  	char *ptr = 0;
  	int nscns;
  	unsigned long text_offset = 0, text_address = 0, text_size = 0;
  	unsigned long data_offset = 0, data_address = 0, data_size = 0;
  	unsigned long bss_size = 0;
  	int i;

	NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
	    UIO_SYSSPACE, name, td);

  	error = namei(&nd);
  	if (error)
    		return error;

  	vp = nd.ni_vp;
  	if (vp == NULL)
    		return ENOEXEC;

  	if (vp->v_writecount) {
    		error = ETXTBSY;
    		goto fail;
  	}

  	if ((error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
	    || ((attr.va_mode & 0111) == 0)
	    || (attr.va_type != VREG))
    		goto fail;

  	if (attr.va_size == 0) {
    		error = ENOEXEC;
    		goto fail;
  	}

  	if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
    		goto fail;

	/*
	 * Lose the lock on the vnode. It's no longer needed, and must not
	 * exist for the pagefault paging to work below.
	 */
	VOP_UNLOCK(vp, 0, td);

  	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &ptr,
			    PAGE_SIZE,
			    VM_PROT_READ,
		       	    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    0)) != 0)
		goto unlocked_fail;

  	fhdr = (struct filehdr *)ptr;

  	if (fhdr->f_magic != I386_COFF) {
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	nscns = fhdr->f_nscns;

  	if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
    		/*
     		 * XXX -- just fail.  I'm so lazy.
     		 */
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

  	scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
			  + sizeof(struct aouthdr));

  	for (i = 0; i < nscns; i++) {
    		if (scns[i].s_flags & STYP_NOLOAD)
      			continue;
    		else if (scns[i].s_flags & STYP_TEXT) {
      			text_address = scns[i].s_vaddr;
      			text_size = scns[i].s_size;
      			text_offset = scns[i].s_scnptr;
    		}
		else if (scns[i].s_flags & STYP_DATA) {
      			data_address = scns[i].s_vaddr;
      			data_size = scns[i].s_size;
      			data_offset = scns[i].s_scnptr;
    		} else if (scns[i].s_flags & STYP_BSS) {
      			bss_size = scns[i].s_size;
    		}
  	}

  	if ((error = load_coff_section(vmspace, vp, text_offset,
				      (caddr_t)(void *)(uintptr_t)text_address,
				      text_size, text_size,
				      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
    		goto dealloc_and_fail;
  	}
  	if ((error = load_coff_section(vmspace, vp, data_offset,
				      (caddr_t)(void *)(uintptr_t)data_address,
				      data_size + bss_size, data_size,
				      VM_PROT_ALL)) != 0) {
    		goto dealloc_and_fail;
  	}

  	error = 0;

 dealloc_and_fail:
	if (vm_map_remove(kernel_map,
			  (vm_offset_t) ptr,
			  (vm_offset_t) ptr + PAGE_SIZE))
    		panic("%s vm_map_remove failed", __func__);

 fail:
	VOP_UNLOCK(vp, 0, td);
 unlocked_fail:
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vrele(nd.ni_vp);
  	return error;
}
Example #18
static void lkdtm_do_action(enum ctype which)
{
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2,
				3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		 break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		size_t len = 1024;
		u32 *data = kmalloc(len, GFP_KERNEL);

		kfree(data);
		schedule();
		memset(data, 0x78, len);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];
		execute_location(stack_area);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
		execute_location(kmalloc_area);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
		execute_location(vmalloc_area);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);

		break;
	}
	case CT_WRITE_RO: {
		unsigned long *ptr;

		ptr = (unsigned long *)&rodata;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_NONE:
	default:
		break;
	}

}
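The CT_EXEC_* cases rely on execute_location() and execute_user_location(), which are defined elsewhere in lkdtm. In outline, the user-space variant copies do_nothing() into the freshly mapped page and then jumps into it; a sketch (do_nothing() and EXEC_SIZE come from the surrounding module):

static void execute_user_location(void *dst)
{
	void (*func)(void) = dst;

	/* Intentionally crossing the kernel/user memory boundary. */
	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	func();
}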
Example #19
static int init_drm(void) {
	int err;
	int i;

	struct drm_file *file_priv;

	struct drm_mode_card_res res = {0};

	PTR_TYPE res_fb_buf[10] = {0};
	PTR_TYPE res_crtc_buf[10] = {0};
	PTR_TYPE res_conn_buf[10] = {0};
	PTR_TYPE res_enc_buf[10] = {0};
	
	struct drm_mode_modeinfo conn_mode_buf[20];
	PTR_TYPE conn_prop_buf[20];
	PTR_TYPE conn_propval_buf[20];
	PTR_TYPE conn_enc_buf[20];

	struct drm_mode_get_connector conn;

	struct drm_mode_create_dumb create_dumb;
	struct drm_mode_map_dumb map_dumb;
	struct drm_mode_fb_cmd cmd_dumb;

	struct drm_mode_get_encoder enc;
	struct drm_mode_crtc crtc;


	// make kernel and user memory in common space
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	// open device file
	filp = filp_open(DRM_DEV_PATH, O_RDWR | O_CLOEXEC, 0);
	if (IS_ERR(filp)) {
		err = PTR_ERR(filp);
		printk(KERN_ERR "drmtest: unable to open file: %s (%d)", DRM_DEV_PATH, err);
		return err;
	}
 
 	// set master
  	err = drm_ioctl(filp, DRM_IOCTL_SET_MASTER, 0);
	if(err) {
		printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_SET_MASTER (%d)", err);
		goto master_release;
	}

	// check if master permission is set
	file_priv = filp->private_data;
	err = drm_ioctl_permit(DRM_MASTER, file_priv);
	if(err) {
		printk(KERN_ERR "drmtest: cannot set MASTER permissions (%d)", err);
		goto master_release;
	}

	// get resources count
	err = drm_ioctl(filp, DRM_IOCTL_MODE_GETRESOURCES, (long unsigned int)&res);
	if(err) {
		printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETRESOURCES (%d)", err);
		goto master_release;
	}

	// set pointers
	res.fb_id_ptr=(PTR_TYPE)res_fb_buf;
	res.crtc_id_ptr=(PTR_TYPE)res_crtc_buf;
	res.connector_id_ptr=(PTR_TYPE)res_conn_buf;
	res.encoder_id_ptr=(PTR_TYPE)res_enc_buf;

	// get resources data
	err = drm_ioctl(filp, DRM_IOCTL_MODE_GETRESOURCES, (long unsigned int)&res);
	if(err) {
		printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETRESOURCES (%d)", err);
		goto master_release;
	}

	// print resources info
	printk("fb: %d, crtc: %d, conn: %d, enc: %d\n",res.count_fbs,res.count_crtcs,res.count_connectors,res.count_encoders);

	// Loop through all available connectors
	for (i=0; i<res.count_connectors; i++)
	{
		// clear
		memset(conn_mode_buf, 0, sizeof(struct drm_mode_modeinfo)*20);
		memset(conn_prop_buf, 0, sizeof(PTR_TYPE)*20);
		memset(conn_propval_buf, 0, sizeof(PTR_TYPE)*20);
		memset(conn_enc_buf, 0, sizeof(PTR_TYPE)*20);
		memset(&conn, 0, sizeof(struct drm_mode_get_connector));

		conn.connector_id = res_conn_buf[i];

		//get connector resource counts
		err = drm_ioctl(filp, DRM_IOCTL_MODE_GETCONNECTOR, (long unsigned int)&conn);	
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETCONNECTOR (%d)", err);
			goto master_release;
		}

		// set pointers
		conn.modes_ptr=(PTR_TYPE)conn_mode_buf;
		conn.props_ptr=(PTR_TYPE)conn_prop_buf;
		conn.prop_values_ptr=(PTR_TYPE)conn_propval_buf;
		conn.encoders_ptr=(PTR_TYPE)conn_enc_buf;

		// get connector resources
		err = drm_ioctl(filp, DRM_IOCTL_MODE_GETCONNECTOR, (long unsigned int)&conn);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETCONNECTOR (%d)", err);
			goto master_release;
		}

		// check if the connector is connected
		if (conn.count_encoders<1 || conn.count_modes<1 || !conn.encoder_id || !conn.connection) {
			printk("Not connected\n");
			continue;
		}


		// *****************************
		//      create dumb buffer
		// *****************************

		memset(&create_dumb, 0, sizeof(struct drm_mode_create_dumb));
		memset(&map_dumb, 0, sizeof(struct drm_mode_map_dumb));
		memset(&cmd_dumb, 0, sizeof(struct drm_mode_fb_cmd));

		// set screen params
		create_dumb.width = conn_mode_buf[0].hdisplay;
		create_dumb.height = conn_mode_buf[0].vdisplay;
		create_dumb.bpp = 32;
		create_dumb.flags = 0;
		create_dumb.pitch = 0;
		create_dumb.size = 0;
		create_dumb.handle = 0;

		// create dumb buffer
		err = drm_ioctl(filp, DRM_IOCTL_MODE_CREATE_DUMB, (long unsigned int)&create_dumb);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_CREATE_DUMB (%d)", err);
			goto master_release;
		}

		cmd_dumb.width=create_dumb.width;
		cmd_dumb.height=create_dumb.height;
		cmd_dumb.bpp=create_dumb.bpp;
		cmd_dumb.pitch=create_dumb.pitch;
		cmd_dumb.depth=24;
		cmd_dumb.handle=create_dumb.handle;

		// add framebuffer
		err = drm_ioctl(filp, DRM_IOCTL_MODE_ADDFB, (long unsigned int)&cmd_dumb);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_ADDFB (%d)", err);
			goto master_release;
		}

		// prepare dumb buffer to mmap
		map_dumb.handle=create_dumb.handle;
		err = drm_ioctl(filp, DRM_IOCTL_MODE_MAP_DUMB, (long unsigned int)&map_dumb);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_MAP_DUMB (%d)", err);
			goto master_release;
		}

		
		// map buffer to memory
		fb_base[i] = (void *)vm_mmap(filp, 0, create_dumb.size, PROT_READ | PROT_WRITE, MAP_SHARED, map_dumb.offset);

		fb_w[i]=create_dumb.width;
		fb_h[i]=create_dumb.height;


		// *************************
		// kernel mode setting
		// *************************


		printk("%d : mode: %d, prop: %d, enc: %d\n",conn.connection,conn.count_modes,conn.count_props,conn.count_encoders);
		printk("modes: %dx%d FB: %d\n", conn_mode_buf[0].hdisplay, conn_mode_buf[0].vdisplay, (int)fb_base[i]);


		// init encoder
		memset(&enc, 0, sizeof(struct drm_mode_get_encoder));

		enc.encoder_id=conn.encoder_id;

		// get encoder
		err = drm_ioctl(filp, DRM_IOCTL_MODE_GETENCODER, (long unsigned int)&enc);	
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETENCODER (%d)", err);
			goto master_release;
		}

		// init crtc
		memset(&crtc, 0, sizeof(struct drm_mode_crtc));

		crtc.crtc_id=enc.crtc_id;

		err = drm_ioctl(filp, DRM_IOCTL_MODE_GETCRTC, (long unsigned int)&crtc);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_GETCRTC (%d)", err);
			goto master_release;
		}

		crtc.fb_id=cmd_dumb.fb_id;
		crtc.set_connectors_ptr=(PTR_TYPE)&res_conn_buf[i];
		crtc.count_connectors=1;
		crtc.mode=conn_mode_buf[0];
		crtc.mode_valid=1;

		err = drm_ioctl(filp, DRM_IOCTL_MODE_SETCRTC, (long unsigned int)&crtc);
		if(err) {
			printk(KERN_ERR "drmtest: error in drm_ioctl, DRM_IOCTL_MODE_SETCRTC (%d)", err);
			goto master_release;
		}
	}

	connectors_count = res.count_connectors;


	return 0;

master_release:
	exit_drm();	
	return err;
}
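With fb_base[i], fb_w[i] and fb_h[i] recorded, drawing means writing 32-bit pixels into the mapped dumb buffer. A sketch of a fill routine (it assumes the stride equals width * 4, which usually holds for the bpp = 32 setup above; strictly, create_dumb.pitch is the stride, and the unchecked vm_mmap() result is screened with IS_ERR_OR_NULL here):

/* Illustrative only: fill one connector's framebuffer with a solid
 * XRGB8888 colour. */
static void fill_screen(int i, u32 colour)
{
	u32 *px = fb_base[i];
	int x, y;

	if (IS_ERR_OR_NULL(px))
		return;
	for (y = 0; y < fb_h[i]; y++)
		for (x = 0; x < fb_w[i]; x++)
			px[y * fb_w[i] + x] = colour;
}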
Example #20
/*
 * non-anonymous, non-stack descriptor mappings only!
 *
 * This routine mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
    off_t pos, void **resp)
{
    struct thread *td = curthread;
    struct proc *p = td->td_proc;
    vm_size_t pageoff;
    vm_prot_t maxprot;
    vm_offset_t addr;
    void *handle;
    int error;
    vm_object_t obj;
    struct vmspace *vms = p->p_vmspace;
    struct vnode *vp;

    prot &= VM_PROT_ALL;

    if ((ssize_t)size < 0 || (flags & MAP_ANON))
	return(EINVAL);

    pageoff = (pos & PAGE_MASK);
    pos -= pageoff;

    /* Adjust size for rounding (on both ends). */
    size += pageoff;				/* low end... */
    size = (vm_size_t)round_page(size);		/* hi end */
    addr = (vm_offset_t)addr_arg;

    /*
     * Check for illegal addresses.  Watch out for address wrap... Note
     * that VM_*_ADDRESS are not constants due to casts (argh).
     */
    if (flags & MAP_FIXED) {
	/*
	 * The specified address must have the same remainder
	 * as the file offset taken modulo PAGE_SIZE, so it
	 * should be aligned after adjustment by pageoff.
	 */
	addr -= pageoff;
	if (addr & PAGE_MASK)
	    return (EINVAL);
	/* Address range must be all in user VM space. */
	if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
	    return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
	    return (EINVAL);
	if (addr + size < addr)
	    return (EINVAL);
    } else if (addr == 0 ||
	(addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	 addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))
    ) {
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
    }

    /*
     * Mapping file, get fp for validation. Obtain vnode and make
     * sure it is of appropriate type.
     */
    if (fp->f_type != DTYPE_VNODE)
	return (EINVAL);

    /*
     * POSIX shared-memory objects are defined to have
     * kernel persistence, and are not defined to support
     * read(2)/write(2) -- or even open(2).  Thus, we can
     * use MAP_ASYNC to trade on-disk coherence for speed.
     * The shm_open(3) library routine turns on the FPOSIXSHM
     * flag to request this behavior.
     */
    if (fp->f_flag & FPOSIXSHM)
	flags |= MAP_NOSYNC;
    vp = (struct vnode *) fp->f_data;
    if (vp->v_type != VREG && vp->v_type != VCHR)
	return (EINVAL);

    /*
     * Get the proper underlying object
     */
    if (vp->v_type == VREG) {
	if ((obj = vp->v_object) == NULL)
	    return (EINVAL);
	KKASSERT(vp == (struct vnode *)obj->handle);
    }

    /*
     * XXX hack to handle use of /dev/zero to map anon memory (ala
     * SunOS).
     */
    if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
	handle = NULL;
	maxprot = VM_PROT_ALL;
	flags |= MAP_ANON;
	pos = 0;
    } else {
	/*
	 * cdevs does not provide private mappings of any kind.
	 */
	if (vp->v_type == VCHR && 
	    (flags & (MAP_PRIVATE|MAP_COPY))) {
		error = EINVAL;
		goto done;
	}
	/*
	 * Ensure that file and memory protections are
	 * compatible.  Note that we only worry about
	 * writability if mapping is shared; in this case,
	 * current and max prot are dictated by the open file.
	 * XXX use the vnode instead?  Problem is: what
	 * credentials do we use for determination? What if
	 * proc does a setuid?
	 */
	maxprot = VM_PROT_EXECUTE;	/* ??? */
	if (fp->f_flag & FREAD) {
	    maxprot |= VM_PROT_READ;
	} else if (prot & PROT_READ) {
	    error = EACCES;
	    goto done;
	}
	/*
	 * If we are sharing potential changes (either via
	 * MAP_SHARED or via the implicit sharing of character
	 * device mappings), and we are trying to get write
	 * permission although we opened it without asking
	 * for it, bail out.  
	 */

	if ((flags & MAP_SHARED) != 0 ||
	    (vp->v_type == VCHR)
	) {
	    if ((fp->f_flag & FWRITE) != 0) {
		struct vattr va;
		if ((error = VOP_GETATTR(vp, &va))) {
		    goto done;
		}
		if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
		    maxprot |= VM_PROT_WRITE;
		} else if (prot & PROT_WRITE) {
		    error = EPERM;
		    goto done;
		}
	    } else if ((prot & PROT_WRITE) != 0) {
		error = EACCES;
		goto done;
	    }
	} else {
	    maxprot |= VM_PROT_WRITE;
	}
	handle = (void *)vp;
    }
    error = vm_mmap(&vms->vm_map, &addr, size, prot, 
		    maxprot, flags, handle, pos);
    if (error == 0 && addr_arg)
	*resp = (void *)addr;
done:
    return (error);
}