static inline int rasmemele_touch(struct RasMemEle* ele, unsigned long block,
	void (*mmupdate)(struct mm_struct*, struct RasMemEle*,long))
{
	struct mm_struct* mm;
	struct task_struct* tsk; 
	ras_retn_if(!ele->args.pid, 0);
	
	tsk = rasmemele_task(ele);
	ras_retn_if(0 == tsk, -ESRCH);
	write_lock(&ele->rwk);
	if(0 == ele->tchhandle)
	{
		ele->tchhandle = tsk;
	}
	write_unlock(&ele->rwk);
	ras_retn_if(tsk != ele->tchhandle, -EFAULT);
	
	mm = get_task_mm(ele->tchhandle);
	ras_retn_if(0 == mm, -EINVAL);
	
	write_lock(&ele->rwk);
	mmupdate(mm, ele, block);
	write_unlock(&ele->rwk);
	mmput(mm);  /* drop the reference taken by get_task_mm() */
	return 0;
}
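Every example on this page follows the same contract: a successful get_task_mm() pins the task's mm_struct, and the caller must drop that reference with mmput(); NULL means the task is a kernel thread or its mm is already gone. A minimal sketch of the pairing (with_task_mm and the inspect callback are hypothetical names, not taken from any example here):

static int with_task_mm(struct task_struct *tsk,
			void (*inspect)(struct mm_struct *))
{
	struct mm_struct *mm = get_task_mm(tsk);	/* NULL for kernel threads */

	if (!mm)
		return -EINVAL;
	inspect(mm);
	mmput(mm);	/* drop the reference taken by get_task_mm() */
	return 0;
}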
Example #2
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
	struct mm_struct *mm;

	/* convert pages-usec to Mbyte-usec */
	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB;
	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB;
	mm = get_task_mm(p);
	if (mm) {
		/* adjust to KB unit */
		stats->hiwater_rss   = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB;
		stats->hiwater_vm    = get_mm_hiwater_vm(mm)  * PAGE_SIZE / KB;
		mmput(mm);
	}
	stats->read_char	= p->ioac.rchar & KB_MASK;
	stats->write_char	= p->ioac.wchar & KB_MASK;
	stats->read_syscalls	= p->ioac.syscr & KB_MASK;
	stats->write_syscalls	= p->ioac.syscw & KB_MASK;
#ifdef CONFIG_TASK_IO_ACCOUNTING
	stats->read_bytes	= p->ioac.read_bytes & KB_MASK;
	stats->write_bytes	= p->ioac.write_bytes & KB_MASK;
	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
#else
	stats->read_bytes	= 0;
	stats->write_bytes	= 0;
	stats->cancelled_write_bytes = 0;
#endif
}
Example #3
/*
 * fill in extended accounting fields
 */
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
	struct mm_struct *mm;

	/* convert pages-usec to Mbyte-usec */
	stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB;
	stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB;
	mm = get_task_mm(p);
	if (mm) {
		/* adjust to KB unit */
		stats->hiwater_rss   = mm->hiwater_rss * PAGE_SIZE / KB;
		stats->hiwater_vm    = mm->hiwater_vm * PAGE_SIZE / KB;
		mmput(mm);
	}
	stats->read_char	= p->ioac.rchar;
	stats->write_char	= p->ioac.wchar;
	stats->read_syscalls	= p->ioac.syscr;
	stats->write_syscalls	= p->ioac.syscw;
#ifdef CONFIG_TASK_IO_ACCOUNTING
	stats->read_bytes	= p->ioac.read_bytes;
	stats->write_bytes	= p->ioac.write_bytes;
	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
#else
	stats->read_bytes	= 0;
	stats->write_bytes	= 0;
	stats->cancelled_write_bytes = 0;
#endif
}
Example #4
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}
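The walk above recurs in several examples below: take mmap_sem for reading, follow mm->mmap through vm_next, release the semaphore, then mmput(). A stripped-down sketch of just that skeleton, assuming a kernel where the VMA linked list still exists (count_task_vmas is a hypothetical helper):

static int count_task_vmas(struct task_struct *tsk)
{
	struct mm_struct *mm = get_task_mm(tsk);
	struct vm_area_struct *vma;
	int n = 0;

	if (!mm)
		return 0;
	down_read(&mm->mmap_sem);	/* keep the VMA list stable */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		n++;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return n;
}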
Example #5
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct * vma;
	int result = -ENOENT;
	struct task_struct *task = proc_task(inode);
	struct mm_struct * mm = get_task_mm(task);

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}
Example #6
static inline void ltt_enumerate_task_vm_maps(struct task_struct *t)
{
	struct mm_struct *mm;
	struct vm_area_struct *	map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(t);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_statedump_enumerate_vm_maps(
					t->pid,
					(void *)map->vm_start,
					(void *)map->vm_end,
					map->vm_flags,
					map->vm_pgoff << PAGE_SHIFT,
					ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}
Example #7
struct spu_context *alloc_spu_context(struct address_space *local_store)
{
	struct spu_context *ctx;
	ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa) {
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	init_rwsem(&ctx->state_sema);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	ctx->ibox_fasync = NULL;
	ctx->wbox_fasync = NULL;
	ctx->state = SPU_STATE_SAVED;
	ctx->local_store = local_store;
	ctx->spu = NULL;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
Example #8
static
void pack_process_status(struct bproc_status_msg_t *msg,
			 struct task_struct *tsk, int pack_mm) {
    msg->state     = tsk->state;
    msg->exit_code = tsk->exit_code;

    msg->utime     = tsk->utime;
    msg->stime     = tsk->stime;

    spin_lock_irq(&current->sighand->siglock);
    /* XXX FIX ME:  there's utime and stime in here too */
    msg->cutime    = tsk->signal->cutime;
    msg->cstime    = tsk->signal->cstime;
    spin_unlock_irq(&current->sighand->siglock);

    msg->minflt    = tsk->min_flt;
    msg->majflt    = tsk->maj_flt;
    /*msg->nswap     = tsk->nswap;*/

    if (pack_mm) {
	struct mm_struct *mm;
	mm = get_task_mm(tsk);
	if (mm) {
	    down_read(&mm->mmap_sem);
	    pack_process_mm_stats(msg, mm);
	    up_read(&mm->mmap_sem);
	    mmput(mm);
	}
    } else {
	msg->vm.statm.size = 0;
    }
}
Example #9
void tasks_test_saved(void)
{
	struct task_struct *p;
	struct cred *cred = NULL;
	struct mm_struct *mm;
	unsigned long vsize = 0, vrss = 0;

	printk("\nThe following trace is a kernel tasks test and not a bug!\n");

	printk("USER\tPID\tVSIZE\tRSS\tSTATE\tNAME\n");
	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		cred = (struct cred *)get_cred((struct cred *)__task_cred(p));
		vsize = 0;
		vrss = 0;
		mm = get_task_mm(p);
		if (mm) {
			tasks_mem_get(mm, &vsize, &vrss);
			mmput(mm);	/* balance the reference from get_task_mm() */
		}
		printk("%d\t%d\t%ld\t%ld\t%s\t%s\n", 
			cred->uid,
			task_pid_nr(p), 
			vsize,
			vrss,
			get_task_state(p), 
			p->comm);
		put_cred(cred);	/* balance the get_cred() above */
	}
	write_unlock_irq(&tasklist_lock);
}
Example #10
/*
 * enumerate all modules for process
 * pid: process id
 * lsc: load sample count
 */
static void enum_modules_for_process(pid_t pid, unsigned long lsc)
{
	bool find = false;
	char *pname;
	unsigned long name_offset;
	unsigned int options;
	struct task_struct *task;
	struct vm_area_struct *mmap;
	struct mm_struct *mm;

	task = px_find_task_by_pid(pid);

	if (task == NULL)
		return;

	mm = get_task_mm(task);
	if (mm != NULL)
	{
		down_read(&mm->mmap_sem);

		for (mmap = mm->mmap; mmap; mmap = mmap->vm_next)
		{
			if (is_valid_module(mmap))
			{
				/* 'name' is a file-scope PATH_MAX buffer in the original module */
				memset(name, 0, PATH_MAX * sizeof(char));
				pname = px_d_path(mmap->vm_file, name, PATH_MAX);

				if (pname != NULL)
				{
					options = 0;

					if (find == false)
					{
						options |= MODULE_FLAG_1ST;
						find = true;
					}
					
					name_offset = get_filename_offset(pname);
					
					module_load_notif(pname, name_offset, pid,
						              mmap->vm_start, mmap->vm_end - mmap->vm_start,
						              options, lsc);
				}
			}
		}

		up_read(&mm->mmap_sem);
		mmput(mm);
	}

	if (find == false)
	{
		module_load_notif(task->comm, 0, pid,
				  LINUX_APP_BASE_LOW, 0,//mmap->vm_end - mmap->vm_start,
				  MODULE_FLAG_1ST, lsc);
	}

	return;
}
Example #11
/* Repurposed from fs/proc/base.c, with NULL-replacement for saner printing.
 * Allocates the buffer itself.
 */
char *printable_cmdline(struct task_struct *task)
{
	char *buffer = NULL, *sanitized;
	int res, i;
	unsigned int len;
	struct mm_struct *mm;

	mm = get_task_mm(task);
	if (!mm)
		goto out;

	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buffer)
		goto out_mm;

	len = mm->arg_end - mm->arg_start;

	if (len > PAGE_SIZE)
		len = PAGE_SIZE;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/* Space-fill NULLs. */
	if (res > 1)
		for (i = 0; i < res - 2; ++i)
			if (buffer[i] == '\0')
				buffer[i] = ' ';

	/* If the NULL at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > PAGE_SIZE - res)
				len = PAGE_SIZE - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}

	/* Make sure result is printable. */
	sanitized = printable(buffer);
	kfree(buffer);
	buffer = sanitized;

out_mm:
	mmput(mm);
out:
	return buffer;
}
Example #12
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
	/* Is there an owner already? */
	if (dev->mm)
		return -EBUSY;
	/* No owner, become one */
	dev->mm = get_task_mm(current);
	return 0;
}
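Unlike most examples here, vhost_dev_set_owner() keeps the mm reference for the device's entire lifetime, so the matching mmput() belongs in the teardown path. A sketch of that counterpart (vhost_dev_drop_owner is a hypothetical name; upstream vhost does this inside its cleanup routine):

static void vhost_dev_drop_owner(struct vhost_dev *dev)
{
	if (dev->mm)
		mmput(dev->mm);	/* release the reference taken in set_owner */
	dev->mm = NULL;
}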
Example #13
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *map, *tail_map;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the maps.
	 */

	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_map = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (map = find_vma(mm, last_addr))) {
		map = map->vm_next;
		goto out;
	}

	/*
	 * Check the map index is within the range and do
	 * sequential scan until m_index.
	 */
	map = NULL;
	if ((unsigned long)l < mm->map_count) {
		map = mm->mmap;
		while (l-- && map)
			map = map->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_map = NULL; /* After gate map */

out:
	if (map)
		return map;

	/* End of maps has been reached */
	m->version = (tail_map != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_map;
}
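m_start() deliberately returns with mmap_sem held and the mm still referenced whenever it hands back a real VMA; the release belongs in the seq_file stop callback. A sketch of that counterpart, modeled on the historical fs/proc implementation:

static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *map = v;

	/* the gate VMA is handed back after the locks were already dropped */
	if (map && map != get_gate_vma(task)) {
		struct mm_struct *mm = map->vm_mm;

		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}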
Example #14
int setup_from_pid(pid_t pid)
{
	struct mm_struct *mm = NULL;
	struct task_struct *tsk;
	int errcode = -EINVAL;

	DLOG("setup_from_pid: examining pid [%d]\n", pid);
	tsk = my_find_task_by_pid(pid);
	if (tsk == NULL) {
		printk (KERN_ALERT
			"/proc/%s: can't find task for pid %d\n",
			PROCFS_NAME, pid);
		goto Exit;
	}
	if (tsk == current) {
		printk (KERN_ALERT
			"/proc/%s: can't self-examine %d\n",
			PROCFS_NAME, pid);
		goto Exit;
	}
	
	mm = get_task_mm(tsk);
	if (mm == NULL) {
		printk (KERN_ALERT
			"/proc/%s: can't get mm for task for pid %d\n",
			PROCFS_NAME, pid);
		goto Exit;
	}

	/* Stop the vma list changing.
	 * We can't take page_table_lock yet because we vmalloc */
	down_read(&mm->mmap_sem);
	if ((errcode = alloc_vmalist_info(mm->mmap)) < 0) {
		printk (KERN_ALERT
			"/proc/%s: failed to alloc for mm for pid %d\n",
			PROCFS_NAME, pid);
		goto Exit;
	}

	/* We've got our space allocated, fill it in */
	spin_lock(&mm->page_table_lock);
	store_vmalist_info(mm->mmap);
	spin_unlock(&mm->page_table_lock);

	errcode = 0;
Exit:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	
	return errcode;
}
Example #15
static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
				 void *buf, int len, int write)
{
	/* Just copied from kernel for the kernels which doesn't
	 * have access_process_vm() exported */
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, rc, offset;
		void *maddr;

		rc = get_user_pages(tsk, mm, addr, 1,
				     write, 1, &page, &vma);
		if (rc <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
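cfs_access_process_vm() returns the number of bytes actually copied, never a negative errno, so callers have to compare against the requested length themselves. A hypothetical usage sketch:

static int read_remote_long(struct task_struct *tsk, unsigned long addr,
			    long *out)
{
	int n = cfs_access_process_vm(tsk, addr, out, sizeof(*out), 0);

	return n == sizeof(*out) ? 0 : -EFAULT;	/* a partial read is a failure */
}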
Example #16
int get_task_altpinfo( struct task_struct * tp, struct task_altpinfo * talt ) {
	struct mm_struct * mm;
	print_debug( "execute get_task_altpinfo().\n" );
/* pid %d, uid %d, comm %s, state %c, vsize %lu, session %d */
	read_lock( &tasklist_lock );
	talt->pid = tp->pid;
	talt->uid = tp->uid;
	snprintf( talt->comm, TASK_COMM_LEN, "%s", tp->comm );
	mm = get_task_mm( tp );
	talt->vsize = ( mm ? PAGE_SIZE * mm->total_vm : 0 );
	talt->state = get_task_state(tp);
	talt->sid = tp->signal->session;
	read_unlock( &tasklist_lock );
	if ( mm )
		mmput( mm );	/* mmput() can sleep, so drop it after tasklist_lock */
	talt->comm[TASK_COMM_LEN - 1] = '\0';	/* stay within the comm buffer */
	return 0;
}
Example #17
/*L:025 This actually initializes a CPU.  For the moment, a Guest is only
 * uniprocessor, so "id" is always 0. */
static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
{
	/* We have a limited number of CPUs in the lguest struct. */
	if (id >= ARRAY_SIZE(cpu->lg->cpus))
		return -EINVAL;

	/* Set up this CPU's id, and pointer back to the lguest struct. */
	cpu->id = id;
	cpu->lg = container_of((cpu - id), struct lguest, cpus[0]);
	cpu->lg->nr_cpus++;

	/* Each CPU has a timer it can set. */
	init_clockdev(cpu);

	/* We need a complete page for the Guest registers: they are accessible
	 * to the Guest and we can only grant it access to whole pages. */
	cpu->regs_page = get_zeroed_page(GFP_KERNEL);
	if (!cpu->regs_page)
		return -ENOMEM;

	/* We actually put the registers at the bottom of the page. */
	cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);

	/* Now we initialize the Guest's registers, handing it the start
	 * address. */
	lguest_arch_setup_regs(cpu, start_ip);

	/* Initialize the queue for the Waker to wait on */
	init_waitqueue_head(&cpu->break_wq);

	/* We keep a pointer to the Launcher task (ie. current task) for when
	 * other Guests want to wake this one (eg. console input). */
	cpu->tsk = current;

	/* We need to keep a pointer to the Launcher's memory map, because if
	 * the Launcher dies we need to clean it up.  If we don't keep a
	 * reference, it is destroyed before close() is called. */
	cpu->mm = get_task_mm(cpu->tsk);

	/* We remember which CPU's pages this Guest used last, for optimization
	 * when the same Guest runs on the same CPU twice. */
	cpu->last_pages = NULL;

	/* No error == success. */
	return 0;
}
Example #18
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	struct timespec ts;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	ctx->stats.tstamp = timespec_to_ns(&ts);

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
Example #19
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}
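mm_access() has a three-way return that callers must distinguish: a valid mm, NULL when the task has no mm at all, or an ERR_PTR() when ptrace rules deny access. A hedged usage sketch (dump_task_mm is a hypothetical caller; PTRACE_MODE_READ is one of the standard access modes):

static int dump_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* access denied */
	if (!mm)
		return -ESRCH;		/* kernel thread or already exited */
	/* ... inspect mm here ... */
	mmput(mm);
	return 0;
}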
Example #20
static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
{
	char *path_nm = NULL;
	struct mm_struct *mm;
	struct file *exe_file;

	mm = get_task_mm(tsk);
	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	mmput(mm);

	if (exe_file) {
		path_nm = file_path(exe_file, buf, 255);
		fput(exe_file);
	}

done:
	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
Example #21
static int proc_get_exe(struct task_struct *task, char *exe)
{
    struct mm_struct *mm;
    struct file *exe_file;
    char *pathbuf, *path;
    int ret = 0;

    mm = get_task_mm(task);
    if (!mm)
        return -ENOENT;

    exe_file = get_mm_exe_file(mm);
    if (!exe_file) {
        ret = -ENOENT;
        goto put_mm;
    }

    pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
    if (!pathbuf) {
        ret = -ENOMEM;
        goto put_exe_file;
    }

    path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
    if (IS_ERR(path)) {
        ret = PTR_ERR(path);
        goto free_buf;
    }

    strncpy(exe, path, PROC_EXE_LEN);
    exe[PROC_EXE_LEN - 1] = '\0';	/* strncpy() does not guarantee termination */

free_buf:
    kfree(pathbuf);
put_exe_file:
    fput(exe_file);
put_mm:
    mmput(mm);
    return ret;
}
Example #22
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct mm_struct *mm = get_task_mm(task);
	struct vm_area_struct * map;
	loff_t l = *pos;

	if (!mm)
		return NULL;

	down_read(&mm->mmap_sem);
	map = mm->mmap;
	while (l-- && map)
		map = map->vm_next;
	if (!map) {
		up_read(&mm->mmap_sem);
		mmput(mm);
		if (l == -1)
			map = get_gate_vma(task);
	}
	return map;
}
Example #23
static char* sphinx_detect_package(struct task_struct* t, char* buffer)
{
	char* package = NULL;

	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(t);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;

	len = mm->arg_end - mm->arg_start;

	if (len > PAGE_SIZE)
		len = PAGE_SIZE;

	res = access_process_vm(t, mm->arg_start, buffer, len, 0);

	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > PAGE_SIZE - res)
				len = PAGE_SIZE - res;
			res += access_process_vm(t, mm->env_start, buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	if (res > 0)
		package = buffer;

	return package;
}
Example #24
struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	spu_init_csa(&ctx->csa);
	if (!ctx->csa.lscsa) {
		goto out_free;
	}
	spin_lock_init(&ctx->mmio_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	init_MUTEX(&ctx->run_sema);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	if (gang)
		spu_gang_add_ctx(gang, ctx);
	ctx->rt_priority = current->rt_priority;
	ctx->policy = current->policy;
	ctx->prio = current->prio;
	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}
Example #25
/* copy from fs/proc/base.c proc_pid_cmdline */
static int rkusb_get_task_cmdline(struct task_struct *task, 
        char * buffer , int size )
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > size)
		len = size;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	// If the nul at the end of args has been overwritten, then
	// assume application is using setproctitle(3).
	if (res > 0 && buffer[res-1] != '\0' && len < size) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > size - res)
				len = size - res;
			res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
Example #26
static inline void
ltt_enumerate_task_vm_maps(struct ltt_probe_private_data *call_data,
		struct task_struct *t)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(t);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			__trace_mark(0, list_vm_map, call_data,
					"pid %d start %lu end %lu flags %lu "
					"pgoff %lu inode %lu",
					t->pid,
					map->vm_start,
					map->vm_end,
					map->vm_flags,
					map->vm_pgoff << PAGE_SHIFT,
					ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}
Example #27
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages is updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped in success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 * @flags: IB_ODP_DMA_MAP_FOR_PREFETCH is used to indicate that the function
 *	   was called from the prefetch verb. IB_ODP_DMA_MAP_FOR_PAGEFAULT is
 *	   used to indicate that the function was called from a pagefault
 *	   handler.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq,
			      enum ib_odp_dma_map_flags flags)
{
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;
	struct page       **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages(owning_process, owning_mm,
				     user_virt, min_t(size_t,
						      (bcnt - 1 + PAGE_SIZE) /
						      PAGE_SIZE,
						      PAGE_SIZE /
						      sizeof(struct page *)),
				     access_mask & ODP_WRITE_ALLOWED_BIT, 0,
				     local_page_list, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(umem, k,
				local_page_list[j], access_mask,
				current_seq, flags);
			if (ret < 0)
				break;
			k++;
		}

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long) local_page_list);
	return ret;
}
Example #28
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_mutex);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_mutex);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;
		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead; the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead; the notifiers
			 * were already removed.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_mutex);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}
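The downgrade_write() step above is a general rwsem idiom: mutate shared state under the write lock, then atomically weaken it to a read lock so no other writer can slip in between. A minimal sketch of just that pattern:

static void update_then_observe(struct rw_semaphore *sem)
{
	down_write(sem);
	/* ... modify shared state exclusively ... */
	downgrade_write(sem);	/* atomically becomes a read lock */
	/* ... state is now stable; other readers may run concurrently ... */
	up_read(sem);
}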
Example #29
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);
	BUG_ON(!mm);

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->hugetlb = 0;
	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					  sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_mutex lock to synchronize.
	 */
	down_write(&context->umem_mutex);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	downgrade_write(&context->umem_mutex);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_mutex, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_mutex);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_mutex, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the mutex.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_mutex);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}
Example #30
static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct fimg2d_context *ctx;
	struct mm_struct *mm;

	ctx = file->private_data;

	switch (cmd) {
	case FIMG2D_BITBLT_BLIT:

		mm = get_task_mm(current);
		if (!mm) {
			fimg2d_err("no mm for ctx\n");
			return -ENXIO;
		}

		fimg2d_clk_on(ctrl);

		g2d_lock(&ctrl->drvlock);
		ctx->mm = mm;

		if (atomic_read(&ctrl->drvact) ||
				atomic_read(&ctrl->suspended)) {
			fimg2d_err("driver is unavailable, do sw fallback\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return -EPERM;
		}

		ret = fimg2d_add_command(ctrl, ctx, (struct fimg2d_blit __user *)arg);
		if (ret) {
			fimg2d_err("add command not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return ret;
		}

		ret = fimg2d_request_bitblt(ctrl, ctx);
		if (ret) {
			fimg2d_err("request bitblit not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return -EBUSY;
		}
		g2d_unlock(&ctrl->drvlock);
		fimg2d_clk_off(ctrl);
		mmput(mm);
		break;

	case FIMG2D_BITBLT_VERSION:
	{
		struct fimg2d_version ver;
		struct fimg2d_platdata *pdata;

		pdata = to_fimg2d_plat(ctrl->dev);
		ver.hw = pdata->hw_ver;
		ver.sw = 0;
		fimg2d_info("version info. hw(0x%x), sw(0x%x)\n",
				ver.hw, ver.sw);
		if (copy_to_user((void *)arg, &ver, sizeof(ver)))
			return -EFAULT;
		break;
	}
	case FIMG2D_BITBLT_ACTIVATE:
	{
		enum driver_act act;

		if (copy_from_user(&act, (void *)arg, sizeof(act)))
			return -EFAULT;

		g2d_lock(&ctrl->drvlock);
		atomic_set(&ctrl->drvact, act);
		if (act == DRV_ACT)
			fimg2d_info("fimg2d driver is activated\n");
		else
			fimg2d_info("fimg2d driver is deactivated\n");
		g2d_unlock(&ctrl->drvlock);
		break;
	}
	default:
		fimg2d_err("unknown ioctl\n");
		ret = -EFAULT;
		break;
	}

	return ret;
}
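The FIMG2D_BITBLT_BLIT arm above repeats the same unwind (g2d_unlock, fimg2d_clk_off, mmput) on every error path. A hedged sketch of the usual goto-based refactor of just that arm, keeping the driver's names and behavior (fimg2d_do_blit is a hypothetical helper; ctrl is the driver-global control block assumed by the example):

static long fimg2d_do_blit(struct fimg2d_context *ctx, unsigned long arg)
{
	struct mm_struct *mm = get_task_mm(current);
	long ret;

	if (!mm)
		return -ENXIO;
	fimg2d_clk_on(ctrl);
	g2d_lock(&ctrl->drvlock);
	ctx->mm = mm;

	if (atomic_read(&ctrl->drvact) || atomic_read(&ctrl->suspended)) {
		ret = -EPERM;	/* driver unavailable: caller does sw fallback */
		goto out;
	}
	ret = fimg2d_add_command(ctrl, ctx, (struct fimg2d_blit __user *)arg);
	if (ret)
		goto out;
	ret = fimg2d_request_bitblt(ctrl, ctx);
	if (ret)
		ret = -EBUSY;
out:
	g2d_unlock(&ctrl->drvlock);	/* single unwind for all paths */
	fimg2d_clk_off(ctrl);
	mmput(mm);
	return ret;
}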