Example No. 1
static int __init memtest_init(void){
	struct task_struct *p;
	struct vm_area_struct *temp;

	printk("the virtual memory areas (VMAs) are:\n");
	p = find_task_by_vpid(pid);
	//p = pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!p || !p->mm)
		return -ESRCH;
	temp = p->mm->mmap;

	while (temp) {
		/* vm_start/vm_end are unsigned long, so print them with %lx */
		printk("start:%lx\tend:%lx\n", temp->vm_start, temp->vm_end);
		temp = temp->vm_next;
	}

	return 0;
}
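The snippet above dereferences the looked-up task and its mm with no locking at all. A minimal sketch of the same walk with the usual protections, assuming pid is the same module parameter used above:

/* Hypothetical, safer variant of the example above. */
static int __init memtest_init_safe(void)
{
	struct task_struct *p;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	rcu_read_lock();
	p = find_task_by_vpid(pid);	/* only valid while the RCU read lock is held */
	if (p)
		get_task_struct(p);	/* pin the task so we can drop the lock */
	rcu_read_unlock();

	if (!p)
		return -ESRCH;

	mm = get_task_mm(p);		/* pins the mm even if the task exits */
	put_task_struct(p);
	if (!mm)
		return -ESRCH;

	down_read(&mm->mmap_sem);	/* the VMA list is protected by mmap_sem on these kernels */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		printk("start:%lx\tend:%lx\n", vma->vm_start, vma->vm_end);
	up_read(&mm->mmap_sem);

	mmput(mm);
	return 0;
}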
Example No. 2
static struct pid *good_sigevent(sigevent_t * event)
{
    struct task_struct *rtn = current->group_leader;

    if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
            (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
             !same_thread_group(rtn, current) ||
             (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
        return NULL;

    if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
        return NULL;

    return task_pid(rtn);
}
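good_sigevent() is the validation the kernel applies to the sigevent handed to timer_create(). A user-space sketch of the SIGEV_THREAD_ID case it accepts; field and macro availability varies by libc (the thread-id member may only be reachable as _sigev_un._tid, and _GNU_SOURCE is needed for SIGEV_THREAD_ID and SYS_gettid), so treat this as an assumption-laden illustration:

#define _GNU_SOURCE
#include <signal.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct sigevent sev;
	timer_t timerid;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;	/* the combination good_sigevent() accepts */
	sev.sigev_signo = SIGRTMIN;				/* must be a usable, non-kernel-only signal */
	sev._sigev_un._tid = syscall(SYS_gettid);		/* a thread in this thread group */

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1)
		return 1;
	return 0;
}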
Example No. 3
asmlinkage long our_sys_open(const char* file, int flags, int mode)
{
    long fd = 0;
    long uid, gid;
    struct log_path *p;
    struct passwd_entry *pe = NULL;
    struct process_ids *pids;
    struct task_struct *ptask;
    int is_log;
    int type;

    type = SYSCALL_OPEN;
    is_log = is_log_file(file);
    //if (is_relevant_file(file, &uid, &gid) == 1)
    if(is_log == 0)
    {
        if((flags & O_CREAT) > 0) {
            flags -= O_CREAT;
            fd = original_sys_open_call(file, flags, mode);
            flags += O_CREAT;
            if(fd < 0) {
                type = SYSCALL_CREAT;
                fd = original_sys_open_call(file, flags, mode);
                if(fd < 0) return fd; //Error opening file
            }
        }
        else {
            fd = original_sys_open_call(file, flags, mode);
            if(fd < 0) return fd; //Error opening file
        }

        pids = get_process_ids();
        pe = get_passwd_entry(pids->uid);
        p = find_path();
        ptask = find_task_by_vpid(pids->pid);
        LOG_OPEN(type, pe->username, pids->pid, pids->ppid, pids->audit, pids->paudit, ptask->comm, file, p->name, flags, mode, fd);
        kfree(p);
        kfree(pids);
    }
    else
    {
        //TODO: check process if for rsyslog
        fd = original_sys_open_call(file, flags, mode);
    }
    return fd;
}
Example No. 4
static int krg_get_pid_cap(pid_t pid, kernel_krg_cap_t *resulting_cap)
{
	struct task_struct *tsk;
	int retval = -ESRCH;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		retval = krg_get_cap(tsk, resulting_cap);
	rcu_read_unlock();
#ifdef CONFIG_KRG_PROC
	if (!tsk)
		retval = remote_get_pid_cap(pid, resulting_cap);
#endif

	return retval;
}
Example No. 5
void notifyWapiApplication()
{
	struct task_struct *p;

	if (pid_wapi != 0) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
		p = find_task_by_pid(pid_wapi);
#else
		p = find_task_by_vpid(pid_wapi);
#endif
		if (p) {
			send_sig(SIGUSR1, p, 0);
		} else {
			pid_wapi = 0;
		}
	}
}
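Several of the examples guard this lookup with LINUX_VERSION_CODE checks, as above. A small hypothetical wrapper (the name compat_find_task is this document's own, not from any snippet) keeps the version check out of the call sites:

#include <linux/version.h>
#include <linux/sched.h>

/* Hypothetical helper: resolve a pid number to a task_struct on old and new kernels.
 * The caller is expected to hold rcu_read_lock() (or tasklist_lock on old kernels). */
static inline struct task_struct *compat_find_task(pid_t nr)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	return find_task_by_pid(nr);		/* global pid lookup on pre-namespace kernels */
#else
	return find_task_by_vpid(nr);		/* lookup in the caller's pid namespace */
#endif
}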
Example No. 6
static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;
	int sig = event->sigev_signo;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
	     sig_kernel_coredump(sig)))
		return NULL;

	return task_pid(rtn);
}
Example No. 7
asmlinkage long our_sys_fchown(int filefd, uid_t owner, gid_t group)
{
    long fd = 0;
    long uid;
    long audit, pid;
    struct task_struct *task = NULL;
    struct passwd_entry *pe = NULL;

    fd = original_sys_fchown_call(filefd, owner, group);
    if(fd >= 0) {
        pid = current->pid;
        uid = current_uid();
        audit = get_audit_id();
        pe = get_passwd_entry(uid);
        task = find_task_by_vpid(pid);
        LOG_FCHOWN(SYSCALL_FCHOWN, pe->username, pid, audit, task->comm, filefd, owner, group);
    }
    return fd;
}
Example No. 8
asmlinkage long our_sys_fchmod(int filefd, mode_t mode)
{
    long fd = 0;
    long uid;
    long audit, pid;
    struct task_struct *task = NULL;
    struct passwd_entry *pe = NULL;

    fd = original_sys_fchmod_call(filefd, mode);
    if(fd >= 0) {
        pid = current->pid;
        uid = current_uid();
        pe = get_passwd_entry(uid);
        task = find_task_by_vpid(pid);
        audit = get_audit_id();

        LOG_FCHMOD(SYSCALL_FCHMOD, pe->username, pid, audit, task->comm, filefd, mode);
    }
    return fd;
}
Example No. 9
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	/*
	 * Tracing init is not allowed.
	 */
	if (pid == 1)
		return ERR_PTR(-EPERM);

	read_lock(&tasklist_lock);
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);

	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}
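A hypothetical caller, sketched here to show the contract: the helper returns either a referenced task or an ERR_PTR(), so the caller checks with IS_ERR() and drops the reference with put_task_struct() when it is done:

/* Illustrative only; not part of the original file. */
static int inspect_task(pid_t pid)
{
	struct task_struct *child;

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child))
		return PTR_ERR(child);		/* -EPERM for init, -ESRCH if not found */

	printk(KERN_INFO "task %d comm %s\n", task_pid_nr(child), child->comm);

	put_task_struct(child);			/* balance the get_task_struct() above */
	return 0;
}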
Example No. 10
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
			   compat_size_t __user *len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	const struct cred *cred = current_cred(), *pcred;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (!pid)
		head = current->compat_robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		read_lock(&tasklist_lock);
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->compat_robust_list;
		read_unlock(&tasklist_lock);
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	read_unlock(&tasklist_lock);

	return ret;
}
Example No. 11
asmlinkage long sys_use_signal(int pid) {

    struct siginfo info;
    struct task_struct* task;
    int ret;

    task = find_task_by_vpid(pid);
    if(task == NULL) {
        printk(KERN_WARNING "use_signal: no task found for pid: %d\n", pid);
        return -ESRCH;
    }

    /*** make a signal ***/
    memset(&info, 0, sizeof(struct siginfo));
    info.si_signo = SIGUSR1;
    info.si_code = SI_KERNEL;

    /*** send the signal to the target process ***/
    ret = send_sig_info(SIGUSR1, &info, task);
    if(ret < 0) {
        printk(KERN_WARNING "send_sig_info: error sending signal (ret: %d)\n", ret);
    }
    else {
        printk(KERN_DEBUG "sent signal to pid: %d\n", task->pid);
    }

    return 0;
}
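The syscall above only delivers SIGUSR1; the receiving process still has to install a handler. A user-space sketch, assuming the syscall was wired up under a number exported here as __NR_use_signal (hypothetical, it depends on the kernel build):

#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_use_signal 351	/* hypothetical syscall number for this custom kernel */

static void on_sigusr1(int sig)
{
	/* write() is async-signal-safe; printf() is not */
	write(1, "got SIGUSR1\n", 12);
}

int main(void)
{
	signal(SIGUSR1, on_sigusr1);		/* install the handler first */
	syscall(__NR_use_signal, getpid());	/* the kernel sends SIGUSR1 back to us */
	/* the pending signal is delivered as the syscall returns to user space */
	return 0;
}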
Example No. 12
asmlinkage long sys_basic_syscall(int pid, int* pArray){

  /* pArray points at user memory; a hardened version would use copy_to_user()
     instead of writing through the pointer directly. */
  pArray[0] = 0;
  pArray[1] = 0;
  pArray[2] = 0;
  pArray[3] = 0;

  if (sys_kill(pid, 0) == 0){
    struct task_struct *pid_info = find_task_by_vpid((pid_t) pid);

    if (pid_info == NULL){
      printk("Basic Syscall was unsuccessful\n");
      return -ESRCH;
    }

    pArray[0] = pid_info->numFork;
    pArray[1] = pid_info->numVfork;
    pArray[2] = pid_info->numExecve;
    pArray[3] = pid_info->numClone;
    printk("Basic Syscall was successful\n");
    return 0;
  }
  else{
    printk("Basic Syscall was unsuccessful\n");
    return -1;
  }
}
Example No. 13
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
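check_clock() vets the clockid_t that the POSIX CPU-timer syscalls receive. From user space the usual way to obtain such a per-process CPU clock id is clock_getcpuclockid(); a small sketch (plain POSIX calls, may need -lrt on older libcs):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	clockid_t cid;
	struct timespec ts;

	/* ask for the CPU-time clock of this process; the encoded id that comes
	   back is the kind of value the kernel-side check_clock() validates */
	if (clock_getcpuclockid(getpid(), &cid) != 0)
		return 1;

	if (clock_gettime(cid, &ts) != 0)
		return 1;

	printf("process CPU time so far: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}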
Example No. 14
COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
Example No. 15
static int rp_sched_process_fork_leave(struct kretprobe_instance *ri, struct pt_regs *regs)
{
    pid_t pid = (pid_t)regs_return_value(regs);

    if (pid) {
        struct task_struct *task = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
        struct pid *p_pid = find_get_pid(pid);
        task = pid_task(p_pid, PIDTYPE_PID);
        put_pid(p_pid);
#else /* < 2.6.31 */
        rcu_read_lock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
        task = find_task_by_vpid(pid);
#else /* < 2.6.24 */
        task = find_task_by_pid(pid);
#endif /* 2.6.24 */
        rcu_read_unlock();
#endif /* 2.6.31 */
        vtss_target_fork(current, task);
    }
    return 0;
}
Example No. 16
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
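For reference, the user-space side of this syscall is just setpgid(); a minimal sketch of the common pattern in which a new job is given its own process group:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 0, 0 means "make the calling process the leader of its own group",
	   which maps to the pgid == pid case handled above */
	if (setpgid(0, 0) == -1) {
		perror("setpgid");
		return 1;
	}
	printf("pid %d now leads process group %d\n", (int)getpid(), (int)getpgid(0));
	return 0;
}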
Example No. 17
/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return less bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
				  unsigned long liovcnt,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	ssize_t bytes_copied_loop;
	ssize_t bytes_copied = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	unsigned long iov_l_curr_idx = 0;
	size_t iov_l_curr_offset = 0;
	ssize_t iov_len;

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct pages *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
			process_pages, mm, task, vm_write, &bytes_copied_loop);
		bytes_copied += bytes_copied_loop;
		if (rc != 0) {
			/* If we have managed to copy any data at all then
			   we return the number of bytes copied. Otherwise
			   we return the error code */
			if (bytes_copied)
				rc = bytes_copied;
			goto put_mm;
		}
	}

	rc = bytes_copied;
put_mm:
	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
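This helper backs the process_vm_readv()/process_vm_writev() syscalls. A user-space sketch of the read direction, kept self-contained by targeting the calling process itself (needs _GNU_SOURCE and glibc 2.15 or later):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char secret[] = "hello from the remote side";
	char local[64] = { 0 };
	struct iovec lvec = { .iov_base = local,  .iov_len = sizeof(local) - 1 };
	struct iovec rvec = { .iov_base = secret, .iov_len = sizeof(secret) };

	/* reading from our own pid avoids needing ptrace rights over another process */
	ssize_t n = process_vm_readv(getpid(), &lvec, 1, &rvec, 1, 0);

	printf("read %zd bytes: %s\n", n, local);
	return 0;
}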
Example No. 18
/* tid is the actual task/thread id (née pid, stored as ->pid),
   pid/tgid is that 2.6 thread group id crap (stored as ->tgid) */
int vperfctr_attach(int tid, int creat)
{
	struct file *filp;
	struct task_struct *tsk;
	struct vperfctr *perfctr;
	int err;
	int fd;

	filp = vperfctr_get_filp();
	if (!filp)
		return -ENOMEM;
	err = fd = get_unused_fd();
	if (err < 0)
		goto err_filp;
	perfctr = NULL;
	if (creat) {
		perfctr = get_empty_vperfctr(); /* may sleep */
		if (IS_ERR(perfctr)) {
			err = PTR_ERR(perfctr);
			goto err_fd;
		}
	}
	tsk = current;
	if (tid != 0 && tid != task_pid_vnr(tsk)) { /* remote? */
		vperfctr_lock_find_task_by_vpid();
		tsk = find_task_by_vpid(tid);
		if (tsk)
			get_task_struct(tsk);
		vperfctr_unlock_find_task_by_vpid();
		err = -ESRCH;
		if (!tsk)
			goto err_perfctr;
		err = ptrace_check_attach(tsk, 0);
		if (err < 0)
			goto err_tsk;
	}
	if (creat) {
		/* check+install must be atomic to prevent remote-control races */
		vperfctr_task_lock(tsk);
		if (!tsk->thread.perfctr) {
			perfctr->owner = tsk;
			tsk->thread.perfctr = perfctr;
			err = 0;
		} else
			err = -EEXIST;
		vperfctr_task_unlock(tsk);
		if (err)
			goto err_tsk;
	} else {
		perfctr = tsk->thread.perfctr;
		/* PERFCTR_ABI and PERFCTR_INFO don't need the perfctr.
		   Hence no non-NULL check here. */
	}
	filp->private_data = perfctr;
	if (perfctr)
		atomic_inc(&perfctr->count);
	if (tsk != current)
		put_task_struct(tsk);
	fd_install(fd, filp);
	return fd;
 err_tsk:
	if (tsk != current)
		put_task_struct(tsk);
 err_perfctr:
	if (perfctr)	/* can only occur if creat != 0 */
		put_vperfctr(perfctr);
 err_fd:
	put_unused_fd(fd);
 err_filp:
	fput(filp);
	return err;
}
Example No. 19
static ssize_t shproc_sysfs_store( struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count )
{
    struct task_struct *p;
    int pid, len;
    unsigned int i;
    int ret = -1;
    char *buff = NULL;
    char *pathname;

    if( buf == NULL ){
        return ret;
    }

    pid = simple_strtol( buf, NULL, 10 );
    if( pid <= 0 ){
        printk( "[vold]pid(%d) error\n", pid );
        return ret;
    }

    buff = kmalloc( PAGE_SIZE, GFP_KERNEL );
    if( !buff ){
        printk( "[vold]kmalloc error\n" );
        return ret;
    }

    rcu_read_lock();

    p = find_task_by_vpid( (pid_t)pid );
    if( !p ){
        rcu_read_unlock();
        kfree( buff );
        printk( "[vold]task_struct get error\n" );
        return ret;
    }

    for( i = 0; i < p->files->fdt->max_fds; i++ ){
        memset( buff, 0x00, PAGE_SIZE );
        if( p->files->fdt->fd[i] == NULL ){
            break;
        }
        pathname = d_path( &(p->files->fdt->fd[i]->f_path) ,buff, PAGE_SIZE );
        ret = PTR_ERR(pathname);
        if( IS_ERR(pathname) ){
            break;
        }
        len = strlen( pathname );
        if( len > 7 ){
            len = 7;
        }
        if( !strncmp(pathname, "/sdcard", len) ){
            printk( "[vold]find pid:%d\n", pid );
            ret = len;
            break;
        }
    }

    rcu_read_unlock();
    kfree( buff );
    return ret;
}
Example No. 20
asmlinkage int sys_setProcessBudget(pid_t pid, unsigned long budget, struct timespec period) {

    struct task_struct * curr;    
    struct task_struct * temp;
    unsigned long temp_time;
    struct cpufreq_policy * lastcpupolicy = cpufreq_cpu_get(0);
    unsigned long max_frequency = (lastcpupolicy->cpuinfo).max_freq / 1000 ; //Getting MAX frequency in MHz
    unsigned long sysclock_freq = 0;
    struct timespec task_budget;
    unsigned int ret_freq = 0, temp_freq;
    int ret_val;
    ktime_t p;
    struct task_ct_struct * list;
    
    //Error checks for input arguments
    if (!((period.tv_sec > 0) || (period.tv_nsec > 0))) {
	printk("Invalid time period\n");
	return -EINVAL;
    }

    if (pid <= 0) {
	printk("Invalid PID\n");
	return -EINVAL;
    }

    //checking admission
    write_lock(&tasklist_lock);

    //First get budget from cycles in (millions) / current CPU frequency in (Mhz) * 1000 ns
    temp_time = (budget / (max_frequency)) * 1000;
    task_budget = ns_to_timespec(temp_time);

    printk("Time in ns = %lu and frequency = %lu Mhz\n",temp_time,max_frequency);
    printk("Taks struct budget %ldsec and %ldnsec\n",task_budget.tv_sec,task_budget.tv_nsec);
    
    if(timespec_compare(&task_budget, &period) >= 0){
    	printk("Budget >= Period\n");
	write_unlock(&tasklist_lock);
    	return -EINVAL;
    }
    
    //We need to do it only in the case when we are running tasks without bin packing
    if(is_bin_packing_set == 0){
	if(check_admission(task_budget, period) == 0){
	    printk("Cant add task to the taskset\n");
	    write_unlock(&tasklist_lock);
	    return -EPERM;
	} 
    }

    //Finding task struct given its pid
    curr = (struct task_struct *) find_task_by_vpid(pid);
    if(curr == NULL){
	printk("Couldn't find task\n");
	write_unlock(&tasklist_lock);
	return -ESRCH;
    }

    //If the task already had budget we are essentially changing the budget
    //So the task should go in a new place in the global list.
    //Thus, we delete the task and then reinsert it in the list.
    if(curr->is_budget_set == 1){
	del_periodic_task(&(curr->periodic_task));
    }

    //First check if a period timer already exists from a previous invocation of this syscall.
    //If yes we cancel it; hrtimer_cancel() returns 0 or 1 once the timer is successfully cancelled.
    if (((curr -> time_period).tv_sec > 0) || ((curr -> time_period).tv_nsec > 0)) {
	hrtimer_cancel(&(curr->period_timer));
    }
    //If the timer is being initialized for the first time,
    //initialize it and set its callback.
    else {
	hrtimer_init(&(curr->period_timer),CLOCK_MONOTONIC,HRTIMER_MODE_REL);
	(curr->period_timer).function = &period_timer_callback;
    }

    //First check if a budget timer already exists from a previous invocation of this syscall.
    //If yes we cancel it; hrtimer_cancel() returns 0 or 1 once the timer is successfully cancelled.
    if (((curr -> budget_time).tv_sec > 0) || ((curr -> budget_time).tv_nsec > 0)) {
	hrtimer_cancel(&(curr->budget_timer));
    }
    //If the timer is being initialized for the first time,
    //initialize it and set its callback.
    else {
	hrtimer_init(&(curr->budget_timer),CLOCK_MONOTONIC,HRTIMER_MODE_REL);
	(curr->budget_timer).function = &budget_timer_callback;
    }

    //Setting flag
    if(is_bin_packing_set == 0){
	curr->is_budget_set = 1;
	temp = curr;
	do {
	    //Setting flag
	    temp->is_budget_set = 1;
	}while_each_thread(curr,temp);
    }
Example No. 21
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			if (!who)
				user = current_user();
			else
				user = find_user(who);

			if (!user)
				break;

			do_each_thread(g, p) {
				if (__task_cred(p)->uid != who)
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			} while_each_thread(g, p);
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}
Example No. 22
static ssize_t package_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct task_struct* process;
	struct mm_struct* mm;
	struct vm_area_struct* vm_area;
	struct file* file;
	int i;
	char *packbuf = NULL, *pack = NULL;
	char *binbuf = NULL, *bin = NULL;
	char *pathbuf = NULL, *path = NULL;
	char* p_data = NULL;
	int data_len = 0;
	int failed = 0;

	if(package_held_pid == -1) return 0;
	if(current->tgid != digest_manager_pid) return 0;

	sphinx_printk("held pid 2: %d\n", package_held_pid);

	do
	{
		packbuf = kmalloc(PATH_MAX, GFP_KERNEL);

		if(packbuf == NULL) break;

		binbuf = kmalloc(PATH_MAX, GFP_KERNEL);

		if(binbuf == NULL) break;

		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);

		if(pathbuf == NULL) break;

		p_data = kmalloc(DGSTMGRD_DATALIST_LENGTH, GFP_KERNEL);

		if(p_data == NULL) break;

		/* --- */

		memset(packbuf, 0, PATH_MAX);
		memset(binbuf, 0, PATH_MAX);
		memset(pathbuf, 0, PATH_MAX);
		memset(p_data, 0, DGSTMGRD_DATALIST_LENGTH);

		read_lock(&tasklist_lock);

		do
		{
			process = find_task_by_vpid(package_held_pid);

			if(process == NULL) break;
			if(process->mm == NULL) break;
			if(process->mm->mmap == NULL) break;

			bin = sphinx_detect_binary(process, binbuf);

			if(bin == NULL) break;

			if(!(	strstr(bin, LOCAL_SPHINX_DIR_SYSTEMBIN) == bin ||
				strstr(bin, LOCAL_SPHINX_DIR_SYSTEMSHBIN) == bin ||
				strstr(bin, LOCAL_SPHINX_DIR_VENDORBIN) == bin ||
				strstr(bin, LOCAL_SPHINX_DIR_SYSTEMAPP) == bin ||
				strstr(bin, LOCAL_SPHINX_DIR_VENDORAPP) == bin ||
				(strstr(bin, LOCAL_SPHINX_DIR_DATAAPP) == bin && strstr(bin, LOCAL_SPHINX_SUFFIX_APK) != NULL) ||
				(strstr(bin, LOCAL_SPHINX_DIR_DATAAPPPRIVATE) == bin && strstr(bin, LOCAL_SPHINX_SUFFIX_APK) != NULL )
			))
			{
				break;
			}

			if(strcmp(bin, LOCAL_SPHINX_PATH_APPPROCESS) == 0)
			{
				pack = sphinx_detect_package(process, packbuf);

				if(pack == NULL) break;
			}

			mm = process->mm;
			vm_area = mm->mmap;

			for(i = 0; i < mm->map_count; i++, vm_area = vm_area->vm_next)
			{
				if(vm_area == NULL) break;

				if(vm_area->vm_flags & VM_EXEC)
				{
					file = vm_area->vm_file;
		
					if(file != NULL)
					{
						path = d_path(&file->f_path, pathbuf, PATH_MAX);

						if(path == NULL || IS_ERR(path))
						{
							failed = 1;

							break;
						}
					
						sphinx_printk("path : %s\n", path);

						if(strcmp(path, "/dev/ashmem/dalvik-jit-code-cache") == 0) continue;
						if(strstr(path, LOCAL_SPHINX_DIR_SYSTEM) == path) continue;
						if(strstr(path, LOCAL_SPHINX_DIR_DATADATA) == path && strstr(path, "/lib/") != NULL && strstr(path, ".so") != NULL)
						{
							int l = strlen(path);

							if(data_len + l + 1 >= DGSTMGRD_DATALIST_LENGTH)
							{
								ret = 0;

								break;
							}

							snprintf(p_data + data_len, DGSTMGRD_DATALIST_LENGTH - 1, "\n%s", path);

							data_len = data_len + l + 1;
						}
					}
					else
					{
						const char *name = arch_vma_name(vm_area);

						if(name == NULL)
						{
							if(vm_area->vm_flags & VM_MAYSHARE)
							{
								failed = 1;

								break;
							}

							continue;
						}

						sphinx_printk("name : %s\n", name);

						if(strcmp(name, "[vectors]") != 0)
						{
							failed = 1;

							break;
						}
					}
				}
			}

			if(failed == 1) break;

			ret = strlen(bin);

			if(ret > 0)
			{
				sprintf(buf, "%s", bin);

				if(data_len > 0)
				{
					sprintf(buf + ret, "%s", p_data);

					ret += data_len;
				}
			}
		}
		while(0);

		read_unlock(&tasklist_lock);
	}
	while(0);


	if(pathbuf != NULL)
	{
		kfree(pathbuf);
		pathbuf = NULL;
	}

	if(binbuf != NULL)
	{
		kfree(binbuf);
		binbuf = NULL;
	}

	if(packbuf != NULL)
	{
		kfree(packbuf);
		packbuf = NULL;
	}

	if(p_data != NULL)
	{
		kfree(p_data);
		p_data = NULL;
	}

	/* --- */

	package_held_pid = -1;

	sphinx_printk("package_show returns : %d\n", ret);

	return ret;
}
Example No. 23
/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}
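A sketch of how such a helper is typically used at the call site (modelled on sched_setaffinity()-style code, not taken from the snippet itself): the lookup happens under rcu_read_lock(), and a reference is taken before the lock is dropped:

/* Illustrative caller, assuming find_process_by_pid() as defined above. */
static int do_something_with_pid(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(p);		/* keep the task alive after RCU is released */
	rcu_read_unlock();

	/* ... operate on p here ... */

	put_task_struct(p);
	return 0;
}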
Example No. 24
SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->cred_guard_mutex,
			&task2->signal->cred_guard_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->cred_guard_mutex,
		    &task2->signal->cred_guard_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}
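From user space, kcmp() is reached through syscall(); a short sketch that checks whether two processes share an mm (KCMP_VM), assuming a libc that exposes SYS_kcmp and <linux/kcmp.h>, and ptrace-level access to both targets:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

int main(void)
{
	pid_t pid1 = getpid();
	pid_t pid2 = getppid();

	/* 0 means the two tasks share the resource, a positive value gives an
	   ordering when they differ, and -1 with errno signals an error */
	long ret = syscall(SYS_kcmp, pid1, pid2, KCMP_VM, 0, 0);

	printf("kcmp(%d, %d, KCMP_VM) = %ld\n", (int)pid1, (int)pid2, ret);
	return 0;
}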
Example No. 25
/* # echo @ut_type @ut_prio @ut_tid > utest */
static ssize_t pts_utest_write(struct file *flip, const char *ubuf,
			       size_t cnt, loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ut_type, ut_tid, ut_prio;
	int ret, i = 0, j;
	struct task_struct *p;


	if (cnt >= sizeof(buf))
		copy_size = sizeof(buf) - 1;

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;
	/* terminate only after the copy, otherwise copy_from_user() overwrites the NUL */
	buf[copy_size] = '\0';

	do { } while (buf[i++] != ' ');
	buf[(i - 1)] = '\0';
	ret = strict_strtoul(buf, 10, &val);
	ut_type = (int)val;

	j = i;
	do { } while (buf[i++] != ' ');
	buf[(i - 1)] = '\0';
	ret = strict_strtoul((const char *)(&buf[j]), 10, &val);
	ut_prio = (int)val;

	ret = strict_strtoul((const char *)(&buf[i]), 10, &val);
	ut_tid = (int)val;

	printk("%s: unit test %s tid %d prio %d j %d i %d", __func__,
	       (ut_type == PTS_USER) ? "user" :
		 ((ut_type == PTS_KRNL) ? "kernel" :
		   ((ut_type == PTS_BNDR) ? "binder" : "unknown")),
	       ut_tid, ut_prio, j, i);


	/* start to test api */
	p = find_task_by_vpid(ut_tid);
	if (!p) goto utest_out;

	if ((ut_prio >= 0) && (ut_prio < MAX_RT_PRIO)) {
		struct sched_param param;

		/* sched_priority is rt priority rather than effective one */
		ut_prio = MAX_RT_PRIO-1 - ut_prio;
		param.sched_priority = ut_prio | MT_ALLOW_RT_PRIO_BIT;

		switch (ut_type) {
		case PTS_USER:
			sched_setscheduler_syscall(p, SCHED_RR, &param);
			break;
		case PTS_KRNL:
			sched_setscheduler_nocheck(p, SCHED_RR, &param);
			break;
		case PTS_BNDR:
			sched_setscheduler_nocheck_binder(p, SCHED_RR, &param);
			break;
		default:
			break;
		}	
	} else { /* assume normal */
		switch (ut_type) {
		case PTS_USER:
			set_user_nice_syscall(p, PRIO_TO_NICE(ut_prio));
			break;
		case PTS_KRNL:
			set_user_nice(p, PRIO_TO_NICE(ut_prio));
			break;
		case PTS_BNDR:
			set_user_nice_binder(p, PRIO_TO_NICE(ut_prio));
			break;
		default:
			break;
		}	
	}

utest_out:
	return cnt;
}
Example No. 26
asmlinkage long our_sys_read(unsigned int fd, char __user *buf, size_t count)
{
    struct file *f;
    long result;
    struct passwd_entry *pe;
    char *hexdata;
    char *p_hexdata;
    unsigned int value;
    int i;
    long offset;
    char *data;
    struct task_struct *atask;
    char *username;
    int is_sock;
    struct process_ids *pids;

    pids = get_process_ids();
    atask = find_task_by_vpid(pids->audit);

    if(atask != NULL && atask->cred != NULL && atask->cred->euid != 0) {

        if(pids->uid != 0) {
            pe = get_passwd_entry(pids->uid);
            username = pe->username;
        }
        else {
            pe = get_passwd_entry(atask->cred->euid);
            username = pe->username_root;
        }

        is_sock = 0;
        /* Get file offset */
        rcu_read_lock();
        f = fcheck_files(current->files, fd);
        if(f) {
            offset = f->f_pos;
            if(((f->f_path.dentry->d_inode->i_mode) & S_IFMT) == S_IFSOCK) is_sock = 1;
        }
        else {
            offset = 0;
        }
        rcu_read_unlock();

        result = original_sys_read_call(fd, buf, count);
        count = (size_t) result;

        if(result > 0) {
            data = kmalloc((count + 1) * sizeof(char), GFP_KERNEL);
            /* buf is a user-space pointer, so copy only the bytes actually read,
               and do it with copy_from_user() rather than memcpy() */
            if (copy_from_user(data, buf, count))
                count = 0;
            data[count] = '\0';

            hexdata = kmalloc((count + 1) * 2 * sizeof(char), GFP_KERNEL);
            p_hexdata = hexdata;
            for(i = 0; i < count; i++) {
                value = data[i];
                value = value & 255;
                sprintf(hexdata + (i * 2), "%02X", value);
            }
            hexdata[count * 2] = '\0';

            if(is_sock) LOG_S_RDWR(SYSCALL_READ, pe->username, pids->pid, pids->ppid, pids->audit, pids->paudit, fd, offset, hexdata);
            else LOG_RDWR(SYSCALL_READ, pe->username, pids->pid, pids->ppid, pids->audit, pids->paudit, fd, offset, hexdata);

            kfree(hexdata);
            kfree(data);
        }
    }
    else {
        result = original_sys_read_call(fd, buf, count);
    }
    kfree(pids);
    return result;
}
Example No. 27
/* Snapshot the Linux specific information */
static int snapshot_os(struct kgsl_device *device,
	void *snapshot, int remain, void *priv)
{
	struct kgsl_snapshot_linux *header = snapshot;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct task_struct *task;
	pid_t pid;
	int hang = (int) priv;
	int ctxtcount = 0;
	int size = sizeof(*header);

	/* Figure out how many active contexts there are - these will
	 * be appended on the end of the structure */

	idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);

	size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);

	/* Make sure there is enough room for the data */
	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "OS");
		return 0;
	}

	memset(header, 0, sizeof(*header));

	header->osid = KGSL_SNAPSHOT_OS_LINUX;

	header->state = hang ? SNAPSHOT_STATE_HUNG : SNAPSHOT_STATE_RUNNING;

	/* Get the kernel build information */
	strlcpy(header->release, utsname()->release, sizeof(header->release));
	strlcpy(header->version, utsname()->version, sizeof(header->version));

	/* Get the Unix time for the timestamp */
	header->seconds = get_seconds();

	/* Remember the power information */
	header->power_flags = pwr->power_flags;
	header->power_level = pwr->active_pwrlevel;
	header->power_interval_timeout = pwr->interval_timeout;
	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
	header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);

	/* Future proof for per-context timestamps */
	header->current_context = -1;

	/* Get the current PT base */
	header->ptbase = kgsl_mmu_get_current_ptbase(device);
	/* And the PID for the task leader */
	pid = header->pid = kgsl_mmu_get_ptname_from_ptbase(header->ptbase);

	task = find_task_by_vpid(pid);

	if (task)
		get_task_comm(header->comm, task);

	header->ctxtcount = ctxtcount;

	/* append information for each context */
	_ctxtptr = snapshot + sizeof(*header);
	idr_for_each(&device->context_idr, snapshot_context_info, NULL);

	/* Return the size of the data segment */
	return size;
}
Example No. 28
static void cpufreq_limit_work(struct work_struct *work)
{
	struct cpufreq_limit_data *limit = container_of(work,
						struct cpufreq_limit_data, limit_work);
	struct task_struct *p = NULL, *t = NULL;
	char **s = limit->limit_name;
	char *comm = task_comm;
	int cpu, i = 0, len = limit->limit_num;

	if (!test_bit(STATE_RESUME_DONE, &limit->resume_state))
		goto _exit;

	task_comm[0] = 0;
	for_each_possible_cpu(cpu) {
		p = curr_task(cpu);
		t = find_task_by_vpid(task_tgid_vnr(p));	/* parent */
		if (t)
			p = t;

		if (p->flags & PF_KTHREAD)
			continue;

		if (!likely(p->mm))
			continue;

		cpufreq_cmdline(p, comm);
		pr_debug("cpu %d  current (%d) %s\n", cpu, p->pid, comm);

		for (i = 0; len > i; i++) {
			/* boost : task is running */
			if (!strncmp(comm, s[i], strlen(s[i]))) {
				limit->time_stamp = 0;
				cpufreq_set_max_frequency(limit, 1);
				pr_debug(": run %s\n", s[i]);
				goto _exit;
			}
		}
	}

	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;

		if (!likely(p->mm))
			continue;

		cpufreq_cmdline(p, comm);
		for (i = 0; len > i; i++) {
			if (!strncmp(comm, s[i], strlen(s[i]))) {
				pr_debug("detect %s:%s [%ld.%ld ms]\n",
					s[i], comm, limit->time_stamp, limit->time_stamp%1000);

				limit->current_time_stamp = ktime_to_ms(ktime_get());
				if (0 == limit->time_stamp) {
					limit->time_stamp = limit->current_time_stamp;
				} else {
					/* restore : task is sleep status */
					if ((limit->current_time_stamp - limit->time_stamp) > limit->op_timeout)
						cpufreq_set_max_frequency(limit, 0);
				}
				goto _exit;
			}
		}
	}

	/* restore : not find task */
	cpufreq_set_max_frequency(limit, 0);
	limit->time_stamp = 0;

_exit:
	hrtimer_start(&limit->limit_timer, ms_to_ktime(limit->timer_duration),
		HRTIMER_MODE_REL_PINNED);
}
Example No. 29
/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return less bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	return -ENOSYS; // PaX: until properly audited

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len <= 0)
			continue;
		nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
				(unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
		nr_pages = max(nr_pages, nr_pages_iov);
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct pages *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
		rc = -EPERM;
		goto put_task_struct;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
Example No. 30
NTSTATUS
STDCALL
create_thread(OUT PHANDLE ThreadHandle,
		IN ACCESS_MASK DesiredAccess,
		IN POBJECT_ATTRIBUTES ObjectAttributes  OPTIONAL,
		IN HANDLE ProcessHandle,
		IN struct eprocess* TargetProcess,   			/* FIXME */
		OUT PCLIENT_ID ClientId,
		IN PCONTEXT ThreadContext,
		IN PINITIAL_TEB InitialTeb,
		IN BOOLEAN CreateSuspended,
		IN PKSTART_ROUTINE StartRoutine OPTIONAL,   	/* FIXME */
		IN PVOID StartContext OPTIONAL)            	/* FIXME */
{
	struct eprocess * process;
	struct ethread * thread, *first_thread, *cur_thread;
	struct task_struct *new_tsk = NULL;
	unsigned clone_flags = 0;
	PTEB teb_base;
	long   cpid;
	HANDLE hthread;
	NTSTATUS status = STATUS_SUCCESS;

	ktrace("\n");

	if (!(cur_thread = get_current_ethread())) {
		return STATUS_INVALID_PARAMETER;
	}

	/* current still be regarded */
	if (ProcessHandle && ProcessHandle != NtCurrentProcess()) {
		status = ref_object_by_handle(ProcessHandle,
				PROCESS_ALL_ACCESS,
				process_object_type,
				KernelMode,
				(PVOID *)&process,
				NULL);
		if (!NT_SUCCESS(status))
			return status;
	} else {
		if (TargetProcess)
			process = (struct eprocess *)TargetProcess;
		else
			process = cur_thread->threads_process;
		ref_object(process);
	}

	if (!process->fork_in_progress) {
		/* second and after */
		if (!ProcessHandle || ProcessHandle == NtCurrentProcess())
			first_thread = cur_thread;
		else
			first_thread = get_first_thread(process);
		if (!first_thread) {
			status = STATUS_INVALID_PARAMETER;
			goto cleanup_process;
		}
		
		clone_flags = SIGCHLD | CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_PARENT | CLONE_THREAD;

		cpid = do_fork_from_task(first_thread->et_task, CREATE_THREAD, clone_flags,
				first_thread->tcb.trap_frame->esp, (struct pt_regs *)first_thread->tcb.trap_frame,
			       	0, NULL, NULL);
		if (cpid < 0) {
			status = STATUS_INVALID_PARAMETER;
			goto cleanup_process;
		}

		new_tsk = find_task_by_vpid(cpid);
		
		memset(&new_tsk->thread.tls_array, 0, sizeof(new_tsk->thread.tls_array));
		set_tls_array(&new_tsk->thread, first_thread->et_task->thread.gs >> 3, 
				(unsigned long)InitialTeb->StackBase + 0x800, 0xfffff);

		/* allocate a Win32 thread object */
		status = create_object(KernelMode,
				thread_object_type,
				ObjectAttributes,
				KernelMode,
				NULL,
				sizeof(struct ethread),
				0,
				0,
				(PVOID *)&thread);
		if (!NT_SUCCESS(status))
			goto cleanup_tsk;

		ethread_init(thread, process, new_tsk);
		deref_object(thread);

	} else {