Example #1
asmlinkage int exe$creprc(unsigned int *pidadr, void *image, void *input, void *output, void *error, struct _generic_64 *prvadr, unsigned int *quota, void *prcnam, unsigned int baspri, unsigned int uic, unsigned short int mbxunt, unsigned int stsflg,...) {
  unsigned long stack_here;
  struct _pcb * p, * cur;
  int retval;

  struct dsc$descriptor * imd = image, * ind = input, * oud = output, * erd = error;

  unsigned long clone_flags=CLONE_VFORK;
  //check pidadr

  ctl$gl_creprc_flags = stsflg;
  // check for PRC$M_NOUAF sometime

  if (stsflg&PRC$M_DETACH) {

  }
  if (uic) {

  }
  //setipl(IPL$_ASTDEL);//postpone this?
  cur=ctl$gl_pcb;
  vmslock(&SPIN_SCHED, IPL$_SCHED);
  vmslock(&SPIN_MMG, IPL$_MMG);
  p = alloc_task_struct();
  //bzero(p,sizeof(struct _pcb));//not wise?
  memset(p,0,sizeof(struct _pcb));

  // check more
  // compensate for no struct clone/copy
  p->sigmask_lock = SPIN_LOCK_UNLOCKED;
  p->alloc_lock = SPIN_LOCK_UNLOCKED;

  qhead_init(&p->pcb$l_astqfl);
  // and enable ast del to all modes

  p->pcb$b_type = DYN$C_PCB;

  p->pcb$b_asten=15;
  p->phd$b_astlvl=4;
  p->pr_astlvl=4;
  p->psl=0;
  p->pslindex=0;

  qhead_init(&p->pcb$l_lockqfl);
  // set capabilities
  p->pcb$l_permanent_capability = sch$gl_default_process_cap;
  p->pcb$l_capability = p->pcb$l_permanent_capability;
  // set affinity
  // set default fileprot
  // set arb
  // set mbx stuff
  // from setprn:
  if (prcnam) {
    struct dsc$descriptor *s=prcnam;
    // note: no NUL termination and no bound against sizeof(pcb$t_lname)
    strncpy(p->pcb$t_lname,s->dsc$a_pointer,s->dsc$w_length);
  }
  // set priv
  p->pcb$l_priv=ctl$gl_pcb->pcb$l_priv;
  // set pris
  p->pcb$b_prib=31-baspri;
  p->pcb$b_pri=31-baspri-6;
  //	if (p->pcb$b_pri<16) p->pcb$b_pri=16;
  p->pcb$w_quant=-QUANTUM;
  
  // set uic
  p->pcb$l_uic=ctl$gl_pcb->pcb$l_uic;
  // set vms pid
  // check process name
  // do something with pqb

  p->pcb$l_pqb=kmalloc(sizeof(struct _pqb),GFP_KERNEL);
  // TODO: handle kmalloc failure before dereferencing
  memset(p->pcb$l_pqb,0,sizeof(struct _pqb));

  struct _pqb * pqb = p->pcb$l_pqb;

  pqb->pqb$q_prvmsk = ctl$gq_procpriv;

  if (imd)
    memcpy(pqb->pqb$t_image,imd->dsc$a_pointer,imd->dsc$w_length);
  if (ind)
    memcpy(pqb->pqb$t_input,ind->dsc$a_pointer,ind->dsc$w_length);
  if (oud)
    memcpy(pqb->pqb$t_output,oud->dsc$a_pointer,oud->dsc$w_length);
  if (erd)
    memcpy(pqb->pqb$t_error,erd->dsc$a_pointer,erd->dsc$w_length);

  if (oud) // temp measure
    memcpy(p->pcb$t_terminal,oud->dsc$a_pointer,oud->dsc$w_length);

  // translate some logicals
  // copy security clearance
  // copy msg
  // copy flags
  // set jib
  // do quotas
  // process itmlst
  // set pcb$l_pqb
#if 0
  setipl(IPL$_MMG);
  vmslock(&SPIN_SCHED,-1);
  // find vacant slot in pcb vector
  // and store it
#endif  
  // make ipid and epid
  p->pcb$l_pid=alloc_ipid();
  {
    struct _pcb **vec=(struct _pcb **)sch$gl_pcbvec; // pcb vector holds PCB pointers, not longs
    vec[p->pcb$l_pid&0xffff]=p;
  }
  p->pcb$l_epid=exe$ipid_to_epid(p->pcb$l_pid);
  // should invoke sch$chse, put this at bottom?
  // setipl(0) and return

  // now lots of things from fork

	retval = -EAGAIN;
	/*
	 * Check if we are over our maximum process limit, but be sure to
	 * exclude root. This is needed to make it possible for login and
	 * friends to set the per-user process limit to something lower
	 * than the amount of processes root is running. -- Rik
	 */
#if 0
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur
	              && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
		goto bad_fork_free;

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
#endif

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */

	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	//copy_flags(clone_flags, p);
	// not here?	p->pcb$l_pid = alloc_ipid();

	p->run_list.next = NULL;
	p->run_list.prev = NULL;

	p->p_cptr = NULL;
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	p->times.tms_utime = p->times.tms_stime = 0;
	p->times.tms_cutime = p->times.tms_cstime = 0;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	INIT_LIST_HEAD(&p->local_pages);

	p->files = current->files;
	p->fs = current->fs;
	p->sig = current->sig;

	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;

	// cleanup stubs: the copy_* failure paths above fall through here;
	// no unwinding is done yet
 bad_fork_cleanup:
 bad_fork_cleanup_files:
 bad_fork_cleanup_fs:

	// now a hole

	// now more from fork

	/* ok, now we should be set up.. */
	p->swappable = 1;
	p->exit_signal = 0;
	p->pdeath_signal = 0;

	/*
	 * "Share" dynamic priority between parent and child, so the
	 * total amount of dynamic priority in the system doesn't change,
	 * giving more scheduling fairness. This only matters in the first
	 * timeslice; in the long run the scheduling behaviour is unchanged.
	 */

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	retval = p->pcb$l_epid;
	INIT_LIST_HEAD(&p->thread_group);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT and CLONE_THREAD re-use the old parent;
	 * here the creator is always made the parent */
	p->p_opptr = current /*->p_opptr*/;
	p->p_pptr = current /*->p_pptr*/;

	SET_LINKS(p);

	nr_threads++;
	write_unlock_irq(&tasklist_lock);

	//	printk("fork befwak\n");
	//wake_up_process(p);		/* do this last */
	//	wake_up_process2(p,PRI$_TICOM);		/* do this last */
	//goto fork_out;//??


	// now something from exec

	// wait, better do execve itself

	memcpy(p->rlim, current->rlim, sizeof(p->rlim));

	qhead_init(&p->pcb$l_sqfl);

	struct mm_struct * mm = mm_alloc();
	p->mm = mm;
	p->active_mm = mm;

	p->user = INIT_USER;

	spin_lock(&mmlist_lock);
#if 0
	list_add(&mm->mmlist, &p->p_pptr->mm->mmlist);
#endif
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	// Now we are getting into the area that is really the swappers

	// To be moved to shell.c and swp$shelinit later

	p->pcb$l_phd=kmalloc(sizeof(struct _phd),GFP_KERNEL);
	init_phd(p->pcb$l_phd);

	init_fork_p1pp(p,p->pcb$l_phd,ctl$gl_pcb,ctl$gl_pcb->pcb$l_phd);
#ifdef __x86_64__
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ffa0000-0x2000,0x7fffe000);
#else
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff80000-0x2000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x1000,0x7fffe000);
	shell_init_other(p,ctl$gl_pcb,0x7ff90000-0x2000,0x7fffe000);
#endif
	int exe$procstrt(struct _pcb * p);
	struct pt_regs * regs = (struct pt_regs *) &pidadr; // placeholder frame; only used by the commented-out start_thread() below
	//printk("newthread %x\n",p),
	retval = new_thread(0, clone_flags, 0, 0, p, 0);

	int eip=0,esp=0;

	//	start_thread(regs,eip,esp);

	sch$chse(p, PRI$_TICOM);

	vmsunlock(&SPIN_MMG,-1);
	vmsunlock(&SPIN_SCHED,0);

	return SS$_NORMAL;

#if 0
	return sys_execve(((struct dsc$descriptor *)image)->dsc$a_pointer,0,0);

	return SS$_NORMAL;
#endif

#if 0
{
  char * filename=((struct dsc$descriptor *)image)->dsc$a_pointer;
  char ** argv=0;
  char ** envp=0;
  struct pt_regs * regs=0;
  struct linux_binprm bprm;
  struct file *file;
  int retval;
  int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
	memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0])); 

	bprm.file = file;
	bprm.filename = filename;
	bprm.sh_bang = 0;
	bprm.loader = 0;
	bprm.exec = 0;
	if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		//printk("here 7 %x\n",bprm.argc);
		return bprm.argc;
	}

	if ((bprm.envc = count(envp, bprm.p / sizeof(void *))) < 0) {
		allow_write_access(file);
		fput(file);
		//printk("here 6\n");
		return bprm.envc;
	}

	retval = prepare_binprm(&bprm);
	//printk("here 4\n");
	if (retval < 0) 
		goto out; 

	retval = copy_strings_kernel(1, &bprm.filename, &bprm);
	//printk("here 3\n");
	if (retval < 0) 
		goto out; 

	bprm.exec = bprm.p;
	retval = copy_strings(bprm.envc, envp, &bprm);
	//printk("here 2\n");
	if (retval < 0) 
		goto out; 

	retval = copy_strings(bprm.argc, argv, &bprm);
	//printk("here 1\n");
	if (retval < 0) 
		goto out; 

	retval = search_binary_handler(&bprm,regs);
	if (retval >= 0)
		/* execve success */
		return retval;

out:
	/* Something went wrong, return the inode and free the argument pages*/
	allow_write_access(bprm.file);
	if (bprm.file)
		fput(bprm.file);

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm.page[i];
		if (page)
			__free_page(page);
	}

	return retval;
}
#endif

fork_out:
	return retval;

bad_fork_free:
	free_task_struct(p);
	goto fork_out;

}
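
For orientation, here is a minimal caller sketch. The descriptor layout (dsc$w_length, dsc$a_pointer) is taken from the memcpy() calls in the example itself; the DSC$K_DTYPE_T/DSC$K_CLASS_S constants, the PRC$M_DETACH flag, and the argument order follow the classic SYS$CREPRC interface and are assumptions about this port, not confirmed by the source.

/* Hedged caller sketch: exercises the exe$creprc() signature above.
 * The descriptor fields match the memcpy() usage in the example;
 * DSC$K_DTYPE_T, DSC$K_CLASS_S and PRC$M_DETACH are the usual VMS
 * constants and are assumed to be available in this port. */
static char image_name[] = "SYS$SYSTEM:LOGINOUT.EXE";
static char term_name[]  = "OPA0:";

static int create_detached_process(void)
{
	unsigned int pid = 0;
	struct dsc$descriptor image = {
		sizeof image_name - 1, DSC$K_DTYPE_T, DSC$K_CLASS_S, image_name
	};
	struct dsc$descriptor term = {
		sizeof term_name - 1, DSC$K_DTYPE_T, DSC$K_CLASS_S, term_name
	};

	/* pidadr, image, input, output, error, prvadr, quota, prcnam,
	 * baspri, uic, mbxunt, stsflg */
	return exe$creprc(&pid, &image, &term, &term, &term,
			  0, 0, 0, 4, 0, 0, PRC$M_DETACH);
}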
Example #2
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
struct task_struct *copy_process(unsigned long clone_flags,
			         unsigned long stack_start,
			         struct pt_regs *regs,
			         unsigned long stack_size,
			         int *parent_tidptr,
			         int *child_tidptr)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);
	if ((clone_flags & CLONE_DETACHED) && !(clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);
	if (!(clone_flags & CLONE_DETACHED) && (clone_flags & CLONE_THREAD))
		return ERR_PTR(-EINVAL);

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	p->tux_info = NULL;

	retval = -EAGAIN;

	/*
	 * Increment user->__count before the rlimit test so that it would
	 * be correct if we take the bad_fork_free failure path.
	 */
	atomic_inc(&p->user->__count);
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;
	
	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	if (clone_flags & CLONE_IDLETASK)
		p->pid = 0;
	else {
		p->pid = alloc_pidmap();
		if (p->pid == -1)
			goto bad_fork_cleanup;
	}
	
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	INIT_LIST_HEAD(&p->run_list);

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->switch_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	memset(&p->utime, 0, sizeof(p->utime));
	memset(&p->stime, 0, sizeof(p->stime));
	memset(&p->cutime, 0, sizeof(p->cutime));
	memset(&p->cstime, 0, sizeof(p->cstime));
	memset(&p->group_utime, 0, sizeof(p->group_utime));
	memset(&p->group_stime, 0, sizeof(p->group_stime));
	memset(&p->group_cutime, 0, sizeof(p->group_cutime));
	memset(&p->group_cstime, 0, sizeof(p->group_cstime));

#ifdef CONFIG_SMP
	memset(&p->per_cpu_utime, 0, sizeof(p->per_cpu_utime));
	memset(&p->per_cpu_stime, 0, sizeof(p->per_cpu_stime));
#endif

	memset(&p->timing_state, 0, sizeof(p->timing_state));
	p->timing_state.type = PROCESS_TIMING_USER;
	p->last_sigxcpu = 0;
	p->array = NULL;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_signal(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_signal;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	p->semundo = NULL;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID)
		? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID)
		? child_tidptr : NULL;

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply. */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	if (clone_flags & CLONE_DETACHED)
		p->exit_signal = -1;
	else
		p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, so the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	local_irq_disable();
	p->time_slice = (current->time_slice + 1) >> 1;
	p->first_time_slice = 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	current->time_slice >>= 1;
	p->last_run = jiffies;
	if (!current->time_slice) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0 /* don't update the time stats */);
	}
	local_irq_enable();

	if ((int)current->time_slice <= 0)
		BUG();
	if ((int)p->time_slice <= 0)
		BUG();

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	p->tgid = p->pid;
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);
	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->group_exit) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EINTR;
			goto bad_fork_cleanup_namespace;
		}
		p->tgid = current->tgid;
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			p->sigpending = 1;
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (p->ptrace & PT_PTRACED)
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_TGID, p->tgid);
		attach_pid(p, PIDTYPE_PGID, p->pgrp);
		attach_pid(p, PIDTYPE_SID, p->session);
	} else {
		link_pid(p, p->pids + PIDTYPE_TGID,
			&p->group_leader->pids[PIDTYPE_TGID].pid);
	}

	/* clear controlling tty of new task if parent's was just cleared */
	if (!current->tty && p->tty)
		p->tty = NULL;

	nr_threads++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
	if (p->active_mm)
		mmdrop(p->active_mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup:
	if (p->pid > 0)
		free_pidmap(p->pid);
	put_exec_domain(p->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
bad_fork_free:
	p->state = TASK_ZOMBIE; /* debug */
	atomic_dec(&p->usage);
	put_task_struct(p);
	goto fork_out;
}
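
As the header comment says, copy_process() builds the child but leaves the kick-off to the caller. Below is a minimal sketch of that caller, patterned on the do_fork() of this kernel generation; wake_up_forked_process() and CLONE_STOPPED match the 2.5-era tree, and the CLONE_VFORK completion handling is deliberately elided, so treat the exact shape as an assumption.

/* Hedged sketch of the calling side: create the child, then make it
 * runnable. Error handling beyond ERR_PTR decoding is omitted. */
static long do_fork_sketch(unsigned long clone_flags,
			   unsigned long stack_start,
			   struct pt_regs *regs,
			   unsigned long stack_size)
{
	struct task_struct *p;

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 NULL, NULL);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* the "kick-off" the header comment leaves to the caller */
	if (!(clone_flags & CLONE_STOPPED))
		wake_up_forked_process(p);
	else
		p->state = TASK_STOPPED;

	return p->pid;
}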